From 0028242dbbf8efab46fb0e25cef649ef7bea1730 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 31 Mar 2021 10:36:16 -0700 Subject: [PATCH 001/998] Change version to 1.38.0-dev (#4306) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index c149b22ad8a1..f73faed82920 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.37.0-dev" +const Version = "1.38.0-dev" From c72e1c8f7528615e2b5b887d279015abb2b6c659 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 31 Mar 2021 16:30:10 -0700 Subject: [PATCH 002/998] xds/resolver: support inline RDS resource from LDS response (#4299) --- xds/internal/client/client.go | 8 +++ xds/internal/client/lds_test.go | 53 ++++++++++++++ xds/internal/client/xds.go | 16 +++-- xds/internal/resolver/watch_service.go | 39 ++++++++--- xds/internal/resolver/watch_service_test.go | 78 +++++++++++++++++++++ 5 files changed, 177 insertions(+), 17 deletions(-) diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 5c0f38a9782f..d06473b02a6b 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -195,7 +195,15 @@ type UpdateMetadata struct { type ListenerUpdate struct { // RouteConfigName is the route configuration name corresponding to the // target which is being watched through LDS. + // + // Only one of RouteConfigName and InlineRouteConfig is set. RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned inside LDS. + // + // Only one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate + // MaxStreamDuration contains the HTTP connection manager's // common_http_protocol_options.max_stream_duration field, or zero if // unset. 
diff --git a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go index df8098df8368..da0e0f956156 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/client/lds_test.go @@ -54,6 +54,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { v3LDSTarget = "lds.target.good:3333" v2RouteConfigName = "v2RouteConfig" v3RouteConfigName = "v3RouteConfig" + routeName = "routeName" ) var ( @@ -132,6 +133,39 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: unknownFilterConfig}, IsOptional: true, } + v3LisWithInlineRoute = &anypb.Any{ + TypeUrl: version.V3ListenerURL, + Value: func() []byte { + hcm := &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{v3LDSTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}}}}}, + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + } + mcm := marshalAny(hcm) + lis := &v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: mcm, + }, + } + mLis, _ := proto.Marshal(lis) + return mLis + }(), + } v3LisWithFilters = func(fs ...*v3httppb.HttpFilter) *anypb.Any { hcm := &v3httppb.HttpConnectionManager{ RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ @@ -650,6 +684,25 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Version: testVersion, }, }, + { + name: "v3 listener with inline route configuration", + resources: []*anypb.Any{v3LisWithInlineRoute}, + wantUpdate: map[string]ListenerUpdate{ + v3LDSTarget: { + InlineRouteConfig: 
&RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{v3LDSTarget}, + Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, + }}}, + MaxStreamDuration: time.Second, + Raw: v3LisWithInlineRoute, + }, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, { name: "multiple listener resources", resources: []*anypb.Any{v2Lis, v3LisWithFilters()}, diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index 2791603ce26e..fc1112e180bc 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -72,7 +72,7 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri } logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, lis) - lu, err := processListener(lis, v2) + lu, err := processListener(lis, logger, v2) if err != nil { return lis.GetName(), ListenerUpdate{}, err } @@ -80,16 +80,16 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri return lis.GetName(), *lu, nil } -func processListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { +func processListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { if lis.GetApiListener() != nil { - return processClientSideListener(lis, v2) + return processClientSideListener(lis, logger, v2) } return processServerSideListener(lis) } // processClientSideListener checks if the provided Listener proto meets // the expected criteria. If so, it returns a non-empty routeConfigName. 
-func processClientSideListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { +func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { update := &ListenerUpdate{} apiLisAny := lis.GetApiListener().GetApiListener() @@ -112,9 +112,11 @@ func processClientSideListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUp } update.RouteConfigName = name case *v3httppb.HttpConnectionManager_RouteConfig: - // TODO: Add support for specifying the RouteConfiguration inline - // in the LDS response. - return nil, fmt.Errorf("LDS response contains RDS config inline. Not supported for now: %+v", apiLis) + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), logger, v2) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + update.InlineRouteConfig = &routeU case nil: return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) default: diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 913ac4ced15c..42ede988300c 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -110,6 +110,22 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er httpFilterConfig: update.HTTPFilters, } + if update.InlineRouteConfig != nil { + // If there was an RDS watch, cancel it. + w.rdsName = "" + if w.rdsCancel != nil { + w.rdsCancel() + w.rdsCancel = nil + } + + // Handle the inline RDS update as if it's from an RDS watch. + w.updateVirtualHostsFromRDS(*update.InlineRouteConfig) + return + } + + // RDS name from update is not an empty string, need RDS to fetch the + // routes. + if w.rdsName == update.RouteConfigName { // If the new RouteConfigName is same as the previous, don't cancel and // restart the RDS watch. 
@@ -126,6 +142,18 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp) } +func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteConfigUpdate) { + matchVh := findBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) + if matchVh == nil { + // No matching virtual host found. + w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) + return + } + + w.lastUpdate.virtualHost = matchVh + w.serviceCb(w.lastUpdate, nil) +} + func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, err error) { w.logger.Infof("received RDS update: %+v, err: %v", update, err) w.mu.Lock() @@ -142,16 +170,7 @@ func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, w.serviceCb(serviceUpdate{}, err) return } - - matchVh := findBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) - if matchVh == nil { - // No matching virtual host found. 
- w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) - return - } - - w.lastUpdate.virtualHost = matchVh - w.serviceCb(w.lastUpdate, nil) + w.updateVirtualHostsFromRDS(update) } func (w *serviceUpdateWatcher) close() { diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 705a3d35ae1b..2bfe3e984d3c 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -356,3 +356,81 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } } + +// TestServiceWatchInlineRDS covers the cases switching between: +// - LDS update contains RDS name to watch +// - LDS update contains inline RDS resource +func (s) TestServiceWatchInlineRDS(t *testing.T) { + serviceUpdateCh := testutils.NewChannel() + xdsC := fakeclient.NewClient() + cancelWatch := watchService(xdsC, targetStr, func(update serviceUpdate, err error) { + serviceUpdateCh.Send(serviceUpdateErr{u: update, err: err}) + }, nil) + defer cancelWatch() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // First LDS update is LDS with RDS name to watch. 
+ waitForWatchListener(ctx, t, xdsC, targetStr) + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + VirtualHosts: []*xdsclient.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + }, + }, + }, nil) + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Switch LDS resp to a LDS with inline RDS resource + wantVirtualHosts2 := &xdsclient.VirtualHost{Domains: []string{"target"}, + Routes: []*xdsclient.Route{{ + Path: newStringP(""), + WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}, + }}, + } + wantUpdate2 := serviceUpdate{virtualHost: wantVirtualHosts2} + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{InlineRouteConfig: &xdsclient.RouteConfigUpdate{ + VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, + }}, nil) + // This inline RDS resource should cause the RDS watch to be canceled. + if err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + t.Fatalf("wait for cancel route watch failed: %v, want nil", err) + } + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Switch LDS update back to LDS with RDS name to watch. 
+ xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + VirtualHosts: []*xdsclient.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + }, + }, + }, nil) + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Switch LDS resp to a LDS with inline RDS resource again. + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{InlineRouteConfig: &xdsclient.RouteConfigUpdate{ + VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, + }}, nil) + // This inline RDS resource should cause the RDS watch to be canceled. + if err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + t.Fatalf("wait for cancel route watch failed: %v, want nil", err) + } + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { + t.Fatal(err) + } +} From f6bb3972ed15a0aaf47730344c47e9840bb5dbba Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 31 Mar 2021 16:58:24 -0700 Subject: [PATCH 003/998] xds: filter chain matching logic for server-side (#4281) --- xds/internal/client/client.go | 56 +- xds/internal/client/filter_chain.go | 552 +++++++ xds/internal/client/filter_chain_test.go | 1316 +++++++++++++++++ xds/internal/client/lds_test.go | 844 ++++------- xds/internal/client/xds.go | 110 +- xds/internal/server/conn_wrapper.go | 36 +- xds/internal/server/listener_wrapper.go | 143 +- xds/internal/server/listener_wrapper_test.go | 335 +++++ .../test/xds_server_integration_test.go | 19 + xds/server_test.go | 65 +- 10 files changed, 2655 insertions(+), 821 deletions(-) create mode 100644 xds/internal/client/filter_chain.go create mode 100644 xds/internal/client/filter_chain_test.go create mode 100644 
xds/internal/server/listener_wrapper_test.go diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index d06473b02a6b..37eaae79f2d5 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -24,7 +24,6 @@ import ( "context" "errors" "fmt" - "net" "sync" "time" @@ -241,60 +240,7 @@ type InboundListenerConfig struct { // accept incoming connections. Port string // FilterChains is the list of filter chains associated with this listener. - FilterChains []*FilterChain - // DefaultFilterChain is the filter chain to be used when none of the above - // filter chains matches an incoming connection. - DefaultFilterChain *FilterChain -} - -// FilterChain wraps a set of match criteria and associated security -// configuration. -// -// The actual set filters associated with this filter chain are not captured -// here, since we do not support these filters on the server yet. -type FilterChain struct { - // Match contains the criteria to use when matching a connection to this - // filter chain. - Match *FilterChainMatch - // SecurityCfg contains transport socket security configuration. - SecurityCfg *SecurityConfig -} - -// SourceType specifies the connection source IP match type. -type SourceType int - -const ( - // SourceTypeAny matches connection attempts from any source. - SourceTypeAny SourceType = iota - // SourceTypeSameOrLoopback matches connection attempts from the same host. - SourceTypeSameOrLoopback - // SourceTypeExternal matches connection attempts from a different host. - SourceTypeExternal -) - -// FilterChainMatch specifies the match criteria for selecting a specific filter -// chain of a listener, for an incoming connection. -// -// The xDS FilterChainMatch proto specifies 8 match criteria. But we only have a -// subset of those fields here because we explicitly ignore filter chains whose -// match criteria specifies values for fields like destination_port, -// server_names, application_protocols, transport_protocol. 
-type FilterChainMatch struct { - // DestPrefixRanges specifies a set of IP addresses and prefix lengths to - // match the destination address of the incoming connection when the - // listener is bound to 0.0.0.0/[::]. If this field is empty, the - // destination address is ignored. - DestPrefixRanges []net.IP - // SourceType specifies the connection source IP match type. Can be any, - // local or external network. - SourceType SourceType - // SourcePrefixRanges specifies a set of IP addresses and prefix lengths to - // match the source address of the incoming connection. If this field is - // empty, the source address is ignored. - SourcePrefixRanges []net.IP - // SourcePorts specifies a set of ports to match the source port of the - // incoming connection. If this field is empty, the source port is ignored. - SourcePorts []uint32 + FilterChains *FilterChainManager } // RouteConfigUpdate contains information received in an RDS response, which is diff --git a/xds/internal/client/filter_chain.go b/xds/internal/client/filter_chain.go new file mode 100644 index 000000000000..8e24ab858230 --- /dev/null +++ b/xds/internal/client/filter_chain.go @@ -0,0 +1,552 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package client + +import ( + "errors" + "fmt" + "net" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/golang/protobuf/proto" + + "google.golang.org/grpc/xds/internal/version" +) + +// Represents a wildcard IP prefix. Go stdlib `Contains()` method works for both +// v4 and v6 addresses when used on this wildcard address. +const emptyAddrMapKey = "0.0.0.0/0" + +var ( + // Parsed wildcard IP prefix. + _, zeroIP, _ = net.ParseCIDR("0.0.0.0/0") +) + +// FilterChain captures information from within a FilterChain message in a +// Listener resource. +// +// Currently, this simply contains the security configuration found in the +// 'transport_socket' field of the filter chain. The actual set of filters +// associated with this filter chain are not captured here, since we do not +// support these filters on the server-side yet. +type FilterChain struct { + // SecurityCfg contains transport socket security configuration. + SecurityCfg *SecurityConfig +} + +// SourceType specifies the connection source IP match type. +type SourceType int + +const ( + // SourceTypeAny matches connection attempts from any source. + SourceTypeAny SourceType = iota + // SourceTypeSameOrLoopback matches connection attempts from the same host. + SourceTypeSameOrLoopback + // SourceTypeExternal matches connection attempts from a different host. + SourceTypeExternal +) + +// FilterChainManager contains all the match criteria specified through all +// filter chains in a single Listener resource. It also contains the default +// filter chain specified in the Listener resource. It provides two important +// pieces of functionality: +// 1. Validate the filter chains in an incoming Listener resource to make sure +// that there aren't filter chains which contain the same match criteria. +// 2. 
As part of performing the above validation, it builds an internal data +// structure which will if used to look up the matching filter chain at +// connection time. +// +// The logic specified in the documentation around the xDS FilterChainMatch +// proto mentions 8 criteria to match on. gRPC does not support 4 of those, and +// we ignore filter chains which contain any of these unsupported fields at +// parsing time. Here we use the remaining 4 criteria to find a matching filter +// chain in the following order: +// Destination IP address, Source type, Source IP address, Source port. +// TODO: Ignore chains with unsupported fields *only* at connection time. +type FilterChainManager struct { + // Destination prefix is the first match criteria that we support. + // Therefore, this multi-stage map is indexed on destination prefixes + // specified in the match criteria. + // Unspecified destination prefix matches end up as a wildcard entry here + // with a key of 0.0.0.0/0. + dstPrefixMap map[string]*destPrefixEntry + + // At connection time, we do not have the actual destination prefix to match + // on. We only have the real destination address of the incoming connection. + // This means that we cannot use the above map at connection time. This list + // contains the map entries from the above map that we can use at connection + // time to find matching destination prefixes in O(n) time. + // + // TODO: Implement LC-trie to support logarithmic time lookups. If that + // involves too much time/effort, sort this slice based on the netmask size. + dstPrefixes []*destPrefixEntry + + def *FilterChain // Default filter chain, if specified. + fcCnt int // Count of supported filter chains, for validation. +} + +// destPrefixEntry is the value type of the map indexed on destination prefixes. +type destPrefixEntry struct { + net *net.IPNet // The actual destination prefix. 
+ // For each specified source type in the filter chain match criteria, this + // array points to the set of specified source prefixes. + // Unspecified source type matches end up as a wildcard entry here with an + // index of 0, which actually represents the source type `ANY`. + srcTypeArr [3]*sourcePrefixes +} + +// sourcePrefixes contains source prefix related information specified in the +// match criteria. These are pointed to by the array of source types. +type sourcePrefixes struct { + // These are very similar to the 'dstPrefixMap' and 'dstPrefixes' field of + // FilterChainManager. Go there for more info. + srcPrefixMap map[string]*sourcePrefixEntry + srcPrefixes []*sourcePrefixEntry +} + +// sourcePrefixEntry contains match criteria per source prefix. +type sourcePrefixEntry struct { + net *net.IPNet // The actual source prefix. + // Mapping from source ports specified in the match criteria to the actual + // filter chain. Unspecified source port matches en up as a wildcard entry + // here with a key of 0. + srcPortMap map[int]*FilterChain +} + +// NewFilterChainManager parses the received Listener resource and builds a +// FilterChainManager. Returns a non-nil error on validation failures. +// +// This function is only exported so that tests outside of this package can +// create a FilterChainManager. +func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { + // Parse all the filter chains and build the internal data structures. + fci := &FilterChainManager{dstPrefixMap: make(map[string]*destPrefixEntry)} + if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { + return nil, err + } + + // Retrieve the default filter chain. The match criteria specified on the + // default filter chain is never used. The default filter chain simply gets + // used when none of the other filter chains match. 
+ var def *FilterChain + if dfc := lis.GetDefaultFilterChain(); dfc != nil { + var err error + if def, err = filterChainFromProto(dfc); err != nil { + return nil, err + } + } + fci.def = def + + // If there are no supported filter chains and no default filter chain, we + // fail here. This will call the Listener resource to be NACK'ed. + if fci.fcCnt == 0 && fci.def == nil { + return nil, fmt.Errorf("no supported filter chains and no default filter chain") + } + return fci, nil +} + +// addFilterChains parses the filter chains in fcs and adds the required +// internal data structures corresponding to the match criteria. +func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) error { + for _, fc := range fcs { + // Skip filter chains with unsupported match fields/criteria. + fcm := fc.GetFilterChainMatch() + if fcm.GetDestinationPort().GetValue() != 0 || + fcm.GetServerNames() != nil || + (fcm.GetTransportProtocol() != "" && fcm.TransportProtocol != "raw_buffer") || + fcm.GetApplicationProtocols() != nil { + continue + } + + // Extract the supported match criteria, which will be used by + // successive addFilterChainsForXxx() functions. 
+ var dstPrefixes []*net.IPNet + for _, pr := range fcm.GetPrefixRanges() { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse destination prefix range: %+v", pr) + } + dstPrefixes = append(dstPrefixes, ipnet) + } + + var srcType SourceType + switch fcm.GetSourceType() { + case v3listenerpb.FilterChainMatch_ANY: + srcType = SourceTypeAny + case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: + srcType = SourceTypeSameOrLoopback + case v3listenerpb.FilterChainMatch_EXTERNAL: + srcType = SourceTypeExternal + default: + return fmt.Errorf("unsupported source type: %v", fcm.GetSourceType()) + } + + var srcPrefixes []*net.IPNet + for _, pr := range fcm.GetSourcePrefixRanges() { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse source prefix range: %+v", pr) + } + srcPrefixes = append(srcPrefixes, ipnet) + } + + var srcPorts []int + for _, port := range fcm.GetSourcePorts() { + srcPorts = append(srcPorts, int(port)) + } + + // Build the internal representation of the filter chain match fields. + if err := fci.addFilterChainsForDestPrefixes(dstPrefixes, srcType, srcPrefixes, srcPorts, fc); err != nil { + return err + } + fci.fcCnt++ + } + + // Build the source and dest prefix slices used by Lookup(). + for _, dstPrefix := range fci.dstPrefixMap { + fci.dstPrefixes = append(fci.dstPrefixes, dstPrefix) + for _, st := range dstPrefix.srcTypeArr { + if st == nil { + continue + } + for _, srcPrefix := range st.srcPrefixMap { + st.srcPrefixes = append(st.srcPrefixes, srcPrefix) + } + } + } + return nil +} + +// addFilterChainsForDestPrefixes adds destination prefixes to the internal data +// structures and delegates control to addFilterChainsForSourceType to continue +// building the internal data structure. 
+func (fci *FilterChainManager) addFilterChainsForDestPrefixes(dstPrefixes []*net.IPNet, srcType SourceType, srcPrefixes []*net.IPNet, srcPorts []int, fc *v3listenerpb.FilterChain) error { + if len(dstPrefixes) == 0 { + // Use the wildcard IP when destination prefix is unspecified. + if fci.dstPrefixMap[emptyAddrMapKey] == nil { + fci.dstPrefixMap[emptyAddrMapKey] = &destPrefixEntry{net: zeroIP} + } + return fci.addFilterChainsForSourceType(fci.dstPrefixMap[emptyAddrMapKey], srcType, srcPrefixes, srcPorts, fc) + } + for _, prefix := range dstPrefixes { + p := prefix.String() + if fci.dstPrefixMap[p] == nil { + fci.dstPrefixMap[p] = &destPrefixEntry{net: prefix} + } + if err := fci.addFilterChainsForSourceType(fci.dstPrefixMap[p], srcType, srcPrefixes, srcPorts, fc); err != nil { + return err + } + } + return nil +} + +// addFilterChainsForSourceType adds source types to the internal data +// structures and delegates control to addFilterChainsForSourcePrefixes to +// continue building the internal data structure. +func (fci *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefixEntry, srcType SourceType, srcPrefixes []*net.IPNet, srcPorts []int, fc *v3listenerpb.FilterChain) error { + st := int(srcType) + if dstEntry.srcTypeArr[st] == nil { + dstEntry.srcTypeArr[st] = &sourcePrefixes{srcPrefixMap: make(map[string]*sourcePrefixEntry)} + } + return fci.addFilterChainsForSourcePrefixes(dstEntry.srcTypeArr[st].srcPrefixMap, srcPrefixes, srcPorts, fc) +} + +// addFilterChainsForSourcePrefixes adds source prefixes to the internal data +// structures and delegates control to addFilterChainsForSourcePorts to continue +// building the internal data structure. +func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, srcPrefixes []*net.IPNet, srcPorts []int, fc *v3listenerpb.FilterChain) error { + if len(srcPrefixes) == 0 { + // Use the wildcard IP when source prefix is unspecified. 
+ if srcPrefixMap[emptyAddrMapKey] == nil { + srcPrefixMap[emptyAddrMapKey] = &sourcePrefixEntry{ + net: zeroIP, + srcPortMap: make(map[int]*FilterChain), + } + } + return fci.addFilterChainsForSourcePorts(srcPrefixMap[emptyAddrMapKey], srcPorts, fc) + } + for _, prefix := range srcPrefixes { + p := prefix.String() + if srcPrefixMap[p] == nil { + srcPrefixMap[p] = &sourcePrefixEntry{ + net: prefix, + srcPortMap: make(map[int]*FilterChain), + } + } + if err := fci.addFilterChainsForSourcePorts(srcPrefixMap[p], srcPorts, fc); err != nil { + return err + } + } + return nil +} + +// addFilterChainsForSourcePorts adds source ports to the internal data +// structures and completes the process of building the internal data structure. +// It is here that we determine if there are multiple filter chains with +// overlapping matching rules. +func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, srcPorts []int, fcProto *v3listenerpb.FilterChain) error { + fc, err := filterChainFromProto(fcProto) + if err != nil { + return err + } + + if len(srcPorts) == 0 { + // Use the wildcard port '0', when source ports are unspecified. + if curFC := srcEntry.srcPortMap[0]; curFC != nil { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + srcEntry.srcPortMap[0] = fc + return nil + } + for _, port := range srcPorts { + if curFC := srcEntry.srcPortMap[port]; curFC != nil { + return errors.New("multiple filter chains with overlapping matching rules are defined") + } + srcEntry.srcPortMap[port] = fc + } + return nil +} + +// filterChainFromProto extracts the relevant information from the FilterChain +// proto and stores it in our internal representation. Currently, we only +// process the security configuration stored in the transport_socket field. 
+func filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { + // If the transport_socket field is not specified, it means that the control + // plane has not sent us any security config. This is fine and the server + // will use the fallback credentials configured as part of the + // xdsCredentials. + ts := fc.GetTransportSocket() + if ts == nil { + return &FilterChain{}, nil + } + if name := ts.GetName(); name != transportSocketName { + return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) + } + any := ts.GetTypedConfig() + if any == nil || any.TypeUrl != version.V3DownstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) + } + downstreamCtx := &v3tlspb.DownstreamTlsContext{} + if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil { + return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) + } + if downstreamCtx.GetCommonTlsContext() == nil { + return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") + } + sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext()) + if err != nil { + return nil, err + } + if sc.IdentityInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + } + sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() + if sc.RequireClientCert && sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") + } + return &FilterChain{SecurityCfg: sc}, nil +} + +// FilterChainLookupParams wraps parameters to be passed to Lookup. 
+type FilterChainLookupParams struct { + // IsUnspecified indicates whether the server is listening on a wildcard + // address, "0.0.0.0" for IPv4 and "::" for IPv6. Only when this is set to + // true, do we consider the destination prefixes specified in the filter + // chain match criteria. + IsUnspecifiedListener bool + // DestAddr is the local address of an incoming connection. + DestAddr net.IP + // SourceAddr is the remote address of an incoming connection. + SourceAddr net.IP + // SourcePort is the remote port of an incoming connection. + SourcePort int +} + +// Lookup returns the most specific matching filter chain to be used for an +// incoming connection on the server side. +// +// Returns a non-nil error if no matching filter chain could be found or +// multiple matching filter chains were found, and in both cases, the incoming +// connection must be dropped. +func (fci *FilterChainManager) Lookup(params FilterChainLookupParams) (*FilterChain, error) { + dstPrefixes := filterByDestinationPrefixes(fci.dstPrefixes, params.IsUnspecifiedListener, params.DestAddr) + if len(dstPrefixes) == 0 { + if fci.def != nil { + return fci.def, nil + } + return nil, fmt.Errorf("no matching filter chain based on destination prefix match for %+v", params) + } + + srcType := SourceTypeExternal + if params.SourceAddr.Equal(params.DestAddr) || params.SourceAddr.IsLoopback() { + srcType = SourceTypeSameOrLoopback + } + srcPrefixes := filterBySourceType(dstPrefixes, srcType) + if len(srcPrefixes) == 0 { + if fci.def != nil { + return fci.def, nil + } + return nil, fmt.Errorf("no matching filter chain based on source type match for %+v", params) + } + srcPrefixEntry, err := filterBySourcePrefixes(srcPrefixes, params.SourceAddr) + if err != nil { + return nil, err + } + if fc := filterBySourcePorts(srcPrefixEntry, params.SourcePort); fc != nil { + return fc, nil + } + if fci.def != nil { + return fci.def, nil + } + return nil, fmt.Errorf("no matching filter chain after all match 
criteria for %+v", params) +} + +// filterByDestinationPrefixes is the first stage of the filter chain +// matching algorithm. It takes the complete set of configured filter chain +// matchers and returns the most specific matchers based on the destination +// prefix match criteria (the prefixes which match the most number of bits). +func filterByDestinationPrefixes(dstPrefixes []*destPrefixEntry, isUnspecified bool, dstAddr net.IP) []*destPrefixEntry { + if !isUnspecified { + // Destination prefix matchers are considered only when the listener is + // bound to the wildcard address. + return dstPrefixes + } + var ( + matchingDstPrefixes []*destPrefixEntry + maxSubnetMatch int + ) + for _, prefix := range dstPrefixes { + if !prefix.net.Contains(dstAddr) { + continue + } + matchSize, _ := prefix.net.Mask.Size() + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingDstPrefixes = make([]*destPrefixEntry, 0, 1) + } + matchingDstPrefixes = append(matchingDstPrefixes, prefix) + } + return matchingDstPrefixes +} + +// filterBySourceType is the second stage of the matching algorithm. It +// trims the filter chains based on the most specific source type match. 
+func filterBySourceType(dstPrefixes []*destPrefixEntry, srcType SourceType) []*sourcePrefixes { + var ( + srcPrefixes []*sourcePrefixes + bestSrcTypeMatch int + ) + for _, prefix := range dstPrefixes { + var ( + srcPrefix *sourcePrefixes + match int + ) + switch srcType { + case SourceTypeExternal: + match = int(SourceTypeExternal) + srcPrefix = prefix.srcTypeArr[match] + case SourceTypeSameOrLoopback: + match = int(SourceTypeSameOrLoopback) + srcPrefix = prefix.srcTypeArr[match] + } + if srcPrefix == nil { + match = int(SourceTypeAny) + srcPrefix = prefix.srcTypeArr[match] + } + if match < bestSrcTypeMatch { + continue + } + if match > bestSrcTypeMatch { + bestSrcTypeMatch = match + srcPrefixes = make([]*sourcePrefixes, 0) + } + if srcPrefix != nil { + // The source type array always has 3 entries, but these could be + // nil if the appropriate source type match was not specified. + srcPrefixes = append(srcPrefixes, srcPrefix) + } + } + return srcPrefixes +} + +// filterBySourcePrefixes is the third stage of the filter chain matching +// algorithm. It trims the filter chains based on the source prefix. At most one +// filter chain with the most specific match progress to the next stage. +func filterBySourcePrefixes(srcPrefixes []*sourcePrefixes, srcAddr net.IP) (*sourcePrefixEntry, error) { + var ( + matchingSrcPrefixes []*sourcePrefixEntry + maxSubnetMatch int + ) + for _, sp := range srcPrefixes { + for _, prefix := range sp.srcPrefixes { + if !prefix.net.Contains(srcAddr) { + continue + } + matchSize, _ := prefix.net.Mask.Size() + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingSrcPrefixes = make([]*sourcePrefixEntry, 0, 1) + } + matchingSrcPrefixes = append(matchingSrcPrefixes, prefix) + } + } + if len(matchingSrcPrefixes) == 0 { + // Finding no match is not an error condition. The caller will end up + // using the default filter chain if one was configured. 
+ return nil, nil + } + // We expect at most a single matching source prefix entry at this point. If + // we have multiple entries here, and some of their source port matchers had + // wildcard entries, we could be left with more than one matching filter + // chain and hence would have been flagged as an invalid configuration at + // config validation time. + if len(matchingSrcPrefixes) != 1 { + return nil, errors.New("multiple matching filter chains") + } + return matchingSrcPrefixes[0], nil +} + +// filterBySourcePorts is the last stage of the filter chain matching +// algorithm. It trims the filter chains based on the source ports. +func filterBySourcePorts(spe *sourcePrefixEntry, srcPort int) *FilterChain { + if spe == nil { + return nil + } + // A match could be a wildcard match (this happens when the match + // criteria does not specify source ports) or a specific port match (this + // happens when the match criteria specifies a set of ports and the source + // port of the incoming connection matches one of the specified ports). The + // latter is considered to be a more specific match. + if fc := spe.srcPortMap[srcPort]; fc != nil { + return fc + } + if fc := spe.srcPortMap[0]; fc != nil { + return fc + } + return nil +} diff --git a/xds/internal/client/filter_chain_test.go b/xds/internal/client/filter_chain_test.go new file mode 100644 index 000000000000..ba0ed944160d --- /dev/null +++ b/xds/internal/client/filter_chain_test.go @@ -0,0 +1,1316 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package client + +import ( + "fmt" + "net" + "strings" + "testing" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + "google.golang.org/grpc/xds/internal/version" +) + +func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + }{ + { + desc: "unsupported destination port field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{DestinationPort: &wrapperspb.UInt32Value{Value: 666}}, + }, + }, + }, + }, + { + desc: "unsupported server names field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ServerNames: []string{"example-server"}}, + }, + }, + }, + }, + { + desc: "unsupported transport protocol ", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{TransportProtocol: "tls"}, + }, + }, + }, + }, + { + desc: "unsupported application protocol field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ApplicationProtocols: []string{"h2"}}, + }, + }, + }, + }, + { + desc: "bad dest address prefix", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: 
[]*v3corepb.CidrRange{{AddressPrefix: "a.b.c.d"}}}, + }, + }, + }, + }, + { + desc: "bad dest prefix length", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.1.1.0", 50)}}, + }, + }, + }, + }, + { + desc: "bad source address prefix", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePrefixRanges: []*v3corepb.CidrRange{{AddressPrefix: "a.b.c.d"}}}, + }, + }, + }, + }, + { + desc: "bad source prefix length", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.1.1.0", 50)}}, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if fci, err := NewFilterChainManager(test.lis); err == nil { + t.Fatalf("NewFilterChainManager() returned %v when expected to fail", fci) + } + }) + } +} + +func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + }{ + { + desc: "matching destination prefixes with no other matchers", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16), cidrRangeFromAddressAndPrefixLen("10.0.0.0", 0)}, + }, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.2.2", 16)}, + }, + }, + }, + }, + }, + { + desc: "matching source type", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: 
v3listenerpb.FilterChainMatch_ANY}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_EXTERNAL}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_EXTERNAL}, + }, + }, + }, + }, + { + desc: "matching source prefixes", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16), cidrRangeFromAddressAndPrefixLen("10.0.0.0", 0)}, + }, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.2.2", 16)}, + }, + }, + }, + }, + }, + { + desc: "matching source ports", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3, 4, 5}}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{5, 6, 7}}, + }, + }, + }, + }, + } + + const wantErr = "multiple filter chains with overlapping matching rules are defined" + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + _, err := NewFilterChainManager(test.lis) + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, wantErr) + } + }) + } +} + +func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + wantErr string + }{ + { + desc: "no filter chains", + lis: &v3listenerpb.Listener{}, + wantErr: "no supported filter chains and no default filter chain", + }, + { + desc: "unexpected transport 
socket name", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{Name: "unsupported-transport-socket-name"}, + }, + }, + }, + wantErr: "transport_socket field has unexpected name", + }, + { + desc: "unexpected transport socket URL", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.UpstreamTlsContext{}), + }, + }, + }, + }, + }, + wantErr: "transport_socket field has unexpected typeURL", + }, + { + desc: "badly marshaled transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3DownstreamTLSContextURL, + Value: []byte{1, 2, 3, 4}, + }, + }, + }, + }, + }, + }, + wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", + }, + { + desc: "missing CommonTlsContext", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{}), + }, + }, + }, + }, + }, + wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", + }, + { + desc: "unsupported validation context in transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: 
&v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + }), + }, + }, + }, + }, + }, + wantErr: "validation context contains unexpected type", + }, + { + desc: "no root certificate provider with require_client_cert", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + }, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + }, + { + desc: "no identity certificate provider", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{}, + }), + }, + }, + }, + }, + }, + wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + _, err := NewFilterChainManager(test.lis) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) + } + }) + } +} + +func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { + tests := []struct { + 
desc string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + desc: "empty transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{}, + fcCnt: 1, + }, + }, + { + desc: "no validation context", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + }, + }), + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: 
{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + }, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + }, + }, + fcCnt: 1, + }, + }, + { + desc: "validation context with certificate provider", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + 
ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultRootPluginInstance", + CertificateName: "defaultRootCertName", + }, + }, + }, + }), + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + RequireClientCert: true, + }, + }, + fcCnt: 1, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{})) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + desc: "multiple destination prefixes", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: 
[]*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}}, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "10.0.0.0/8": { + net: ipNetFromCIDR("10.0.0.0/8"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{}, + fcCnt: 3, + }, + }, + { + desc: "multiple source types", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: 
ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{}, + fcCnt: 2, + }, + }, + { + desc: "multiple source prefixes", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "10.0.0.0/8": { + net: ipNetFromCIDR("10.0.0.0/8"), + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.0.0/16"), + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{}, + fcCnt: 2, + }, + }, + { + desc: "multiple source ports", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourceType: 
v3listenerpb.FilterChainMatch_EXTERNAL, + SourcePorts: []uint32{1, 2, 3}, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 1: {}, + 2: {}, + 3: {}, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.0.0/16"), + srcPortMap: map[int]*FilterChain{ + 1: {}, + 2: {}, + 3: {}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{}, + fcCnt: 2, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{})) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +func TestLookup_Failures(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + params FilterChainLookupParams + wantErr string + }{ + { + desc: "no destination prefix match", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(10, 1, 1, 1), + }, + wantErr: "no matching filter chain based on destination prefix match", + }, + { + desc: "no source type match", + lis: &v3listenerpb.Listener{ + FilterChains: 
[]*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + }, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 2), + }, + wantErr: "no matching filter chain based on source type match", + }, + { + desc: "no source prefix match", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + }, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + }, + wantErr: "no matching filter chain after all match criteria", + }, + { + desc: "multiple matching filter chains", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourcePorts: []uint32{1}, + }, + }, + }, + }, + params: FilterChainLookupParams{ + // IsUnspecified is not set. This means that the destination + // prefix matchers will be ignored. 
+ DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 1, + }, + wantErr: "multiple matching filter chains", + }, + { + desc: "no default filter chain", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + }, + }, + }, + params: FilterChainLookupParams{ + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 80, + }, + wantErr: "no matching filter chain after all match criteria", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + fci, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() failed: %v", err) + } + fc, err := fci.Lookup(test.params) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("FilterChainManager.Lookup(%v) = (%v, %v) want (nil, %s)", test.params, fc, err, test.wantErr) + } + }) + } +} + +func TestLookup_Successes(t *testing.T) { + lisWithDefaultChain := &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: "instance1"}, + }, + }), + }, + }, + }, + }, + // A default filter chain with an empty transport socket. 
+ DefaultFilterChain: &v3listenerpb.FilterChain{ + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: "default"}, + }, + }), + }, + }, + }, + } + lisWithoutDefaultChain := &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: transportSocketWithInstanceName("wildcard"), + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + TransportSocket: transportSocketWithInstanceName("any-destination-prefix"), + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-wildcard-source-type"), + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type"), + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.92.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type-specific-source-prefix"), + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: 
[]*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.92.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + SourcePorts: []uint32{80}, + }, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"), + }, + }, + } + + tests := []struct { + desc string + lis *v3listenerpb.Listener + params FilterChainLookupParams + wantFC *FilterChain + }{ + { + desc: "default filter chain", + lis: lisWithDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(10, 1, 1, 1), + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "default"}}, + }, + { + desc: "wildcard destination match", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(10, 1, 1, 1), + SourceAddr: net.IPv4(10, 1, 1, 1), + SourcePort: 1, + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard"}}, + }, + { + desc: "ANY destination match", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(10, 1, 1, 1), + SourceAddr: net.IPv4(10, 1, 1, 2), + SourcePort: 1, + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "any-destination-prefix"}}, + }, + { + desc: "specific destination and wildcard source type match", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 80, + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-wildcard-source-type"}}, + }, + { + desc: "specific destination and source type match", + lis: lisWithoutDefaultChain, + params: 
FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 1, 1), + SourceAddr: net.IPv4(10, 1, 1, 1), + SourcePort: 80, + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type"}}, + }, + { + desc: "specific destination source type and source prefix", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 1, 1), + SourceAddr: net.IPv4(192, 168, 92, 100), + SourcePort: 70, + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix"}}, + }, + { + desc: "specific destination source type source prefix and source port", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 1, 1), + SourceAddr: net.IPv4(192, 168, 92, 100), + SourcePort: 80, + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"}}, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + fci, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() failed: %v", err) + } + gotFC, err := fci.Lookup(test.params) + if err != nil { + t.Fatalf("FilterChainManager.Lookup(%v) failed: %v", test.params, err) + } + if !cmp.Equal(gotFC, test.wantFC) { + t.Fatalf("FilterChainManager.Lookup(%v) = %v, want %v", test.params, gotFC, test.wantFC) + } + }) + } +} + +// The Equal() methods defined below help with using cmp.Equal() on these types +// which contain all unexported fields. 
+ +func (fci *FilterChainManager) Equal(other *FilterChainManager) bool { + if (fci == nil) != (other == nil) { + return false + } + if fci == nil { + return true + } + switch { + case !cmp.Equal(fci.dstPrefixMap, other.dstPrefixMap): + return false + case !cmp.Equal(fci.def, other.def): + return false + case fci.fcCnt != other.fcCnt: + return false + } + return true +} + +func (dpe *destPrefixEntry) Equal(other *destPrefixEntry) bool { + if (dpe == nil) != (other == nil) { + return false + } + if dpe == nil { + return true + } + if !cmp.Equal(dpe.net, other.net) { + return false + } + for i, st := range dpe.srcTypeArr { + if !cmp.Equal(st, other.srcTypeArr[i]) { + return false + } + } + return true +} + +func (sp *sourcePrefixes) Equal(other *sourcePrefixes) bool { + if (sp == nil) != (other == nil) { + return false + } + if sp == nil { + return true + } + return cmp.Equal(sp.srcPrefixMap, other.srcPrefixMap) +} + +func (spe *sourcePrefixEntry) Equal(other *sourcePrefixEntry) bool { + if (spe == nil) != (other == nil) { + return false + } + if spe == nil { + return true + } + switch { + case !cmp.Equal(spe.net, other.net): + return false + case !cmp.Equal(spe.srcPortMap, other.srcPortMap): + return false + } + return true +} + +// The String() methods defined below help with debugging test failures as the +// regular %v or %+v formatting directives do not expands pointer fields inside +// structs, and these types have a lot of pointers pointing to other structs. 
+func (fci *FilterChainManager) String() string { + if fci == nil { + return "" + } + + var sb strings.Builder + if fci.dstPrefixMap != nil { + sb.WriteString("destination_prefix_map: map {\n") + for k, v := range fci.dstPrefixMap { + sb.WriteString(fmt.Sprintf("%q: %v\n", k, v)) + } + sb.WriteString("}\n") + } + if fci.dstPrefixes != nil { + sb.WriteString("destination_prefixes: [") + for _, p := range fci.dstPrefixes { + sb.WriteString(fmt.Sprintf("%v ", p)) + } + sb.WriteString("]") + } + if fci.def != nil { + sb.WriteString(fmt.Sprintf("default_filter_chain: %+v ", fci.def)) + } + sb.WriteString(fmt.Sprintf("filter_chain_count: %d ", fci.fcCnt)) + return sb.String() +} + +func (dpe *destPrefixEntry) String() string { + if dpe == nil { + return "" + } + var sb strings.Builder + if dpe.net != nil { + sb.WriteString(fmt.Sprintf("destination_prefix: %s ", dpe.net.String())) + } + sb.WriteString("source_types_array: [") + for _, st := range dpe.srcTypeArr { + sb.WriteString(fmt.Sprintf("%v ", st)) + } + sb.WriteString("]") + return sb.String() +} + +func (sp *sourcePrefixes) String() string { + if sp == nil { + return "" + } + var sb strings.Builder + if sp.srcPrefixMap != nil { + sb.WriteString("source_prefix_map: map {") + for k, v := range sp.srcPrefixMap { + sb.WriteString(fmt.Sprintf("%q: %v ", k, v)) + } + sb.WriteString("}") + } + if sp.srcPrefixes != nil { + sb.WriteString("source_prefixes: [") + for _, p := range sp.srcPrefixes { + sb.WriteString(fmt.Sprintf("%v ", p)) + } + sb.WriteString("]") + } + return sb.String() +} + +func (spe *sourcePrefixEntry) String() string { + if spe == nil { + return "" + } + var sb strings.Builder + if spe.net != nil { + sb.WriteString(fmt.Sprintf("source_prefix: %s ", spe.net.String())) + } + if spe.srcPortMap != nil { + sb.WriteString("source_ports_map: map {") + for k, v := range spe.srcPortMap { + sb.WriteString(fmt.Sprintf("%d: %+v ", k, v)) + } + sb.WriteString("}") + } + return sb.String() +} + +func (f *FilterChain) 
String() string { + if f == nil || f.SecurityCfg == nil { + return "" + } + return fmt.Sprintf("security_config: %v", f.SecurityCfg) +} + +func ipNetFromCIDR(cidr string) *net.IPNet { + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + panic(err) + } + return ipnet +} + +func transportSocketWithInstanceName(name string) *v3corepb.TransportSocket { + return &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: name}, + }, + }), + }, + } +} + +func cidrRangeFromAddressAndPrefixLen(address string, len int) *v3corepb.CidrRange { + return &v3corepb.CidrRange{ + AddressPrefix: address, + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(len), + }, + } +} diff --git a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go index da0e0f956156..42421f66b2d1 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/client/lds_test.go @@ -20,7 +20,6 @@ package client import ( "fmt" - "net" "strings" "testing" "time" @@ -782,181 +781,132 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { const v3LDSTarget = "grpc/server?xds.resource.listening_address=0.0.0.0:9999" var ( - listenerEmptyTransportSocket = &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", + listenerEmptyTransportSocket = marshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: &v3corepb.Address{ + Address: 
&v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "0.0.0.0", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 9999, }, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } - listenerNoValidationContext = &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + }, + }, + }) + listenerNoValidationContext = marshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "0.0.0.0", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 9999, }, }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + 
TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", }, }, - }, + }), }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{ - Name: "default-filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "defaultIdentityPluginInstance", - CertificateName: "defaultIdentityCertName", - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", }, }, - }, + }), }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } - listenerWithValidationContext = &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, + }, + }, + }) + listenerWithValidationContext = marshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: &v3corepb.Address{ + 
Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "0.0.0.0", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 9999, }, }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "rootPluginInstance", - CertificateName: "rootCertName", - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "rootPluginInstance", + 
CertificateName: "rootCertName", + }, }, }, - }, + }), }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{ - Name: "default-filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "defaultIdentityPluginInstance", - CertificateName: "defaultIdentityCertName", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "defaultRootPluginInstance", - CertificateName: "defaultRootCertName", - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultRootPluginInstance", + CertificateName: "defaultRootCertName", + }, }, }, - }, + }), 
}, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } + }, + }, + }) ) const testVersion = "test-version-lds-server" @@ -1073,6 +1023,92 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, wantErr: "no socket_address field in LDS response", }, + { + name: "no filter chains and no default filter chain", + resources: []*anypb.Any{ + { + TypeUrl: version.V3ListenerURL, + Value: func() []byte { + lis := &v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "0.0.0.0", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 9999, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{DestinationPort: &wrapperspb.UInt32Value{Value: 666}}, + }, + }, + } + mLis, _ := proto.Marshal(lis) + return mLis + }(), + }, + }, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: UpdateMetadata{ + Status: ServiceStatusNACKed, + Version: testVersion, + ErrState: &UpdateErrorMetadata{ + Version: testVersion, + Err: errPlaceHolder, + }, + }, + wantErr: "no supported filter chains and no default filter chain", + }, + { + name: "overlapping filter chain match criteria", + resources: []*anypb.Any{ + { + TypeUrl: version.V3ListenerURL, + Value: func() []byte { + lis := &v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "0.0.0.0", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 9999, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3, 4, 5}}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{5, 6, 7}}, + }, + }, + } + mLis, _ := 
proto.Marshal(lis) + return mLis + }(), + }, + }, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: UpdateMetadata{ + Status: ServiceStatusNACKed, + Version: testVersion, + ErrState: &UpdateErrorMetadata{ + Version: testVersion, + Err: errPlaceHolder, + }, + }, + wantErr: "multiple filter chains with overlapping matching rules are defined", + }, { name: "unexpected transport socket name", resources: []*anypb.Any{ @@ -1333,9 +1369,28 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { wantUpdate: map[string]ListenerUpdate{ v3LDSTarget: { InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: []*FilterChain{{Match: &FilterChainMatch{}}}, + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + }, + fcCnt: 1, + }, }, Raw: listenerEmptyTransportSocket, }, @@ -1469,21 +1524,36 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { InboundListenerCfg: &InboundListenerConfig{ Address: "0.0.0.0", Port: "9999", - FilterChains: []*FilterChain{ - { - Match: &FilterChainMatch{}, - SecurityCfg: &SecurityConfig{ - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + }, + }, + }, + }, + }, + }, + }, }, }, - }, - DefaultFilterChain: &FilterChain{ - Match: &FilterChainMatch{}, - SecurityCfg: 
&SecurityConfig{ - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + }, }, + fcCnt: 1, }, }, Raw: listenerNoValidationContext, @@ -1502,27 +1572,42 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { InboundListenerCfg: &InboundListenerConfig{ Address: "0.0.0.0", Port: "9999", - FilterChains: []*FilterChain{ - { - Match: &FilterChainMatch{}, + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ SecurityCfg: &SecurityConfig{ - RootInstanceName: "rootPluginInstance", - RootCertName: "rootCertName", - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", RequireClientCert: true, }, }, - }, - DefaultFilterChain: &FilterChain{ - Match: &FilterChainMatch{}, - SecurityCfg: &SecurityConfig{ - RootInstanceName: "defaultRootPluginInstance", - RootCertName: "defaultRootCertName", - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", - RequireClientCert: true, - }, + fcCnt: 1, }, }, Raw: listenerWithValidationContext, @@ -1554,389 +1639,6 @@ func (s) TestUnmarshalListener_ServerSide(t 
*testing.T) { } } -func (s) TestGetFilterChain(t *testing.T) { - tests := []struct { - desc string - inputFilterChain *v3listenerpb.FilterChain - wantFilterChain *FilterChain - wantErr bool - }{ - { - desc: "empty", - inputFilterChain: nil, - }, - { - desc: "unsupported destination port", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - DestinationPort: &wrapperspb.UInt32Value{ - Value: 666, - }, - }, - }, - }, - { - desc: "unsupported server names", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - ServerNames: []string{"example-server"}, - }, - }, - }, - { - desc: "unsupported transport protocol", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - TransportProtocol: "tls", - }, - }, - }, - { - desc: "unsupported application protocol", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - ApplicationProtocols: []string{"h2"}, - }, - }, - }, - { - desc: "bad dest address prefix", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "a.b.c.d", - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "bad dest prefix length", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 50, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "dest prefix ranges", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - { - AddressPrefix: "192.168.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 24, - }, - }, - }, - }, - }, - 
wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - DestPrefixRanges: []net.IP{ - net.IPv4(10, 1, 1, 0), - net.IPv4(192, 168, 1, 0), - }, - }, - }, - }, - { - desc: "source type local", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourceType: SourceTypeSameOrLoopback, - }, - }, - }, - { - desc: "source type external", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourceType: SourceTypeExternal, - }, - }, - }, - { - desc: "source type any", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourceType: v3listenerpb.FilterChainMatch_ANY, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourceType: SourceTypeAny, - }, - }, - }, - { - desc: "bad source address prefix", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "a.b.c.d", - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "bad source prefix length", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 50, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "source prefix ranges", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - { - AddressPrefix: "192.168.1.0", - PrefixLen: 
&wrapperspb.UInt32Value{ - Value: 24, - }, - }, - }, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourcePrefixRanges: []net.IP{ - net.IPv4(10, 1, 1, 0), - net.IPv4(192, 168, 1, 0), - }, - }, - }, - }, - { - desc: "empty transport socket", - inputFilterChain: &v3listenerpb.FilterChain{}, - wantFilterChain: &FilterChain{Match: &FilterChainMatch{}}, - }, - { - desc: "bad transport socket name", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "unsupported-transport-socket-name", - }, - }, - wantErr: true, - }, - { - desc: "unexpected url in transport socket", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.UpstreamTlsContext{}), - }, - }, - }, - wantErr: true, - }, - { - desc: "badly marshaled downstream tls context", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "missing common tls context", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{}), - }, - }, - }, - wantErr: true, - }, - { - desc: "unsupported validation context", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: 
&v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ - ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ - Name: "foo-sds-secret", - }, - }, - }, - }), - }, - }, - }, - wantErr: true, - }, - { - desc: "no identity and root certificate providers", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - }, - }), - }, - }, - }, - wantErr: true, - }, - { - desc: "no identity certificate provider with require_client_cert", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{}, - }), - }, - }, - }, - wantErr: true, - }, - { - desc: "happy case", - inputFilterChain: &v3listenerpb.FilterChain{ - Name: "filter-chain-1", - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - }, - SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - }, - SourcePorts: []uint32{80, 8080}, - }, - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - 
RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "rootPluginInstance", - CertificateName: "rootCertName", - }, - }, - }, - }), - }, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - DestPrefixRanges: []net.IP{net.IPv4(10, 1, 1, 0)}, - SourceType: SourceTypeExternal, - SourcePrefixRanges: []net.IP{net.IPv4(10, 1, 1, 0)}, - SourcePorts: []uint32{80, 8080}, - }, - SecurityCfg: &SecurityConfig{ - RootInstanceName: "rootPluginInstance", - RootCertName: "rootCertName", - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - RequireClientCert: true, - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - gotFilterChain, gotErr := getFilterChain(test.inputFilterChain) - if (gotErr != nil) != test.wantErr { - t.Fatalf("getFilterChain(%+v) returned error: %v, wantErr: %v", test.inputFilterChain, gotErr, test.wantErr) - } - if diff := cmp.Diff(test.wantFilterChain, gotFilterChain); diff != "" { - t.Errorf("getFilterChain(%+v) returned unexpected, diff (-want +got):\n%s", test.inputFilterChain, diff) - } - }) - } -} - type filterConfig struct { httpfilter.FilterConfig Cfg proto.Message diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index fc1112e180bc..9b957399983c 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -270,120 +270,14 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err }, } - var filterChains []*FilterChain - for _, fc := range 
lis.GetFilterChains() { - filterChain, err := getFilterChain(fc) - if err != nil { - return nil, err - } - filterChains = append(filterChains, filterChain) - } - defaultFilterChain, err := getFilterChain(lis.GetDefaultFilterChain()) + fcMgr, err := NewFilterChainManager(lis) if err != nil { return nil, err } - if len(filterChains) == 0 && defaultFilterChain == nil { - return nil, fmt.Errorf("xds: no supported filter chains and no default filter chain") - } - lu.InboundListenerCfg.FilterChains = filterChains - lu.InboundListenerCfg.DefaultFilterChain = defaultFilterChain + lu.InboundListenerCfg.FilterChains = fcMgr return lu, nil } -// getFilterChain parses the filter chain proto and converts it into the local -// representation. If fc contains unsupported filter chain match fields, a nil -// FilterChain object and a nil error are returned. If fc does not parse or -// contains other invalid data, an non-nil error is returned. -func getFilterChain(fc *v3listenerpb.FilterChain) (*FilterChain, error) { - if fc == nil { - return nil, nil - } - - // If the match criteria contains unsupported fields, skip the filter chain. - fcm := fc.GetFilterChainMatch() - if fcm.GetDestinationPort().GetValue() != 0 || - fcm.GetServerNames() != nil || - (fcm.GetTransportProtocol() != "" && fcm.TransportProtocol != "raw_buffer") || - fcm.GetApplicationProtocols() != nil { - return nil, nil - } - - // Extract the supported match criteria. 
- var dstPrefixRanges []net.IP - for _, pr := range fcm.GetPrefixRanges() { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - ip, _, err := net.ParseCIDR(cidr) - if err != nil { - return nil, fmt.Errorf("xds: failed to parse destination prefix range: %+v", pr) - } - dstPrefixRanges = append(dstPrefixRanges, ip) - } - var srcType SourceType - switch fcm.GetSourceType() { - case v3listenerpb.FilterChainMatch_ANY: - srcType = SourceTypeAny - case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: - srcType = SourceTypeSameOrLoopback - case v3listenerpb.FilterChainMatch_EXTERNAL: - srcType = SourceTypeExternal - default: - return nil, fmt.Errorf("xds: unsupported source type: %v", fcm.GetSourceType()) - } - var srcPrefixRanges []net.IP - for _, pr := range fcm.GetSourcePrefixRanges() { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - ip, _, err := net.ParseCIDR(cidr) - if err != nil { - return nil, fmt.Errorf("xds: failed to parse source prefix range: %+v", pr) - } - srcPrefixRanges = append(srcPrefixRanges, ip) - } - filterChain := &FilterChain{ - Match: &FilterChainMatch{ - DestPrefixRanges: dstPrefixRanges, - SourceType: srcType, - SourcePrefixRanges: srcPrefixRanges, - SourcePorts: fcm.GetSourcePorts(), - }, - } - - // If the transport_socket field is not specified, it means that the control - // plane has not sent us any security config. This is fine and the server - // will use the fallback credentials configured as part of the - // xdsCredentials. 
- ts := fc.GetTransportSocket() - if ts == nil { - return filterChain, nil - } - if name := ts.GetName(); name != transportSocketName { - return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) - } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3DownstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) - } - downstreamCtx := &v3tlspb.DownstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil { - return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) - } - if downstreamCtx.GetCommonTlsContext() == nil { - return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") - } - sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext()) - if err != nil { - return nil, err - } - if sc.IdentityInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") - } - sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() - if sc.RequireClientCert && sc.RootInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") - } - filterChain.SecurityCfg = sc - return filterChain, nil -} - // UnmarshalRouteConfig processes resources received in an RDS response, // validates them, and transforms them into a native struct which contains only // fields we are interested in. 
The provided hostname determines the route diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index 359674417dcf..a92a9ddb038f 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -42,8 +42,10 @@ import ( type connWrapper struct { net.Conn + // The specific filter chain picked for handling this connection. + filterChain *xdsclient.FilterChain + // A reference fo the listenerWrapper on which this connection was accepted. - // Used to access the filter chains during the server-side handshake. parent *listenerWrapper // The certificate providers created for this connection. @@ -90,12 +92,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { return nil, errors.New("user has not configured xDS credentials") } - fc := c.getMatchingFilterChain() - if fc == nil { - return nil, errors.New("no matching filter chain for incoming connection") - } - - if fc.SecurityCfg == nil { + if c.filterChain.SecurityCfg == nil { // If the security config is empty, this means that the control plane // did not provide any security configuration and therefore we should // return an empty HandshakeInfo here so that the xdsCreds can use the @@ -106,13 +103,14 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs // Identity provider name is mandatory on the server-side, and this is // enforced when the resource is received at the xdsClient layer. - ip, err := buildProviderFunc(cpc, fc.SecurityCfg.IdentityInstanceName, fc.SecurityCfg.IdentityCertName, true, false) + secCfg := c.filterChain.SecurityCfg + ip, err := buildProviderFunc(cpc, secCfg.IdentityInstanceName, secCfg.IdentityCertName, true, false) if err != nil { return nil, err } // Root provider name is optional and required only when doing mTLS. 
var rp certprovider.Provider - if instance, cert := fc.SecurityCfg.RootInstanceName, fc.SecurityCfg.RootCertName; instance != "" { + if instance, cert := secCfg.RootInstanceName, secCfg.RootCertName; instance != "" { rp, err = buildProviderFunc(cpc, instance, cert, false, true) if err != nil { return nil, err @@ -122,28 +120,10 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { c.rootProvider = rp xdsHI := xdsinternal.NewHandshakeInfo(c.identityProvider, c.rootProvider) - xdsHI.SetRequireClientCert(fc.SecurityCfg.RequireClientCert) + xdsHI.SetRequireClientCert(secCfg.RequireClientCert) return xdsHI, nil } -// The logic specified in the documentation around the xDS FilterChainMatch -// proto mentions 8 criteria to match on. gRPC does not support 4 of those, and -// hence we got rid of them at the time of parsing the received Listener -// resource. Here we use the remaining 4 criteria to find a matching filter -// chain: Destination IP address, Source type, Source IP address, Source port. -func (c *connWrapper) getMatchingFilterChain() *xdsclient.FilterChain { - c.parent.mu.RLock() - defer c.parent.mu.RUnlock() - - // TODO: Do the filter chain match here and return the best match. - // For now, we simply return the first filter_chain in the list or the - // default one. 
- if len(c.parent.filterChains) == 0 { - return c.parent.defaultFilterChain - } - return c.parent.filterChains[0] -} - func (c *connWrapper) Close() error { if c.identityProvider != nil { c.identityProvider.Close() diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 078b2112581d..c65736540920 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -24,18 +24,32 @@ import ( "fmt" "net" "sync" + "time" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/grpclog" + internalbackoff "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" ) -var logger = grpclog.Component("xds") +var ( + logger = grpclog.Component("xds") + + // Backoff strategy for temporary errors received from Accept(). If this + // needs to be configurable, we can inject it through ListenerWrapperParams. + bs = internalbackoff.Exponential{Config: backoff.Config{ + BaseDelay: 5 * time.Millisecond, + Multiplier: 2.0, + MaxDelay: 1 * time.Second, + }} + backoffFunc = bs.Backoff +) func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p]", p)) + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", p)) } // XDSClientInterface wraps the methods on the xdsClient which are required by @@ -61,18 +75,26 @@ type ListenerWrapperParams struct { // NewListenerWrapper creates a new listenerWrapper with params. It returns a // net.Listener and a channel which is written to, indicating that the former is // ready to be passed to grpc.Serve(). +// +// Only TCP listeners are supported. 
func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan struct{}) { lw := &listenerWrapper{ - Listener: params.Listener, - name: params.ListenerResourceName, - xdsCredsInUse: params.XDSCredsInUse, - xdsC: params.XDSClient, + Listener: params.Listener, + name: params.ListenerResourceName, + xdsCredsInUse: params.XDSCredsInUse, + xdsC: params.XDSClient, + isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), closed: grpcsync.NewEvent(), goodUpdate: grpcsync.NewEvent(), } lw.logger = prefixLogger(lw) + // Serve() verifies that Addr() returns a valid TCPAddr. So, it is safe to + // ignore the error from SplitHostPort(). + lisAddr := lw.Listener.Addr().String() + lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) + cancelWatch := lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) lw.logger.Infof("Watch started on resource name %v", lw.name) lw.cancelWatch = func() { @@ -96,6 +118,13 @@ type listenerWrapper struct { xdsC XDSClientInterface cancelWatch func() + // Set to true if the listener is bound to the IP_ANY address (which is + // "0.0.0.0" for IPv4 and "::" for IPv6). + isUnspecifiedAddr bool + // Listening address and port. Used to validate the socket address in the + // Listener resource received from the control plane. + addr, port string + // This is used to notify that a good update has been received and that // Serve() can be invoked on the underlying gRPC server. Using an event // instead of a vanilla channel simplifies the update handler as it need not @@ -113,20 +142,80 @@ type listenerWrapper struct { // using an rw lock here is that this field will be read by all connections // during their server-side handshake (in the hot path), but writes to this // happen rarely (when we get a Listener resource update). 
- mu sync.RWMutex - filterChains []*xdsclient.FilterChain - defaultFilterChain *xdsclient.FilterChain + mu sync.RWMutex + filterChains *xdsclient.FilterChainManager } // Accept blocks on an Accept() on the underlying listener, and wraps the // returned net.connWrapper with the configured certificate providers. func (l *listenerWrapper) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err + var retries int + for { + conn, err := l.Listener.Accept() + if err != nil { + // Temporary() method is implemented by certain error types returned + // from the net package, and it is useful for us to not shutdown the + // server in these conditions. The listen queue being full is one + // such case. + if ne, ok := err.(interface{ Temporary() bool }); !ok || !ne.Temporary() { + return nil, err + } + retries++ + timer := time.NewTimer(backoffFunc(retries)) + select { + case <-timer.C: + case <-l.closed.Done(): + timer.Stop() + // Continuing here will cause us to call Accept() again + // which will return a non-temporary error. + continue + } + continue + } + // Reset retries after a successful Accept(). + retries = 0 + + // TODO: Close connections if in "non-serving" state + + // Since the net.Conn represents an incoming connection, the source and + // destination address can be retrieved from the local address and + // remote address of the net.Conn respectively. + destAddr, ok1 := conn.LocalAddr().(*net.TCPAddr) + srcAddr, ok2 := conn.RemoteAddr().(*net.TCPAddr) + if !ok1 || !ok2 { + // If the incoming connection is not a TCP connection, which is + // really unexpected since we check whether the provided listener is + // a TCP listener in Serve(), we return an error which would cause + // us to stop serving. 
+ return nil, fmt.Errorf("received connection with non-TCP address (local: %T, remote %T)", conn.LocalAddr(), conn.RemoteAddr()) + } + + l.mu.RLock() + fc, err := l.filterChains.Lookup(xdsclient.FilterChainLookupParams{ + IsUnspecifiedListener: l.isUnspecifiedAddr, + DestAddr: destAddr.IP, + SourceAddr: srcAddr.IP, + SourcePort: srcAddr.Port, + }) + l.mu.RUnlock() + if err != nil { + // When a matching filter chain is not found, we close the + // connection right away, but do not return an error back to + // `grpc.Serve()` from where this Accept() was invoked. Returning an + // error to `grpc.Serve()` causes the server to shutdown. If we want + // to avoid the server from shutting down, we would need to return + // an error type which implements the `Temporary() bool` method, + // which is invoked by `grpc.Serve()` to see if the returned error + // represents a temporary condition. In the case of a temporary + // error, `grpc.Serve()` method sleeps for a small duration and + // therefore ends up blocking all connection attempts during that + // time frame, which is also not ideal for an error like this. + l.logger.Warningf("connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr().String(), conn.LocalAddr().String()) + conn.Close() + continue + } + return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil } - // TODO: Close connections if in "non-serving" state. - return &connWrapper{Conn: c, parent: l}, nil } // Close closes the underlying listener. It also cancels the xDS watch @@ -168,32 +257,18 @@ func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, // - this is a very context-dependent check and only the server has the // appropriate context to perform this check. // - // What this means is that the xdsClient has ACKed a resource which is going - // to push the server into a "not serving" state. This is not ideal, but - // this is what we have decided to do. See gRPC A36 for more details. 
- // TODO(easwars): Switch to "not serving" if the host:port does not match. - lisAddr := l.Listener.Addr().String() - addr, port, err := net.SplitHostPort(lisAddr) - if err != nil { - // This is never expected to return a non-nil error since we have made - // sure that the listener is a TCP listener at the beginning of Serve(). - // This is simply paranoia. - l.logger.Warningf("Local listener address %q failed to parse as IP:port: %v", lisAddr, err) - return - } + // What this means is that the xdsClient has ACKed a resource which can push + // the server into a "not serving" state. This is not ideal, but this is + // what we have decided to do. See gRPC A36 for more details. ilc := update.InboundListenerCfg - if ilc == nil { - l.logger.Warningf("Missing host:port in Listener updates") - return - } - if ilc.Address != addr || ilc.Port != port { - l.logger.Warningf("Received host:port (%s:%d) in Listener update does not match local listening address: %s", ilc.Address, ilc.Port, lisAddr) + if ilc.Address != l.addr || ilc.Port != l.port { + // TODO: Switch to "not serving" if the host:port does not match. + l.logger.Warningf("Received host:port (%s:%d) in Listener update does not match local listening address: (%s:%s", ilc.Address, ilc.Port, l.addr, l.port) return } l.mu.Lock() l.filterChains = ilc.FilterChains - l.defaultFilterChain = ilc.DefaultFilterChain l.mu.Unlock() l.goodUpdate.Fire() // TODO: Move to serving state on receipt of a good response. diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go new file mode 100644 index 000000000000..220be0e08ae7 --- /dev/null +++ b/xds/internal/server/listener_wrapper_test.go @@ -0,0 +1,335 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "testing" + "time" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + anypb "github.com/golang/protobuf/ptypes/any" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/testutils/fakeclient" +) + +const ( + fakeListenerHost = "0.0.0.0" + fakeListenerPort = 50051 + testListenerResourceName = "lds.target.1.2.3.4:1111" + defaultTestTimeout = 1 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +var listenerWithFilterChains = &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: 
uint32(16), + }, + }, + }, + SourcePorts: []uint32{80}, + }, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type tempError struct{} + +func (tempError) Error() string { + return "listenerWrapper test temporary error" +} + +func (tempError) Temporary() bool { + return true +} + +// connAndErr wraps a net.Conn and an error. +type connAndErr struct { + conn net.Conn + err error +} + +// fakeListener allows the user to inject conns returned by Accept(). +type fakeListener struct { + acceptCh chan connAndErr + closeCh *testutils.Channel +} + +func (fl *fakeListener) Accept() (net.Conn, error) { + cne := <-fl.acceptCh + return cne.conn, cne.err +} + +func (fl *fakeListener) Close() error { + fl.closeCh.Send(nil) + return nil +} + +func (fl *fakeListener) Addr() net.Addr { + return &net.TCPAddr{ + IP: net.IPv4(0, 0, 0, 0), + Port: fakeListenerPort, + } +} + +// fakeConn overrides LocalAddr, RemoteAddr and Close methods. 
+type fakeConn struct { + net.Conn + local, remote net.Addr + closeCh *testutils.Channel +} + +func (fc *fakeConn) LocalAddr() net.Addr { + return fc.local +} + +func (fc *fakeConn) RemoteAddr() net.Addr { + return fc.remote +} + +func (fc *fakeConn) Close() error { + fc.closeCh.Send(nil) + return nil +} + +func newListenerWrapper(t *testing.T) (*listenerWrapper, <-chan struct{}, *fakeclient.Client, *fakeListener, func()) { + t.Helper() + + // Create a listener wrapper with a fake listener and fake xdsClient and + // verify that it extracts the host and port from the passed in listener. + lis := &fakeListener{ + acceptCh: make(chan connAndErr, 1), + closeCh: testutils.NewChannel(), + } + xdsC := fakeclient.NewClient() + lParams := ListenerWrapperParams{ + Listener: lis, + ListenerResourceName: testListenerResourceName, + XDSClient: xdsC, + } + l, readyCh := NewListenerWrapper(lParams) + if l == nil { + t.Fatalf("NewListenerWrapper(%+v) returned nil", lParams) + } + lw, ok := l.(*listenerWrapper) + if !ok { + t.Fatalf("NewListenerWrapper(%+v) returned listener of type %T want *listenerWrapper", lParams, l) + } + if lw.addr != fakeListenerHost || lw.port != strconv.Itoa(fakeListenerPort) { + t.Fatalf("listenerWrapper has host:port %s:%s, want %s:%d", lw.addr, lw.port, fakeListenerHost, fakeListenerPort) + } + return lw, readyCh, xdsC, lis, func() { l.Close() } +} + +func (s) TestNewListenerWrapper(t *testing.T) { + _, readyCh, xdsC, _, cleanup := newListenerWrapper(t) + defer cleanup() + + // Verify that the listener wrapper registers a listener watch for the + // expected Listener resource name. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + name, err := xdsC.WaitForWatchListener(ctx) + if err != nil { + t.Fatalf("error when waiting for a watch on a Listener resource: %v", err) + } + if name != testListenerResourceName { + t.Fatalf("listenerWrapper registered a watch on %s, want %s", name, testListenerResourceName) + } + + // Push an error to the listener update handler. + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("bad listener update")) + timer := time.NewTimer(defaultTestShortTimeout) + select { + case <-timer.C: + timer.Stop() + case <-readyCh: + t.Fatalf("ready channel written to after receipt of a bad Listener update") + } + + // Push an update whose address does not match the address to which our + // listener is bound, and verify that the ready channel is not written to. + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + InboundListenerCfg: &xdsclient.InboundListenerConfig{ + Address: "10.0.0.1", + Port: "50051", + }}, nil) + timer = time.NewTimer(defaultTestShortTimeout) + select { + case <-timer.C: + timer.Stop() + case <-readyCh: + t.Fatalf("ready channel written to after receipt of a bad Listener update") + } + + // Push a good update, and verify that the ready channel is written to. 
+ xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + InboundListenerCfg: &xdsclient.InboundListenerConfig{ + Address: fakeListenerHost, + Port: strconv.Itoa(fakeListenerPort), + }}, nil) + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good Listener update") + case <-readyCh: + } +} + +func (s) TestListenerWrapper_Accept(t *testing.T) { + boCh := testutils.NewChannel() + origBackoffFunc := backoffFunc + backoffFunc = func(v int) time.Duration { + boCh.Send(v) + return 0 + } + defer func() { backoffFunc = origBackoffFunc }() + + lw, readyCh, xdsC, lis, cleanup := newListenerWrapper(t) + defer cleanup() + + // Push a good update with a filter chain which accepts local connections on + // 192.168.0.0/16 subnet and port 80. + fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + InboundListenerCfg: &xdsclient.InboundListenerConfig{ + Address: fakeListenerHost, + Port: strconv.Itoa(fakeListenerPort), + FilterChains: fcm, + }}, nil) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good Listener update") + case <-readyCh: + } + + // Push a non-temporary error into Accept(). + nonTempErr := errors.New("a non-temporary error") + lis.acceptCh <- connAndErr{err: nonTempErr} + if _, err := lw.Accept(); err != nonTempErr { + t.Fatalf("listenerWrapper.Accept() returned error: %v, want: %v", err, nonTempErr) + } + + // Invoke Accept() in a goroutine since we expect it to swallow: + // 1. temporary errors returned from the underlying listener + // 2. errors related to finding a matching filter chain for the incoming + // connection. 
+ errCh := testutils.NewChannel() + go func() { + conn, err := lw.Accept() + if err != nil { + errCh.Send(err) + return + } + if _, ok := conn.(*connWrapper); !ok { + errCh.Send(errors.New("listenerWrapper.Accept() returned a Conn of type %T, want *connWrapper")) + return + } + errCh.Send(nil) + }() + + // Push a temporary error into Accept() and verify that it backs off. + lis.acceptCh <- connAndErr{err: tempError{}} + if _, err := boCh.Receive(ctx); err != nil { + t.Fatalf("error when waiting for Accept() to backoff on temporary errors: %v", err) + } + + // Push a fakeConn which does not match any filter chains configured on the + // received Listener resource. Verify that the conn is closed. + fc := &fakeConn{ + local: &net.TCPAddr{IP: net.IPv4(192, 168, 1, 2), Port: 79}, + remote: &net.TCPAddr{IP: net.IPv4(10, 1, 1, 1), Port: 80}, + closeCh: testutils.NewChannel(), + } + lis.acceptCh <- connAndErr{conn: fc} + if _, err := fc.closeCh.Receive(ctx); err != nil { + t.Fatalf("error when waiting for conn to be closed on no filter chain match: %v", err) + } + + // Push a fakeConn which matches the filter chains configured on the + // received Listener resource. Verify that Accept() returns. 
+ fc = &fakeConn{ + local: &net.TCPAddr{IP: net.IPv4(192, 168, 1, 2)}, + remote: &net.TCPAddr{IP: net.IPv4(192, 168, 1, 2), Port: 80}, + closeCh: testutils.NewChannel(), + } + lis.acceptCh <- connAndErr{conn: fc} + if _, err := errCh.Receive(ctx); err != nil { + t.Fatalf("error when waiting for Accept() to return the conn on filter chain match: %v", err) + } +} + +func marshalAny(m proto.Message) *anypb.Any { + a, err := ptypes.MarshalAny(m) + if err != nil { + panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) + } + return a +} diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index f18d4e356aa6..7606a35218ca 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -252,6 +252,25 @@ func listenerResourceWithSecurityConfig(t *testing.T, lis net.Listener) *v3liste FilterChains: []*v3listenerpb.FilterChain{ { Name: "filter-chain-1", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ diff --git a/xds/server_test.go b/xds/server_test.go index f787a129057e..2a6677a3ccbb 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -28,6 +28,12 @@ import ( "testing" "time" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3tlspb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + anypb "github.com/golang/protobuf/ptypes/any" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" @@ -626,21 +632,35 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { // Push a good LDS response with security config, and wait for Serve() to be // invoked on the underlying grpc.Server. Also make sure that certificate // providers are not created. + fcm, err := xdsclient.NewFilterChainManager(&v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + }) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } addr, port := splitHostPort(lis.Addr().String()) client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ RouteConfigName: "routeconfig", InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: addr, - Port: port, - FilterChains: []*xdsclient.FilterChain{ - { - SecurityCfg: &xdsclient.SecurityConfig{ - RootInstanceName: "default1", - IdentityInstanceName: "default2", - RequireClientCert: true, - }, - }, - }, + Address: addr, + Port: port, + FilterChains: fcm, }, }, nil) if _, err := fs.serveCh.Receive(ctx); err != nil { @@ -703,20 +723,7 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { // Push an error to the registered listener 
watch callback and make sure // that Serve does not return. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ - FilterChains: []*xdsclient.FilterChain{ - { - SecurityCfg: &xdsclient.SecurityConfig{ - RootInstanceName: "default1", - IdentityInstanceName: "default2", - RequireClientCert: true, - }, - }, - }, - }, - }, errors.New("LDS error")) + client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("LDS error")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { @@ -742,3 +749,11 @@ func verifyCertProviderNotCreated() error { } return nil } + +func marshalAny(m proto.Message) *anypb.Any { + a, err := ptypes.MarshalAny(m) + if err != nil { + panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) + } + return a +} From db816235452978bb98c6d18ac03ce643e9ab13fc Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 1 Apr 2021 14:41:47 -0400 Subject: [PATCH 004/998] xds: Add fields to cluster update (#4277) * Added support for more fields in CDS response --- xds/internal/client/cds_test.go | 58 ++++++++++++++++++++++++------- xds/internal/client/client.go | 21 ++++++++++++ xds/internal/client/xds.go | 61 +++++++++++++++++++++++---------- 3 files changed, 110 insertions(+), 30 deletions(-) diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index 104260759b95..3fb889db9486 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -53,7 +53,7 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantErr bool }{ { - name: "non-eds-cluster-type", + name: "non-supported-cluster-type-static", cluster: &v3clusterpb.Cluster{ ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, 
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ @@ -68,6 +68,22 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantUpdate: emptyUpdate, wantErr: true, }, + { + name: "non-supported-cluster-type-original-dst", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_ORIGINAL_DST}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: v3clusterpb.Cluster_LEAST_REQUEST, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, { name: "no-eds-config", cluster: &v3clusterpb.Cluster{ @@ -107,8 +123,8 @@ func (s) TestValidateCluster_Failure(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if update, err := validateCluster(test.cluster); err == nil { - t.Errorf("validateCluster(%+v) = %v, wanted error", test.cluster, update) + if update, err := validateClusterAndConstructClusterUpdate(test.cluster); err == nil { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) = %v, wanted error", test.cluster, update) } }) } @@ -120,6 +136,24 @@ func (s) TestValidateCluster_Success(t *testing.T) { cluster *v3clusterpb.Cluster wantUpdate ClusterUpdate }{ + { + name: "happy-case-logical-dns", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, + }, + { + name: "happy-case-aggregate-v3", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{Name: "envoy.clusters.aggregate"}, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: 
ClusterTypeAggregate}, + }, { name: "happy-case-no-service-name-no-lrs", cluster: &v3clusterpb.Cluster{ @@ -214,12 +248,12 @@ func (s) TestValidateCluster_Success(t *testing.T) { defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { - update, err := validateCluster(test.cluster) + update, err := validateClusterAndConstructClusterUpdate(test.cluster) if err != nil { - t.Errorf("validateCluster(%+v) failed: %v", test.cluster, err) + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err) } if !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) { - t.Errorf("validateCluster(%+v) = %v, want: %v", test.cluster, update, test.wantUpdate) + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) = %v, want: %v", test.cluster, update, test.wantUpdate) } }) } @@ -269,12 +303,12 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { ServiceName: serviceName, EnableLRS: false, } - gotUpdate, err := validateCluster(cluster) + gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { - t.Errorf("validateCluster() failed: %v", err) + t.Errorf("validateClusterAndConstructClusterUpdate() failed: %v", err) } if diff := cmp.Diff(wantUpdate, gotUpdate); diff != "" { - t.Errorf("validateCluster() returned unexpected diff (-want, got):\n%s", diff) + t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, got):\n%s", diff) } } @@ -790,12 +824,12 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - update, err := validateCluster(test.cluster) + update, err := validateClusterAndConstructClusterUpdate(test.cluster) if (err != nil) != test.wantErr { - t.Errorf("validateCluster() returned err %v wantErr %v)", err, test.wantErr) + t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", 
err, test.wantErr) } if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmp.AllowUnexported(regexp.Regexp{})); diff != "" { - t.Errorf("validateCluster() returned unexpected diff (-want, +got):\n%s", diff) + t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) } }) } diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 37eaae79f2d5..2daceede5398 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -355,9 +355,26 @@ type SecurityConfig struct { RequireClientCert bool } +// ClusterType is the type of cluster from a received CDS response. +type ClusterType int + +const ( + // ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint + // discovery to the management server. + ClusterTypeEDS ClusterType = iota + // ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially + // maps to the gRPC behavior of using the DNS resolver with pick_first LB policy. + ClusterTypeLogicalDNS + // ClusterTypeAggregate represents the Aggregate Cluster type, which provides a + // prioritized list of clusters to use. It is used for failover between clusters + // with a different configuration. + ClusterTypeAggregate +) + // ClusterUpdate contains information from a received CDS response, which is of // interest to the registered CDS watcher. type ClusterUpdate struct { + ClusterType ClusterType // ServiceName is the service name corresponding to the clusterName which // is being watched for through CDS. ServiceName string @@ -370,6 +387,10 @@ type ClusterUpdate struct { // Raw is the resource from the xds response. Raw *anypb.Any + + // PrioritizedClusterNames is used only for cluster type aggregate. It represents + // a prioritized list of cluster names. + PrioritizedClusterNames []string } // OverloadDropConfig contains the config to drop overloads. 
diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index 9b957399983c..571fff670daa 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -32,6 +32,7 @@ import ( v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" @@ -499,30 +500,52 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, cluster) - - cu, err := validateCluster(cluster) + cu, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { return cluster.GetName(), ClusterUpdate{}, err } cu.Raw = r - // If the Cluster message in the CDS response did not contain a - // serviceName, we will just use the clusterName for EDS. 
- if cu.ServiceName == "" { - cu.ServiceName = cluster.GetName() - } + return cluster.GetName(), cu, nil } -func validateCluster(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { +func clusterTypeFromCluster(cluster *v3clusterpb.Cluster) (ClusterType, string, []string, error) { + if cluster.GetType() == v3clusterpb.Cluster_EDS { + if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { + return 0, "", nil, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) + } + // If the Cluster message in the CDS response did not contain a + // serviceName, we will just use the clusterName for EDS. + if cluster.GetEdsClusterConfig().GetServiceName() == "" { + return ClusterTypeEDS, cluster.GetName(), nil, nil + } + return ClusterTypeEDS, cluster.GetEdsClusterConfig().GetServiceName(), nil, nil + } + + if cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS { + return ClusterTypeLogicalDNS, cluster.GetName(), nil, nil + } + + if cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate" { + // Loop through ClusterConfig here to get cluster names. 
+ clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return 0, "", nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return ClusterTypeAggregate, cluster.GetName(), clusters.Clusters, nil + } + return 0, "", nil, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) +} + +func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false} - switch { - case cluster.GetType() != v3clusterpb.Cluster_EDS: - return emptyUpdate, fmt.Errorf("unexpected cluster type %v in response: %+v", cluster.GetType(), cluster) - case cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil: - return emptyUpdate, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) - case cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN: + if cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN { return emptyUpdate, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } + clusterType, serviceName, prioritizedClusters, err := clusterTypeFromCluster(cluster) + if err != nil { + return emptyUpdate, err + } // Process security configuration received from the control plane iff the // corresponding environment variable is set. 
@@ -535,10 +558,12 @@ func validateCluster(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { } return ClusterUpdate{ - ServiceName: cluster.GetEdsClusterConfig().GetServiceName(), - EnableLRS: cluster.GetLrsServer().GetSelf() != nil, - SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), + ClusterType: clusterType, + ServiceName: serviceName, + EnableLRS: cluster.GetLrsServer().GetSelf() != nil, + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + PrioritizedClusterNames: prioritizedClusters, }, nil } From 5730f8d113ee31f14709a787572c4a3f3af5d3dd Mon Sep 17 00:00:00 2001 From: ZhenLian Date: Fri, 2 Apr 2021 11:19:22 -0700 Subject: [PATCH 005/998] Invoke Go Vet Check in Sub-modules (#4302) * Invoke Go Vet Check in Sub-modules --- test/tools/tools.go | 7 +++---- test/tools/tools_vet.go | 21 +++++++++++++++++++++ vet.sh | 24 ++++++++++++++---------- 3 files changed, 38 insertions(+), 14 deletions(-) create mode 100644 test/tools/tools_vet.go diff --git a/test/tools/tools.go b/test/tools/tools.go index 511dc2534462..3a614f765f50 100644 --- a/test/tools/tools.go +++ b/test/tools/tools.go @@ -18,10 +18,9 @@ * */ -// This package exists to cause `go mod` and `go get` to believe these tools -// are dependencies, even though they are not runtime dependencies of any grpc -// package. This means they will appear in our `go.mod` file, but will not be -// a part of the build. +// This file is not intended to be compiled. Because some of these imports are +// not actual go packages, we use a build constraint at the top of this file to +// prevent tools from inspecting the imports. package tools diff --git a/test/tools/tools_vet.go b/test/tools/tools_vet.go new file mode 100644 index 000000000000..06ab2fd10be2 --- /dev/null +++ b/test/tools/tools_vet.go @@ -0,0 +1,21 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tools is used to pin specific versions of external tools in this +// module's go.mod that gRPC uses for internal testing. +package tools diff --git a/vet.sh b/vet.sh index dcd939bb3907..1a0dbd7ee5ab 100755 --- a/vet.sh +++ b/vet.sh @@ -105,12 +105,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*. # TODO: Remove when we drop Go 1.10 support go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | not grep -vE "\.pb\.go" -golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" -go vet -all ./... - misspell -error . # - Check that generated proto files are up to date. @@ -120,12 +114,22 @@ if [[ -z "${VET_SKIP_PROTO}" ]]; then (git status; git --no-pager diff; exit 1) fi -# - Check that our modules are tidy. -if go help mod >& /dev/null; then - find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 
2>&1 | not grep -vE "/testv3\.pb\.go:" + + go mod tidy git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) -fi + popd +done # - Collection of static analysis checks # From 8892a7b247c0aef5059175bacee30f2b055aac88 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 5 Apr 2021 13:56:00 -0700 Subject: [PATCH 006/998] [xds_interop_client_admin] xds/interop: register admin services and reflection (#4307) --- interop/xds/client/client.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/interop/xds/client/client.go b/interop/xds/client/client.go index 5b755272d3e7..b028ec79228e 100644 --- a/interop/xds/client/client.go +++ b/interop/xds/client/client.go @@ -31,9 +31,11 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/admin" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/reflection" "google.golang.org/grpc/status" _ "google.golang.org/grpc/xds" @@ -370,6 +372,12 @@ func main() { defer s.Stop() testgrpc.RegisterLoadBalancerStatsServiceServer(s, &statsService{}) testgrpc.RegisterXdsUpdateClientConfigureServiceServer(s, &configureService{}) + reflection.Register(s) + cleanup, err := admin.Register(s) + if err != nil { + logger.Fatalf("failed to register admin: %v", err) + } + defer cleanup() go s.Serve(lis) clients := make([]testgrpc.TestServiceClient, *numChannels) From 777b228b599fd383aafd29155c35741d617b564c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 6 Apr 2021 10:55:19 -0700 Subject: [PATCH 007/998] xds: fix service request counter flaky test (#4324) --- xds/internal/client/requests_counter.go | 4 ++++ xds/internal/client/requests_counter_test.go | 6 +++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/xds/internal/client/requests_counter.go b/xds/internal/client/requests_counter.go index 7ef18345ed6c..f033e1920991 100644 --- a/xds/internal/client/requests_counter.go +++ 
b/xds/internal/client/requests_counter.go @@ -56,6 +56,10 @@ func GetServiceRequestsCounter(serviceName string) *ServiceRequestsCounter { // StartRequest starts a request for a service, incrementing its number of // requests by 1. Returns an error if the max number of requests is exceeded. func (c *ServiceRequestsCounter) StartRequest(max uint32) error { + // Note that during race, the limits could be exceeded. This is allowed: + // "Since the implementation is eventually consistent, races between threads + // may allow limits to be potentially exceeded." + // https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking#arch-overview-circuit-break. if atomic.LoadUint32(&c.numRequests) >= max { return fmt.Errorf("max requests %v exceeded on service %v", max, c.ServiceName) } diff --git a/xds/internal/client/requests_counter_test.go b/xds/internal/client/requests_counter_test.go index fe532724d14e..2dc336d1c1d5 100644 --- a/xds/internal/client/requests_counter_test.go +++ b/xds/internal/client/requests_counter_test.go @@ -91,7 +91,11 @@ func testCounter(t *testing.T, test counterTest) { if test.expectedErrors == 0 && loadedError != nil { t.Errorf("error starting request: %v", loadedError.(error)) } - if successes != test.expectedSuccesses || errors != test.expectedErrors { + // We allow the limits to be exceeded during races. + // + // But we should never over-limit, so this test fails if there are less + // successes than expected. 
+ if successes < test.expectedSuccesses || errors > test.expectedErrors { t.Errorf("unexpected number of (successes, errors), expected (%v, %v), encountered (%v, %v)", test.expectedSuccesses, test.expectedErrors, successes, errors) } } From 9a10f357871cf04dbc16b064b993e81e66c660f7 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 6 Apr 2021 13:11:49 -0700 Subject: [PATCH 008/998] balancergroup: fix leak child balancer not closed (#4308) --- .../balancer/balancergroup/balancergroup.go | 7 +++-- .../balancergroup/balancergroup_test.go | 30 +++++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/xds/internal/balancer/balancergroup/balancergroup.go index 2ec576a4b572..5b6d42a25e44 100644 --- a/xds/internal/balancer/balancergroup/balancergroup.go +++ b/xds/internal/balancer/balancergroup/balancergroup.go @@ -479,6 +479,10 @@ func (bg *BalancerGroup) Close() { } bg.incomingMu.Unlock() + // Clear(true) runs clear function to close sub-balancers in cache. It + // must be called out of outgoing mutex. + bg.balancerCache.Clear(true) + bg.outgoingMu.Lock() if bg.outgoingStarted { bg.outgoingStarted = false @@ -487,9 +491,6 @@ func (bg *BalancerGroup) Close() { } } bg.outgoingMu.Unlock() - // Clear(true) runs clear function to close sub-balancers in cache. It - // must be called out of outgoing mutex. - bg.balancerCache.Clear(true) } const ( diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go index 0ad4bf8df10f..ab6ac3913cbb 100644 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ b/xds/internal/balancer/balancergroup/balancergroup_test.go @@ -938,6 +938,36 @@ func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *test } } +// After removing a sub-balancer, it will be kept in cache. Make sure that this +// sub-balancer's Close is called when the balancer group is closed. 
+func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) { + const balancerName = "stub-TestBalancerGroup_check_close" + closed := make(chan struct{}) + stub.Register(balancerName, stub.BalancerFuncs{Close: func(_ *stub.BalancerData) { + close(closed) + }}) + builder := balancer.Get(balancerName) + + defer replaceDefaultSubBalancerCloseTimeout(time.Second)() + gator, bg, _, _ := initBalancerGroupForCachingTest(t) + + // Add balancer, and remove + gator.Add(testBalancerIDs[2], 1) + bg.Add(testBalancerIDs[2], builder) + gator.Remove(testBalancerIDs[2]) + bg.Remove(testBalancerIDs[2]) + + // Immediately close balancergroup, before the cache timeout. + bg.Close() + + // Make sure the removed child balancer is closed eventually. + select { + case <-closed: + case <-time.After(time.Second * 2): + t.Fatalf("timeout waiting for the child balancer in cache to be closed") + } +} + // TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed // to the balancergroup at creation time is passed to child policies. 
func (s) TestBalancerGroupBuildOptions(t *testing.T) { From 004ef8ade68b267f285c82e955a2f663c9a591be Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 6 Apr 2021 13:47:15 -0700 Subject: [PATCH 009/998] xds/clusterimpl: fix picker update race after balancer is closed (#4318) --- .../balancer/clusterimpl/balancer_test.go | 45 +++++++++++++++++++ .../balancer/clusterimpl/clusterimpl.go | 25 ++++++++++- 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 3e6ac0fd2900..6d9b7a5082f6 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -324,3 +324,48 @@ func TestDropCircuitBreaking(t *testing.T) { t.Fatalf("got unexpected drop reports, diff (-got, +want): %v", diff) } } + +// TestPickerUpdateAfterClose covers the case that cluster_impl wants to update +// picker after it's closed. Because picker updates are sent in the run() +// goroutine. +func TestPickerUpdateAfterClose(t *testing.T) { + xdsC := fakeclient.NewClient() + oldNewXDSClient := newXDSClient + newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + defer func() { newXDSClient = oldNewXDSClient }() + + builder := balancer.Get(clusterImplName) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + + var maxRequest uint32 = 50 + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: testBackendAddrs, + }, + BalancerConfig: &lbConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + MaxConcurrentRequests: &maxRequest, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + b.Close() + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + // Send SubConn state changes to trigger picker updates. Balancer will + // be closed in a defer.
+ sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // This close will race with the SubConn state update. + b.Close() + + select { + case <-cc.NewPickerCh: + t.Fatalf("unexpected picker update after balancer is closed") + case <-time.After(time.Millisecond * 10): + } +} diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 4e4af5a02b45..4435f9e65a03 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -26,6 +26,7 @@ package clusterimpl import ( "encoding/json" "fmt" + "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/buffer" @@ -90,8 +91,19 @@ type xdsClientInterface interface { type clusterImplBalancer struct { balancer.ClientConn - bOpts balancer.BuildOptions + + // mu guarantees mutual exclusion between Close() and handling of picker + // update to the parent ClientConn in run(). It's to make sure that the + // run() goroutine doesn't send picker update to parent after the balancer + // is closed. + // + // It's only used by the run() goroutine, but not the other exported + // functions. Because the exported functions are guaranteed to be + // synchronized with Close(). 
+ mu sync.Mutex closed *grpcsync.Event + + bOpts balancer.BuildOptions logger *grpclog.PrefixLogger xdsC xdsClientInterface @@ -274,12 +286,15 @@ func (cib *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balanc } func (cib *clusterImplBalancer) Close() { + cib.mu.Lock() + cib.closed.Fire() + cib.mu.Unlock() + if cib.childLB != nil { cib.childLB.Close() cib.childLB = nil } cib.xdsC.Close() - cib.closed.Fire() cib.logger.Infof("Shutdown") } @@ -301,6 +316,11 @@ func (cib *clusterImplBalancer) run() { select { case update := <-cib.pickerUpdateCh.Get(): cib.pickerUpdateCh.Load() + cib.mu.Lock() + if cib.closed.HasFired() { + cib.mu.Unlock() + return + } switch u := update.(type) { case balancer.State: cib.childState = u @@ -322,6 +342,7 @@ func (cib *clusterImplBalancer) run() { }) } } + cib.mu.Unlock() case <-cib.closed.Done(): return } From 493d388ad24c7a3e957f552a1a15dccdd1c9124b Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 6 Apr 2021 15:09:00 -0700 Subject: [PATCH 010/998] xds/csds: update proto imports to separate grpc from non-grpc symbols (#4326) --- xds/csds/csds_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 2993beea0e5d..867d74e5b25b 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -49,6 +49,7 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspbgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) const ( @@ -246,7 +247,7 @@ func TestCSDS(t *testing.T) { } } -func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServer, string, v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { +func commonSetup(t *testing.T) 
(xdsClientInterfaceWithWatch, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { t.Helper() // Spin up a xDS management server on a local port. @@ -281,7 +282,7 @@ func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServ if err != nil { t.Fatal(err) } - v3statuspb.RegisterClientStatusDiscoveryServiceServer(server, csdss) + v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) // Create a local listener and pass it to Serve(). lis, err := testutils.LocalTCPListener() if err != nil { @@ -298,7 +299,7 @@ func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServ if err != nil { t.Fatalf("cannot connect to server: %v", err) } - c := v3statuspb.NewClientStatusDiscoveryServiceClient(conn) + c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) if err != nil { @@ -317,7 +318,7 @@ func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServ } } -func checkForRequested(stream v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient) error { +func checkForRequested(stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient) error { if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { return fmt.Errorf("failed to send request: %v", err) } @@ -395,7 +396,7 @@ func checkForRequested(stream v3statuspb.ClientStatusDiscoveryService_StreamClie return nil } -func checkForACKed(stream v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient) error { +func checkForACKed(stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient) error { const wantVersion = "1" if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { @@ -492,7 +493,7 @@ func checkForACKed(stream 
v3statuspb.ClientStatusDiscoveryService_StreamClientSt return nil } -func checkForNACKed(nackResourceIdx int, stream v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient) error { +func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient) error { const ( ackVersion = "1" nackVersion = "2" From 1895da54b012305f2628e3feee697937149aac57 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 8 Apr 2021 11:34:02 -0700 Subject: [PATCH 011/998] xds/resolver: fix panic when two LDS updates are receives without RDS in between (#4327) Also confirmed that the LDS updates shouldn't trigger state update without the RDS. --- xds/internal/resolver/watch_service.go | 10 +++++- xds/internal/resolver/xds_resolver_test.go | 42 ++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 42ede988300c..7667592ccd6f 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -132,7 +132,15 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er // // If the route name did change, then we must wait until the first RDS // update before reporting this LDS config. - w.serviceCb(w.lastUpdate, nil) + if w.lastUpdate.virtualHost != nil { + // We want to send an update with the new fields from the new LDS + // (e.g. max stream duration), and old fields from the previous + // RDS. + // + // But note that this should only happen when virtual host is set, + // which means an RDS was received.
+ w.serviceCb(w.lastUpdate, nil) + } return } w.rdsName = update.RouteConfigName diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 36c7416cb436..82355eecc70b 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -1030,6 +1030,48 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { } } +// TestXDSResolverMultipleLDSUpdates tests the case where two LDS updates with +// the same RDS name to watch are received without an RDS in between. Those LDS +// updates shouldn't trigger service config update. +// +// This test case also makes sure the resolver doesn't panic. +func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { + xdsC := fakeclient.NewClient() + xdsR, tcc, cancel := testSetup(t, setupOpts{ + xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + }) + defer func() { + cancel() + xdsR.Close() + }() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + waitForWatchListener(ctx, t, xdsC, targetStr) + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + defer replaceRandNumGenerator(0)() + + // Send a new LDS update, with the same fields. + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + // Should NOT trigger a state update. + gotState, err := tcc.stateCh.Receive(ctx) + if err == nil { + t.Fatalf("ClientConn.UpdateState received %v, want timeout error", gotState) + } + + // Send a new LDS update, with the same RDS name, but different fields. 
+ xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) + ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + gotState, err = tcc.stateCh.Receive(ctx) + if err == nil { + t.Fatalf("ClientConn.UpdateState received %v, want timeout error", gotState) + } +} + type filterBuilder struct { httpfilter.Filter // embedded as we do not need to implement registry / parsing in this test. path *[]string From c7a203dcb5c97bf4cc7fd79b905b044ab14a5fbc Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 8 Apr 2021 14:31:20 -0700 Subject: [PATCH 012/998] xds/interop: move header/path matching to all (#4325) --- test/kokoro/xds.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index e75743dd9e57..65b35e3acac6 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -27,7 +27,7 @@ grpc/tools/run_tests/helper_scripts/prep_xds.sh # they are added into "all". 
GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ python3 grpc/tools/run_tests/run_xds_tests.py \ - --test_case="all,path_matching,header_matching,circuit_breaking,timeout,fault_injection" \ + --test_case="all,circuit_breaking,timeout,fault_injection" \ --project_id=grpc-testing \ --project_num=830293263384 \ --source_image=projects/grpc-testing/global/images/xds-test-server-4 \ From 69f6f5a51249d3a9f4b6a9262167ddd984599cdc Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 8 Apr 2021 15:52:49 -0700 Subject: [PATCH 013/998] xds: add support for unsupported filter matchers (#4315) --- xds/internal/client/filter_chain.go | 216 ++++++++++++++--------- xds/internal/client/filter_chain_test.go | 173 ++++++++++++++++-- xds/internal/client/lds_test.go | 3 - 3 files changed, 291 insertions(+), 101 deletions(-) diff --git a/xds/internal/client/filter_chain.go b/xds/internal/client/filter_chain.go index 8e24ab858230..73ffa18347e8 100644 --- a/xds/internal/client/filter_chain.go +++ b/xds/internal/client/filter_chain.go @@ -74,12 +74,17 @@ const ( // connection time. // // The logic specified in the documentation around the xDS FilterChainMatch -// proto mentions 8 criteria to match on. gRPC does not support 4 of those, and -// we ignore filter chains which contain any of these unsupported fields at -// parsing time. Here we use the remaining 4 criteria to find a matching filter -// chain in the following order: -// Destination IP address, Source type, Source IP address, Source port. -// TODO: Ignore chains with unsupported fields *only* at connection time. +// proto mentions 8 criteria to match on. +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. 
type FilterChainManager struct { // Destination prefix is the first match criteria that we support. // Therefore, this multi-stage map is indexed on destination prefixes @@ -98,20 +103,29 @@ type FilterChainManager struct { // involves too much time/effort, sort this slice based on the netmask size. dstPrefixes []*destPrefixEntry - def *FilterChain // Default filter chain, if specified. - fcCnt int // Count of supported filter chains, for validation. + def *FilterChain // Default filter chain, if specified. } // destPrefixEntry is the value type of the map indexed on destination prefixes. type destPrefixEntry struct { net *net.IPNet // The actual destination prefix. + // We need to keep track of the transport protocols seen as part of the + // config validation (and internal structure building) phase. The only two + // values that we support are empty string and "raw_buffer", with the latter + // taking preference. Once we have seen one filter chain with "raw_buffer", + // we can drop everything other filter chain with an empty transport + // protocol. + rawBufferSeen bool // For each specified source type in the filter chain match criteria, this // array points to the set of specified source prefixes. // Unspecified source type matches end up as a wildcard entry here with an // index of 0, which actually represents the source type `ANY`. - srcTypeArr [3]*sourcePrefixes + srcTypeArr sourceTypesArray } +// An array for the fixed number of source types that we have. +type sourceTypesArray [3]*sourcePrefixes + // sourcePrefixes contains source prefix related information specified in the // match criteria. These are pointed to by the array of source types. type sourcePrefixes struct { @@ -141,6 +155,24 @@ func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, err if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { return nil, err } + // Build the source and dest prefix slices used by Lookup(). 
+ fcSeen := false + for _, dstPrefix := range fci.dstPrefixMap { + fci.dstPrefixes = append(fci.dstPrefixes, dstPrefix) + for _, st := range dstPrefix.srcTypeArr { + if st == nil { + continue + } + for _, srcPrefix := range st.srcPrefixMap { + st.srcPrefixes = append(st.srcPrefixes, srcPrefix) + for _, fc := range srcPrefix.srcPortMap { + if fc != nil { + fcSeen = true + } + } + } + } + } // Retrieve the default filter chain. The match criteria specified on the // default filter chain is never used. The default filter chain simply gets @@ -156,7 +188,7 @@ func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, err // If there are no supported filter chains and no default filter chain, we // fail here. This will call the Listener resource to be NACK'ed. - if fci.fcCnt == 0 && fci.def == nil { + if !fcSeen && fci.def == nil { return nil, fmt.Errorf("no supported filter chains and no default filter chain") } return fci, nil @@ -166,114 +198,133 @@ func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, err // internal data structures corresponding to the match criteria. func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) error { for _, fc := range fcs { - // Skip filter chains with unsupported match fields/criteria. fcm := fc.GetFilterChainMatch() - if fcm.GetDestinationPort().GetValue() != 0 || - fcm.GetServerNames() != nil || - (fcm.GetTransportProtocol() != "" && fcm.TransportProtocol != "raw_buffer") || - fcm.GetApplicationProtocols() != nil { + if fcm.GetDestinationPort().GetValue() != 0 { + // Destination port is the first match criteria and we do not + // support filter chains which contains this match criteria. + logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) continue } - // Extract the supported match criteria, which will be used by - // successive addFilterChainsForXxx() functions. 
- var dstPrefixes []*net.IPNet - for _, pr := range fcm.GetPrefixRanges() { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - _, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("failed to parse destination prefix range: %+v", pr) - } - dstPrefixes = append(dstPrefixes, ipnet) - } - - var srcType SourceType - switch fcm.GetSourceType() { - case v3listenerpb.FilterChainMatch_ANY: - srcType = SourceTypeAny - case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: - srcType = SourceTypeSameOrLoopback - case v3listenerpb.FilterChainMatch_EXTERNAL: - srcType = SourceTypeExternal - default: - return fmt.Errorf("unsupported source type: %v", fcm.GetSourceType()) - } - - var srcPrefixes []*net.IPNet - for _, pr := range fcm.GetSourcePrefixRanges() { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - _, ipnet, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("failed to parse source prefix range: %+v", pr) - } - srcPrefixes = append(srcPrefixes, ipnet) - } - - var srcPorts []int - for _, port := range fcm.GetSourcePorts() { - srcPorts = append(srcPorts, int(port)) - } - // Build the internal representation of the filter chain match fields. - if err := fci.addFilterChainsForDestPrefixes(dstPrefixes, srcType, srcPrefixes, srcPorts, fc); err != nil { + if err := fci.addFilterChainsForDestPrefixes(fc); err != nil { return err } - fci.fcCnt++ } - // Build the source and dest prefix slices used by Lookup(). 
- for _, dstPrefix := range fci.dstPrefixMap { - fci.dstPrefixes = append(fci.dstPrefixes, dstPrefix) - for _, st := range dstPrefix.srcTypeArr { - if st == nil { - continue - } - for _, srcPrefix := range st.srcPrefixMap { - st.srcPrefixes = append(st.srcPrefixes, srcPrefix) - } - } - } return nil } -// addFilterChainsForDestPrefixes adds destination prefixes to the internal data -// structures and delegates control to addFilterChainsForSourceType to continue -// building the internal data structure. -func (fci *FilterChainManager) addFilterChainsForDestPrefixes(dstPrefixes []*net.IPNet, srcType SourceType, srcPrefixes []*net.IPNet, srcPorts []int, fc *v3listenerpb.FilterChain) error { +func (fci *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.FilterChain) error { + var dstPrefixes []*net.IPNet + for _, pr := range fc.GetFilterChainMatch().GetPrefixRanges() { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse destination prefix range: %+v", pr) + } + dstPrefixes = append(dstPrefixes, ipnet) + } + if len(dstPrefixes) == 0 { // Use the wildcard IP when destination prefix is unspecified. 
if fci.dstPrefixMap[emptyAddrMapKey] == nil { fci.dstPrefixMap[emptyAddrMapKey] = &destPrefixEntry{net: zeroIP} } - return fci.addFilterChainsForSourceType(fci.dstPrefixMap[emptyAddrMapKey], srcType, srcPrefixes, srcPorts, fc) + return fci.addFilterChainsForServerNames(fci.dstPrefixMap[emptyAddrMapKey], fc) } for _, prefix := range dstPrefixes { p := prefix.String() if fci.dstPrefixMap[p] == nil { fci.dstPrefixMap[p] = &destPrefixEntry{net: prefix} } - if err := fci.addFilterChainsForSourceType(fci.dstPrefixMap[p], srcType, srcPrefixes, srcPorts, fc); err != nil { + if err := fci.addFilterChainsForServerNames(fci.dstPrefixMap[p], fc); err != nil { return err } } return nil } +func (fci *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + // Filter chains specifying server names in their match criteria always fail + // a match at connection time. So, these filter chains can be dropped now. + if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + return nil + } + + return fci.addFilterChainsForTransportProtocols(dstEntry, fc) +} + +func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + tp := fc.GetFilterChainMatch().GetTransportProtocol() + switch { + case tp != "" && tp != "raw_buffer": + // Only allow filter chains with transport protocol set to empty string + // or "raw_buffer". + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp == "" && dstEntry.rawBufferSeen: + // If we have already seen filter chains with transport protocol set to + // "raw_buffer", we can drop filter chains with transport protocol set + // to empty string, since the former takes precedence. 
+ logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp != "" && !dstEntry.rawBufferSeen: + // This is the first "raw_buffer" that we are seeing. Set the bit and + // reset the source types array which might contain entries for filter + // chains with transport protocol set to empty string. + dstEntry.rawBufferSeen = true + dstEntry.srcTypeArr = sourceTypesArray{} + } + return fci.addFilterChainsForApplicationProtocols(dstEntry, fc) +} + +func (fci *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + return nil + } + return fci.addFilterChainsForSourceType(dstEntry, fc) +} + // addFilterChainsForSourceType adds source types to the internal data // structures and delegates control to addFilterChainsForSourcePrefixes to // continue building the internal data structure. 
-func (fci *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefixEntry, srcType SourceType, srcPrefixes []*net.IPNet, srcPorts []int, fc *v3listenerpb.FilterChain) error { +func (fci *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + var srcType SourceType + switch st := fc.GetFilterChainMatch().GetSourceType(); st { + case v3listenerpb.FilterChainMatch_ANY: + srcType = SourceTypeAny + case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: + srcType = SourceTypeSameOrLoopback + case v3listenerpb.FilterChainMatch_EXTERNAL: + srcType = SourceTypeExternal + default: + return fmt.Errorf("unsupported source type: %v", st) + } + st := int(srcType) if dstEntry.srcTypeArr[st] == nil { dstEntry.srcTypeArr[st] = &sourcePrefixes{srcPrefixMap: make(map[string]*sourcePrefixEntry)} } - return fci.addFilterChainsForSourcePrefixes(dstEntry.srcTypeArr[st].srcPrefixMap, srcPrefixes, srcPorts, fc) + return fci.addFilterChainsForSourcePrefixes(dstEntry.srcTypeArr[st].srcPrefixMap, fc) } // addFilterChainsForSourcePrefixes adds source prefixes to the internal data // structures and delegates control to addFilterChainsForSourcePorts to continue // building the internal data structure. 
-func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, srcPrefixes []*net.IPNet, srcPorts []int, fc *v3listenerpb.FilterChain) error { +func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, fc *v3listenerpb.FilterChain) error { + var srcPrefixes []*net.IPNet + for _, pr := range fc.GetFilterChainMatch().GetSourcePrefixRanges() { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse source prefix range: %+v", pr) + } + srcPrefixes = append(srcPrefixes, ipnet) + } + if len(srcPrefixes) == 0 { // Use the wildcard IP when source prefix is unspecified. if srcPrefixMap[emptyAddrMapKey] == nil { @@ -282,7 +333,7 @@ func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map srcPortMap: make(map[int]*FilterChain), } } - return fci.addFilterChainsForSourcePorts(srcPrefixMap[emptyAddrMapKey], srcPorts, fc) + return fci.addFilterChainsForSourcePorts(srcPrefixMap[emptyAddrMapKey], fc) } for _, prefix := range srcPrefixes { p := prefix.String() @@ -292,7 +343,7 @@ func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map srcPortMap: make(map[int]*FilterChain), } } - if err := fci.addFilterChainsForSourcePorts(srcPrefixMap[p], srcPorts, fc); err != nil { + if err := fci.addFilterChainsForSourcePorts(srcPrefixMap[p], fc); err != nil { return err } } @@ -303,7 +354,12 @@ func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map // structures and completes the process of building the internal data structure. // It is here that we determine if there are multiple filter chains with // overlapping matching rules. 
-func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, srcPorts []int, fcProto *v3listenerpb.FilterChain) error { +func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, fcProto *v3listenerpb.FilterChain) error { + var srcPorts []int + for _, port := range fcProto.GetFilterChainMatch().GetSourcePorts() { + srcPorts = append(srcPorts, int(port)) + } + fc, err := filterChainFromProto(fcProto) if err != nil { return err diff --git a/xds/internal/client/filter_chain_test.go b/xds/internal/client/filter_chain_test.go index ba0ed944160d..ec100c561c96 100644 --- a/xds/internal/client/filter_chain_test.go +++ b/xds/internal/client/filter_chain_test.go @@ -402,8 +402,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }, }, }, - def: &FilterChain{}, - fcCnt: 1, + def: &FilterChain{}, }, }, { @@ -471,7 +470,6 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { IdentityCertName: "defaultIdentityCertName", }, }, - fcCnt: 1, }, }, { @@ -560,7 +558,6 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { RequireClientCert: true, }, }, - fcCnt: 1, }, }, } @@ -648,8 +645,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, - fcCnt: 3, + def: &FilterChain{}, }, }, { @@ -704,8 +700,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, - fcCnt: 2, + def: &FilterChain{}, }, }, { @@ -757,8 +752,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, - fcCnt: 2, + def: &FilterChain{}, }, }, { @@ -818,8 +812,126 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, - fcCnt: 2, + def: &FilterChain{}, + }, + }, + { + desc: "some chains have unsupported fields", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: 
&v3listenerpb.FilterChainMatch{}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}, + TransportProtocol: "raw_buffer", + }, + }, + { + // This chain will be dropped in favor of the above + // filter chain because they both have the same + // destination prefix, but this one has an empty + // transport protocol while the above chain has the more + // preferred "raw_buffer". + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}, + TransportProtocol: "", + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 16)}, + }, + }, + { + // This chain will be dropped for unsupported server + // names. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.1", 32)}, + ServerNames: []string{"foo", "bar"}, + }, + }, + { + // This chain will be dropped for unsupported transport + // protocol. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.2", 32)}, + TransportProtocol: "not-raw-buffer", + }, + }, + { + // This chain will be dropped for unsupported + // application protocol. 
+ FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.3", 32)}, + ApplicationProtocols: []string{"h2"}, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "10.0.0.0/8": { + net: ipNetFromCIDR("10.0.0.0/8"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "0.0.0.0/0": { + net: zeroIP, + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "192.168.100.1/32": { + net: ipNetFromCIDR("192.168.100.1/32"), + srcTypeArr: [3]*sourcePrefixes{}, + }, + "192.168.100.2/32": { + net: ipNetFromCIDR("192.168.100.2/32"), + srcTypeArr: [3]*sourcePrefixes{}, + }, + "192.168.100.3/32": { + net: ipNetFromCIDR("192.168.100.3/32"), + srcTypeArr: [3]*sourcePrefixes{}, + }, + }, + def: &FilterChain{}, }, }, } @@ -931,12 +1043,40 @@ func TestLookup_Failures(t *testing.T) { }, }, params: FilterChainLookupParams{ - DestAddr: net.IPv4(192, 168, 100, 1), - SourceAddr: net.IPv4(192, 168, 100, 1), - SourcePort: 80, + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 80, }, wantErr: "no matching filter chain after all match criteria", }, + { + desc: "most specific match dropped for unsupported field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + // This chain will be picked in the 
destination prefix + // stage, but will be dropped at the server names stage. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.1", 32)}, + ServerNames: []string{"foo"}, + }, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.0", 16)}, + }, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 80, + }, + wantErr: "no matching filter chain based on source type match", + }, } for _, test := range tests { @@ -1142,8 +1282,6 @@ func (fci *FilterChainManager) Equal(other *FilterChainManager) bool { return false case !cmp.Equal(fci.def, other.def): return false - case fci.fcCnt != other.fcCnt: - return false } return true } @@ -1218,7 +1356,6 @@ func (fci *FilterChainManager) String() string { if fci.def != nil { sb.WriteString(fmt.Sprintf("default_filter_chain: %+v ", fci.def)) } - sb.WriteString(fmt.Sprintf("filter_chain_count: %d ", fci.fcCnt)) return sb.String() } diff --git a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go index 42421f66b2d1..76ea543b2d94 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/client/lds_test.go @@ -1389,7 +1389,6 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - fcCnt: 1, }, }, Raw: listenerEmptyTransportSocket, @@ -1553,7 +1552,6 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { IdentityCertName: "defaultIdentityCertName", }, }, - fcCnt: 1, }, }, Raw: listenerNoValidationContext, @@ -1607,7 +1605,6 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { RequireClientCert: true, }, }, - fcCnt: 1, }, }, Raw: listenerWithValidationContext, From 2df4370b332809e4daf1e2109b2389500e64c1c0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 8 Apr 2021 16:02:52 -0700 
Subject: [PATCH 014/998] examples: update xds examples for PSM security (#4256) --- examples/features/xds/client/main.go | 66 +++++++------------- examples/features/xds/server/main.go | 93 ++++++++++------------------ 2 files changed, 53 insertions(+), 106 deletions(-) diff --git a/examples/features/xds/client/main.go b/examples/features/xds/client/main.go index b1daa1cae9c8..97918faa2245 100644 --- a/examples/features/xds/client/main.go +++ b/examples/features/xds/client/main.go @@ -16,78 +16,56 @@ * */ -// Package main implements a client for Greeter service. +// Binary main implements a client for Greeter service using gRPC's client-side +// support for xDS APIs. package main import ( "context" "flag" - "fmt" "log" + "strings" "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" pb "google.golang.org/grpc/examples/helloworld/helloworld" _ "google.golang.org/grpc/xds" // To install the xds resolvers and balancers. ) -const ( - defaultTarget = "localhost:50051" - defaultName = "world" +var ( + target = flag.String("target", "xds:///localhost:50051", "uri of the Greeter Server, e.g. 'xds:///helloworld-service:8080'") + name = flag.String("name", "world", "name you wished to be greeted by the server") + xdsCreds = flag.Bool("xds_creds", false, "whether the server should use xDS APIs to receive security configuration") ) -var help = flag.Bool("help", false, "Print usage information") - -func init() { - flag.Usage = func() { - fmt.Fprintf(flag.CommandLine.Output(), ` -Usage: client [name [target]] - - name - The name you wish to be greeted by. Defaults to %q - target - The URI of the server, e.g. "xds:///helloworld-service". 
Defaults to %q -`, defaultName, defaultTarget) - - flag.PrintDefaults() - } -} - func main() { flag.Parse() - if *help { - flag.Usage() - return - } - args := flag.Args() - - if len(args) > 2 { - flag.Usage() - return - } - name := defaultName - if len(args) > 0 { - name = args[0] + if !strings.HasPrefix(*target, "xds:///") { + log.Fatalf("-target must use a URI with scheme set to 'xds'") } - target := defaultTarget - if len(args) > 1 { - target = args[1] + creds := insecure.NewCredentials() + if *xdsCreds { + log.Println("Using xDS credentials...") + var err error + if creds, err = xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}); err != nil { + log.Fatalf("failed to create client-side xDS credentials: %v", err) + } } - - // Set up a connection to the server. - conn, err := grpc.Dial(target, grpc.WithInsecure()) + conn, err := grpc.Dial(*target, grpc.WithTransportCredentials(creds)) if err != nil { - log.Fatalf("did not connect: %v", err) + log.Fatalf("grpc.Dial(%s) failed: %v", *target, err) } defer conn.Close() - c := pb.NewGreeterClient(conn) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) + c := pb.NewGreeterClient(conn) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) if err != nil { log.Fatalf("could not greet: %v", err) } diff --git a/examples/features/xds/server/main.go b/examples/features/xds/server/main.go index 7e0815645e5a..0367060f4b5d 100644 --- a/examples/features/xds/server/main.go +++ b/examples/features/xds/server/main.go @@ -16,7 +16,8 @@ * */ -// Package main starts Greeter service that will response with the hostname. +// Binary server demonstrated gRPC's support for xDS APIs on the server-side. It +// exposes the Greeter service that will response with the hostname. 
package main import ( @@ -27,36 +28,29 @@ import ( "math/rand" "net" "os" - "strconv" "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/reflection" + "google.golang.org/grpc/xds" ) -var help = flag.Bool("help", false, "Print usage information") - -const ( - defaultPort = 50051 +var ( + port = flag.Int("port", 50051, "the port to serve Greeter service requests on. Health service will be served on `port+1`") + xdsCreds = flag.Bool("xds_creds", false, "whether the server should use xDS APIs to receive security configuration") ) -// server is used to implement helloworld.GreeterServer. +// server implements helloworld.GreeterServer interface. type server struct { pb.UnimplementedGreeterServer - serverName string } -func newServer(serverName string) *server { - return &server{ - serverName: serverName, - } -} - -// SayHello implements helloworld.GreeterServer +// SayHello implements helloworld.GreeterServer interface. func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { log.Printf("Received: %v", in.GetName()) return &pb.HelloReply{Message: "Hello " + in.GetName() + ", from " + s.serverName}, nil @@ -72,65 +66,40 @@ func determineHostname() string { return hostname } -func init() { - flag.Usage = func() { - fmt.Fprintf(flag.CommandLine.Output(), ` -Usage: server [port [hostname]] - - port - The listen port. Defaults to %d - hostname - The name clients will see in greet responses. 
Defaults to the machine's hostname -`, defaultPort) - - flag.PrintDefaults() - } -} - func main() { flag.Parse() - if *help { - flag.Usage() - return - } - args := flag.Args() - if len(args) > 2 { - flag.Usage() - return + greeterPort := fmt.Sprintf(":%d", *port) + greeterLis, err := net.Listen("tcp4", greeterPort) + if err != nil { + log.Fatalf("net.Listen(tcp4, %q) failed: %v", greeterPort, err) } - port := defaultPort - if len(args) > 0 { + creds := insecure.NewCredentials() + if *xdsCreds { + log.Println("Using xDS credentials...") var err error - port, err = strconv.Atoi(args[0]) - if err != nil { - log.Printf("Invalid port number: %v", err) - flag.Usage() - return + if creds, err = xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}); err != nil { + log.Fatalf("failed to create server-side xDS credentials: %v", err) } } - var hostname string - if len(args) > 1 { - hostname = args[1] - } - if hostname == "" { - hostname = determineHostname() - } + greeterServer := xds.NewGRPCServer(grpc.Creds(creds)) + pb.RegisterGreeterServer(greeterServer, &server{serverName: determineHostname()}) - lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + healthPort := fmt.Sprintf(":%d", *port+1) + healthLis, err := net.Listen("tcp4", healthPort) if err != nil { - log.Fatalf("failed to listen: %v", err) + log.Fatalf("net.Listen(tcp4, %q) failed: %v", healthPort, err) } - s := grpc.NewServer() - pb.RegisterGreeterServer(s, newServer(hostname)) - - reflection.Register(s) + grpcServer := grpc.NewServer() healthServer := health.NewServer() healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - healthpb.RegisterHealthServer(s, healthServer) + healthpb.RegisterHealthServer(grpcServer, healthServer) - log.Printf("serving on %s, hostname %s", lis.Addr(), hostname) - s.Serve(lis) + log.Printf("Serving GreeterService on %s and HealthService on %s", greeterLis.Addr().String(), healthLis.Addr().String()) + go func() 
{ + greeterServer.Serve(greeterLis) + }() + grpcServer.Serve(healthLis) } From 1d1bbb55b381f39fbe93edbb1d0fd96a6b1ecaef Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 8 Apr 2021 16:11:44 -0700 Subject: [PATCH 015/998] weightedtarget: handle updating child policy name (#4309) --- .../balancer/weightedtarget/weightedtarget.go | 10 +++++ .../weightedtarget_config_test.go | 28 ++++++------- .../weightedtarget/weightedtarget_test.go | 40 +++++++++++++++++++ 3 files changed, 64 insertions(+), 14 deletions(-) diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index 02b199258cd2..ac1aaecd8e51 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -115,6 +115,16 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat w.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) // Not trigger a state/picker update. Wait for the new sub-balancer // to send its updates. + } else if newT.ChildPolicy.Name != oldT.ChildPolicy.Name { + // If the child policy name is differet, remove from balancer group + // and re-add. + w.stateAggregator.Remove(name) + w.bg.Remove(name) + w.stateAggregator.Add(name, newT.Weight) + w.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + // Trigger a state/picker update, because we don't want `ClientConn` + // to pick this sub-balancer anymore. + rebuildStateAndPicker = true } else if newT.Weight != oldT.Weight { // If this is an existing sub-balancer, update weight if necessary. 
w.stateAggregator.UpdateWeight(name, newT.Weight) diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go index 2208117f60e1..92dff8f5fbfc 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go @@ -24,7 +24,8 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" + + _ "google.golang.org/grpc/xds/internal/balancer/lrs" // Register LRS balancer, so we can use it as child policy in the config tests. ) const ( @@ -32,24 +33,23 @@ const ( "targets": { "cluster_1" : { "weight":75, - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] + "childPolicy":[{"lrs_experimental":{"clusterName":"cluster_1","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-1"}}}] }, "cluster_2" : { "weight":25, - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] + "childPolicy":[{"lrs_experimental":{"clusterName":"cluster_2","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-2"}}}] } } }` - - cdsName = "cds_experimental" + lrsBalancerName = "lrs_experimental" ) var ( - cdsConfigParser = balancer.Get(cdsName).(balancer.ConfigParser) - cdsConfigJSON1 = `{"cluster":"cluster_1"}` - cdsConfig1, _ = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON1)) - cdsConfigJSON2 = `{"cluster":"cluster_2"}` - cdsConfig2, _ = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON2)) + lrsConfigParser = balancer.Get(lrsBalancerName).(balancer.ConfigParser) + lrsConfigJSON1 = `{"clusterName":"cluster_1","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-1"}}` + lrsConfig1, _ = lrsConfigParser.ParseConfig([]byte(lrsConfigJSON1)) + lrsConfigJSON2 = 
`{"clusterName":"cluster_2","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-2"}}` + lrsConfig2, _ = lrsConfigParser.ParseConfig([]byte(lrsConfigJSON2)) ) func Test_parseConfig(t *testing.T) { @@ -73,15 +73,15 @@ func Test_parseConfig(t *testing.T) { "cluster_1": { Weight: 75, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: cdsName, - Config: cdsConfig1, + Name: lrsBalancerName, + Config: lrsConfig1, }, }, "cluster_2": { Weight: 25, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: cdsName, - Config: cdsConfig2, + Name: lrsBalancerName, + Config: lrsConfig2, }, }, }, diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_test.go index 7f9e566ca5b5..131f89832c79 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_test.go @@ -215,6 +215,46 @@ func TestWeightedTarget(t *testing.T) { if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } + + // Replace child policy of "cluster_1" to "round_robin". + config3, err := wtbParser.ParseConfig([]byte(`{"targets":{"cluster_2":{"weight":1,"childPolicy":[{"round_robin":""}]}}}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + wantAddr4 := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddr4, []string{"cluster_2"}), + }}, + BalancerConfig: config3, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that a subconn is created with the address, and the hierarchy path + // in the address is cleared. 
+ addr4 := <-cc.NewSubConnAddrsCh + if want := []resolver.Address{ + hierarchy.Set(wantAddr4, []string{}), + }; !cmp.Equal(addr4, want, cmp.AllowUnexported(attributes.Attributes{})) { + t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr4, want, cmp.AllowUnexported(attributes.Attributes{}))) + } + + // Send subconn state change. + sc4 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // Test pick with one backend. + p3 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p3.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc4, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc4) + } + } } func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { From d6abfb459860721299c6f0bc7ffcbed5f9feebe4 Mon Sep 17 00:00:00 2001 From: Aliaksandr Mianzhynski Date: Sat, 10 Apr 2021 02:30:59 +0300 Subject: [PATCH 016/998] cmd/protoc-gen-go-grpc: add protoc and protoc-gen-go-grpc versions to top comment (#4313) --- .../grpc_lb_v1/load_balancer_grpc.pb.go | 4 ++++ .../proto/grpc_lookup_v1/rls_grpc.pb.go | 4 ++++ channelz/grpc_channelz_v1/channelz_grpc.pb.go | 4 ++++ cmd/protoc-gen-go-grpc/grpc.go | 21 ++++++++++++++++++- .../proto/grpc_gcp/handshaker_grpc.pb.go | 4 ++++ .../meshca/internal/v1/meshca_grpc.pb.go | 4 ++++ examples/features/proto/echo/echo_grpc.pb.go | 4 ++++ .../helloworld/helloworld_grpc.pb.go | 4 ++++ .../routeguide/route_guide_grpc.pb.go | 4 ++++ health/grpc_health_v1/health_grpc.pb.go | 4 ++++ .../grpc_testing/benchmark_service_grpc.pb.go | 4 ++++ .../report_qps_scenario_service_grpc.pb.go | 4 ++++ interop/grpc_testing/test_grpc.pb.go | 4 ++++ .../grpc_testing/worker_service_grpc.pb.go | 4 ++++ profiling/proto/service_grpc.pb.go | 4 ++++ .../reflection_grpc.pb.go | 4 ++++ 
reflection/grpc_testing/test_grpc.pb.go | 4 ++++ stress/grpc_testing/metrics_grpc.pb.go | 4 ++++ test/grpc_testing/test_grpc.pb.go | 4 ++++ 19 files changed, 92 insertions(+), 1 deletion(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index d56b77cca634..50cc9da4a907 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index b469089ed570..39d79e13343e 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 diff --git a/channelz/grpc_channelz_v1/channelz_grpc.pb.go b/channelz/grpc_channelz_v1/channelz_grpc.pb.go index 051d1ac440c7..ee425c219940 100644 --- a/channelz/grpc_channelz_v1/channelz_grpc.pb.go +++ b/channelz/grpc_channelz_v1/channelz_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/channelz/v1/channelz.proto package grpc_channelz_v1 diff --git a/cmd/protoc-gen-go-grpc/grpc.go b/cmd/protoc-gen-go-grpc/grpc.go index 1e787344ebcc..24ad747cc91f 100644 --- a/cmd/protoc-gen-go-grpc/grpc.go +++ b/cmd/protoc-gen-go-grpc/grpc.go @@ -24,7 +24,6 @@ import ( "strings" "google.golang.org/protobuf/compiler/protogen" - "google.golang.org/protobuf/types/descriptorpb" ) @@ -43,6 +42,14 @@ func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated filename := file.GeneratedFilenamePrefix + "_grpc.pb.go" g := gen.NewGeneratedFile(filename, file.GoImportPath) g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.") + g.P("// versions:") + g.P("// - protoc-gen-go-grpc v", version) + g.P("// - protoc ", protocVersion(gen)) + if file.Proto.GetOptions().GetDeprecated() { + g.P("// ", file.Desc.Path(), " is a deprecated file.") + } else { + g.P("// source: ", file.Desc.Path()) + } g.P() g.P("package ", file.GoPackageName) g.P() @@ -50,6 +57,18 @@ func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated return g } +func protocVersion(gen *protogen.Plugin) string { + v := gen.Request.GetCompilerVersion() + if v == nil { + return "(unknown)" + } + var suffix string + if s := v.GetSuffix(); s != "" { + suffix = "-" + s + } + return fmt.Sprintf("v%d.%d.%d%s", v.GetMajor(), v.GetMinor(), v.GetPatch(), suffix) +} + // generateFileContent generates the gRPC service definitions, excluding the package statement. 
func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { if len(file.Services) == 0 { diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index efdbd13fa304..a02c4582815d 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/gcp/handshaker.proto package grpc_gcp diff --git a/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go b/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go index e53a61598aba..4663ff1ef35c 100644 --- a/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go +++ b/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: istio/google/security/meshca/v1/meshca.proto package google_security_meshca_v1 diff --git a/examples/features/proto/echo/echo_grpc.pb.go b/examples/features/proto/echo/echo_grpc.pb.go index 052087dae369..e1d24b1e8309 100644 --- a/examples/features/proto/echo/echo_grpc.pb.go +++ b/examples/features/proto/echo/echo_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: examples/features/proto/echo/echo.proto package echo diff --git a/examples/helloworld/helloworld/helloworld_grpc.pb.go b/examples/helloworld/helloworld/helloworld_grpc.pb.go index 39a0301c16b2..ae27dfa3cfee 100644 --- a/examples/helloworld/helloworld/helloworld_grpc.pb.go +++ b/examples/helloworld/helloworld/helloworld_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: examples/helloworld/helloworld/helloworld.proto package helloworld diff --git a/examples/route_guide/routeguide/route_guide_grpc.pb.go b/examples/route_guide/routeguide/route_guide_grpc.pb.go index 66860e63c476..efa7c28ce6f5 100644 --- a/examples/route_guide/routeguide/route_guide_grpc.pb.go +++ b/examples/route_guide/routeguide/route_guide_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: examples/route_guide/routeguide/route_guide.proto package routeguide diff --git a/health/grpc_health_v1/health_grpc.pb.go b/health/grpc_health_v1/health_grpc.pb.go index 386d16ce62d1..bdc3ae284e7a 100644 --- a/health/grpc_health_v1/health_grpc.pb.go +++ b/health/grpc_health_v1/health_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/interop/grpc_testing/benchmark_service_grpc.pb.go b/interop/grpc_testing/benchmark_service_grpc.pb.go index 1dcba4587d29..f4e4436e97e8 100644 --- a/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/testing/benchmark_service.proto package grpc_testing diff --git a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index b0fe8c8f5ee5..4bf3fce68ab1 100644 --- a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/testing/report_qps_scenario_service.proto package grpc_testing diff --git a/interop/grpc_testing/test_grpc.pb.go b/interop/grpc_testing/test_grpc.pb.go index ad5310aed623..137a1e98ce6e 100644 --- a/interop/grpc_testing/test_grpc.pb.go +++ b/interop/grpc_testing/test_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/testing/test.proto package grpc_testing diff --git a/interop/grpc_testing/worker_service_grpc.pb.go b/interop/grpc_testing/worker_service_grpc.pb.go index cc49b22b9261..a97366df09a2 100644 --- a/interop/grpc_testing/worker_service_grpc.pb.go +++ b/interop/grpc_testing/worker_service_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: grpc/testing/worker_service.proto package grpc_testing diff --git a/profiling/proto/service_grpc.pb.go b/profiling/proto/service_grpc.pb.go index bfdcc69bffb8..a0656149bda1 100644 --- a/profiling/proto/service_grpc.pb.go +++ b/profiling/proto/service_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: profiling/proto/service.proto package proto diff --git a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index c2b7429a06b0..7d05c14ebd89 100644 --- a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha diff --git a/reflection/grpc_testing/test_grpc.pb.go b/reflection/grpc_testing/test_grpc.pb.go index 76ec8d52d684..235b5d82484b 100644 --- a/reflection/grpc_testing/test_grpc.pb.go +++ b/reflection/grpc_testing/test_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: reflection/grpc_testing/test.proto package grpc_testing diff --git a/stress/grpc_testing/metrics_grpc.pb.go b/stress/grpc_testing/metrics_grpc.pb.go index 2ece03255630..0730fad49a46 100644 --- a/stress/grpc_testing/metrics_grpc.pb.go +++ b/stress/grpc_testing/metrics_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: stress/grpc_testing/metrics.proto package grpc_testing diff --git a/test/grpc_testing/test_grpc.pb.go b/test/grpc_testing/test_grpc.pb.go index ab3b68a92bcc..76b3935620ca 100644 --- a/test/grpc_testing/test_grpc.pb.go +++ b/test/grpc_testing/test_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.14.0 +// source: test/grpc_testing/test.proto package grpc_testing From fab5982df20a27885393f866db267ee7b35808d2 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 9 Apr 2021 16:49:25 -0700 Subject: [PATCH 017/998] xds: server-side listener network filter validation (#4312) --- internal/testutils/marshal_any.go | 36 + xds/internal/client/filter_chain_test.go | 25 +- xds/internal/client/lds_test.go | 1305 +++++++---------- xds/internal/client/xds.go | 62 + xds/internal/httpfilter/httpfilter.go | 3 - xds/internal/httpfilter/router/router.go | 17 +- .../test/xds_server_integration_test.go | 46 +- 7 files changed, 655 insertions(+), 839 deletions(-) create mode 100644 internal/testutils/marshal_any.go diff --git a/internal/testutils/marshal_any.go b/internal/testutils/marshal_any.go new file mode 100644 index 000000000000..9ddef6de15d6 --- /dev/null +++ b/internal/testutils/marshal_any.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package testutils + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/types/known/anypb" +) + +// MarshalAny is a convenience function to marshal protobuf messages into any +// protos. It will panic if the marshaling fails. 
+func MarshalAny(m proto.Message) *anypb.Any { + a, err := ptypes.MarshalAny(m) + if err != nil { + panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) + } + return a +} diff --git a/xds/internal/client/filter_chain_test.go b/xds/internal/client/filter_chain_test.go index ec100c561c96..e66f518828ac 100644 --- a/xds/internal/client/filter_chain_test.go +++ b/xds/internal/client/filter_chain_test.go @@ -31,6 +31,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/wrapperspb" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/version" ) @@ -247,7 +248,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.UpstreamTlsContext{}), + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{}), }, }, }, @@ -282,7 +283,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{}), + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{}), }, }, }, @@ -298,7 +299,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ @@ -322,7 +323,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { TransportSocket: 
&v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ @@ -346,7 +347,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{}, }), }, @@ -413,7 +414,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ InstanceName: "identityPluginInstance", @@ -429,7 +430,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ InstanceName: "defaultIdentityPluginInstance", @@ -480,7 +481,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { 
TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ @@ -504,7 +505,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ @@ -1101,7 +1102,7 @@ func TestLookup_Successes(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: "instance1"}, }, @@ -1115,7 +1116,7 @@ func TestLookup_Successes(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: "default"}, }, @@ 
-1434,7 +1435,7 @@ func transportSocketWithInstanceName(name string) *v3corepb.TransportSocket { return &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: name}, }, diff --git a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go index 76ea543b2d94..21718a4edc5e 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/client/lds_test.go @@ -27,13 +27,14 @@ import ( v1typepb "github.com/cncf/udpa/go/udpa/type/v1" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/types/known/durationpb" + + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" - "google.golang.org/protobuf/types/known/durationpb" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -54,13 +55,14 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { v2RouteConfigName = "v2RouteConfig" v3RouteConfigName = "v3RouteConfig" routeName = "routeName" + testVersion = "test-version-lds-client" ) var ( - v2Lis = &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v2httppb.HttpConnectionManager{ + v2Lis = testutils.MarshalAny(&v2xdspb.Listener{ + Name: v2LDSTarget, + ApiListener: &v2listenerpb.ApiListener{ 
+ ApiListener: testutils.MarshalAny(&v2httppb.HttpConnectionManager{ RouteSpecifier: &v2httppb.HttpConnectionManager_Rds{ Rds: &v2httppb.Rds{ ConfigSource: &v2corepb.ConfigSource{ @@ -69,21 +71,9 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { RouteConfigName: v2RouteConfigName, }, }, - } - mcm, _ := proto.Marshal(cm) - lis := &v2xdspb.Listener{ - Name: v2LDSTarget, - ApiListener: &v2listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2HTTPConnManagerURL, - Value: mcm, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } + }), + }, + }) customFilter = &v3httppb.HttpFilter{ Name: "customFilter", ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, @@ -132,10 +122,10 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: unknownFilterConfig}, IsOptional: true, } - v3LisWithInlineRoute = &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - hcm := &v3httppb.HttpConnectionManager{ + v3LisWithInlineRoute = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: &v3routepb.RouteConfiguration{ Name: routeName, @@ -153,50 +143,40 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ MaxStreamDuration: durationpb.New(time.Second), }, - } - mcm := marshalAny(hcm) - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: mcm, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } + }), + }, + }) v3LisWithFilters = func(fs ...*v3httppb.HttpFilter) *anypb.Any { - hcm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: 
&v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: v3RouteConfigName, - }, - }, - CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ - MaxStreamDuration: durationpb.New(time.Second), + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + HttpFilters: fs, + }), }, - HttpFilters: fs, - } - return &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - mcm := marshalAny(hcm) - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: mcm, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } + }) + } + errMD = UpdateMetadata{ + Status: ServiceStatusNACKed, + Version: testVersion, + ErrState: &UpdateErrorMetadata{ + Version: testVersion, + Err: errPlaceHolder, + }, } ) - const testVersion = "test-version-lds-client" tests := []struct { name string @@ -209,15 +189,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "non-listener resource", resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "badly marshaled listener resource", @@ -240,181 +213,80 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, }, wantUpdate: 
map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "wrong type in apiListener", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: v3RouteConfigName, - }, - }, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v2xdspb.Listener{}), }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "empty httpConnMgr in apiListener", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{}, - }, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, - }, 
- } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{}, + }, + }), }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "scopedRoutes routeConfig in apiListener", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: &anypb.Any{ + TypeUrl: version.V2ListenerURL, + Value: func() []byte { + cm := &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + } + mcm, _ := proto.Marshal(cm) + return mcm + }(), + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "rds.ConfigSource in apiListener is not ADS", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, 
- Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Path{ - Path: "/some/path", - }, - }, - RouteConfigName: v3RouteConfigName, - }, - }, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Path{ + Path: "/some/path", + }, }, + RouteConfigName: v3RouteConfigName, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }), }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "empty resource list", @@ -507,15 +379,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { name: "v3 with two filters with same name", resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter)}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "v3 with two filters - same type different name", @@ -544,15 +409,8 @@ func (s) TestUnmarshalListener_ClientSide(t 
*testing.T) { name: "v3 with server-only filter", resources: []*anypb.Any{v3LisWithFilters(serverOnlyCustomFilter)}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "v3 with optional server-only filter", @@ -592,43 +450,22 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { name: "v3 with err filter", resources: []*anypb.Any{v3LisWithFilters(errFilter)}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "v3 with optional err filter", resources: []*anypb.Any{v3LisWithFilters(errOptionalFilter)}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "v3 with unknown filter", resources: []*anypb.Any{v3LisWithFilters(unknownFilter)}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, + wantMD: errMD, + wantErr: true, }, { name: "v3 with unknown filter (optional)", @@ -745,14 +582,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, "bad": {}, }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: 
&UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, + wantMD: errMD, wantErr: true, }, } @@ -778,46 +608,51 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { } func (s) TestUnmarshalListener_ServerSide(t *testing.T) { - const v3LDSTarget = "grpc/server?xds.resource.listening_address=0.0.0.0:9999" + const ( + v3LDSTarget = "grpc/server?xds.resource.listening_address=0.0.0.0:9999" + testVersion = "test-version-lds-server" + ) var ( - listenerEmptyTransportSocket = marshalAny(&v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, + emptyValidNetworkFilters = []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + } + localSocketAddress = &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "0.0.0.0", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 9999, }, }, }, + } + listenerEmptyTransportSocket = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ { - Name: "filter-chain-1", + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, }, }, }) - listenerNoValidationContext = marshalAny(&v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, + listenerNoValidationContext = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ { - Name: 
"filter-chain-1", + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ InstanceName: "identityPluginInstance", @@ -830,11 +665,12 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, DefaultFilterChain: &v3listenerpb.FilterChain{ - Name: "default-filter-chain-1", + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ InstanceName: "defaultIdentityPluginInstance", @@ -846,25 +682,17 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }) - listenerWithValidationContext = marshalAny(&v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, + listenerWithValidationContext = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ { - Name: "filter-chain-1", + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: 
&v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ @@ -884,11 +712,12 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, DefaultFilterChain: &v3listenerpb.FilterChain{ - Name: "default-filter-chain-1", + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ @@ -907,10 +736,16 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }) + errMD = UpdateMetadata{ + Status: ServiceStatusNACKed, + Version: testVersion, + ErrState: &UpdateErrorMetadata{ + Version: testVersion, + Err: errPlaceHolder, + }, + } ) - const testVersion = "test-version-lds-server" - tests := []struct { name string resources []*anypb.Any @@ -920,448 +755,366 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }{ { name: "non-empty listener filters", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ListenerFilters: []*v3listenerpb.ListenerFilter{ - {Name: "listener-filter-1"}, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ListenerFilters: []*v3listenerpb.ListenerFilter{ + {Name: 
"listener-filter-1"}, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "unsupported field 'listener_filters'", + wantMD: errMD, + wantErr: "unsupported field 'listener_filters'", }, { name: "use_original_dst is set", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - UseOriginalDst: &wrapperspb.BoolValue{Value: true}, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + UseOriginalDst: &wrapperspb.BoolValue{Value: true}, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "unsupported field 'use_original_dst'", + wantMD: errMD, + wantErr: "unsupported field 'use_original_dst'", }, { - name: "no address field", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, + name: "no address field", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{Name: v3LDSTarget})}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "no address field in LDS response", + wantMD: errMD, + wantErr: "no address field in LDS response", }, { name: "no socket address field", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - 
Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{}, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: &v3corepb.Address{}, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "no socket_address field in LDS response", + }, + { + name: "no filter chains and no default filter chain", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{DestinationPort: &wrapperspb.UInt32Value{Value: 666}}, + Filters: emptyValidNetworkFilters, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, + wantMD: errMD, + wantErr: "no supported filter chains and no default filter chain", + }, + { + name: "missing http connection manager network filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + }, }, - }, - wantErr: "no socket_address field in LDS response", + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "missing HttpConnectionManager filter", }, { - name: "no filter chains and no default filter chain", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - 
PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - FilterChainMatch: &v3listenerpb.FilterChainMatch{DestinationPort: &wrapperspb.UInt32Value{Value: 666}}, + name: "missing filter name in http filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), }, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "no supported filter chains and no default filter chain", + wantMD: errMD, + wantErr: "missing name field in filter", }, { - name: "overlapping filter chain match criteria", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, + name: "duplicate filter names in http filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), }, }, - FilterChains: []*v3listenerpb.FilterChain{ - { - FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: 
[]uint32{1, 2, 3, 4, 5}}, - }, - { - FilterChainMatch: &v3listenerpb.FilterChainMatch{}, - }, - { - FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{5, 6, 7}}, + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), }, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "multiple filter chains with overlapping matching rules are defined", + wantMD: errMD, + wantErr: "duplicate filter name", }, { - name: "unexpected transport socket name", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "unsupported-transport-socket-name", - }, - }, + name: "unsupported oneof in typed config of http filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_ConfigDiscovery{}, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - 
Version: testVersion, - Err: errPlaceHolder, + wantMD: errMD, + wantErr: "unsupported config_type", + }, + { + name: "overlapping filter chain match criteria", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3, 4, 5}}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{5, 6, 7}}, + Filters: emptyValidNetworkFilters, + }, }, - }, - wantErr: "transport_socket field has unexpected name", + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "multiple filter chains with overlapping matching rules are defined", }, { - name: "unexpected transport socket typedConfig URL", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, + name: "unsupported network filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.LocalReplyConfig{}), }, }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - 
TypeUrl: version.V3UpstreamTLSContextURL, - }, - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "transport_socket field has unexpected typeURL", + wantMD: errMD, + wantErr: "unsupported network filter", }, { - name: "badly marshaled transport socket", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, + name: "badly marshaled network filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3HTTPConnManagerURL, + Value: []byte{1, 2, 3, 4}, }, }, }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: []byte{1, 2, 3, 4}, + }, + }, + }, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "failed unmarshaling of network filter", + }, + { + name: "client only http filter inside the network filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: 
localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + { + Name: "clientOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, }, }, - }, + }), }, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, + wantMD: errMD, + wantErr: "not supported server-side", + }, + { + name: "unexpected transport socket name", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "unsupported-transport-socket-name", + }, + }, }, - }, - wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "transport_socket field has unexpected name", }, { - name: "missing CommonTlsContext", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, + name: "unexpected transport socket typedConfig URL", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: 
localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{}), }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{} - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, + }, + }, + }, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "transport_socket field has unexpected typeURL", + }, + { + name: "badly marshaled transport socket", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3DownstreamTLSContextURL, + Value: []byte{1, 2, 3, 4}, }, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, + wantMD: errMD, + wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", + }, + { + name: "missing CommonTlsContext", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: 
localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{}), + }, + }, + }, }, - }, - wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", }, { name: "unsupported validation context in transport socket", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ - ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ - Name: "foo-sds-secret", - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: 
"envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", }, }, }, - }, + }), }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "validation context contains unexpected type", + wantMD: errMD, + wantErr: "validation context contains unexpected type", }, { name: "empty transport socket", @@ -1401,119 +1154,57 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, { name: "no identity and root certificate providers", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: 
&v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", }, }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, + }), }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + wantMD: errMD, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", }, { name: "no identity certificate provider with require_client_cert", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: 
[]*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{}, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{}, + }), }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), + }, + }, }, - }, + })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", + wantMD: errMD, + wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", }, { name: "happy case with no validation context", @@ -1619,6 +1310,10 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + oldFI := env.FaultInjectionSupport + env.FaultInjectionSupport = true + defer func() { env.FaultInjectionSupport = oldFI }() + gotUpdate, md, err := UnmarshalListener(testVersion, test.resources, nil) if (err != nil) != (test.wantErr != "") { t.Fatalf("UnmarshalListener(), got 
err: %v, wantErr: %v", err, test.wantErr) @@ -1741,7 +1436,7 @@ var customFilterTypedStructConfig = &v1typepb.TypedStruct{ var wrappedCustomFilterTypedStructConfig *anypb.Any func init() { - wrappedCustomFilterTypedStructConfig = marshalAny(customFilterTypedStructConfig) + wrappedCustomFilterTypedStructConfig = testutils.MarshalAny(customFilterTypedStructConfig) } var unknownFilterConfig = &anypb.Any{ @@ -1750,7 +1445,7 @@ var unknownFilterConfig = &anypb.Any{ } func wrappedOptionalFilter(name string) *anypb.Any { - return marshalAny(&v3routepb.FilterConfig{ + return testutils.MarshalAny(&v3routepb.FilterConfig{ IsOptional: true, Config: &anypb.Any{ TypeUrl: name, @@ -1758,11 +1453,3 @@ func wrappedOptionalFilter(name string) *anypb.Any { }, }) } - -func marshalAny(m proto.Message) *anypb.Any { - a, err := ptypes.MarshalAny(m) - if err != nil { - panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) - } - return a -} diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index 571fff670daa..c0caf5cceb57 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -270,6 +270,13 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err Port: strconv.Itoa(int(sockAddr.GetPortValue())), }, } + chains := lis.GetFilterChains() + if def := lis.GetDefaultFilterChain(); def != nil { + chains = append(chains, def) + } + if err := validateNetworkFilterChains(chains); err != nil { + return nil, err + } fcMgr, err := NewFilterChainManager(lis) if err != nil { @@ -279,6 +286,61 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err return lu, nil } +func validateNetworkFilterChains(filterChains []*v3listenerpb.FilterChain) error { + for _, filterChain := range filterChains { + seenNames := make(map[string]bool, len(filterChain.GetFilters())) + seenHCM := false + for _, filter := range filterChain.GetFilters() { + name := filter.GetName() + if name == "" { + return fmt.Errorf("filter 
chain {%+v} is missing name field in filter: {%+v}", filterChain, filter) + } + if seenNames[name] { + return fmt.Errorf("filter chain {%+v} has duplicate filter name %q", filterChain, name) + } + seenNames[name] = true + + // Network filters have a oneof field named `config_type` where we + // only support `TypedConfig` variant. + switch typ := filter.GetConfigType().(type) { + case *v3listenerpb.Filter_TypedConfig: + // The typed_config field has an `anypb.Any` proto which could + // directly contain the serialized bytes of the actual filter + // configuration, or it could be encoded as a `TypedStruct`. + // TODO: Add support for `TypedStruct`. + tc := filter.GetTypedConfig() + + // The only network filter that we currently support is the v3 + // HttpConnectionManager. So, we can directly check the type_url + // and unmarshal the config. + // TODO: Implement a registry of supported network filters (like + // we have for HTTP filters), when we have to support network + // filters other than HttpConnectionManager. + if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { + return fmt.Errorf("filter chain {%+v} has unsupported network filter %q in filter {%+v}", filterChain, tc.GetTypeUrl(), filter) + } + hcm := &v3httppb.HttpConnectionManager{} + if err := ptypes.UnmarshalAny(tc, hcm); err != nil { + return fmt.Errorf("filter chain {%+v} failed unmarshaling of network filter {%+v}: %v", filterChain, filter, err) + } + // We currently don't support HTTP filters on the server-side. + // We will be adding support for it in the future. So, we want + // to make sure that the http_filters configuration is valid. 
+ if _, err := processHTTPFilters(hcm.GetHttpFilters(), true); err != nil { + return err + } + seenHCM = true + default: + return fmt.Errorf("filter chain {%+v} has unsupported config_type %T in filter %s", filterChain, typ, filter.GetName()) + } + } + if !seenHCM { + return fmt.Errorf("filter chain {%+v} missing HttpConnectionManager filter", filterChain) + } + } + return nil +} + // UnmarshalRouteConfig processes resources received in an RDS response, // validates them, and transforms them into a native struct which contains only // fields we are interested in. The provided hostname determines the route diff --git a/xds/internal/httpfilter/httpfilter.go b/xds/internal/httpfilter/httpfilter.go index 6650241fab71..1f5f005e9bd2 100644 --- a/xds/internal/httpfilter/httpfilter.go +++ b/xds/internal/httpfilter/httpfilter.go @@ -65,9 +65,6 @@ type ClientInterceptorBuilder interface { // ServerInterceptorBuilder constructs a Server Interceptor. If this type is // implemented by a Filter, it is capable of working on a server. -// -// Server side filters are not currently supported, but this interface is -// defined for clarity. type ServerInterceptorBuilder interface { // BuildServerInterceptor uses the FilterConfigs produced above to produce // an HTTP filter interceptor for servers. 
config will always be non-nil, diff --git a/xds/internal/httpfilter/router/router.go b/xds/internal/httpfilter/router/router.go index 26e3acb5a4f4..b0f9d9d9a1e9 100644 --- a/xds/internal/httpfilter/router/router.go +++ b/xds/internal/httpfilter/router/router.go @@ -73,7 +73,10 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil return config{}, nil } -var _ httpfilter.ClientInterceptorBuilder = builder{} +var ( + _ httpfilter.ClientInterceptorBuilder = builder{} + _ httpfilter.ServerInterceptorBuilder = builder{} +) func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { if _, ok := cfg.(config); !ok { @@ -88,6 +91,18 @@ func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (ir return nil, nil } +func (builder) BuildServerInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ServerInterceptor, error) { + if _, ok := cfg.(config); !ok { + return nil, fmt.Errorf("router: incorrect config type provided (%T): %v", cfg, cfg) + } + if override != nil { + return nil, fmt.Errorf("router: unexpected override configuration specified: %v", override) + } + // The gRPC router is currently unimplemented on the server side. So we + // return a nil HTTPFilter, which will not be invoked. + return nil, nil +} + // The gRPC router filter does not currently support any configuration. Verify // type only. 
type config struct { diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 7606a35218ca..9c2e6daebfec 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -33,27 +33,29 @@ import ( "strconv" "testing" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/uuid" - xds2 "google.golang.org/grpc/internal/xds" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" - xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" "google.golang.org/grpc/xds" - "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + xdscreds 
"google.golang.org/grpc/credentials/xds" + xdsinternal "google.golang.org/grpc/internal/xds" + testpb "google.golang.org/grpc/test/grpc_testing" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" ) const ( @@ -151,8 +153,8 @@ func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, fun cpc := e2e.DefaultFileWatcherConfig(path.Join(tmpdir, certFile), path.Join(tmpdir, keyFile), path.Join(tmpdir, rootFile)) // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := xds2.SetupBootstrapFile(xds2.BootstrapOptions{ - Version: xds2.TransportV3, + bootstrapCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ + Version: xdsinternal.TransportV3, NodeID: nodeID, ServerURI: fs.Address, CertificateProviders: cpc, @@ -175,7 +177,7 @@ func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, fun testpb.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). - lis, err := testutils.LocalTCPListener() + lis, err := xdstestutils.LocalTCPListener() if err != nil { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } @@ -229,6 +231,14 @@ func listenerResourceWithoutSecurityConfig(t *testing.T, lis net.Listener) *v3li FilterChains: []*v3listenerpb.FilterChain{ { Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, }, }, } @@ -271,6 +281,14 @@ func listenerResourceWithSecurityConfig(t *testing.T, lis net.Listener) *v3liste }, }, }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ From 
950ddd3c37fc38deaf95f3a27b5883af4776a679 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 12 Apr 2021 09:56:37 -0700 Subject: [PATCH 018/998] xds/google_default_creds: handshake based on cluster name in address attributes (#4310) --- credentials/google/google.go | 13 +- credentials/google/google_test.go | 132 ++++++++++++++++++ credentials/google/xds.go | 90 ++++++++++++ internal/credentials/xds/handshake_cluster.go | 42 ++++++ .../balancer/clusterimpl/balancer_test.go | 91 ++++++++++++ .../balancer/clusterimpl/clusterimpl.go | 44 +++++- xds/internal/balancer/edsbalancer/eds.go | 4 + xds/internal/balancer/edsbalancer/eds_impl.go | 33 ++++- .../balancer/edsbalancer/eds_impl_test.go | 71 ++++++++++ xds/internal/balancer/edsbalancer/eds_test.go | 71 ++++++++++ 10 files changed, 584 insertions(+), 7 deletions(-) create mode 100644 credentials/google/google_test.go create mode 100644 credentials/google/xds.go create mode 100644 internal/credentials/xds/handshake_cluster.go diff --git a/credentials/google/google.go b/credentials/google/google.go index 7f3e240e475b..265d193c7c37 100644 --- a/credentials/google/google.go +++ b/credentials/google/google.go @@ -99,6 +99,15 @@ func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { return c.perRPCCreds } +var ( + newTLS = func() credentials.TransportCredentials { + return credentials.NewTLS(nil) + } + newALTS = func() credentials.TransportCredentials { + return alts.NewClientCreds(alts.DefaultClientOptions()) + } +) + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { @@ -110,11 +119,11 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { // Create transport credentials. 
switch mode { case internal.CredsBundleModeFallback: - newCreds.transportCreds = credentials.NewTLS(nil) + newCreds.transportCreds = newClusterTransportCreds(newTLS(), newALTS()) case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: // Only the clients can use google default credentials, so we only need // to create new ALTS client creds here. - newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions()) + newCreds.transportCreds = newALTS() default: return nil, fmt.Errorf("unsupported mode: %v", mode) } diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go new file mode 100644 index 000000000000..c20445811202 --- /dev/null +++ b/credentials/google/google_test.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package google + +import ( + "context" + "net" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/resolver" +) + +type testCreds struct { + credentials.TransportCredentials + typ string +} + +func (c *testCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, &testAuthInfo{typ: c.typ}, nil +} + +func (c *testCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, &testAuthInfo{typ: c.typ}, nil +} + +type testAuthInfo struct { + typ string +} + +func (t *testAuthInfo) AuthType() string { + return t.typ +} + +var ( + testTLS = &testCreds{typ: "tls"} + testALTS = &testCreds{typ: "alts"} + + contextWithHandshakeInfo = internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) +) + +func overrideNewCredsFuncs() func() { + oldNewTLS := newTLS + newTLS = func() credentials.TransportCredentials { + return testTLS + } + oldNewALTS := newALTS + newALTS = func() credentials.TransportCredentials { + return testALTS + } + return func() { + newTLS = oldNewTLS + newALTS = oldNewALTS + } +} + +// TestClientHandshakeBasedOnClusterName that by default (without switching +// modes), ClientHandshake does either tls or alts base on the cluster name in +// attributes. 
+func TestClientHandshakeBasedOnClusterName(t *testing.T) { + defer overrideNewCredsFuncs()() + for bundleTyp, tc := range map[string]credentials.Bundle{ + "defaultCreds": NewDefaultCredentials(), + "computeCreds": NewComputeEngineCredentials(), + } { + tests := []struct { + name string + ctx context.Context + wantTyp string + }{ + { + name: "no cluster name", + ctx: context.Background(), + wantTyp: "tls", + }, + { + name: "with non-CFE cluster name", + ctx: contextWithHandshakeInfo(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: xdsinternal.SetHandshakeClusterName(resolver.Address{}, "lalala").Attributes, + }), + // non-CFE backends should use alts. + wantTyp: "alts", + }, + { + name: "with CFE cluster name", + ctx: contextWithHandshakeInfo(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: xdsinternal.SetHandshakeClusterName(resolver.Address{}, cfeClusterName).Attributes, + }), + // CFE should use tls. + wantTyp: "tls", + }, + } + for _, tt := range tests { + t.Run(bundleTyp+" "+tt.name, func(t *testing.T) { + _, info, err := tc.TransportCredentials().ClientHandshake(tt.ctx, "", nil) + if err != nil { + t.Fatalf("ClientHandshake failed: %v", err) + } + if gotType := info.AuthType(); gotType != tt.wantTyp { + t.Fatalf("unexpected authtype: %v, want: %v", gotType, tt.wantTyp) + } + + _, infoServer, err := tc.TransportCredentials().ServerHandshake(nil) + if err != nil { + t.Fatalf("ClientHandshake failed: %v", err) + } + // ServerHandshake should always do TLS. + if gotType := infoServer.AuthType(); gotType != "tls" { + t.Fatalf("unexpected server authtype: %v, want: %v", gotType, "tls") + } + }) + } + } +} diff --git a/credentials/google/xds.go b/credentials/google/xds.go new file mode 100644 index 000000000000..22997ce2532c --- /dev/null +++ b/credentials/google/xds.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package google + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" +) + +const cfeClusterName = "google-cfe" + +// clusterTransportCreds is a combo of TLS + ALTS. +// +// On the client, ClientHandshake picks TLS or ALTS based on address attributes. +// - if attributes has cluster name +// - if cluster name is "google_cfe", use TLS +// - otherwise, use ALTS +// - else, do TLS +// +// On the server, ServerHandshake always does TLS. +type clusterTransportCreds struct { + tls credentials.TransportCredentials + alts credentials.TransportCredentials +} + +func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clusterTransportCreds { + return &clusterTransportCreds{ + tls: tls, + alts: alts, + } +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + chi := credentials.ClientHandshakeInfoFromContext(ctx) + if chi.Attributes == nil { + return c.tls.ClientHandshake(ctx, authority, rawConn) + } + cn, ok := xdsinternal.GetHandshakeClusterName(chi.Attributes) + if !ok || cn == cfeClusterName { + return c.tls.ClientHandshake(ctx, authority, rawConn) + } + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. 
+ return c.alts.ClientHandshake(ctx, authority, rawConn) +} + +func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return c.tls.ServerHandshake(conn) +} + +func (c *clusterTransportCreds) Info() credentials.ProtocolInfo { + // TODO: this always returns tls.Info now, because we don't have a cluster + // name to check when this method is called. This method doesn't affect + // anything important now. We may want to revisit this if it becomes more + // important later. + return c.tls.Info() +} + +func (c *clusterTransportCreds) Clone() credentials.TransportCredentials { + return &clusterTransportCreds{ + tls: c.tls.Clone(), + alts: c.alts.Clone(), + } +} + +func (c *clusterTransportCreds) OverrideServerName(s string) error { + if err := c.tls.OverrideServerName(s); err != nil { + return err + } + return c.alts.OverrideServerName(s) +} diff --git a/internal/credentials/xds/handshake_cluster.go b/internal/credentials/xds/handshake_cluster.go new file mode 100644 index 000000000000..cb059bd6669a --- /dev/null +++ b/internal/credentials/xds/handshake_cluster.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. 
+type handshakeClusterNameKey struct{} + +// SetHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetHandshakeClusterName returns cluster name stored in attr. +func GetHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 6d9b7a5082f6..7fb31ab7affa 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/client/load" @@ -369,3 +370,93 @@ func TestPickerUpdateAfterClose(t *testing.T) { case <-time.After(time.Millisecond * 10): } } + +// TestClusterNameInAddressAttributes covers the case that cluster name is +// attached to the subconn address attributes. 
+func TestClusterNameInAddressAttributes(t *testing.T) { + xdsC := fakeclient.NewClient() + oldNewXDSClient := newXDSClient + newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + defer func() { newXDSClient = oldNewXDSClient }() + + builder := balancer.Get(clusterImplName) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: testBackendAddrs, + }, + BalancerConfig: &lbConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // This should get the connecting picker. + p0 := <-cc.NewPickerCh + for i := 0; i < 10; i++ { + _, err := p0.Pick(balancer.PickInfo{}) + if err != balancer.ErrNoSubConnAvailable { + t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) + } + } + + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testBackendAddrs[0].Addr; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + cn, ok := xdsinternal.GetHandshakeClusterName(addrs1[0].Attributes) + if !ok || cn != testClusterName { + t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn, ok, testClusterName) + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Test pick with one backend. 
+ p1 := <-cc.NewPickerCh + const rpcCount = 20 + for i := 0; i < rpcCount; i++ { + gotSCSt, err := p1.Pick(balancer.PickInfo{}) + if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) + } + } + + const testClusterName2 = "test-cluster-2" + var addr2 = resolver.Address{Addr: "2.2.2.2"} + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{addr2}, + }, + BalancerConfig: &lbConfig{ + Cluster: testClusterName2, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + addrs2 := <-cc.NewSubConnAddrsCh + if got, want := addrs2[0].Addr, addr2.Addr; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + // New addresses should have the new cluster name. 
+ cn2, ok := xdsinternal.GetHandshakeClusterName(addrs2[0].Attributes) + if !ok || cn2 != testClusterName2 { + t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn2, ok, testClusterName2) + } +} diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 4435f9e65a03..0cc8d0d82f5c 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -30,8 +30,10 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/buffer" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/loadstore" xdsclient "google.golang.org/grpc/xds/internal/client" @@ -110,11 +112,13 @@ type clusterImplBalancer struct { config *lbConfig childLB balancer.Balancer cancelLoadReport func() - clusterName string edsServiceName string lrsServerName string loadWrapper *loadstore.Wrapper + clusterNameMu sync.Mutex + clusterName string + // childState/drops/requestCounter can only be accessed in run(). And run() // is the only goroutine that sends picker to the parent ClientConn. All // requests to update picker need to be sent to pickerUpdateCh. @@ -132,9 +136,11 @@ func (cib *clusterImplBalancer) updateLoadStore(newConfig *lbConfig) error { // ClusterName is different, restart. ClusterName is from ClusterName and // EdsServiceName. 
- if cib.clusterName != newConfig.Cluster { + clusterName := cib.getClusterName() + if clusterName != newConfig.Cluster { updateLoadClusterAndService = true - cib.clusterName = newConfig.Cluster + cib.setClusterName(newConfig.Cluster) + clusterName = newConfig.Cluster } if cib.edsServiceName != newConfig.EDSServiceName { updateLoadClusterAndService = true @@ -149,7 +155,7 @@ func (cib *clusterImplBalancer) updateLoadStore(newConfig *lbConfig) error { // On the other hand, this will almost never happen. Each LRS policy // shouldn't get updated config. The parent should do a graceful switch // when the clusterName or serviceName is changed. - cib.loadWrapper.UpdateClusterAndService(cib.clusterName, cib.edsServiceName) + cib.loadWrapper.UpdateClusterAndService(clusterName, cib.edsServiceName) } // Check if it's necessary to restart load report. @@ -305,6 +311,36 @@ func (cib *clusterImplBalancer) UpdateState(state balancer.State) { cib.pickerUpdateCh.Put(state) } +func (cib *clusterImplBalancer) setClusterName(n string) { + cib.clusterNameMu.Lock() + defer cib.clusterNameMu.Unlock() + cib.clusterName = n +} + +func (cib *clusterImplBalancer) getClusterName() string { + cib.clusterNameMu.Lock() + defer cib.clusterNameMu.Unlock() + return cib.clusterName +} + +func (cib *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + clusterName := cib.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + } + return cib.ClientConn.NewSubConn(newAddrs, opts) +} + +func (cib *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + clusterName := cib.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + } + cib.ClientConn.UpdateAddresses(sc, newAddrs) +} 
+ type dropConfigs struct { drops []*dropper requestCounter *xdsclient.ServiceRequestsCounter diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 423df7aed95e..de724701df94 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -116,6 +116,9 @@ type edsBalancerImplInterface interface { // updateServiceRequestsConfig updates the service requests counter to the // one for the given service name. updateServiceRequestsConfig(serviceName string, max *uint32) + // updateClusterName updates the cluster name that will be attached to the + // address attributes. + updateClusterName(name string) // close closes the eds balancer. close() } @@ -250,6 +253,7 @@ func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { // This is OK for now, because we don't actually expect edsServiceName // to change. Fix this (a bigger change) will happen later. x.lsw.updateServiceName(x.edsServiceName) + x.edsImpl.updateClusterName(x.edsServiceName) } // Restart load reporting when the loadReportServer name has changed. diff --git a/xds/internal/balancer/edsbalancer/eds_impl.go b/xds/internal/balancer/edsbalancer/eds_impl.go index 5318a5342e83..94f643d3355e 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl.go +++ b/xds/internal/balancer/edsbalancer/eds_impl.go @@ -23,6 +23,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -104,6 +105,9 @@ type edsBalancerImpl struct { innerState balancer.State // The state of the picker without drop support. serviceRequestsCounter *client.ServiceRequestsCounter serviceRequestCountMax uint32 + + clusterNameMu sync.Mutex + clusterName string } // newEDSBalancerImpl create a new edsBalancerImpl. 
@@ -444,6 +448,18 @@ func (edsImpl *edsBalancerImpl) updateServiceRequestsConfig(serviceName string, edsImpl.pickerMu.Unlock() } +func (edsImpl *edsBalancerImpl) updateClusterName(name string) { + edsImpl.clusterNameMu.Lock() + defer edsImpl.clusterNameMu.Unlock() + edsImpl.clusterName = name +} + +func (edsImpl *edsBalancerImpl) getClusterName() string { + edsImpl.clusterNameMu.Lock() + defer edsImpl.clusterNameMu.Unlock() + return edsImpl.clusterName +} + // updateState first handles priority, and then wraps picker in a drop picker // before forwarding the update. func (edsImpl *edsBalancerImpl) updateState(priority priorityType, s balancer.State) { @@ -479,8 +495,23 @@ type edsBalancerWrapperCC struct { } func (ebwcc *edsBalancerWrapperCC) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - return ebwcc.parent.newSubConn(ebwcc.priority, addrs, opts) + clusterName := ebwcc.parent.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + } + return ebwcc.parent.newSubConn(ebwcc.priority, newAddrs, opts) } + +func (ebwcc *edsBalancerWrapperCC) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + clusterName := ebwcc.parent.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + for i, addr := range addrs { + newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + } + ebwcc.ClientConn.UpdateAddresses(sc, newAddrs) +} + func (ebwcc *edsBalancerWrapperCC) UpdateState(state balancer.State) { ebwcc.parent.enqueueChildBalancerStateUpdate(ebwcc.priority, state) } diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index ebaea13cc88a..79332dfe1fd3 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -26,6 +26,7 @@ import ( corepb 
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -933,3 +934,73 @@ func (s) TestEDS_LoadReportDisabled(t *testing.T) { p1.Pick(balancer.PickInfo{}) } } + +// TestEDS_ClusterNameInAddressAttributes covers the case that cluster name is +// attached to the subconn address attributes. +func (s) TestEDS_ClusterNameInAddressAttributes(t *testing.T) { + cc := testutils.NewTestClientConn(t) + edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) + edsb.enqueueChildBalancerStateUpdate = edsb.updateState + + const clusterName1 = "cluster-name-1" + edsb.updateClusterName(clusterName1) + + // One locality with one backend. + clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) + edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + cn, ok := xdsinternal.GetHandshakeClusterName(addrs1[0].Attributes) + if !ok || cn != clusterName1 { + t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn, ok, clusterName1) + } + + sc1 := <-cc.NewSubConnCh + edsb.handleSubConnStateChange(sc1, connectivity.Connecting) + edsb.handleSubConnStateChange(sc1, connectivity.Ready) + + // Pick with only the first backend. + p1 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p1.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + + // Change cluster name. 
+ const clusterName2 = "cluster-name-2" + edsb.updateClusterName(clusterName2) + + // Change backend. + clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[1:2], nil) + edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) + + addrs2 := <-cc.NewSubConnAddrsCh + if got, want := addrs2[0].Addr, testEndpointAddrs[1]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + // New addresses should have the new cluster name. + cn2, ok := xdsinternal.GetHandshakeClusterName(addrs2[0].Attributes) + if !ok || cn2 != clusterName2 { + t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn2, ok, clusterName1) + } + + sc2 := <-cc.NewSubConnCh + edsb.handleSubConnStateChange(sc2, connectivity.Connecting) + edsb.handleSubConnStateChange(sc2, connectivity.Ready) + + // Test roundrobin with two subconns. + p2 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p2.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) + } + } +} diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 5fe1f2ef6b90..65b74a1b8af5 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -117,6 +117,7 @@ type fakeEDSBalancer struct { edsUpdate *testutils.Channel serviceName *testutils.Channel serviceRequestMax *testutils.Channel + clusterName *testutils.Channel } func (f *fakeEDSBalancer) handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { @@ -138,6 +139,10 @@ func (f *fakeEDSBalancer) updateServiceRequestsConfig(serviceName string, max *u f.serviceRequestMax.Send(max) } +func (f *fakeEDSBalancer) updateClusterName(name string) { + f.clusterName.Send(name) +} + func (f *fakeEDSBalancer) 
close() {} func (f *fakeEDSBalancer) waitForChildPolicy(ctx context.Context, wantPolicy *loadBalancingConfig) error { @@ -207,6 +212,18 @@ func (f *fakeEDSBalancer) waitForCountMaxUpdate(ctx context.Context, want *uint3 return fmt.Errorf("got countMax %+v, want %+v", got, want) } +func (f *fakeEDSBalancer) waitForClusterNameUpdate(ctx context.Context, wantClusterName string) error { + val, err := f.clusterName.Receive(ctx) + if err != nil { + return err + } + gotServiceName := val.(string) + if gotServiceName != wantClusterName { + return fmt.Errorf("got clusterName %v, want %v", gotServiceName, wantClusterName) + } + return nil +} + func newFakeEDSBalancer(cc balancer.ClientConn) edsBalancerImplInterface { return &fakeEDSBalancer{ cc: cc, @@ -215,6 +232,7 @@ func newFakeEDSBalancer(cc balancer.ClientConn) edsBalancerImplInterface { edsUpdate: testutils.NewChannelWithSize(10), serviceName: testutils.NewChannelWithSize(10), serviceRequestMax: testutils.NewChannelWithSize(10), + clusterName: testutils.NewChannelWithSize(10), } } @@ -657,6 +675,59 @@ func (s) TestCounterUpdate(t *testing.T) { } } +// TestClusterNameUpdateInAddressAttributes verifies that cluster name update in +// edsImpl is triggered with the update from a new service config. +func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { + edsLBCh := testutils.NewChannel() + xdsC, cleanup := setup(edsLBCh) + defer cleanup() + + builder := balancer.Get(edsName) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) + if edsB == nil { + t.Fatalf("builder.Build(%s) failed and returned nil", edsName) + } + defer edsB.Close() + + // Update should trigger counter update with provided service name. 
+ if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ + EDSServiceName: "foobar-1", + }, + }); err != nil { + t.Fatal(err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotCluster, err := xdsC.WaitForWatchEDS(ctx) + if err != nil || gotCluster != "foobar-1" { + t.Fatalf("unexpected EDS watch: %v, %v", gotCluster, err) + } + edsI := edsB.(*edsBalancer).edsImpl.(*fakeEDSBalancer) + if err := edsI.waitForClusterNameUpdate(ctx, "foobar-1"); err != nil { + t.Fatal(err) + } + + // Update should trigger counter update with provided service name. + if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ + EDSServiceName: "foobar-2", + }, + }); err != nil { + t.Fatal(err) + } + if err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { + t.Fatalf("failed to wait for EDS cancel: %v", err) + } + gotCluster2, err := xdsC.WaitForWatchEDS(ctx) + if err != nil || gotCluster2 != "foobar-2" { + t.Fatalf("unexpected EDS watch: %v, %v", gotCluster2, err) + } + if err := edsI.waitForClusterNameUpdate(ctx, "foobar-2"); err != nil { + t.Fatal(err) + } +} + func (s) TestBalancerConfigParsing(t *testing.T) { const testEDSName = "eds.service" var testLRSName = "lrs.server" From 7a6ab591158c9c43b13b229a5d0a6471abfbeca6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 13 Apr 2021 11:47:25 -0700 Subject: [PATCH 019/998] multiple: go mod tidy to make vet happy (#4337) --- examples/go.sum | 5 ----- go.sum | 3 --- security/authorization/go.sum | 11 ----------- 3 files changed, 19 deletions(-) diff --git a/examples/go.sum b/examples/go.sum index f55a3e5c8968..1984770a80b3 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -17,14 +17,12 @@ github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -62,7 +60,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IV golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= @@ -71,12 +68,10 @@ google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLY google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= diff --git a/go.sum b/go.sum index bb25cd49156d..24d2976abbaf 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,8 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -43,7 +41,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/security/authorization/go.sum b/security/authorization/go.sum index a953711e01e6..3c7ea6cf47fe 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -14,14 +14,11 @@ github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTX github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= 
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -36,7 +33,6 @@ github.com/google/cel-spec v0.4.0/go.mod h1:2pBM5cU4UKjbPDXBgwWkiwBsVgnxknuEJ7C5 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -49,7 +45,6 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -58,11 +53,9 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -76,18 +69,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= 
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= From c229922995e2c1af095282ef4d17abcd7300ecaf Mon Sep 17 00:00:00 2001 From: apolcyn Date: Tue, 13 Apr 2021 13:06:05 -0700 Subject: [PATCH 020/998] client: propagate connection error causes to RPC statuses (#4311) --- clientconn.go | 4 +-- internal/transport/http2_client.go | 47 +++++++++++++++------------- internal/transport/keepalive_test.go | 33 +++++++++---------- internal/transport/transport.go | 2 +- internal/transport/transport_test.go | 42 ++++++++++++------------- test/end2end_test.go | 47 ++++++++++++++++++++++++++++ 6 files changed, 114 insertions(+), 61 deletions(-) diff --git a/clientconn.go b/clientconn.go index 77a08fd33bf8..0db796ccbd66 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1197,7 +1197,7 @@ func (ac *addrConn) resetTransport() { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - newTr.Close() + newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) return } ac.curAddr = addr @@ -1329,7 +1329,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne select { case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. - newTr.Close() + newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index d5bbe720db54..ce0166012d7d 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -347,12 +347,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Send connection preface to server. 
n, err := t.conn.Write(clientPreface) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + t.Close(err) + return nil, err } if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + t.Close(err) + return nil, err } var ss []http2.Setting @@ -370,14 +372,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } err = t.framer.fr.WriteSettings(ss...) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + t.Close(err) + return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + t.Close(err) + return nil, err } } @@ -845,12 +849,12 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // This method blocks until the addrConn that initiated this transport is // re-connected. This happens because t.onClose() begins reconnect logic at the // addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { +func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only Close once. 
if t.state == closing { t.mu.Unlock() - return nil + return } // Call t.onClose before setting the state to closing to prevent the client // from attempting to create new streams ASAP. @@ -866,13 +870,13 @@ func (t *http2Client) Close() error { t.mu.Unlock() t.controlBuf.finish() t.cancel() - err := t.conn.Close() + t.conn.Close() if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } // Notify all active streams. for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, status.New(codes.Unavailable, err.Error()), nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -880,7 +884,6 @@ func (t *http2Client) Close() error { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return err } // GracefulClose sets the state to draining, which prevents new streams from @@ -899,7 +902,7 @@ func (t *http2Client) GracefulClose() { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(ErrConnClosing) return } t.controlBuf.put(&incomingGoAway{}) @@ -1147,7 +1150,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 != 1 { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1165,7 +1168,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) return } default: @@ -1195,7 +1198,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) } } @@ -1313,7 +1316,8 @@ func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return + err = connectionErrorf(true, err, "error reading server preface: %v", err) + t.Close(err) // this kicks off resetTransport, so must be last before return return } t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) @@ -1322,7 +1326,8 @@ func (t *http2Client) reader() { } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return + // this kicks off resetTransport, so must be last before return + t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) return } t.onPrefaceReceipt() @@ -1358,7 +1363,7 @@ func (t *http2Client) reader() { continue } else { // Transport error. 
- t.Close() + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) return } } @@ -1417,7 +1422,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) return } t.mu.Lock() diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index c8f177fecf1b..1f6603cd759e 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -24,6 +24,7 @@ package transport import ( "context" + "fmt" "io" "net" "testing" @@ -47,7 +48,7 @@ func (s) TestMaxConnectionIdle(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -86,7 +87,7 @@ func (s) TestMaxConnectionIdleBusyClient(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -122,7 +123,7 @@ func (s) TestMaxConnectionAge(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -169,7 +170,7 @@ func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -228,7 +229,7 @@ func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + 
client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -257,7 +258,7 @@ func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { PermitWithoutStream: true, }}, connCh) defer cancel() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) conn, ok := <-connCh if !ok { @@ -288,7 +289,7 @@ func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { Timeout: 1 * time.Second, }}, connCh) defer cancel() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) conn, ok := <-connCh if !ok { @@ -317,7 +318,7 @@ func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { Timeout: 1 * time.Second, }}, connCh) defer cancel() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) conn, ok := <-connCh if !ok { @@ -352,7 +353,7 @@ func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { PermitWithoutStream: true, }}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -391,7 +392,7 @@ func (s) TestKeepaliveClientFrequency(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -436,7 +437,7 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -480,7 +481,7 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -530,7 +531,7 @@ func (s) 
TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -564,7 +565,7 @@ func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -604,7 +605,7 @@ func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -658,7 +659,7 @@ func (s) TestTCPUserTimeout(t *testing.T) { }, ) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 5cf7c5f80fe1..068f4d0e5023 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -622,7 +622,7 @@ type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. - Close() error + Close(err error) // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. 
Once all streams are diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 1d8d3ed355df..c3830a8fd0b1 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -481,7 +481,7 @@ func (s) TestInflightStreamClosing(t *testing.T) { server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -550,7 +550,7 @@ func (s) TestClientSendAndReceive(t *testing.T) { if recvErr != io.EOF { t.Fatalf("Error: %v; want ", recvErr) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -560,7 +560,7 @@ func (s) TestClientErrorNotify(t *testing.T) { go server.stop() // ct.reader should detect the error and activate ct.Error(). <-ct.Error() - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) } func performOneRPC(ct ClientTransport) { @@ -597,7 +597,7 @@ func (s) TestClientMix(t *testing.T) { }(s) go func(ct ClientTransport) { <-ct.Error() - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) }(ct) for i := 0; i < 1000; i++ { time.Sleep(10 * time.Millisecond) @@ -636,7 +636,7 @@ func (s) TestLargeMessage(t *testing.T) { }() } wg.Wait() - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -653,7 +653,7 @@ func (s) TestLargeMessageWithDelayRead(t *testing.T) { server, ct, cancel := setUpWithOptions(t, 0, sc, delayRead, co) defer cancel() defer server.stop() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) server.mu.Lock() ready := server.ready server.mu.Unlock() @@ -831,7 +831,7 @@ func (s) TestLargeMessageSuspension(t *testing.T) { if _, err := s.Read(make([]byte, 8)); err.Error() != expectedErr.Error() { t.Fatalf("Read got %v of type %T, want %v", err, err, 
expectedErr) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -841,7 +841,7 @@ func (s) TestMaxStreams(t *testing.T) { } server, ct, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) defer server.stop() callHdr := &CallHdr{ Host: "localhost", @@ -901,7 +901,7 @@ func (s) TestMaxStreams(t *testing.T) { // Close the first stream created so that the new stream can finally be created. ct.CloseStream(s, nil) <-done - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) <-ct.writerDone if ct.maxConcurrentStreams != 1 { t.Fatalf("ct.maxConcurrentStreams: %d, want 1", ct.maxConcurrentStreams) @@ -960,7 +960,7 @@ func (s) TestServerContextCanceledOnClosedConnection(t *testing.T) { sc.mu.Unlock() break } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) select { case <-ss.Context().Done(): if ss.Context().Err() != context.Canceled { @@ -980,7 +980,7 @@ func (s) TestClientConnDecoupledFromApplicationRead(t *testing.T) { server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() @@ -1069,7 +1069,7 @@ func (s) TestServerConnDecoupledFromApplicationRead(t *testing.T) { server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() @@ -1302,7 +1302,7 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { if err != nil { t.Fatalf("Error while creating client transport: %v", err) } - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) str, err := 
ct.NewStream(connectCtx, &CallHdr{}) if err != nil { t.Fatalf("Error while creating stream: %v", err) @@ -1345,7 +1345,7 @@ func (s) TestEncodingRequiredStatus(t *testing.T) { if !testutils.StatusErrEqual(s.Status().Err(), encodingTestStatus.Err()) { t.Fatalf("stream with status %v, want %v", s.Status(), encodingTestStatus) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -1367,7 +1367,7 @@ func (s) TestInvalidHeaderField(t *testing.T) { if se, ok := status.FromError(err); !ok || se.Code() != codes.Internal || !strings.Contains(err.Error(), expectedInvalidHeaderField) { t.Fatalf("Read got error %v, want error with code %s and contains %q", err, codes.Internal, expectedInvalidHeaderField) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -1375,7 +1375,7 @@ func (s) TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, invalidHeaderField) defer cancel() defer server.stop() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() s, err := ct.NewStream(ctx, &CallHdr{Host: "localhost", Method: "foo"}) @@ -1481,7 +1481,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) server, client, cancel := setUpWithOptions(t, 0, sc, pingpong, co) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() @@ -1563,7 +1563,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) } // Close down both server and client so that their internals can be read without data // races. 
- client.Close() + client.Close(fmt.Errorf("closed manually by test")) st.Close() <-st.readerDone <-st.writerDone @@ -1762,7 +1762,7 @@ func runPingPongTest(t *testing.T, msgSize int) { server, client, cancel := setUp(t, 0, 0, pingpong) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() @@ -1850,7 +1850,7 @@ func (s) TestHeaderTblSize(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) defer server.stop() ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -1969,7 +1969,7 @@ func (s) TestClientHandshakeInfo(t *testing.T) { if err != nil { t.Fatalf("NewClientTransport(): %v", err) } - defer tr.Close() + defer tr.Close(fmt.Errorf("closed manually by test")) wantAttr := attributes.New(testAttrKey, testAttrVal) if gotAttr := creds.attr; !cmp.Equal(gotAttr, wantAttr, cmp.AllowUnexported(attributes.Attributes{})) { diff --git a/test/end2end_test.go b/test/end2end_test.go index 902e94241048..746df1745116 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -1333,6 +1333,53 @@ func testConcurrentServerStopAndGoAway(t *testing.T, e env) { awaitNewConnLogOutput() } +func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { + rpcStartedOnServer := make(chan struct{}) + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + close(rpcStartedOnServer) + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + 
// The precise behavior of this test is subject to raciness around the timing of when TCP packets + // are sent from client to server, and when we tell the server to stop, so we need to account for both + // of these possible error messages: + // 1) If the call to ss.S.Stop() causes the server's sockets to close while there's still in-flight + data from the client on the TCP connection, then the kernel can send an RST back to the client (also + see https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c). Note that while this + condition is expected to be rare due to the rpcStartedOnServer synchronization, in theory it should + be possible, e.g. if the client sends a BDP ping at the right time. + 2) If, for example, the call to ss.S.Stop() happens after the RPC headers have been received at the + server, then the TCP connection can shutdown gracefully when the server's socket closes. + const possibleConnResetMsg = "connection reset by peer" + const possibleEOFMsg = "error reading from server: EOF" + // Start an RPC. Then, while the RPC is still being accepted or handled at the server, abruptly + stop the server, killing the connection. The RPC error message should include details about the specific + connection error that was encountered. + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) + } + // Block until the RPC has been started on the server. This ensures that the ClientConn will find a healthy + // connection for the RPC to go out on initially, and that the TCP connection will shut down strictly after + // the RPC has been started on it. 
+ <-rpcStartedOnServer + ss.S.Stop() + if _, err := stream.Recv(); err == nil || (!strings.Contains(err.Error(), possibleConnResetMsg) && !strings.Contains(err.Error(), possibleEOFMsg)) { + t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg) + } + close(rpcDoneOnClient) +} + func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { From 6fafb9193bde04c61d75a2da9de53c4d029748b4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 13 Apr 2021 15:31:34 -0700 Subject: [PATCH 021/998] xds: support unspecified and wildcard filter chain prefixes (#4333) --- xds/internal/client/filter_chain.go | 85 +++-- xds/internal/client/filter_chain_test.go | 301 +++++++++++++++--- xds/internal/client/lds_test.go | 18 +- .../test/xds_server_integration_test.go | 97 ++++-- 4 files changed, 374 insertions(+), 127 deletions(-) diff --git a/xds/internal/client/filter_chain.go b/xds/internal/client/filter_chain.go index 73ffa18347e8..66d26d03b634 100644 --- a/xds/internal/client/filter_chain.go +++ b/xds/internal/client/filter_chain.go @@ -30,13 +30,22 @@ import ( "google.golang.org/grpc/xds/internal/version" ) -// Represents a wildcard IP prefix. Go stdlib `Contains()` method works for both -// v4 and v6 addresses when used on this wildcard address. -const emptyAddrMapKey = "0.0.0.0/0" - -var ( - // Parsed wildcard IP prefix. - _, zeroIP, _ = net.ParseCIDR("0.0.0.0/0") +const ( + // Used as the map key for unspecified prefixes. The actual value of this + // key is immaterial. + unspecifiedPrefixMapKey = "unspecified" + + // An unspecified destination or source prefix should be considered a less + // specific match than a wildcard prefix, `0.0.0.0/0` or `::/0`. Also, an + // unspecified prefix should match most v4 and v6 addresses compared to the + // wildcard prefixes which match only a specific network (v4 or v6). 
+ // + // We use these constants when looking up the most specific prefix match. A + // wildcard prefix will match 0 bits, and to make sure that a wildcard + // prefix is considered a more specific match than an unspecified prefix, we + // use a value of -1 for the latter. + noPrefixMatch = -2 + unspecifiedPrefixMatch = -1 ) // FilterChain captures information from within a FilterChain message in a @@ -108,7 +117,8 @@ type FilterChainManager struct { // destPrefixEntry is the value type of the map indexed on destination prefixes. type destPrefixEntry struct { - net *net.IPNet // The actual destination prefix. + // The actual destination prefix. Set to nil for unspecified prefixes. + net *net.IPNet // We need to keep track of the transport protocols seen as part of the // config validation (and internal structure building) phase. The only two // values that we support are empty string and "raw_buffer", with the latter @@ -137,7 +147,8 @@ type sourcePrefixes struct { // sourcePrefixEntry contains match criteria per source prefix. type sourcePrefixEntry struct { - net *net.IPNet // The actual source prefix. + // The actual source prefix. Set to nil for unspecified prefixes. + net *net.IPNet // Mapping from source ports specified in the match criteria to the actual // filter chain. Unspecified source port matches en up as a wildcard entry // here with a key of 0. @@ -227,11 +238,12 @@ func (fci *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.F } if len(dstPrefixes) == 0 { - // Use the wildcard IP when destination prefix is unspecified. - if fci.dstPrefixMap[emptyAddrMapKey] == nil { - fci.dstPrefixMap[emptyAddrMapKey] = &destPrefixEntry{net: zeroIP} + // Use the unspecified entry when destination prefix is unspecified, and + // set the `net` field to nil. 
+ if fci.dstPrefixMap[unspecifiedPrefixMapKey] == nil { + fci.dstPrefixMap[unspecifiedPrefixMapKey] = &destPrefixEntry{} } - return fci.addFilterChainsForServerNames(fci.dstPrefixMap[emptyAddrMapKey], fc) + return fci.addFilterChainsForServerNames(fci.dstPrefixMap[unspecifiedPrefixMapKey], fc) } for _, prefix := range dstPrefixes { p := prefix.String() @@ -326,14 +338,14 @@ func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map } if len(srcPrefixes) == 0 { - // Use the wildcard IP when source prefix is unspecified. - if srcPrefixMap[emptyAddrMapKey] == nil { - srcPrefixMap[emptyAddrMapKey] = &sourcePrefixEntry{ - net: zeroIP, + // Use the unspecified entry when source prefix is unspecified, and + // set the `net` field to nil. + if srcPrefixMap[unspecifiedPrefixMapKey] == nil { + srcPrefixMap[unspecifiedPrefixMapKey] = &sourcePrefixEntry{ srcPortMap: make(map[int]*FilterChain), } } - return fci.addFilterChainsForSourcePorts(srcPrefixMap[emptyAddrMapKey], fc) + return fci.addFilterChainsForSourcePorts(srcPrefixMap[unspecifiedPrefixMapKey], fc) } for _, prefix := range srcPrefixes { p := prefix.String() @@ -486,15 +498,22 @@ func filterByDestinationPrefixes(dstPrefixes []*destPrefixEntry, isUnspecified b // bound to the wildcard address. return dstPrefixes } - var ( - matchingDstPrefixes []*destPrefixEntry - maxSubnetMatch int - ) + + var matchingDstPrefixes []*destPrefixEntry + maxSubnetMatch := noPrefixMatch for _, prefix := range dstPrefixes { - if !prefix.net.Contains(dstAddr) { + if prefix.net != nil && !prefix.net.Contains(dstAddr) { + // Skip prefixes which don't match. continue } - matchSize, _ := prefix.net.Mask.Size() + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. 
+ matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } if matchSize < maxSubnetMatch { continue } @@ -551,16 +570,22 @@ func filterBySourceType(dstPrefixes []*destPrefixEntry, srcType SourceType) []*s // algorithm. It trims the filter chains based on the source prefix. At most one // filter chain with the most specific match progress to the next stage. func filterBySourcePrefixes(srcPrefixes []*sourcePrefixes, srcAddr net.IP) (*sourcePrefixEntry, error) { - var ( - matchingSrcPrefixes []*sourcePrefixEntry - maxSubnetMatch int - ) + var matchingSrcPrefixes []*sourcePrefixEntry + maxSubnetMatch := noPrefixMatch for _, sp := range srcPrefixes { for _, prefix := range sp.srcPrefixes { - if !prefix.net.Contains(srcAddr) { + if prefix.net != nil && !prefix.net.Contains(srcAddr) { + // Skip prefixes which don't match. continue } - matchSize, _ := prefix.net.Mask.Size() + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. + matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } if matchSize < maxSubnetMatch { continue } diff --git a/xds/internal/client/filter_chain_test.go b/xds/internal/client/filter_chain_test.go index e66f518828ac..afb0c81fda14 100644 --- a/xds/internal/client/filter_chain_test.go +++ b/xds/internal/client/filter_chain_test.go @@ -35,6 +35,8 @@ import ( "google.golang.org/grpc/xds/internal/version" ) +// TestNewFilterChainImpl_Failure_BadMatchFields verifies cases where we have a +// single filter chain with match criteria that contains unsupported fields. 
func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { tests := []struct { desc string @@ -131,6 +133,8 @@ func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { } } +// TestNewFilterChainImpl_Failure_OverlappingMatchingRules verifies cases where +// there are multiple filter chains and they have overlapping match rules. func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { tests := []struct { desc string @@ -218,6 +222,8 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { } } +// TestNewFilterChainImpl_Failure_BadSecurityConfig verifies cases where the +// security configuration in the filter chain is invalid. func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { tests := []struct { desc string @@ -369,6 +375,8 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { } } +// TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the +// security configuration in the filter chain contains valid data. 
func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { tests := []struct { desc string @@ -387,13 +395,11 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -444,13 +450,11 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: { SecurityCfg: &SecurityConfig{ @@ -526,13 +530,11 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: { SecurityCfg: &SecurityConfig{ @@ -576,6 +578,153 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { } } +// TestNewFilterChainImpl_Success_UnsupportedMatchFields verifies cases where +// there are multiple filter chains, and one of them is valid while the other +// contains unsupported match fields. These configurations should lead to +// success at config validation time and the filter chains which contains +// unsupported match fields will be skipped at lookup time. 
+func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { + unspecifiedEntry := &destPrefixEntry{ + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + } + + tests := []struct { + desc string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + desc: "unsupported destination port", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + }, + { + Name: "unsupported-destination-port", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + DestinationPort: &wrapperspb.UInt32Value{Value: 666}, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + }, + def: &FilterChain{}, + }, + }, + { + desc: "unsupported server names", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + }, + { + Name: "unsupported-server-names", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + ServerNames: []string{"example-server"}, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + }, + }, + def: &FilterChain{}, + }, + }, + { + desc: "unsupported transport protocol", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + }, + { + Name: "unsupported-transport-protocol", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: 
[]*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + TransportProtocol: "tls", + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + }, + }, + def: &FilterChain{}, + }, + }, + { + desc: "unsupported application protocol", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + }, + { + Name: "unsupported-application-protocol", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + ApplicationProtocols: []string{"h2"}, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + }, + }, + def: &FilterChain{}, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{})) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +// TestNewFilterChainImpl_Success_AllCombinations verifies different +// combinations of the supported match criteria. func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { tests := []struct { desc string @@ -587,8 +736,23 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { lis: &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { + // Unspecified destination prefix. 
FilterChainMatch: &v3listenerpb.FilterChainMatch{}, }, + { + // v4 wildcard destination prefix. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + }, + { + // v6 wildcard destination prefix. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("::", 0)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, }, @@ -600,13 +764,43 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, "0.0.0.0/0": { - net: zeroIP, + net: ipNetFromCIDR("0.0.0.0/0"), + srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {}, + }, + }, + }, + }, + }, + }, + "::/0": { + net: ipNetFromCIDR("::/0"), srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -620,8 +814,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -635,8 +828,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: 
map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -667,14 +859,12 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ nil, { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -690,8 +880,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { nil, { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -722,8 +911,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ @@ -776,13 +964,11 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 1: {}, 2: {}, @@ -874,13 +1060,11 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -894,8 +1078,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t 
*testing.T) { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -909,8 +1092,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -1128,18 +1310,24 @@ func TestLookup_Successes(t *testing.T) { lisWithoutDefaultChain := &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { - TransportSocket: transportSocketWithInstanceName("wildcard"), + TransportSocket: transportSocketWithInstanceName("unspecified-dest-and-source-prefix"), }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, - SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, }, - TransportSocket: transportSocketWithInstanceName("any-destination-prefix"), + TransportSocket: transportSocketWithInstanceName("wildcard-prefixes-v4"), + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("::", 0)}, + }, + TransportSocket: transportSocketWithInstanceName("wildcard-source-prefix-v6"), }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, - TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-wildcard-source-type"), + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-unspecified-source-type"), }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ @@ -1184,26 +1372,37 @@ func 
TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "default"}}, }, { - desc: "wildcard destination match", + desc: "unspecified destination match", lis: lisWithoutDefaultChain, params: FilterChainLookupParams{ IsUnspecifiedListener: true, - DestAddr: net.IPv4(10, 1, 1, 1), + DestAddr: net.ParseIP("2001:68::db8"), SourceAddr: net.IPv4(10, 1, 1, 1), SourcePort: 1, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard"}}, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "unspecified-dest-and-source-prefix"}}, }, { - desc: "ANY destination match", + desc: "wildcard destination match v4", lis: lisWithoutDefaultChain, params: FilterChainLookupParams{ IsUnspecifiedListener: true, DestAddr: net.IPv4(10, 1, 1, 1), - SourceAddr: net.IPv4(10, 1, 1, 2), + SourceAddr: net.IPv4(10, 1, 1, 1), + SourcePort: 1, + }, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-prefixes-v4"}}, + }, + { + desc: "wildcard source match v6", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.ParseIP("2001:68::1"), + SourceAddr: net.ParseIP("2001:68::2"), SourcePort: 1, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "any-destination-prefix"}}, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-source-prefix-v6"}}, }, { desc: "specific destination and wildcard source type match", @@ -1214,7 +1413,7 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.IPv4(192, 168, 100, 1), SourcePort: 80, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-wildcard-source-type"}}, + wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-unspecified-source-type"}}, }, { desc: "specific destination and source type match", diff --git 
a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go index 21718a4edc5e..21e94557b3e9 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/client/lds_test.go @@ -1126,13 +1126,11 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Port: "9999", FilterChains: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: {}, }, @@ -1216,13 +1214,11 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Port: "9999", FilterChains: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: { SecurityCfg: &SecurityConfig{ @@ -1263,13 +1259,11 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Port: "9999", FilterChains: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcTypeArr: [3]*sourcePrefixes{ { srcPrefixMap: map[string]*sourcePrefixEntry{ - "0.0.0.0/0": { - net: zeroIP, + unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ 0: { SecurityCfg: &SecurityConfig{ diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 9c2e6daebfec..e31bba772ed9 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -33,29 +33,26 @@ import ( "strconv" "testing" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/uuid" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/testutils" + xdsinternal "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" "google.golang.org/grpc/xds" - "google.golang.org/grpc/xds/internal/testutils/e2e" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - xdscreds "google.golang.org/grpc/credentials/xds" - xdsinternal "google.golang.org/grpc/internal/xds" - testpb "google.golang.org/grpc/test/grpc_testing" xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/e2e" ) const ( @@ -248,6 +245,24 @@ func listenerResourceWithoutSecurityConfig(t *testing.T, lis net.Listener) *v3li // configuration pointing to the use of the file_watcher certificate provider // plugin, and name and address fields matching the passed in net.Listener. 
func listenerResourceWithSecurityConfig(t *testing.T, lis net.Listener) *v3listenerpb.Listener { + transportSocket := &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "google_cloud_private_spiffe", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "google_cloud_private_spiffe", + }, + }, + }, + }), + }, + } host, port := hostPortFromListener(t, lis) return &v3listenerpb.Listener{ // This needs to match the name we are querying for. @@ -261,7 +276,7 @@ func listenerResourceWithSecurityConfig(t *testing.T, lis net.Listener) *v3liste }}}}, FilterChains: []*v3listenerpb.FilterChain{ { - Name: "filter-chain-1", + Name: "v4-wildcard", FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: []*v3corepb.CidrRange{ { @@ -289,26 +304,40 @@ func listenerResourceWithSecurityConfig(t *testing.T, lis net.Listener) *v3liste }, }, }, - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "google_cloud_private_spiffe", - }, - ValidationContextType: 
&v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "google_cloud_private_spiffe", - }}}} - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }}}}}, + TransportSocket: transportSocket, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, + TransportSocket: transportSocket, + }, + }, } } From 87eb5b7502493f758e76c4d09430c0049a81a557 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 13 Apr 2021 16:19:17 -0700 Subject: [PATCH 022/998] credentials/google: remove unnecessary dependency on xds protos (#4339) --- credentials/credentials.go | 24 ++------- credentials/google/google_test.go | 12 ++--- credentials/google/xds.go | 4 +- credentials/sts/sts_test.go | 4 +- credentials/xds/xds_client_test.go | 8 ++- internal/credentials/credentials.go | 49 +++++++++++++++++++ internal/internal.go | 6 --- internal/transport/http2_client.go | 14 +++--- ...ke_cluster.go => xds_handshake_cluster.go} | 12 ++--- .../balancer/clusterimpl/balancer_test.go | 6 +-- .../balancer/clusterimpl/clusterimpl.go | 6 +-- xds/internal/balancer/edsbalancer/eds_impl.go | 15 +++--- .../balancer/edsbalancer/eds_impl_test.go | 14 +++--- 13 files changed, 96 insertions(+), 78 deletions(-) create mode 100644 internal/credentials/credentials.go rename internal/{credentials/xds/handshake_cluster.go 
=> xds_handshake_cluster.go} (75%) diff --git a/credentials/credentials.go b/credentials/credentials.go index e69562e78786..7eee7e4ec126 100644 --- a/credentials/credentials.go +++ b/credentials/credentials.go @@ -30,7 +30,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -188,15 +188,12 @@ type RequestInfo struct { AuthInfo AuthInfo } -// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. -type requestInfoKey struct{} - // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) - return + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok } // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes @@ -211,16 +208,12 @@ type ClientHandshakeInfo struct { Attributes *attributes.Attributes } -// clientHandshakeInfoKey is a struct used as the key to store -// ClientHandshakeInfo in a context. -type clientHandshakeInfoKey struct{} - // ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored // in ctx. // // This API is experimental. 
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { - chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) return chi } @@ -249,15 +242,6 @@ func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { return nil } -func init() { - internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) - } - internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { - return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) - } -} - // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. // diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index c20445811202..fee51f945016 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -25,7 +25,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - xdsinternal "google.golang.org/grpc/internal/credentials/xds" + icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/resolver" ) @@ -53,8 +53,6 @@ func (t *testAuthInfo) AuthType() string { var ( testTLS = &testCreds{typ: "tls"} testALTS = &testCreds{typ: "alts"} - - contextWithHandshakeInfo = internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) ) func overrideNewCredsFuncs() func() { @@ -93,16 +91,16 @@ func TestClientHandshakeBasedOnClusterName(t *testing.T) { }, { name: "with non-CFE cluster name", - ctx: contextWithHandshakeInfo(context.Background(), credentials.ClientHandshakeInfo{ - Attributes: xdsinternal.SetHandshakeClusterName(resolver.Address{}, "lalala").Attributes, + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), 
credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "lalala").Attributes, }), // non-CFE backends should use alts. wantTyp: "alts", }, { name: "with CFE cluster name", - ctx: contextWithHandshakeInfo(context.Background(), credentials.ClientHandshakeInfo{ - Attributes: xdsinternal.SetHandshakeClusterName(resolver.Address{}, cfeClusterName).Attributes, + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, cfeClusterName).Attributes, }), // CFE should use tls. wantTyp: "tls", diff --git a/credentials/google/xds.go b/credentials/google/xds.go index 22997ce2532c..588c685e2592 100644 --- a/credentials/google/xds.go +++ b/credentials/google/xds.go @@ -23,7 +23,7 @@ import ( "net" "google.golang.org/grpc/credentials" - xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal" ) const cfeClusterName = "google-cfe" @@ -54,7 +54,7 @@ func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority s if chi.Attributes == nil { return c.tls.ClientHandshake(ctx, authority, rawConn) } - cn, ok := xdsinternal.GetHandshakeClusterName(chi.Attributes) + cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) if !ok || cn == cfeClusterName { return c.tls.ClientHandshake(ctx, authority, rawConn) } diff --git a/credentials/sts/sts_test.go b/credentials/sts/sts_test.go index ac680e001112..8738c424d087 100644 --- a/credentials/sts/sts_test.go +++ b/credentials/sts/sts_test.go @@ -37,7 +37,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" ) @@ -104,7 +104,7 @@ func createTestContext(ctx context.Context, s credentials.SecurityLevel) 
context Method: "testInfo", AuthInfo: auth, } - return internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + return icredentials.NewRequestInfoContext(ctx, ri) } // errReader implements the io.Reader interface and returns an error from the diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index 82cfa5876acb..8859946ef5fa 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" @@ -228,8 +228,7 @@ func newTestContextWithHandshakeInfo(parent context.Context, root, identity cert // Moving the attributes from the resolver.Address to the context passed to // the handshaker is done in the transport layer. Since we directly call the // handshaker in these tests, we need to do the same here. - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - return contextWithHandshakeInfo(parent, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + return icredentials.NewClientHandshakeInfoContext(parent, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) } // compareAuthInfo compares the AuthInfo received on the client side after a @@ -541,8 +540,7 @@ func (s) TestClientCredsProviderSwitch(t *testing.T) { // here because we need access to the underlying HandshakeInfo so that we // can update it before the next call to ClientHandshake(). 
addr := xdsinternal.SetHandshakeInfo(resolver.Address{}, handshakeInfo) - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - ctx = contextWithHandshakeInfo(ctx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + ctx = icredentials.NewClientHandshakeInfoContext(ctx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) if _, _, err := creds.ClientHandshake(ctx, authority, conn); err == nil { t.Fatal("ClientHandshake() succeeded when expected to fail") } diff --git a/internal/credentials/credentials.go b/internal/credentials/credentials.go new file mode 100644 index 000000000000..32c9b59033cd --- /dev/null +++ b/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. 
+func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/internal/internal.go b/internal/internal.go index 1e2834c70f67..2a3243bd701a 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -38,12 +38,6 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // NewRequestInfoContext creates a new context based on the argument context attaching - // the passed in RequestInfo to the new context. - NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context - // NewClientHandshakeInfoContext returns a copy of the input context with - // the passed in ClientHandshakeInfo struct added to it. 
- NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context // ParseServiceConfigForTesting is for creating a fake // ClientConn for resolver testing only ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index ce0166012d7d..892317cc6fca 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -32,15 +32,14 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpcutil" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/internal/transport/networktype" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -238,8 +237,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Attributes field of resolver.Address, which is shoved into connectCtx // and passed to the credential handshaker. This makes it possible for // address specific arbitrary data to reach the credential handshaker. 
- contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) @@ -458,7 +456,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err diff --git a/internal/credentials/xds/handshake_cluster.go b/internal/xds_handshake_cluster.go similarity index 75% rename from internal/credentials/xds/handshake_cluster.go rename to internal/xds_handshake_cluster.go index cb059bd6669a..3677c3f04f84 100644 --- a/internal/credentials/xds/handshake_cluster.go +++ b/internal/xds_handshake_cluster.go @@ -1,5 +1,4 @@ /* - * * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,10 +12,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xds +package internal import ( "google.golang.org/grpc/attributes" @@ -27,15 +25,15 @@ import ( // the Attributes field of resolver.Address. 
type handshakeClusterNameKey struct{} -// SetHandshakeClusterName returns a copy of addr in which the Attributes field +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field // is updated with the cluster name. -func SetHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) return addr } -// GetHandshakeClusterName returns cluster name stored in attr. -func GetHandshakeClusterName(attr *attributes.Attributes) (string, bool) { +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { v := attr.Value(handshakeClusterNameKey{}) name, ok := v.(string) return name, ok diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 7fb31ab7affa..f002c6a239d7 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" - xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/client/load" @@ -414,7 +414,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { if got, want := addrs1[0].Addr, testBackendAddrs[0].Addr; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } - cn, ok := xdsinternal.GetHandshakeClusterName(addrs1[0].Attributes) + cn, ok := internal.GetXDSHandshakeClusterName(addrs1[0].Attributes) if !ok || cn != testClusterName { t.Fatalf("sc is created with addr with cluster name 
%v, %v, want cluster name %v", cn, ok, testClusterName) } @@ -455,7 +455,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { t.Fatalf("sc is created with addr %v, want %v", got, want) } // New addresses should have the new cluster name. - cn2, ok := xdsinternal.GetHandshakeClusterName(addrs2[0].Attributes) + cn2, ok := internal.GetXDSHandshakeClusterName(addrs2[0].Attributes) if !ok || cn2 != testClusterName2 { t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn2, ok, testClusterName2) } diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 0cc8d0d82f5c..cc87ba0cc898 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -29,8 +29,8 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/buffer" - xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" @@ -327,7 +327,7 @@ func (cib *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balanc clusterName := cib.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) for i, addr := range addrs { - newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) } return cib.ClientConn.NewSubConn(newAddrs, opts) } @@ -336,7 +336,7 @@ func (cib *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []res clusterName := cib.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) for i, addr := range addrs { - newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) } cib.ClientConn.UpdateAddresses(sc, newAddrs) } diff --git 
a/xds/internal/balancer/edsbalancer/eds_impl.go b/xds/internal/balancer/edsbalancer/eds_impl.go index 94f643d3355e..63b75caae8f9 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl.go +++ b/xds/internal/balancer/edsbalancer/eds_impl.go @@ -23,19 +23,18 @@ import ( "time" "github.com/google/go-cmp/cmp" - xdsinternal "google.golang.org/grpc/internal/credentials/xds" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal" + xdsi "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" "google.golang.org/grpc/xds/internal/client" @@ -58,7 +57,7 @@ type localityConfig struct { type balancerGroupWithConfig struct { bg *balancergroup.BalancerGroup stateAggregator *weightedaggregator.Aggregator - configs map[internal.LocalityID]*localityConfig + configs map[xdsi.LocalityID]*localityConfig } // edsBalancerImpl does load balancing based on the EDS responses. 
Note that it @@ -261,7 +260,7 @@ func (edsImpl *edsBalancerImpl) handleEDSResponse(edsResp xdsclient.EndpointsUpd bgwc = &balancerGroupWithConfig{ bg: balancergroup.New(ccPriorityWrapper, edsImpl.buildOpts, stateAggregator, edsImpl.loadReporter, edsImpl.logger), stateAggregator: stateAggregator, - configs: make(map[internal.LocalityID]*localityConfig), + configs: make(map[xdsi.LocalityID]*localityConfig), } edsImpl.priorityToLocalities[priority] = bgwc priorityChanged = true @@ -295,7 +294,7 @@ func (edsImpl *edsBalancerImpl) handleEDSResponsePerPriority(bgwc *balancerGroup // newLocalitiesSet contains all names of localities in the new EDS response // for the same priority. It's used to delete localities that are removed in // the new EDS response. - newLocalitiesSet := make(map[internal.LocalityID]struct{}) + newLocalitiesSet := make(map[xdsi.LocalityID]struct{}) var rebuildStateAndPicker bool for _, locality := range newLocalities { // One balancer for each locality. @@ -498,7 +497,7 @@ func (ebwcc *edsBalancerWrapperCC) NewSubConn(addrs []resolver.Address, opts bal clusterName := ebwcc.parent.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) for i, addr := range addrs { - newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) } return ebwcc.parent.newSubConn(ebwcc.priority, newAddrs, opts) } @@ -507,7 +506,7 @@ func (ebwcc *edsBalancerWrapperCC) UpdateAddresses(sc balancer.SubConn, addrs [] clusterName := ebwcc.parent.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) for i, addr := range addrs { - newAddrs[i] = xdsinternal.SetHandshakeClusterName(addr, clusterName) + newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) } ebwcc.ClientConn.UpdateAddresses(sc, newAddrs) } diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index 79332dfe1fd3..7e793c034a84 100644 --- 
a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -26,14 +26,14 @@ import ( corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal" + xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/client" xdsclient "google.golang.org/grpc/xds/internal/client" @@ -834,7 +834,7 @@ func (s) TestEDS_LoadReport(t *testing.T) { edsb.updateServiceRequestsConfig(testServiceName, &maxRequestsTemp) defer client.ClearCounterForTesting(testServiceName) - backendToBalancerID := make(map[balancer.SubConn]internal.LocalityID) + backendToBalancerID := make(map[balancer.SubConn]xdsinternal.LocalityID) const testDropCategory = "test-drop" // Two localities, each with one backend. 
@@ -844,7 +844,7 @@ func (s) TestEDS_LoadReport(t *testing.T) { sc1 := <-cc.NewSubConnCh edsb.handleSubConnStateChange(sc1, connectivity.Connecting) edsb.handleSubConnStateChange(sc1, connectivity.Ready) - locality1 := internal.LocalityID{SubZone: testSubZones[0]} + locality1 := xdsinternal.LocalityID{SubZone: testSubZones[0]} backendToBalancerID[sc1] = locality1 // Add the second locality later to make sure sc2 belongs to the second @@ -855,7 +855,7 @@ func (s) TestEDS_LoadReport(t *testing.T) { sc2 := <-cc.NewSubConnCh edsb.handleSubConnStateChange(sc2, connectivity.Connecting) edsb.handleSubConnStateChange(sc2, connectivity.Ready) - locality2 := internal.LocalityID{SubZone: testSubZones[1]} + locality2 := xdsinternal.LocalityID{SubZone: testSubZones[1]} backendToBalancerID[sc2] = locality2 // Test roundrobin with two subconns. @@ -954,7 +954,7 @@ func (s) TestEDS_ClusterNameInAddressAttributes(t *testing.T) { if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } - cn, ok := xdsinternal.GetHandshakeClusterName(addrs1[0].Attributes) + cn, ok := internal.GetXDSHandshakeClusterName(addrs1[0].Attributes) if !ok || cn != clusterName1 { t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn, ok, clusterName1) } @@ -986,7 +986,7 @@ func (s) TestEDS_ClusterNameInAddressAttributes(t *testing.T) { t.Fatalf("sc is created with addr %v, want %v", got, want) } // New addresses should have the new cluster name. 
- cn2, ok := xdsinternal.GetHandshakeClusterName(addrs2[0].Attributes) + cn2, ok := internal.GetXDSHandshakeClusterName(addrs2[0].Attributes) if !ok || cn2 != clusterName2 { t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn2, ok, clusterName1) } From 1a870aec2ff99bb682d5e200763c9124185eafca Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 15 Apr 2021 15:08:03 -0700 Subject: [PATCH 023/998] xds/clusterimpl: trigger re-resolution on subconn transient_failure (#4314) --- .../balancer/clusterimpl/balancer_test.go | 95 +++++++++++++++++++ .../balancer/clusterimpl/clusterimpl.go | 12 +++ xds/internal/testutils/balancer.go | 18 ++-- 3 files changed, 119 insertions(+), 6 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index f002c6a239d7..d3789ffddca3 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/internal" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/load" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" @@ -55,6 +56,13 @@ var ( } ) +func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { + return func() balancer.SubConn { + scst, _ := p.Pick(balancer.PickInfo{}) + return scst.SubConn + } +} + func init() { newRandomWRR = testutils.NewTestWRR } @@ -62,6 +70,7 @@ func init() { // TestDropByCategory verifies that the balancer correctly drops the picks, and // that the drops are reported. 
func TestDropByCategory(t *testing.T) { + defer client.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -219,6 +228,7 @@ func TestDropByCategory(t *testing.T) { // TestDropCircuitBreaking verifies that the balancer correctly drops the picks // due to circuit breaking, and that the drops are reported. func TestDropCircuitBreaking(t *testing.T) { + defer client.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -330,6 +340,7 @@ func TestDropCircuitBreaking(t *testing.T) { // picker after it's closed. Because picker updates are sent in the run() // goroutine. func TestPickerUpdateAfterClose(t *testing.T) { + defer client.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -374,6 +385,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { // TestClusterNameInAddressAttributes covers the case that cluster name is // attached to the subconn address attributes. func TestClusterNameInAddressAttributes(t *testing.T) { + defer client.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -460,3 +472,86 @@ func TestClusterNameInAddressAttributes(t *testing.T) { t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn2, ok, testClusterName2) } } + +// TestReResolution verifies that when a SubConn turns transient failure, +// re-resolution is triggered. 
+func TestReResolution(t *testing.T) { + defer client.ClearCounterForTesting(testClusterName) + xdsC := fakeclient.NewClient() + oldNewXDSClient := newXDSClient + newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + defer func() { newXDSClient = oldNewXDSClient }() + + builder := balancer.Get(clusterImplName) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: testBackendAddrs, + }, + BalancerConfig: &lbConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // This should get the connecting picker. + p0 := <-cc.NewPickerCh + for i := 0; i < 10; i++ { + _, err := p0.Pick(balancer.PickInfo{}) + if err != balancer.ErrNoSubConnAvailable { + t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) + } + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + // This should get the transient failure picker. + p1 := <-cc.NewPickerCh + for i := 0; i < 10; i++ { + _, err := p1.Pick(balancer.PickInfo{}) + if err == nil { + t.Fatalf("picker.Pick, got _,%v, want not nil", err) + } + } + + // The transient failure should trigger a re-resolution. + select { + case <-cc.ResolveNowCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for ResolveNow()") + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Test pick with one backend. 
+ p2 := <-cc.NewPickerCh + want := []balancer.SubConn{sc1} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + // This should get the transient failure picker. + p3 := <-cc.NewPickerCh + for i := 0; i < 10; i++ { + _, err := p3.Pick(balancer.PickInfo{}) + if err == nil { + t.Fatalf("picker.Pick, got _,%v, want not nil", err) + } + } + + // The transient failure should trigger a re-resolution. + select { + case <-cc.ResolveNowCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for ResolveNow()") + } +} diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index cc87ba0cc898..7d4f7695daf4 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -29,6 +29,7 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" @@ -286,6 +287,17 @@ func (cib *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balanc return } + // Trigger re-resolution when a SubConn turns transient failure. This is + // necessary for the LogicalDNS in cluster_resolver policy to re-resolve. + // + // Note that this happens not only for the addresses from DNS, but also for + // EDS (cluster_impl doesn't know if it's DNS or EDS, only the parent + // knows). The parent priority policy is configured to ignore re-resolution + // signal from the EDS children. 
+ if s.ConnectivityState == connectivity.TransientFailure { + cib.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) + } + if cib.childLB != nil { cib.childLB.UpdateSubConnState(sc, s) } diff --git a/xds/internal/testutils/balancer.go b/xds/internal/testutils/balancer.go index dab84a84e072..820d5bf3a1ac 100644 --- a/xds/internal/testutils/balancer.go +++ b/xds/internal/testutils/balancer.go @@ -76,8 +76,9 @@ type TestClientConn struct { RemoveSubConnCh chan balancer.SubConn // the last 10 subconn removed. UpdateAddressesAddrsCh chan []resolver.Address // last updated address via UpdateAddresses(). - NewPickerCh chan balancer.Picker // the last picker updated. - NewStateCh chan connectivity.State // the last state. + NewPickerCh chan balancer.Picker // the last picker updated. + NewStateCh chan connectivity.State // the last state. + ResolveNowCh chan resolver.ResolveNowOptions // the last ResolveNow(). subConnIdx int } @@ -92,8 +93,9 @@ func NewTestClientConn(t *testing.T) *TestClientConn { RemoveSubConnCh: make(chan balancer.SubConn, 10), UpdateAddressesAddrsCh: make(chan []resolver.Address, 1), - NewPickerCh: make(chan balancer.Picker, 1), - NewStateCh: make(chan connectivity.State, 1), + NewPickerCh: make(chan balancer.Picker, 1), + NewStateCh: make(chan connectivity.State, 1), + ResolveNowCh: make(chan resolver.ResolveNowOptions, 1), } } @@ -151,8 +153,12 @@ func (tcc *TestClientConn) UpdateState(bs balancer.State) { } // ResolveNow panics. -func (tcc *TestClientConn) ResolveNow(resolver.ResolveNowOptions) { - panic("not implemented") +func (tcc *TestClientConn) ResolveNow(o resolver.ResolveNowOptions) { + select { + case <-tcc.ResolveNowCh: + default: + } + tcc.ResolveNowCh <- o } // Target panics. 
From 41676e61b1d576484cf2c0315a25fe2c9438c769 Mon Sep 17 00:00:00 2001 From: lzhfromustc <43191155+lzhfromustc@users.noreply.github.com> Date: Mon, 19 Apr 2021 12:49:37 -0400 Subject: [PATCH 024/998] Fix goroutine leaks (#4214) --- clientconn_state_transition_test.go | 16 ++++++---------- .../alts/internal/handshaker/handshaker_test.go | 7 +++++-- credentials/local/local_test.go | 2 ++ internal/transport/keepalive_test.go | 2 +- internal/transport/proxy_test.go | 2 +- test/end2end_test.go | 2 -- 6 files changed, 15 insertions(+), 16 deletions(-) diff --git a/clientconn_state_transition_test.go b/clientconn_state_transition_test.go index 0c58131a1c6f..cd1213fb4fd1 100644 --- a/clientconn_state_transition_test.go +++ b/clientconn_state_transition_test.go @@ -210,7 +210,8 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { } defer lis.Close() - sawReady := make(chan struct{}) + sawReady := make(chan struct{}, 1) + defer close(sawReady) // Launch the server. go func() { @@ -250,7 +251,7 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen == connectivity.Ready { - close(sawReady) + sawReady <- struct{}{} } if seen != want[i] { t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen) @@ -378,7 +379,8 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { defer lis2.Close() server1Done := make(chan struct{}) - sawReady := make(chan struct{}) + sawReady := make(chan struct{}, 1) + defer close(sawReady) // Launch server 1. 
go func() { @@ -400,12 +402,6 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { conn.Close() - _, err = lis1.Accept() - if err != nil { - t.Error(err) - return - } - close(server1Done) }() @@ -430,7 +426,7 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen == connectivity.Ready { - close(sawReady) + sawReady <- struct{}{} } if seen != want[i] { t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen) diff --git a/credentials/alts/internal/handshaker/handshaker_test.go b/credentials/alts/internal/handshaker/handshaker_test.go index bf516dc53c87..14a0721054f2 100644 --- a/credentials/alts/internal/handshaker/handshaker_test.go +++ b/credentials/alts/internal/handshaker/handshaker_test.go @@ -21,6 +21,7 @@ package handshaker import ( "bytes" "context" + "errors" "testing" "time" @@ -163,7 +164,8 @@ func (s) TestClientHandshake(t *testing.T) { go func() { _, context, err := chs.ClientHandshake(ctx) if err == nil && context == nil { - panic("expected non-nil ALTS context") + errc <- errors.New("expected non-nil ALTS context") + return } errc <- err chs.Close() @@ -219,7 +221,8 @@ func (s) TestServerHandshake(t *testing.T) { go func() { _, context, err := shs.ServerHandshake(ctx) if err == nil && context == nil { - panic("expected non-nil ALTS context") + errc <- errors.New("expected non-nil ALTS context") + return } errc <- err shs.Close() diff --git a/credentials/local/local_test.go b/credentials/local/local_test.go index 00ae39f07e56..47f8dbb4ec85 100644 --- a/credentials/local/local_test.go +++ b/credentials/local/local_test.go @@ -131,11 +131,13 @@ func serverHandle(hs serverHandshake, done chan testServerHandleResult, lis net. 
serverRawConn, err := lis.Accept() if err != nil { done <- testServerHandleResult{authInfo: nil, err: fmt.Errorf("Server failed to accept connection. Error: %v", err)} + return } serverAuthInfo, err := hs(serverRawConn) if err != nil { serverRawConn.Close() done <- testServerHandleResult{authInfo: nil, err: fmt.Errorf("Server failed while handshake. Error: %v", err)} + return } done <- testServerHandleResult{authInfo: serverAuthInfo, err: nil} } diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index 1f6603cd759e..d684f5827103 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -193,7 +193,7 @@ func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { // We read from the net.Conn till we get an error, which is expected when // the server closes the connection as part of the keepalive logic. - errCh := make(chan error) + errCh := make(chan error, 1) go func() { b := make([]byte, 24) for { diff --git a/internal/transport/proxy_test.go b/internal/transport/proxy_test.go index a2f1aa438546..5434e9ed5c01 100644 --- a/internal/transport/proxy_test.go +++ b/internal/transport/proxy_test.go @@ -119,7 +119,7 @@ func testHTTPConnect(t *testing.T, proxyURLModify func(*url.URL) *url.URL, proxy msg := []byte{4, 3, 5, 2} recvBuf := make([]byte, len(msg)) - done := make(chan error) + done := make(chan error, 1) go func() { in, err := blis.Accept() if err != nil { diff --git a/test/end2end_test.go b/test/end2end_test.go index 746df1745116..832ac8bd7180 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -6504,10 +6504,8 @@ func (s) TestDisabledIOBuffers(t *testing.T) { t.Fatalf("Failed to create listener: %v", err) } - done := make(chan struct{}) go func() { s.Serve(lis) - close(done) }() defer s.Stop() dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second) From 1c598a11a4e503e1cfd500999c040e72072dc16b Mon Sep 17 00:00:00 2001 From: Zach Reyes 
<39203661+zasweq@users.noreply.github.com> Date: Tue, 20 Apr 2021 13:20:09 -0400 Subject: [PATCH 025/998] Move exponential backoff to DNS resolver from resolver.ClientConn (#4270) --- balancer_conn_wrappers_test.go | 90 ------- dialoptions.go | 17 +- internal/resolver/dns/dns_resolver.go | 39 ++- internal/resolver/dns/dns_resolver_test.go | 268 ++++++++++++++++++++- resolver/resolver.go | 2 +- resolver_conn_wrapper.go | 63 +---- resolver_conn_wrapper_test.go | 85 ------- test/balancer_test.go | 14 +- xds/internal/resolver/xds_resolver_test.go | 3 +- 9 files changed, 301 insertions(+), 280 deletions(-) delete mode 100644 balancer_conn_wrappers_test.go diff --git a/balancer_conn_wrappers_test.go b/balancer_conn_wrappers_test.go deleted file mode 100644 index 935d11d1d391..000000000000 --- a/balancer_conn_wrappers_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "net" - "testing" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" -) - -// TestBalancerErrorResolverPolling injects balancer errors and verifies -// ResolveNow is called on the resolver with the appropriate backoff strategy -// being consulted between ResolveNow calls. 
-func (s) TestBalancerErrorResolverPolling(t *testing.T) { - // The test balancer will return ErrBadResolverState iff the - // ClientConnState contains no addresses. - bf := stub.BalancerFuncs{ - UpdateClientConnState: func(_ *stub.BalancerData, s balancer.ClientConnState) error { - if len(s.ResolverState.Addresses) == 0 { - return balancer.ErrBadResolverState - } - return nil - }, - } - const balName = "BalancerErrorResolverPolling" - stub.Register(balName, bf) - - testResolverErrorPolling(t, - func(r *manual.Resolver) { - // No addresses so the balancer will fail. - r.CC.UpdateState(resolver.State{}) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. Include some address so the balancer - // will be happy. - go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "x"}}}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, balName))) -} - -// TestRoundRobinZeroAddressesResolverPolling reports no addresses to the round -// robin balancer and verifies ResolveNow is called on the resolver with the -// appropriate backoff strategy being consulted between ResolveNow calls. -func (s) TestRoundRobinZeroAddressesResolverPolling(t *testing.T) { - // We need to start a real server or else the connecting loop will call - // ResolveNow after every iteration, even after a valid resolver result is - // returned. - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error while listening. Err: %v", err) - } - defer lis.Close() - s := NewServer() - defer s.Stop() - go s.Serve(lis) - - testResolverErrorPolling(t, - func(r *manual.Resolver) { - // No addresses so the balancer will fail. - r.CC.UpdateState(resolver.State{}) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which - // blocks on rn), so call it in a goroutine. 
Include a valid - // address so the balancer will be happy. - go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name))) -} diff --git a/dialoptions.go b/dialoptions.go index e7f86e6d7c81..7a497237bbd3 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -66,11 +66,7 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string - // This is used by ccResolverWrapper to backoff between successive calls to - // resolver.ResolveNow(). The user will have no need to configure this, but - // we need to be able to configure this in tests. - resolveNowBackoff func(int) time.Duration - resolvers []resolver.Builder + resolvers []resolver.Builder } // DialOption configures how we set up the connection. @@ -596,7 +592,6 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, - resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, } } @@ -611,16 +606,6 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } -// withResolveNowBackoff specifies the function that clientconn uses to backoff -// between successive calls to resolver.ResolveNow(). -// -// For testing purpose only. -func withResolveNowBackoff(f func(int) time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolveNowBackoff = f - }) -} - // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. 
They will be matched against the scheme used for the diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 304235566589..9d86460ab6fb 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -34,6 +34,7 @@ import ( grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" @@ -46,6 +47,9 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") +// A global to stub out in tests. +var newTimer = time.NewTimer + func init() { resolver.Register(NewBuilder()) } @@ -143,7 +147,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() - d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -201,28 +204,38 @@ func (d *dnsResolver) Close() { func (d *dnsResolver) watcher() { defer d.wg.Done() + backoffIndex := 1 for { - select { - case <-d.ctx.Done(): - return - case <-d.rn: - } - state, err := d.lookup() if err != nil { + // Report error to the underlying grpc.ClientConn. d.cc.ReportError(err) } else { - d.cc.UpdateState(*state) + err = d.cc.UpdateState(*state) } - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. + backoffIndex = 1 + timer = time.NewTimer(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. 
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } select { - case <-t.C: case <-d.ctx.Done(): - t.Stop() + timer.Stop() return + case <-timer.C: } } } diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 1c8469a275a7..52067e39cc68 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -30,9 +30,11 @@ import ( "testing" "time" + "google.golang.org/grpc/balancer" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/leakcheck" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,7 +49,8 @@ func TestMain(m *testing.M) { } const ( - txtBytesLimit = 255 + txtBytesLimit = 255 + defaultTestTimeout = 10 * time.Second ) type testClientConn struct { @@ -57,13 +60,17 @@ type testClientConn struct { state resolver.State updateStateCalls int errChan chan error + updateStateErr error } -func (t *testClientConn) UpdateState(s resolver.State) { +func (t *testClientConn) UpdateState(s resolver.State) error { t.m1.Lock() defer t.m1.Unlock() t.state = s t.updateStateCalls++ + // This error determines whether DNS Resolver actually decides to exponentially backoff or not. + // This can be any error. + return t.updateStateErr } func (t *testClientConn) getState() (resolver.State, int) { @@ -669,6 +676,13 @@ func TestResolve(t *testing.T) { func testDNSResolver(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } tests := []struct { target string addrWant []resolver.Address @@ -736,12 +750,151 @@ func testDNSResolver(t *testing.T) { } } +// DNS Resolver immediately starts polling on an error from grpc. This should continue until the ClientConn doesn't +// send back an error from updating the DNS Resolver's state. +func TestDNSResolverExponentialBackoff(t *testing.T) { + defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + timerChan := testutils.NewChannel() + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } + tests := []struct { + name string + target string + addrWant []resolver.Address + scWant string + }{ + { + "happy case default port", + "foo.bar.com", + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, + generateSC("foo.bar.com"), + }, + { + "happy case specified port", + "foo.bar.com:1234", + []resolver.Address{{Addr: "1.2.3.4:1234"}, {Addr: "5.6.7.8:1234"}}, + generateSC("foo.bar.com"), + }, + { + "happy case another default port", + "srv.ipv4.single.fake", + []resolver.Address{{Addr: "2.4.6.8" + colonDefaultPort}}, + generateSC("srv.ipv4.single.fake"), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b := NewBuilder() + cc := &testClientConn{target: test.target} + // Cause ClientConn to return an error. 
+ cc.updateStateErr = balancer.ErrBadResolverState + r, err := b.Build(resolver.Target{Endpoint: test.target}, cc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("Error building resolver for target %v: %v", test.target, err) + } + var state resolver.State + var cnt int + for i := 0; i < 2000; i++ { + state, cnt = cc.getState() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if cnt == 0 { + t.Fatalf("UpdateState not called after 2s; aborting") + } + if !reflect.DeepEqual(test.addrWant, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", test.target, state.Addresses, test.addrWant) + } + sc := scFromState(state) + if test.scWant != sc { + t.Errorf("Resolved service config of target: %q = %+v, want %+v", test.target, sc, test.scWant) + } + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + // Cause timer to go off 10 times, and see if it calls updateState() correctly. + for i := 0; i < 10; i++ { + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + } + // Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call + // ClientConn update state. + deadline := time.Now().Add(defaultTestTimeout) + for { + cc.m1.Lock() + got := cc.updateStateCalls + cc.m1.Unlock() + if got == 11 { + break + } + + if time.Now().After(deadline) { + t.Fatalf("Exponential backoff is not working as expected - should update state 11 times instead of %d", got) + } + + time.Sleep(time.Millisecond) + } + + // Update resolver.ClientConn to not return an error anymore - this should stop it from backing off. 
+ cc.updateStateErr = nil + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + // Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call + // ClientConn update state the final time. The DNS Resolver should then stop polling. + deadline = time.Now().Add(defaultTestTimeout) + for { + cc.m1.Lock() + got := cc.updateStateCalls + cc.m1.Unlock() + if got == 12 { + break + } + + if time.Now().After(deadline) { + t.Fatalf("Exponential backoff is not working as expected - should stop backing off at 12 total UpdateState calls instead of %d", got) + } + + _, err := timerChan.ReceiveOrFail() + if err { + t.Fatalf("Should not poll again after Client Conn stops returning error.") + } + + time.Sleep(time.Millisecond) + } + r.Close() + }) + } +} + func testDNSResolverWithSRV(t *testing.T) { EnableSRVLookups = true defer func() { EnableSRVLookups = false }() defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { target string addrWant []resolver.Address @@ -855,6 +1008,13 @@ func mutateTbl(target string) func() { func testDNSResolveNow(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } tests := []struct { target string addrWant []resolver.Address @@ -926,6 +1086,13 @@ const colonDefaultPort = ":" + defaultPort func testIPResolver(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { target string want []resolver.Address @@ -975,6 +1142,13 @@ func testIPResolver(t *testing.T) { func TestResolveFunc(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { addr string want error @@ -1013,6 +1187,13 @@ func TestResolveFunc(t *testing.T) { func TestDisableServiceConfig(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { target string scWant string @@ -1059,6 +1240,13 @@ func TestDisableServiceConfig(t *testing.T) { func TestTXTError(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } defer func(v bool) { envconfig.TXTErrIgnore = v }(envconfig.TXTErrIgnore) for _, ignore := range []bool{false, true} { envconfig.TXTErrIgnore = ignore @@ -1090,6 +1278,13 @@ func TestTXTError(t *testing.T) { } func TestDNSResolverRetry(t *testing.T) { + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } b := NewBuilder() target := "ipv4.single.fake" cc := &testClientConn{target: target} @@ -1144,6 +1339,13 @@ func TestDNSResolverRetry(t *testing.T) { func TestCustomAuthority(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { authority string @@ -1251,6 +1453,13 @@ func TestCustomAuthority(t *testing.T) { // requests are made. func TestRateLimitedResolve(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } const dnsResRate = 10 * time.Millisecond dc := replaceDNSResRate(dnsResRate) @@ -1347,21 +1556,66 @@ func TestRateLimitedResolve(t *testing.T) { } } +// DNS Resolver immediately starts polling on an error. This will cause the re-resolution to return another error. +// Thus, test that it constantly sends errors to the grpc.ClientConn. 
func TestReportError(t *testing.T) { const target = "notfoundaddress" + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + timerChan := testutils.NewChannel() + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } cc := &testClientConn{target: target, errChan: make(chan error)} + totalTimesCalledError := 0 b := NewBuilder() r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) if err != nil { - t.Fatalf("%v\n", err) + t.Fatalf("Error building resolver for target %v: %v", target, err) + } + // Should receive first error. + err = <-cc.errChan + if !strings.Contains(err.Error(), "hostLookup error") { + t.Fatalf(`ReportError(err=%v) called; want err contains "hostLookupError"`, err) } + totalTimesCalledError++ + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) defer r.Close() - select { - case err := <-cc.errChan: + + // Cause timer to go off 10 times, and see if it matches DNS Resolver updating Error. + for i := 0; i < 10; i++ { + // Should call ReportError(). 
+ err = <-cc.errChan if !strings.Contains(err.Error(), "hostLookup error") { t.Fatalf(`ReportError(err=%v) called; want err contains "hostLookupError"`, err) } - case <-time.After(time.Second): - t.Fatalf("did not receive error after 1s") + totalTimesCalledError++ + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + } + + if totalTimesCalledError != 11 { + t.Errorf("ReportError() not called 11 times, instead called %d times.", totalTimesCalledError) + } + // Clean up final watcher iteration. + <-cc.errChan + _, err = timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) } } diff --git a/resolver/resolver.go b/resolver/resolver.go index e9fa8e33d923..6a9d234a597a 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -181,7 +181,7 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) + UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling // ResolveNow on the Resolver with exponential backoff. 
diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index f2d81968f9ec..4118de571ab5 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -22,7 +22,6 @@ import ( "fmt" "strings" "sync" - "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" @@ -40,9 +39,6 @@ type ccResolverWrapper struct { resolver resolver.Resolver done *grpcsync.Event curState resolver.State - - pollingMu sync.Mutex - polling chan struct{} } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -93,59 +89,19 @@ func (ccr *ccResolverWrapper) close() { ccr.resolverMu.Unlock() } -// poll begins or ends asynchronous polling of the resolver based on whether -// err is ErrBadResolverState. -func (ccr *ccResolverWrapper) poll(err error) { - ccr.pollingMu.Lock() - defer ccr.pollingMu.Unlock() - if err != balancer.ErrBadResolverState { - // stop polling - if ccr.polling != nil { - close(ccr.polling) - ccr.polling = nil - } - return - } - if ccr.polling != nil { - // already polling - return - } - p := make(chan struct{}) - ccr.polling = p - go func() { - for i := 0; ; i++ { - ccr.resolveNow(resolver.ResolveNowOptions{}) - t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) - select { - case <-p: - t.Stop() - return - case <-ccr.done.Done(): - // Resolver has been closed. - t.Stop() - return - case <-t.C: - select { - case <-p: - return - default: - } - // Timer expired; re-resolve. 
- } - } - }() -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { - return + return nil } channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } ccr.curState = s - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil } func (ccr *ccResolverWrapper) ReportError(err error) { @@ -153,7 +109,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { return } channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) + ccr.cc.updateResolverState(resolver.State{}, err) } // NewAddress is called by the resolver implementation to send addresses to gRPC. 
@@ -166,7 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } ccr.curState.Addresses = addrs - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } // NewServiceConfig is called by the resolver implementation to send service @@ -183,14 +139,13 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { scpr := parseServiceConfig(sc) if scpr.Err != nil { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - ccr.poll(balancer.ErrBadResolverState) return } if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) } ccr.curState.ServiceConfig = scpr - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.cc.updateResolverState(ccr.curState, nil) } func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { diff --git a/resolver_conn_wrapper_test.go b/resolver_conn_wrapper_test.go index f13a408937b1..81c5b9ea874f 100644 --- a/resolver_conn_wrapper_test.go +++ b/resolver_conn_wrapper_test.go @@ -67,62 +67,6 @@ func (s) TestDialParseTargetUnknownScheme(t *testing.T) { } } -func testResolverErrorPolling(t *testing.T, badUpdate func(*manual.Resolver), goodUpdate func(*manual.Resolver), dopts ...DialOption) { - boIter := make(chan int) - resolverBackoff := func(v int) time.Duration { - boIter <- v - return 0 - } - - r := manual.NewBuilderWithScheme("whatever") - rn := make(chan struct{}) - defer func() { close(rn) }() - r.ResolveNowCallback = func(resolver.ResolveNowOptions) { rn <- struct{}{} } - - defaultDialOptions := []DialOption{ - WithInsecure(), - WithResolvers(r), - withResolveNowBackoff(resolverBackoff), - } - cc, err := Dial(r.Scheme()+":///test.server", append(defaultDialOptions, dopts...)...) 
- if err != nil { - t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) - } - defer cc.Close() - badUpdate(r) - - panicAfter := time.AfterFunc(5*time.Second, func() { panic("timed out polling resolver") }) - defer panicAfter.Stop() - - // Ensure ResolveNow is called, then Backoff with the right parameter, several times - for i := 0; i < 7; i++ { - <-rn - if v := <-boIter; v != i { - t.Errorf("Backoff call %v uses value %v", i, v) - } - } - - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. - goodUpdate(r) - - // Wait awhile to ensure ResolveNow and Backoff stop being called when the - // state is OK (i.e. polling was cancelled). - for { - t := time.NewTimer(50 * time.Millisecond) - select { - case <-rn: - // ClientConn is still calling ResolveNow - <-boIter - time.Sleep(5 * time.Millisecond) - continue - case <-t.C: - // ClientConn stopped calling ResolveNow; success - } - break - } -} - const happyBalancerName = "happy balancer" func init() { @@ -136,35 +80,6 @@ func init() { stub.Register(happyBalancerName, bf) } -// TestResolverErrorPolling injects resolver errors and verifies ResolveNow is -// called with the appropriate backoff strategy being consulted between -// ResolveNow calls. -func (s) TestResolverErrorPolling(t *testing.T) { - testResolverErrorPolling(t, func(r *manual.Resolver) { - r.CC.ReportError(errors.New("res err")) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. - go r.CC.UpdateState(resolver.State{}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, happyBalancerName))) -} - -// TestServiceConfigErrorPolling injects a service config error and verifies -// ResolveNow is called with the appropriate backoff strategy being consulted -// between ResolveNow calls. 
-func (s) TestServiceConfigErrorPolling(t *testing.T) { - testResolverErrorPolling(t, func(r *manual.Resolver) { - badsc := r.CC.ParseServiceConfig("bad config") - r.UpdateState(resolver.State{ServiceConfig: badsc}) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. - go r.CC.UpdateState(resolver.State{}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, happyBalancerName))) -} - // TestResolverErrorInBuild makes the resolver.Builder call into the ClientConn // during the Build call. We use two separate mutexes in the code which make // sure there is no data race in this code path, and also that there is no diff --git a/test/balancer_test.go b/test/balancer_test.go index bc22036dbac3..a6a8f726afa8 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -37,7 +37,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/balancerload" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/stubserver" @@ -698,10 +697,7 @@ func (s) TestEmptyAddrs(t *testing.T) { // Initialize pickfirst client pfr := manual.NewBuilderWithScheme("whatever") - pfrnCalled := grpcsync.NewEvent() - pfr.ResolveNowCallback = func(resolver.ResolveNowOptions) { - pfrnCalled.Fire() - } + pfr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(pfr)) @@ -718,16 +714,10 @@ func (s) TestEmptyAddrs(t *testing.T) { // Remove all addresses. pfr.UpdateState(resolver.State{}) - // Wait for a ResolveNow call on the pick first client's resolver. 
- <-pfrnCalled.Done() // Initialize roundrobin client rrr := manual.NewBuilderWithScheme("whatever") - rrrnCalled := grpcsync.NewEvent() - rrr.ResolveNowCallback = func(resolver.ResolveNowOptions) { - rrrnCalled.Fire() - } rrr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(rrr), @@ -745,8 +735,6 @@ func (s) TestEmptyAddrs(t *testing.T) { // Remove all addresses. rrr.UpdateState(resolver.State{}) - // Wait for a ResolveNow call on the round robin client's resolver. - <-rrrnCalled.Done() // Confirm several new RPCs succeed on pick first. for i := 0; i < 10; i++ { diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 82355eecc70b..b3c2006b72be 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -88,8 +88,9 @@ type testClientConn struct { errorCh *testutils.Channel } -func (t *testClientConn) UpdateState(s resolver.State) { +func (t *testClientConn) UpdateState(s resolver.State) error { t.stateCh.Send(s) + return nil } func (t *testClientConn) ReportError(err error) { From 970aa0928304dec8dbf2bc11ee0dd49ad16c8f30 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 21 Apr 2021 10:11:28 -0700 Subject: [PATCH 026/998] xds/balancers: export balancer names and config structs (#4334) --- .../balancer/clusterimpl/balancer_test.go | 44 +++--- .../balancer/clusterimpl/clusterimpl.go | 19 +-- xds/internal/balancer/clusterimpl/config.go | 27 ++-- .../balancer/clusterimpl/config_test.go | 14 +- xds/internal/balancer/clusterimpl/picker.go | 2 +- xds/internal/balancer/edsbalancer/eds_test.go | 2 +- xds/internal/balancer/lrs/balancer.go | 23 +-- xds/internal/balancer/lrs/balancer_test.go | 14 +- xds/internal/balancer/lrs/config.go | 24 +-- xds/internal/balancer/lrs/config_test.go | 10 +- xds/internal/balancer/priority/balancer.go | 12 +- 
.../balancer/priority/balancer_test.go | 148 +++++++++--------- xds/internal/balancer/priority/config.go | 18 ++- xds/internal/balancer/priority/config_test.go | 6 +- .../balancer/weightedtarget/weightedtarget.go | 9 +- .../weightedtarget/weightedtarget_config.go | 29 ++-- .../weightedtarget_config_test.go | 6 +- .../weightedtarget/weightedtarget_test.go | 2 +- 18 files changed, 208 insertions(+), 201 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index d3789ffddca3..f421f2281b59 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -76,7 +76,7 @@ func TestDropByCategory(t *testing.T) { newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() - builder := balancer.Get(clusterImplName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) defer b.Close() @@ -90,11 +90,11 @@ func TestDropByCategory(t *testing.T) { ResolverState: resolver.State{ Addresses: testBackendAddrs, }, - BalancerConfig: &lbConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LRSLoadReportingServerName: newString(testLRSServerName), - DropCategories: []dropCategory{{ + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: newString(testLRSServerName), + DropCategories: []DropConfig{{ Category: dropReason, RequestsPerMillion: million * dropNumerator / dropDenominator, }}, @@ -177,11 +177,11 @@ func TestDropByCategory(t *testing.T) { ResolverState: resolver.State{ Addresses: testBackendAddrs, }, - BalancerConfig: &lbConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LRSLoadReportingServerName: newString(testLRSServerName), - DropCategories: []dropCategory{{ + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + 
EDSServiceName: testServiceName, + LoadReportingServerName: newString(testLRSServerName), + DropCategories: []DropConfig{{ Category: dropReason2, RequestsPerMillion: million * dropNumerator2 / dropDenominator2, }}, @@ -234,7 +234,7 @@ func TestDropCircuitBreaking(t *testing.T) { newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() - builder := balancer.Get(clusterImplName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) defer b.Close() @@ -244,11 +244,11 @@ func TestDropCircuitBreaking(t *testing.T) { ResolverState: resolver.State{ Addresses: testBackendAddrs, }, - BalancerConfig: &lbConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LRSLoadReportingServerName: newString(testLRSServerName), - MaxConcurrentRequests: &maxRequest, + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: newString(testLRSServerName), + MaxConcurrentRequests: &maxRequest, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, @@ -346,7 +346,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() - builder := balancer.Get(clusterImplName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) @@ -355,7 +355,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { ResolverState: resolver.State{ Addresses: testBackendAddrs, }, - BalancerConfig: &lbConfig{ + BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, MaxConcurrentRequests: &maxRequest, @@ -391,7 +391,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() - 
builder := balancer.Get(clusterImplName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) defer b.Close() @@ -400,7 +400,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { ResolverState: resolver.State{ Addresses: testBackendAddrs, }, - BalancerConfig: &lbConfig{ + BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, ChildPolicy: &internalserviceconfig.BalancerConfig{ @@ -451,7 +451,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { ResolverState: resolver.State{ Addresses: []resolver.Address{addr2}, }, - BalancerConfig: &lbConfig{ + BalancerConfig: &LBConfig{ Cluster: testClusterName2, EDSServiceName: testServiceName, ChildPolicy: &internalserviceconfig.BalancerConfig{ diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 7d4f7695daf4..56664e391ac6 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -42,7 +42,8 @@ import ( ) const ( - clusterImplName = "xds_cluster_impl_experimental" + // Name is the name of the cluster_impl balancer. + Name = "xds_cluster_impl_experimental" defaultRequestCountMax = 1024 ) @@ -78,7 +79,7 @@ func (clusterImplBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) } func (clusterImplBB) Name() string { - return clusterImplName + return Name } func (clusterImplBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { @@ -110,7 +111,7 @@ type clusterImplBalancer struct { logger *grpclog.PrefixLogger xdsC xdsClientInterface - config *lbConfig + config *LBConfig childLB balancer.Balancer cancelLoadReport func() edsServiceName string @@ -132,11 +133,11 @@ type clusterImplBalancer struct { // updateLoadStore checks the config for load store, and decides whether it // needs to restart the load reporting stream. 
-func (cib *clusterImplBalancer) updateLoadStore(newConfig *lbConfig) error { +func (cib *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { var updateLoadClusterAndService bool // ClusterName is different, restart. ClusterName is from ClusterName and - // EdsServiceName. + // EDSServiceName. clusterName := cib.getClusterName() if clusterName != newConfig.Cluster { updateLoadClusterAndService = true @@ -161,11 +162,11 @@ func (cib *clusterImplBalancer) updateLoadStore(newConfig *lbConfig) error { // Check if it's necessary to restart load report. var newLRSServerName string - if newConfig.LRSLoadReportingServerName != nil { - newLRSServerName = *newConfig.LRSLoadReportingServerName + if newConfig.LoadReportingServerName != nil { + newLRSServerName = *newConfig.LoadReportingServerName } if cib.lrsServerName != newLRSServerName { - // LrsLoadReportingServerName is different, load should be report to a + // LoadReportingServerName is different, load should be report to a // different server, restart. cib.lrsServerName = newLRSServerName if cib.cancelLoadReport != nil { @@ -188,7 +189,7 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState return nil } - newConfig, ok := s.BalancerConfig.(*lbConfig) + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } diff --git a/xds/internal/balancer/clusterimpl/config.go b/xds/internal/balancer/clusterimpl/config.go index 548ab34bce4d..51ff654f6eb5 100644 --- a/xds/internal/balancer/clusterimpl/config.go +++ b/xds/internal/balancer/clusterimpl/config.go @@ -25,32 +25,33 @@ import ( "google.golang.org/grpc/serviceconfig" ) -type dropCategory struct { +// DropConfig contains the category, and drop ratio. +type DropConfig struct { Category string RequestsPerMillion uint32 } -// lbConfig is the balancer config for weighted_target. 
-type lbConfig struct { - serviceconfig.LoadBalancingConfig +// LBConfig is the balancer config for cluster_impl balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` - Cluster string - EDSServiceName string - LRSLoadReportingServerName *string - MaxConcurrentRequests *uint32 - DropCategories []dropCategory - ChildPolicy *internalserviceconfig.BalancerConfig + Cluster string `json:"cluster,omitempty"` + EDSServiceName string `json:"edsServiceName,omitempty"` + LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + DropCategories []DropConfig `json:"dropCategories,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } return &cfg, nil } -func equalDropCategories(a, b []dropCategory) bool { +func equalDropCategories(a, b []DropConfig) bool { if len(a) != len(b) { return false } diff --git a/xds/internal/balancer/clusterimpl/config_test.go b/xds/internal/balancer/clusterimpl/config_test.go index 89696981e2a0..ccb0c5e74d90 100644 --- a/xds/internal/balancer/clusterimpl/config_test.go +++ b/xds/internal/balancer/clusterimpl/config_test.go @@ -87,7 +87,7 @@ func TestParseConfig(t *testing.T) { tests := []struct { name string js string - want *lbConfig + want *LBConfig wantErr bool }{ { @@ -105,12 +105,12 @@ func TestParseConfig(t *testing.T) { { name: "OK", js: testJSONConfig, - want: &lbConfig{ - Cluster: "test_cluster", - EDSServiceName: "test-eds", - LRSLoadReportingServerName: newString("lrs_server"), - MaxConcurrentRequests: newUint32(123), - DropCategories: []dropCategory{ + want: &LBConfig{ + Cluster: "test_cluster", + EDSServiceName: "test-eds", + LoadReportingServerName: 
newString("lrs_server"), + MaxConcurrentRequests: newUint32(123), + DropCategories: []DropConfig{ {Category: "drop-1", RequestsPerMillion: 314}, {Category: "drop-2", RequestsPerMillion: 159}, }, diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index 6e9d27911534..87faba13a746 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -47,7 +47,7 @@ func gcd(a, b uint32) uint32 { return a } -func newDropper(c dropCategory) *dropper { +func newDropper(c DropConfig) *dropper { w := newRandomWRR() gcdv := gcd(c.RequestsPerMillion, million) // Return true for RequestPerMillion, false for the rest. diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 65b74a1b8af5..544d0a301672 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -818,7 +818,7 @@ func (s) TestBalancerConfigParsing(t *testing.T) { }, }, { - // json with no lrs server name, LrsLoadReportingServerName should + // json with no lrs server name, LoadReportingServerName should // be nil (not an empty string). name: "no-lrs-server-name", js: json.RawMessage(` diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index ab9ee7109db1..460788eb53c3 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -37,7 +37,8 @@ func init() { var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } -const lrsBalancerName = "lrs_experimental" +// Name is the name of the LRS balancer. 
+const Name = "lrs_experimental" type lrsBB struct{} @@ -60,7 +61,7 @@ func (l *lrsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balanc } func (l *lrsBB) Name() string { - return lrsBalancerName + return Name } func (l *lrsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { @@ -74,12 +75,12 @@ type lrsBalancer struct { logger *grpclog.PrefixLogger client *xdsClientWrapper - config *lbConfig + config *LBConfig lb balancer.Balancer // The sub balancer. } func (b *lrsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - newConfig, ok := s.BalancerConfig.(*lbConfig) + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } @@ -182,21 +183,21 @@ func newXDSClientWrapper(c xdsClientInterface) *xdsClientWrapper { // update checks the config and xdsclient, and decides whether it needs to // restart the load reporting stream. -func (w *xdsClientWrapper) update(newConfig *lbConfig) error { +func (w *xdsClientWrapper) update(newConfig *LBConfig) error { var ( restartLoadReport bool updateLoadClusterAndService bool ) // ClusterName is different, restart. ClusterName is from ClusterName and - // EdsServiceName. + // EDSServiceName. 
if w.clusterName != newConfig.ClusterName { updateLoadClusterAndService = true w.clusterName = newConfig.ClusterName } - if w.edsServiceName != newConfig.EdsServiceName { + if w.edsServiceName != newConfig.EDSServiceName { updateLoadClusterAndService = true - w.edsServiceName = newConfig.EdsServiceName + w.edsServiceName = newConfig.EDSServiceName } if updateLoadClusterAndService { @@ -211,11 +212,11 @@ func (w *xdsClientWrapper) update(newConfig *lbConfig) error { w.loadWrapper.UpdateClusterAndService(w.clusterName, w.edsServiceName) } - if w.lrsServerName != newConfig.LrsLoadReportingServerName { - // LrsLoadReportingServerName is different, load should be report to a + if w.lrsServerName != newConfig.LoadReportingServerName { + // LoadReportingServerName is different, load should be report to a // different server, restart. restartLoadReport = true - w.lrsServerName = newConfig.LrsLoadReportingServerName + w.lrsServerName = newConfig.LoadReportingServerName } if restartLoadReport { diff --git a/xds/internal/balancer/lrs/balancer_test.go b/xds/internal/balancer/lrs/balancer_test.go index 0b575b112104..b115860bf16d 100644 --- a/xds/internal/balancer/lrs/balancer_test.go +++ b/xds/internal/balancer/lrs/balancer_test.go @@ -49,7 +49,7 @@ var ( ) // TestLoadReporting verifies that the lrs balancer starts the loadReport -// stream when the lbConfig passed to it contains a valid value for the LRS +// stream when the LBConfig passed to it contains a valid value for the LRS // server (empty string). 
func TestLoadReporting(t *testing.T) { xdsC := fakeclient.NewClient() @@ -57,7 +57,7 @@ func TestLoadReporting(t *testing.T) { newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() - builder := balancer.Get(lrsBalancerName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) lrsB := builder.Build(cc, balancer.BuildOptions{}) defer lrsB.Close() @@ -66,11 +66,11 @@ func TestLoadReporting(t *testing.T) { ResolverState: resolver.State{ Addresses: testBackendAddrs, }, - BalancerConfig: &lbConfig{ - ClusterName: testClusterName, - EdsServiceName: testServiceName, - LrsLoadReportingServerName: testLRSServerName, - Locality: testLocality, + BalancerConfig: &LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: testLRSServerName, + Locality: testLocality, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, diff --git a/xds/internal/balancer/lrs/config.go b/xds/internal/balancer/lrs/config.go index 3d39961401b5..9e8fc1045d4d 100644 --- a/xds/internal/balancer/lrs/config.go +++ b/xds/internal/balancer/lrs/config.go @@ -27,25 +27,27 @@ import ( "google.golang.org/grpc/xds/internal" ) -type lbConfig struct { - serviceconfig.LoadBalancingConfig - ClusterName string - EdsServiceName string - LrsLoadReportingServerName string - Locality *internal.LocalityID - ChildPolicy *internalserviceconfig.BalancerConfig +// LBConfig is the balancer config for lrs balancer. 
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + ClusterName string `json:"clusterName,omitempty"` + EDSServiceName string `json:"edsServiceName,omitempty"` + LoadReportingServerName string `json:"lrsLoadReportingServerName,omitempty"` + Locality *internal.LocalityID `json:"locality,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } if cfg.ClusterName == "" { return nil, fmt.Errorf("required ClusterName is not set in %+v", cfg) } - if cfg.LrsLoadReportingServerName == "" { - return nil, fmt.Errorf("required LrsLoadReportingServerName is not set in %+v", cfg) + if cfg.LoadReportingServerName == "" { + return nil, fmt.Errorf("required LoadReportingServerName is not set in %+v", cfg) } if cfg.Locality == nil { return nil, fmt.Errorf("required Locality is not set in %+v", cfg) diff --git a/xds/internal/balancer/lrs/config_test.go b/xds/internal/balancer/lrs/config_test.go index f49430569fed..c460cd008fff 100644 --- a/xds/internal/balancer/lrs/config_test.go +++ b/xds/internal/balancer/lrs/config_test.go @@ -37,7 +37,7 @@ func TestParseConfig(t *testing.T) { tests := []struct { name string js string - want *lbConfig + want *LBConfig wantErr bool }{ { @@ -95,10 +95,10 @@ func TestParseConfig(t *testing.T) { "childPolicy":[{"round_robin":{}}] } `, - want: &lbConfig{ - ClusterName: testClusterName, - EdsServiceName: testServiceName, - LrsLoadReportingServerName: testLRSServerName, + want: &LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: testLRSServerName, Locality: &xdsinternal.LocalityID{ Region: "test-region", Zone: "test-zone", diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go 
index 6c4ff08378ec..d5c99b0b9146 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -24,6 +24,7 @@ package priority import ( + "encoding/json" "fmt" "sync" "time" @@ -34,10 +35,12 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/hierarchy" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/balancergroup" ) -const priorityBalancerName = "priority_experimental" +// Name is the name of the priority balancer. +const Name = "priority_experimental" func init() { balancer.Register(priorityBB{}) @@ -60,11 +63,14 @@ func (priorityBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) bal go b.run() b.logger.Infof("Created") return b +} +func (b priorityBB) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(s) } func (priorityBB) Name() string { - return priorityBalancerName + return Name } // timerWrapper wraps a timer with a boolean. So that when a race happens @@ -102,7 +108,7 @@ type priorityBalancer struct { } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - newConfig, ok := s.BalancerConfig.(*lbConfig) + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index be14231dcb3f..b15ea303d78f 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -83,7 +83,7 @@ func subConnFromPicker(t *testing.T, p balancer.Picker) func() balancer.SubConn // Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0. 
func (s) TestPriority_HighPriorityReady(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -95,8 +95,8 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -132,8 +132,8 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, @@ -162,8 +162,8 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -190,7 +190,7 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { // down, use 2; remove 2, use 1. 
func (s) TestPriority_SwitchPriority(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -202,8 +202,8 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -269,8 +269,8 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, @@ -328,8 +328,8 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -373,7 +373,7 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { // use 0. 
func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -385,8 +385,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -468,7 +468,7 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { // Init 0 and 1; 0 and 1 both down; add 2, use 2. func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -480,8 +480,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -534,8 +534,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-2": 
{&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, @@ -579,7 +579,7 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { // defer time.Sleep(10 * time.Millisecond) cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -592,8 +592,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, @@ -695,7 +695,7 @@ func (s) TestPriority_InitTimeout(t *testing.T) { }()() cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -707,8 +707,8 @@ func (s) TestPriority_InitTimeout(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -765,7 +765,7 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { }()() cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -777,8 +777,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: 
&lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -808,7 +808,7 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { ResolverState: resolver.State{ Addresses: nil, }, - BalancerConfig: &lbConfig{ + BalancerConfig: &LBConfig{ Children: nil, Priorities: nil, }, @@ -838,8 +838,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[3]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -882,8 +882,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, @@ -933,7 +933,7 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // will be used. 
func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -945,8 +945,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -980,8 +980,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -1032,7 +1032,7 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { defaultPriorityInitTimeout = testPriorityInitTimeout cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1043,8 +1043,8 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, @@ -1058,7 +1058,7 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { ResolverState: resolver.State{ Addresses: nil, }, - BalancerConfig: &lbConfig{ + 
BalancerConfig: &LBConfig{ Children: nil, Priorities: nil, }, @@ -1075,7 +1075,7 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { // Init a(p0) and b(p1); a(p0) is up, use a; move b to p0, a to p1, use b. func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1087,8 +1087,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -1124,8 +1124,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -1176,7 +1176,7 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { // Init a(p0) and b(p1); a(p0) is down, use b; move b to p0, a to p1, use b. 
func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1188,8 +1188,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -1240,8 +1240,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -1276,7 +1276,7 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { // Init a(p0) and b(p1); a(p0) is down, use b; move b to p0, a to p1, use b. 
func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1288,8 +1288,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, @@ -1338,8 +1338,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, @@ -1384,7 +1384,7 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { }()() cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1395,8 +1395,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, @@ -1426,7 +1426,7 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { // be different. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{}, - BalancerConfig: &lbConfig{}, + BalancerConfig: &LBConfig{}, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } @@ -1454,8 +1454,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, @@ -1487,7 +1487,7 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { // Init 0; 0 is up, use 0; change 0's policy, 0 is used. func (s) TestPriority_ChildPolicyChange(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1498,8 +1498,8 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, @@ -1533,8 +1533,8 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: testRRBalancerName}}, }, Priorities: []string{"child-0"}, @@ -1587,7 +1587,7 @@ func init() { // by acquiring a locked mutex. 
func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1598,8 +1598,8 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ "child-0": {&internalserviceconfig.BalancerConfig{Name: inlineUpdateBalancerName}}, }, Priorities: []string{"child-0"}, diff --git a/xds/internal/balancer/priority/config.go b/xds/internal/balancer/priority/config.go index da085908c71d..7704f21d13bd 100644 --- a/xds/internal/balancer/priority/config.go +++ b/xds/internal/balancer/priority/config.go @@ -26,24 +26,26 @@ import ( "google.golang.org/grpc/serviceconfig" ) -type child struct { - Config *internalserviceconfig.BalancerConfig +// Child is a child of priority balancer. +type Child struct { + Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"` } -type lbConfig struct { - serviceconfig.LoadBalancingConfig +// LBConfig represents priority balancer's config. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` // Children is a map from the child balancer names to their configs. Child // names can be found in field Priorities. - Children map[string]*child + Children map[string]*Child `json:"children,omitempty"` // Priorities is a list of child balancer names. They are sorted from // highest priority to low. The type/config for each child can be found in // field Children, with the balancer name as the key. 
- Priorities []string + Priorities []string `json:"priorities,omitempty"` } -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } diff --git a/xds/internal/balancer/priority/config_test.go b/xds/internal/balancer/priority/config_test.go index 15c4069dd1e7..189aa1c91ca4 100644 --- a/xds/internal/balancer/priority/config_test.go +++ b/xds/internal/balancer/priority/config_test.go @@ -30,7 +30,7 @@ func TestParseConfig(t *testing.T) { tests := []struct { name string js string - want *lbConfig + want *LBConfig wantErr bool }{ { @@ -69,8 +69,8 @@ func TestParseConfig(t *testing.T) { } } `, - want: &lbConfig{ - Children: map[string]*child{ + want: &LBConfig{ + Children: map[string]*Child{ "child-1": { &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index ac1aaecd8e51..a210816332b0 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -33,7 +33,8 @@ import ( "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" ) -const weightedTargetName = "weighted_target_experimental" +// Name is the name of the weighted_target balancer. +const Name = "weighted_target_experimental" // newRandomWRR is the WRR constructor used to pick sub-pickers from // sub-balancers. It's to be modified in tests. 
@@ -57,7 +58,7 @@ func (wt *weightedTargetBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOp } func (wt *weightedTargetBB) Name() string { - return weightedTargetName + return Name } func (wt *weightedTargetBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { @@ -75,14 +76,14 @@ type weightedTargetBalancer struct { bg *balancergroup.BalancerGroup stateAggregator *weightedaggregator.Aggregator - targets map[string]target + targets map[string]Target } // UpdateClientConnState takes the new targets in balancer group, // creates/deletes sub-balancers and sends them update. Addresses are split into // groups based on hierarchy path. func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - newConfig, ok := s.BalancerConfig.(*lbConfig) + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config.go b/xds/internal/balancer/weightedtarget/weightedtarget_config.go index 747ce918bc68..52090cd67b02 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_config.go @@ -25,30 +25,23 @@ import ( "google.golang.org/grpc/serviceconfig" ) -type target struct { +// Target represents one target with the weight and the child policy. +type Target struct { // Weight is the weight of the child policy. - Weight uint32 + Weight uint32 `json:"weight,omitempty"` // ChildPolicy is the child policy and it's config. - ChildPolicy *internalserviceconfig.BalancerConfig + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } -// lbConfig is the balancer config for weighted_target. 
The proto representation -// is: -// -// message WeightedTargetConfig { -// message Target { -// uint32 weight = 1; -// repeated LoadBalancingConfig child_policy = 2; -// } -// map targets = 1; -// } -type lbConfig struct { - serviceconfig.LoadBalancingConfig - Targets map[string]target +// LBConfig is the balancer config for weighted_target. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + Targets map[string]Target `json:"targets,omitempty"` } -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go index 92dff8f5fbfc..57cad5c85d0f 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go @@ -56,7 +56,7 @@ func Test_parseConfig(t *testing.T) { tests := []struct { name string js string - want *lbConfig + want *LBConfig wantErr bool }{ { @@ -68,8 +68,8 @@ func Test_parseConfig(t *testing.T) { { name: "OK", js: testJSONConfig, - want: &lbConfig{ - Targets: map[string]target{ + want: &LBConfig{ + Targets: map[string]Target{ "cluster_1": { Weight: 75, ChildPolicy: &internalserviceconfig.BalancerConfig{ diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_test.go index 131f89832c79..eeebab733d61 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_test.go @@ -103,7 +103,7 @@ func init() { for i := 0; i < testBackendAddrsCount; i++ { testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) } - wtbBuilder = balancer.Get(weightedTargetName) + wtbBuilder = 
balancer.Get(Name) wtbParser = wtbBuilder.(balancer.ConfigParser) balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond From 671707bdf3bfa85f176f07810de5100d0109776b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 21 Apr 2021 14:06:54 -0700 Subject: [PATCH 027/998] internal: fix symbol undefined build failure (#4353) Caused by git merge --- xds/internal/balancer/clusterimpl/balancer_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index f421f2281b59..0ff27894ebd5 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -482,7 +482,7 @@ func TestReResolution(t *testing.T) { newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() - builder := balancer.Get(clusterImplName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) defer b.Close() @@ -491,7 +491,7 @@ func TestReResolution(t *testing.T) { ResolverState: resolver.State{ Addresses: testBackendAddrs, }, - BalancerConfig: &lbConfig{ + BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, ChildPolicy: &internalserviceconfig.BalancerConfig{ From 6f35bbbfb82de348a1537774af2ffd706cd3bb12 Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Wed, 21 Apr 2021 17:27:51 -0700 Subject: [PATCH 028/998] test: enable xDS CSDS test (#4354) --- test/kokoro/xds.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index 65b35e3acac6..f9cb7dab7332 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -27,7 +27,7 @@ grpc/tools/run_tests/helper_scripts/prep_xds.sh # they are added into "all". 
GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ python3 grpc/tools/run_tests/run_xds_tests.py \ - --test_case="all,circuit_breaking,timeout,fault_injection" \ + --test_case="all,circuit_breaking,timeout,fault_injection,csds" \ --project_id=grpc-testing \ --project_num=830293263384 \ --source_image=projects/grpc-testing/global/images/xds-test-server-4 \ From f2783f271924fd379910c91fb62aae1dbfad83bd Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Thu, 22 Apr 2021 18:08:53 +0200 Subject: [PATCH 029/998] Run emulated linux arm64 tests (#4344) --- .github/workflows/testing.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 378e2846676f..348800443705 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -53,12 +53,22 @@ jobs: goversion: 1.13 - type: tests111 goversion: 1.11 # Keep until interop tests no longer require Go1.11 + - type: arm64 + goversion: 1.15 steps: # Setup the environment. - name: Setup GOARCH=386 if: ${{ matrix.type == '386' }} run: echo "GOARCH=386" >> $GITHUB_ENV + - name: Setup GOARCH=arm64 + if: ${{ matrix.type == 'arm64' }} + run: echo "GOARCH=arm64" >> $GITHUB_ENV + - name: Setup qemu emulator + if: ${{ matrix.type == 'arm64' }} + # setup qemu-user-static emulator and register it with binfmt_misc so that aarch64 binaries + # are automatically executed using qemu. 
+ run: docker run --rm --privileged multiarch/qemu-user-static:5.2.0-2 --reset --credential yes --persistent yes - name: Setup RETRY if: ${{ matrix.type == 'retry' }} run: echo "GRPC_GO_RETRY=on" >> $GITHUB_ENV From 7276af6dd73483d9edfedbef778c831f044736eb Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 22 Apr 2021 10:45:24 -0700 Subject: [PATCH 030/998] client: fix leaked addrConn struct when addresses are updated (#4347) --- balancer_conn_wrappers.go | 4 ++-- clientconn.go | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 41061d6d3dc5..4cc7f9159b16 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -205,7 +205,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.mu.Lock() defer acbw.mu.Unlock() if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) return } if !acbw.ac.tryUpdateAddrs(addrs) { @@ -220,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.ac.acbw = nil acbw.ac.mu.Unlock() acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) if acState == connectivity.Shutdown { return diff --git a/clientconn.go b/clientconn.go index 0db796ccbd66..d57e54b4dc52 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1446,10 +1446,9 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { } // tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. 
func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { From f02863c306d287e05bcb796035b38fd956db1576 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 22 Apr 2021 14:58:58 -0700 Subject: [PATCH 031/998] xds: specify "h2" as the alpn in xds creds (#4361) --- internal/credentials/xds/handshake_info.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/internal/credentials/xds/handshake_info.go b/internal/credentials/xds/handshake_info.go index ca2e39edd6d1..6789a4cf2e51 100644 --- a/internal/credentials/xds/handshake_info.go +++ b/internal/credentials/xds/handshake_info.go @@ -138,7 +138,10 @@ func (hi *HandshakeInfo) ClientSideTLSConfig(ctx context.Context) (*tls.Config, // Currently the Go stdlib does complete verification of the cert (which // includes hostname verification) or none. We are forced to go with the // latter and perform the normal cert validation ourselves. - cfg := &tls.Config{InsecureSkipVerify: true} + cfg := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{"h2"}, + } km, err := rootProv.KeyMaterial(ctx) if err != nil { @@ -159,7 +162,10 @@ func (hi *HandshakeInfo) ClientSideTLSConfig(ctx context.Context) (*tls.Config, // ServerSideTLSConfig constructs a tls.Config to be used in a server-side // handshake based on the contents of the HandshakeInfo. func (hi *HandshakeInfo) ServerSideTLSConfig(ctx context.Context) (*tls.Config, error) { - cfg := &tls.Config{ClientAuth: tls.NoClientCert} + cfg := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: []string{"h2"}, + } hi.mu.Lock() // On the server side, identityProvider is mandatory. RootProvider is // optional based on whether the server is doing TLS or mTLS. 
From 74fe6eaa41706a8451df3c03a0b131c70f71773d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 22 Apr 2021 14:59:51 -0700 Subject: [PATCH 032/998] github: testing action workflow improvements and update to test Go1.16 (#4358) --- .github/workflows/testing.yml | 89 +++++++++---------- .../tls/certprovider/pemfile/builder_test.go | 2 + .../tls/certprovider/pemfile/watcher_test.go | 2 + credentials/xds/xds_client_test.go | 2 + credentials/xds/xds_server_test.go | 2 + .../advancedtls_integration_test.go | 2 + security/advancedtls/advancedtls_test.go | 2 + security/authorization/engine/engine_test.go | 2 + security/authorization/engine/util_test.go | 2 + xds/csds/csds_test.go | 2 + xds/googledirectpath/googlec2p_test.go | 2 + xds/googledirectpath/utils.go | 4 +- .../balancergroup/balancergroup_test.go | 2 + .../balancer/balancergroup/testutils_test.go | 2 + .../cdsbalancer/cdsbalancer_security_test.go | 2 + .../balancer/cdsbalancer/cdsbalancer_test.go | 2 + .../balancer/clusterimpl/balancer_test.go | 2 + .../balancer/clusterimpl/config_test.go | 2 + .../clustermanager/clustermanager_test.go | 2 + .../balancer/clustermanager/config_test.go | 2 + .../edsbalancer/eds_impl_priority_test.go | 2 + .../balancer/edsbalancer/eds_impl_test.go | 2 + xds/internal/balancer/edsbalancer/eds_test.go | 2 + .../balancer/edsbalancer/util_test.go | 2 + .../balancer/edsbalancer/xds_lrs_test.go | 2 + xds/internal/balancer/lrs/balancer_test.go | 2 + xds/internal/balancer/lrs/config_test.go | 2 + xds/internal/balancer/orca/orca_test.go | 2 + .../balancer/priority/balancer_test.go | 2 + xds/internal/balancer/priority/config_test.go | 2 + xds/internal/balancer/priority/utils_test.go | 2 + .../weightedtarget_config_test.go | 2 + .../weightedtarget/weightedtarget_test.go | 2 + .../client/bootstrap/bootstrap_test.go | 2 + xds/internal/client/cds_test.go | 2 + xds/internal/client/client_test.go | 2 + xds/internal/client/eds_test.go | 2 + xds/internal/client/filter_chain_test.go | 2 + 
xds/internal/client/lds_test.go | 2 + xds/internal/client/load/store_test.go | 2 + xds/internal/client/rds_test.go | 2 + xds/internal/client/requests_counter_test.go | 2 + xds/internal/client/tests/client_test.go | 2 + xds/internal/client/tests/dump_test.go | 2 + xds/internal/client/tests/loadreport_test.go | 2 + xds/internal/client/v2/ack_test.go | 2 + xds/internal/client/v2/cds_test.go | 2 + xds/internal/client/v2/client_test.go | 2 + xds/internal/client/v2/eds_test.go | 2 + xds/internal/client/v2/lds_test.go | 2 + xds/internal/client/v2/rds_test.go | 2 + xds/internal/client/watchers_cluster_test.go | 2 + .../client/watchers_endpoints_test.go | 2 + xds/internal/client/watchers_listener_test.go | 2 + xds/internal/client/watchers_route_test.go | 2 + xds/internal/httpfilter/fault/fault_test.go | 1 + xds/internal/internal_test.go | 2 + xds/internal/resolver/matcher_header_test.go | 2 + xds/internal/resolver/matcher_path_test.go | 2 + xds/internal/resolver/matcher_test.go | 2 + xds/internal/resolver/serviceconfig_test.go | 2 + xds/internal/resolver/watch_service_test.go | 2 + xds/internal/resolver/xds_resolver_test.go | 2 + xds/internal/server/listener_wrapper_test.go | 2 + .../test/xds_client_integration_test.go | 1 + xds/internal/test/xds_integration_test.go | 1 + .../test/xds_server_integration_test.go | 1 + xds/internal/testutils/balancer_test.go | 2 + xds/server_test.go | 2 + 69 files changed, 175 insertions(+), 48 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 348800443705..b6277ea3065f 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -37,71 +37,77 @@ jobs: strategy: matrix: include: - - type: vet - goversion: 1.15 - - type: race - goversion: 1.15 - - type: 386 - goversion: 1.15 - - type: retry - goversion: 1.15 + - type: vet+tests + goversion: 1.16 + + - type: tests + goversion: 1.16 + testflags: -race + + - type: tests + goversion: 1.16 + grpcenv: GRPC_GO_RETRY=on + - type: extras 
- goversion: 1.15 + goversion: 1.16 + - type: tests - goversion: 1.14 + goversion: 1.16 + goarch: 386 + + - type: tests + goversion: 1.16 + goarch: arm64 + - type: tests - goversion: 1.13 - - type: tests111 - goversion: 1.11 # Keep until interop tests no longer require Go1.11 - - type: arm64 goversion: 1.15 + - type: tests + goversion: 1.14 + + - type: tests # Keep until interop tests no longer require Go1.11 + goversion: 1.11 + steps: # Setup the environment. - - name: Setup GOARCH=386 - if: ${{ matrix.type == '386' }} - run: echo "GOARCH=386" >> $GITHUB_ENV - - name: Setup GOARCH=arm64 - if: ${{ matrix.type == 'arm64' }} - run: echo "GOARCH=arm64" >> $GITHUB_ENV + - name: Setup GOARCH + if: matrix.goarch != '' + run: echo "GOARCH=${{ matrix.goarch }}" >> $GITHUB_ENV + - name: Setup qemu emulator - if: ${{ matrix.type == 'arm64' }} + if: matrix.goarch == 'arm64' # setup qemu-user-static emulator and register it with binfmt_misc so that aarch64 binaries # are automatically executed using qemu. run: docker run --rm --privileged multiarch/qemu-user-static:5.2.0-2 --reset --credential yes --persistent yes - - name: Setup RETRY - if: ${{ matrix.type == 'retry' }} - run: echo "GRPC_GO_RETRY=on" >> $GITHUB_ENV + + - name: Setup GRPC environment + if: matrix.grpcenv != '' + run: echo "${{ matrix.grpcenv }}" >> $GITHUB_ENV + - name: Setup Go uses: actions/setup-go@v2 with: go-version: ${{ matrix.goversion }} + - name: Checkout repo uses: actions/checkout@v2 # Only run vet for 'vet' runs. - name: Run vet.sh - if: ${{ matrix.type == 'vet' }} + if: startsWith(matrix.type, 'vet') run: ./vet.sh -install && ./vet.sh - # Main tests run for everything except when testing "extras", the race - # detector and Go1.11 (where we run a reduced set of tests). + # Main tests run for everything except when testing "extras" + # (where we run a reduced set of tests). 
- name: Run tests - if: ${{ matrix.type != 'extras' && matrix.type != 'race' && matrix.type != 'tests111' }} - run: | - go version - go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - - # Race detector tests - - name: Run test race - if: ${{ matrix.TYPE == 'race' }} + if: contains(matrix.type, 'tests') run: | go version - go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + go test ${{ matrix.testflags }} -cpu 1,4 -timeout 7m google.golang.org/grpc/... # Non-core gRPC tests (examples, interop, etc) - name: Run extras tests - if: ${{ matrix.TYPE == 'extras' }} + if: matrix.type == 'extras' run: | go version examples/examples_test.sh @@ -109,12 +115,3 @@ jobs: interop/interop_test.sh cd ${GITHUB_WORKSPACE}/security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... cd ${GITHUB_WORKSPACE}/security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... - - # Reduced set of tests for Go 1.11 - - name: Run Go1.11 tests - if: ${{ matrix.type == 'tests111' }} - run: | - go version - tests=$(find ${GITHUB_WORKSPACE} -name '*_test.go' | xargs -n1 dirname | sort -u | sed "s:^${GITHUB_WORKSPACE}:.:" | sed "s:\/$::" | grep -v ^./security | grep -v ^./credentials/sts | grep -v ^./credentials/tls/certprovider | grep -v ^./credentials/xds | grep -v ^./xds ) - echo "Running tests for " ${tests} - go test -cpu 1,4 -timeout 7m ${tests} diff --git a/credentials/tls/certprovider/pemfile/builder_test.go b/credentials/tls/certprovider/pemfile/builder_test.go index bef00e10c19d..2e49289ff899 100644 --- a/credentials/tls/certprovider/pemfile/builder_test.go +++ b/credentials/tls/certprovider/pemfile/builder_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. 
diff --git a/credentials/tls/certprovider/pemfile/watcher_test.go b/credentials/tls/certprovider/pemfile/watcher_test.go index e43cf7358eca..8b772245525e 100644 --- a/credentials/tls/certprovider/pemfile/watcher_test.go +++ b/credentials/tls/certprovider/pemfile/watcher_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index 8859946ef5fa..2c882be8a549 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/credentials/xds/xds_server_test.go b/credentials/xds/xds_server_test.go index 5c29ba38c286..65f7e8ffa3b9 100644 --- a/credentials/xds/xds_server_test.go +++ b/credentials/xds/xds_server_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index 4bb9e645b0a1..2fabe8f17a8d 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ b/security/advancedtls/advancedtls_integration_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/security/advancedtls/advancedtls_test.go b/security/advancedtls/advancedtls_test.go index 64da81a1700c..827cf031ef6f 100644 --- a/security/advancedtls/advancedtls_test.go +++ b/security/advancedtls/advancedtls_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/security/authorization/engine/engine_test.go b/security/authorization/engine/engine_test.go index c159c4bd5c21..e56f218e5e2b 100644 --- a/security/authorization/engine/engine_test.go +++ b/security/authorization/engine/engine_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2020 gRPC authors. 
* diff --git a/security/authorization/engine/util_test.go b/security/authorization/engine/util_test.go index e766fbf3ffe0..43514296d83a 100644 --- a/security/authorization/engine/util_test.go +++ b/security/authorization/engine/util_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 867d74e5b25b..04a71a7d1e6c 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 2dd31d754f3f..524bb82e0f39 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. diff --git a/xds/googledirectpath/utils.go b/xds/googledirectpath/utils.go index 553b87adf47a..600441979785 100644 --- a/xds/googledirectpath/utils.go +++ b/xds/googledirectpath/utils.go @@ -41,7 +41,7 @@ func getFromMetadata(timeout time.Duration, urlStr string) ([]byte, error) { } resp, err := client.Do(req) if err != nil { - return nil, fmt.Errorf("failed communicating with metadata server: %w", err) + return nil, fmt.Errorf("failed communicating with metadata server: %v", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { @@ -49,7 +49,7 @@ func getFromMetadata(timeout time.Duration, urlStr string) ([]byte, error) { } body, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("failed reading from metadata server: %w", err) + return nil, fmt.Errorf("failed reading from metadata server: %v", err) } return body, nil } diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go index ab6ac3913cbb..1ba9195ab1d0 100644 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ 
b/xds/internal/balancer/balancergroup/balancergroup_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/balancergroup/testutils_test.go b/xds/internal/balancer/balancergroup/testutils_test.go index 1429fa87b3f2..8c0543083ab3 100644 --- a/xds/internal/balancer/balancergroup/testutils_test.go +++ b/xds/internal/balancer/balancergroup/testutils_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 73459dd64101..5c746cfa163c 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2020 gRPC authors. * diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 9c7bc2362ab7..4476a1532d05 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 0ff27894ebd5..d1da371c27f3 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/clusterimpl/config_test.go b/xds/internal/balancer/clusterimpl/config_test.go index ccb0c5e74d90..f83155b5339a 100644 --- a/xds/internal/balancer/clusterimpl/config_test.go +++ b/xds/internal/balancer/clusterimpl/config_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index a40d954ad64f..42c53648553e 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/clustermanager/config_test.go b/xds/internal/balancer/clustermanager/config_test.go index 3328ba1d300f..f591f5ad32d8 100644 --- a/xds/internal/balancer/clustermanager/config_test.go +++ b/xds/internal/balancer/clustermanager/config_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go b/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go index 7696feb5bd04..51b35f22f09d 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index 7e793c034a84..c5e3071d10d8 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 544d0a301672..5c9e5f0b1d53 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. 
diff --git a/xds/internal/balancer/edsbalancer/util_test.go b/xds/internal/balancer/edsbalancer/util_test.go index 748aeffe2bb9..b94905d49889 100644 --- a/xds/internal/balancer/edsbalancer/util_test.go +++ b/xds/internal/balancer/edsbalancer/util_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/edsbalancer/xds_lrs_test.go b/xds/internal/balancer/edsbalancer/xds_lrs_test.go index 9f93e0b42f08..8b7aab657667 100644 --- a/xds/internal/balancer/edsbalancer/xds_lrs_test.go +++ b/xds/internal/balancer/edsbalancer/xds_lrs_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/balancer/lrs/balancer_test.go b/xds/internal/balancer/lrs/balancer_test.go index b115860bf16d..f91937385a92 100644 --- a/xds/internal/balancer/lrs/balancer_test.go +++ b/xds/internal/balancer/lrs/balancer_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/balancer/lrs/config_test.go b/xds/internal/balancer/lrs/config_test.go index c460cd008fff..35118298b18e 100644 --- a/xds/internal/balancer/lrs/config_test.go +++ b/xds/internal/balancer/lrs/config_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/orca/orca_test.go b/xds/internal/balancer/orca/orca_test.go index d7a44134e22b..ff02b3c16087 100644 --- a/xds/internal/balancer/orca/orca_test.go +++ b/xds/internal/balancer/orca/orca_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index b15ea303d78f..d546216123d1 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. 
diff --git a/xds/internal/balancer/priority/config_test.go b/xds/internal/balancer/priority/config_test.go index 189aa1c91ca4..f3a09fe3a32e 100644 --- a/xds/internal/balancer/priority/config_test.go +++ b/xds/internal/balancer/priority/config_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/priority/utils_test.go b/xds/internal/balancer/priority/utils_test.go index c80a89b080f9..a4b1a5285102 100644 --- a/xds/internal/balancer/priority/utils_test.go +++ b/xds/internal/balancer/priority/utils_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go index 57cad5c85d0f..351a13553e4e 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_test.go index eeebab733d61..b792c28c6ab0 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/bootstrap/bootstrap_test.go b/xds/internal/client/bootstrap/bootstrap_test.go index 501d62102d21..f62ed2b54245 100644 --- a/xds/internal/client/bootstrap/bootstrap_test.go +++ b/xds/internal/client/bootstrap/bootstrap_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. 
diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index 3fb889db9486..bb1117ec5349 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/client_test.go b/xds/internal/client/client_test.go index 8275ea60e0dc..69930557b26e 100644 --- a/xds/internal/client/client_test.go +++ b/xds/internal/client/client_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/client/eds_test.go b/xds/internal/client/eds_test.go index daa5d6525e19..9d6a3113b0c3 100644 --- a/xds/internal/client/eds_test.go +++ b/xds/internal/client/eds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/filter_chain_test.go b/xds/internal/client/filter_chain_test.go index afb0c81fda14..c68e22286763 100644 --- a/xds/internal/client/filter_chain_test.go +++ b/xds/internal/client/filter_chain_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go index 21e94557b3e9..9fb27987e36b 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/client/lds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/load/store_test.go b/xds/internal/client/load/store_test.go index 46568591f9e4..e7db4e26b176 100644 --- a/xds/internal/client/load/store_test.go +++ b/xds/internal/client/load/store_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/rds_test.go b/xds/internal/client/rds_test.go index 2ca01dca9ca2..cde40ee80dfe 100644 --- a/xds/internal/client/rds_test.go +++ b/xds/internal/client/rds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/client/requests_counter_test.go b/xds/internal/client/requests_counter_test.go index 2dc336d1c1d5..30892fc747a0 100644 --- a/xds/internal/client/requests_counter_test.go +++ b/xds/internal/client/requests_counter_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/tests/client_test.go b/xds/internal/client/tests/client_test.go index f5a57fbcd218..755f0e05ea45 100644 --- a/xds/internal/client/tests/client_test.go +++ b/xds/internal/client/tests/client_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/tests/dump_test.go b/xds/internal/client/tests/dump_test.go index 58220866eb19..de3fcade47e9 100644 --- a/xds/internal/client/tests/dump_test.go +++ b/xds/internal/client/tests/dump_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/client/tests/loadreport_test.go b/xds/internal/client/tests/loadreport_test.go index af145e7f2a92..b1ec37294771 100644 --- a/xds/internal/client/tests/loadreport_test.go +++ b/xds/internal/client/tests/loadreport_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/v2/ack_test.go b/xds/internal/client/v2/ack_test.go index 813d8baa79d9..53c8cef189d5 100644 --- a/xds/internal/client/v2/ack_test.go +++ b/xds/internal/client/v2/ack_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/client/v2/cds_test.go b/xds/internal/client/v2/cds_test.go index c71b84532315..b56ae6108bbe 100644 --- a/xds/internal/client/v2/cds_test.go +++ b/xds/internal/client/v2/cds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. 
diff --git a/xds/internal/client/v2/client_test.go b/xds/internal/client/v2/client_test.go index e770324e1b12..1e464405eeaf 100644 --- a/xds/internal/client/v2/client_test.go +++ b/xds/internal/client/v2/client_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/client/v2/eds_test.go b/xds/internal/client/v2/eds_test.go index 0990e7ebae0e..7eba32f5c605 100644 --- a/xds/internal/client/v2/eds_test.go +++ b/xds/internal/client/v2/eds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/client/v2/lds_test.go b/xds/internal/client/v2/lds_test.go index 1f4c980fae5e..22fa35d5e51e 100644 --- a/xds/internal/client/v2/lds_test.go +++ b/xds/internal/client/v2/lds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/client/v2/rds_test.go b/xds/internal/client/v2/rds_test.go index dd145158b8a9..12495428bf95 100644 --- a/xds/internal/client/v2/rds_test.go +++ b/xds/internal/client/v2/rds_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/watchers_cluster_test.go b/xds/internal/client/watchers_cluster_test.go index fdef0cf61649..2d10c7f43b5f 100644 --- a/xds/internal/client/watchers_cluster_test.go +++ b/xds/internal/client/watchers_cluster_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/watchers_endpoints_test.go b/xds/internal/client/watchers_endpoints_test.go index b79397414d4a..bff4544d2679 100644 --- a/xds/internal/client/watchers_endpoints_test.go +++ b/xds/internal/client/watchers_endpoints_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/client/watchers_listener_test.go b/xds/internal/client/watchers_listener_test.go index bf3a122da075..fdd4ebd163fa 100644 --- a/xds/internal/client/watchers_listener_test.go +++ b/xds/internal/client/watchers_listener_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/client/watchers_route_test.go b/xds/internal/client/watchers_route_test.go index 5f44e5493330..41640b85b574 100644 --- a/xds/internal/client/watchers_route_test.go +++ b/xds/internal/client/watchers_route_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 61100e8c44f8..20de9b9a697a 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -1,3 +1,4 @@ +// +build go1.12 // +build !386 /* diff --git a/xds/internal/internal_test.go b/xds/internal/internal_test.go index 903b9db23c48..9240d0a89d58 100644 --- a/xds/internal/internal_test.go +++ b/xds/internal/internal_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/resolver/matcher_header_test.go b/xds/internal/resolver/matcher_header_test.go index fb87cc5dd329..c83c3ec3923c 100644 --- a/xds/internal/resolver/matcher_header_test.go +++ b/xds/internal/resolver/matcher_header_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/resolver/matcher_path_test.go b/xds/internal/resolver/matcher_path_test.go index 263a049108e4..7b0d296fc324 100644 --- a/xds/internal/resolver/matcher_path_test.go +++ b/xds/internal/resolver/matcher_path_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/resolver/matcher_test.go b/xds/internal/resolver/matcher_test.go index 7657b87bf45f..5c8dca5c9e5b 100644 --- a/xds/internal/resolver/matcher_test.go +++ b/xds/internal/resolver/matcher_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index 1e253841e801..7fe8218160f5 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 2bfe3e984d3c..421e5345a9d2 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index b3c2006b72be..8ec29af9ebf2 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 220be0e08ae7..8b5b5c3851de 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. 
diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index f97e42af2a0a..39b3add77fbc 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -1,3 +1,4 @@ +// +build go1.12 // +build !386 /* diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index ae306ae7864e..13ab5e351a52 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -1,3 +1,4 @@ +// +build go1.12 // +build !386 /* diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index e31bba772ed9..d5b9b8dd20a2 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -1,3 +1,4 @@ +// +build go1.12 // +build !386 /* diff --git a/xds/internal/testutils/balancer_test.go b/xds/internal/testutils/balancer_test.go index 4891eb9cdadf..83393dcd1e98 100644 --- a/xds/internal/testutils/balancer_test.go +++ b/xds/internal/testutils/balancer_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. diff --git a/xds/server_test.go b/xds/server_test.go index 2a6677a3ccbb..41767bc1555a 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2020 gRPC authors. 
From e158e3e82cbac01ba513de4b0982b35b1fcc6183 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 23 Apr 2021 13:15:21 -0700 Subject: [PATCH 033/998] xds/lrs: server name is not required to be non-empty (#4356) --- xds/internal/balancer/lrs/balancer.go | 8 ++++---- xds/internal/balancer/lrs/config.go | 3 --- xds/internal/balancer/lrs/config_test.go | 15 --------------- 3 files changed, 4 insertions(+), 22 deletions(-) diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index 460788eb53c3..e062fa234363 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -168,7 +168,7 @@ type xdsClientWrapper struct { cancelLoadReport func() clusterName string edsServiceName string - lrsServerName string + lrsServerName *string // loadWrapper is a wrapper with loadOriginal, with clusterName and // edsServiceName. It's used children to report loads. loadWrapper *loadstore.Wrapper @@ -212,11 +212,11 @@ func (w *xdsClientWrapper) update(newConfig *LBConfig) error { w.loadWrapper.UpdateClusterAndService(w.clusterName, w.edsServiceName) } - if w.lrsServerName != newConfig.LoadReportingServerName { + if w.lrsServerName == nil || *w.lrsServerName != newConfig.LoadReportingServerName { // LoadReportingServerName is different, load should be report to a // different server, restart. 
restartLoadReport = true - w.lrsServerName = newConfig.LoadReportingServerName + w.lrsServerName = &newConfig.LoadReportingServerName } if restartLoadReport { @@ -226,7 +226,7 @@ func (w *xdsClientWrapper) update(newConfig *LBConfig) error { } var loadStore *load.Store if w.c != nil { - loadStore, w.cancelLoadReport = w.c.ReportLoad(w.lrsServerName) + loadStore, w.cancelLoadReport = w.c.ReportLoad(*w.lrsServerName) } w.loadWrapper.UpdateLoadStore(loadStore) } diff --git a/xds/internal/balancer/lrs/config.go b/xds/internal/balancer/lrs/config.go index 9e8fc1045d4d..e0e30bbb8821 100644 --- a/xds/internal/balancer/lrs/config.go +++ b/xds/internal/balancer/lrs/config.go @@ -46,9 +46,6 @@ func parseConfig(c json.RawMessage) (*LBConfig, error) { if cfg.ClusterName == "" { return nil, fmt.Errorf("required ClusterName is not set in %+v", cfg) } - if cfg.LoadReportingServerName == "" { - return nil, fmt.Errorf("required LoadReportingServerName is not set in %+v", cfg) - } if cfg.Locality == nil { return nil, fmt.Errorf("required Locality is not set in %+v", cfg) } diff --git a/xds/internal/balancer/lrs/config_test.go b/xds/internal/balancer/lrs/config_test.go index 35118298b18e..eaf902ac535d 100644 --- a/xds/internal/balancer/lrs/config_test.go +++ b/xds/internal/balancer/lrs/config_test.go @@ -53,21 +53,6 @@ func TestParseConfig(t *testing.T) { "subZone": "test-sub-zone" }, "childPolicy":[{"round_robin":{}}] -} - `, - wantErr: true, - }, - { - name: "no LRS server name", - js: `{ - "clusterName": "test-cluster", - "edsServiceName": "test-eds-service", - "locality": { - "region": "test-region", - "zone": "test-zone", - "subZone": "test-sub-zone" - }, - "childPolicy":[{"round_robin":{}}] } `, wantErr: true, From 9572fd6faeaee33fe295ce3a79eab729d05bb349 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Fri, 23 Apr 2021 17:26:26 -0700 Subject: [PATCH 034/998] client: include details about GOAWAYs in status messages (#4316) --- internal/transport/http2_client.go | 16 ++++-- 
internal/transport/keepalive_test.go | 10 ++-- internal/transport/transport.go | 5 +- test/end2end_test.go | 80 ++++++++++++++++++++++++++++ 4 files changed, 101 insertions(+), 10 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 892317cc6fca..48c5e52edae9 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -115,6 +115,9 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. And @@ -872,6 +875,12 @@ func (t *http2Client) Close(err error) { if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + if len(goAwayDebugMessage) > 0 { + err = fmt.Errorf("closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + } // Notify all active streams. 
for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, status.New(codes.Unavailable, err.Error()), nil, false) @@ -1146,7 +1155,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { } } id := f.LastStreamID - if id > 0 && id%2 != 1 { + if id > 0 && id%2 == 0 { t.mu.Unlock() t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) return @@ -1212,12 +1221,13 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %v", f.ErrCode, string(f.DebugData())) } -func (t *http2Client) GetGoAwayReason() GoAwayReason { +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { t.mu.Lock() defer t.mu.Unlock() - return t.goAwayReason + return t.goAwayReason, t.goAwayDebugMessage } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index d684f5827103..571cacca7e91 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -69,7 +69,7 @@ func (s) TestMaxConnectionIdle(t *testing.T) { if !timeout.Stop() { <-timeout.C } - if reason := client.GetGoAwayReason(); reason != GoAwayNoReason { + if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } case <-timeout.C: @@ -143,7 +143,7 @@ func (s) TestMaxConnectionAge(t *testing.T) { if !timeout.Stop() { <-timeout.C } - if reason := client.GetGoAwayReason(); reason != GoAwayNoReason { + if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } case <-timeout.C: @@ -403,7 +403,7 @@ func (s) TestKeepaliveClientFrequency(t *testing.T) { if !timeout.Stop() { <-timeout.C } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { + if 
reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) } case <-timeout.C: @@ -448,7 +448,7 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { if !timeout.Stop() { <-timeout.C } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { + if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) } case <-timeout.C: @@ -498,7 +498,7 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { if !timeout.Stop() { <-timeout.C } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { + if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) } case <-timeout.C: diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 068f4d0e5023..6cc1031fd92f 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -656,8 +656,9 @@ type ClientTransport interface { // HTTP/2). GoAway() <-chan struct{} - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) // RemoteAddr returns the remote network address. 
RemoteAddr() net.Addr diff --git a/test/end2end_test.go b/test/end2end_test.go index 832ac8bd7180..1baf2e347d15 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -55,12 +55,14 @@ import ( "google.golang.org/grpc/health" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -1380,6 +1382,84 @@ func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { close(rpcDoneOnClient) } +func (s) TestDetailedGoawayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) { + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, + } + sopts := []grpc.ServerOption{ + grpc.KeepaliveParams(keepalive.ServerParameters{ + MaxConnectionAge: time.Millisecond * 100, + MaxConnectionAgeGrace: time.Millisecond, + }), + } + if err := ss.Start(sopts); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) + } + const expectedErrorMessageSubstring = "received prior goaway: code: NO_ERROR" + _, err = stream.Recv() + close(rpcDoneOnClient) + if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { + t.Fatalf("%v.Recv() = 
_, %v, want _, rpc error containing substring: %q", stream, err, expectedErrorMessageSubstring) + } +} + +func (s) TestDetailedGoawayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) { + // set the min keepalive time very low so that this test can take + // a reasonable amount of time + prev := internal.KeepaliveMinPingTime + internal.KeepaliveMinPingTime = time.Millisecond + defer func() { internal.KeepaliveMinPingTime = prev }() + + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, + } + sopts := []grpc.ServerOption{ + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: time.Second * 1000, /* arbitrary, large value */ + }), + } + dopts := []grpc.DialOption{ + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: time.Millisecond, /* should trigger "too many pings" error quickly */ + Timeout: time.Second * 1000, /* arbitrary, large value */ + PermitWithoutStream: false, + }), + } + if err := ss.Start(sopts, dopts...); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) + } + const expectedErrorMessageSubstring = "received prior goaway: code: ENHANCE_YOUR_CALM, debug data: too_many_pings" + _, err = stream.Recv() + close(rpcDoneOnClient) + if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { + t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: |%v|", stream, err, expectedErrorMessageSubstring) + } +} + func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { From 
52a707c0dafe4ac6c0443c3d83dfdeeb9b828684 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 26 Apr 2021 14:29:06 -0700 Subject: [PATCH 035/998] xds: serving mode changes outlined in gRFC A36 (#4328) --- internal/internal.go | 5 + server.go | 79 +++-- xds/internal/server/listener_wrapper.go | 101 ++++-- xds/internal/test/xds_integration_test.go | 3 +- .../test/xds_server_serving_mode_test.go | 297 ++++++++++++++++++ xds/server.go | 85 ++++- xds/server_options.go | 84 +++++ xds/server_test.go | 42 ++- xds/xds.go | 5 + 9 files changed, 639 insertions(+), 62 deletions(-) create mode 100644 xds/internal/test/xds_server_serving_mode_test.go create mode 100644 xds/server_options.go diff --git a/internal/internal.go b/internal/internal.go index 2a3243bd701a..1b596bf3579f 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -59,6 +59,11 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/server.go b/server.go index 7a2aa28a1147..b2793ab00b53 100644 --- a/server.go +++ b/server.go @@ -57,12 +57,22 @@ import ( const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. 
For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" ) func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } } var statusOK = status.New(codes.OK, "") @@ -107,9 +117,12 @@ type serverWorkerData struct { type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool serve bool drain bool cv *sync.Cond // signaled when connections close for GracefulStop @@ -519,7 +532,7 @@ func NewServer(opt ...ServerOption) *Server { s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[string]map[transport.ServerTransport]bool), services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), @@ -778,7 +791,7 @@ func (s *Server) Serve(lis net.Listener) error { // s.conns before this conn can be added. s.serveWG.Add(1) go func() { - s.handleRawConn(rawConn) + s.handleRawConn(lis.Addr().String(), rawConn) s.serveWG.Done() }() } @@ -786,7 +799,7 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. 
-func (s *Server) handleRawConn(rawConn net.Conn) { +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { if s.quit.HasFired() { rawConn.Close() return @@ -814,15 +827,24 @@ func (s *Server) handleRawConn(rawConn net.Conn) { } rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if !s.addConn(lisAddr, st) { return } go func() { s.serveStreams(st) - s.removeConn(st) + s.removeConn(lisAddr, st) }() } +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { @@ -924,10 +946,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - if !s.addConn(st) { + if !s.addConn(listenerAddressForServeHTTP, st) { return } - defer s.removeConn(st) + defer s.removeConn(listenerAddressForServeHTTP, st) s.serveStreams(st) } @@ -955,7 +977,7 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea return trInfo } -func (s *Server) addConn(st transport.ServerTransport) bool { +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { @@ -967,15 +989,28 @@ func (s *Server) addConn(st transport.ServerTransport) bool { // immediately. st.Drain() } - s.conns[st] = true + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. 
+ s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true return true } -func (s *Server) removeConn(st transport.ServerTransport) { +func (s *Server) removeConn(addr string, st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, st) + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } s.cv.Broadcast() } } @@ -1639,7 +1674,7 @@ func (s *Server) Stop() { s.mu.Lock() listeners := s.lis s.lis = nil - st := s.conns + conns := s.conns s.conns = nil // interrupt GracefulStop if Stop and GracefulStop are called concurrently. s.cv.Broadcast() @@ -1648,8 +1683,10 @@ func (s *Server) Stop() { for lis := range listeners { lis.Close() } - for c := range st { - c.Close() + for _, cs := range conns { + for st := range cs { + st.Close() + } } if s.opts.numServerWorkers > 0 { s.stopServerWorkers() @@ -1686,8 +1723,10 @@ func (s *Server) GracefulStop() { } s.lis = nil if !s.drain { - for st := range s.conns { - st.Drain() + for _, conns := range s.conns { + for st := range conns { + st.Drain() + } } s.drain = true } diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index c65736540920..17f31f28f576 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -48,6 +48,42 @@ var ( backoffFunc = bs.Backoff ) +// ServingMode indicates the current mode of operation of the server. +// +// This API exactly mirrors the one in the public xds package. We have to +// redefine it here to avoid a cyclic dependency. +type ServingMode int + +const ( + // ServingModeStarting indicates that the serving is starting up. 
+ ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required xDS + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required xDS configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeNotServing: + return "not-serving" + case ServingModeServing: + return "serving" + default: + return "starting" + } +} + +// ServingModeCallback is the callback that users can register to get notified +// about the server's serving mode changes. The callback is invoked with the +// address of the listener and its new mode. The err parameter is set to a +// non-nil error if the server has transitioned into not-serving mode. +type ServingModeCallback func(addr net.Addr, mode ServingMode, err error) + +func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", p)) +} @@ -70,6 +106,8 @@ type ListenerWrapperParams struct { XDSCredsInUse bool // XDSClient provides the functionality from the xdsClient required here. XDSClient XDSClientInterface + // ModeCallback is the callback to invoke when the serving mode changes. + ModeCallback ServingModeCallback } // NewListenerWrapper creates a new listenerWrapper with params. 
It returns a @@ -83,6 +121,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru name: params.ListenerResourceName, xdsCredsInUse: params.XDSCredsInUse, xdsC: params.XDSClient, + modeCallback: params.ModeCallback, isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), closed: grpcsync.NewEvent(), @@ -111,12 +150,11 @@ type listenerWrapper struct { net.Listener logger *internalgrpclog.PrefixLogger - // TODO: Maintain serving state of this listener. - name string xdsCredsInUse bool xdsC XDSClientInterface cancelWatch func() + modeCallback ServingModeCallback // Set to true if the listener is bound to the IP_ANY address (which is // "0.0.0.0" for IPv4 and "::" for IPv6). @@ -138,11 +176,14 @@ type listenerWrapper struct { // updates received in the callback if this event has fired. closed *grpcsync.Event - // Filter chains received as part of the last good update. The reason for - // using an rw lock here is that this field will be read by all connections - // during their server-side handshake (in the hot path), but writes to this - // happen rarely (when we get a Listener resource update). - mu sync.RWMutex + // mu guards access to the current serving mode and the filter chains. The + // reason for using an rw lock here is that these fields are read in + // Accept() for all incoming connections, but writes happen rarely (when we + // get a Listener resource update). + mu sync.RWMutex + // Current serving mode. + mode ServingMode + // Filter chains received as part of the last good update. filterChains *xdsclient.FilterChainManager } @@ -175,8 +216,6 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { // Reset retries after a successful Accept(). retries = 0 - // TODO: Close connections if in "non-serving" state - // Since the net.Conn represents an incoming connection, the source and // destination address can be retrieved from the local address and // remote address of the net.Conn respectively. 
@@ -191,6 +230,17 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { } l.mu.RLock() + if l.mode == ServingModeNotServing { + // Close connections as soon as we accept them when we are in + // "not-serving" mode. Since we accept a net.Listener from the user + // in Serve(), we cannot close the listener when we move to + // "not-serving". Closing the connection immediately upon accepting + // is one of the other ways to implement the "not-serving" mode as + // outlined in gRFC A36. + l.mu.RUnlock() + conn.Close() + continue + } fc, err := l.filterChains.Lookup(xdsclient.FilterChainLookupParams{ IsUnspecifiedListener: l.isUnspecifiedAddr, DestAddr: destAddr.IP, @@ -236,14 +286,13 @@ func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, return } - // TODO: Handle resource-not-found errors by moving to not-serving state. if err != nil { - // We simply log an error here and hope we get a successful update - // in the future. The error could be because of a timeout or an - // actual error, like the requested resource not found. In any case, - // it is fine for the server to hang indefinitely until Stop() is - // called. l.logger.Warningf("Received error for resource %q: %+v", l.name, err) + if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { + l.switchMode(nil, ServingModeNotServing, err) + } + // For errors which are anything other than "resource-not-found", we + // continue to use the old configuration. return } l.logger.Infof("Received update for resource %q: %+v", l.name, update) @@ -258,18 +307,26 @@ func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, // appropriate context to perform this check. // // What this means is that the xdsClient has ACKed a resource which can push - // the server into a "not serving" state. This is not ideal, but this is + // the server into a "not serving" mode. This is not ideal, but this is // what we have decided to do. See gRPC A36 for more details. 
ilc := update.InboundListenerCfg if ilc.Address != l.addr || ilc.Port != l.port { - // TODO: Switch to "not serving" if the host:port does not match. - l.logger.Warningf("Received host:port (%s:%d) in Listener update does not match local listening address: (%s:%s", ilc.Address, ilc.Port, l.addr, l.port) + l.switchMode(nil, ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) return } - l.mu.Lock() - l.filterChains = ilc.FilterChains - l.mu.Unlock() + l.switchMode(ilc.FilterChains, ServingModeServing, nil) l.goodUpdate.Fire() - // TODO: Move to serving state on receipt of a good response. +} + +func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode ServingMode, err error) { + l.mu.Lock() + defer l.mu.Unlock() + + l.filterChains = fcs + l.mode = newMode + if l.modeCallback != nil { + l.modeCallback(l.Listener.Addr(), newMode, err) + } + l.logger.Warningf("Listener %q entering mode: %q due to error: %v", l.Addr(), newMode, err) } diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index 13ab5e351a52..1c4b73ac58f8 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -32,7 +32,8 @@ import ( ) const ( - defaultTestTimeout = 10 * time.Second + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 100 * time.Millisecond ) type s struct { diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go new file mode 100644 index 000000000000..0055bc7be508 --- /dev/null +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -0,0 +1,297 @@ +// +build go1.13 +// +build !386 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xds_test contains e2e tests for xDS use. +package xds_test + +import ( + "context" + "fmt" + "net" + "path" + "sync" + "testing" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + "github.com/google/uuid" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/testutils" + xdsinternal "google.golang.org/grpc/internal/xds" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/xds" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/e2e" +) + +// A convenience type used to keep track of mode changes on multiple listeners. 
+type modeTracker struct { + mu sync.Mutex + modes map[string]xds.ServingMode + updateCh *testutils.Channel +} + +func newModeTracker() *modeTracker { + return &modeTracker{ + modes: make(map[string]xds.ServingMode), + updateCh: testutils.NewChannel(), + } +} + +func (mt *modeTracker) updateMode(addr net.Addr, mode xds.ServingMode) { + mt.mu.Lock() + defer mt.mu.Unlock() + + mt.modes[addr.String()] = mode + mt.updateCh.Send(nil) +} + +func (mt *modeTracker) getMode(addr net.Addr) xds.ServingMode { + mt.mu.Lock() + defer mt.mu.Unlock() + return mt.modes[addr.String()] +} + +func (mt *modeTracker) waitForUpdate(ctx context.Context) error { + _, err := mt.updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("error when waiting for a mode change update: %v", err) + } + return nil +} + +// TestServerSideXDS_ServingModeChanges tests the serving mode functionality in +// xDS enabled gRPC servers. It verifies that appropriate mode changes happen in +// the server, and also verifies behavior of clientConns under these modes. +func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { + // Spin up a xDS management server on a local port. + nodeID := uuid.New().String() + fs, err := e2e.StartManagementServer() + if err != nil { + t.Fatal(err) + } + defer fs.Stop() + + // Create certificate and key files in a temporary directory and generate + // certificate provider configuration for a file_watcher plugin. + tmpdir := createTmpDirWithFiles(t, "testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + cpc := e2e.DefaultFileWatcherConfig(path.Join(tmpdir, certFile), path.Join(tmpdir, keyFile), path.Join(tmpdir, rootFile)) + + // Create a bootstrap file in a temporary directory. 
+ bsCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ + Version: xdsinternal.TransportV3, + NodeID: nodeID, + ServerURI: fs.Address, + CertificateProviders: cpc, + ServerListenerResourceNameTemplate: serverListenerResourceNameTemplate, + }) + if err != nil { + t.Fatal(err) + } + defer bsCleanup() + + // Configure xDS credentials to be used on the server-side. + creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a server option to get notified about serving mode changes. + modeTracker := newModeTracker() + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + modeTracker.updateMode(addr, args.Mode) + }) + + // Initialize an xDS-enabled gRPC server and register the stubServer on it. + server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt) + defer server.Stop() + testpb.RegisterTestServiceServer(server, &testService{}) + + // Create two local listeners and pass it to Serve(). + lis1, err := xdstestutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis2, err := xdstestutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + go func() { + if err := server.Serve(lis1); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + go func() { + if err := server.Serve(lis2); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Setup the fake management server to respond with Listener resources that + // we are interested in. 
+ listener1 := listenerResourceWithoutSecurityConfig(t, lis1) + listener2 := listenerResourceWithoutSecurityConfig(t, lis2) + if err := fs.Update(e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener1, listener2}, + }); err != nil { + t.Error(err) + } + + // Wait for both listeners to move to "serving" mode. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeServing); err != nil { + t.Fatal(err) + } + if err := waitForModeChange(ctx, modeTracker, lis2.Addr(), xds.ServingModeServing); err != nil { + t.Fatal(err) + } + + // Create a ClientConn to the first listener and make a successful RPCs. + cc1, err := grpc.DialContext(ctx, lis1.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc1.Close() + + client1 := testpb.NewTestServiceClient(cc1) + if _, err := client1.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Create a ClientConn to the second listener and make a successful RPCs. + cc2, err := grpc.DialContext(ctx, lis2.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc2.Close() + + client2 := testpb.NewTestServiceClient(cc2) + if _, err := client2.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Update the management server to remove the second listener resource. This should + // push the only the second listener into "not-serving" mode. 
+ if err := fs.Update(e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener1}, + }); err != nil { + t.Error(err) + } + if err := waitForModeChange(ctx, modeTracker, lis2.Addr(), xds.ServingModeNotServing); err != nil { + t.Fatal(err) + } + + // Make sure cc1 is still in READY state, while cc2 has moved out of READY. + if s := cc1.GetState(); s != connectivity.Ready { + t.Fatalf("clientConn1 state is %s, want %s", s, connectivity.Ready) + } + if !cc2.WaitForStateChange(ctx, connectivity.Ready) { + t.Fatal("clientConn2 failed to move out of READY") + } + + // Make sure RPCs succeed on cc1 and fail on cc2. + if _, err := client1.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + if _, err := client2.EmptyCall(ctx, &testpb.Empty{}); err == nil { + t.Fatal("rpc EmptyCall() succeeded when expected to fail") + } + + // Update the management server to remove the first listener resource as + // well. This should push the first listener into "not-serving" mode. Second + // listener is already in "not-serving" mode. + if err := fs.Update(e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{}, + }); err != nil { + t.Error(err) + } + if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeNotServing); err != nil { + t.Fatal(err) + } + + // Make sure cc1 has moved out of READY. + if !cc1.WaitForStateChange(ctx, connectivity.Ready) { + t.Fatal("clientConn1 failed to move out of READY") + } + + // Make sure RPCs fail on both. + if _, err := client1.EmptyCall(ctx, &testpb.Empty{}); err == nil { + t.Fatal("rpc EmptyCall() succeeded when expected to fail") + } + if _, err := client2.EmptyCall(ctx, &testpb.Empty{}); err == nil { + t.Fatal("rpc EmptyCall() succeeded when expected to fail") + } + + // Make sure new connection attempts to "not-serving" servers fail. We use a + // short timeout since we expect this to fail. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + _, err = grpc.DialContext(sCtx, lis1.Addr().String(), grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err == nil { + t.Fatal("successfully created clientConn to a server in \"not-serving\" state") + } + + // Update the management server with both listener resources. + if err := fs.Update(e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener1, listener2}, + }); err != nil { + t.Error(err) + } + + // Wait for both listeners to move to "serving" mode. + if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeServing); err != nil { + t.Fatal(err) + } + if err := waitForModeChange(ctx, modeTracker, lis2.Addr(), xds.ServingModeServing); err != nil { + t.Fatal(err) + } + + // The clientConns created earlier should be able to make RPCs now. + if _, err := client1.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + if _, err := client2.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} + +func waitForModeChange(ctx context.Context, modeTracker *modeTracker, addr net.Addr, wantMode xds.ServingMode) error { + for { + if gotMode := modeTracker.getMode(addr); gotMode == wantMode { + return nil + } + if err := modeTracker.waitForUpdate(ctx); err != nil { + return err + } + } +} diff --git a/xds/server.go b/xds/server.go index 805f59b4f5ac..3a2b629ae986 100644 --- a/xds/server.go +++ b/xds/server.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/buffer" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" xdsclient "google.golang.org/grpc/xds/internal/client" @@ -48,9 +49,9 @@ var ( return 
grpc.NewServer(opts...) } - // Unexported function to retrieve transport credentials from a gRPC server. - grpcGetServerCreds = internal.GetServerCredentials.(func(*grpc.Server) credentials.TransportCredentials) - logger = grpclog.Component("xds") + grpcGetServerCreds = internal.GetServerCredentials.(func(*grpc.Server) credentials.TransportCredentials) + drainServerTransports = internal.DrainServerTransports.(func(*grpc.Server, string)) + logger = grpclog.Component("xds") ) func prefixLogger(p *GRPCServer) *internalgrpclog.PrefixLogger { @@ -78,16 +79,12 @@ type grpcServerInterface interface { // communication with a management server using xDS APIs. It implements the // grpc.ServiceRegistrar interface and can be passed to service registration // functions in IDL generated code. -// -// Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. type GRPCServer struct { gs grpcServerInterface quit *grpcsync.Event logger *internalgrpclog.PrefixLogger xdsCredsInUse bool + opts *serverOptions // clientMu is used only in initXDSClient(), which is called at the // beginning of Serve(), where we have to decide if we have to create a @@ -99,11 +96,6 @@ type GRPCServer struct { // NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. // The underlying gRPC server has no service registered and has not started to // accept requests yet. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. 
func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { newOpts := []grpc.ServerOption{ grpc.ChainUnaryInterceptor(xdsUnaryInterceptor), @@ -113,6 +105,7 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { s := &GRPCServer{ gs: newGRPCServer(newOpts...), quit: grpcsync.NewEvent(), + opts: handleServerOptions(opts), } s.logger = prefixLogger(s) s.logger.Infof("Created xds.GRPCServer") @@ -133,6 +126,18 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { return s } +// handleServerOptions iterates through the list of server options passed in by +// the user, and handles the xDS server specific options. +func handleServerOptions(opts []grpc.ServerOption) *serverOptions { + so := &serverOptions{} + for _, opt := range opts { + if o, ok := opt.(serverOption); ok { + o.applyServerOption(so) + } + } + return so +} + // RegisterService registers a service and its implementation to the underlying // gRPC server. It is called from the IDL generated code. This must be called // before invoking Serve. @@ -165,7 +170,6 @@ func (s *GRPCServer) initXDSClient() error { // initiated here. // // Serve will return a non-nil error unless Stop or GracefulStop is called. -// TODO: Support callback to get notified on serving state changes. func (s *GRPCServer) Serve(lis net.Listener) error { s.logger.Infof("Serve() passed a net.Listener on %s", lis.Addr().String()) if _, ok := lis.Addr().(*net.TCPAddr); !ok { @@ -207,6 +211,11 @@ func (s *GRPCServer) Serve(lis net.Listener) error { name = strings.Replace(cfg.ServerListenerResourceNameTemplate, "%s", lis.Addr().String(), -1) } + modeUpdateCh := buffer.NewUnbounded() + go func() { + s.handleServingModeChanges(modeUpdateCh) + }() + // Create a listenerWrapper which handles all functionality required by // this particular instance of Serve(). 
lw, goodUpdateCh := server.NewListenerWrapper(server.ListenerWrapperParams{ @@ -214,6 +223,13 @@ func (s *GRPCServer) Serve(lis net.Listener) error { ListenerResourceName: name, XDSCredsInUse: s.xdsCredsInUse, XDSClient: s.xdsC, + ModeCallback: func(addr net.Addr, mode server.ServingMode, err error) { + modeUpdateCh.Put(&modeChangeArgs{ + addr: addr, + mode: mode, + err: err, + }) + }, }) // Block until a good LDS response is received or the server is stopped. @@ -229,6 +245,47 @@ func (s *GRPCServer) Serve(lis net.Listener) error { return s.gs.Serve(lw) } +// modeChangeArgs wraps argument required for invoking mode change callback. +type modeChangeArgs struct { + addr net.Addr + mode server.ServingMode + err error +} + +// handleServingModeChanges runs as a separate goroutine, spawned from Serve(). +// It reads a channel on to which mode change arguments are pushed, and in turn +// invokes the user registered callback. It also calls an internal method on the +// underlying grpc.Server to gracefully close existing connections, if the +// listener moved to a "not-serving" mode. +func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { + for { + select { + case <-s.quit.Done(): + return + case u := <-updateCh.Get(): + updateCh.Load() + args := u.(*modeChangeArgs) + if args.mode == ServingModeNotServing { + // We type assert our underlying gRPC server to the real + // grpc.Server here before trying to initiate the drain + // operation. This approach avoids performing the same type + // assertion in the grpc package which provides the + // implementation for internal.GetServerCredentials, and allows + // us to use a fake gRPC server in tests. + if gs, ok := s.gs.(*grpc.Server); ok { + drainServerTransports(gs, args.addr.String()) + } + } + if s.opts.modeCallback != nil { + s.opts.modeCallback(args.addr, ServingModeChangeArgs{ + Mode: args.mode, + Err: args.err, + }) + } + } + } +} + // Stop stops the underlying gRPC server. 
It immediately closes all open // connections. It cancels all active RPCs on the server side and the // corresponding pending RPCs on the client side will get notified by connection diff --git a/xds/server_options.go b/xds/server_options.go new file mode 100644 index 000000000000..44b7b374fd00 --- /dev/null +++ b/xds/server_options.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "net" + + "google.golang.org/grpc" + iserver "google.golang.org/grpc/xds/internal/server" +) + +// ServingModeCallback returns a grpc.ServerOption which allows users to +// register a callback to get notified about serving mode changes. +func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { + return &smcOption{cb: cb} +} + +type serverOption interface { + applyServerOption(*serverOptions) +} + +// smcOption is a server option containing a callback to be invoked when the +// serving mode changes. +type smcOption struct { + // Embedding the empty server option makes it safe to pass it to + // grpc.NewServer(). + grpc.EmptyServerOption + cb ServingModeCallbackFunc +} + +func (s *smcOption) applyServerOption(o *serverOptions) { + o.modeCallback = s.cb +} + +type serverOptions struct { + modeCallback ServingModeCallbackFunc +} + +// ServingMode indicates the current mode of operation of the server. 
+type ServingMode = iserver.ServingMode + +const ( + // ServingModeServing indicates that the server contains all required xDS + // configuration and is serving RPCs. + ServingModeServing = iserver.ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required xDS configuration to serve RPCs. + ServingModeNotServing = iserver.ServingModeNotServing +) + +// ServingModeCallbackFunc is the callback that users can register to get +// notified about the server's serving mode changes. The callback is invoked +// with the address of the listener and its new mode. +// +// Users must not perform any blocking operations in this callback. +type ServingModeCallbackFunc func(addr net.Addr, args ServingModeChangeArgs) + +// ServingModeChangeArgs wraps the arguments passed to the serving mode callback +// function. +type ServingModeChangeArgs struct { + // Mode is the new serving mode of the server listener. + Mode ServingMode + // Err is set to a non-nil error if the server has transitioned into + // not-serving mode. + Err error +} diff --git a/xds/server_test.go b/xds/server_test.go index 41767bc1555a..3fb3bcd3818b 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -311,7 +311,14 @@ func (s) TestServeSuccess(t *testing.T) { fs, clientCh, cleanup := setupOverrides() defer cleanup() - server := NewGRPCServer() + // Create a new xDS-enabled gRPC server and pass it a server option to get + // notified about serving mode changes. 
+ modeChangeCh := testutils.NewChannel() + modeChangeOption := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) { + t.Logf("server mode change callback invoked for listener %q with mode %q and error %v", addr.String(), args.Mode, args.Err) + modeChangeCh.Send(args.Mode) + }) + server := NewGRPCServer(modeChangeOption) defer server.Stop() lis, err := xdstestutils.LocalTCPListener() @@ -349,13 +356,22 @@ func (s) TestServeSuccess(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("LDS error")) + client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "LDS resource not found")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { t.Fatal("Serve() returned after a bad LDS response") } + // Make sure the serving mode changes appropriately. + v, err := modeChangeCh.Receive(ctx) + if err != nil { + t.Fatalf("error when waiting for serving mode to change: %v", err) + } + if mode := v.(ServingMode); mode != ServingModeNotServing { + t.Fatalf("server mode is %q, want %q", mode, ServingModeNotServing) + } + // Push a good LDS response, and wait for Serve() to be invoked on the // underlying grpc.Server. addr, port := splitHostPort(lis.Addr().String()) @@ -370,11 +386,18 @@ func (s) TestServeSuccess(t *testing.T) { t.Fatalf("error when waiting for Serve() to be invoked on the grpc.Server") } + // Make sure the serving mode changes appropriately. 
+ v, err = modeChangeCh.Receive(ctx) + if err != nil { + t.Fatalf("error when waiting for serving mode to change: %v", err) + } + if mode := v.(ServingMode); mode != ServingModeServing { + t.Fatalf("server mode is %q, want %q", mode, ServingModeServing) + } + // Push an update to the registered listener watch callback with a Listener // resource whose host:port does not match the actual listening address and - // port. Serve() should not return and should continue to use the old state. - // - // This will change once we add start tracking serving state. + // port. This will push the listener to "not-serving" mode. client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ RouteConfigName: "routeconfig", InboundListenerCfg: &xdsclient.InboundListenerConfig{ @@ -387,6 +410,15 @@ func (s) TestServeSuccess(t *testing.T) { if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { t.Fatal("Serve() returned after a bad LDS response") } + + // Make sure the serving mode changes appropriately. + v, err = modeChangeCh.Receive(ctx) + if err != nil { + t.Fatalf("error when waiting for serving mode to change: %v", err) + } + if mode := v.(ServingMode); mode != ServingModeNotServing { + t.Fatalf("server mode is %q, want %q", mode, ServingModeNotServing) + } } // TestServeWithStop tests the case where Stop() is called before an LDS update diff --git a/xds/xds.go b/xds/xds.go index 562c5aa82abc..23c88903f40b 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -25,6 +25,11 @@ // // See https://github.com/grpc/grpc-go/tree/master/examples/features/xds for // example. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. 
package xds import ( From 145f12a95b19d2a2f926176cd63fe5645b376186 Mon Sep 17 00:00:00 2001 From: Joshua Humphries Date: Tue, 27 Apr 2021 16:15:08 -0400 Subject: [PATCH 036/998] reflection: accept interface instead of grpc.Server struct in Register() (#4340) --- reflection/serverreflection.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index d2696168b10c..82a5ba7f2444 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -54,9 +54,19 @@ import ( "google.golang.org/grpc/status" ) +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + GetServiceInfo() map[string]grpc.ServiceInfo +} + +var _ GRPCServer = (*grpc.Server)(nil) + type serverReflectionServer struct { rpb.UnimplementedServerReflectionServer - s *grpc.Server + s GRPCServer initSymbols sync.Once serviceNames []string @@ -64,7 +74,7 @@ type serverReflectionServer struct { } // Register registers the server reflection service on the given gRPC server. 
-func Register(s *grpc.Server) { +func Register(s GRPCServer) { rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ s: s, }) From 7c5e73795d163c13e616aa53066f9e1d845275dd Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 27 Apr 2021 13:37:48 -0700 Subject: [PATCH 037/998] xds/cds: add separate fields for cluster name and eds service name (#4352) --- .../balancer/cdsbalancer/cdsbalancer.go | 3 +- .../cdsbalancer/cdsbalancer_security_test.go | 14 +-- .../balancer/cdsbalancer/cdsbalancer_test.go | 16 ++-- xds/internal/balancer/edsbalancer/config.go | 8 +- xds/internal/balancer/edsbalancer/eds.go | 56 ++++++++---- .../balancer/edsbalancer/eds_impl_test.go | 11 +-- xds/internal/balancer/edsbalancer/eds_test.go | 22 +++-- .../edsbalancer/load_store_wrapper.go | 88 ------------------- xds/internal/client/cds_test.go | 75 +++++++++++----- xds/internal/client/client.go | 15 ++-- xds/internal/client/client_test.go | 4 +- xds/internal/client/v2/cds_test.go | 4 +- xds/internal/client/watchers_cluster_test.go | 16 ++-- xds/internal/client/xds.go | 75 +++++++--------- 14 files changed, 190 insertions(+), 217 deletions(-) delete mode 100644 xds/internal/balancer/edsbalancer/load_store_wrapper.go diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index b991981c14c0..c97e92bcd02f 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -340,7 +340,8 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) } lbCfg := &edsbalancer.EDSConfig{ - EDSServiceName: update.cds.ServiceName, + ClusterName: update.cds.ClusterName, + EDSServiceName: update.cds.EDSServiceName, MaxConcurrentRequests: update.cds.MaxRequests, } if update.cds.EnableLRS { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go 
b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 5c746cfa163c..18a2298c5258 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -60,7 +60,7 @@ var ( fpb1, fpb2 *fakeProviderBuilder bootstrapConfig *bootstrap.Config cdsUpdateWithGoodSecurityCfg = xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default1", IdentityInstanceName: "default2", @@ -68,7 +68,7 @@ var ( }, } cdsUpdateWithMissingSecurityCfg = xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "not-default", }, @@ -256,7 +256,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -312,7 +312,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -572,7 +572,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // an update which contains bad security config. 
So, we expect the CDS // balancer to forward this error to the EDS balancer and eventually the // channel needs to be put in a bad state. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -678,7 +678,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default1", SubjectAltNameMatchers: testSANMatchers, @@ -703,7 +703,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // Push another update with a new security configuration. cdsUpdate = xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 4476a1532d05..13b17306df8b 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -197,7 +197,7 @@ func cdsCCS(cluster string) balancer.ClientConnState { // cdsBalancer to the edsBalancer. 
func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { lbCfg := &edsbalancer.EDSConfig{ - EDSServiceName: service, + ClusterName: service, MaxConcurrentRequests: countMax, } if enableLRS { @@ -354,12 +354,12 @@ func (s) TestHandleClusterUpdate(t *testing.T) { }{ { name: "happy-case-with-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, + cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, wantCCS: edsCCS(serviceName, nil, true), }, { name: "happy-case-without-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName}, + cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName}, wantCCS: edsCCS(serviceName, nil, false), }, } @@ -427,7 +427,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -512,7 +512,7 @@ func (s) TestResolverError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -561,7 +561,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -596,7 +596,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. var maxRequests uint32 = 1 - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName, MaxRequests: &maxRequests} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName, MaxRequests: &maxRequests} wantCCS := edsCCS(serviceName, &maxRequests, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -629,7 +629,7 @@ func (s) TestClose(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() diff --git a/xds/internal/balancer/edsbalancer/config.go b/xds/internal/balancer/edsbalancer/config.go index 11c1338c81f7..d1583e2bf276 100644 --- a/xds/internal/balancer/edsbalancer/config.go +++ b/xds/internal/balancer/edsbalancer/config.go @@ -35,8 +35,10 @@ type EDSConfig struct { // FallBackPolicy represents the load balancing config for the // fallback. FallBackPolicy *loadBalancingConfig - // Name to use in EDS query. If not present, defaults to the server - // name from the target URI. 
+ // ClusterName is the cluster name. + ClusterName string + // EDSServiceName is the name to use in EDS query. If not set, use + // ClusterName. EDSServiceName string // MaxConcurrentRequests is the max number of concurrent request allowed for // this service. If unset, default value 1024 is used. @@ -59,6 +61,7 @@ type EDSConfig struct { type edsConfigJSON struct { ChildPolicy []*loadBalancingConfig FallbackPolicy []*loadBalancingConfig + ClusterName string EDSServiceName string MaxConcurrentRequests *uint32 LRSLoadReportingServerName *string @@ -73,6 +76,7 @@ func (l *EDSConfig) UnmarshalJSON(data []byte) error { return err } + l.ClusterName = configJSON.ClusterName l.EDSServiceName = configJSON.EDSServiceName l.MaxConcurrentRequests = configJSON.MaxConcurrentRequests l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index de724701df94..1b5191aa1037 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -25,6 +25,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -68,7 +69,7 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp grpcUpdate: make(chan interface{}), xdsClientUpdate: make(chan *edsUpdate), childPolicyUpdate: buffer.NewUnbounded(), - lsw: &loadStoreWrapper{}, + loadWrapper: loadstore.NewWrapper(), config: &EDSConfig{}, } x.logger = prefixLogger(x) @@ -80,7 +81,7 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp } x.xdsClient = client - x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.lsw, x.logger) + x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.loadWrapper, x.logger) 
x.logger.Infof("Created") go x.run() return x @@ -138,14 +139,14 @@ type edsBalancer struct { xdsClientUpdate chan *edsUpdate childPolicyUpdate *buffer.Unbounded - xdsClient xdsClientInterface - lsw *loadStoreWrapper - config *EDSConfig // may change when passed a different service config - edsImpl edsBalancerImplInterface + xdsClient xdsClientInterface + loadWrapper *loadstore.Wrapper + config *EDSConfig // may change when passed a different service config + edsImpl edsBalancerImplInterface - // edsServiceName is the edsServiceName currently being watched, not - // necessary the edsServiceName from service config. + clusterName string edsServiceName string + edsToWatch string // this is edsServiceName if it's set, otherwise, it's clusterName. cancelEndpointsWatch func() loadReportServer *string // LRS is disabled if loadReporterServer is nil. cancelLoadReport func() @@ -241,10 +242,35 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) { // handleServiceConfigUpdate applies the service config update, watching a new // EDS service name and restarting LRS stream, as required. func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { - // Restart EDS watch when the edsServiceName has changed. + var updateLoadClusterAndService bool + if x.clusterName != config.ClusterName { + updateLoadClusterAndService = true + x.clusterName = config.ClusterName + x.edsImpl.updateClusterName(x.clusterName) + } if x.edsServiceName != config.EDSServiceName { + updateLoadClusterAndService = true x.edsServiceName = config.EDSServiceName + } + + // If EDSServiceName is set, use it to watch EDS. Otherwise, use the cluster + // name. + newEDSToWatch := config.EDSServiceName + if newEDSToWatch == "" { + newEDSToWatch = config.ClusterName + } + var restartEDSWatch bool + if x.edsToWatch != newEDSToWatch { + restartEDSWatch = true + x.edsToWatch = newEDSToWatch + } + + // Restart EDS watch when the eds name has changed. 
+ if restartEDSWatch { x.startEndpointsWatch() + } + + if updateLoadClusterAndService { // TODO: this update for the LRS service name is too early. It should // only apply to the new EDS response. But this is applied to the RPCs // before the new EDS response. To fully fix this, the EDS balancer @@ -252,14 +278,13 @@ func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { // // This is OK for now, because we don't actually expect edsServiceName // to change. Fix this (a bigger change) will happen later. - x.lsw.updateServiceName(x.edsServiceName) - x.edsImpl.updateClusterName(x.edsServiceName) + x.loadWrapper.UpdateClusterAndService(x.clusterName, x.edsServiceName) } // Restart load reporting when the loadReportServer name has changed. if !equalStringPointers(x.loadReportServer, config.LrsLoadReportingServerName) { loadStore := x.startLoadReport(config.LrsLoadReportingServerName) - x.lsw.updateLoadStore(loadStore) + x.loadWrapper.UpdateLoadStore(loadStore) } return nil @@ -273,14 +298,15 @@ func (x *edsBalancer) startEndpointsWatch() { if x.cancelEndpointsWatch != nil { x.cancelEndpointsWatch() } - cancelEDSWatch := x.xdsClient.WatchEndpoints(x.edsServiceName, func(update xdsclient.EndpointsUpdate, err error) { + edsToWatch := x.edsToWatch + cancelEDSWatch := x.xdsClient.WatchEndpoints(edsToWatch, func(update xdsclient.EndpointsUpdate, err error) { x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, update) x.handleEDSUpdate(update, err) }) - x.logger.Infof("Watch started on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) + x.logger.Infof("Watch started on resource name %v with xds-client %p", edsToWatch, x.xdsClient) x.cancelEndpointsWatch = func() { cancelEDSWatch() - x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) + x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", edsToWatch, x.xdsClient) } } diff --git 
a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index c5e3071d10d8..3cfc620a2400 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -29,6 +29,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal" + "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -820,9 +821,9 @@ func (s) TestEDS_LoadReport(t *testing.T) { // implements the LoadStore() method to return the underlying load.Store to // be used. loadStore := load.NewStore() - lsWrapper := &loadStoreWrapper{} - lsWrapper.updateServiceName(testClusterNames[0]) - lsWrapper.updateLoadStore(loadStore) + lsWrapper := loadstore.NewWrapper() + lsWrapper.UpdateClusterAndService(testClusterNames[0], "") + lsWrapper.UpdateLoadStore(loadStore) cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, lsWrapper, nil) @@ -913,8 +914,8 @@ func (s) TestEDS_LoadReport(t *testing.T) { // TestEDS_LoadReportDisabled covers the case that LRS is disabled. It makes // sure the EDS implementation isn't broken (doesn't panic). func (s) TestEDS_LoadReportDisabled(t *testing.T) { - lsWrapper := &loadStoreWrapper{} - lsWrapper.updateServiceName(testClusterNames[0]) + lsWrapper := loadstore.NewWrapper() + lsWrapper.UpdateClusterAndService(testClusterNames[0], "") // Not calling lsWrapper.updateLoadStore(loadStore) because LRS is disabled. 
cc := testutils.NewTestClientConn(t) diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 5c9e5f0b1d53..b986654bc409 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -586,7 +586,7 @@ func verifyExpectedRequests(ctx context.Context, fc *fakeclient.Client, resource if err := fc.WaitForCancelEDSWatch(ctx); err != nil { return fmt.Errorf("timed out when expecting resource %q", name) } - return nil + continue } resName, err := fc.WaitForWatchEDS(ctx) @@ -615,6 +615,18 @@ func (s) TestClientWatchEDS(t *testing.T) { } defer edsB.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // If eds service name is not set, should watch for cluster name. + if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ClusterName: "cluster-1"}, + }); err != nil { + t.Fatal(err) + } + if err := verifyExpectedRequests(ctx, xdsC, "cluster-1"); err != nil { + t.Fatal(err) + } + // Update with an non-empty edsServiceName should trigger an EDS watch for // the same. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ @@ -622,9 +634,7 @@ func (s) TestClientWatchEDS(t *testing.T) { }); err != nil { t.Fatal(err) } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := verifyExpectedRequests(ctx, xdsC, "foobar-1"); err != nil { + if err := verifyExpectedRequests(ctx, xdsC, "", "foobar-1"); err != nil { t.Fatal(err) } @@ -694,7 +704,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - EDSServiceName: "foobar-1", + ClusterName: "foobar-1", }, }); err != nil { t.Fatal(err) @@ -713,7 +723,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - EDSServiceName: "foobar-2", + ClusterName: "foobar-2", }, }); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/edsbalancer/load_store_wrapper.go b/xds/internal/balancer/edsbalancer/load_store_wrapper.go deleted file mode 100644 index 18904e47a42e..000000000000 --- a/xds/internal/balancer/edsbalancer/load_store_wrapper.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package edsbalancer - -import ( - "sync" - - "google.golang.org/grpc/xds/internal/client/load" -) - -type loadStoreWrapper struct { - mu sync.RWMutex - service string - // Both store and perCluster will be nil if load reporting is disabled (EDS - // response doesn't have LRS server name). Note that methods on Store and - // perCluster all handle nil, so there's no need to check nil before calling - // them. 
- store *load.Store - perCluster load.PerClusterReporter -} - -func (lsw *loadStoreWrapper) updateServiceName(service string) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if lsw.service == service { - return - } - lsw.service = service - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) updateLoadStore(store *load.Store) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if store == lsw.store { - return - } - lsw.store = store - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) CallStarted(locality string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallStarted(locality) - } -} - -func (lsw *loadStoreWrapper) CallFinished(locality string, err error) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallFinished(locality, err) - } -} - -func (lsw *loadStoreWrapper) CallServerLoad(locality, name string, val float64) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallServerLoad(locality, name, val) - } -} - -func (lsw *loadStoreWrapper) CallDropped(category string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallDropped(category) - } -} diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index bb1117ec5349..0fa2b402ffc9 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -28,12 +28,14 @@ import ( v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3tlspb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/testutils" xdsinternal "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/version" @@ -45,7 +47,7 @@ const ( serviceName = "service" ) -var emptyUpdate = ClusterUpdate{ServiceName: "", EnableLRS: false} +var emptyUpdate = ClusterUpdate{ClusterName: clusterName, EnableLRS: false} func (s) TestValidateCluster_Failure(t *testing.T) { tests := []struct { @@ -141,24 +143,35 @@ func (s) TestValidateCluster_Success(t *testing.T) { { name: "happy-case-logical-dns", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, }, { name: "happy-case-aggregate-v3", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ - ClusterType: &v3clusterpb.Cluster_CustomClusterType{Name: "envoy.clusters.aggregate"}, + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: []string{"a", "b", "c"}, + }), + }, }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: ClusterTypeAggregate}, + wantUpdate: ClusterUpdate{ + ClusterName: 
clusterName, EnableLRS: false, ClusterType: ClusterTypeAggregate, + PrioritizedClusterNames: []string{"a", "b", "c"}, + }, }, { name: "happy-case-no-service-name-no-lrs", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -174,6 +187,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { { name: "happy-case-no-lrs", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -185,7 +199,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: false}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: false}, }, { name: "happiest-case", @@ -207,7 +221,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true}, }, { name: "happiest-case-with-circuitbreakers", @@ -241,7 +255,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, }, } @@ -254,8 +268,8 @@ func (s) TestValidateCluster_Success(t *testing.T) { if err != nil { t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err) } - if !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) { - 
t.Errorf("validateClusterAndConstructClusterUpdate(%+v) = %v, want: %v", test.cluster, update, test.wantUpdate) + if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) } }) } @@ -268,6 +282,7 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() cluster := &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -302,8 +317,9 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { }, } wantUpdate := ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, } gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { @@ -325,6 +341,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { identityCertName = "identityCert" rootPluginInstance = "rootPluginInstance" rootCertName = "rootCert" + clusterName = "cluster" serviceName = "service" sanExact = "san-exact" sanPrefix = "san-prefix" @@ -657,6 +674,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-no-identity-certs", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -691,8 +709,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, 
RootCertName: rootCertName, @@ -702,6 +721,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-validation-context-provider-instance", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -740,8 +760,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -753,6 +774,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-combined-validation-context", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -805,8 +827,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -968,7 +991,8 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v2ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, + ClusterName: v2ClusterName, + EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, }, @@ -982,7 +1006,8 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v3ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, + ClusterName: v3ClusterName, + EDSServiceName: 
v3Service, EnableLRS: true, Raw: v3ClusterAny, }, }, @@ -996,11 +1021,13 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v2ClusterAny, v3ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, + ClusterName: v2ClusterName, + EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, + ClusterName: v3ClusterName, + EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, }, }, @@ -1030,11 +1057,13 @@ func (s) TestUnmarshalCluster(t *testing.T) { }, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, + ClusterName: v2ClusterName, + EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, + ClusterName: v3ClusterName, + EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, }, "bad": {}, diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 2daceede5398..4067536dda24 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -375,22 +375,23 @@ const ( // interest to the registered CDS watcher. type ClusterUpdate struct { ClusterType ClusterType - // ServiceName is the service name corresponding to the clusterName which - // is being watched for through CDS. - ServiceName string + // ClusterName is the clusterName being watched for through CDS. + ClusterName string + // EDSServiceName is an optional name for EDS. If it's not set, the balancer + // should watch ClusterName for the EDS resources. + EDSServiceName string // EnableLRS indicates whether or not load should be reported through LRS. EnableLRS bool // SecurityCfg contains security configuration sent by the control plane. SecurityCfg *SecurityConfig // MaxRequests for circuit breaking, if any (otherwise nil). MaxRequests *uint32 - - // Raw is the resource from the xds response. 
- Raw *anypb.Any - // PrioritizedClusterNames is used only for cluster type aggregate. It represents // a prioritized list of cluster names. PrioritizedClusterNames []string + + // Raw is the resource from the xds response. + Raw *anypb.Any } // OverloadDropConfig contains the config to drop overloads. diff --git a/xds/internal/client/client_test.go b/xds/internal/client/client_test.go index 69930557b26e..d57bc20e2c2c 100644 --- a/xds/internal/client/client_test.go +++ b/xds/internal/client/client_test.go @@ -185,13 +185,13 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { t.Fatal(err) } - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} + wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil { t.Fatal(err) diff --git a/xds/internal/client/v2/cds_test.go b/xds/internal/client/v2/cds_test.go index b56ae6108bbe..d868beb1831b 100644 --- a/xds/internal/client/v2/cds_test.go +++ b/xds/internal/client/v2/cds_test.go @@ -151,7 +151,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: goodCDSResponse2, wantErr: false, wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName2: {ServiceName: serviceName2, Raw: marshaledCluster2}, + goodClusterName2: {ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -164,7 +164,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: goodCDSResponse1, wantErr: false, wantUpdate: 
map[string]xdsclient.ClusterUpdate{ - goodClusterName1: {ServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, + goodClusterName1: {ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, diff --git a/xds/internal/client/watchers_cluster_test.go b/xds/internal/client/watchers_cluster_test.go index 2d10c7f43b5f..c9837cd51978 100644 --- a/xds/internal/client/watchers_cluster_test.go +++ b/xds/internal/client/watchers_cluster_test.go @@ -64,7 +64,7 @@ func (s) TestClusterWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { t.Fatal(err) @@ -128,7 +128,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate); err != nil { @@ -200,8 +200,8 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} + wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{ testCDSName + "1": wantUpdate1, testCDSName + "2": wantUpdate2, @@ -245,7 +245,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { t.Fatalf("want new watch to start, got 
error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) @@ -345,7 +345,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) @@ -402,8 +402,8 @@ func (s) TestClusterResourceRemoved(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} + wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{ testCDSName + "1": wantUpdate1, testCDSName + "2": wantUpdate2, diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index c0caf5cceb57..f64d22ce67d3 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -571,42 +571,9 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin return cluster.GetName(), cu, nil } -func clusterTypeFromCluster(cluster *v3clusterpb.Cluster) (ClusterType, string, []string, error) { - if cluster.GetType() == v3clusterpb.Cluster_EDS { - if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { - return 0, "", nil, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) - } - // If the Cluster message in the CDS response did not contain a - // serviceName, we will just use the clusterName for EDS. 
- if cluster.GetEdsClusterConfig().GetServiceName() == "" { - return ClusterTypeEDS, cluster.GetName(), nil, nil - } - return ClusterTypeEDS, cluster.GetEdsClusterConfig().GetServiceName(), nil, nil - } - - if cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS { - return ClusterTypeLogicalDNS, cluster.GetName(), nil, nil - } - - if cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate" { - // Loop through ClusterConfig here to get cluster names. - clusters := &v3aggregateclusterpb.ClusterConfig{} - if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { - return 0, "", nil, fmt.Errorf("failed to unmarshal resource: %v", err) - } - return ClusterTypeAggregate, cluster.GetName(), clusters.Clusters, nil - } - return 0, "", nil, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) -} - func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false} if cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN { - return emptyUpdate, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) - } - clusterType, serviceName, prioritizedClusters, err := clusterTypeFromCluster(cluster) - if err != nil { - return emptyUpdate, err + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } // Process security configuration received from the control plane iff the @@ -615,18 +582,40 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu if env.ClientSideSecuritySupport { var err error if sc, err = securityConfigFromCluster(cluster); err != nil { - return emptyUpdate, err + return ClusterUpdate{}, err } } - return ClusterUpdate{ - ClusterType: clusterType, - ServiceName: serviceName, - EnableLRS: 
cluster.GetLrsServer().GetSelf() != nil, - SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), - PrioritizedClusterNames: prioritizedClusters, - }, nil + ret := ClusterUpdate{ + ClusterName: cluster.GetName(), + EnableLRS: cluster.GetLrsServer().GetSelf() != nil, + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + } + + // Validate and set cluster type from the response. + switch { + case cluster.GetType() == v3clusterpb.Cluster_EDS: + if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { + return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) + } + ret.ClusterType = ClusterTypeEDS + ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + return ret, nil + case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: + ret.ClusterType = ClusterTypeLogicalDNS + return ret, nil + case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": + clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + ret.ClusterType = ClusterTypeAggregate + ret.PrioritizedClusterNames = clusters.Clusters + return ret, nil + default: + return ClusterUpdate{}, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } } // securityConfigFromCluster extracts the relevant security configuration from From 24d03d9f769106b3c96b4145244ce682999d3d88 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 27 Apr 2021 15:22:25 -0700 Subject: [PATCH 038/998] xds/priority: add ignore reresolution boolean to config (#4275) --- xds/internal/balancer/priority/balancer.go | 6 +- .../balancer/priority/balancer_child.go | 20 +- .../balancer/priority/balancer_test.go | 266 ++++++++++++++---- xds/internal/balancer/priority/config.go | 3 +- 
xds/internal/balancer/priority/config_test.go | 9 +- .../balancer/priority/ignore_resolve_now.go | 73 +++++ .../priority/ignore_resolve_now_test.go | 106 +++++++ 7 files changed, 417 insertions(+), 66 deletions(-) create mode 100644 xds/internal/balancer/priority/ignore_resolve_now.go create mode 100644 xds/internal/balancer/priority/ignore_resolve_now_test.go diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index d5c99b0b9146..e12a5068737f 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -131,7 +131,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // the balancer isn't built, because this child can be a low // priority. If necessary, it will be built when syncing priorities. cb := newChildBalancer(name, b, bb) - cb.updateConfig(newSubConfig.Config.Config, resolver.State{ + cb.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, @@ -146,13 +146,13 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // rebuild, rebuild will happen when syncing priorities. if currentChild.bb.Name() != bb.Name() { currentChild.stop() - currentChild.bb = bb + currentChild.updateBuilder(bb) } // Update config and address, but note that this doesn't send the // updates to child balancer (the child balancer might not be built, if // it's a low priority). 
- currentChild.updateConfig(newSubConfig.Config.Config, resolver.State{ + currentChild.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, diff --git a/xds/internal/balancer/priority/balancer_child.go b/xds/internal/balancer/priority/balancer_child.go index d012ad4e4593..600705da01af 100644 --- a/xds/internal/balancer/priority/balancer_child.go +++ b/xds/internal/balancer/priority/balancer_child.go @@ -29,10 +29,11 @@ import ( type childBalancer struct { name string parent *priorityBalancer - bb balancer.Builder + bb *ignoreResolveNowBalancerBuilder - config serviceconfig.LoadBalancingConfig - rState resolver.State + ignoreReresolutionRequests bool + config serviceconfig.LoadBalancingConfig + rState resolver.State started bool state balancer.State @@ -44,7 +45,7 @@ func newChildBalancer(name string, parent *priorityBalancer, bb balancer.Builder return &childBalancer{ name: name, parent: parent, - bb: bb, + bb: newIgnoreResolveNowBalancerBuilder(bb, false), started: false, // Start with the connecting state and picker with re-pick error, so // that when a priority switch causes this child picked before it's @@ -56,10 +57,16 @@ func newChildBalancer(name string, parent *priorityBalancer, bb balancer.Builder } } +// updateBuilder updates builder for the child, but doesn't build. +func (cb *childBalancer) updateBuilder(bb balancer.Builder) { + cb.bb = newIgnoreResolveNowBalancerBuilder(bb, cb.ignoreReresolutionRequests) +} + // updateConfig sets childBalancer's config and state, but doesn't send update to // the child balancer. 
-func (cb *childBalancer) updateConfig(config serviceconfig.LoadBalancingConfig, rState resolver.State) { - cb.config = config +func (cb *childBalancer) updateConfig(child *Child, rState resolver.State) { + cb.ignoreReresolutionRequests = child.IgnoreReresolutionRequests + cb.config = child.Config.Config cb.rState = rState } @@ -76,6 +83,7 @@ func (cb *childBalancer) start() { // sendUpdate sends the addresses and config to the child balancer. func (cb *childBalancer) sendUpdate() { + cb.bb.updateIgnoreResolveNow(cb.ignoreReresolutionRequests) // TODO: return and aggregate the returned error in the parent. err := cb.parent.bg.UpdateClientConnState(cb.name, balancer.ClientConnState{ ResolverState: cb.rState, diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index d546216123d1..61e3dee94d9a 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -21,6 +21,7 @@ package priority import ( + "context" "fmt" "testing" "time" @@ -99,8 +100,8 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -136,9 +137,9 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: 
roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -166,8 +167,8 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -206,8 +207,8 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -273,9 +274,9 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -332,8 +333,8 @@ func (s) 
TestPriority_SwitchPriority(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -389,8 +390,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -484,8 +485,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -538,9 +539,9 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": 
{Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -596,9 +597,9 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -711,8 +712,8 @@ func (s) TestPriority_InitTimeout(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -781,8 +782,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -842,8 +843,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) 
{ }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -886,7 +887,7 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -949,8 +950,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -984,8 +985,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1047,7 +1048,7 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": 
{&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1091,8 +1092,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1128,8 +1129,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-1", "child-0"}, }, @@ -1192,8 +1193,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1244,8 +1245,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": 
{&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-1", "child-0"}, }, @@ -1292,8 +1293,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1342,7 +1343,7 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1399,7 +1400,7 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1458,7 +1459,7 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1502,7 +1503,7 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": 
{&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1537,7 +1538,7 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: testRRBalancerName}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: testRRBalancerName}}, }, Priorities: []string{"child-0"}, }, @@ -1602,7 +1603,7 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { }, BalancerConfig: &LBConfig{ Children: map[string]*Child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: inlineUpdateBalancerName}}, + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: inlineUpdateBalancerName}}, }, Priorities: []string{"child-0"}, }, @@ -1618,3 +1619,164 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { } } } + +// When the child policy's configured to ignore reresolution requests, the +// ResolveNow() calls from this child should be all ignored. +func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // One children, with priorities [0], with one backend, reresolution is + // ignored. 
+ if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": { + Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + IgnoreReresolutionRequests: true, + }, + }, + Priorities: []string{"child-0"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + // This is the balancer.ClientConn that the inner resolverNowBalancer is + // built with. + balancerCCI, err := resolveNowBalancerCCCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for ClientConn from balancer builder") + } + balancerCC := balancerCCI.(balancer.ClientConn) + + // Since IgnoreReresolutionRequests was set to true, all ResolveNow() calls + // should be ignored. + for i := 0; i < 5; i++ { + balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + } + select { + case <-cc.ResolveNowCh: + t.Fatalf("got unexpected ResolveNow() call") + case <-time.After(time.Millisecond * 100): + } + + // Send another update to set IgnoreReresolutionRequests to false. + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": { + Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + IgnoreReresolutionRequests: false, + }, + }, + Priorities: []string{"child-0"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Call ResolveNow() on the CC, it should be forwarded. 
+ balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-time.After(time.Second): + t.Fatalf("timeout waiting for ResolveNow()") + } + +} + +// When the child policy's configured to ignore reresolution requests, the +// ResolveNow() calls from this child should be all ignored, from the other +// children are forwarded. +func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // One children, with priorities [0, 1], each with one backend. + // Reresolution is ignored for p0. + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": { + Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + IgnoreReresolutionRequests: true, + }, + "child-1": { + Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + }, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + // This is the balancer.ClientConn from p0. + balancerCCI0, err := resolveNowBalancerCCCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for ClientConn from balancer builder 0") + } + balancerCC0 := balancerCCI0.(balancer.ClientConn) + + // Set p0 to transient failure, p1 will be started. 
+ addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + // This is the balancer.ClientConn from p1. + ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second) + defer cancel1() + balancerCCI1, err := resolveNowBalancerCCCh.Receive(ctx1) + if err != nil { + t.Fatalf("timeout waiting for ClientConn from balancer builder 1") + } + balancerCC1 := balancerCCI1.(balancer.ClientConn) + + // Since IgnoreReresolutionRequests was set to true for p0, ResolveNow() + // from p0 should all be ignored. + for i := 0; i < 5; i++ { + balancerCC0.ResolveNow(resolver.ResolveNowOptions{}) + } + select { + case <-cc.ResolveNowCh: + t.Fatalf("got unexpected ResolveNow() call") + case <-time.After(time.Millisecond * 100): + } + + // But IgnoreReresolutionRequests was false for p1, ResolveNow() from p1 + // should be forwarded. + balancerCC1.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-time.After(time.Second): + t.Fatalf("timeout waiting for ResolveNow()") + } +} diff --git a/xds/internal/balancer/priority/config.go b/xds/internal/balancer/priority/config.go index 7704f21d13bd..c9cb16e323f0 100644 --- a/xds/internal/balancer/priority/config.go +++ b/xds/internal/balancer/priority/config.go @@ -28,7 +28,8 @@ import ( // Child is a child of priority balancer. type Child struct { - Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"` + Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"` + IgnoreReresolutionRequests bool } // LBConfig represents priority balancer's config. 
diff --git a/xds/internal/balancer/priority/config_test.go b/xds/internal/balancer/priority/config_test.go index f3a09fe3a32e..42498bfa2b74 100644 --- a/xds/internal/balancer/priority/config_test.go +++ b/xds/internal/balancer/priority/config_test.go @@ -65,7 +65,7 @@ func TestParseConfig(t *testing.T) { js: `{ "priorities": ["child-1", "child-2", "child-3"], "children": { - "child-1": {"config": [{"round_robin":{}}]}, + "child-1": {"config": [{"round_robin":{}}], "ignoreReresolutionRequests": true}, "child-2": {"config": [{"round_robin":{}}]}, "child-3": {"config": [{"round_robin":{}}]} } @@ -74,17 +74,18 @@ func TestParseConfig(t *testing.T) { want: &LBConfig{ Children: map[string]*Child{ "child-1": { - &internalserviceconfig.BalancerConfig{ + Config: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, + IgnoreReresolutionRequests: true, }, "child-2": { - &internalserviceconfig.BalancerConfig{ + Config: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, }, "child-3": { - &internalserviceconfig.BalancerConfig{ + Config: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, }, diff --git a/xds/internal/balancer/priority/ignore_resolve_now.go b/xds/internal/balancer/priority/ignore_resolve_now.go new file mode 100644 index 000000000000..9a9f4777269a --- /dev/null +++ b/xds/internal/balancer/priority/ignore_resolve_now.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +type ignoreResolveNowBalancerBuilder struct { + balancer.Builder + ignoreResolveNow *uint32 +} + +// If `ignore` is true, all `ResolveNow()` from the balancer built from this +// builder will be ignored. +// +// `ignore` can be updated later by `updateIgnoreResolveNow`, and the update +// will be propagated to all the old and new balancers built with this. +func newIgnoreResolveNowBalancerBuilder(bb balancer.Builder, ignore bool) *ignoreResolveNowBalancerBuilder { + ret := &ignoreResolveNowBalancerBuilder{ + Builder: bb, + ignoreResolveNow: new(uint32), + } + ret.updateIgnoreResolveNow(ignore) + return ret +} + +func (irnbb *ignoreResolveNowBalancerBuilder) updateIgnoreResolveNow(b bool) { + if b { + atomic.StoreUint32(irnbb.ignoreResolveNow, 1) + return + } + atomic.StoreUint32(irnbb.ignoreResolveNow, 0) + +} + +func (irnbb *ignoreResolveNowBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return irnbb.Builder.Build(&ignoreResolveNowClientConn{ + ClientConn: cc, + ignoreResolveNow: irnbb.ignoreResolveNow, + }, opts) +} + +type ignoreResolveNowClientConn struct { + balancer.ClientConn + ignoreResolveNow *uint32 +} + +func (i ignoreResolveNowClientConn) ResolveNow(o resolver.ResolveNowOptions) { + if atomic.LoadUint32(i.ignoreResolveNow) != 0 { + return + } + i.ClientConn.ResolveNow(o) +} diff --git a/xds/internal/balancer/priority/ignore_resolve_now_test.go b/xds/internal/balancer/priority/ignore_resolve_now_test.go new file mode 100644 index 000000000000..452753de6c7b --- /dev/null +++ b/xds/internal/balancer/priority/ignore_resolve_now_test.go @@ -0,0 +1,106 @@ +// +build go1.12 + +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "context" + "testing" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + grpctestutils "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/testutils" +) + +const resolveNowBalancerName = "test-resolve-now-balancer" + +var resolveNowBalancerCCCh = grpctestutils.NewChannel() + +type resolveNowBalancerBuilder struct { + balancer.Builder +} + +func (r *resolveNowBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + resolveNowBalancerCCCh.Send(cc) + return r.Builder.Build(cc, opts) +} + +func (r *resolveNowBalancerBuilder) Name() string { + return resolveNowBalancerName +} + +func init() { + balancer.Register(&resolveNowBalancerBuilder{ + Builder: balancer.Get(roundrobin.Name), + }) +} + +func (s) TestIgnoreResolveNowBalancerBuilder(t *testing.T) { + resolveNowBB := balancer.Get(resolveNowBalancerName) + // Create a build wrapper, but will not ignore ResolveNow(). 
+ ignoreResolveNowBB := newIgnoreResolveNowBalancerBuilder(resolveNowBB, false) + + cc := testutils.NewTestClientConn(t) + tb := ignoreResolveNowBB.Build(cc, balancer.BuildOptions{}) + defer tb.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + // This is the balancer.ClientConn that the inner resolverNowBalancer is + // built with. + balancerCCI, err := resolveNowBalancerCCCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for ClientConn from balancer builder") + } + balancerCC := balancerCCI.(balancer.ClientConn) + + // Call ResolveNow() on the CC, it should be forwarded. + balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-time.After(time.Second): + t.Fatalf("timeout waiting for ResolveNow()") + } + + // Update ignoreResolveNow to true, call ResolveNow() on the CC, they should + // all be ignored. + ignoreResolveNowBB.updateIgnoreResolveNow(true) + for i := 0; i < 5; i++ { + balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + } + select { + case <-cc.ResolveNowCh: + t.Fatalf("got unexpected ResolveNow() call") + case <-time.After(time.Millisecond * 100): + } + + // Update ignoreResolveNow to false, new ResolveNow() calls should be + // forwarded. + ignoreResolveNowBB.updateIgnoreResolveNow(false) + balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-time.After(time.Second): + t.Fatalf("timeout waiting for ResolveNow()") + } +} From b602d17e459c0e4d64e24b6d07875f58d5f40f0e Mon Sep 17 00:00:00 2001 From: irfan sharif Date: Wed, 28 Apr 2021 13:05:50 -0400 Subject: [PATCH 039/998] metadata: reduce memory footprint in FromOutgoingContext (#4360) When Looking at memory profiles for cockroachdb/cockroach, we observed that the intermediate metadata.MD array constructed to iterate over appended metadata escaped to the heap. Fortunately, this is easily rectifiable. 
go build -gcflags '-m' google.golang.org/grpc/metadata ... google.golang.org/grpc/metadata/metadata.go:198:13: make([]MD, 0, len(raw.added) + 1) escapes to heap --- metadata/metadata.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/metadata/metadata.go b/metadata/metadata.go index cf6d1b94781c..e4cbea917498 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -75,13 +75,9 @@ func Pairs(kv ...string) MD { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) } return md } @@ -195,12 +191,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) + out := raw.md.Copy() + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } } - return Join(mds...), ok + return out, ok } type rawMD struct { From 91d8f0c916d76f2a5aac9e846cd7ffcb838db769 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 28 Apr 2021 18:11:45 -0700 Subject: [PATCH 040/998] serviceconfig: support marshalling BalancerConfig to JSON (#4368) --- internal/serviceconfig/serviceconfig.go | 16 ++++++ internal/serviceconfig/serviceconfig_test.go | 53 ++++++++++++++++++-- 2 files changed, 66 insertions(+), 3 deletions(-) diff --git a/internal/serviceconfig/serviceconfig.go b/internal/serviceconfig/serviceconfig.go index bd4b8875f1a7..c0634d152c2e 100644 --- 
a/internal/serviceconfig/serviceconfig.go +++ b/internal/serviceconfig/serviceconfig.go @@ -46,6 +46,22 @@ type BalancerConfig struct { type intermediateBalancerConfig []map[string]json.RawMessage +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + // UnmarshalJSON implements the json.Unmarshaler interface. // // ServiceConfig contains a list of loadBalancingConfigs, each with a name and diff --git a/internal/serviceconfig/serviceconfig_test.go b/internal/serviceconfig/serviceconfig_test.go index b8abaae027ef..770ee2efeb83 100644 --- a/internal/serviceconfig/serviceconfig_test.go +++ b/internal/serviceconfig/serviceconfig_test.go @@ -29,16 +29,18 @@ import ( ) type testBalancerConfigType struct { - externalserviceconfig.LoadBalancingConfig + externalserviceconfig.LoadBalancingConfig `json:"-"` + + Check bool `json:"check"` } -var testBalancerConfig = testBalancerConfigType{} +var testBalancerConfig = testBalancerConfigType{Check: true} const ( testBalancerBuilderName = "test-bb" testBalancerBuilderNotParserName = "test-bb-not-parser" - testBalancerConfigJSON = `{"test-balancer-config":"true"}` + testBalancerConfigJSON = `{"check":true}` ) type testBalancerBuilder struct { @@ -133,3 +135,48 @@ func TestBalancerConfigUnmarshalJSON(t *testing.T) { }) } } + +func TestBalancerConfigMarshalJSON(t *testing.T) { + tests := []struct { + name string + bc BalancerConfig + wantJSON string + }{ + { + name: "OK", + bc: BalancerConfig{ + Name: testBalancerBuilderName, + Config: testBalancerConfig, + }, + wantJSON: fmt.Sprintf(`[{"test-bb": 
{"check":true}}]`), + }, + { + name: "OK config is nil", + bc: BalancerConfig{ + Name: testBalancerBuilderNotParserName, + Config: nil, // nil should be marshalled to an empty config "{}". + }, + wantJSON: fmt.Sprintf(`[{"test-bb-not-parser": {}}]`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := tt.bc.MarshalJSON() + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + + if str := string(b); str != tt.wantJSON { + t.Fatalf("got str %q, want %q", str, tt.wantJSON) + } + + var bc BalancerConfig + if err := bc.UnmarshalJSON(b); err != nil { + t.Errorf("failed to mnmarshal: %v", err) + } + if !cmp.Equal(bc, tt.bc) { + t.Errorf("diff: %v", cmp.Diff(bc, tt.bc)) + } + }) + } +} From c3b66015bd51d33d3e0a75ea5086defcb9d05e64 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 29 Apr 2021 11:56:50 -0700 Subject: [PATCH 041/998] xds/circuit_breaking: use cluster name as key, not EDS service name (#4372) --- xds/internal/balancer/edsbalancer/eds.go | 2 +- xds/internal/balancer/edsbalancer/eds_test.go | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 1b5191aa1037..09e8b28748a3 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -219,7 +219,7 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) { x.logger.Warningf("failed to update xDS client: %v", err) } - x.edsImpl.updateServiceRequestsConfig(cfg.EDSServiceName, cfg.MaxConcurrentRequests) + x.edsImpl.updateServiceRequestsConfig(cfg.ClusterName, cfg.MaxConcurrentRequests) // We will update the edsImpl with the new child policy, if we got a // different one. 
diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index b986654bc409..37ecb1acfc69 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -354,6 +354,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgA, + ClusterName: testEDSClusterName, EDSServiceName: testServiceName, }, }); err != nil { @@ -367,7 +368,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsLB.waitForChildPolicy(ctx, lbCfgA); err != nil { t.Fatal(err) } - if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { + if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { t.Fatal(err) } if err := edsLB.waitForCountMaxUpdate(ctx, nil); err != nil { @@ -382,6 +383,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgB, + ClusterName: testEDSClusterName, EDSServiceName: testServiceName, MaxConcurrentRequests: &testCountMax, }, @@ -391,7 +393,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsLB.waitForChildPolicy(ctx, lbCfgB); err != nil { t.Fatal(err) } - if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { + if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { // Counter is updated even though the service name didn't change. The // eds_impl will compare the service names, and skip if it didn't change. t.Fatal(err) @@ -670,7 +672,7 @@ func (s) TestCounterUpdate(t *testing.T) { // Update should trigger counter update with provided service name. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - EDSServiceName: "foobar-1", + ClusterName: "foobar-1", MaxConcurrentRequests: &testCountMax, }, }); err != nil { From aa3ef8fb8ff6c92134743e780cf659eaa7eeccbc Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 29 Apr 2021 12:17:56 -0700 Subject: [PATCH 042/998] internal: regenerate proto (#4373) --- .../internal/proto/grpc_lookup_v1/rls.pb.go | 151 +++++++++++++----- 1 file changed, 111 insertions(+), 40 deletions(-) diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go b/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go index 7741e6649180..9de23aae645c 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go @@ -39,6 +39,56 @@ const ( // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 +// Possible reasons for making a request. +type RouteLookupRequest_Reason int32 + +const ( + RouteLookupRequest_REASON_UNKNOWN RouteLookupRequest_Reason = 0 // Unused + RouteLookupRequest_REASON_MISS RouteLookupRequest_Reason = 1 // No data available in local cache + RouteLookupRequest_REASON_STALE RouteLookupRequest_Reason = 2 // Data in local cache is stale +) + +// Enum value maps for RouteLookupRequest_Reason. 
+var ( + RouteLookupRequest_Reason_name = map[int32]string{ + 0: "REASON_UNKNOWN", + 1: "REASON_MISS", + 2: "REASON_STALE", + } + RouteLookupRequest_Reason_value = map[string]int32{ + "REASON_UNKNOWN": 0, + "REASON_MISS": 1, + "REASON_STALE": 2, + } +) + +func (x RouteLookupRequest_Reason) Enum() *RouteLookupRequest_Reason { + p := new(RouteLookupRequest_Reason) + *p = x + return p +} + +func (x RouteLookupRequest_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteLookupRequest_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_lookup_v1_rls_proto_enumTypes[0].Descriptor() +} + +func (RouteLookupRequest_Reason) Type() protoreflect.EnumType { + return &file_grpc_lookup_v1_rls_proto_enumTypes[0] +} + +func (x RouteLookupRequest_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteLookupRequest_Reason.Descriptor instead. +func (RouteLookupRequest_Reason) EnumDescriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0, 0} +} + type RouteLookupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -59,6 +109,8 @@ type RouteLookupRequest struct { // Target type allows the client to specify what kind of target format it // would like from RLS to allow it to find the regional server, e.g. "grpc". TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` + // Reason for making this request. + Reason RouteLookupRequest_Reason `protobuf:"varint,5,opt,name=reason,proto3,enum=grpc.lookup.v1.RouteLookupRequest_Reason" json:"reason,omitempty"` // Map of key values extracted via key builders for the gRPC or HTTP request. 
KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -118,6 +170,13 @@ func (x *RouteLookupRequest) GetTargetType() string { return "" } +func (x *RouteLookupRequest) GetReason() RouteLookupRequest_Reason { + if x != nil { + return x.Reason + } + return RouteLookupRequest_REASON_UNKNOWN +} + func (x *RouteLookupRequest) GetKeyMap() map[string]string { if x != nil { return x.KeyMap @@ -191,41 +250,49 @@ var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0xed, 0x01, 0x0a, 0x12, 0x52, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0xf1, 0x02, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, - 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 
0x4b, 0x65, 0x79, 0x4d, - 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, - 0x39, 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, - 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, - 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x52, 
0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, + 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, + 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, + 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, + 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x22, 0x5e, + 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, + 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, + 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 
0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, - 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, + 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -240,21 +307,24 @@ func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte { return file_grpc_lookup_v1_rls_proto_rawDescData } +var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_grpc_lookup_v1_rls_proto_goTypes = 
[]interface{}{ - (*RouteLookupRequest)(nil), // 0: grpc.lookup.v1.RouteLookupRequest - (*RouteLookupResponse)(nil), // 1: grpc.lookup.v1.RouteLookupResponse - nil, // 2: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + (RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason + (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest + (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse + nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry } var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{ - 2, // 0: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry - 0, // 1: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest - 1, // 2: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason + 3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + 1, // 2: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest + 2, // 3: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_grpc_lookup_v1_rls_proto_init() } @@ -293,13 +363,14 @@ func file_grpc_lookup_v1_rls_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_lookup_v1_rls_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 3, NumExtensions: 0, NumServices: 1, }, GoTypes: file_grpc_lookup_v1_rls_proto_goTypes, DependencyIndexes: file_grpc_lookup_v1_rls_proto_depIdxs, + EnumInfos: file_grpc_lookup_v1_rls_proto_enumTypes, MessageInfos: file_grpc_lookup_v1_rls_proto_msgTypes, }.Build() File_grpc_lookup_v1_rls_proto = out.File From 28078834f35b944281662807d8ec071645c37307 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 29 Apr 2021 21:44:26 -0700 Subject: [PATCH 043/998] grpc: call balancer.Close() before returning from ccBalancerWrapper.close() (#4364) --- balancer_conn_wrappers.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 4cc7f9159b16..38b48fcdc5b4 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -69,10 +69,10 @@ func (ccb *ccBalancerWrapper) watcher() { select { case t := <-ccb.scBuffer.Get(): ccb.scBuffer.Load() + ccb.balancerMu.Lock() if ccb.done.HasFired() { break } - ccb.balancerMu.Lock() su := t.(*scStateUpdate) ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) ccb.balancerMu.Unlock() @@ -80,7 +80,6 @@ func (ccb *ccBalancerWrapper) watcher() { } if ccb.done.HasFired() { - ccb.balancer.Close() ccb.mu.Lock() scs := ccb.subConns ccb.subConns = nil @@ -95,6 +94,9 @@ func (ccb *ccBalancerWrapper) watcher() { } func (ccb *ccBalancerWrapper) close() { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + ccb.balancer.Close() ccb.done.Fire() } @@ -119,13 +121,19 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() + if ccb.done.HasFired() { + return nil + } return 
ccb.balancer.UpdateClientConnState(*ccs) } func (ccb *ccBalancerWrapper) resolverError(err error) { ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + if ccb.done.HasFired() { + return + } ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { From b418de839e738968aa8f845584efd0d34da4bae8 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 30 Apr 2021 11:53:31 -0700 Subject: [PATCH 044/998] xds/eds: restart EDS watch after previous was canceled (#4378) --- xds/internal/balancer/edsbalancer/eds.go | 5 +++- xds/internal/balancer/edsbalancer/eds_test.go | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 09e8b28748a3..a5f653f737aa 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -314,10 +314,12 @@ func (x *edsBalancer) cancelWatch() { x.loadReportServer = nil if x.cancelLoadReport != nil { x.cancelLoadReport() + x.cancelLoadReport = nil } - x.edsServiceName = "" if x.cancelEndpointsWatch != nil { + x.edsToWatch = "" x.cancelEndpointsWatch() + x.cancelEndpointsWatch = nil } } @@ -331,6 +333,7 @@ func (x *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { x.loadReportServer = loadReportServer if x.cancelLoadReport != nil { x.cancelLoadReport() + x.cancelLoadReport = nil } if loadReportServer == nil { return nil diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 37ecb1acfc69..3fe66098973c 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -512,6 +512,18 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { t.Fatalf("eds impl 
expecting empty update, got %v", err) } + + // An update with the same service name should not trigger a new watch. + if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + }); err != nil { + t.Fatal(err) + } + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := xdsC.WaitForWatchEDS(sCtx); err != context.DeadlineExceeded { + t.Fatal("got unexpected new EDS watch") + } } // TestErrorFromResolver verifies that resolver errors are handled correctly. @@ -577,6 +589,17 @@ func (s) TestErrorFromResolver(t *testing.T) { if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { t.Fatalf("EDS impl got unexpected EDS response: %v", err) } + + // An update with the same service name should trigger a new watch, because + // the previous watch was canceled. + if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + }); err != nil { + t.Fatal(err) + } + if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { + t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) + } } // Given a list of resource names, verifies that EDS requests for the same are From ebd6aba6754d073a696e5727158cd0c917ce1019 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 3 May 2021 15:16:49 -0700 Subject: [PATCH 045/998] Revert "xds/cds: add separate fields for cluster name and eds service name" (#4382) This reverts PRs #4352 (and two follow up fixes #4372 #4378). Because the xds interop tests were flaky. Revert before the branch cut. 
--- .../balancer/cdsbalancer/cdsbalancer.go | 3 +- .../cdsbalancer/cdsbalancer_security_test.go | 14 +-- .../balancer/cdsbalancer/cdsbalancer_test.go | 16 ++-- xds/internal/balancer/edsbalancer/config.go | 8 +- xds/internal/balancer/edsbalancer/eds.go | 63 ++++--------- .../balancer/edsbalancer/eds_impl_test.go | 11 ++- xds/internal/balancer/edsbalancer/eds_test.go | 53 ++--------- .../edsbalancer/load_store_wrapper.go | 88 +++++++++++++++++++ xds/internal/client/cds_test.go | 75 +++++----------- xds/internal/client/client.go | 15 ++-- xds/internal/client/client_test.go | 4 +- xds/internal/client/v2/cds_test.go | 4 +- xds/internal/client/watchers_cluster_test.go | 16 ++-- xds/internal/client/xds.go | 75 +++++++++------- 14 files changed, 222 insertions(+), 223 deletions(-) create mode 100644 xds/internal/balancer/edsbalancer/load_store_wrapper.go diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index c97e92bcd02f..b991981c14c0 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -340,8 +340,7 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) } lbCfg := &edsbalancer.EDSConfig{ - ClusterName: update.cds.ClusterName, - EDSServiceName: update.cds.EDSServiceName, + EDSServiceName: update.cds.ServiceName, MaxConcurrentRequests: update.cds.MaxRequests, } if update.cds.EnableLRS { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 18a2298c5258..5c746cfa163c 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -60,7 +60,7 @@ var ( fpb1, fpb2 *fakeProviderBuilder bootstrapConfig *bootstrap.Config cdsUpdateWithGoodSecurityCfg = xdsclient.ClusterUpdate{ - ClusterName: 
serviceName, + ServiceName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default1", IdentityInstanceName: "default2", @@ -68,7 +68,7 @@ var ( }, } cdsUpdateWithMissingSecurityCfg = xdsclient.ClusterUpdate{ - ClusterName: serviceName, + ServiceName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "not-default", }, @@ -256,7 +256,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -312,7 +312,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -572,7 +572,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // an update which contains bad security config. So, we expect the CDS // balancer to forward this error to the EDS balancer and eventually the // channel needs to be put in a bad state. 
- cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -678,7 +678,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ - ClusterName: serviceName, + ServiceName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default1", SubjectAltNameMatchers: testSANMatchers, @@ -703,7 +703,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // Push another update with a new security configuration. cdsUpdate = xdsclient.ClusterUpdate{ - ClusterName: serviceName, + ServiceName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 13b17306df8b..4476a1532d05 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -197,7 +197,7 @@ func cdsCCS(cluster string) balancer.ClientConnState { // cdsBalancer to the edsBalancer. 
func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { lbCfg := &edsbalancer.EDSConfig{ - ClusterName: service, + EDSServiceName: service, MaxConcurrentRequests: countMax, } if enableLRS { @@ -354,12 +354,12 @@ func (s) TestHandleClusterUpdate(t *testing.T) { }{ { name: "happy-case-with-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, + cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, wantCCS: edsCCS(serviceName, nil, true), }, { name: "happy-case-without-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName}, + cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName}, wantCCS: edsCCS(serviceName, nil, false), }, } @@ -427,7 +427,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -512,7 +512,7 @@ func (s) TestResolverError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -561,7 +561,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -596,7 +596,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. var maxRequests uint32 = 1 - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName, MaxRequests: &maxRequests} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName, MaxRequests: &maxRequests} wantCCS := edsCCS(serviceName, &maxRequests, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -629,7 +629,7 @@ func (s) TestClose(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() diff --git a/xds/internal/balancer/edsbalancer/config.go b/xds/internal/balancer/edsbalancer/config.go index d1583e2bf276..11c1338c81f7 100644 --- a/xds/internal/balancer/edsbalancer/config.go +++ b/xds/internal/balancer/edsbalancer/config.go @@ -35,10 +35,8 @@ type EDSConfig struct { // FallBackPolicy represents the load balancing config for the // fallback. FallBackPolicy *loadBalancingConfig - // ClusterName is the cluster name. 
- ClusterName string - // EDSServiceName is the name to use in EDS query. If not set, use - // ClusterName. + // Name to use in EDS query. If not present, defaults to the server + // name from the target URI. EDSServiceName string // MaxConcurrentRequests is the max number of concurrent request allowed for // this service. If unset, default value 1024 is used. @@ -61,7 +59,6 @@ type EDSConfig struct { type edsConfigJSON struct { ChildPolicy []*loadBalancingConfig FallbackPolicy []*loadBalancingConfig - ClusterName string EDSServiceName string MaxConcurrentRequests *uint32 LRSLoadReportingServerName *string @@ -76,7 +73,6 @@ func (l *EDSConfig) UnmarshalJSON(data []byte) error { return err } - l.ClusterName = configJSON.ClusterName l.EDSServiceName = configJSON.EDSServiceName l.MaxConcurrentRequests = configJSON.MaxConcurrentRequests l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index a5f653f737aa..de724701df94 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -25,7 +25,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -69,7 +68,7 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp grpcUpdate: make(chan interface{}), xdsClientUpdate: make(chan *edsUpdate), childPolicyUpdate: buffer.NewUnbounded(), - loadWrapper: loadstore.NewWrapper(), + lsw: &loadStoreWrapper{}, config: &EDSConfig{}, } x.logger = prefixLogger(x) @@ -81,7 +80,7 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp } x.xdsClient = client - x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.loadWrapper, x.logger) + x.edsImpl = newEDSBalancer(x.cc, 
opts, x.enqueueChildBalancerState, x.lsw, x.logger) x.logger.Infof("Created") go x.run() return x @@ -139,14 +138,14 @@ type edsBalancer struct { xdsClientUpdate chan *edsUpdate childPolicyUpdate *buffer.Unbounded - xdsClient xdsClientInterface - loadWrapper *loadstore.Wrapper - config *EDSConfig // may change when passed a different service config - edsImpl edsBalancerImplInterface + xdsClient xdsClientInterface + lsw *loadStoreWrapper + config *EDSConfig // may change when passed a different service config + edsImpl edsBalancerImplInterface - clusterName string + // edsServiceName is the edsServiceName currently being watched, not + // necessary the edsServiceName from service config. edsServiceName string - edsToWatch string // this is edsServiceName if it's set, otherwise, it's clusterName. cancelEndpointsWatch func() loadReportServer *string // LRS is disabled if loadReporterServer is nil. cancelLoadReport func() @@ -219,7 +218,7 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) { x.logger.Warningf("failed to update xDS client: %v", err) } - x.edsImpl.updateServiceRequestsConfig(cfg.ClusterName, cfg.MaxConcurrentRequests) + x.edsImpl.updateServiceRequestsConfig(cfg.EDSServiceName, cfg.MaxConcurrentRequests) // We will update the edsImpl with the new child policy, if we got a // different one. @@ -242,35 +241,10 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) { // handleServiceConfigUpdate applies the service config update, watching a new // EDS service name and restarting LRS stream, as required. func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { - var updateLoadClusterAndService bool - if x.clusterName != config.ClusterName { - updateLoadClusterAndService = true - x.clusterName = config.ClusterName - x.edsImpl.updateClusterName(x.clusterName) - } + // Restart EDS watch when the edsServiceName has changed. 
if x.edsServiceName != config.EDSServiceName { - updateLoadClusterAndService = true x.edsServiceName = config.EDSServiceName - } - - // If EDSServiceName is set, use it to watch EDS. Otherwise, use the cluster - // name. - newEDSToWatch := config.EDSServiceName - if newEDSToWatch == "" { - newEDSToWatch = config.ClusterName - } - var restartEDSWatch bool - if x.edsToWatch != newEDSToWatch { - restartEDSWatch = true - x.edsToWatch = newEDSToWatch - } - - // Restart EDS watch when the eds name has changed. - if restartEDSWatch { x.startEndpointsWatch() - } - - if updateLoadClusterAndService { // TODO: this update for the LRS service name is too early. It should // only apply to the new EDS response. But this is applied to the RPCs // before the new EDS response. To fully fix this, the EDS balancer @@ -278,13 +252,14 @@ func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { // // This is OK for now, because we don't actually expect edsServiceName // to change. Fix this (a bigger change) will happen later. - x.loadWrapper.UpdateClusterAndService(x.clusterName, x.edsServiceName) + x.lsw.updateServiceName(x.edsServiceName) + x.edsImpl.updateClusterName(x.edsServiceName) } // Restart load reporting when the loadReportServer name has changed. 
if !equalStringPointers(x.loadReportServer, config.LrsLoadReportingServerName) { loadStore := x.startLoadReport(config.LrsLoadReportingServerName) - x.loadWrapper.UpdateLoadStore(loadStore) + x.lsw.updateLoadStore(loadStore) } return nil @@ -298,15 +273,14 @@ func (x *edsBalancer) startEndpointsWatch() { if x.cancelEndpointsWatch != nil { x.cancelEndpointsWatch() } - edsToWatch := x.edsToWatch - cancelEDSWatch := x.xdsClient.WatchEndpoints(edsToWatch, func(update xdsclient.EndpointsUpdate, err error) { + cancelEDSWatch := x.xdsClient.WatchEndpoints(x.edsServiceName, func(update xdsclient.EndpointsUpdate, err error) { x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, update) x.handleEDSUpdate(update, err) }) - x.logger.Infof("Watch started on resource name %v with xds-client %p", edsToWatch, x.xdsClient) + x.logger.Infof("Watch started on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) x.cancelEndpointsWatch = func() { cancelEDSWatch() - x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", edsToWatch, x.xdsClient) + x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) } } @@ -314,12 +288,10 @@ func (x *edsBalancer) cancelWatch() { x.loadReportServer = nil if x.cancelLoadReport != nil { x.cancelLoadReport() - x.cancelLoadReport = nil } + x.edsServiceName = "" if x.cancelEndpointsWatch != nil { - x.edsToWatch = "" x.cancelEndpointsWatch() - x.cancelEndpointsWatch = nil } } @@ -333,7 +305,6 @@ func (x *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { x.loadReportServer = loadReportServer if x.cancelLoadReport != nil { x.cancelLoadReport() - x.cancelLoadReport = nil } if loadReportServer == nil { return nil diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index 3cfc620a2400..c5e3071d10d8 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ 
b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -29,7 +29,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal" - "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -821,9 +820,9 @@ func (s) TestEDS_LoadReport(t *testing.T) { // implements the LoadStore() method to return the underlying load.Store to // be used. loadStore := load.NewStore() - lsWrapper := loadstore.NewWrapper() - lsWrapper.UpdateClusterAndService(testClusterNames[0], "") - lsWrapper.UpdateLoadStore(loadStore) + lsWrapper := &loadStoreWrapper{} + lsWrapper.updateServiceName(testClusterNames[0]) + lsWrapper.updateLoadStore(loadStore) cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, lsWrapper, nil) @@ -914,8 +913,8 @@ func (s) TestEDS_LoadReport(t *testing.T) { // TestEDS_LoadReportDisabled covers the case that LRS is disabled. It makes // sure the EDS implementation isn't broken (doesn't panic). func (s) TestEDS_LoadReportDisabled(t *testing.T) { - lsWrapper := loadstore.NewWrapper() - lsWrapper.UpdateClusterAndService(testClusterNames[0], "") + lsWrapper := &loadStoreWrapper{} + lsWrapper.updateServiceName(testClusterNames[0]) // Not calling lsWrapper.updateLoadStore(loadStore) because LRS is disabled. 
cc := testutils.NewTestClientConn(t) diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 3fe66098973c..5c9e5f0b1d53 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -354,7 +354,6 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgA, - ClusterName: testEDSClusterName, EDSServiceName: testServiceName, }, }); err != nil { @@ -368,7 +367,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsLB.waitForChildPolicy(ctx, lbCfgA); err != nil { t.Fatal(err) } - if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { + if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { t.Fatal(err) } if err := edsLB.waitForCountMaxUpdate(ctx, nil); err != nil { @@ -383,7 +382,6 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgB, - ClusterName: testEDSClusterName, EDSServiceName: testServiceName, MaxConcurrentRequests: &testCountMax, }, @@ -393,7 +391,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsLB.waitForChildPolicy(ctx, lbCfgB); err != nil { t.Fatal(err) } - if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { + if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { // Counter is updated even though the service name didn't change. The // eds_impl will compare the service names, and skip if it didn't change. t.Fatal(err) @@ -512,18 +510,6 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { t.Fatalf("eds impl expecting empty update, got %v", err) } - - // An update with the same service name should not trigger a new watch. 
- if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, - }); err != nil { - t.Fatal(err) - } - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if _, err := xdsC.WaitForWatchEDS(sCtx); err != context.DeadlineExceeded { - t.Fatal("got unexpected new EDS watch") - } } // TestErrorFromResolver verifies that resolver errors are handled correctly. @@ -589,17 +575,6 @@ func (s) TestErrorFromResolver(t *testing.T) { if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { t.Fatalf("EDS impl got unexpected EDS response: %v", err) } - - // An update with the same service name should trigger a new watch, because - // the previous watch was canceled. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, - }); err != nil { - t.Fatal(err) - } - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } } // Given a list of resource names, verifies that EDS requests for the same are @@ -611,7 +586,7 @@ func verifyExpectedRequests(ctx context.Context, fc *fakeclient.Client, resource if err := fc.WaitForCancelEDSWatch(ctx); err != nil { return fmt.Errorf("timed out when expecting resource %q", name) } - continue + return nil } resName, err := fc.WaitForWatchEDS(ctx) @@ -640,18 +615,6 @@ func (s) TestClientWatchEDS(t *testing.T) { } defer edsB.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // If eds service name is not set, should watch for cluster name. 
- if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ClusterName: "cluster-1"}, - }); err != nil { - t.Fatal(err) - } - if err := verifyExpectedRequests(ctx, xdsC, "cluster-1"); err != nil { - t.Fatal(err) - } - // Update with an non-empty edsServiceName should trigger an EDS watch for // the same. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ @@ -659,7 +622,9 @@ func (s) TestClientWatchEDS(t *testing.T) { }); err != nil { t.Fatal(err) } - if err := verifyExpectedRequests(ctx, xdsC, "", "foobar-1"); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := verifyExpectedRequests(ctx, xdsC, "foobar-1"); err != nil { t.Fatal(err) } @@ -695,7 +660,7 @@ func (s) TestCounterUpdate(t *testing.T) { // Update should trigger counter update with provided service name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - ClusterName: "foobar-1", + EDSServiceName: "foobar-1", MaxConcurrentRequests: &testCountMax, }, }); err != nil { @@ -729,7 +694,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - ClusterName: "foobar-1", + EDSServiceName: "foobar-1", }, }); err != nil { t.Fatal(err) @@ -748,7 +713,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - ClusterName: "foobar-2", + EDSServiceName: "foobar-2", }, }); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/edsbalancer/load_store_wrapper.go b/xds/internal/balancer/edsbalancer/load_store_wrapper.go new file mode 100644 index 000000000000..18904e47a42e --- /dev/null +++ b/xds/internal/balancer/edsbalancer/load_store_wrapper.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package edsbalancer + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/client/load" +) + +type loadStoreWrapper struct { + mu sync.RWMutex + service string + // Both store and perCluster will be nil if load reporting is disabled (EDS + // response doesn't have LRS server name). Note that methods on Store and + // perCluster all handle nil, so there's no need to check nil before calling + // them. 
+ store *load.Store + perCluster load.PerClusterReporter +} + +func (lsw *loadStoreWrapper) updateServiceName(service string) { + lsw.mu.Lock() + defer lsw.mu.Unlock() + if lsw.service == service { + return + } + lsw.service = service + lsw.perCluster = lsw.store.PerCluster(lsw.service, "") +} + +func (lsw *loadStoreWrapper) updateLoadStore(store *load.Store) { + lsw.mu.Lock() + defer lsw.mu.Unlock() + if store == lsw.store { + return + } + lsw.store = store + lsw.perCluster = lsw.store.PerCluster(lsw.service, "") +} + +func (lsw *loadStoreWrapper) CallStarted(locality string) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallStarted(locality) + } +} + +func (lsw *loadStoreWrapper) CallFinished(locality string, err error) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallFinished(locality, err) + } +} + +func (lsw *loadStoreWrapper) CallServerLoad(locality, name string, val float64) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallServerLoad(locality, name, val) + } +} + +func (lsw *loadStoreWrapper) CallDropped(category string) { + lsw.mu.RLock() + defer lsw.mu.RUnlock() + if lsw.perCluster != nil { + lsw.perCluster.CallDropped(category) + } +} diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index 0fa2b402ffc9..bb1117ec5349 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -28,14 +28,12 @@ import ( v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3tlspb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/internal/testutils" xdsinternal "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/version" @@ -47,7 +45,7 @@ const ( serviceName = "service" ) -var emptyUpdate = ClusterUpdate{ClusterName: clusterName, EnableLRS: false} +var emptyUpdate = ClusterUpdate{ServiceName: "", EnableLRS: false} func (s) TestValidateCluster_Failure(t *testing.T) { tests := []struct { @@ -143,35 +141,24 @@ func (s) TestValidateCluster_Success(t *testing.T) { { name: "happy-case-logical-dns", cluster: &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, + wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, }, { name: "happy-case-aggregate-v3", cluster: &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ - ClusterType: &v3clusterpb.Cluster_CustomClusterType{ - Name: "envoy.clusters.aggregate", - TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ - Clusters: []string{"a", "b", "c"}, - }), - }, + ClusterType: &v3clusterpb.Cluster_CustomClusterType{Name: "envoy.clusters.aggregate"}, }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ - ClusterName: clusterName, EnableLRS: false, ClusterType: ClusterTypeAggregate, - PrioritizedClusterNames: []string{"a", 
"b", "c"}, - }, + wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: ClusterTypeAggregate}, }, { name: "happy-case-no-service-name-no-lrs", cluster: &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -187,7 +174,6 @@ func (s) TestValidateCluster_Success(t *testing.T) { { name: "happy-case-no-lrs", cluster: &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -199,7 +185,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: false}, + wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: false}, }, { name: "happiest-case", @@ -221,7 +207,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true}, + wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, }, { name: "happiest-case-with-circuitbreakers", @@ -255,7 +241,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, + wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, }, } @@ -268,8 +254,8 @@ func (s) TestValidateCluster_Success(t *testing.T) { if err != nil { t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err) } - if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty()); diff != "" { - 
t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) + if !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) = %v, want: %v", test.cluster, update, test.wantUpdate) } }) } @@ -282,7 +268,6 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() cluster := &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -317,9 +302,8 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { }, } wantUpdate := ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ServiceName: serviceName, + EnableLRS: false, } gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { @@ -341,7 +325,6 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { identityCertName = "identityCert" rootPluginInstance = "rootPluginInstance" rootCertName = "rootCert" - clusterName = "cluster" serviceName = "service" sanExact = "san-exact" sanPrefix = "san-prefix" @@ -674,7 +657,6 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-no-identity-certs", cluster: &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -709,9 +691,8 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: 
rootCertName, @@ -721,7 +702,6 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-validation-context-provider-instance", cluster: &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -760,9 +740,8 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -774,7 +753,6 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-combined-validation-context", cluster: &v3clusterpb.Cluster{ - Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -827,9 +805,8 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -991,8 +968,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v2ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ClusterName: v2ClusterName, - EDSServiceName: v2Service, EnableLRS: true, + ServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, }, @@ -1006,8 +982,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v3ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v3ClusterName: { - ClusterName: v3ClusterName, - EDSServiceName: v3Service, EnableLRS: true, + ServiceName: v3Service, 
EnableLRS: true, Raw: v3ClusterAny, }, }, @@ -1021,13 +996,11 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v2ClusterAny, v3ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ClusterName: v2ClusterName, - EDSServiceName: v2Service, EnableLRS: true, + ServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, v3ClusterName: { - ClusterName: v3ClusterName, - EDSServiceName: v3Service, EnableLRS: true, + ServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, }, }, @@ -1057,13 +1030,11 @@ func (s) TestUnmarshalCluster(t *testing.T) { }, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ClusterName: v2ClusterName, - EDSServiceName: v2Service, EnableLRS: true, + ServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, v3ClusterName: { - ClusterName: v3ClusterName, - EDSServiceName: v3Service, EnableLRS: true, + ServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, }, "bad": {}, diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 4067536dda24..2daceede5398 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -375,23 +375,22 @@ const ( // interest to the registered CDS watcher. type ClusterUpdate struct { ClusterType ClusterType - // ClusterName is the clusterName being watched for through CDS. - ClusterName string - // EDSServiceName is an optional name for EDS. If it's not set, the balancer - // should watch ClusterName for the EDS resources. - EDSServiceName string + // ServiceName is the service name corresponding to the clusterName which + // is being watched for through CDS. + ServiceName string // EnableLRS indicates whether or not load should be reported through LRS. EnableLRS bool // SecurityCfg contains security configuration sent by the control plane. SecurityCfg *SecurityConfig // MaxRequests for circuit breaking, if any (otherwise nil). MaxRequests *uint32 - // PrioritizedClusterNames is used only for cluster type aggregate. 
It represents - // a prioritized list of cluster names. - PrioritizedClusterNames []string // Raw is the resource from the xds response. Raw *anypb.Any + + // PrioritizedClusterNames is used only for cluster type aggregate. It represents + // a prioritized list of cluster names. + PrioritizedClusterNames []string } // OverloadDropConfig contains the config to drop overloads. diff --git a/xds/internal/client/client_test.go b/xds/internal/client/client_test.go index d57bc20e2c2c..69930557b26e 100644 --- a/xds/internal/client/client_test.go +++ b/xds/internal/client/client_test.go @@ -185,13 +185,13 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} + wantUpdate := ClusterUpdate{ServiceName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { t.Fatal(err) } - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} + wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil { t.Fatal(err) diff --git a/xds/internal/client/v2/cds_test.go b/xds/internal/client/v2/cds_test.go index d868beb1831b..b56ae6108bbe 100644 --- a/xds/internal/client/v2/cds_test.go +++ b/xds/internal/client/v2/cds_test.go @@ -151,7 +151,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: goodCDSResponse2, wantErr: false, wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName2: {ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}, + goodClusterName2: {ServiceName: serviceName2, Raw: marshaledCluster2}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -164,7 +164,7 @@ func (s) TestCDSHandleResponse(t 
*testing.T) { cdsResponse: goodCDSResponse1, wantErr: false, wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName1: {ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, + goodClusterName1: {ServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, diff --git a/xds/internal/client/watchers_cluster_test.go b/xds/internal/client/watchers_cluster_test.go index c9837cd51978..2d10c7f43b5f 100644 --- a/xds/internal/client/watchers_cluster_test.go +++ b/xds/internal/client/watchers_cluster_test.go @@ -64,7 +64,7 @@ func (s) TestClusterWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} + wantUpdate := ClusterUpdate{ServiceName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { t.Fatal(err) @@ -128,7 +128,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} + wantUpdate := ClusterUpdate{ServiceName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate); err != nil { @@ -200,8 +200,8 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} + wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} + wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{ testCDSName + "1": wantUpdate1, testCDSName + "2": wantUpdate2, @@ -245,7 +245,7 @@ func (s) 
TestClusterWatchAfterCache(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} + wantUpdate := ClusterUpdate{ServiceName: testEDSName} client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) @@ -345,7 +345,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} + wantUpdate := ClusterUpdate{ServiceName: testEDSName} client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) @@ -402,8 +402,8 @@ func (s) TestClusterResourceRemoved(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} + wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} + wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{ testCDSName + "1": wantUpdate1, testCDSName + "2": wantUpdate2, diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index f64d22ce67d3..c0caf5cceb57 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -571,9 +571,42 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin return cluster.GetName(), cu, nil } +func clusterTypeFromCluster(cluster *v3clusterpb.Cluster) (ClusterType, string, []string, error) { + if cluster.GetType() == v3clusterpb.Cluster_EDS { + if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { + return 0, "", nil, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) + } + // If the Cluster message in the CDS response did not contain a + // serviceName, we will just use the clusterName for EDS. 
+ if cluster.GetEdsClusterConfig().GetServiceName() == "" { + return ClusterTypeEDS, cluster.GetName(), nil, nil + } + return ClusterTypeEDS, cluster.GetEdsClusterConfig().GetServiceName(), nil, nil + } + + if cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS { + return ClusterTypeLogicalDNS, cluster.GetName(), nil, nil + } + + if cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate" { + // Loop through ClusterConfig here to get cluster names. + clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return 0, "", nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return ClusterTypeAggregate, cluster.GetName(), clusters.Clusters, nil + } + return 0, "", nil, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) +} + func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { + emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false} if cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN { - return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + return emptyUpdate, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + clusterType, serviceName, prioritizedClusters, err := clusterTypeFromCluster(cluster) + if err != nil { + return emptyUpdate, err } // Process security configuration received from the control plane iff the @@ -582,40 +615,18 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu if env.ClientSideSecuritySupport { var err error if sc, err = securityConfigFromCluster(cluster); err != nil { - return ClusterUpdate{}, err + return emptyUpdate, err } } - ret := ClusterUpdate{ - ClusterName: cluster.GetName(), - EnableLRS: cluster.GetLrsServer().GetSelf() != nil, - 
SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), - } - - // Validate and set cluster type from the response. - switch { - case cluster.GetType() == v3clusterpb.Cluster_EDS: - if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { - return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) - } - ret.ClusterType = ClusterTypeEDS - ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() - return ret, nil - case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: - ret.ClusterType = ClusterTypeLogicalDNS - return ret, nil - case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": - clusters := &v3aggregateclusterpb.ClusterConfig{} - if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { - return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - ret.ClusterType = ClusterTypeAggregate - ret.PrioritizedClusterNames = clusters.Clusters - return ret, nil - default: - return ClusterUpdate{}, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) - } + return ClusterUpdate{ + ClusterType: clusterType, + ServiceName: serviceName, + EnableLRS: cluster.GetLrsServer().GetSelf() != nil, + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + PrioritizedClusterNames: prioritizedClusters, + }, nil } // securityConfigFromCluster extracts the relevant security configuration from From 75497df97f8bc9d5ec905d6e6b283a207eb3e9f0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 4 May 2021 14:38:47 -0700 Subject: [PATCH 046/998] meshca: remove meshca certificate provider implementation (#4385) --- .../tls/certprovider/meshca/builder.go | 165 ------- .../tls/certprovider/meshca/builder_test.go | 177 ------- credentials/tls/certprovider/meshca/config.go | 310 ------------ .../tls/certprovider/meshca/config_test.go | 
375 -------------- .../meshca/internal/v1/meshca.pb.go | 276 ----------- .../meshca/internal/v1/meshca_grpc.pb.go | 110 ----- .../tls/certprovider/meshca/logging.go | 36 -- credentials/tls/certprovider/meshca/plugin.go | 289 ----------- .../tls/certprovider/meshca/plugin_test.go | 459 ------------------ regenerate.sh | 10 - xds/go113.go | 25 - 11 files changed, 2232 deletions(-) delete mode 100644 credentials/tls/certprovider/meshca/builder.go delete mode 100644 credentials/tls/certprovider/meshca/builder_test.go delete mode 100644 credentials/tls/certprovider/meshca/config.go delete mode 100644 credentials/tls/certprovider/meshca/config_test.go delete mode 100644 credentials/tls/certprovider/meshca/internal/v1/meshca.pb.go delete mode 100644 credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go delete mode 100644 credentials/tls/certprovider/meshca/logging.go delete mode 100644 credentials/tls/certprovider/meshca/plugin.go delete mode 100644 credentials/tls/certprovider/meshca/plugin_test.go delete mode 100644 xds/go113.go diff --git a/credentials/tls/certprovider/meshca/builder.go b/credentials/tls/certprovider/meshca/builder.go deleted file mode 100644 index 4b8af7c9b3c5..000000000000 --- a/credentials/tls/certprovider/meshca/builder.go +++ /dev/null @@ -1,165 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package meshca - -import ( - "crypto/x509" - "encoding/json" - "fmt" - "sync" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/sts" - "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/internal/backoff" -) - -const pluginName = "mesh_ca" - -// For overriding in unit tests. -var ( - grpcDialFunc = grpc.Dial - backoffFunc = backoff.DefaultExponential.Backoff -) - -func init() { - certprovider.Register(newPluginBuilder()) -} - -func newPluginBuilder() *pluginBuilder { - return &pluginBuilder{clients: make(map[ccMapKey]*refCountedCC)} -} - -// Key for the map containing ClientConns to the MeshCA server. Only the server -// name and the STS options (which is used to create call creds) from the plugin -// configuration determine if two configs can share the same ClientConn. Hence -// only those form the key to this map. -type ccMapKey struct { - name string - stsOpts sts.Options -} - -// refCountedCC wraps a grpc.ClientConn to MeshCA along with a reference count. -type refCountedCC struct { - cc *grpc.ClientConn - refCnt int -} - -// pluginBuilder is an implementation of the certprovider.Builder interface, -// which builds certificate provider instances to get certificates signed from -// the MeshCA. -type pluginBuilder struct { - // A collection of ClientConns to the MeshCA server along with a reference - // count. Provider instances whose config point to the same server name will - // end up sharing the ClientConn. - mu sync.Mutex - clients map[ccMapKey]*refCountedCC -} - -// ParseConfig parses the configuration to be passed to the MeshCA plugin -// implementation. Expects the config to be a json.RawMessage which contains a -// serialized JSON representation of the meshca_experimental.GoogleMeshCaConfig -// proto message. -// -// Takes care of sharing the ClientConn to the MeshCA server among -// different plugin instantiations. 
-func (b *pluginBuilder) ParseConfig(c interface{}) (*certprovider.BuildableConfig, error) { - data, ok := c.(json.RawMessage) - if !ok { - return nil, fmt.Errorf("meshca: unsupported config type: %T", c) - } - cfg, err := pluginConfigFromJSON(data) - if err != nil { - return nil, err - } - return certprovider.NewBuildableConfig(pluginName, cfg.canonical(), func(opts certprovider.BuildOptions) certprovider.Provider { - return b.buildFromConfig(cfg, opts) - }), nil -} - -// buildFromConfig builds a certificate provider instance for the given config -// and options. Provider instances are shared wherever possible. -func (b *pluginBuilder) buildFromConfig(cfg *pluginConfig, opts certprovider.BuildOptions) certprovider.Provider { - b.mu.Lock() - defer b.mu.Unlock() - - ccmk := ccMapKey{ - name: cfg.serverURI, - stsOpts: cfg.stsOpts, - } - rcc, ok := b.clients[ccmk] - if !ok { - // STS call credentials take care of exchanging a locally provisioned - // JWT token for an access token which will be accepted by the MeshCA. - callCreds, err := sts.NewCredentials(cfg.stsOpts) - if err != nil { - logger.Errorf("sts.NewCredentials() failed: %v", err) - return nil - } - - // MeshCA is a public endpoint whose certificate is Web-PKI compliant. - // So, we just need to use the system roots to authenticate the MeshCA. 
- cp, err := x509.SystemCertPool() - if err != nil { - logger.Errorf("x509.SystemCertPool() failed: %v", err) - return nil - } - transportCreds := credentials.NewClientTLSFromCert(cp, "") - - cc, err := grpcDialFunc(cfg.serverURI, grpc.WithTransportCredentials(transportCreds), grpc.WithPerRPCCredentials(callCreds)) - if err != nil { - logger.Errorf("grpc.Dial(%s) failed: %v", cfg.serverURI, err) - return nil - } - - rcc = &refCountedCC{cc: cc} - b.clients[ccmk] = rcc - } - rcc.refCnt++ - - p := newProviderPlugin(providerParams{ - cc: rcc.cc, - cfg: cfg, - opts: opts, - backoff: backoffFunc, - doneFunc: func() { - // The plugin implementation will invoke this function when it is - // being closed, and here we take care of closing the ClientConn - // when there are no more plugins using it. We need to acquire the - // lock before accessing the rcc from the enclosing function. - b.mu.Lock() - defer b.mu.Unlock() - rcc.refCnt-- - if rcc.refCnt == 0 { - logger.Infof("Closing grpc.ClientConn to %s", ccmk.name) - rcc.cc.Close() - delete(b.clients, ccmk) - } - }, - }) - return p -} - -// Name returns the MeshCA plugin name. -func (b *pluginBuilder) Name() string { - return pluginName -} diff --git a/credentials/tls/certprovider/meshca/builder_test.go b/credentials/tls/certprovider/meshca/builder_test.go deleted file mode 100644 index 79035d008d9e..000000000000 --- a/credentials/tls/certprovider/meshca/builder_test.go +++ /dev/null @@ -1,177 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package meshca - -import ( - "context" - "encoding/json" - "fmt" - "testing" - - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/internal/testutils" -) - -func overrideHTTPFuncs() func() { - // Directly override the functions which are used to read the zone and - // audience instead of overriding the http.Client. - origReadZone := readZoneFunc - readZoneFunc = func(httpDoer) string { return "test-zone" } - origReadAudience := readAudienceFunc - readAudienceFunc = func(httpDoer) string { return "test-audience" } - return func() { - readZoneFunc = origReadZone - readAudienceFunc = origReadAudience - } -} - -func (s) TestBuildSameConfig(t *testing.T) { - defer overrideHTTPFuncs()() - - // We will attempt to create `cnt` number of providers. So we create a - // channel of the same size here, even though we expect only one ClientConn - // to be pushed into this channel. This makes sure that even if more than - // one ClientConn ends up being created, the Build() call does not block. - const cnt = 5 - ccChan := testutils.NewChannelWithSize(cnt) - - // Override the dial func to dial a dummy MeshCA endpoint, and also push the - // returned ClientConn on a channel to be inspected by the test. - origDialFunc := grpcDialFunc - grpcDialFunc = func(string, ...grpc.DialOption) (*grpc.ClientConn, error) { - cc, err := grpc.Dial("dummy-meshca-endpoint", grpc.WithInsecure()) - ccChan.Send(cc) - return cc, err - } - defer func() { grpcDialFunc = origDialFunc }() - - // Parse a good config to generate a stable config which will be passed to - // invocations of Build(). 
- builder := newPluginBuilder() - buildableConfig, err := builder.ParseConfig(goodConfigFullySpecified) - if err != nil { - t.Fatalf("builder.ParseConfig(%q) failed: %v", goodConfigFullySpecified, err) - } - - // Create multiple providers with the same config. All these providers must - // end up sharing the same ClientConn. - providers := []certprovider.Provider{} - for i := 0; i < cnt; i++ { - p, err := buildableConfig.Build(certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("Build(%+v) failed: %v", buildableConfig, err) - } - providers = append(providers, p) - } - - // Make sure only one ClientConn is created. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - val, err := ccChan.Receive(ctx) - if err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - testCC := val.(*grpc.ClientConn) - - // Attempt to read the second ClientConn should timeout. - ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer cancel() - if _, err := ccChan.Receive(ctx); err != context.DeadlineExceeded { - t.Fatal("Builder created more than one ClientConn") - } - - for _, p := range providers { - p.Close() - } - - for { - state := testCC.GetState() - if state == connectivity.Shutdown { - break - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if !testCC.WaitForStateChange(ctx, state) { - t.Fatalf("timeout waiting for clientConn state to change from %s", state) - } - } -} - -func (s) TestBuildDifferentConfig(t *testing.T) { - defer overrideHTTPFuncs()() - - // We will attempt to create two providers with different configs. So we - // expect two ClientConns to be pushed on to this channel. - const cnt = 2 - ccChan := testutils.NewChannelWithSize(cnt) - - // Override the dial func to dial a dummy MeshCA endpoint, and also push the - // returned ClientConn on a channel to be inspected by the test. 
- origDialFunc := grpcDialFunc - grpcDialFunc = func(string, ...grpc.DialOption) (*grpc.ClientConn, error) { - cc, err := grpc.Dial("dummy-meshca-endpoint", grpc.WithInsecure()) - ccChan.Send(cc) - return cc, err - } - defer func() { grpcDialFunc = origDialFunc }() - - builder := newPluginBuilder() - providers := []certprovider.Provider{} - for i := 0; i < cnt; i++ { - // Copy the good test config and modify the serverURI to make sure that - // a new provider is created for the config. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, fmt.Sprintf("test-mesh-ca:%d", i))) - buildableConfig, err := builder.ParseConfig(inputConfig) - if err != nil { - t.Fatalf("builder.ParseConfig(%q) failed: %v", inputConfig, err) - } - - p, err := buildableConfig.Build(certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("Build(%+v) failed: %v", buildableConfig, err) - } - providers = append(providers, p) - } - - // Make sure two ClientConns are created. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - for i := 0; i < cnt; i++ { - if _, err := ccChan.Receive(ctx); err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - } - - // Close the first provider, and attempt to read key material from the - // second provider. The call to read key material should timeout, but it - // should not return certprovider.errProviderClosed. - providers[0].Close() - ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer cancel() - if _, err := providers[1].KeyMaterial(ctx); err != context.DeadlineExceeded { - t.Fatalf("provider.KeyMaterial(ctx) = %v, want contextDeadlineExceeded", err) - } - - // Close the second provider to make sure that the leakchecker is happy. 
- providers[1].Close() -} diff --git a/credentials/tls/certprovider/meshca/config.go b/credentials/tls/certprovider/meshca/config.go deleted file mode 100644 index c0772b3bb7ea..000000000000 --- a/credentials/tls/certprovider/meshca/config.go +++ /dev/null @@ -1,310 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package meshca - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/http/httputil" - "path" - "strings" - "time" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - "github.com/golang/protobuf/ptypes" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/durationpb" - - "google.golang.org/grpc/credentials/sts" -) - -const ( - // GKE metadata server endpoint. - mdsBaseURI = "http://metadata.google.internal/" - mdsRequestTimeout = 5 * time.Second - - // The following are default values used in the interaction with MeshCA. - defaultMeshCaEndpoint = "meshca.googleapis.com" - defaultCallTimeout = 10 * time.Second - defaultCertLifetimeSecs = 86400 // 24h in seconds - defaultCertGraceTimeSecs = 43200 // 12h in seconds - defaultKeyTypeRSA = "RSA" - defaultKeySize = 2048 - - // The following are default values used in the interaction with STS or - // Secure Token Service, which is used to exchange the JWT token for an - // access token. 
- defaultSTSEndpoint = "securetoken.googleapis.com" - defaultCloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - defaultRequestedTokenType = "urn:ietf:params:oauth:token-type:access_token" - defaultSubjectTokenType = "urn:ietf:params:oauth:token-type:jwt" -) - -// For overriding in unit tests. -var ( - makeHTTPDoer = makeHTTPClient - readZoneFunc = readZone - readAudienceFunc = readAudience -) - -type pluginConfig struct { - serverURI string - stsOpts sts.Options - callTimeout time.Duration - certLifetime time.Duration - certGraceTime time.Duration - keyType string - keySize int - location string -} - -// Type of key to be embedded in CSRs sent to the MeshCA. -const ( - keyTypeUnknown = 0 - keyTypeRSA = 1 -) - -// pluginConfigFromJSON parses the provided config in JSON. -// -// For certain values missing in the config, we use default values defined at -// the top of this file. -// -// If the location field or STS audience field is missing, we try talking to the -// GKE Metadata server and try to infer these values. If this attempt does not -// succeed, we let those fields have empty values. -func pluginConfigFromJSON(data json.RawMessage) (*pluginConfig, error) { - // This anonymous struct corresponds to the expected JSON config. 
- cfgJSON := &struct { - Server json.RawMessage `json:"server,omitempty"` // Expect a v3corepb.ApiConfigSource - CertificateLifetime json.RawMessage `json:"certificate_lifetime,omitempty"` // Expect a durationpb.Duration - RenewalGracePeriod json.RawMessage `json:"renewal_grace_period,omitempty"` // Expect a durationpb.Duration - KeyType int `json:"key_type,omitempty"` - KeySize int `json:"key_size,omitempty"` - Location string `json:"location,omitempty"` - }{} - if err := json.Unmarshal(data, cfgJSON); err != nil { - return nil, fmt.Errorf("meshca: failed to unmarshal config: %v", err) - } - - // Further unmarshal fields represented as json.RawMessage in the above - // anonymous struct, and use default values if not specified. - server := &v3corepb.ApiConfigSource{} - if cfgJSON.Server != nil { - if err := protojson.Unmarshal(cfgJSON.Server, server); err != nil { - return nil, fmt.Errorf("meshca: protojson.Unmarshal(%+v) failed: %v", cfgJSON.Server, err) - } - } - certLifetime := &durationpb.Duration{Seconds: defaultCertLifetimeSecs} - if cfgJSON.CertificateLifetime != nil { - if err := protojson.Unmarshal(cfgJSON.CertificateLifetime, certLifetime); err != nil { - return nil, fmt.Errorf("meshca: protojson.Unmarshal(%+v) failed: %v", cfgJSON.CertificateLifetime, err) - } - } - certGraceTime := &durationpb.Duration{Seconds: defaultCertGraceTimeSecs} - if cfgJSON.RenewalGracePeriod != nil { - if err := protojson.Unmarshal(cfgJSON.RenewalGracePeriod, certGraceTime); err != nil { - return nil, fmt.Errorf("meshca: protojson.Unmarshal(%+v) failed: %v", cfgJSON.RenewalGracePeriod, err) - } - } - - if api := server.GetApiType(); api != v3corepb.ApiConfigSource_GRPC { - return nil, fmt.Errorf("meshca: server has apiType %s, want %s", api, v3corepb.ApiConfigSource_GRPC) - } - - pc := &pluginConfig{ - certLifetime: certLifetime.AsDuration(), - certGraceTime: certGraceTime.AsDuration(), - } - gs := server.GetGrpcServices() - if l := len(gs); l != 1 { - return nil, 
fmt.Errorf("meshca: number of gRPC services in config is %d, expected 1", l) - } - grpcService := gs[0] - googGRPC := grpcService.GetGoogleGrpc() - if googGRPC == nil { - return nil, errors.New("meshca: missing google gRPC service in config") - } - pc.serverURI = googGRPC.GetTargetUri() - if pc.serverURI == "" { - pc.serverURI = defaultMeshCaEndpoint - } - - callCreds := googGRPC.GetCallCredentials() - if len(callCreds) == 0 { - return nil, errors.New("meshca: missing call credentials in config") - } - var stsCallCreds *v3corepb.GrpcService_GoogleGrpc_CallCredentials_StsService - for _, cc := range callCreds { - if stsCallCreds = cc.GetStsService(); stsCallCreds != nil { - break - } - } - if stsCallCreds == nil { - return nil, errors.New("meshca: missing STS call credentials in config") - } - if stsCallCreds.GetSubjectTokenPath() == "" { - return nil, errors.New("meshca: missing subjectTokenPath in STS call credentials config") - } - pc.stsOpts = makeStsOptsWithDefaults(stsCallCreds) - - var err error - if pc.callTimeout, err = ptypes.Duration(grpcService.GetTimeout()); err != nil { - pc.callTimeout = defaultCallTimeout - } - switch cfgJSON.KeyType { - case keyTypeUnknown, keyTypeRSA: - pc.keyType = defaultKeyTypeRSA - default: - return nil, fmt.Errorf("meshca: unsupported key type: %s, only support RSA keys", pc.keyType) - } - pc.keySize = cfgJSON.KeySize - if pc.keySize == 0 { - pc.keySize = defaultKeySize - } - pc.location = cfgJSON.Location - if pc.location == "" { - pc.location = readZoneFunc(makeHTTPDoer()) - } - - return pc, nil -} - -func (pc *pluginConfig) canonical() []byte { - return []byte(fmt.Sprintf("%s:%s:%s:%s:%s:%s:%d:%s", pc.serverURI, pc.stsOpts, pc.callTimeout, pc.certLifetime, pc.certGraceTime, pc.keyType, pc.keySize, pc.location)) -} - -func makeStsOptsWithDefaults(stsCallCreds *v3corepb.GrpcService_GoogleGrpc_CallCredentials_StsService) sts.Options { - opts := sts.Options{ - TokenExchangeServiceURI: stsCallCreds.GetTokenExchangeServiceUri(), 
- Resource: stsCallCreds.GetResource(), - Audience: stsCallCreds.GetAudience(), - Scope: stsCallCreds.GetScope(), - RequestedTokenType: stsCallCreds.GetRequestedTokenType(), - SubjectTokenPath: stsCallCreds.GetSubjectTokenPath(), - SubjectTokenType: stsCallCreds.GetSubjectTokenType(), - ActorTokenPath: stsCallCreds.GetActorTokenPath(), - ActorTokenType: stsCallCreds.GetActorTokenType(), - } - - // Use sane defaults for unspecified fields. - if opts.TokenExchangeServiceURI == "" { - opts.TokenExchangeServiceURI = defaultSTSEndpoint - } - if opts.Audience == "" { - opts.Audience = readAudienceFunc(makeHTTPDoer()) - } - if opts.Scope == "" { - opts.Scope = defaultCloudPlatformScope - } - if opts.RequestedTokenType == "" { - opts.RequestedTokenType = defaultRequestedTokenType - } - if opts.SubjectTokenType == "" { - opts.SubjectTokenType = defaultSubjectTokenType - } - return opts -} - -// httpDoer wraps the single method on the http.Client type that we use. This -// helps with overriding in unit tests. 
-type httpDoer interface { - Do(req *http.Request) (*http.Response, error) -} - -func makeHTTPClient() httpDoer { - return &http.Client{Timeout: mdsRequestTimeout} -} - -func readMetadata(client httpDoer, uriPath string) (string, error) { - req, err := http.NewRequest("GET", mdsBaseURI+uriPath, nil) - if err != nil { - return "", err - } - req.Header.Add("Metadata-Flavor", "Google") - - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - dump, err := httputil.DumpRequestOut(req, false) - if err != nil { - logger.Warningf("Failed to dump HTTP request: %v", err) - } - logger.Warningf("Request %q returned status %v", dump, resp.StatusCode) - } - return string(body), err -} - -func readZone(client httpDoer) string { - zoneURI := "computeMetadata/v1/instance/zone" - data, err := readMetadata(client, zoneURI) - if err != nil { - logger.Warningf("GET %s failed: %v", path.Join(mdsBaseURI, zoneURI), err) - return "" - } - - // The output returned by the metadata server looks like this: - // projects//zones/ - parts := strings.Split(data, "/") - if len(parts) == 0 { - logger.Warningf("GET %s returned {%s}, does not match expected format {projects//zones/}", path.Join(mdsBaseURI, zoneURI)) - return "" - } - return parts[len(parts)-1] -} - -// readAudience constructs the audience field to be used in the STS request, if -// it is not specified in the plugin configuration. -// -// "identitynamespace:{TRUST_DOMAIN}:{GKE_CLUSTER_URL}" is the format of the -// audience field. When workload identity is enabled on a GCP project, a default -// trust domain is created whose value is "{PROJECT_ID}.svc.id.goog". The format -// of the GKE_CLUSTER_URL is: -// https://container.googleapis.com/v1/projects/{PROJECT_ID}/zones/{ZONE}/clusters/{CLUSTER_NAME}. 
-func readAudience(client httpDoer) string { - projURI := "computeMetadata/v1/project/project-id" - project, err := readMetadata(client, projURI) - if err != nil { - logger.Warningf("GET %s failed: %v", path.Join(mdsBaseURI, projURI), err) - return "" - } - trustDomain := fmt.Sprintf("%s.svc.id.goog", project) - - clusterURI := "computeMetadata/v1/instance/attributes/cluster-name" - cluster, err := readMetadata(client, clusterURI) - if err != nil { - logger.Warningf("GET %s failed: %v", path.Join(mdsBaseURI, clusterURI), err) - return "" - } - zone := readZoneFunc(client) - clusterURL := fmt.Sprintf("https://container.googleapis.com/v1/projects/%s/zones/%s/clusters/%s", project, zone, cluster) - audience := fmt.Sprintf("identitynamespace:%s:%s", trustDomain, clusterURL) - return audience -} diff --git a/credentials/tls/certprovider/meshca/config_test.go b/credentials/tls/certprovider/meshca/config_test.go deleted file mode 100644 index 5deb484f341c..000000000000 --- a/credentials/tls/certprovider/meshca/config_test.go +++ /dev/null @@ -1,375 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package meshca - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" -) - -const ( - testProjectID = "test-project-id" - testGKECluster = "test-gke-cluster" - testGCEZone = "test-zone" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -var ( - goodConfigFormatStr = ` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": %q, - "call_credentials": [ - { - "access_token": "foo" - }, - { - "sts_service": { - "token_exchange_service_uri": "http://test-sts", - "resource": "test-resource", - "audience": "test-audience", - "scope": "test-scope", - "requested_token_type": "test-requested-token-type", - "subject_token_path": "test-subject-token-path", - "subject_token_type": "test-subject-token-type", - "actor_token_path": "test-actor-token-path", - "actor_token_type": "test-actor-token-type" - } - } - ] - }, - "timeout": "10s" - } - ] - }, - "certificate_lifetime": "86400s", - "renewal_grace_period": "43200s", - "key_type": 1, - "key_size": 2048, - "location": "us-west1-b" - }` - goodConfigWithDefaults = json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "call_credentials": [ - { - "sts_service": { - "subject_token_path": "test-subject-token-path" - } - } - ] - }, - "timeout": "10s" - } - ] - } - }`) -) - -var goodConfigFullySpecified = json.RawMessage(fmt.Sprintf(goodConfigFormatStr, "test-meshca")) - -// verifyReceivedRequest reads the HTTP request received by the fake client -// (exposed through a channel), and verifies that it matches the expected -// request. 
-func verifyReceivedRequest(fc *testutils.FakeHTTPClient, wantURI string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - val, err := fc.ReqChan.Receive(ctx) - if err != nil { - return err - } - gotReq := val.(*http.Request) - if gotURI := gotReq.URL.String(); gotURI != wantURI { - return fmt.Errorf("request contains URL %q want %q", gotURI, wantURI) - } - if got, want := gotReq.Header.Get("Metadata-Flavor"), "Google"; got != want { - return fmt.Errorf("request contains flavor %q want %q", got, want) - } - return nil -} - -// TestParseConfigSuccessFullySpecified tests the case where the config is fully -// specified and no defaults are required. -func (s) TestParseConfigSuccessFullySpecified(t *testing.T) { - wantConfig := "test-meshca:http://test-sts:test-resource:test-audience:test-scope:test-requested-token-type:test-subject-token-path:test-subject-token-type:test-actor-token-path:test-actor-token-type:10s:24h0m0s:12h0m0s:RSA:2048:us-west1-b" - - cfg, err := pluginConfigFromJSON(goodConfigFullySpecified) - if err != nil { - t.Fatalf("pluginConfigFromJSON(%q) failed: %v", goodConfigFullySpecified, err) - } - gotConfig := cfg.canonical() - if diff := cmp.Diff(wantConfig, string(gotConfig)); diff != "" { - t.Errorf("pluginConfigFromJSON(%q) returned config does not match expected (-want +got):\n%s", string(goodConfigFullySpecified), diff) - } -} - -// TestParseConfigSuccessWithDefaults tests cases where the config is not fully -// specified, and we end up using some sane defaults. -func (s) TestParseConfigSuccessWithDefaults(t *testing.T) { - wantConfig := fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s", - "meshca.googleapis.com", // Mesh CA Server URI. - "securetoken.googleapis.com", // STS Server URI. - "", // STS Resource Name. 
- "identitynamespace:test-project-id.svc.id.goog:https://container.googleapis.com/v1/projects/test-project-id/zones/test-zone/clusters/test-gke-cluster", // STS Audience. - "https://www.googleapis.com/auth/cloud-platform", // STS Scope. - "urn:ietf:params:oauth:token-type:access_token", // STS requested token type. - "test-subject-token-path", // STS subject token path. - "urn:ietf:params:oauth:token-type:jwt", // STS subject token type. - "", // STS actor token path. - "", // STS actor token type. - "10s", // Call timeout. - "24h0m0s", // Cert life time. - "12h0m0s", // Cert grace time. - "RSA", // Key type - "2048", // Key size - "test-zone", // Zone - ) - - // We expect the config parser to make four HTTP requests and receive four - // responses. Hence we setup the request and response channels in the fake - // client with appropriate buffer size. - fc := &testutils.FakeHTTPClient{ - ReqChan: testutils.NewChannelWithSize(4), - RespChan: testutils.NewChannelWithSize(4), - } - // Set up the responses to be delivered to the config parser by the fake - // client. The config parser expects responses with project_id, - // gke_cluster_id and gce_zone. The zone is read twice, once as part of - // reading the STS audience and once to get location metadata. 
- fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(testProjectID))), - }) - fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(testGKECluster))), - }) - fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(fmt.Sprintf("projects/%s/zones/%s", testProjectID, testGCEZone)))), - }) - fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(fmt.Sprintf("projects/%s/zones/%s", testProjectID, testGCEZone)))), - }) - // Override the http.Client with our fakeClient. - origMakeHTTPDoer := makeHTTPDoer - makeHTTPDoer = func() httpDoer { return fc } - defer func() { makeHTTPDoer = origMakeHTTPDoer }() - - // Spawn a goroutine to verify the HTTP requests sent out as part of the - // config parsing. 
- errCh := make(chan error, 1) - go func() { - if err := verifyReceivedRequest(fc, "http://metadata.google.internal/computeMetadata/v1/project/project-id"); err != nil { - errCh <- err - return - } - if err := verifyReceivedRequest(fc, "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name"); err != nil { - errCh <- err - return - } - if err := verifyReceivedRequest(fc, "http://metadata.google.internal/computeMetadata/v1/instance/zone"); err != nil { - errCh <- err - return - } - errCh <- nil - }() - - cfg, err := pluginConfigFromJSON(goodConfigWithDefaults) - if err != nil { - t.Fatalf("pluginConfigFromJSON(%q) failed: %v", goodConfigWithDefaults, err) - } - gotConfig := cfg.canonical() - if diff := cmp.Diff(wantConfig, string(gotConfig)); diff != "" { - t.Errorf("builder.ParseConfig(%q) returned config does not match expected (-want +got):\n%s", goodConfigWithDefaults, diff) - } - - if err := <-errCh; err != nil { - t.Fatal(err) - } -} - -// TestParseConfigFailureCases tests several invalid configs which all result in -// config parsing failures. 
-func (s) TestParseConfigFailureCases(t *testing.T) { - tests := []struct { - desc string - inputConfig json.RawMessage - wantErr string - }{ - { - desc: "invalid JSON", - inputConfig: json.RawMessage(`bad bad json`), - wantErr: "failed to unmarshal config", - }, - { - desc: "bad apiType", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 1 - } - }`), - wantErr: "server has apiType REST, want GRPC", - }, - { - desc: "no grpc services", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2 - } - }`), - wantErr: "number of gRPC services in config is 0, expected 1", - }, - { - desc: "too many grpc services", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [{}, {}] - } - }`), - wantErr: "number of gRPC services in config is 2, expected 1", - }, - { - desc: "missing google grpc service", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "envoyGrpc": {} - } - ] - } - }`), - wantErr: "missing google gRPC service in config", - }, - { - desc: "missing call credentials", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": "foo" - } - } - ] - } - }`), - wantErr: "missing call credentials in config", - }, - { - desc: "missing STS call credentials", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": "foo", - "call_credentials": [ - { - "access_token": "foo" - } - ] - } - } - ] - } - }`), - wantErr: "missing STS call credentials in config", - }, - { - desc: "with no defaults", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": "foo", - "call_credentials": [ - { - "sts_service": {} - } - ] - } - } - ] - } - }`), - wantErr: "missing subjectTokenPath in STS call credentials config", - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t 
*testing.T) { - cfg, err := pluginConfigFromJSON(test.inputConfig) - if err == nil { - t.Fatalf("pluginConfigFromJSON(%q) = %v, expected to return error (%v)", test.inputConfig, string(cfg.canonical()), test.wantErr) - - } - if !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("builder.ParseConfig(%q) = (%v), want error (%v)", test.inputConfig, err, test.wantErr) - } - }) - } -} diff --git a/credentials/tls/certprovider/meshca/internal/v1/meshca.pb.go b/credentials/tls/certprovider/meshca/internal/v1/meshca.pb.go deleted file mode 100644 index 387f8c55abc0..000000000000 --- a/credentials/tls/certprovider/meshca/internal/v1/meshca.pb.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2019 Istio Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: istio/google/security/meshca/v1/meshca.proto - -package google_security_meshca_v1 - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Certificate request message. -type MeshCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The request ID must be a valid UUID with the exception that zero UUID is - // not supported (00000000-0000-0000-0000-000000000000). - RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - // PEM-encoded certificate request. - Csr string `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` - // Optional: requested certificate validity period. - Validity *durationpb.Duration `protobuf:"bytes,3,opt,name=validity,proto3" json:"validity,omitempty"` // Reserved 4 -} - -func (x *MeshCertificateRequest) Reset() { - *x = MeshCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeshCertificateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeshCertificateRequest) ProtoMessage() {} - -func (x *MeshCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeshCertificateRequest.ProtoReflect.Descriptor instead. 
-func (*MeshCertificateRequest) Descriptor() ([]byte, []int) { - return file_istio_google_security_meshca_v1_meshca_proto_rawDescGZIP(), []int{0} -} - -func (x *MeshCertificateRequest) GetRequestId() string { - if x != nil { - return x.RequestId - } - return "" -} - -func (x *MeshCertificateRequest) GetCsr() string { - if x != nil { - return x.Csr - } - return "" -} - -func (x *MeshCertificateRequest) GetValidity() *durationpb.Duration { - if x != nil { - return x.Validity - } - return nil -} - -// Certificate response message. -type MeshCertificateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // PEM-encoded certificate chain. - // Leaf cert is element '0'. Root cert is element 'n'. - CertChain []string `protobuf:"bytes,1,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` -} - -func (x *MeshCertificateResponse) Reset() { - *x = MeshCertificateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeshCertificateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeshCertificateResponse) ProtoMessage() {} - -func (x *MeshCertificateResponse) ProtoReflect() protoreflect.Message { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeshCertificateResponse.ProtoReflect.Descriptor instead. 
-func (*MeshCertificateResponse) Descriptor() ([]byte, []int) { - return file_istio_google_security_meshca_v1_meshca_proto_rawDescGZIP(), []int{1} -} - -func (x *MeshCertificateResponse) GetCertChain() []string { - if x != nil { - return x.CertChain - } - return nil -} - -var File_istio_google_security_meshca_v1_meshca_proto protoreflect.FileDescriptor - -var file_istio_google_security_meshca_v1_meshca_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, - 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x80, 0x01, 0x0a, 0x16, 0x4d, 0x65, - 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x63, 0x73, 0x72, 0x12, 0x35, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x22, 0x38, 0x0a, 0x17, - 0x4d, 0x65, 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, - 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x63, 0x65, 0x72, - 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x32, 0x96, 0x01, 0x0a, 0x16, 0x4d, 0x65, 0x73, 0x68, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x7c, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, - 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x6d, 0x65, 0x73, 0x68, - 0x63, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x2e, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, 0x76, 0x31, - 0x42, 0x0b, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_istio_google_security_meshca_v1_meshca_proto_rawDescOnce sync.Once - file_istio_google_security_meshca_v1_meshca_proto_rawDescData = file_istio_google_security_meshca_v1_meshca_proto_rawDesc -) - -func file_istio_google_security_meshca_v1_meshca_proto_rawDescGZIP() []byte { - file_istio_google_security_meshca_v1_meshca_proto_rawDescOnce.Do(func() { - file_istio_google_security_meshca_v1_meshca_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_istio_google_security_meshca_v1_meshca_proto_rawDescData) - }) - return file_istio_google_security_meshca_v1_meshca_proto_rawDescData -} - -var file_istio_google_security_meshca_v1_meshca_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_istio_google_security_meshca_v1_meshca_proto_goTypes = []interface{}{ - (*MeshCertificateRequest)(nil), // 0: google.security.meshca.v1.MeshCertificateRequest - (*MeshCertificateResponse)(nil), // 1: google.security.meshca.v1.MeshCertificateResponse - (*durationpb.Duration)(nil), // 2: google.protobuf.Duration -} -var file_istio_google_security_meshca_v1_meshca_proto_depIdxs = []int32{ - 2, // 0: google.security.meshca.v1.MeshCertificateRequest.validity:type_name -> google.protobuf.Duration - 0, // 1: google.security.meshca.v1.MeshCertificateService.CreateCertificate:input_type -> google.security.meshca.v1.MeshCertificateRequest - 1, // 2: google.security.meshca.v1.MeshCertificateService.CreateCertificate:output_type -> google.security.meshca.v1.MeshCertificateResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_istio_google_security_meshca_v1_meshca_proto_init() } -func file_istio_google_security_meshca_v1_meshca_proto_init() { - if File_istio_google_security_meshca_v1_meshca_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_istio_google_security_meshca_v1_meshca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeshCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_istio_google_security_meshca_v1_meshca_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*MeshCertificateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_istio_google_security_meshca_v1_meshca_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_istio_google_security_meshca_v1_meshca_proto_goTypes, - DependencyIndexes: file_istio_google_security_meshca_v1_meshca_proto_depIdxs, - MessageInfos: file_istio_google_security_meshca_v1_meshca_proto_msgTypes, - }.Build() - File_istio_google_security_meshca_v1_meshca_proto = out.File - file_istio_google_security_meshca_v1_meshca_proto_rawDesc = nil - file_istio_google_security_meshca_v1_meshca_proto_goTypes = nil - file_istio_google_security_meshca_v1_meshca_proto_depIdxs = nil -} diff --git a/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go b/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go deleted file mode 100644 index 4663ff1ef35c..000000000000 --- a/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go +++ /dev/null @@ -1,110 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.1.0 -// - protoc v3.14.0 -// source: istio/google/security/meshca/v1/meshca.proto - -package google_security_meshca_v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// MeshCertificateServiceClient is the client API for MeshCertificateService service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type MeshCertificateServiceClient interface { - // Using provided CSR, returns a signed certificate that represents a GCP - // service account identity. - CreateCertificate(ctx context.Context, in *MeshCertificateRequest, opts ...grpc.CallOption) (*MeshCertificateResponse, error) -} - -type meshCertificateServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewMeshCertificateServiceClient(cc grpc.ClientConnInterface) MeshCertificateServiceClient { - return &meshCertificateServiceClient{cc} -} - -func (c *meshCertificateServiceClient) CreateCertificate(ctx context.Context, in *MeshCertificateRequest, opts ...grpc.CallOption) (*MeshCertificateResponse, error) { - out := new(MeshCertificateResponse) - err := c.cc.Invoke(ctx, "/google.security.meshca.v1.MeshCertificateService/CreateCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MeshCertificateServiceServer is the server API for MeshCertificateService service. -// All implementations must embed UnimplementedMeshCertificateServiceServer -// for forward compatibility -type MeshCertificateServiceServer interface { - // Using provided CSR, returns a signed certificate that represents a GCP - // service account identity. - CreateCertificate(context.Context, *MeshCertificateRequest) (*MeshCertificateResponse, error) - mustEmbedUnimplementedMeshCertificateServiceServer() -} - -// UnimplementedMeshCertificateServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedMeshCertificateServiceServer struct { -} - -func (UnimplementedMeshCertificateServiceServer) CreateCertificate(context.Context, *MeshCertificateRequest) (*MeshCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateCertificate not implemented") -} -func (UnimplementedMeshCertificateServiceServer) mustEmbedUnimplementedMeshCertificateServiceServer() { -} - -// UnsafeMeshCertificateServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to MeshCertificateServiceServer will -// result in compilation errors. -type UnsafeMeshCertificateServiceServer interface { - mustEmbedUnimplementedMeshCertificateServiceServer() -} - -func RegisterMeshCertificateServiceServer(s grpc.ServiceRegistrar, srv MeshCertificateServiceServer) { - s.RegisterService(&MeshCertificateService_ServiceDesc, srv) -} - -func _MeshCertificateService_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MeshCertificateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MeshCertificateServiceServer).CreateCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.security.meshca.v1.MeshCertificateService/CreateCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MeshCertificateServiceServer).CreateCertificate(ctx, req.(*MeshCertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// MeshCertificateService_ServiceDesc is the grpc.ServiceDesc for MeshCertificateService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var MeshCertificateService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "google.security.meshca.v1.MeshCertificateService", - HandlerType: (*MeshCertificateServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateCertificate", - Handler: _MeshCertificateService_CreateCertificate_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "istio/google/security/meshca/v1/meshca.proto", -} diff --git a/credentials/tls/certprovider/meshca/logging.go b/credentials/tls/certprovider/meshca/logging.go deleted file mode 100644 index ae20059c4f72..000000000000 --- a/credentials/tls/certprovider/meshca/logging.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package meshca - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" - internalgrpclog "google.golang.org/grpc/internal/grpclog" -) - -const prefix = "[%p] " - -var logger = grpclog.Component("meshca") - -func prefixLogger(p *providerPlugin) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) -} diff --git a/credentials/tls/certprovider/meshca/plugin.go b/credentials/tls/certprovider/meshca/plugin.go deleted file mode 100644 index ab1958ac1fd0..000000000000 --- a/credentials/tls/certprovider/meshca/plugin.go +++ /dev/null @@ -1,289 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package meshca provides an implementation of the Provider interface which -// communicates with MeshCA to get certificates signed. 
-package meshca - -import ( - "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" - "github.com/google/uuid" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/tls/certprovider" - meshgrpc "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - meshpb "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/metadata" -) - -// In requests sent to the MeshCA, we add a metadata header with this key and -// the value being the GCE zone in which the workload is running in. -const locationMetadataKey = "x-goog-request-params" - -// For overriding from unit tests. -var newDistributorFunc = func() distributor { return certprovider.NewDistributor() } - -// distributor wraps the methods on certprovider.Distributor which are used by -// the plugin. This is very useful in tests which need to know exactly when the -// plugin updates its key material. -type distributor interface { - KeyMaterial(ctx context.Context) (*certprovider.KeyMaterial, error) - Set(km *certprovider.KeyMaterial, err error) - Stop() -} - -// providerPlugin is an implementation of the certprovider.Provider interface, -// which gets certificates signed by communicating with the MeshCA. -type providerPlugin struct { - distributor // Holds the key material. - cancel context.CancelFunc - cc *grpc.ClientConn // Connection to MeshCA server. - cfg *pluginConfig // Plugin configuration. - opts certprovider.BuildOptions // Key material options. - logger *grpclog.PrefixLogger // Plugin instance specific prefix. - backoff func(int) time.Duration // Exponential backoff. - doneFunc func() // Notify the builder when done. -} - -// providerParams wraps params passed to the provider plugin at creation time. 
-type providerParams struct { - // This ClientConn to the MeshCA server is owned by the builder. - cc *grpc.ClientConn - cfg *pluginConfig - opts certprovider.BuildOptions - backoff func(int) time.Duration - doneFunc func() -} - -func newProviderPlugin(params providerParams) *providerPlugin { - ctx, cancel := context.WithCancel(context.Background()) - p := &providerPlugin{ - cancel: cancel, - cc: params.cc, - cfg: params.cfg, - opts: params.opts, - backoff: params.backoff, - doneFunc: params.doneFunc, - distributor: newDistributorFunc(), - } - p.logger = prefixLogger((p)) - p.logger.Infof("plugin created") - go p.run(ctx) - return p -} - -func (p *providerPlugin) Close() { - p.logger.Infof("plugin closed") - p.Stop() // Stop the embedded distributor. - p.cancel() - p.doneFunc() -} - -// run is a long running goroutine which periodically sends out CSRs to the -// MeshCA, and updates the underlying Distributor with the new key material. -func (p *providerPlugin) run(ctx context.Context) { - // We need to start fetching key material right away. The next attempt will - // be triggered by the timer firing. - for { - certValidity, err := p.updateKeyMaterial(ctx) - if err != nil { - return - } - - // We request a certificate with the configured validity duration (which - // is usually twice as much as the grace period). But the server is free - // to return a certificate with whatever validity time it deems right. - refreshAfter := p.cfg.certGraceTime - if refreshAfter > certValidity { - // The default value of cert grace time is half that of the default - // cert validity time. So here, when we have to use a non-default - // cert life time, we will set the grace time again to half that of - // the validity time. - refreshAfter = certValidity / 2 - } - timer := time.NewTimer(refreshAfter) - select { - case <-ctx.Done(): - return - case <-timer.C: - } - } -} - -// updateKeyMaterial generates a CSR and attempts to get it signed from the -// MeshCA. 
It retries with an exponential backoff till it succeeds or the -// deadline specified in ctx expires. Once it gets the CSR signed from the -// MeshCA, it updates the Distributor with the new key material. -// -// It returns the amount of time the new certificate is valid for. -func (p *providerPlugin) updateKeyMaterial(ctx context.Context) (time.Duration, error) { - client := meshgrpc.NewMeshCertificateServiceClient(p.cc) - retries := 0 - for { - if ctx.Err() != nil { - return 0, ctx.Err() - } - - if retries != 0 { - bi := p.backoff(retries) - p.logger.Warningf("Backing off for %s before attempting the next CreateCertificate() request", bi) - timer := time.NewTimer(bi) - select { - case <-timer.C: - case <-ctx.Done(): - return 0, ctx.Err() - } - } - retries++ - - privKey, err := rsa.GenerateKey(rand.Reader, p.cfg.keySize) - if err != nil { - p.logger.Warningf("RSA key generation failed: %v", err) - continue - } - // We do not set any fields in the CSR (we use an empty - // x509.CertificateRequest as the template) because the MeshCA discards - // them anyways, and uses the workload identity from the access token - // that we present (as part of the STS call creds). - csrBytes, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{}, crypto.PrivateKey(privKey)) - if err != nil { - p.logger.Warningf("CSR creation failed: %v", err) - continue - } - csrPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes}) - - // Send out the CSR with a call timeout and location metadata, as - // specified in the plugin configuration. 
- req := &meshpb.MeshCertificateRequest{ - RequestId: uuid.New().String(), - Csr: string(csrPEM), - Validity: &durationpb.Duration{Seconds: int64(p.cfg.certLifetime / time.Second)}, - } - p.logger.Debugf("Sending CreateCertificate() request: %v", req) - - callCtx, ctxCancel := context.WithTimeout(context.Background(), p.cfg.callTimeout) - callCtx = metadata.NewOutgoingContext(callCtx, metadata.Pairs(locationMetadataKey, p.cfg.location)) - resp, err := client.CreateCertificate(callCtx, req) - if err != nil { - p.logger.Warningf("CreateCertificate request failed: %v", err) - ctxCancel() - continue - } - ctxCancel() - - // The returned cert chain must contain more than one cert. Leaf cert is - // element '0', while root cert is element 'n', and the intermediate - // entries form the chain from the root to the leaf. - certChain := resp.GetCertChain() - if l := len(certChain); l <= 1 { - p.logger.Errorf("Received certificate chain contains %d certificates, need more than one", l) - continue - } - - // We need to explicitly parse the PEM cert contents as an - // x509.Certificate to read the certificate validity period. We use this - // to decide when to refresh the cert. Even though the call to - // tls.X509KeyPair actually parses the PEM contents into an - // x509.Certificate, it does not store that in the `Leaf` field. See: - // https://golang.org/pkg/crypto/tls/#X509KeyPair. 
- identity, intermediates, roots, err := parseCertChain(certChain) - if err != nil { - p.logger.Errorf(err.Error()) - continue - } - _, err = identity.Verify(x509.VerifyOptions{ - Intermediates: intermediates, - Roots: roots, - }) - if err != nil { - p.logger.Errorf("Certificate verification failed for return certChain: %v", err) - continue - } - - key := x509.MarshalPKCS1PrivateKey(privKey) - keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: key}) - certPair, err := tls.X509KeyPair([]byte(certChain[0]), keyPEM) - if err != nil { - p.logger.Errorf("Failed to create x509 key pair: %v", err) - continue - } - - // At this point, the received response has been deemed good. - retries = 0 - - // All certs signed by the MeshCA roll up to the same root. And treating - // the last element of the returned chain as the root is the only - // supported option to get the root certificate. So, we ignore the - // options specified in the call to Build(), which contain certificate - // name and whether the caller is interested in identity or root cert. - p.Set(&certprovider.KeyMaterial{Certs: []tls.Certificate{certPair}, Roots: roots}, nil) - return time.Until(identity.NotAfter), nil - } -} - -// ParseCertChain parses the result returned by the MeshCA which consists of a -// list of PEM encoded certs. The first element in the list is the leaf or -// identity cert, while the last element is the root, and everything in between -// form the chain of trust. -// -// Caller needs to make sure that certChain has at least two elements. 
-func parseCertChain(certChain []string) (*x509.Certificate, *x509.CertPool, *x509.CertPool, error) { - identity, err := parseCert([]byte(certChain[0])) - if err != nil { - return nil, nil, nil, err - } - - intermediates := x509.NewCertPool() - for _, cert := range certChain[1 : len(certChain)-1] { - i, err := parseCert([]byte(cert)) - if err != nil { - return nil, nil, nil, err - } - intermediates.AddCert(i) - } - - roots := x509.NewCertPool() - root, err := parseCert([]byte(certChain[len(certChain)-1])) - if err != nil { - return nil, nil, nil, err - } - roots.AddCert(root) - - return identity, intermediates, roots, nil -} - -func parseCert(certPEM []byte) (*x509.Certificate, error) { - block, _ := pem.Decode(certPEM) - if block == nil { - return nil, fmt.Errorf("failed to decode received PEM data: %v", certPEM) - } - return x509.ParseCertificate(block.Bytes) -} diff --git a/credentials/tls/certprovider/meshca/plugin_test.go b/credentials/tls/certprovider/meshca/plugin_test.go deleted file mode 100644 index 51f545d6a0e3..000000000000 --- a/credentials/tls/certprovider/meshca/plugin_test.go +++ /dev/null @@ -1,459 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package meshca - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net" - "reflect" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/tls/certprovider" - meshgrpc "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - meshpb "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - "google.golang.org/grpc/internal/testutils" -) - -const ( - // Used when waiting for something that is expected to *not* happen. - defaultTestShortTimeout = 10 * time.Millisecond - defaultTestTimeout = 5 * time.Second - defaultTestCertLife = time.Hour - shortTestCertLife = 2 * time.Second - maxErrCount = 2 -) - -// fakeCA provides a very simple fake implementation of the certificate signing -// service as exported by the MeshCA. -type fakeCA struct { - meshgrpc.UnimplementedMeshCertificateServiceServer - - withErrors bool // Whether the CA returns errors to begin with. - withShortLife bool // Whether to create certs with short lifetime - - ccChan *testutils.Channel // Channel to get notified about CreateCertificate calls. - errors int // Error count. - key *rsa.PrivateKey // Private key of CA. - cert *x509.Certificate // Signing certificate. - certPEM []byte // PEM encoding of signing certificate. -} - -// Returns a new instance of the fake Mesh CA. It generates a new RSA key and a -// self-signed certificate which will be used to sign CSRs received in incoming -// requests. -// withErrors controls whether the fake returns errors before succeeding, while -// withShortLife controls whether the fake returns certs with very small -// lifetimes (to test plugin refresh behavior). Every time a CreateCertificate() -// call succeeds, an event is pushed on the ccChan. 
-func newFakeMeshCA(ccChan *testutils.Channel, withErrors, withShortLife bool) (*fakeCA, error) { - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, fmt.Errorf("RSA key generation failed: %v", err) - } - - now := time.Now() - tmpl := &x509.Certificate{ - Subject: pkix.Name{CommonName: "my-fake-ca"}, - SerialNumber: big.NewInt(10), - NotBefore: now.Add(-time.Hour), - NotAfter: now.Add(time.Hour), - KeyUsage: x509.KeyUsageCertSign, - IsCA: true, - BasicConstraintsValid: true, - } - certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key) - if err != nil { - return nil, fmt.Errorf("x509.CreateCertificate(%v) failed: %v", tmpl, err) - } - // The PEM encoding of the self-signed certificate is stored because we need - // to return a chain of certificates in the response, starting with the - // client certificate and ending in the root. - certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) - cert, err := x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("x509.ParseCertificate(%v) failed: %v", certDER, err) - } - - return &fakeCA{ - withErrors: withErrors, - withShortLife: withShortLife, - ccChan: ccChan, - key: key, - cert: cert, - certPEM: certPEM, - }, nil -} - -// CreateCertificate helps implement the MeshCA service. -// -// If the fakeMeshCA was created with `withErrors` set to true, the first -// `maxErrCount` number of RPC return errors. Subsequent requests are signed and -// returned without error. 
-func (f *fakeCA) CreateCertificate(ctx context.Context, req *meshpb.MeshCertificateRequest) (*meshpb.MeshCertificateResponse, error) { - if f.withErrors { - if f.errors < maxErrCount { - f.errors++ - return nil, errors.New("fake Mesh CA error") - - } - } - - csrPEM := []byte(req.GetCsr()) - block, _ := pem.Decode(csrPEM) - if block == nil { - return nil, fmt.Errorf("failed to decode received CSR: %v", csrPEM) - } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, fmt.Errorf("failed to parse received CSR: %v", csrPEM) - } - - // By default, we create certs which are valid for an hour. But if - // `withShortLife` is set, we create certs which are valid only for a couple - // of seconds. - now := time.Now() - notBefore, notAfter := now.Add(-defaultTestCertLife), now.Add(defaultTestCertLife) - if f.withShortLife { - notBefore, notAfter = now.Add(-shortTestCertLife), now.Add(shortTestCertLife) - } - tmpl := &x509.Certificate{ - Subject: pkix.Name{CommonName: "signed-cert"}, - SerialNumber: big.NewInt(10), - NotBefore: notBefore, - NotAfter: notAfter, - KeyUsage: x509.KeyUsageDigitalSignature, - } - certDER, err := x509.CreateCertificate(rand.Reader, tmpl, f.cert, csr.PublicKey, f.key) - if err != nil { - return nil, fmt.Errorf("x509.CreateCertificate(%v) failed: %v", tmpl, err) - } - certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) - - // Push to ccChan to indicate that the RPC is processed. - f.ccChan.Send(nil) - - certChain := []string{ - string(certPEM), // Signed certificate corresponding to CSR - string(f.certPEM), // Root certificate - } - return &meshpb.MeshCertificateResponse{CertChain: certChain}, nil -} - -// opts wraps the options to be passed to setup. -type opts struct { - // Whether the CA returns certs with short lifetime. Used to test client refresh. - withShortLife bool - // Whether the CA returns errors to begin with. Used to test client backoff. 
- withbackoff bool -} - -// events wraps channels which indicate different events. -type events struct { - // Pushed to when the plugin dials the MeshCA. - dialDone *testutils.Channel - // Pushed to when CreateCertifcate() succeeds on the MeshCA. - createCertDone *testutils.Channel - // Pushed to when the plugin updates the distributor with new key material. - keyMaterialDone *testutils.Channel - // Pushed to when the client backs off after a failed CreateCertificate(). - backoffDone *testutils.Channel -} - -// setup performs tasks common to all tests in this file. -func setup(t *testing.T, o opts) (events, string, func()) { - t.Helper() - - // Create a fake MeshCA which pushes events on the passed channel for - // successful RPCs. - createCertDone := testutils.NewChannel() - fs, err := newFakeMeshCA(createCertDone, o.withbackoff, o.withShortLife) - if err != nil { - t.Fatal(err) - } - - // Create a gRPC server and register the fake MeshCA on it. - server := grpc.NewServer() - meshgrpc.RegisterMeshCertificateServiceServer(server, fs) - - // Start a net.Listener on a local port, and pass it to the gRPC server - // created above and start serving. - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal(err) - } - addr := lis.Addr().String() - go server.Serve(lis) - - // Override the plugin's dial function and perform a blocking dial. Also - // push on dialDone once the dial is complete so that test can block on this - // event before verifying other things. 
- dialDone := testutils.NewChannel() - origDialFunc := grpcDialFunc - grpcDialFunc = func(uri string, _ ...grpc.DialOption) (*grpc.ClientConn, error) { - if uri != addr { - t.Fatalf("plugin dialing MeshCA at %s, want %s", uri, addr) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cc, err := grpc.DialContext(ctx, uri, grpc.WithInsecure(), grpc.WithBlock()) - if err != nil { - t.Fatalf("grpc.DialContext(%s) failed: %v", addr, err) - } - dialDone.Send(nil) - return cc, nil - } - - // Override the plugin's newDistributorFunc and return a wrappedDistributor - // which allows the test to be notified whenever the plugin pushes new key - // material into the distributor. - origDistributorFunc := newDistributorFunc - keyMaterialDone := testutils.NewChannel() - d := newWrappedDistributor(keyMaterialDone) - newDistributorFunc = func() distributor { return d } - - // Override the plugin's backoff function to perform no real backoff, but - // push on a channel so that the test can verifiy that backoff actually - // happened. - backoffDone := testutils.NewChannelWithSize(maxErrCount) - origBackoffFunc := backoffFunc - if o.withbackoff { - // Override the plugin's backoff function with this, so that we can verify - // that a backoff actually was triggered. - backoffFunc = func(v int) time.Duration { - backoffDone.Send(v) - return 0 - } - } - - // Return all the channels, and a cancel function to undo all the overrides. - e := events{ - dialDone: dialDone, - createCertDone: createCertDone, - keyMaterialDone: keyMaterialDone, - backoffDone: backoffDone, - } - done := func() { - server.Stop() - grpcDialFunc = origDialFunc - newDistributorFunc = origDistributorFunc - backoffFunc = origBackoffFunc - } - return e, addr, done -} - -// wrappedDistributor wraps a distributor and pushes on a channel whenever new -// key material is pushed to the distributor. 
-type wrappedDistributor struct { - *certprovider.Distributor - kmChan *testutils.Channel -} - -func newWrappedDistributor(kmChan *testutils.Channel) *wrappedDistributor { - return &wrappedDistributor{ - kmChan: kmChan, - Distributor: certprovider.NewDistributor(), - } -} - -func (wd *wrappedDistributor) Set(km *certprovider.KeyMaterial, err error) { - wd.Distributor.Set(km, err) - wd.kmChan.Send(nil) -} - -// TestCreateCertificate verifies the simple case where the MeshCA server -// returns a good certificate. -func (s) TestCreateCertificate(t *testing.T) { - e, addr, cancel := setup(t, opts{}) - defer cancel() - - // Set the MeshCA targetURI to point to our fake MeshCA. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, addr)) - - // Lookup MeshCA plugin builder, parse config and start the plugin. - prov, err := certprovider.GetProvider(pluginName, inputConfig, certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("GetProvider(%s, %s) failed: %v", pluginName, string(inputConfig), err) - } - defer prov.Close() - - // Wait till the plugin dials the MeshCA server. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.dialDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to dial MeshCA") - } - - // Wait till the plugin makes a CreateCertificate() call. - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.createCertDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to make CreateCertificate RPC") - } - - // We don't really care about the exact key material returned here. All we - // care about is whether we get any key material at all, and that we don't - // get any errors. 
- ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err = prov.KeyMaterial(ctx); err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } -} - -// TestCreateCertificateWithBackoff verifies the case where the MeshCA server -// returns errors initially and then returns a good certificate. The test makes -// sure that the client backs off when the server returns errors. -func (s) TestCreateCertificateWithBackoff(t *testing.T) { - e, addr, cancel := setup(t, opts{withbackoff: true}) - defer cancel() - - // Set the MeshCA targetURI to point to our fake MeshCA. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, addr)) - - // Lookup MeshCA plugin builder, parse config and start the plugin. - prov, err := certprovider.GetProvider(pluginName, inputConfig, certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("GetProvider(%s, %s) failed: %v", pluginName, string(inputConfig), err) - } - defer prov.Close() - - // Wait till the plugin dials the MeshCA server. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.dialDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to dial MeshCA") - } - - // Making the CreateCertificateRPC involves generating the keys, creating - // the CSR etc which seem to take reasonable amount of time. And in this - // test, the first two attempts will fail. Hence we give it a reasonable - // deadline here. - ctx, cancel = context.WithTimeout(context.Background(), 3*defaultTestTimeout) - defer cancel() - if _, err := e.createCertDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to make CreateCertificate RPC") - } - - // The first `maxErrCount` calls to CreateCertificate end in failure, and - // should lead to a backoff. 
- for i := 0; i < maxErrCount; i++ { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.backoffDone.Receive(ctx); err != nil { - t.Fatalf("plugin failed to backoff after error from fake server: %v", err) - } - } - - // We don't really care about the exact key material returned here. All we - // care about is whether we get any key material at all, and that we don't - // get any errors. - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err = prov.KeyMaterial(ctx); err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } -} - -// TestCreateCertificateWithRefresh verifies the case where the MeshCA returns a -// certificate with a really short lifetime, and makes sure that the plugin -// refreshes the cert in time. -func (s) TestCreateCertificateWithRefresh(t *testing.T) { - e, addr, cancel := setup(t, opts{withShortLife: true}) - defer cancel() - - // Set the MeshCA targetURI to point to our fake MeshCA. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, addr)) - - // Lookup MeshCA plugin builder, parse config and start the plugin. - prov, err := certprovider.GetProvider(pluginName, inputConfig, certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("GetProvider(%s, %s) failed: %v", pluginName, string(inputConfig), err) - } - defer prov.Close() - - // Wait till the plugin dials the MeshCA server. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.dialDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to dial MeshCA") - } - - // Wait till the plugin makes a CreateCertificate() call. 
- ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.createCertDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to make CreateCertificate RPC") - } - - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - km1, err := prov.KeyMaterial(ctx) - if err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } - - // At this point, we have read the first key material, and since the - // returned key material has a really short validity period, we expect the - // key material to be refreshed quite soon. We drain the channel on which - // the event corresponding to setting of new key material is pushed. This - // enables us to block on the same channel, waiting for refreshed key - // material. - // Since we do not expect this call to block, it is OK to pass the - // background context. - e.keyMaterialDone.Receive(context.Background()) - - // Wait for the next call to CreateCertificate() to refresh the certificate - // returned earlier. - ctx, cancel = context.WithTimeout(context.Background(), 2*shortTestCertLife) - defer cancel() - if _, err := e.keyMaterialDone.Receive(ctx); err != nil { - t.Fatalf("CreateCertificate() RPC not made: %v", err) - } - - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - km2, err := prov.KeyMaterial(ctx) - if err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } - - // TODO(easwars): Remove all references to reflect.DeepEqual and use - // cmp.Equal instead. Currently, the later panics because x509.Certificate - // type defines an Equal method, but does not check for nil. This has been - // fixed in - // https://github.com/golang/go/commit/89865f8ba64ccb27f439cce6daaa37c9aa38f351, - // but this is only available starting go1.14. So, once we remove support - // for go1.13, we can make the switch. 
- if reflect.DeepEqual(km1, km2) { - t.Error("certificate refresh did not happen in the background") - } -} diff --git a/regenerate.sh b/regenerate.sh index fc6725b89f84..dfd3226a1d96 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -48,11 +48,6 @@ mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto -# Pull in the MeshCA service proto. -mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 -echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" -curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto - mkdir -p ${WORKDIR}/out # Generates sources without the embed requirement @@ -76,7 +71,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto - ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto ) # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an @@ -122,8 +116,4 @@ mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_s mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ -# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ -mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . 
diff --git a/xds/go113.go b/xds/go113.go deleted file mode 100644 index 40f82cde5c1e..000000000000 --- a/xds/go113.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xds - -import ( - _ "google.golang.org/grpc/credentials/tls/certprovider/meshca" // Register the MeshCA certificate provider plugin. -) From 11bd77660dba95e270659c6a5077507ef37a8c41 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 4 May 2021 14:51:32 -0700 Subject: [PATCH 047/998] xds: work around xdsclient race in fault injection test (#4377) --- xds/internal/httpfilter/fault/fault_test.go | 54 +++++++++---------- xds/internal/testutils/e2e/clientresources.go | 6 +-- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 20de9b9a697a..65f616f44ea7 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -465,15 +465,8 @@ func (s) TestFaultInjection_Unary(t *testing.T) { fs, nodeID, port, cleanup := clientSetup(t) defer cleanup() - resources := e2e.DefaultClientResources("myservice", nodeID, "localhost", port) - hcm := new(v3httppb.HttpConnectionManager) - err := ptypes.UnmarshalAny(resources.Listeners[0].GetApiListener().GetApiListener(), hcm) - if err != nil { - t.Fatal(err) - } - routerFilter := 
hcm.HttpFilters[len(hcm.HttpFilters)-1] - for _, tc := range testCases { + for tcNum, tc := range testCases { t.Run(tc.name, func(t *testing.T) { defer func() { randIntn = grpcrand.Intn; newTimer = time.NewTimer }() var intnCalls []int @@ -489,6 +482,15 @@ func (s) TestFaultInjection_Unary(t *testing.T) { return time.NewTimer(0) } + serviceName := fmt.Sprintf("myservice%d", tcNum) + resources := e2e.DefaultClientResources(serviceName, nodeID, "localhost", port) + hcm := new(v3httppb.HttpConnectionManager) + err := ptypes.UnmarshalAny(resources.Listeners[0].GetApiListener().GetApiListener(), hcm) + if err != nil { + t.Fatal(err) + } + routerFilter := hcm.HttpFilters[len(hcm.HttpFilters)-1] + hcm.HttpFilters = nil for i, cfg := range tc.cfgs { hcm.HttpFilters = append(hcm.HttpFilters, e2e.HTTPFilter(fmt.Sprintf("fault%d", i), cfg)) @@ -506,7 +508,7 @@ func (s) TestFaultInjection_Unary(t *testing.T) { } // Create a ClientConn and run the test case. - cc, err := grpc.Dial("xds:///myservice", grpc.WithTransportCredentials(insecure.NewCredentials())) + cc, err := grpc.Dial("xds:///"+serviceName, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -515,25 +517,23 @@ func (s) TestFaultInjection_Unary(t *testing.T) { client := testpb.NewTestServiceClient(cc) count := 0 for _, want := range tc.want { - t.Run(want.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if want.repeat == 0 { - t.Fatalf("invalid repeat count") - } - for n := 0; n < want.repeat; n++ { - intnCalls = nil - newTimerCalls = nil - ctx = metadata.NewOutgoingContext(ctx, want.md) - _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) - t.Logf("RPC %d: err: %v, intnCalls: %v, newTimerCalls: %v", count, err, intnCalls, newTimerCalls) - if status.Code(err) != want.code || !reflect.DeepEqual(intnCalls, want.randIn) || 
!reflect.DeepEqual(newTimerCalls, want.delays) { - t.Errorf("WANTED code: %v, intnCalls: %v, newTimerCalls: %v", want.code, want.randIn, want.delays) - } - randOut += tc.randOutInc - count++ + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if want.repeat == 0 { + t.Fatalf("invalid repeat count") + } + for n := 0; n < want.repeat; n++ { + intnCalls = nil + newTimerCalls = nil + ctx = metadata.NewOutgoingContext(ctx, want.md) + _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) + t.Logf("%v: RPC %d: err: %v, intnCalls: %v, newTimerCalls: %v", want.name, count, err, intnCalls, newTimerCalls) + if status.Code(err) != want.code || !reflect.DeepEqual(intnCalls, want.randIn) || !reflect.DeepEqual(newTimerCalls, want.delays) { + t.Fatalf("WANTED code: %v, intnCalls: %v, newTimerCalls: %v", want.code, want.randIn, want.delays) } - }) + randOut += tc.randOutInc + count++ + } } }) } diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go index 79424b13b918..86b34861e3db 100644 --- a/xds/internal/testutils/e2e/clientresources.go +++ b/xds/internal/testutils/e2e/clientresources.go @@ -45,9 +45,9 @@ func any(m proto.Message) *anypb.Any { // DefaultClientResources returns a set of resources (LDS, RDS, CDS, EDS) for a // client to generically connect to one server. 
func DefaultClientResources(target, nodeID, host string, port uint32) UpdateOptions { - const routeConfigName = "route" - const clusterName = "cluster" - const endpointsName = "endpoints" + routeConfigName := "route-" + target + clusterName := "cluster-" + target + endpointsName := "endpoints-" + target return UpdateOptions{ NodeID: nodeID, From 79e55d64442716d4082d373540eac78b018e81c4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 4 May 2021 15:06:43 -0700 Subject: [PATCH 048/998] xds: use SendContext() to fail in time when the channel is full (#4386) --- xds/internal/test/xds_server_serving_mode_test.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 0055bc7be508..c1f9634b16b0 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -59,12 +59,17 @@ func newModeTracker() *modeTracker { } } -func (mt *modeTracker) updateMode(addr net.Addr, mode xds.ServingMode) { +func (mt *modeTracker) updateMode(ctx context.Context, addr net.Addr, mode xds.ServingMode) { mt.mu.Lock() defer mt.mu.Unlock() mt.modes[addr.String()] = mode - mt.updateCh.Send(nil) + // Sometimes we could get state updates which are not expected by the test. + // Using `Send()` here would block in that case and cause the whole test to + // hang and will eventually only timeout when the `-timeout` passed to `go + // test` elapses. Using `SendContext()` here instead fails the test within a + // reasonable timeout. + mt.updateCh.SendContext(ctx, nil) } func (mt *modeTracker) getMode(addr net.Addr) xds.ServingMode { @@ -120,10 +125,12 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Create a server option to get notified about serving mode changes. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() modeTracker := newModeTracker() modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { t.Logf("serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) - modeTracker.updateMode(addr, args.Mode) + modeTracker.updateMode(ctx, addr, args.Mode) }) // Initialize an xDS-enabled gRPC server and register the stubServer on it. @@ -164,8 +171,6 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Wait for both listeners to move to "serving" mode. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeServing); err != nil { t.Fatal(err) } From 4f3aa7cfa157c38bd5c2da7f4568614f815ab4ad Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 4 May 2021 15:29:58 -0700 Subject: [PATCH 049/998] xds: optimize fault injection filter with empty config (#4367) --- xds/internal/httpfilter/fault/fault.go | 7 ++++++- xds/internal/httpfilter/fault/fault_test.go | 22 +++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/xds/internal/httpfilter/fault/fault.go b/xds/internal/httpfilter/fault/fault.go index 639a08db8e3c..ee2ed9fd4922 100644 --- a/xds/internal/httpfilter/fault/fault.go +++ b/xds/internal/httpfilter/fault/fault.go @@ -125,7 +125,12 @@ func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (ir } } - return &interceptor{config: c.config}, nil + icfg := c.config + if (icfg.GetMaxActiveFaults() != nil && icfg.GetMaxActiveFaults().GetValue() == 0) || + (icfg.GetDelay() == nil && icfg.GetAbort() == nil) { + return nil, nil + } + return &interceptor{config: icfg}, nil } type interceptor struct { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 65f616f44ea7..624bc9f23461 100644 --- 
a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -161,6 +161,28 @@ func (s) TestFaultInjection_Unary(t *testing.T) { randOutInc int want []subcase }{{ + name: "max faults zero", + cfgs: []*fpb.HTTPFault{{ + MaxActiveFaults: wrapperspb.UInt32(0), + Abort: &fpb.FaultAbort{ + Percentage: &tpb.FractionalPercent{Numerator: 100, Denominator: tpb.FractionalPercent_HUNDRED}, + ErrorType: &fpb.FaultAbort_GrpcStatus{GrpcStatus: uint32(codes.Aborted)}, + }, + }}, + randOutInc: 5, + want: []subcase{{ + code: codes.OK, + repeat: 25, + }}, + }, { + name: "no abort or delay", + cfgs: []*fpb.HTTPFault{{}}, + randOutInc: 5, + want: []subcase{{ + code: codes.OK, + repeat: 25, + }}, + }, { name: "abort always", cfgs: []*fpb.HTTPFault{{ Abort: &fpb.FaultAbort{ From 0fc0397d779d96879d7b903c3fa1b9bd53e490e3 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 4 May 2021 16:54:57 -0700 Subject: [PATCH 050/998] xds: actually close stuff in cds/eds `Close()` (#4381) --- xds/internal/balancer/cdsbalancer/cdsbalancer.go | 8 ++++---- xds/internal/balancer/edsbalancer/eds.go | 6 +++++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index b991981c14c0..c072af45e00b 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -78,6 +78,7 @@ func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. bOpts: opts, updateCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), cancelWatch: func() {}, // No-op at this point. xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), } @@ -181,6 +182,7 @@ type cdsBalancer struct { clusterToWatch string logger *grpclog.PrefixLogger closed *grpcsync.Event + done *grpcsync.Event // The certificate providers are cached here to that they can be closed when // a new provider is to be created. 
@@ -380,9 +382,6 @@ func (b *cdsBalancer) run() { case *watchUpdate: b.handleWatchUpdate(update) } - - // Close results in cancellation of the CDS watch and closing of the - // underlying edsBalancer and is the only way to exit this goroutine. case <-b.closed.Done(): b.cancelWatch() b.cancelWatch = func() {} @@ -392,8 +391,8 @@ func (b *cdsBalancer) run() { b.edsLB = nil } b.xdsClient.Close() - // This is the *ONLY* point of return from this function. b.logger.Infof("Shutdown") + b.done.Fire() return } } @@ -494,6 +493,7 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub // Close closes the cdsBalancer and the underlying edsBalancer. func (b *cdsBalancer) Close() { b.closed.Fire() + <-b.done.Done() } // ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index de724701df94..d1a226e98987 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -65,6 +65,7 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp x := &edsBalancer{ cc: cc, closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), grpcUpdate: make(chan interface{}), xdsClientUpdate: make(chan *edsUpdate), childPolicyUpdate: buffer.NewUnbounded(), @@ -130,6 +131,7 @@ type edsBalancerImplInterface interface { type edsBalancer struct { cc balancer.ClientConn closed *grpcsync.Event + done *grpcsync.Event logger *grpclog.PrefixLogger // edsBalancer continuously monitors the channels below, and will handle @@ -170,6 +172,8 @@ func (x *edsBalancer) run() { x.cancelWatch() x.xdsClient.Close() x.edsImpl.close() + x.logger.Infof("Shutdown") + x.done.Fire() return } } @@ -379,7 +383,7 @@ func (x *edsBalancer) enqueueChildBalancerState(p priorityType, s balancer.State func (x *edsBalancer) Close() { x.closed.Fire() - x.logger.Infof("Shutdown") + <-x.done.Done() } // equalStringPointers returns 
true if From 40b25c5b2c2d1b06d5f5d750d759294c6037d995 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 5 May 2021 12:34:15 -0700 Subject: [PATCH 051/998] xds: set correct order of certificate providers in handshake info (#4350) --- xds/csds/csds_test.go | 4 +- .../balancer/cdsbalancer/cdsbalancer.go | 9 +- .../balancer/clustermanager/clustermanager.go | 1 + xds/internal/client/transport_helper.go | 2 +- xds/internal/httpfilter/fault/fault_test.go | 16 +- xds/internal/server/conn_wrapper.go | 2 +- .../test/xds_client_integration_test.go | 8 +- .../test/xds_server_integration_test.go | 452 +++++++++--------- .../test/xds_server_serving_mode_test.go | 37 +- xds/internal/testutils/e2e/bootstrap.go | 9 +- xds/internal/testutils/e2e/clientresources.go | 241 +++++++++- 11 files changed, 498 insertions(+), 283 deletions(-) diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 04a71a7d1e6c..018f770494b1 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -149,7 +149,7 @@ var ( func init() { for i := range ldsTargets { - listeners[i] = e2e.DefaultListener(ldsTargets[i], rdsTargets[i]) + listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) listenerAnys[i], _ = ptypes.MarshalAny(listeners[i]) } for i := range rdsTargets { @@ -157,7 +157,7 @@ func init() { routeAnys[i], _ = ptypes.MarshalAny(routes[i]) } for i := range cdsTargets { - clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i]) + clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i], e2e.SecurityLevelNone) clusterAnys[i], _ = ptypes.MarshalAny(clusters[i]) } for i := range edsTargets { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index c072af45e00b..bf1519bb8ce0 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -391,6 +391,12 @@ func (b *cdsBalancer) run() { b.edsLB = nil } b.xdsClient.Close() + if b.cachedRoot 
!= nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } b.logger.Infof("Shutdown") b.done.Fire() return @@ -490,7 +496,8 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub b.updateCh.Put(&scUpdate{subConn: sc, state: state}) } -// Close closes the cdsBalancer and the underlying edsBalancer. +// Close cancels the CDS watch, closes the child policy and closes the +// cdsBalancer. func (b *cdsBalancer) Close() { b.closed.Fire() <-b.done.Done() diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index 1e4dee7f5d3a..b4ae3710cd27 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -132,6 +132,7 @@ func (b *bal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnStat func (b *bal) Close() { b.stateAggregator.close() b.bg.Close() + b.logger.Infof("Shutdown") } const prefix = "[xds-cluster-manager-lb %p] " diff --git a/xds/internal/client/transport_helper.go b/xds/internal/client/transport_helper.go index b286a61d638b..671e5b3220f1 100644 --- a/xds/internal/client/transport_helper.go +++ b/xds/internal/client/transport_helper.go @@ -297,7 +297,7 @@ func (t *TransportHelper) sendExisting(stream grpc.ClientStream) bool { for rType, s := range t.watchMap { if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, "", "", ""); err != nil { - t.logger.Errorf("ADS request failed: %v", err) + t.logger.Warningf("ADS request failed: %v", err) return false } } diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 624bc9f23461..6aeea8a8a782 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -505,7 +505,13 @@ func (s) TestFaultInjection_Unary(t *testing.T) { } serviceName := fmt.Sprintf("myservice%d", tcNum) - resources := 
e2e.DefaultClientResources(serviceName, nodeID, "localhost", port) + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) hcm := new(v3httppb.HttpConnectionManager) err := ptypes.UnmarshalAny(resources.Listeners[0].GetApiListener().GetApiListener(), hcm) if err != nil { @@ -564,7 +570,13 @@ func (s) TestFaultInjection_Unary(t *testing.T) { func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { fs, nodeID, port, cleanup := clientSetup(t) defer cleanup() - resources := e2e.DefaultClientResources("myservice", nodeID, "localhost", port) + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: "myservice", + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) hcm := new(v3httppb.HttpConnectionManager) err := ptypes.UnmarshalAny(resources.Listeners[0].GetApiListener().GetApiListener(), hcm) if err != nil { diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index a92a9ddb038f..a02d75b21445 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -119,7 +119,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { c.identityProvider = ip c.rootProvider = rp - xdsHI := xdsinternal.NewHandshakeInfo(c.identityProvider, c.rootProvider) + xdsHI := xdsinternal.NewHandshakeInfo(c.rootProvider, c.identityProvider) xdsHI.SetRequireClientCert(secCfg.RequireClientCert) return xdsHI, nil } diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 39b3add77fbc..2e0e03aa3aca 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -92,7 +92,13 @@ func (s) TestClientSideXDS(t *testing.T) { fs, nodeID, port, cleanup := clientSetup(t) defer cleanup() - resources := 
e2e.DefaultClientResources("myservice", nodeID, "localhost", port) + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: "myservice", + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) if err := fs.Update(resources); err != nil { t.Fatal(err) } diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index d5b9b8dd20a2..5e266cdc5fb6 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -26,6 +26,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "encoding/json" "fmt" "io/ioutil" "net" @@ -34,26 +35,21 @@ import ( "strconv" "testing" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/uuid" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" - xdscreds "google.golang.org/grpc/credentials/xds" - "google.golang.org/grpc/internal/testutils" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" "google.golang.org/grpc/xds" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" + + xdscreds "google.golang.org/grpc/credentials/xds" + xdsinternal "google.golang.org/grpc/internal/xds" + testpb 
"google.golang.org/grpc/test/grpc_testing" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" ) const ( @@ -62,8 +58,7 @@ const ( keyFile = "key.pem" rootFile = "ca.pem" - // Template for server Listener resource name. - serverListenerResourceNameTemplate = "grpc/server?xds.resource.listening_address=%s" + xdsServiceName = "my-service" ) func createTmpFile(t *testing.T, src, dst string) { @@ -77,7 +72,6 @@ func createTmpFile(t *testing.T, src, dst string) { t.Fatalf("ioutil.WriteFile(%q) failed: %v", dst, err) } t.Logf("Wrote file at: %s", dst) - t.Logf("%s", string(data)) } // createTempDirWithFiles creates a temporary directory under the system default @@ -138,17 +132,29 @@ func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, func()) { t.Helper() - // Spin up a xDS management server on a local port. + // Turn on the env var protection for client-side security. + origClientSideSecurityEnvVar := env.ClientSideSecuritySupport + env.ClientSideSecuritySupport = true + + // Spin up an xDS management server on a local port. nodeID := uuid.New().String() fs, err := e2e.StartManagementServer() if err != nil { t.Fatal(err) } - // Create certificate and key files in a temporary directory and generate - // certificate provider configuration for a file_watcher plugin. - tmpdir := createTmpDirWithFiles(t, "testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") - cpc := e2e.DefaultFileWatcherConfig(path.Join(tmpdir, certFile), path.Join(tmpdir, keyFile), path.Join(tmpdir, rootFile)) + // Create a directory to hold certs and key files used on the server side. + serverDir := createTmpDirWithFiles(t, "testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + + // Create a directory to hold certs and key files used on the client side. 
+ clientDir := createTmpDirWithFiles(t, "testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + + // Create certificate providers section of the bootstrap config with entries + // for both the client and server sides. + cpc := map[string]json.RawMessage{ + e2e.ServerSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), + e2e.ClientSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(clientDir, certFile), path.Join(clientDir, keyFile), path.Join(clientDir, rootFile)), + } // Create a bootstrap file in a temporary directory. bootstrapCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ @@ -156,7 +162,7 @@ func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, fun NodeID: nodeID, ServerURI: fs.Address, CertificateProviders: cpc, - ServerListenerResourceNameTemplate: serverListenerResourceNameTemplate, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, }) if err != nil { t.Fatal(err) @@ -190,217 +196,74 @@ func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, fun fs.Stop() bootstrapCleanup() server.Stop() + env.ClientSideSecuritySupport = origClientSideSecurityEnvVar } } -func hostPortFromListener(t *testing.T, lis net.Listener) (string, uint32) { - t.Helper() - +func hostPortFromListener(lis net.Listener) (string, uint32, error) { host, p, err := net.SplitHostPort(lis.Addr().String()) if err != nil { - t.Fatalf("net.SplitHostPort(%s) failed: %v", lis.Addr().String(), err) + return "", 0, fmt.Errorf("net.SplitHostPort(%s) failed: %v", lis.Addr().String(), err) } port, err := strconv.ParseInt(p, 10, 32) if err != nil { - t.Fatalf("strconv.ParseInt(%s, 10, 32) failed: %v", p, err) - } - return host, uint32(port) - -} - -// listenerResourceWithoutSecurityConfig returns a listener resource with no -// security configuration, and 
name and address fields matching the passed in -// net.Listener. -func listenerResourceWithoutSecurityConfig(t *testing.T, lis net.Listener) *v3listenerpb.Listener { - host, port := hostPortFromListener(t, lis) - return &v3listenerpb.Listener{ - // This needs to match the name we are querying for. - Name: fmt.Sprintf(serverListenerResourceNameTemplate, lis.Addr().String()), - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: port, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - Filters: []*v3listenerpb.Filter{ - { - Name: "filter-1", - ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), - }, - }, - }, - }, - }, + return "", 0, fmt.Errorf("strconv.ParseInt(%s, 10, 32) failed: %v", p, err) } + return host, uint32(port), nil } -// listenerResourceWithSecurityConfig returns a listener resource with security -// configuration pointing to the use of the file_watcher certificate provider -// plugin, and name and address fields matching the passed in net.Listener. 
-func listenerResourceWithSecurityConfig(t *testing.T, lis net.Listener) *v3listenerpb.Listener { - transportSocket := &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "google_cloud_private_spiffe", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "google_cloud_private_spiffe", - }, - }, - }, - }), - }, - } - host, port := hostPortFromListener(t, lis) - return &v3listenerpb.Listener{ - // This needs to match the name we are querying for. - Name: fmt.Sprintf(serverListenerResourceNameTemplate, lis.Addr().String()), - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: port, - }}}}, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "v4-wildcard", - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "0.0.0.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: uint32(0), - }, - }, - }, - SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "0.0.0.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: uint32(0), - }, - }, - }, - }, - Filters: []*v3listenerpb.Filter{ - { - Name: "filter-1", - ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), - }, - }, - }, - TransportSocket: transportSocket, - }, - { - 
Name: "v6-wildcard", - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "::", - PrefixLen: &wrapperspb.UInt32Value{ - Value: uint32(0), - }, - }, - }, - SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "::", - PrefixLen: &wrapperspb.UInt32Value{ - Value: uint32(0), - }, - }, - }, - }, - Filters: []*v3listenerpb.Filter{ - { - Name: "filter-1", - ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), - }, - }, - }, - TransportSocket: transportSocket, - }, - }, - } -} - -// TestServerSideXDS_Fallback is an e2e test where xDS is enabled on the -// server-side and xdsCredentials are configured for security. The control plane -// does not provide any security configuration and therefore the xdsCredentials -// uses fallback credentials, which in this case is insecure creds. +// TestServerSideXDS_Fallback is an e2e test which verifies xDS credentials +// fallback functionality. +// +// The following sequence of events happen as part of this test: +// - An xDS-enabled gRPC server is created and xDS credentials are configured. +// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. +// - Control plane is configured to not send any security configuration to both +// the client and the server. This results in both of them using the +// configured fallback credentials (which is insecure creds in this case). func (s) TestServerSideXDS_Fallback(t *testing.T) { fs, nodeID, lis, cleanup := commonSetup(t) defer cleanup() - // Setup the fake management server to respond with a Listener resource that - // does not contain any security configuration. This should force the - // server-side xdsCredentials to use fallback. 
- listener := listenerResourceWithoutSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) - } - - // Create a ClientConn and make a successful RPC. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cc, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + // Grab the host and port of the server and create client side xDS resources + // corresponding to it. This contains default resources with no security + // configuration in the Cluster resources. + host, port, err := hostPortFromListener(lis) if err != nil { - t.Fatalf("failed to dial local test server: %v", err) + t.Fatalf("failed to retrieve host and port of server: %v", err) } - defer cc.Close() + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: xdsServiceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } -} + // Create an inbound xDS listener resource for the server side that does not + // contain any security configuration. This should force the server-side + // xdsCredentials to use fallback. + inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources.Listeners = append(resources.Listeners, inboundLis) -// TestServerSideXDS_FileWatcherCerts is an e2e test where xDS is enabled on the -// server-side and xdsCredentials are configured for security. The control plane -// sends security configuration pointing to the use of the file_watcher plugin, -// and we verify that a client connecting with TLS creds is able to successfully -// make an RPC. 
-func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { - fs, nodeID, lis, cleanup := commonSetup(t) - defer cleanup() + // Setup the management server with client and server-side resources. + if err := fs.Update(resources); err != nil { + t.Fatal(err) + } - // Setup the fake management server to respond with a Listener resource with - // security configuration pointing to the file watcher plugin and requiring - // mTLS. - listener := listenerResourceWithSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) } - // Create a ClientConn with TLS creds and make a successful RPC. - clientCreds := createClientTLSCredentials(t) + // Create a ClientConn with the xds scheme and make a successful RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(clientCreds)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", xdsServiceName), grpc.WithTransportCredentials(creds)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -412,55 +275,178 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { } } +// TestServerSideXDS_FileWatcherCerts is an e2e test which verifies xDS +// credentials with file watcher certificate provider. +// +// The following sequence of events happen as part of this test: +// - An xDS-enabled gRPC server is created and xDS credentials are configured. +// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. 
+// - Control plane is configured to send security configuration to both the +// client and the server, pointing to the file watcher certificate provider. +// We verify both TLS and mTLS scenarios. +func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { + tests := []struct { + name string + secLevel e2e.SecurityLevel + }{ + { + name: "tls", + secLevel: e2e.SecurityLevelTLS, + }, + { + name: "mtls", + secLevel: e2e.SecurityLevelMTLS, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fs, nodeID, lis, cleanup := commonSetup(t) + defer cleanup() + + // Grab the host and port of the server and create client side xDS + // resources corresponding to it. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + + // Create xDS resources to be consumed on the client side. This + // includes the listener, route configuration, cluster (with + // security configuration) and endpoint resources. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: xdsServiceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: test.secLevel, + }) + + // Create an inbound xDS listener resource for the server side that + // contains security configuration pointing to the file watcher + // plugin. + inboundLis := e2e.DefaultServerListener(host, port, test.secLevel) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server resources. + if err := fs.Update(resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make an RPC. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", xdsServiceName), grpc.WithTransportCredentials(creds)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + }) + } +} + // TestServerSideXDS_SecurityConfigChange is an e2e test where xDS is enabled on // the server-side and xdsCredentials are configured for security. The control // plane initially does not any security configuration. This forces the // xdsCredentials to use fallback creds, which is this case is insecure creds. // We verify that a client connecting with TLS creds is not able to successfully -// make an RPC. The control plan then sends a listener resource with security +// make an RPC. The control plane then sends a listener resource with security // configuration pointing to the use of the file_watcher plugin and we verify // that the same client is now able to successfully make an RPC. func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { fs, nodeID, lis, cleanup := commonSetup(t) defer cleanup() - // Setup the fake management server to respond with a Listener resource that - // does not contain any security configuration. This should force the - // server-side xdsCredentials to use fallback. - listener := listenerResourceWithoutSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) + // Grab the host and port of the server and create client side xDS resources + // corresponding to it. This contains default resources with no security + // configuration in the Cluster resource. 
This should force the xDS + // credentials on the client to use its fallback. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) } + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: xdsServiceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) - // Create a ClientConn with TLS creds. This should fail since the server is - // using fallback credentials which in this case in insecure creds. - clientCreds := createClientTLSCredentials(t) + // Create an inbound xDS listener resource for the server side that does not + // contain any security configuration. This should force the xDS credentials + // on server to use its fallback. + inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server-side resources. + if err := fs.Update(resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + xdsCreds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make a successful RPC. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(clientCreds)) + xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", xdsServiceName), grpc.WithTransportCredentials(xdsCreds)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } - defer cc.Close() + defer xdsCC.Close() + + client := testpb.NewTestServiceClient(xdsCC) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Create a ClientConn with TLS creds. This should fail since the server is + // using fallback credentials which in this case in insecure creds. + tlsCreds := createClientTLSCredentials(t) + tlsCC, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(tlsCreds)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer tlsCC.Close() // We don't set 'waitForReady` here since we want this call to failfast. - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Code() != codes.Unavailable { + client = testpb.NewTestServiceClient(tlsCC) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { t.Fatal("rpc EmptyCall() succeeded when expected to fail") } - // Setup the fake management server to respond with a Listener resource with - // security configuration pointing to the file watcher plugin and requiring - // mTLS. - listener = listenerResourceWithSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) + // Switch server and client side resources with ones that contain required + // security configuration for mTLS with a file watcher certificate provider. 
+ resources = e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: xdsServiceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) + inboundLis = e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) + resources.Listeners = append(resources.Listeners, inboundLis) + if err := fs.Update(resources); err != nil { + t.Fatal(err) } // Make another RPC with `waitForReady` set and expect this to succeed. diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index c1f9634b16b0..484a5b5ab741 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -24,6 +24,7 @@ package xds_test import ( "context" + "encoding/json" "fmt" "net" "path" @@ -98,10 +99,13 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } defer fs.Stop() - // Create certificate and key files in a temporary directory and generate - // certificate provider configuration for a file_watcher plugin. - tmpdir := createTmpDirWithFiles(t, "testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") - cpc := e2e.DefaultFileWatcherConfig(path.Join(tmpdir, certFile), path.Join(tmpdir, keyFile), path.Join(tmpdir, rootFile)) + // Create a directory to hold certs and key files used on the server side. + serverDir := createTmpDirWithFiles(t, "testServerSideServingMode*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + + // Create certificate providers section of the bootstrap config. + cpc := map[string]json.RawMessage{ + e2e.ServerSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), + } // Create a bootstrap file in a temporary directory. 
bsCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ @@ -109,7 +113,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { NodeID: nodeID, ServerURI: fs.Address, CertificateProviders: cpc, - ServerListenerResourceNameTemplate: serverListenerResourceNameTemplate, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, }) if err != nil { t.Fatal(err) @@ -159,15 +163,24 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } }() - // Setup the fake management server to respond with Listener resources that - // we are interested in. - listener1 := listenerResourceWithoutSecurityConfig(t, lis1) - listener2 := listenerResourceWithoutSecurityConfig(t, lis2) - if err := fs.Update(e2e.UpdateOptions{ + // Setup the management server to respond with server-side Listener + // resources for both listeners. + host1, port1, err := hostPortFromListener(lis1) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + listener1 := e2e.DefaultServerListener(host1, port1, e2e.SecurityLevelNone) + host2, port2, err := hostPortFromListener(lis2) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + listener2 := e2e.DefaultServerListener(host2, port2, e2e.SecurityLevelNone) + resources := e2e.UpdateOptions{ NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, - }); err != nil { - t.Error(err) + } + if err := fs.Update(resources); err != nil { + t.Fatal(err) } // Wait for both listeners to move to "serving" mode. diff --git a/xds/internal/testutils/e2e/bootstrap.go b/xds/internal/testutils/e2e/bootstrap.go index 72a1a9900cfe..99702032f817 100644 --- a/xds/internal/testutils/e2e/bootstrap.go +++ b/xds/internal/testutils/e2e/bootstrap.go @@ -26,8 +26,8 @@ import ( // DefaultFileWatcherConfig is a helper function to create a default certificate // provider plugin configuration. 
The test is expected to have setup the files // appropriately before this configuration is used to instantiate providers. -func DefaultFileWatcherConfig(certPath, keyPath, caPath string) map[string]json.RawMessage { - cfg := fmt.Sprintf(`{ +func DefaultFileWatcherConfig(certPath, keyPath, caPath string) json.RawMessage { + return json.RawMessage(fmt.Sprintf(`{ "plugin_name": "file_watcher", "config": { "certificate_file": %q, @@ -35,8 +35,5 @@ func DefaultFileWatcherConfig(certPath, keyPath, caPath string) map[string]json. "ca_certificate_file": %q, "refresh_interval": "600s" } - }`, certPath, keyPath, caPath) - return map[string]json.RawMessage{ - "google_cloud_private_spiffe": json.RawMessage(cfg), - } + }`, certPath, keyPath, caPath)) } diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go index 86b34861e3db..b521db950558 100644 --- a/xds/internal/testutils/e2e/clientresources.go +++ b/xds/internal/testutils/e2e/clientresources.go @@ -19,9 +19,11 @@ package e2e import ( + "fmt" + "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/testutils" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -30,37 +32,73 @@ import ( v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - anypb "github.com/golang/protobuf/ptypes/any" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" wrapperspb 
"github.com/golang/protobuf/ptypes/wrappers" ) -func any(m proto.Message) *anypb.Any { - a, err := ptypes.MarshalAny(m) - if err != nil { - panic("error marshalling any: " + err.Error()) - } - return a +const ( + // ServerListenerResourceNameTemplate is the Listener resource name template + // used on the server side. + ServerListenerResourceNameTemplate = "grpc/server?xds.resource.listening_address=%s" + // ClientSideCertProviderInstance is the certificate provider instance name + // used in the Cluster resource on the client side. + ClientSideCertProviderInstance = "client-side-certificate-provider-instance" + // ServerSideCertProviderInstance is the certificate provider instance name + // used in the Listener resource on the server side. + ServerSideCertProviderInstance = "server-side-certificate-provider-instance" +) + +// SecurityLevel allows the test to control the security level to be used in the +// resource returned by this package. +type SecurityLevel int + +const ( + // SecurityLevelNone is used when no security configuration is required. + SecurityLevelNone SecurityLevel = iota + // SecurityLevelTLS is used when security configuration corresponding to TLS + // is required. Only the server presents an identity certificate in this + // configuration. + SecurityLevelTLS + // SecurityLevelMTLS is used when security ocnfiguration corresponding to + // mTLS is required. Both client and server present identity certificates in + // this configuration. + SecurityLevelMTLS +) + +// ResourceParams wraps the arguments to be passed to DefaultClientResources. +type ResourceParams struct { + // DialTarget is the client's dial target. This is used as the name of the + // Listener resource. + DialTarget string + // NodeID is the id of the xdsClient to which this update is to be pushed. + NodeID string + // Host is the host of the default Endpoint resource. + Host string + // port is the port of the default Endpoint resource. 
+ Port uint32 + // SecLevel controls the security configuration in the Cluster resource. + SecLevel SecurityLevel } // DefaultClientResources returns a set of resources (LDS, RDS, CDS, EDS) for a // client to generically connect to one server. -func DefaultClientResources(target, nodeID, host string, port uint32) UpdateOptions { - routeConfigName := "route-" + target - clusterName := "cluster-" + target - endpointsName := "endpoints-" + target - +func DefaultClientResources(params ResourceParams) UpdateOptions { + routeConfigName := "route-" + params.DialTarget + clusterName := "cluster-" + params.DialTarget + endpointsName := "endpoints-" + params.DialTarget return UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{DefaultListener(target, routeConfigName)}, - Routes: []*v3routepb.RouteConfiguration{DefaultRouteConfig(routeConfigName, target, clusterName)}, - Clusters: []*v3clusterpb.Cluster{DefaultCluster(clusterName, endpointsName)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{DefaultEndpoint(endpointsName, host, port)}, + NodeID: params.NodeID, + Listeners: []*v3listenerpb.Listener{DefaultClientListener(params.DialTarget, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{DefaultRouteConfig(routeConfigName, params.DialTarget, clusterName)}, + Clusters: []*v3clusterpb.Cluster{DefaultCluster(clusterName, endpointsName, params.SecLevel)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{DefaultEndpoint(endpointsName, params.Host, params.Port)}, } } -// DefaultListener returns a basic xds Listener resource. -func DefaultListener(target, routeName string) *v3listenerpb.Listener { - hcm := any(&v3httppb.HttpConnectionManager{ +// DefaultClientListener returns a basic xds Listener resource to be used on +// the client side. 
+func DefaultClientListener(target, routeName string) *v3listenerpb.Listener { + hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ ConfigSource: &v3corepb.ConfigSource{ ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, @@ -82,12 +120,130 @@ func DefaultListener(target, routeName string) *v3listenerpb.Listener { } } +// DefaultServerListener returns a basic xds Listener resource to be used on +// the server side. +func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3listenerpb.Listener { + var tlsContext *v3tlspb.DownstreamTlsContext + switch secLevel { + case SecurityLevelNone: + case SecurityLevelTLS: + tlsContext = &v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ServerSideCertProviderInstance, + }, + }, + } + case SecurityLevelMTLS: + tlsContext = &v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ServerSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ServerSideCertProviderInstance, + }, + }, + }, + } + } + + var ts *v3corepb.TransportSocket + if tlsContext != nil { + ts = &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(tlsContext), + }, + } + } + return &v3listenerpb.Listener{ + Name: fmt.Sprintf(ServerListenerResourceNameTemplate, fmt.Sprintf("%s:%d", host, port)), + 
Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, + TransportSocket: ts, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, + TransportSocket: ts, + }, + }, + } +} + // HTTPFilter constructs an xds HttpFilter with the provided name and config. 
func HTTPFilter(name string, config proto.Message) *v3httppb.HttpFilter { return &v3httppb.HttpFilter{ Name: name, ConfigType: &v3httppb.HttpFilter_TypedConfig{ - TypedConfig: any(config), + TypedConfig: testutils.MarshalAny(config), }, } } @@ -109,8 +265,36 @@ func DefaultRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.Rou } // DefaultCluster returns a basic xds Cluster resource. -func DefaultCluster(clusterName, edsServiceName string) *v3clusterpb.Cluster { - return &v3clusterpb.Cluster{ +func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) *v3clusterpb.Cluster { + var tlsContext *v3tlspb.UpstreamTlsContext + switch secLevel { + case SecurityLevelNone: + case SecurityLevelTLS: + tlsContext = &v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ClientSideCertProviderInstance, + }, + }, + }, + } + case SecurityLevelMTLS: + tlsContext = &v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ClientSideCertProviderInstance, + }, + }, + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ClientSideCertProviderInstance, + }, + }, + } + } + + cluster := &v3clusterpb.Cluster{ Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ @@ -123,6 +307,15 @@ func DefaultCluster(clusterName, edsServiceName string) *v3clusterpb.Cluster { }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, } + if tlsContext != nil { + 
cluster.TransportSocket = &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(tlsContext), + }, + } + } + return cluster } // DefaultEndpoint returns a basic xds Endpoint resource. From d426aa5f2e5e809639b45d9619416ce22e56319a Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Wed, 5 May 2021 13:37:13 -0700 Subject: [PATCH 052/998] test: extend the xDS interop tests timeout to 360 mins (#4380) --- test/kokoro/xds.cfg | 2 +- test/kokoro/xds_v3.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/kokoro/xds.cfg b/test/kokoro/xds.cfg index d1a078217b84..a1e4ed0bb5e6 100644 --- a/test/kokoro/xds.cfg +++ b/test/kokoro/xds.cfg @@ -2,7 +2,7 @@ # Location of the continuous shell script in repository. build_file: "grpc-go/test/kokoro/xds.sh" -timeout_mins: 120 +timeout_mins: 360 action { define_artifacts { regex: "**/*sponge_log.*" diff --git a/test/kokoro/xds_v3.cfg b/test/kokoro/xds_v3.cfg index c4c8aad9e6f2..1991efd325d3 100644 --- a/test/kokoro/xds_v3.cfg +++ b/test/kokoro/xds_v3.cfg @@ -2,7 +2,7 @@ # Location of the continuous shell script in repository. 
build_file: "grpc-go/test/kokoro/xds_v3.sh" -timeout_mins: 120 +timeout_mins: 360 action { define_artifacts { regex: "**/*sponge_log.*" From d2d6bdae07e844b8a3502dcaf00dc7b1b5519a59 Mon Sep 17 00:00:00 2001 From: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Date: Fri, 7 May 2021 02:40:54 +1000 Subject: [PATCH 053/998] server: add ForceServerCodec() to set a custom encoding.Codec on the server (#4205) --- rpc_util.go | 4 +-- server.go | 29 ++++++++++++++++++++++ test/end2end_test.go | 58 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 2 deletions(-) diff --git a/rpc_util.go b/rpc_util.go index c0a1208f2f30..c8ae0e4444c7 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -429,9 +429,9 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodec returns a CallOption that will set the given Codec to be +// ForceCodec returns a CallOption that will set codec to be // used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. +// Name() will be used as the content-subtype in a case-insensitive manner. // // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for diff --git a/server.go b/server.go index b2793ab00b53..d2bc884277a4 100644 --- a/server.go +++ b/server.go @@ -279,6 +279,35 @@ func CustomCodec(codec Codec) ServerOption { }) } +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. 
Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + // RPCCompressor returns a ServerOption that sets a compressor for outbound // messages. For backward compatibility, all outbound messages will be sent // using this compressor, regardless of incoming message compression. By diff --git a/test/end2end_test.go b/test/end2end_test.go index 1baf2e347d15..86fc480669ce 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -5284,6 +5284,37 @@ func (s) TestGRPCMethod(t *testing.T) { } } +func (s) TestForceServerCodec(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + codec := &countingProtoCodec{} + if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(codec)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + unmarshalCount := atomic.LoadInt32(&codec.unmarshalCount) + const wantUnmarshalCount = 1 + if unmarshalCount != wantUnmarshalCount { + 
t.Fatalf("protoCodec.unmarshalCount = %d; want %d", unmarshalCount, wantUnmarshalCount) + } + marshalCount := atomic.LoadInt32(&codec.marshalCount) + const wantMarshalCount = 1 + if marshalCount != wantMarshalCount { + t.Fatalf("protoCodec.marshalCount = %d; want %d", marshalCount, wantMarshalCount) + } +} + func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { const mdkey = "somedata" @@ -5653,6 +5684,33 @@ func (c *errCodec) Name() string { return "Fermat's near-miss." } +type countingProtoCodec struct { + marshalCount int32 + unmarshalCount int32 +} + +func (p *countingProtoCodec) Marshal(v interface{}) ([]byte, error) { + atomic.AddInt32(&p.marshalCount, 1) + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (p *countingProtoCodec) Unmarshal(data []byte, v interface{}) error { + atomic.AddInt32(&p.unmarshalCount, 1) + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + return proto.Unmarshal(data, vv) +} + +func (*countingProtoCodec) Name() string { + return "proto" +} + func (s) TestEncodeDoesntPanic(t *testing.T) { for _, e := range listTestEnv() { testEncodeDoesntPanic(t, e) From cb396472c2f78e923dc0b28565c9d704291196f8 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 6 May 2021 13:28:27 -0700 Subject: [PATCH 054/998] Revert "grpc: call balancer.Close() before returning from ccBalancerWrapper.close()" (#4391) This reverts commit 28078834f35b944281662807d8ec071645c37307. 
--- balancer_conn_wrappers.go | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 38b48fcdc5b4..4cc7f9159b16 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -69,10 +69,10 @@ func (ccb *ccBalancerWrapper) watcher() { select { case t := <-ccb.scBuffer.Get(): ccb.scBuffer.Load() - ccb.balancerMu.Lock() if ccb.done.HasFired() { break } + ccb.balancerMu.Lock() su := t.(*scStateUpdate) ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) ccb.balancerMu.Unlock() @@ -80,6 +80,7 @@ func (ccb *ccBalancerWrapper) watcher() { } if ccb.done.HasFired() { + ccb.balancer.Close() ccb.mu.Lock() scs := ccb.subConns ccb.subConns = nil @@ -94,9 +95,6 @@ func (ccb *ccBalancerWrapper) watcher() { } func (ccb *ccBalancerWrapper) close() { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - ccb.balancer.Close() ccb.done.Fire() } @@ -121,19 +119,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() - if ccb.done.HasFired() { - return nil - } return ccb.balancer.UpdateClientConnState(*ccs) } func (ccb *ccBalancerWrapper) resolverError(err error) { ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - if ccb.done.HasFired() { - return - } ccb.balancer.ResolverError(err) + ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { From c7ea734087dbbcdb22137ab3b7d8b16842b080bf Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 7 May 2021 08:28:34 -0400 Subject: [PATCH 055/998] dns: fix flaky TestRateLimitedResolve (#4387) * Rewrote TestRateLimitedResolve in dns resolver test to get rid 
of flakiness. --- internal/resolver/dns/dns_resolver.go | 10 +- internal/resolver/dns/dns_resolver_test.go | 141 +++++++++++++-------- 2 files changed, 92 insertions(+), 59 deletions(-) diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 9d86460ab6fb..03825bbe7b56 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -47,8 +47,12 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// A global to stub out in tests. -var newTimer = time.NewTimer +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) func init() { resolver.Register(NewBuilder()) @@ -219,7 +223,7 @@ func (d *dnsResolver) watcher() { // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least // to prevent constantly re-resolving. backoffIndex = 1 - timer = time.NewTimer(minDNSResRate) + timer = newTimerDNSResRate(minDNSResRate) select { case <-d.ctx.Done(): timer.Stop() diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 52067e39cc68..73749eca44b0 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -43,14 +43,15 @@ func TestMain(m *testing.M) { // Set a non-zero duration only for tests which are actually testing that // feature. 
replaceDNSResRate(time.Duration(0)) // No nead to clean up since we os.Exit - replaceNetFunc(nil) // No nead to clean up since we os.Exit + overrideDefaultResolver(false) // No nead to clean up since we os.Exit code := m.Run() os.Exit(code) } const ( - txtBytesLimit = 255 - defaultTestTimeout = 10 * time.Second + txtBytesLimit = 255 + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond ) type testClientConn struct { @@ -106,12 +107,12 @@ type testResolver struct { // A write to this channel is made when this resolver receives a resolution // request. Tests can rely on reading from this channel to be notified about // resolution requests instead of sleeping for a predefined period of time. - ch chan struct{} + lookupHostCh *testutils.Channel } func (tr *testResolver) LookupHost(ctx context.Context, host string) ([]string, error) { - if tr.ch != nil { - tr.ch <- struct{}{} + if tr.lookupHostCh != nil { + tr.lookupHostCh.Send(nil) } return hostLookup(host) } @@ -124,9 +125,17 @@ func (*testResolver) LookupTXT(ctx context.Context, host string) ([]string, erro return txtLookup(host) } -func replaceNetFunc(ch chan struct{}) func() { +// overrideDefaultResolver overrides the defaultResolver used by the code with +// an instance of the testResolver. pushOnLookup controls whether the +// testResolver created here pushes lookupHost events on its channel. +func overrideDefaultResolver(pushOnLookup bool) func() { oldResolver := defaultResolver - defaultResolver = &testResolver{ch: ch} + + var lookupHostCh *testutils.Channel + if pushOnLookup { + lookupHostCh = testutils.NewChannel() + } + defaultResolver = &testResolver{lookupHostCh: lookupHostCh} return func() { defaultResolver = oldResolver @@ -1451,23 +1460,33 @@ func TestCustomAuthority(t *testing.T) { // requests. It sets the re-resolution rate to a small value and repeatedly // calls ResolveNow() and ensures only the expected number of resolution // requests are made. 
+ func TestRateLimitedResolve(t *testing.T) { defer leakcheck.Check(t) defer func(nt func(d time.Duration) *time.Timer) { newTimer = nt }(newTimer) newTimer = func(d time.Duration) *time.Timer { - // Will never fire on its own, will protect from triggering exponential backoff. + // Will never fire on its own, will protect from triggering exponential + // backoff. return time.NewTimer(time.Hour) } + defer func(nt func(d time.Duration) *time.Timer) { + newTimerDNSResRate = nt + }(newTimerDNSResRate) - const dnsResRate = 10 * time.Millisecond - dc := replaceDNSResRate(dnsResRate) - defer dc() + timerChan := testutils.NewChannel() + newTimerDNSResRate = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer + // immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } // Create a new testResolver{} for this test because we want the exact count // of the number of times the resolver was invoked. - nc := replaceNetFunc(make(chan struct{})) + nc := overrideDefaultResolver(true) defer nc() target := "foo.bar.com" @@ -1490,55 +1509,65 @@ func TestRateLimitedResolve(t *testing.T) { t.Fatalf("delegate resolver returned unexpected type: %T\n", tr) } - // Observe the time before unblocking the lookupHost call. The 100ms rate - // limiting timer will begin immediately after that. This means the next - // resolution could happen less than 100ms if we read the time *after* - // receiving from tr.ch - start := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() // Wait for the first resolution request to be done. This happens as part - // of the first iteration of the for loop in watcher() because we call - // ResolveNow in Build. - <-tr.ch - - // Here we start a couple of goroutines. One repeatedly calls ResolveNow() - // until asked to stop, and the other waits for two resolution requests to be - // made to our testResolver and stops the former. 
We measure the start and - // end times, and expect the duration elapsed to be in the interval - // {wantCalls*dnsResRate, wantCalls*dnsResRate} - done := make(chan struct{}) - go func() { - for { - select { - case <-done: - return - default: - r.ResolveNow(resolver.ResolveNowOptions{}) - time.Sleep(1 * time.Millisecond) - } - } - }() + // of the first iteration of the for loop in watcher(). + if _, err := tr.lookupHostCh.Receive(ctx); err != nil { + t.Fatalf("Timed out waiting for lookup() call.") + } - gotCalls := 0 - const wantCalls = 3 - min, max := wantCalls*dnsResRate, (wantCalls+1)*dnsResRate - tMax := time.NewTimer(max) - for gotCalls != wantCalls { - select { - case <-tr.ch: - gotCalls++ - case <-tMax.C: - t.Fatalf("Timed out waiting for %v calls after %v; got %v", wantCalls, max, gotCalls) - } + // Call Resolve Now 100 times, shouldn't continue onto next iteration of + // watcher, thus shouldn't lookup again. + for i := 0; i <= 100; i++ { + r.ResolveNow(resolver.ResolveNowOptions{}) + } + + continueCtx, continueCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer continueCancel() + + if _, err := tr.lookupHostCh.Receive(continueCtx); err == nil { + t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.") + } + + // Make the DNSMinResRate timer fire immediately (by receiving it, then + // resetting to 0), this will unblock the resolver which is currently + // blocked on the DNS Min Res Rate timer going off, which will allow it to + // continue to the next iteration of the watcher loop. 
+ timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) } - close(done) - elapsed := time.Since(start) + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) - if gotCalls != wantCalls { - t.Fatalf("resolve count mismatch for target: %q = %+v, want %+v\n", target, gotCalls, wantCalls) + // Now that DNS Min Res Rate timer has gone off, it should lookup again. + if _, err := tr.lookupHostCh.Receive(ctx); err != nil { + t.Fatalf("Timed out waiting for lookup() call.") } - if elapsed < min { - t.Fatalf("elapsed time: %v, wanted it to be between {%v and %v}", elapsed, min, max) + + // Resolve Now 1000 more times, shouldn't lookup again as DNS Min Res Rate + // timer has not gone off. + for i := 0; i < 1000; i++ { + r.ResolveNow(resolver.ResolveNowOptions{}) + } + + if _, err = tr.lookupHostCh.Receive(continueCtx); err == nil { + t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.") + } + + // Make the DNSMinResRate timer fire immediately again. + timer, err = timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer = timer.(*time.Timer) + timerPointer.Reset(0) + + // Now that DNS Min Res Rate timer has gone off, it should lookup again. + if _, err = tr.lookupHostCh.Receive(ctx); err != nil { + t.Fatalf("Timed out waiting for lookup() call.") } wantAddrs := []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}} From b6f206b84f739768a1c75c1c83fe50ed75845245 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 7 May 2021 11:17:26 -0700 Subject: [PATCH 056/998] grpc: improve docs on StreamDesc (#4397) --- stream.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/stream.go b/stream.go index 77d25742cc3d..1f3e70d2c440 100644 --- a/stream.go +++ b/stream.go @@ -52,14 +52,20 @@ import ( // of the RPC. 
type StreamHandler func(srv interface{}, stream ServerStream) error -// StreamDesc represents a streaming RPC service's method specification. +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends } // Stream defines the common interface a client or server stream has to satisfy. From 0ab423af82154f9466b48cfece8043314e7114d4 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 7 May 2021 11:55:48 -0700 Subject: [PATCH 057/998] test: fix flaky GoAwayThenClose (#4394) In this test, we 1. make a streaming RPC on a connection 1. graceful stop it to send a GOAWAY 1. hard stop it, so the client will create a connection to another server Before this fix, 2 and 3 can happen too soon, so the RPC in 1 would fail and then transparent retry (because the stream is unprocessed by the server in that case). This retry attempt could pick the new connection, and then the RPC would block until timeout. After this streaming RPC fails, we make unary RPCs with the same deadline (note: deadline not timeout) as the streaming RPC and expect them to succeed. But they will also fail due to timeout. 
The fix is to make a round-trip on the streaming RPC first, to make sure it actually goes on the first connection. --- test/end2end_test.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/test/end2end_test.go b/test/end2end_test.go index 86fc480669ce..a63dfa1409fd 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -6921,6 +6921,10 @@ func (s) TestGoAwayThenClose(t *testing.T) { return &testpb.SimpleResponse{}, nil }, fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { + t.Errorf("unexpected error from send: %v", err) + return err + } // Wait forever. _, err := stream.Recv() if err == nil { @@ -6954,12 +6958,19 @@ func (s) TestGoAwayThenClose(t *testing.T) { client := testpb.NewTestServiceClient(cc) - // Should go on connection 1. We use a long-lived RPC because it will cause GracefulStop to send GO_AWAY, but the - // connection doesn't get closed until the server stops and the client receives. + // We make a streaming RPC and do an one-message-round-trip to make sure + // it's created on connection 1. + // + // We use a long-lived RPC because it will cause GracefulStop to send + // GO_AWAY, but the connection doesn't get closed until the server stops and + // the client receives the error. stream, err := client.FullDuplexCall(ctx) if err != nil { t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err) } + if _, err = stream.Recv(); err != nil { + t.Fatalf("unexpected error from first recv: %v", err) + } r.UpdateState(resolver.State{Addresses: []resolver.Address{ {Addr: lis1.Addr().String()}, @@ -6976,8 +6987,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { s1.Stop() // Wait for client to close. 
- _, err = stream.Recv() - if err == nil { + if _, err = stream.Recv(); err == nil { t.Fatal("expected the stream to die, but got a successful Recv") } From 0439465fe2b4020767d9aab1bc3055e492c14089 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 7 May 2021 11:57:56 -0700 Subject: [PATCH 058/998] xds_resolver: fix flaky Test/XDSResolverDelayedOnCommitted (#4393) Before this change, if two xds client updates came too close together, the second one could replace the first one. The fix is to wait for the effects of the first update before sending the second update. I injected a synthetic delay into handling the updates from the channel to reproduce this flake 100%, and confirmed this change fixes it. As part of this change I also noticed that we're actually calling the context cancellation function twice via defers, and never the cancel function from the test setup, so I fixed that, too. --- xds/internal/resolver/xds_resolver_test.go | 85 +++++++++------------- 1 file changed, 34 insertions(+), 51 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 8ec29af9ebf2..eca561f369c5 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -289,10 +289,8 @@ func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -317,10 +315,8 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), 
defaultTestTimeout) defer cancel() @@ -407,7 +403,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { defer cancel() gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -475,7 +471,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -498,7 +494,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) if _, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } // "Finish the RPC"; this could cause a panic if the resolver doesn't @@ -544,7 +540,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -577,7 +573,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) if gotState, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState = gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -602,7 +598,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { // In 
the meantime, an empty ServiceConfig update should have been sent. if gotState, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState = gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -621,10 +617,8 @@ func (s) TestXDSResolverWRR(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -651,7 +645,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -684,10 +678,8 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -722,7 +714,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -789,10 +781,8 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - 
xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -813,7 +803,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -860,6 +850,8 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { }, }, }, nil) + tcc.stateCh.Receive(ctx) // Ignore the first update. + xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { @@ -869,10 +861,9 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { }, }, nil) - tcc.stateCh.Receive(ctx) // Ignore the first update gotState, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState = gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -910,7 +901,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { }, nil) gotState, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState = gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -939,10 +930,8 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -971,7 +960,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { }, nil) 
gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -995,10 +984,8 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -1019,7 +1006,7 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { defer cancel() gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) wantParsedConfig := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)("{}") @@ -1043,10 +1030,8 @@ func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -1220,10 +1205,8 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, }) - defer func() { - cancel() - xdsR.Close() - }() + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -1265,7 +1248,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - 
t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { From aff517ba8a8ded7306801c3b95f1f7f480c1268b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 7 May 2021 14:35:48 -0700 Subject: [PATCH 059/998] xds: make e2e tests use a single management server instance (#4399) --- .../test/xds_client_integration_test.go | 41 +---- xds/internal/test/xds_integration_test.go | 161 ++++++++++++++++++ .../test/xds_server_integration_test.go | 157 +++-------------- .../test/xds_server_serving_mode_test.go | 49 +----- xds/internal/testutils/e2e/server.go | 1 - 5 files changed, 203 insertions(+), 206 deletions(-) diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 2e0e03aa3aca..713331b325e0 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -23,13 +23,12 @@ package xds_test import ( "context" + "fmt" "net" "testing" - "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" @@ -37,34 +36,13 @@ import ( ) // clientSetup performs a bunch of steps common to all xDS client tests here: -// - spin up an xDS management server on a local port // - spin up a gRPC server and register the test service on it // - create a local TCP listener and start serving on it // // Returns the following: -// - the management server: tests use this to configure resources -// - nodeID expected by the management server: this is set in the Node proto -// sent by the xdsClient for queries. 
// - the port the server is listening on // - cleanup function to be invoked by the tests when done -func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { - // Spin up a xDS management server on a local port. - nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() - if err != nil { - t.Fatal(err) - } - - // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, - NodeID: nodeID, - ServerURI: fs.Address, - }) - if err != nil { - t.Fatal(err) - } - +func clientSetup(t *testing.T) (uint32, func()) { // Initialize a gRPC server and register the stubServer on it. server := grpc.NewServer() testpb.RegisterTestServiceServer(server, &testService{}) @@ -81,30 +59,29 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { } }() - return fs, nodeID, uint32(lis.Addr().(*net.TCPAddr).Port), func() { - fs.Stop() - bootstrapCleanup() + return uint32(lis.Addr().(*net.TCPAddr).Port), func() { server.Stop() } } func (s) TestClientSideXDS(t *testing.T) { - fs, nodeID, port, cleanup := clientSetup(t) + port, cleanup := clientSetup(t) defer cleanup() + serviceName := xdsServiceName + "-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: "myservice", - NodeID: nodeID, + DialTarget: serviceName, + NodeID: xdsClientNodeID, Host: "localhost", Port: port, SecLevel: e2e.SecurityLevelNone, }) - if err := fs.Update(resources); err != nil { + if err := managementServer.Update(resources); err != nil { t.Fatal(err) } // Create a ClientConn and make a successful RPC. 
- cc, err := grpc.Dial("xds:///myservice", grpc.WithTransportCredentials(insecure.NewCredentials())) + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index 1c4b73ac58f8..a41fec929762 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -24,10 +24,26 @@ package xds_test import ( "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "path" "testing" "time" + "github.com/google/uuid" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/leakcheck" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/testdata" + "google.golang.org/grpc/xds/internal/testutils/e2e" + + xdsinternal "google.golang.org/grpc/internal/xds" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -51,3 +67,148 @@ type testService struct { func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil } + +var ( + // Globals corresponding to the single instance of the xDS management server + // which is spawned for all the tests in this package. + managementServer *e2e.ManagementServer + xdsClientNodeID string +) + +// TestMain sets up an xDS management server, runs all tests, and stops the +// management server. +func TestMain(m *testing.M) { + // The management server is started and stopped from here, but the leakcheck + // runs after every individual test. So, we need to skip the goroutine which + // spawns the management server and is blocked on the call to `Serve()`. 
+ leakcheck.RegisterIgnoreGoroutine("e2e.StartManagementServer") + + cancel, err := setupManagementServer() + if err != nil { + log.Printf("setupManagementServer() failed: %v", err) + os.Exit(1) + } + + code := m.Run() + cancel() + os.Exit(code) +} + +func createTmpFile(src, dst string) error { + data, err := ioutil.ReadFile(src) + if err != nil { + return fmt.Errorf("ioutil.ReadFile(%q) failed: %v", src, err) + } + if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { + return fmt.Errorf("ioutil.WriteFile(%q) failed: %v", dst, err) + } + return nil +} + +// createTempDirWithFiles creates a temporary directory under the system default +// tempDir with the given dirSuffix. It also reads from certSrc, keySrc and +// rootSrc files are creates appropriate files under the newly create tempDir. +// Returns the name of the created tempDir. +func createTmpDirWithFiles(dirSuffix, certSrc, keySrc, rootSrc string) (string, error) { + // Create a temp directory. Passing an empty string for the first argument + // uses the system temp directory. + dir, err := ioutil.TempDir("", dirSuffix) + if err != nil { + return "", fmt.Errorf("ioutil.TempDir() failed: %v", err) + } + + if err := createTmpFile(testdata.Path(certSrc), path.Join(dir, certFile)); err != nil { + return "", err + } + if err := createTmpFile(testdata.Path(keySrc), path.Join(dir, keyFile)); err != nil { + return "", err + } + if err := createTmpFile(testdata.Path(rootSrc), path.Join(dir, rootFile)); err != nil { + return "", err + } + return dir, nil +} + +// createClientTLSCredentials creates client-side TLS transport credentials. 
+func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { + t.Helper() + + cert, err := tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) + } + b, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + if err != nil { + t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + t.Fatal("failed to append certificates") + } + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + ServerName: "x.test.example.com", + }) +} + +// setupManagement server performs the following: +// - spin up an xDS management server on a local port +// - set up certificates for consumption by the file_watcher plugin +// - sets up the global variables which refer to this management server and the +// nodeID to be used when talking to this management server. +// +// Returns a function to be invoked by the caller to stop the management server. +func setupManagementServer() (func(), error) { + // Turn on the env var protection for client-side security. + origClientSideSecurityEnvVar := env.ClientSideSecuritySupport + env.ClientSideSecuritySupport = true + + // Spin up an xDS management server on a local port. + var err error + managementServer, err = e2e.StartManagementServer() + if err != nil { + return nil, err + } + + // Create a directory to hold certs and key files used on the server side. + serverDir, err := createTmpDirWithFiles("testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + if err != nil { + managementServer.Stop() + return nil, err + } + + // Create a directory to hold certs and key files used on the client side. 
+ clientDir, err := createTmpDirWithFiles("testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + if err != nil { + managementServer.Stop() + return nil, err + } + + // Create certificate providers section of the bootstrap config with entries + // for both the client and server sides. + cpc := map[string]json.RawMessage{ + e2e.ServerSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), + e2e.ClientSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(clientDir, certFile), path.Join(clientDir, keyFile), path.Join(clientDir, rootFile)), + } + + // Create a bootstrap file in a temporary directory. + xdsClientNodeID = uuid.New().String() + bootstrapCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ + Version: xdsinternal.TransportV3, + NodeID: xdsClientNodeID, + ServerURI: managementServer.Address, + CertificateProviders: cpc, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + }) + if err != nil { + managementServer.Stop() + return nil, err + } + + return func() { + managementServer.Stop() + bootstrapCleanup() + env.ClientSideSecuritySupport = origClientSideSecurityEnvVar + }, nil +} diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 5e266cdc5fb6..6511a6134cf8 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -24,30 +24,19 @@ package xds_test import ( "context" - "crypto/tls" - "crypto/x509" - "encoding/json" "fmt" - "io/ioutil" "net" - "os" - "path" "strconv" "testing" - "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" - 
"google.golang.org/grpc/testdata" "google.golang.org/grpc/xds" "google.golang.org/grpc/xds/internal/testutils/e2e" xdscreds "google.golang.org/grpc/credentials/xds" - xdsinternal "google.golang.org/grpc/internal/xds" testpb "google.golang.org/grpc/test/grpc_testing" xdstestutils "google.golang.org/grpc/xds/internal/testutils" ) @@ -61,113 +50,17 @@ const ( xdsServiceName = "my-service" ) -func createTmpFile(t *testing.T, src, dst string) { - t.Helper() - - data, err := ioutil.ReadFile(src) - if err != nil { - t.Fatalf("ioutil.ReadFile(%q) failed: %v", src, err) - } - if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", dst, err) - } - t.Logf("Wrote file at: %s", dst) -} - -// createTempDirWithFiles creates a temporary directory under the system default -// tempDir with the given dirSuffix. It also reads from certSrc, keySrc and -// rootSrc files are creates appropriate files under the newly create tempDir. -// Returns the name of the created tempDir. -func createTmpDirWithFiles(t *testing.T, dirSuffix, certSrc, keySrc, rootSrc string) string { - t.Helper() - - // Create a temp directory. Passing an empty string for the first argument - // uses the system temp directory. - dir, err := ioutil.TempDir("", dirSuffix) - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - t.Logf("Using tmpdir: %s", dir) - - createTmpFile(t, testdata.Path(certSrc), path.Join(dir, certFile)) - createTmpFile(t, testdata.Path(keySrc), path.Join(dir, keyFile)) - createTmpFile(t, testdata.Path(rootSrc), path.Join(dir, rootFile)) - return dir -} - -// createClientTLSCredentials creates client-side TLS transport credentials. 
-func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { - cert, err := tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) - if err != nil { - t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) - } - b, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) - if err != nil { - t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) - } - roots := x509.NewCertPool() - if !roots.AppendCertsFromPEM(b) { - t.Fatal("failed to append certificates") - } - return credentials.NewTLS(&tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: roots, - ServerName: "x.test.example.com", - }) -} - -// commonSetup performs a bunch of steps common to all xDS server tests here: -// - spin up an xDS management server on a local port -// - set up certificates for consumption by the file_watcher plugin +// setupGRPCServer performs the following: // - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and // register the test service on it // - create a local TCP listener and start serving on it // // Returns the following: -// - the management server: tests use this to configure resources -// - nodeID expected by the management server: this is set in the Node proto -// sent by the xdsClient used on the xDS-enabled gRPC server // - local listener on which the xDS-enabled gRPC server is serving on // - cleanup function to be invoked by the tests when done -func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, func()) { +func setupGRPCServer(t *testing.T) (net.Listener, func()) { t.Helper() - // Turn on the env var protection for client-side security. - origClientSideSecurityEnvVar := env.ClientSideSecuritySupport - env.ClientSideSecuritySupport = true - - // Spin up an xDS management server on a local port. 
- nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() - if err != nil { - t.Fatal(err) - } - - // Create a directory to hold certs and key files used on the server side. - serverDir := createTmpDirWithFiles(t, "testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") - - // Create a directory to hold certs and key files used on the client side. - clientDir := createTmpDirWithFiles(t, "testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") - - // Create certificate providers section of the bootstrap config with entries - // for both the client and server sides. - cpc := map[string]json.RawMessage{ - e2e.ServerSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), - e2e.ClientSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(clientDir, certFile), path.Join(clientDir, keyFile), path.Join(clientDir, rootFile)), - } - - // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ - Version: xdsinternal.TransportV3, - NodeID: nodeID, - ServerURI: fs.Address, - CertificateProviders: cpc, - ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, - }) - if err != nil { - t.Fatal(err) - } - // Configure xDS credentials to be used on the server-side. creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ FallbackCreds: insecure.NewCredentials(), @@ -192,11 +85,8 @@ func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, fun } }() - return fs, nodeID, lis, func() { - fs.Stop() - bootstrapCleanup() + return lis, func() { server.Stop() - env.ClientSideSecuritySupport = origClientSideSecurityEnvVar } } @@ -223,7 +113,7 @@ func hostPortFromListener(lis net.Listener) (string, uint32, error) { // the client and the server. 
This results in both of them using the // configured fallback credentials (which is insecure creds in this case). func (s) TestServerSideXDS_Fallback(t *testing.T) { - fs, nodeID, lis, cleanup := commonSetup(t) + lis, cleanup := setupGRPCServer(t) defer cleanup() // Grab the host and port of the server and create client side xDS resources @@ -233,9 +123,10 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { if err != nil { t.Fatalf("failed to retrieve host and port of server: %v", err) } + serviceName := xdsServiceName + "-fallback" resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: xdsServiceName, - NodeID: nodeID, + DialTarget: serviceName, + NodeID: xdsClientNodeID, Host: host, Port: port, SecLevel: e2e.SecurityLevelNone, @@ -248,7 +139,7 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { resources.Listeners = append(resources.Listeners, inboundLis) // Setup the management server with client and server-side resources. - if err := fs.Update(resources); err != nil { + if err := managementServer.Update(resources); err != nil { t.Fatal(err) } @@ -263,7 +154,7 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { // Create a ClientConn with the xds scheme and make a successful RPC. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", xdsServiceName), grpc.WithTransportCredentials(creds)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -271,7 +162,7 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { client := testpb.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) + t.Errorf("rpc EmptyCall() failed: %v", err) } } @@ -301,7 +192,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - fs, nodeID, lis, cleanup := commonSetup(t) + lis, cleanup := setupGRPCServer(t) defer cleanup() // Grab the host and port of the server and create client side xDS @@ -314,9 +205,10 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // Create xDS resources to be consumed on the client side. This // includes the listener, route configuration, cluster (with // security configuration) and endpoint resources. + serviceName := xdsServiceName + "-file-watcher-certs-" + test.name resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: xdsServiceName, - NodeID: nodeID, + DialTarget: serviceName, + NodeID: xdsClientNodeID, Host: host, Port: port, SecLevel: test.secLevel, @@ -329,7 +221,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { resources.Listeners = append(resources.Listeners, inboundLis) // Setup the management server with client and server resources. 
- if err := fs.Update(resources); err != nil { + if err := managementServer.Update(resources); err != nil { t.Fatal(err) } @@ -344,7 +236,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // Create a ClientConn with the xds scheme and make an RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", xdsServiceName), grpc.WithTransportCredentials(creds)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -367,7 +259,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // configuration pointing to the use of the file_watcher plugin and we verify // that the same client is now able to successfully make an RPC. func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { - fs, nodeID, lis, cleanup := commonSetup(t) + lis, cleanup := setupGRPCServer(t) defer cleanup() // Grab the host and port of the server and create client side xDS resources @@ -378,9 +270,10 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { if err != nil { t.Fatalf("failed to retrieve host and port of server: %v", err) } + serviceName := xdsServiceName + "-security-config-change" resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: xdsServiceName, - NodeID: nodeID, + DialTarget: serviceName, + NodeID: xdsClientNodeID, Host: host, Port: port, SecLevel: e2e.SecurityLevelNone, @@ -393,7 +286,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { resources.Listeners = append(resources.Listeners, inboundLis) // Setup the management server with client and server-side resources. 
- if err := fs.Update(resources); err != nil { + if err := managementServer.Update(resources); err != nil { t.Fatal(err) } @@ -408,7 +301,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { // Create a ClientConn with the xds scheme and make a successful RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", xdsServiceName), grpc.WithTransportCredentials(xdsCreds)) + xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -437,15 +330,15 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { // Switch server and client side resources with ones that contain required // security configuration for mTLS with a file watcher certificate provider. resources = e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: xdsServiceName, - NodeID: nodeID, + DialTarget: serviceName, + NodeID: xdsClientNodeID, Host: host, Port: port, SecLevel: e2e.SecurityLevelMTLS, }) inboundLis = e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) resources.Listeners = append(resources.Listeners, inboundLis) - if err := fs.Update(resources); err != nil { + if err := managementServer.Update(resources); err != nil { t.Fatal(err) } diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 484a5b5ab741..664f0b85759a 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -24,22 +24,18 @@ package xds_test import ( "context" - "encoding/json" "fmt" "net" - "path" "sync" "testing" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" 
"google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/testutils" - xdsinternal "google.golang.org/grpc/internal/xds" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/xds" xdstestutils "google.golang.org/grpc/xds/internal/testutils" @@ -91,35 +87,6 @@ func (mt *modeTracker) waitForUpdate(ctx context.Context) error { // xDS enabled gRPC servers. It verifies that appropriate mode changes happen in // the server, and also verifies behavior of clientConns under these modes. func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { - // Spin up a xDS management server on a local port. - nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() - if err != nil { - t.Fatal(err) - } - defer fs.Stop() - - // Create a directory to hold certs and key files used on the server side. - serverDir := createTmpDirWithFiles(t, "testServerSideServingMode*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") - - // Create certificate providers section of the bootstrap config. - cpc := map[string]json.RawMessage{ - e2e.ServerSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), - } - - // Create a bootstrap file in a temporary directory. - bsCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ - Version: xdsinternal.TransportV3, - NodeID: nodeID, - ServerURI: fs.Address, - CertificateProviders: cpc, - ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, - }) - if err != nil { - t.Fatal(err) - } - defer bsCleanup() - // Configure xDS credentials to be used on the server-side. 
creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ FallbackCreds: insecure.NewCredentials(), @@ -176,10 +143,10 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } listener2 := e2e.DefaultServerListener(host2, port2, e2e.SecurityLevelNone) resources := e2e.UpdateOptions{ - NodeID: nodeID, + NodeID: xdsClientNodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, } - if err := fs.Update(resources); err != nil { + if err := managementServer.Update(resources); err != nil { t.Fatal(err) } @@ -217,8 +184,8 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // Update the management server to remove the second listener resource. This should // push the only the second listener into "not-serving" mode. - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, + if err := managementServer.Update(e2e.UpdateOptions{ + NodeID: xdsClientNodeID, Listeners: []*v3listenerpb.Listener{listener1}, }); err != nil { t.Error(err) @@ -246,8 +213,8 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // Update the management server to remove the first listener resource as // well. This should push the first listener into "not-serving" mode. Second // listener is already in "not-serving" mode. - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, + if err := managementServer.Update(e2e.UpdateOptions{ + NodeID: xdsClientNodeID, Listeners: []*v3listenerpb.Listener{}, }); err != nil { t.Error(err) @@ -279,8 +246,8 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Update the management server with both listener resources. 
- if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, + if err := managementServer.Update(e2e.UpdateOptions{ + NodeID: xdsClientNodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, }); err != nil { t.Error(err) diff --git a/xds/internal/testutils/e2e/server.go b/xds/internal/testutils/e2e/server.go index 9ec2eb0d6f2e..4a71a5054d7d 100644 --- a/xds/internal/testutils/e2e/server.go +++ b/xds/internal/testutils/e2e/server.go @@ -147,7 +147,6 @@ func (s *ManagementServer) Stop() { s.cancel() } s.gs.Stop() - logger.Infof("Stopped the xDS management server...") } // resourceSlice accepts a slice of any type of proto messages and returns a From 328b1d171a65d7e855bcd7bb5cb1f973c7e6f5d2 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 7 May 2021 14:37:52 -0700 Subject: [PATCH 060/998] transport: allow InTapHandle to return status errors (#4365) --- internal/transport/controlbuf.go | 32 ++++++ internal/transport/http2_server.go | 39 ++++--- server.go | 5 + tap/tap.go | 16 +-- test/end2end_test.go | 167 +++++++++++++++++------------ 5 files changed, 163 insertions(+), 96 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 40ef23923fda..f63a0137622a 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -20,13 +20,17 @@ package transport import ( "bytes" + "errors" "fmt" "runtime" + "strconv" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { @@ -128,6 +132,14 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM +type earlyAbortStream struct { + streamID uint32 + contentSubtype string + status *status.Status +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + type dataFrame struct { streamID uint32 
endStream bool @@ -749,6 +761,24 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return nil } +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: "200"}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + return nil +} + func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true @@ -787,6 +817,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 7c6c89d4f9b2..11be5599cd47 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -356,26 +356,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if state.data.statsTrace != nil { s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) } - if t.inTapHandle != nil { - var err error - info := &tap.Info{ - FullMethodName: state.data.method, - } - s.ctx, err = t.inTapHandle(s.ctx, info) - if err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - } - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - s.cancel() - return false - } - } t.mu.Lock() if 
t.state != reachable { t.mu.Unlock() @@ -417,6 +397,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.cancel() return false } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: state.data.method}); err != nil { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + }) + return false + } + } t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} diff --git a/server.go b/server.go index d2bc884277a4..0a151dee4fcb 100644 --- a/server.go +++ b/server.go @@ -418,6 +418,11 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func InTapHandle(h tap.ServerInHandle) ServerOption { return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { diff --git a/tap/tap.go b/tap/tap.go index caea1ebed6e3..dbf34e6bb5f5 100644 --- a/tap/tap.go +++ b/tap/tap.go @@ -37,16 +37,16 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". 
+// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. // // It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. Therefore, users should NOT have any diff --git a/test/end2end_test.go b/test/end2end_test.go index a63dfa1409fd..861a2f29623d 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -2507,10 +2507,13 @@ type myTap struct { func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) { if info != nil { - if info.FullMethodName == "/grpc.testing.TestService/EmptyCall" { + switch info.FullMethodName { + case "/grpc.testing.TestService/EmptyCall": t.cnt++ - } else if info.FullMethodName == "/grpc.testing.TestService/UnaryCall" { + case "/grpc.testing.TestService/UnaryCall": return nil, fmt.Errorf("tap error") + case "/grpc.testing.TestService/FullDuplexCall": + return nil, status.Errorf(codes.FailedPrecondition, "test custom error") } } return ctx, nil @@ -2550,8 +2553,15 @@ func testTap(t *testing.T, e env) { ResponseSize: 45, Payload: payload, } - if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Unavailable { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + if _, err := tc.UnaryCall(ctx, req); 
status.Code(err) != codes.PermissionDenied { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.PermissionDenied) + } + str, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected error creating stream: %v", err) + } + if _, err := str.Recv(); status.Code(err) != codes.FailedPrecondition { + t.Fatalf("FullDuplexCall Recv() = _, %v, want _, %s", err, codes.FailedPrecondition) } } @@ -3639,66 +3649,77 @@ func testMalformedHTTP2Metadata(t *testing.T, e env) { } } +// Tests that the client transparently retries correctly when receiving a +// RST_STREAM with code REFUSED_STREAM. func (s) TestTransparentRetry(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - // Fails with RST_STREAM / FLOW_CONTROL_ERROR - continue - } - testTransparentRetry(t, e) - } -} - -// This test makes sure RPCs are retried times when they receive a RST_STREAM -// with the REFUSED_STREAM error code, which the InTapHandle provokes. -func testTransparentRetry(t *testing.T, e env) { - te := newTest(t, e) - attempts := 0 - successAttempt := 2 - te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { - attempts++ - if attempts < successAttempt { - return nil, errors.New("not now") - } - return ctx, nil - } - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tsc := testpb.NewTestServiceClient(cc) testCases := []struct { - successAttempt int - failFast bool - errCode codes.Code + failFast bool + errCode codes.Code }{{ - successAttempt: 1, + // success attempt: 1, (stream ID 1) }, { - successAttempt: 2, + // success attempt: 2, (stream IDs 3, 5) }, { - successAttempt: 3, - errCode: codes.Unavailable, + // no success attempt (stream IDs 7, 9) + errCode: codes.Unavailable, }, { - successAttempt: 1, - failFast: true, + // success attempt: 1 (stream ID 11), + failFast: true, }, { - successAttempt: 2, - failFast: true, + // success attempt: 2 (stream IDs 13, 15), + 
failFast: true, }, { - successAttempt: 3, - failFast: true, - errCode: codes.Unavailable, + // no success attempt (stream IDs 17, 19) + failFast: true, + errCode: codes.Unavailable, }} - for _, tc := range testCases { - attempts = 0 - successAttempt = tc.successAttempt - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - _, err := tsc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(!tc.failFast)) - cancel() - if status.Code(err) != tc.errCode { - t.Errorf("%+v: tsc.EmptyCall(_, _) = _, %v, want _, Code=%v", tc, err, tc.errCode) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen. Err: %v", err) + } + defer lis.Close() + server := &httpServer{ + headerFields: [][]string{{ + ":status", "200", + "content-type", "application/grpc", + "grpc-status", "0", + }}, + refuseStream: func(i uint32) bool { + switch i { + case 1, 5, 11, 15: // these stream IDs succeed + return false + } + return true // these are refused + }, + } + server.start(t, lis) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("failed to dial due to err: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + client := testpb.NewTestServiceClient(cc) + + for i, tc := range testCases { + stream, err := client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("error creating stream due to err: %v", err) + } + code := func(err error) codes.Code { + if err == io.EOF { + return codes.OK + } + return status.Code(err) } + if _, err := stream.Recv(); code(err) != tc.errCode { + t.Fatalf("%v: stream.Recv() = _, %v, want error code: %v", i, err, tc.errCode) + } + } } @@ -7191,6 +7212,7 @@ func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { type httpServer struct { headerFields [][]string + refuseStream func(uint32) bool } func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, 
endStream bool) error { @@ -7238,24 +7260,33 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { writer.Flush() // necessary since client is expecting preface before declaring connection fully setup. var sid uint32 - // Read frames until a header is received. + // Loop until conn is closed and framer returns io.EOF for { - frame, err := framer.ReadFrame() - if err != nil { - t.Errorf("Error at server-side while reading frame. Err: %v", err) - return - } - if hframe, ok := frame.(*http2.HeadersFrame); ok { - sid = hframe.Header().StreamID - break + // Read frames until a header is received. + for { + frame, err := framer.ReadFrame() + if err != nil { + if err != io.EOF { + t.Errorf("Error at server-side while reading frame. Err: %v", err) + } + return + } + if hframe, ok := frame.(*http2.HeadersFrame); ok { + sid = hframe.Header().StreamID + if s.refuseStream == nil || !s.refuseStream(sid) { + break + } + framer.WriteRSTStream(sid, http2.ErrCodeRefusedStream) + writer.Flush() + } } - } - for i, headers := range s.headerFields { - if err = s.writeHeader(framer, sid, headers, i == len(s.headerFields)-1); err != nil { - t.Errorf("Error at server-side while writing headers. Err: %v", err) - return + for i, headers := range s.headerFields { + if err = s.writeHeader(framer, sid, headers, i == len(s.headerFields)-1); err != nil { + t.Errorf("Error at server-side while writing headers. 
Err: %v", err) + return + } + writer.Flush() } - writer.Flush() } }() } From c15291b0f5929ab8cf659269a11e8aa79cb71788 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 7 May 2021 15:24:10 -0700 Subject: [PATCH 061/998] client: initialize safe config selector when creating ClientConn (#4398) --- clientconn.go | 1 + 1 file changed, 1 insertion(+) diff --git a/clientconn.go b/clientconn.go index d57e54b4dc52..24109264f557 100644 --- a/clientconn.go +++ b/clientconn.go @@ -143,6 +143,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * firstResolveEvent: grpcsync.NewEvent(), } cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) for _, opt := range opts { From 12a377b1e4c9f1960bd25f47b9156d9dbd732ed0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 7 May 2021 15:42:59 -0700 Subject: [PATCH 062/998] xds: nack route configuration with regexes that don't compile (#4388) --- xds/internal/client/client.go | 25 ++++--- xds/internal/client/rds_test.go | 113 +++++++++++++++++++++++++++---- xds/internal/client/xds.go | 15 +++- xds/internal/resolver/matcher.go | 15 +--- 4 files changed, 128 insertions(+), 40 deletions(-) diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 2daceede5398..603632801b04 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -24,6 +24,7 @@ import ( "context" "errors" "fmt" + "regexp" "sync" "time" @@ -271,7 +272,9 @@ type VirtualHost struct { // Route is both a specification of how to match a request as well as an // indication of the action to take upon match. type Route struct { - Path, Prefix, Regex *string + Path *string + Prefix *string + Regex *regexp.Regexp // Indicates if prefix/path matching should be case insensitive. The default // is false (case sensitive). 
CaseInsensitive bool @@ -304,20 +307,20 @@ type WeightedCluster struct { // HeaderMatcher represents header matchers. type HeaderMatcher struct { - Name string `json:"name"` - InvertMatch *bool `json:"invertMatch,omitempty"` - ExactMatch *string `json:"exactMatch,omitempty"` - RegexMatch *string `json:"regexMatch,omitempty"` - PrefixMatch *string `json:"prefixMatch,omitempty"` - SuffixMatch *string `json:"suffixMatch,omitempty"` - RangeMatch *Int64Range `json:"rangeMatch,omitempty"` - PresentMatch *bool `json:"presentMatch,omitempty"` + Name string + InvertMatch *bool + ExactMatch *string + RegexMatch *regexp.Regexp + PrefixMatch *string + SuffixMatch *string + RangeMatch *Int64Range + PresentMatch *bool } // Int64Range is a range for header range match. type Int64Range struct { - Start int64 `json:"start"` - End int64 `json:"end"` + Start int64 + End int64 } // SecurityConfig contains the security configuration received as part of the diff --git a/xds/internal/client/rds_test.go b/xds/internal/client/rds_test.go index cde40ee80dfe..a4aaf03e4ae0 100644 --- a/xds/internal/client/rds_test.go +++ b/xds/internal/client/rds_test.go @@ -22,24 +22,26 @@ package client import ( "fmt" + "regexp" "testing" "time" - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/internal/xds/env" 
"google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" "google.golang.org/protobuf/types/known/durationpb" + + v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" + v2routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + anypb "github.com/golang/protobuf/ptypes/any" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" ) func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { @@ -915,6 +917,51 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }}, wantErr: false, }, + { + name: "good with regex matchers", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: "/a/"}}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "tv"}}, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + TotalWeight: &wrapperspb.UInt32Value{Value: 100}, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Regex: 
func() *regexp.Regexp { return regexp.MustCompile("/a/") }(), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(false), + RegexMatch: func() *regexp.Regexp { return regexp.MustCompile("tv") }(), + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + }}, + wantErr: false, + }, { name: "query is ignored", routes: []*v3routepb.Route{ @@ -960,6 +1007,44 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, wantErr: true, }, + { + name: "bad regex in path specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: "??"}}, + Headers: []*v3routepb.HeaderMatcher{ + { + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "tv"}, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, + }, + }, + }, + wantErr: true, + }, + { + name: "bad regex in header specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "??"}}, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, + }, + }, + }, + wantErr: true, + }, { name: "unrecognized header match specifier", routes: []*v3routepb.Route{ @@ -1063,7 +1148,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { } cmpOpts := []cmp.Option{ - cmp.AllowUnexported(Route{}, HeaderMatcher{}, Int64Range{}), + cmp.AllowUnexported(Route{}, HeaderMatcher{}, Int64Range{}, regexp.Regexp{}), cmpopts.EquateEmpty(), cmp.Transformer("FilterConfig", func(fc httpfilter.FilterConfig) string { return 
fmt.Sprint(fc) @@ -1074,17 +1159,15 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { t.Run(tt.name, func(t *testing.T) { oldFI := env.FaultInjectionSupport env.FaultInjectionSupport = !tt.disableFI + defer func() { env.FaultInjectionSupport = oldFI }() got, err := routesProtoToSlice(tt.routes, nil, false) if (err != nil) != tt.wantErr { - t.Errorf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) - return + t.Fatalf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) } - if !cmp.Equal(got, tt.wantRoutes, cmpOpts...) { - t.Errorf("routesProtoToSlice() got = %v, want %v, diff: %v", got, tt.wantRoutes, cmp.Diff(got, tt.wantRoutes, cmpOpts...)) + if diff := cmp.Diff(got, tt.wantRoutes, cmpOpts...); diff != "" { + t.Fatalf("routesProtoToSlice() returned unexpected diff (-got +want):\n%s", diff) } - - env.FaultInjectionSupport = oldFI }) } } diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index c0caf5cceb57..55bcc2e936ca 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "net" + "regexp" "strconv" "strings" "time" @@ -437,7 +438,12 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, case *v3routepb.RouteMatch_Path: route.Path = &pt.Path case *v3routepb.RouteMatch_SafeRegex: - route.Regex = &pt.SafeRegex.Regex + regex := pt.SafeRegex.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + route.Regex = re default: return nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) } @@ -452,7 +458,12 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, case *v3routepb.HeaderMatcher_ExactMatch: header.ExactMatch = &ht.ExactMatch case *v3routepb.HeaderMatcher_SafeRegexMatch: - header.RegexMatch = &ht.SafeRegexMatch.Regex + regex := ht.SafeRegexMatch.GetRegex() + re, err := regexp.Compile(regex) + 
if err != nil { + return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + header.RegexMatch = re case *v3routepb.HeaderMatcher_RangeMatch: header.RangeMatch = &Int64Range{ Start: ht.RangeMatch.Start, diff --git a/xds/internal/resolver/matcher.go b/xds/internal/resolver/matcher.go index b7b5f3db0e3e..06456a585573 100644 --- a/xds/internal/resolver/matcher.go +++ b/xds/internal/resolver/matcher.go @@ -20,7 +20,6 @@ package resolver import ( "fmt" - "regexp" "strings" "google.golang.org/grpc/internal/grpcrand" @@ -34,11 +33,7 @@ func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { var pathMatcher pathMatcherInterface switch { case r.Regex != nil: - re, err := regexp.Compile(*r.Regex) - if err != nil { - return nil, fmt.Errorf("failed to compile regex %q", *r.Regex) - } - pathMatcher = newPathRegexMatcher(re) + pathMatcher = newPathRegexMatcher(r.Regex) case r.Path != nil: pathMatcher = newPathExactMatcher(*r.Path, r.CaseInsensitive) case r.Prefix != nil: @@ -53,12 +48,8 @@ func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { switch { case h.ExactMatch != nil && *h.ExactMatch != "": matcherT = newHeaderExactMatcher(h.Name, *h.ExactMatch) - case h.RegexMatch != nil && *h.RegexMatch != "": - re, err := regexp.Compile(*h.RegexMatch) - if err != nil { - return nil, fmt.Errorf("failed to compile regex %q, skipping this matcher", *h.RegexMatch) - } - matcherT = newHeaderRegexMatcher(h.Name, re) + case h.RegexMatch != nil: + matcherT = newHeaderRegexMatcher(h.Name, h.RegexMatch) case h.PrefixMatch != nil && *h.PrefixMatch != "": matcherT = newHeaderPrefixMatcher(h.Name, *h.PrefixMatch) case h.SuffixMatch != nil && *h.SuffixMatch != "": From 98c895f7e06adc82ad030c4f90bcada672f523a2 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 10 May 2021 09:35:55 -0700 Subject: [PATCH 063/998] cleanup: use testutils.MarshalAny in more places (#4404) --- channelz/service/func_linux.go | 54 ++- xds/csds/csds_test.go | 15 +- 
xds/internal/client/cds_test.go | 370 +++++++------------ xds/internal/client/eds_test.go | 68 ++-- xds/internal/client/lds_test.go | 39 +- xds/internal/client/rds_test.go | 53 +-- xds/internal/client/tests/dump_test.go | 51 +-- xds/internal/client/v2/cds_test.go | 10 +- xds/internal/client/v2/client_test.go | 66 ++-- xds/internal/client/v2/eds_test.go | 23 +- xds/internal/httpfilter/fault/fault_test.go | 17 +- xds/internal/server/listener_wrapper_test.go | 14 +- xds/server_test.go | 13 +- 13 files changed, 276 insertions(+), 517 deletions(-) diff --git a/channelz/service/func_linux.go b/channelz/service/func_linux.go index ce38a921b974..2e52d5f5a98f 100644 --- a/channelz/service/func_linux.go +++ b/channelz/service/func_linux.go @@ -25,6 +25,7 @@ import ( durpb "github.com/golang/protobuf/ptypes/duration" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/testutils" ) func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { @@ -34,41 +35,32 @@ func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOption { var opts []*channelzpb.SocketOption if skopts.Linger != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionLinger{ - Active: skopts.Linger.Onoff != 0, - Duration: convertToPtypesDuration(int64(skopts.Linger.Linger), 0), + opts = append(opts, &channelzpb.SocketOption{ + Name: "SO_LINGER", + Additional: testutils.MarshalAny(&channelzpb.SocketOptionLinger{ + Active: skopts.Linger.Onoff != 0, + Duration: convertToPtypesDuration(int64(skopts.Linger.Linger), 0), + }), }) - if err == nil { - opts = append(opts, &channelzpb.SocketOption{ - Name: "SO_LINGER", - Additional: additional, - }) - } } if skopts.RecvTimeout != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ - Duration: 
convertToPtypesDuration(int64(skopts.RecvTimeout.Sec), int64(skopts.RecvTimeout.Usec)), + opts = append(opts, &channelzpb.SocketOption{ + Name: "SO_RCVTIMEO", + Additional: testutils.MarshalAny(&channelzpb.SocketOptionTimeout{ + Duration: convertToPtypesDuration(int64(skopts.RecvTimeout.Sec), int64(skopts.RecvTimeout.Usec)), + }), }) - if err == nil { - opts = append(opts, &channelzpb.SocketOption{ - Name: "SO_RCVTIMEO", - Additional: additional, - }) - } } if skopts.SendTimeout != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ - Duration: convertToPtypesDuration(int64(skopts.SendTimeout.Sec), int64(skopts.SendTimeout.Usec)), + opts = append(opts, &channelzpb.SocketOption{ + Name: "SO_SNDTIMEO", + Additional: testutils.MarshalAny(&channelzpb.SocketOptionTimeout{ + Duration: convertToPtypesDuration(int64(skopts.SendTimeout.Sec), int64(skopts.SendTimeout.Usec)), + }), }) - if err == nil { - opts = append(opts, &channelzpb.SocketOption{ - Name: "SO_SNDTIMEO", - Additional: additional, - }) - } } if skopts.TCPInfo != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTcpInfo{ + additional := testutils.MarshalAny(&channelzpb.SocketOptionTcpInfo{ TcpiState: uint32(skopts.TCPInfo.State), TcpiCaState: uint32(skopts.TCPInfo.Ca_state), TcpiRetransmits: uint32(skopts.TCPInfo.Retransmits), @@ -99,12 +91,10 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio TcpiAdvmss: skopts.TCPInfo.Advmss, TcpiReordering: skopts.TCPInfo.Reordering, }) - if err == nil { - opts = append(opts, &channelzpb.SocketOption{ - Name: "TCP_INFO", - Additional: additional, - }) - } + opts = append(opts, &channelzpb.SocketOption{ + Name: "TCP_INFO", + Additional: additional, + }) } return opts } diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 018f770494b1..6cf88f6d3942 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -34,10 +34,11 @@ import ( 
"github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" "google.golang.org/grpc" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/xds/internal/client" _ "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/testutils" + xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" @@ -150,19 +151,19 @@ var ( func init() { for i := range ldsTargets { listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) - listenerAnys[i], _ = ptypes.MarshalAny(listeners[i]) + listenerAnys[i] = testutils.MarshalAny(listeners[i]) } for i := range rdsTargets { routes[i] = e2e.DefaultRouteConfig(rdsTargets[i], ldsTargets[i], cdsTargets[i]) - routeAnys[i], _ = ptypes.MarshalAny(routes[i]) + routeAnys[i] = testutils.MarshalAny(routes[i]) } for i := range cdsTargets { clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i], e2e.SecurityLevelNone) - clusterAnys[i], _ = ptypes.MarshalAny(clusters[i]) + clusterAnys[i] = testutils.MarshalAny(clusters[i]) } for i := range edsTargets { endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i]) - endpointAnys[i], _ = ptypes.MarshalAny(endpoints[i]) + endpointAnys[i] = testutils.MarshalAny(endpoints[i]) } } @@ -286,9 +287,9 @@ func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServ } v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) // Create a local listener and pass it to Serve(). 
- lis, err := testutils.LocalTCPListener() + lis, err := xtestutils.LocalTCPListener() if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + t.Fatalf("xtestutils.LocalTCPListener() failed: %v", err) } go func() { if err := server.Serve(lis); err != nil { diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index bb1117ec5349..627229de7ad0 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -30,10 +30,10 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" - "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/testutils" xdsinternal "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/version" @@ -281,23 +281,16 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "rootInstance", - CertificateName: "rootCert", - }, - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: 
&v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "rootInstance", + CertificateName: "rootCert", }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, } @@ -427,22 +420,15 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, TransportSocket: &v3corepb.TransportSocket{ ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ - ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ - Name: "foo-sds-secret", - }, - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -463,16 +449,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, TransportSocket: &v3corepb.TransportSocket{ ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{}, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{}, + }), }, }, }, @@ -493,30 +472,23 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { LbPolicy: 
v3clusterpb.Cluster_ROUND_ROBIN, TransportSocket: &v3corepb.TransportSocket{ ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: ""}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: ""}}, }, }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -537,30 +509,23 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, TransportSocket: &v3corepb.TransportSocket{ ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ 
- ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: ""}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: ""}}, }, }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -581,30 +546,23 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, TransportSocket: &v3corepb.TransportSocket{ ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: 
&v3matcherpb.StringMatcher_Contains{Contains: ""}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: ""}}, }, }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -625,30 +583,23 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, TransportSocket: &v3corepb.TransportSocket{ ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexBad}}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, + TypedConfig: 
testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexBad}}}, }, }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -670,23 +621,16 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -715,27 +659,20 @@ func (s) TestValidateClusterWithSecurityConfig(t 
*testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: identityPluginInstance, - CertificateName: identityCertName, - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -766,41 +703,34 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: identityPluginInstance, - CertificateName: 
identityCertName, - }, - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - { - MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: sanExact}, - IgnoreCase: true, - }, - {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: sanPrefix}}, - {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: sanSuffix}}, - {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexGood}}}, - {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: sanContains}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + { + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: sanExact}, + IgnoreCase: true, }, + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: sanPrefix}}, + {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: sanSuffix}}, + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexGood}}}, + {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: sanContains}}, }, }, + 
ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, + }, + }, + }), }, }, }, @@ -845,7 +775,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { v3Service = "v2Service" ) var ( - v2Cluster = &v2xdspb.Cluster{ + v2ClusterAny = testutils.MarshalAny(&v2xdspb.Cluster{ Name: v2ClusterName, ClusterDiscoveryType: &v2xdspb.Cluster_Type{Type: v2xdspb.Cluster_EDS}, EdsClusterConfig: &v2xdspb.Cluster_EdsClusterConfig{ @@ -862,16 +792,9 @@ func (s) TestUnmarshalCluster(t *testing.T) { Self: &v2corepb.SelfConfigSource{}, }, }, - } - v2ClusterAny = &anypb.Any{ - TypeUrl: version.V2ClusterURL, - Value: func() []byte { - mcl, _ := proto.Marshal(v2Cluster) - return mcl - }(), - } + }) - v3Cluster = &v3clusterpb.Cluster{ + v3ClusterAny = testutils.MarshalAny(&v3clusterpb.Cluster{ Name: v3ClusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ @@ -888,14 +811,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { Self: &v3corepb.SelfConfigSource{}, }, }, - } - v3ClusterAny = &anypb.Any{ - TypeUrl: version.V3ClusterURL, - Value: func() []byte { - mcl, _ := proto.Marshal(v3Cluster) - return mcl - }(), - } + }) ) const testVersion = "test-version-cds" @@ -940,17 +856,10 @@ func (s) TestUnmarshalCluster(t *testing.T) { { name: "bad cluster resource", resources: []*anypb.Any{ - { - TypeUrl: version.V3ClusterURL, - Value: func() []byte { - cl := &v3clusterpb.Cluster{ - Name: "test", - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - } - mcl, _ := proto.Marshal(cl) - return mcl - }(), - }, + testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "test", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, + }), }, wantUpdate: 
map[string]ClusterUpdate{"test": {}}, wantMD: UpdateMetadata{ @@ -1014,18 +923,11 @@ func (s) TestUnmarshalCluster(t *testing.T) { name: "good and bad clusters", resources: []*anypb.Any{ v2ClusterAny, - { - // bad cluster resource - TypeUrl: version.V3ClusterURL, - Value: func() []byte { - cl := &v3clusterpb.Cluster{ - Name: "bad", - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - } - mcl, _ := proto.Marshal(cl) - return mcl - }(), - }, + // bad cluster resource + testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "bad", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, + }), v3ClusterAny, }, wantUpdate: map[string]ClusterUpdate{ diff --git a/xds/internal/client/eds_test.go b/xds/internal/client/eds_test.go index 9d6a3113b0c3..467f25269cdf 100644 --- a/xds/internal/client/eds_test.go +++ b/xds/internal/client/eds_test.go @@ -29,10 +29,10 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" - "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/version" ) @@ -122,23 +122,18 @@ func (s) TestEDSParseRespProto(t *testing.T) { } func (s) TestUnmarshalEndpoints(t *testing.T) { - var v3EndpointsAny = &anypb.Any{ - TypeUrl: version.V3EndpointsURL, - Value: func() []byte { - clab0 := newClaBuilder("test", nil) - clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{ - Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, - Weight: []uint32{271}, - }) - 
clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ - Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, - Weight: []uint32{828}, - }) - e := clab0.Build() - me, _ := proto.Marshal(e) - return me - }(), - } + var v3EndpointsAny = testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, + Weight: []uint32{271}, + }) + clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, + Weight: []uint32{828}, + }) + return clab0.Build() + }()) const testVersion = "test-version-eds" tests := []struct { @@ -181,19 +176,12 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { }, { name: "bad endpoints resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3EndpointsURL, - Value: func() []byte { - clab0 := newClaBuilder("test", nil) - clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) - e := clab0.Build() - me, _ := proto.Marshal(e) - return me - }(), - }, - }, + resources: []*anypb.Any{testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) + return clab0.Build() + }())}, wantUpdate: map[string]EndpointsUpdate{"test": {}}, wantMD: UpdateMetadata{ Status: ServiceStatusNACKed, @@ -246,18 +234,12 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { name: "good and bad endpoints", resources: []*anypb.Any{ v3EndpointsAny, - { - // bad endpoints resource - TypeUrl: version.V3EndpointsURL, - Value: func() []byte { - clab0 := newClaBuilder("bad", nil) - clab0.addLocality("locality-1", 1, 0, 
[]string{"addr1:314"}, nil) - clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) - e := clab0.Build() - me, _ := proto.Marshal(e) - return me - }(), - }, + testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("bad", nil) + clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) + return clab0.Build() + }()), }, wantUpdate: map[string]EndpointsUpdate{ "test": { diff --git a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go index 9fb27987e36b..ad9af4c885a2 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/client/lds_test.go @@ -251,16 +251,9 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), }, })}, wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, @@ -558,25 +551,13 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { name: "good and bad listener resources", resources: []*anypb.Any{ v2Lis, - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: "bad", - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, - } - mcm, _ := proto.Marshal(cm) - return mcm - }()}}} - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, + 
testutils.MarshalAny(&v3listenerpb.Listener{ + Name: "bad", + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), + }}), v3LisWithFilters(), }, wantUpdate: map[string]ListenerUpdate{ diff --git a/xds/internal/client/rds_test.go b/xds/internal/client/rds_test.go index a4aaf03e4ae0..10745e9e97da 100644 --- a/xds/internal/client/rds_test.go +++ b/xds/internal/client/rds_test.go @@ -26,9 +26,9 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" @@ -541,17 +541,10 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, } - v2RouteConfig = &anypb.Any{ - TypeUrl: version.V2RouteConfigURL, - Value: func() []byte { - rc := &v2xdspb.RouteConfiguration{ - Name: v2RouteConfigName, - VirtualHosts: v2VirtualHost, - } - m, _ := proto.Marshal(rc) - return m - }(), - } + v2RouteConfig = testutils.MarshalAny(&v2xdspb.RouteConfiguration{ + Name: v2RouteConfigName, + VirtualHosts: v2VirtualHost, + }) v3VirtualHost = []*v3routepb.VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -580,17 +573,10 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, } - v3RouteConfig = &anypb.Any{ - TypeUrl: version.V2RouteConfigURL, - Value: func() []byte { - rc := &v3routepb.RouteConfiguration{ - Name: v3RouteConfigName, - VirtualHosts: v3VirtualHost, - } - m, _ := proto.Marshal(rc) - return m - }(), - } + v3RouteConfig = testutils.MarshalAny(&v3routepb.RouteConfiguration{ + Name: v3RouteConfigName, + VirtualHosts: v3VirtualHost, + }) ) const testVersion = "test-version-rds" @@ -726,20 +712,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { name: "good 
and bad routeConfig resources", resources: []*anypb.Any{ v2RouteConfig, - { - TypeUrl: version.V2RouteConfigURL, - Value: func() []byte { - rc := &v3routepb.RouteConfiguration{ - Name: "bad", - VirtualHosts: []*v3routepb.VirtualHost{ - {Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_ConnectMatcher_{}}, - }}}}} - m, _ := proto.Marshal(rc) - return m - }(), - }, + testutils.MarshalAny(&v3routepb.RouteConfiguration{ + Name: "bad", + VirtualHosts: []*v3routepb.VirtualHost{ + {Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_ConnectMatcher_{}}, + }}}}}), v3RouteConfig, }, wantUpdate: map[string]RouteConfigUpdate{ diff --git a/xds/internal/client/tests/dump_test.go b/xds/internal/client/tests/dump_test.go index de3fcade47e9..815850973e3d 100644 --- a/xds/internal/client/tests/dump_test.go +++ b/xds/internal/client/tests/dump_test.go @@ -30,7 +30,6 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - "github.com/golang/protobuf/ptypes" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/protobuf/testing/protocmp" @@ -39,6 +38,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" xdstestutils "google.golang.org/grpc/xds/internal/testutils" @@ -58,29 +58,22 @@ func (s) TestLDSConfigDump(t *testing.T) { listenersT := &v3listenerpb.Listener{ Name: ldsTargets[i], ApiListener: 
&v3listenerpb.ApiListener{ - ApiListener: func() *anypb.Any { - mcm, _ := ptypes.MarshalAny(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: routeConfigNames[i], + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, }, + RouteConfigName: routeConfigNames[i], }, - CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ - MaxStreamDuration: durationpb.New(time.Second), - }, - }) - return mcm - }(), + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + }), }, } - anyT, err := ptypes.MarshalAny(listenersT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - listenerRaws[ldsTargets[i]] = anyT + listenerRaws[ldsTargets[i]] = testutils.MarshalAny(listenersT) } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ @@ -190,11 +183,7 @@ func (s) TestRDSConfigDump(t *testing.T) { }, } - anyT, err := ptypes.MarshalAny(routeConfigT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - routeRaws[rdsTargets[i]] = anyT + routeRaws[rdsTargets[i]] = testutils.MarshalAny(routeConfigT) } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ @@ -304,11 +293,7 @@ func (s) TestCDSConfigDump(t *testing.T) { }, } - anyT, err := ptypes.MarshalAny(clusterT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - clusterRaws[cdsTargets[i]] = anyT + clusterRaws[cdsTargets[i]] = testutils.MarshalAny(clusterT) } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ @@ -404,11 
+389,7 @@ func (s) TestEDSConfigDump(t *testing.T) { clab0.AddLocality(localityNames[i], 1, 1, []string{addrs[i]}, nil) claT := clab0.Build() - anyT, err := ptypes.MarshalAny(claT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - endpointRaws[edsTargets[i]] = anyT + endpointRaws[edsTargets[i]] = testutils.MarshalAny(claT) } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ diff --git a/xds/internal/client/v2/cds_test.go b/xds/internal/client/v2/cds_test.go index b56ae6108bbe..e627860d2a9f 100644 --- a/xds/internal/client/v2/cds_test.go +++ b/xds/internal/client/v2/cds_test.go @@ -26,8 +26,8 @@ import ( xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - "github.com/golang/protobuf/ptypes" anypb "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/internal/testutils" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/version" ) @@ -65,8 +65,8 @@ var ( }, }, } - marshaledCluster1, _ = ptypes.MarshalAny(goodCluster1) - goodCluster2 = &xdspb.Cluster{ + marshaledCluster1 = testutils.MarshalAny(goodCluster1) + goodCluster2 = &xdspb.Cluster{ Name: goodClusterName2, ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ @@ -79,8 +79,8 @@ var ( }, LbPolicy: xdspb.Cluster_ROUND_ROBIN, } - marshaledCluster2, _ = ptypes.MarshalAny(goodCluster2) - goodCDSResponse1 = &xdspb.DiscoveryResponse{ + marshaledCluster2 = testutils.MarshalAny(goodCluster2) + goodCDSResponse1 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ marshaledCluster1, }, diff --git a/xds/internal/client/v2/client_test.go b/xds/internal/client/v2/client_test.go index 1e464405eeaf..371375f3ee5c 100644 --- a/xds/internal/client/v2/client_test.go +++ b/xds/internal/client/v2/client_test.go @@ -28,7 +28,6 @@ import ( 
"time" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" @@ -114,30 +113,24 @@ var ( }, }, } - marshaledConnMgr1, _ = proto.Marshal(goodHTTPConnManager1) - goodListener1 = &xdspb.Listener{ + marshaledConnMgr1 = testutils.MarshalAny(goodHTTPConnManager1) + goodListener1 = &xdspb.Listener{ Name: goodLDSTarget1, ApiListener: &listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, + ApiListener: marshaledConnMgr1, }, } - marshaledListener1, _ = ptypes.MarshalAny(goodListener1) - goodListener2 = &xdspb.Listener{ + marshaledListener1 = testutils.MarshalAny(goodListener1) + goodListener2 = &xdspb.Listener{ Name: goodLDSTarget2, ApiListener: &listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, + ApiListener: marshaledConnMgr1, }, } - marshaledListener2, _ = ptypes.MarshalAny(goodListener2) - noAPIListener = &xdspb.Listener{Name: goodLDSTarget1} - marshaledNoAPIListener, _ = proto.Marshal(noAPIListener) - badAPIListener2 = &xdspb.Listener{ + marshaledListener2 = testutils.MarshalAny(goodListener2) + noAPIListener = &xdspb.Listener{Name: goodLDSTarget1} + marshaledNoAPIListener = testutils.MarshalAny(noAPIListener) + badAPIListener2 = &xdspb.Listener{ Name: goodLDSTarget2, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ @@ -170,13 +163,8 @@ var ( TypeUrl: version.V2ListenerURL, } badResourceTypeInLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - TypeUrl: version.V2ListenerURL, + Resources: []*anypb.Any{marshaledConnMgr1}, + TypeUrl: version.V2ListenerURL, } ldsResponseWithMultipleResources = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ @@ -186,13 +174,8 @@ var ( TypeUrl: 
version.V2ListenerURL, } noAPIListenerLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2ListenerURL, - Value: marshaledNoAPIListener, - }, - }, - TypeUrl: version.V2ListenerURL, + Resources: []*anypb.Any{marshaledNoAPIListener}, + TypeUrl: version.V2ListenerURL, } goodBadUglyLDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ @@ -215,19 +198,14 @@ var ( TypeUrl: version.V2RouteConfigURL, } badResourceTypeInRDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - TypeUrl: version.V2RouteConfigURL, + Resources: []*anypb.Any{marshaledConnMgr1}, + TypeUrl: version.V2RouteConfigURL, } noVirtualHostsRouteConfig = &xdspb.RouteConfiguration{ Name: goodRouteName1, } - marshaledNoVirtualHostsRouteConfig, _ = ptypes.MarshalAny(noVirtualHostsRouteConfig) - noVirtualHostsInRDSResponse = &xdspb.DiscoveryResponse{ + marshaledNoVirtualHostsRouteConfig = testutils.MarshalAny(noVirtualHostsRouteConfig) + noVirtualHostsInRDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ marshaledNoVirtualHostsRouteConfig, }, @@ -264,8 +242,8 @@ var ( }, }, } - marshaledGoodRouteConfig1, _ = ptypes.MarshalAny(goodRouteConfig1) - goodRouteConfig2 = &xdspb.RouteConfiguration{ + marshaledGoodRouteConfig1 = testutils.MarshalAny(goodRouteConfig1) + goodRouteConfig2 = &xdspb.RouteConfiguration{ Name: goodRouteName2, VirtualHosts: []*routepb.VirtualHost{ { @@ -296,8 +274,8 @@ var ( }, }, } - marshaledGoodRouteConfig2, _ = ptypes.MarshalAny(goodRouteConfig2) - goodRDSResponse1 = &xdspb.DiscoveryResponse{ + marshaledGoodRouteConfig2 = testutils.MarshalAny(goodRouteConfig2) + goodRDSResponse1 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ marshaledGoodRouteConfig1, }, diff --git a/xds/internal/client/v2/eds_test.go b/xds/internal/client/v2/eds_test.go index 7eba32f5c605..08e75d373017 100644 --- a/xds/internal/client/v2/eds_test.go +++ 
b/xds/internal/client/v2/eds_test.go @@ -25,11 +25,11 @@ import ( "time" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - "github.com/golang/protobuf/ptypes" anypb "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/testutils" + xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/version" ) @@ -44,20 +44,14 @@ var ( TypeUrl: version.V2EndpointsURL, } badResourceTypeInEDSResponse = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - TypeUrl: version.V2EndpointsURL, + Resources: []*anypb.Any{marshaledConnMgr1}, + TypeUrl: version.V2EndpointsURL, } marshaledGoodCLA1 = func() *anypb.Any { - clab0 := testutils.NewClusterLoadAssignmentBuilder(goodEDSName, nil) + clab0 := xtestutils.NewClusterLoadAssignmentBuilder(goodEDSName, nil) clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, nil) clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, nil) - a, _ := ptypes.MarshalAny(clab0.Build()) - return a + return testutils.MarshalAny(clab0.Build()) }() goodEDSResponse1 = &v2xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ @@ -66,10 +60,9 @@ var ( TypeUrl: version.V2EndpointsURL, } marshaledGoodCLA2 = func() *anypb.Any { - clab0 := testutils.NewClusterLoadAssignmentBuilder("not-goodEDSName", nil) + clab0 := xtestutils.NewClusterLoadAssignmentBuilder("not-goodEDSName", nil) clab0.AddLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - a, _ := ptypes.MarshalAny(clab0.Build()) - return a + return testutils.MarshalAny(clab0.Build()) }() goodEDSResponse2 = &v2xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 
6aeea8a8a782..c132e912f92a 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -38,12 +38,13 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/testutils" + xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/protobuf/types/known/wrapperspb" @@ -121,9 +122,9 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { testpb.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). - lis, err := testutils.LocalTCPListener() + lis, err := xtestutils.LocalTCPListener() if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + t.Fatalf("xtestutils.LocalTCPListener() failed: %v", err) } go func() { @@ -524,10 +525,7 @@ func (s) TestFaultInjection_Unary(t *testing.T) { hcm.HttpFilters = append(hcm.HttpFilters, e2e.HTTPFilter(fmt.Sprintf("fault%d", i), cfg)) } hcm.HttpFilters = append(hcm.HttpFilters, routerFilter) - hcmAny, err := ptypes.MarshalAny(hcm) - if err != nil { - t.Fatal(err) - } + hcmAny := testutils.MarshalAny(hcm) resources.Listeners[0].ApiListener.ApiListener = hcmAny resources.Listeners[0].FilterChains[0].Filters[0].ConfigType = &v3listenerpb.Filter_TypedConfig{TypedConfig: hcmAny} @@ -600,10 +598,7 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { }, })}, hcm.HttpFilters...) 
- hcmAny, err := ptypes.MarshalAny(hcm) - if err != nil { - t.Fatal(err) - } + hcmAny := testutils.MarshalAny(hcm) resources.Listeners[0].ApiListener.ApiListener = hcmAny resources.Listeners[0].FilterChains[0].Filters[0].ConfigType = &v3listenerpb.Filter_TypedConfig{TypedConfig: hcmAny} diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 8b5b5c3851de..b22f647a93ca 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -23,7 +23,6 @@ package server import ( "context" "errors" - "fmt" "net" "strconv" "testing" @@ -32,9 +31,6 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - anypb "github.com/golang/protobuf/ptypes/any" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" @@ -76,7 +72,7 @@ var listenerWithFilterChains = &v3listenerpb.Listener{ TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ InstanceName: "identityPluginInstance", @@ -327,11 +323,3 @@ func (s) TestListenerWrapper_Accept(t *testing.T) { t.Fatalf("error when waiting for Accept() to return the conn on filter chain match: %v", err) } } - -func marshalAny(m proto.Message) *anypb.Any { - a, 
err := ptypes.MarshalAny(m) - if err != nil { - panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) - } - return a -} diff --git a/xds/server_test.go b/xds/server_test.go index 3fb3bcd3818b..e16ac36b01f2 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -33,9 +33,6 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - anypb "github.com/golang/protobuf/ptypes/any" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" @@ -672,7 +669,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { TransportSocket: &v3corepb.TransportSocket{ Name: "envoy.transport_sockets.tls", ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ CommonTlsContext: &v3tlspb.CommonTlsContext{ TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ InstanceName: "identityPluginInstance", @@ -783,11 +780,3 @@ func verifyCertProviderNotCreated() error { } return nil } - -func marshalAny(m proto.Message) *anypb.Any { - a, err := ptypes.MarshalAny(m) - if err != nil { - panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) - } - return a -} From b1940e15f6778067675e2192d8947608e8a20e32 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 10 May 2021 10:11:31 -0700 Subject: [PATCH 064/998] xds: register resources at the mgmt server before requesting them (#4406) --- .../test/xds_server_serving_mode_test.go | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 
deletions(-) diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 664f0b85759a..414a559b0982 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -119,17 +119,6 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } - go func() { - if err := server.Serve(lis1); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - go func() { - if err := server.Serve(lis2); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - // Setup the management server to respond with server-side Listener // resources for both listeners. host1, port1, err := hostPortFromListener(lis1) @@ -150,6 +139,17 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatal(err) } + go func() { + if err := server.Serve(lis1); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + go func() { + if err := server.Serve(lis2); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + // Wait for both listeners to move to "serving" mode. if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeServing); err != nil { t.Fatal(err) From 5f95ad62331add45bbf5ee167b67cadc72e1d322 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 11 May 2021 10:39:31 -0700 Subject: [PATCH 065/998] xds: workaround to deflake xds e2e tests (#4413) --- xds/internal/test/xds_integration_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index a41fec929762..c2bb6bc18f67 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -83,6 +83,11 @@ func TestMain(m *testing.M) { // spawns the management server and is blocked on the call to `Serve()`. 
leakcheck.RegisterIgnoreGoroutine("e2e.StartManagementServer") + // Remove this once https://github.com/envoyproxy/go-control-plane/pull/430 + // is merged. For more information about this goroutine leak, see: + // https://github.com/envoyproxy/go-control-plane/issues/429. + leakcheck.RegisterIgnoreGoroutine("(*server).StreamHandler") + cancel, err := setupManagementServer() if err != nil { log.Printf("setupManagementServer() failed: %v", err) From 81b8cca6a9d92794be3e789b179e798aa1bc3209 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 11 May 2021 15:28:46 -0700 Subject: [PATCH 066/998] Change version to 1.39.0-dev (#4420) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index f73faed82920..4e26aec6ac18 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.38.0-dev" +const Version = "1.39.0-dev" From 62adda2ece5ec803c824c5009b83cea86de5030d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 11 May 2021 17:05:16 -0700 Subject: [PATCH 067/998] client: fix ForceCodec to set content-type header appropriately (#4401) --- rpc_util.go | 19 ++++++++++++---- test/end2end_test.go | 53 ++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 66 insertions(+), 6 deletions(-) diff --git a/rpc_util.go b/rpc_util.go index c8ae0e4444c7..6db356fa56a7 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -429,9 +429,10 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodec returns a CallOption that will set codec to be -// used for all request and response messages for a call. The result of calling -// Name() will be used as the content-subtype in a case-insensitive manner. +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. 
The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. // // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for @@ -853,7 +854,17 @@ func toRPCErr(err error) error { // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { - // codec was already set by a CallOption; use it. + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } return nil } diff --git a/test/end2end_test.go b/test/end2end_test.go index 861a2f29623d..eb91d09afdf0 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -5293,7 +5293,7 @@ func (s) TestGRPCMethod(t *testing.T) { } defer ss.Stop() - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -5305,6 +5305,55 @@ func (s) TestGRPCMethod(t *testing.T) { } } +// renameProtoCodec is an encoding.Codec wrapper that allows customizing the +// Name() of another codec. +type renameProtoCodec struct { + encoding.Codec + name string +} + +func (r *renameProtoCodec) Name() string { return r.name } + +// TestForceCodecName confirms that the ForceCodec call option sets the subtype +// in the content-type header according to the Name() of the codec provided. 
+func (s) TestForceCodecName(t *testing.T) { + wantContentTypeCh := make(chan []string, 1) + defer close(wantContentTypeCh) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Internal, "no metadata in context") + } + if got, want := md["content-type"], <-wantContentTypeCh; !reflect.DeepEqual(got, want) { + return nil, status.Errorf(codes.Internal, "got content-type=%q; want [%q]", got, want) + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(encoding.GetCodec("proto"))}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + codec := &renameProtoCodec{Codec: encoding.GetCodec("proto"), name: "some-test-name"} + wantContentTypeCh <- []string{"application/grpc+some-test-name"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + // Confirm the name is converted to lowercase before transmitting. 
+ codec.name = "aNoTHeRNaME" + wantContentTypeCh <- []string{"application/grpc+anothername"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } +} + func (s) TestForceServerCodec(t *testing.T) { ss := &stubserver.StubServer{ EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { @@ -5317,7 +5366,7 @@ func (s) TestForceServerCodec(t *testing.T) { } defer ss.Stop() - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { From a95a5c3bacecea965def0addd986b3ef709f6e27 Mon Sep 17 00:00:00 2001 From: James Protzman Date: Wed, 12 May 2021 11:49:07 -0400 Subject: [PATCH 068/998] transport: remove decodeState from client to reduce allocations (#3313) --- internal/transport/http2_client.go | 117 ++++++++++++++++++++++++++--- internal/transport/http_util.go | 34 ++++++--- 2 files changed, 130 insertions(+), 21 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 48c5e52edae9..64ebd4a167f3 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1254,11 +1254,97 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if h2code, err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr string + grpcMessage string + statusGen *status.Status + + httpStatus string + rawStatus string + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", hf.Value) + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case "grpc-status": + rawStatus = hf.Value + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + httpStatus = hf.Value + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC { + var ( + code = codes.Internal // when header does not include HTTP status, return INTERNAL + httpStatusCode int + ) + + if httpStatus != "" { + c, err := strconv.ParseInt(httpStatus, 10, 32) + if err != nil { + se := 
status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + httpStatusCode = int(c) + + var ok bool + code, ok = HTTPStatusConvTab[httpStatusCode] + if !ok { + code = codes.Unknown + } + } + + // Verify the HTTP response is a 200. + se := status.New(code, constructHTTPErrMsg(&httpStatusCode, contentTypeErr)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1293,9 +1379,8 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. 
@@ -1308,9 +1393,23 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } + if statusGen == nil { + rawStatusCode := codes.Unknown + if rawStatus != "" { + code, err := strconv.ParseInt(rawStatus, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + } + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } // reader runs as a separate goroutine in charge of reading data from network diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index c7dee140cf1a..2771e224f7bf 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -180,14 +180,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. 
- d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -217,6 +209,18 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err + } + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err + } + return status.FromProto(st), nil +} + func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { // frame.Truncated is set to true when framer detects that the current header // list size hits MaxHeaderListSize limit. @@ -271,18 +275,24 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode // constructErrMsg constructs error message to be returned in HTTP fallback mode. // Format: HTTP status code and its corresponding message + content-type error message. func (d *decodeState) constructHTTPErrMsg() string { + return constructHTTPErrMsg(d.data.httpStatus, d.data.contentTypeErr) +} + +// constructErrMsg constructs error message to be returned in HTTP fallback mode. +// Format: HTTP status code and its corresponding message + content-type error message. 
+func constructHTTPErrMsg(httpStatus *int, contentTypeErr string) string { var errMsgs []string - if d.data.httpStatus == nil { + if httpStatus == nil { errMsgs = append(errMsgs, "malformed header: missing HTTP status") } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) + errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(httpStatus)), *httpStatus)) } - if d.data.contentTypeErr == "" { + if contentTypeErr == "" { errMsgs = append(errMsgs, "transport: missing content-type field") } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) + errMsgs = append(errMsgs, contentTypeErr) } return strings.Join(errMsgs, "; ") From aa59641d5da52eaa3728c4624e16a3ac76688c39 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 12 May 2021 10:17:13 -0700 Subject: [PATCH 069/998] interop: use credentials.NewTLS() when possible (#4390) --- interop/client/client.go | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/interop/client/client.go b/interop/client/client.go index 975e0b5d2f3c..7b9339d7b614 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -20,7 +20,10 @@ package main import ( + "crypto/tls" + "crypto/x509" "flag" + "io/ioutil" "net" "strconv" @@ -57,7 +60,7 @@ var ( serverHost = flag.String("server_host", "localhost", "The server host name") serverPort = flag.Int("server_port", 10000, "The server port number") serviceConfigJSON = flag.String("service_config_json", "", "Disables service config lookups and sets the provided string as the default service config.") - tlsServerName = flag.String("server_host_override", "", "The server name use to verify the hostname returned by TLS handshake if it is not empty. 
Otherwise, --server_host is used.") + tlsServerName = flag.String("server_host_override", "", "The server name used to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") testCase = flag.String("test_case", "large_unary", `Configure different test cases. Valid options are: empty_unary : empty (zero bytes) request and response; @@ -135,22 +138,25 @@ func main() { var opts []grpc.DialOption switch credsChosen { case credsTLS: - var sn string - if *tlsServerName != "" { - sn = *tlsServerName - } - var creds credentials.TransportCredentials + var roots *x509.CertPool if *testCA { - var err error if *caFile == "" { *caFile = testdata.Path("ca.pem") } - creds, err = credentials.NewClientTLSFromFile(*caFile, sn) + b, err := ioutil.ReadFile(*caFile) if err != nil { - logger.Fatalf("Failed to create TLS credentials %v", err) + logger.Fatalf("Failed to read root certificate file %q: %v", *caFile, err) + } + roots = x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + logger.Fatalf("Failed to append certificates: %s", string(b)) } + } + var creds credentials.TransportCredentials + if *tlsServerName != "" { + creds = credentials.NewClientTLSFromCert(roots, *tlsServerName) } else { - creds = credentials.NewClientTLSFromCert(nil, sn) + creds = credentials.NewTLS(&tls.Config{RootCAs: roots}) } opts = append(opts, grpc.WithTransportCredentials(creds)) case credsALTS: From 8bf65c69b99ed9e1106c07c1f5d2f42f312b7ec5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 12 May 2021 10:18:50 -0700 Subject: [PATCH 070/998] xds: use same format while registering and watching resources (#4422) --- xds/internal/testutils/e2e/clientresources.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go index b521db950558..7c8311a51cc3 100644 --- a/xds/internal/testutils/e2e/clientresources.go +++ 
b/xds/internal/testutils/e2e/clientresources.go @@ -20,6 +20,8 @@ package e2e import ( "fmt" + "net" + "strconv" "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/golang/protobuf/proto" @@ -160,7 +162,7 @@ func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3 } } return &v3listenerpb.Listener{ - Name: fmt.Sprintf(ServerListenerResourceNameTemplate, fmt.Sprintf("%s:%d", host, port)), + Name: fmt.Sprintf(ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), Address: &v3corepb.Address{ Address: &v3corepb.Address_SocketAddress{ SocketAddress: &v3corepb.SocketAddress{ From 45e60095da54baad1e7ae28391941b64a40477e5 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 12 May 2021 17:28:49 -0400 Subject: [PATCH 071/998] xds: add support for aggregate clusters (#4332) Add support for aggregate clusters in CDS Balancer --- .../balancer/cdsbalancer/cdsbalancer.go | 5 + .../cdsbalancer/cdsbalancer_security_test.go | 2 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 14 +- .../balancer/cdsbalancer/cluster_handler.go | 273 +++++++ .../cdsbalancer/cluster_handler_test.go | 676 ++++++++++++++++++ xds/internal/testutils/fakeclient/client.go | 42 +- 6 files changed, 992 insertions(+), 20 deletions(-) create mode 100644 xds/internal/balancer/cdsbalancer/cluster_handler.go create mode 100644 xds/internal/balancer/cdsbalancer/cluster_handler_test.go diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index bf1519bb8ce0..9b987c00c2ca 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -151,6 +151,11 @@ type ccUpdate struct { err error } +type clusterHandlerUpdate struct { + chu []xdsclient.ClusterUpdate + err error +} + // scUpdate wraps a subConn update received from gRPC. 
This is directly passed // on to the edsBalancer. type scUpdate struct { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 5c746cfa163c..d1074f2a1c45 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -640,7 +640,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // registered watch should not be cancelled. sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } } diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 4476a1532d05..e93df5a10aab 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -399,7 +399,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // registered watch should not be cancelled. sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // The CDS balancer has not yet created an EDS balancer. So, this resolver @@ -438,7 +438,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // Make sure the registered watch is not cancelled. 
sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // Make sure the error is forwarded to the EDS balancer. @@ -453,7 +453,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // request cluster resource is not found. We should continue to watch it. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a resource-not-found-error") } // Make sure the error is forwarded to the EDS balancer. @@ -485,7 +485,7 @@ func (s) TestResolverError(t *testing.T) { // registered watch should not be cancelled. sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // The CDS balancer has not yet created an EDS balancer. So, this resolver @@ -523,7 +523,7 @@ func (s) TestResolverError(t *testing.T) { // Make sure the registered watch is not cancelled. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // Make sure the error is forwarded to the EDS balancer. 
@@ -535,7 +535,7 @@ func (s) TestResolverError(t *testing.T) { resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") cdsB.ResolverError(resourceErr) // Make sure the registered watch is cancelled. - if err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { t.Fatalf("want watch to be canceled, watchForCancel failed: %v", err) } // Make sure the error is forwarded to the EDS balancer. @@ -642,7 +642,7 @@ func (s) TestClose(t *testing.T) { // Make sure that the cluster watch registered by the CDS balancer is // cancelled. - if err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { t.Fatal(err) } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go new file mode 100644 index 000000000000..2dafb212f4c9 --- /dev/null +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -0,0 +1,273 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cdsbalancer + +import ( + "errors" + "sync" + + xdsclient "google.golang.org/grpc/xds/internal/client" +) + +var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") + +// clusterHandler will be given a name representing a cluster. 
It will then +// update the CDS policy constantly with a list of Clusters to pass down to +// XdsClusterResolverLoadBalancingPolicyConfig in a stream like fashion. +type clusterHandler struct { + // A mutex to protect entire tree of clusters. + clusterMutex sync.Mutex + root *clusterNode + rootClusterName string + + // A way to ping CDS Balancer about any updates or errors to a Node in the + // tree. This will either get called from this handler constructing an + // update or from a child with an error. Capacity of one as the only update + // CDS Balancer cares about is the most recent update. + updateChannel chan clusterHandlerUpdate + + xdsClient xdsClientInterface +} + +func (ch *clusterHandler) updateRootCluster(rootClusterName string) { + ch.clusterMutex.Lock() + defer ch.clusterMutex.Unlock() + if ch.root == nil { + // Construct a root node on first update. + ch.root = createClusterNode(rootClusterName, ch.xdsClient, ch) + ch.rootClusterName = rootClusterName + return + } + // Check if root cluster was changed. If it was, delete old one and start + // new one, if not do nothing. + if rootClusterName != ch.rootClusterName { + ch.root.delete() + ch.root = createClusterNode(rootClusterName, ch.xdsClient, ch) + ch.rootClusterName = rootClusterName + } +} + +// This function tries to construct a cluster update to send to CDS. +func (ch *clusterHandler) constructClusterUpdate() { + // If there was an error received no op, as this simply means one of the + // children hasn't received an update yet. + if clusterUpdate, err := ch.root.constructClusterUpdate(); err == nil { + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. 
+ select { + case <-ch.updateChannel: + default: + } + ch.updateChannel <- clusterHandlerUpdate{chu: clusterUpdate, err: nil} + } +} + +// close() is meant to be called by CDS when the CDS balancer is closed, and it +// cancels the watches for every cluster in the cluster tree. +func (ch *clusterHandler) close() { + ch.clusterMutex.Lock() + defer ch.clusterMutex.Unlock() + ch.root.delete() + ch.root = nil + ch.rootClusterName = "" +} + +// This logically represents a cluster. This handles all the logic for starting +// and stopping a cluster watch, handling any updates, and constructing a list +// recursively for the ClusterHandler. +type clusterNode struct { + // A way to cancel the watch for the cluster. + cancelFunc func() + + // A list of children, as the Node can be an aggregate Cluster. + children []*clusterNode + + // A ClusterUpdate in order to build a list of cluster updates for CDS to + // send down to child XdsClusterResolverLoadBalancingPolicy. + clusterUpdate xdsclient.ClusterUpdate + + // This boolean determines whether this Node has received an update or not. + // This isn't the best practice, but this will protect a list of Cluster + // Updates from being constructed if a cluster in the tree has not received + // an update yet. + receivedUpdate bool + + clusterHandler *clusterHandler +} + +// CreateClusterNode creates a cluster node from a given clusterName. This will +// also start the watch for that cluster. +func createClusterNode(clusterName string, xdsClient xdsClientInterface, topLevelHandler *clusterHandler) *clusterNode { + c := &clusterNode{ + clusterHandler: topLevelHandler, + } + // Communicate with the xds client here. + c.cancelFunc = xdsClient.WatchCluster(clusterName, c.handleResp) + return c +} + +// This function cancels the cluster watch on the cluster and all of it's +// children. 
+func (c *clusterNode) delete() { + c.cancelFunc() + for _, child := range c.children { + child.delete() + } +} + +// Construct cluster update (potentially a list of ClusterUpdates) for a node. +func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error) { + // If the cluster has not yet received an update, the cluster update is not + // yet ready. + if !c.receivedUpdate { + return nil, errNotReceivedUpdate + } + + // Base case - LogicalDNS or EDS. Both of these cluster types will be tied + // to a single ClusterUpdate. + if c.clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { + return []xdsclient.ClusterUpdate{c.clusterUpdate}, nil + } + + // If an aggregate construct a list by recursively calling down to all of + // it's children. + var childrenUpdates []xdsclient.ClusterUpdate + for _, child := range c.children { + childUpdateList, err := child.constructClusterUpdate() + if err != nil { + return nil, err + } + childrenUpdates = append(childrenUpdates, childUpdateList...) + } + return childrenUpdates, nil +} + +// handleResp handles a xds response for a particular cluster. This function +// also handles any logic with regards to any child state that may have changed. +// At the end of the handleResp(), the clusterUpdate will be pinged in certain +// situations to try and construct an update to send back to CDS. +func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err error) { + c.clusterHandler.clusterMutex.Lock() + defer c.clusterHandler.clusterMutex.Unlock() + if err != nil { // Write this error for run() to pick up in CDS LB policy. + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. 
+ select { + case <-c.clusterHandler.updateChannel: + default: + } + c.clusterHandler.updateChannel <- clusterHandlerUpdate{chu: nil, err: err} + return + } + + // deltaInClusterUpdateFields determines whether there was a delta in the + // clusterUpdate fields (forgetting the children). This will be used to help + // determine whether to pingClusterHandler at the end of this callback or + // not. + deltaInClusterUpdateFields := clusterUpdate.ServiceName != c.clusterUpdate.ServiceName || clusterUpdate.ClusterType != c.clusterUpdate.ClusterType + c.receivedUpdate = true + c.clusterUpdate = clusterUpdate + + // If the cluster was a leaf node, if the cluster update received had change + // in the cluster update then the overall cluster update would change and + // there is a possibility for the overall update to build so ping cluster + // handler to return. Also, if there was any children from previously, + // delete the children, as the cluster type is no longer an aggregate + // cluster. + if clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { + for _, child := range c.children { + child.delete() + } + c.children = nil + if deltaInClusterUpdateFields { + c.clusterHandler.constructClusterUpdate() + } + return + } + + // Aggregate cluster handling. + newChildren := make(map[string]bool) + for _, childName := range clusterUpdate.PrioritizedClusterNames { + newChildren[childName] = true + } + + // These booleans help determine whether this callback will ping the overall + // clusterHandler to try and construct an update to send back to CDS. This + // will be determined by whether there would be a change in the overall + // clusterUpdate for the whole tree (ex. change in clusterUpdate for current + // cluster or a deleted child) and also if there's even a possibility for + // the update to build (ex. if a child is created and a watch is started, + // that child hasn't received an update yet due to the mutex lock on this + // callback). 
+ var createdChild, deletedChild bool + + // This map will represent the current children of the cluster. It will be + // first added to in order to represent the new children. It will then have + // any children deleted that are no longer present. Then, from the cluster + // update received, will be used to construct the new child list. + mapCurrentChildren := make(map[string]*clusterNode) + for _, child := range c.children { + mapCurrentChildren[child.clusterUpdate.ServiceName] = child + } + + // Add and construct any new child nodes. + for child := range newChildren { + if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { + createdChild = true + mapCurrentChildren[child] = createClusterNode(child, c.clusterHandler.xdsClient, c.clusterHandler) + } + } + + // Delete any child nodes no longer in the aggregate cluster's children. + for child := range mapCurrentChildren { + if _, stillAChild := newChildren[child]; !stillAChild { + deletedChild = true + mapCurrentChildren[child].delete() + delete(mapCurrentChildren, child) + } + } + + // The order of the children list matters, so use the clusterUpdate from + // xdsclient as the ordering, and use that logical ordering for the new + // children list. This will be a mixture of child nodes which are all + // already constructed in the mapCurrentChildrenMap. + var children = make([]*clusterNode, 0, len(clusterUpdate.PrioritizedClusterNames)) + + for _, orderedChild := range clusterUpdate.PrioritizedClusterNames { + // The cluster's already have watches started for them in xds client, so + // you can use these pointers to construct the new children list, you + // just have to put them in the correct order using the original cluster + // update. 
+ currentChild := mapCurrentChildren[orderedChild] + children = append(children, currentChild) + } + + c.children = children + + // If the cluster is an aggregate cluster, if this callback created any new + // child cluster nodes, then there's no possibility for a full cluster + // update to successfully build, as those created children will not have + // received an update yet. However, if there was simply a child deleted, + // then there is a possibility that it will have a full cluster update to + // build and also will have a changed overall cluster update from the + // deleted child. + if deletedChild && !createdChild { + c.clusterHandler.constructClusterUpdate() + } +} diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go new file mode 100644 index 000000000000..96fca8a2696a --- /dev/null +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -0,0 +1,676 @@ +// +build go1.12 + +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package cdsbalancer
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	xdsclient "google.golang.org/grpc/xds/internal/client"
+	"google.golang.org/grpc/xds/internal/testutils/fakeclient"
+)
+
+const (
+	edsService              = "EDS Service"
+	logicalDNSService       = "Logical DNS Service"
+	edsService2             = "EDS Service 2"
+	logicalDNSService2      = "Logical DNS Service 2"
+	aggregateClusterService = "Aggregate Cluster Service"
+)
+
+// setupTests creates a clusterHandler with a fake xds client for control over
+// xds client.
+func setupTests(t *testing.T) (*clusterHandler, *fakeclient.Client) {
+	xdsC := fakeclient.NewClient()
+	ch := &clusterHandler{
+		xdsClient: xdsC,
+		// This is how the update channel is created in cds. It will be
+		// a separate channel to the buffer.Unbounded. This channel will also be
+		// read from to test any cluster updates.
+		updateChannel: make(chan clusterHandlerUpdate, 1),
+	}
+	return ch, xdsC
+}
+
+// Simplest case: the cluster handler receives a cluster name, handler starts a
+// watch for that cluster, xds client returns that it is a Leaf Node (EDS or
+// LogicalDNS), not a tree, so expectation that update is written to buffer
+// which will be read by CDS LB.
+func (s) TestSuccessCaseLeafNode(t *testing.T) { + tests := []struct { + name string + clusterName string + clusterUpdate xdsclient.ClusterUpdate + }{ + {name: "test-update-root-cluster-EDS-success", + clusterName: edsService, + clusterUpdate: xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService, + }}, + { + name: "test-update-root-cluster-Logical-DNS-success", + clusterName: logicalDNSService, + clusterUpdate: xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService, + }}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ch, fakeClient := setupTests(t) + // When you first update the root cluster, it should hit the code + // path which will start a cluster node for that root. Updating the + // root cluster logically represents a ping from a ClientConn. + ch.updateRootCluster(test.clusterName) + // Starting a cluster node involves communicating with the + // xdsClient, telling it to watch a cluster. + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != test.clusterName { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, test.clusterName) + } + // Invoke callback with xds client with a certain clusterUpdate. Due + // to this cluster update filling out the whole cluster tree, as the + // cluster is of a root type (EDS or Logical DNS) and not an + // aggregate cluster, this should trigger the ClusterHandler to + // write to the update buffer to update the CDS policy. 
+ fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{test.clusterUpdate}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + // Close the clusterHandler. This is meant to be called when the CDS + // Balancer is closed, and the call should cancel the watch for this + // cluster. + ch.close() + clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if clusterNameDeleted != test.clusterName { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, logicalDNSService) + } + }) + } +} + +// The cluster handler receives a cluster name, handler starts a watch for that +// cluster, xds client returns that it is a Leaf Node (EDS or LogicalDNS), not a +// tree, so expectation that first update is written to buffer which will be +// read by CDS LB. Then, send a new cluster update that is different, with the +// expectation that it is also written to the update buffer to send back to CDS. 
+func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { + tests := []struct { + name string + clusterName string + clusterUpdate xdsclient.ClusterUpdate + newClusterUpdate xdsclient.ClusterUpdate + }{ + {name: "test-update-root-cluster-then-new-update-EDS-success", + clusterName: edsService, + clusterUpdate: xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService, + }, + newClusterUpdate: xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService2, + }, + }, + { + name: "test-update-root-cluster-then-new-update-Logical-DNS-success", + clusterName: logicalDNSService, + clusterUpdate: xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService, + }, + newClusterUpdate: xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService2, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ch, fakeClient := setupTests(t) + ch.updateRootCluster(test.clusterName) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) + select { + case <-ch.updateChannel: + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from updateChannel.") + } + + // Check that sending the same cluster update does not induce a + // update to be written to update buffer. 
+ fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + select { + case <-ch.updateChannel: + t.Fatal("Should not have written an update to update buffer, as cluster update did not change.") + case <-shouldNotHappenCtx.Done(): + } + + // Above represents same thing as the simple + // TestSuccessCaseLeafNode, extra behavior + validation (clusterNode + // which is a leaf receives a changed clusterUpdate, which should + // ping clusterHandler, which should then write to the update + // buffer). + fakeClient.InvokeWatchClusterCallback(test.newClusterUpdate, nil) + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{test.newClusterUpdate}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from updateChannel.") + } + }) + } +} + +// TestUpdateRootClusterAggregateSuccess tests the case where an aggregate +// cluster is a root pointing to two child clusters one of type EDS and the +// other of type LogicalDNS. This test will then send cluster updates for both +// the children, and at the end there should be a successful clusterUpdate +// written to the update buffer to send back to CDS. 
+func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { + ch, fakeClient := setupTests(t) + ch.updateRootCluster(aggregateClusterService) + + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != aggregateClusterService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, aggregateClusterService) + } + + // The xdsClient telling the clusterNode that the cluster type is an + // aggregate cluster which will cause a lot of downstream behavior. For a + // cluster type that isn't an aggregate, the behavior is simple. The + // clusterNode will simply get a successful update, which will then ping the + // clusterHandler which will successfully build an update to send to the CDS + // policy. In the aggregate cluster case, the handleResp callback must also + // start watches for the aggregate cluster's children. The ping to the + // clusterHandler at the end of handleResp should be a no-op, as neither the + // EDS or LogicalDNS child clusters have received an update yet. + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeAggregate, + ServiceName: aggregateClusterService, + PrioritizedClusterNames: []string{edsService, logicalDNSService}, + }, nil) + + // xds client should be called to start a watch for one of the child + // clusters of the aggregate. The order of the children in the update + // written to the buffer to send to CDS matters, however there is no + // guarantee on the order it will start the watches of the children. 
+ gotCluster, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, edsService) + } + } + + // xds client should then be called to start a watch for the second child + // cluster. + gotCluster, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, logicalDNSService) + } + } + + // The handleResp() call on the root aggregate cluster should not ping the + // cluster handler to try and construct an update, as the handleResp() + // callback knows that when a child is created, it cannot possibly build a + // successful update yet. Thus, there should be nothing in the update + // channel. + + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + + select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Send callback for the EDS child cluster. + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService, + }, nil) + + // EDS child cluster will ping the Cluster Handler, to try an update, which + // still won't successfully build as the LogicalDNS child of the root + // aggregate cluster has not yet received and handled an update. 
+ select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Invoke callback for Logical DNS child cluster. + + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService, + }, nil) + + // Will Ping Cluster Handler, which will finally successfully build an + // update as all nodes in the tree of clusters have received an update. + // Since this cluster is an aggregate cluster comprised of two children, the + // returned update should be length 2, as the xds cluster resolver LB policy + // only cares about the full list of LogicalDNS and EDS clusters + // representing the base nodes of the tree of clusters. This list should be + // ordered as per the cluster update. + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService, + }, { + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService, + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestUpdateRootClusterAggregateThenChangeChild tests the scenario where you +// have an aggregate cluster with an EDS child and a LogicalDNS child, then you +// change one of the children and send an update for the changed child. This +// should write a new update to the update buffer to send back to CDS. +func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { + // This initial code is the same as the test for the aggregate success case, + // except without validations. This will get this test to the point where it + // can change one of the children. 
+	ch, fakeClient := setupTests(t)
+	ch.updateRootCluster(aggregateClusterService)
+
+	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer ctxCancel()
+	_, err := fakeClient.WaitForWatchCluster(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.WatchCDS failed with error: %v", err)
+	}
+
+	fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{
+		ClusterType:             xdsclient.ClusterTypeAggregate,
+		ServiceName:             aggregateClusterService,
+		PrioritizedClusterNames: []string{edsService, logicalDNSService},
+	}, nil)
+	fakeClient.WaitForWatchCluster(ctx)
+	fakeClient.WaitForWatchCluster(ctx)
+	fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{
+		ClusterType: xdsclient.ClusterTypeEDS,
+		ServiceName: edsService,
+	}, nil)
+	fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{
+		ClusterType: xdsclient.ClusterTypeLogicalDNS,
+		ServiceName: logicalDNSService,
+	}, nil)
+
+	select {
+	case <-ch.updateChannel:
+	case <-ctx.Done():
+		t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.")
+	}
+
+	fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{
+		ClusterType:             xdsclient.ClusterTypeAggregate,
+		ServiceName:             aggregateClusterService,
+		PrioritizedClusterNames: []string{edsService, logicalDNSService2},
+	}, nil)
+
+	// The cluster update lets the aggregate cluster know that its children
+	// are now edsService and logicalDNSService2, which implies that the
+	// aggregateCluster lost its old logicalDNSService child. Thus, the
+	// logicalDNSService child should be deleted.
+	clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.CancelCDS failed with error: %v", err)
+	}
+	if clusterNameDeleted != logicalDNSService {
+		t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, logicalDNSService)
+	}
+
+	// The handleResp() callback should then start a watch for
+	// logicalDNSService2.
+ clusterNameCreated, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if clusterNameCreated != logicalDNSService2 { + t.Fatalf("xdsClient.WatchCDS called for cluster %v, want: %v", clusterNameCreated, logicalDNSService2) + } + + // handleResp() should try and send an update here, but it will fail as + // logicalDNSService2 has not yet received an update. + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Invoke a callback for the new logicalDNSService2 - this will fill out the + // tree with successful updates. + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService2, + }, nil) + + // Behavior: This update make every node in the tree of cluster have + // received an update. Thus, at the end of this callback, when you ping the + // clusterHandler to try and construct an update, the update should now + // successfully be written to update buffer to send back to CDS. This new + // update should contain the new child of LogicalDNS2. 
+ + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService, + }, { + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService2, + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestUpdateRootClusterAggregateThenChangeRootToEDS tests the situation where +// you have a fully updated aggregate cluster (where AggregateCluster success +// test gets you) as the root cluster, then you update that root cluster to a +// cluster of type EDS. +func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) { + // This initial code is the same as the test for the aggregate success case, + // except without validations. This will get this test to the point where it + // can update the root cluster to one of type EDS. 
+ ch, fakeClient := setupTests(t) + ch.updateRootCluster(aggregateClusterService) + + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeAggregate, + ServiceName: aggregateClusterService, + PrioritizedClusterNames: []string{edsService, logicalDNSService}, + }, nil) + fakeClient.WaitForWatchCluster(ctx) + fakeClient.WaitForWatchCluster(ctx) + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService, + }, nil) + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ServiceName: logicalDNSService, + }, nil) + + select { + case <-ch.updateChannel: + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } + + // Changes the root aggregate cluster to a EDS cluster. This should delete + // the root aggregate cluster and all of it's children by successfully + // canceling the watches for them. + ch.updateRootCluster(edsService2) + + // Reads from the cancel channel, should first be type Aggregate, then EDS + // then Logical DNS. 
+ clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if clusterNameDeleted != aggregateClusterService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, logicalDNSService) + } + + clusterNameDeleted, err = fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if clusterNameDeleted != edsService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, logicalDNSService) + } + + clusterNameDeleted, err = fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if clusterNameDeleted != logicalDNSService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, logicalDNSService) + } + + // After deletion, it should start a watch for the EDS Cluster. The behavior + // for this EDS Cluster receiving an update from xds client and then + // successfully writing an update to send back to CDS is already tested in + // the updateEDS success case. + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService2 { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, edsService2) + } +} + +// TestHandleRespInvokedWithError tests that when handleResp is invoked with an +// error, that the error is successfully written to the update buffer. 
+func (s) TestHandleRespInvokedWithError(t *testing.T) { + ch, fakeClient := setupTests(t) + ch.updateRootCluster(edsService) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, errors.New("some error")) + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "some error" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } +} + +// TestSwitchClusterNodeBetweenLeafAndAggregated tests having an existing +// cluster node switch between a leaf and an aggregated cluster. When the +// cluster switches from a leaf to an aggregated cluster, it should add +// children, and when it switches back to a leaf, it should delete those new +// children and also successfully write a cluster update to the update buffer. +func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { + // Getting the test to the point where there's a root cluster which is a eds + // leaf. + ch, fakeClient := setupTests(t) + ch.updateRootCluster(edsService2) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService2, + }, nil) + select { + case <-ch.updateChannel: + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + // Switch the cluster to an aggregate cluster, this should cause two new + // child watches to be created. 
+ fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeAggregate, + ServiceName: edsService2, + PrioritizedClusterNames: []string{edsService, logicalDNSService}, + }, nil) + + // xds client should be called to start a watch for one of the child + // clusters of the aggregate. The order of the children in the update + // written to the buffer to send to CDS matters, however there is no + // guarantee on the order it will start the watches of the children. + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, edsService) + } + } + + // xds client should then be called to start a watch for the second child + // cluster. + gotCluster, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, logicalDNSService) + } + } + + // After starting a watch for the second child cluster, there should be no + // more watches started on the xds client. + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + gotCluster, err = fakeClient.WaitForWatchCluster(shouldNotHappenCtx) + if err == nil { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, no more watches should be started.", gotCluster) + } + + // The handleResp() call on the root aggregate cluster should not ping the + // cluster handler to try and construct an update, as the handleResp() + // callback knows that when a child is created, it cannot possibly build a + // successful update yet. 
Thus, there should be nothing in the update + // channel. + + shouldNotHappenCtx, shouldNotHappenCtxCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + + select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Switch the cluster back to an EDS Cluster. This should cause the two + // children to be deleted. + fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService2, + }, nil) + + // Should delete the two children (no guarantee of ordering deleted, which + // is ok), then successfully write an update to the update buffer as the + // full cluster tree has received updates. + clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + // No guarantee of ordering, so one of the children should be deleted first. + if clusterNameDeleted != edsService { + if clusterNameDeleted != logicalDNSService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want either: %v or: %v", clusterNameDeleted, edsService, logicalDNSService) + } + } + // Then the other child should be deleted. + clusterNameDeleted, err = fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if clusterNameDeleted != edsService { + if clusterNameDeleted != logicalDNSService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want either: %v or: %v", clusterNameDeleted, edsService, logicalDNSService) + } + } + + // After cancelling a watch for the second child cluster, there should be no + // more watches cancelled on the xds client. 
+ shouldNotHappenCtx, shouldNotHappenCtxCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + gotCluster, err = fakeClient.WaitForCancelClusterWatch(shouldNotHappenCtx) + if err == nil { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, no more watches should be cancelled.", gotCluster) + } + + // Then an update should successfully be written to the update buffer. + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ + ClusterType: xdsclient.ClusterTypeEDS, + ServiceName: edsService2, + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } +} diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 0978125b8aeb..eb4c659e505a 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -45,10 +45,10 @@ type Client struct { loadStore *load.Store bootstrapCfg *bootstrap.Config - ldsCb func(xdsclient.ListenerUpdate, error) - rdsCb func(xdsclient.RouteConfigUpdate, error) - cdsCb func(xdsclient.ClusterUpdate, error) - edsCb func(xdsclient.EndpointsUpdate, error) + ldsCb func(xdsclient.ListenerUpdate, error) + rdsCb func(xdsclient.RouteConfigUpdate, error) + cdsCbs map[string]func(xdsclient.ClusterUpdate, error) + edsCb func(xdsclient.EndpointsUpdate, error) } // WatchListener registers a LDS watch. @@ -121,10 +121,13 @@ func (xdsC *Client) WaitForCancelRouteConfigWatch(ctx context.Context) error { // WatchCluster registers a CDS watch. func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsclient.ClusterUpdate, error)) func() { - xdsC.cdsCb = callback + // Due to the tree like structure of aggregate clusters, there can be multiple callbacks persisted for each cluster + // node. 
However, the client doesn't care about the parent child relationship between the nodes, only that it invokes + // the right callback for a particular cluster. + xdsC.cdsCbs[clusterName] = callback xdsC.cdsWatchCh.Send(clusterName) return func() { - xdsC.cdsCancelCh.Send(nil) + xdsC.cdsCancelCh.Send(clusterName) } } @@ -143,14 +146,28 @@ func (xdsC *Client) WaitForWatchCluster(ctx context.Context) (string, error) { // Not thread safe with WatchCluster. Only call this after // WaitForWatchCluster. func (xdsC *Client) InvokeWatchClusterCallback(update xdsclient.ClusterUpdate, err error) { - xdsC.cdsCb(update, err) + // Keeps functionality with previous usage of this, if single callback call that callback. + if len(xdsC.cdsCbs) == 1 { + var clusterName string + for cluster := range xdsC.cdsCbs { + clusterName = cluster + } + xdsC.cdsCbs[clusterName](update, err) + } else { + // Have what callback you call with the update determined by the service name in the ClusterUpdate. Left up to the + // caller to make sure the cluster update matches with a persisted callback. + xdsC.cdsCbs[update.ServiceName](update, err) + } } // WaitForCancelClusterWatch waits for a CDS watch to be cancelled and returns // context.DeadlineExceeded otherwise. -func (xdsC *Client) WaitForCancelClusterWatch(ctx context.Context) error { - _, err := xdsC.cdsCancelCh.Receive(ctx) - return err +func (xdsC *Client) WaitForCancelClusterWatch(ctx context.Context) (string, error) { + clusterNameReceived, err := xdsC.cdsCancelCh.Receive(ctx) + if err != nil { + return "", err + } + return clusterNameReceived.(string), err } // WatchEndpoints registers an EDS watch for provided clusterName. 
@@ -251,14 +268,15 @@ func NewClientWithName(name string) *Client { name: name, ldsWatchCh: testutils.NewChannel(), rdsWatchCh: testutils.NewChannel(), - cdsWatchCh: testutils.NewChannel(), + cdsWatchCh: testutils.NewChannelWithSize(10), edsWatchCh: testutils.NewChannel(), ldsCancelCh: testutils.NewChannel(), rdsCancelCh: testutils.NewChannel(), - cdsCancelCh: testutils.NewChannel(), + cdsCancelCh: testutils.NewChannelWithSize(10), edsCancelCh: testutils.NewChannel(), loadReportCh: testutils.NewChannel(), closeCh: testutils.NewChannel(), loadStore: load.NewStore(), + cdsCbs: make(map[string]func(xdsclient.ClusterUpdate, error)), } } From 9cb99a52111e9b67165d498ec2c322774b54a5f1 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 12 May 2021 15:48:16 -0700 Subject: [PATCH 072/998] xds: pretty print xDS updates and service config (#4405) --- internal/pretty/pretty.go | 82 +++++++++++++++++++ .../balancer/cdsbalancer/cdsbalancer.go | 5 +- .../balancer/clusterimpl/clusterimpl.go | 2 + .../balancer/clustermanager/clustermanager.go | 3 +- xds/internal/balancer/edsbalancer/eds.go | 5 +- xds/internal/balancer/lrs/balancer.go | 2 + xds/internal/balancer/priority/balancer.go | 2 + .../balancer/weightedtarget/weightedtarget.go | 2 + xds/internal/client/bootstrap/bootstrap.go | 3 +- xds/internal/client/callback.go | 10 ++- xds/internal/client/v2/client.go | 5 +- xds/internal/client/v2/loadreport.go | 7 +- xds/internal/client/v3/client.go | 5 +- xds/internal/client/v3/loadreport.go | 7 +- xds/internal/client/watchers.go | 10 ++- xds/internal/client/xds.go | 9 +- xds/internal/resolver/serviceconfig.go | 6 +- xds/internal/resolver/watch_service.go | 5 +- xds/internal/resolver/xds_resolver.go | 5 +- 19 files changed, 140 insertions(+), 35 deletions(-) create mode 100644 internal/pretty/pretty.go diff --git a/internal/pretty/pretty.go b/internal/pretty/pretty.go new file mode 100644 index 000000000000..0177af4b5114 --- /dev/null +++ b/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* 
+ * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. 
+ return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 9b987c00c2ca..1134cf72570f 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -31,6 +31,7 @@ import ( xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/edsbalancer" @@ -318,7 +319,7 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { return } - b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, update.cds) + b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, pretty.ToJSON(update.cds)) // Process the security config from the received update before building the // child policy or forwarding the update to it. 
We do this because the child @@ -466,7 +467,7 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro return errBalancerClosed } - b.logger.Infof("Received update from resolver, balancer config: %+v", state.BalancerConfig) + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(state.BalancerConfig)) // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not // something that is received on the wire. diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 56664e391ac6..4bd29901d760 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/loadstore" @@ -189,6 +190,7 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState return nil } + cib.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index b4ae3710cd27..c00a9a16f458 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" 
"google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/balancergroup" @@ -115,7 +116,7 @@ func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - b.logger.Infof("update with config %+v, resolver state %+v", s.BalancerConfig, s.ResolverState) + b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) b.updateChildren(s, newConfig) return nil diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index d1a226e98987..5191dbd30db8 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -25,6 +25,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -211,7 +212,7 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) { case *subConnStateUpdate: x.edsImpl.handleSubConnStateChange(u.sc, u.state.ConnectivityState) case *balancer.ClientConnState: - x.logger.Infof("Receive update from resolver, balancer config: %+v", u.BalancerConfig) + x.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(u.BalancerConfig)) cfg, _ := u.BalancerConfig.(*EDSConfig) if cfg == nil { // service config parsing failed. should never happen. 
@@ -278,7 +279,7 @@ func (x *edsBalancer) startEndpointsWatch() { x.cancelEndpointsWatch() } cancelEDSWatch := x.xdsClient.WatchEndpoints(x.edsServiceName, func(update xdsclient.EndpointsUpdate, err error) { - x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, update) + x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, pretty.ToJSON(update)) x.handleEDSUpdate(update, err) }) x.logger.Infof("Watch started on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index e062fa234363..0642c54ed111 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/loadstore" xdsclient "google.golang.org/grpc/xds/internal/client" @@ -80,6 +81,7 @@ type lrsBalancer struct { } func (b *lrsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index e12a5068737f..d760a58fa4ec 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/balancergroup" @@ -108,6 
+109,7 @@ type priorityBalancer struct { } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index a210816332b0..89f5ec660a04 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -83,6 +84,7 @@ type weightedTargetBalancer struct { // creates/deletes sub-balancers and sends them update. Addresses are split into // groups based on hierarchy path. 
func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + w.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) diff --git a/xds/internal/client/bootstrap/bootstrap.go b/xds/internal/client/bootstrap/bootstrap.go index f32c698b4f55..a3fb5c0816b4 100644 --- a/xds/internal/client/bootstrap/bootstrap.go +++ b/xds/internal/client/bootstrap/bootstrap.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/version" ) @@ -270,7 +271,7 @@ func NewConfig() (*Config, error) { if err := config.updateNodeProto(); err != nil { return nil, err } - logger.Infof("Bootstrap config for creating xds-client: %+v", config) + logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) return config, nil } diff --git a/xds/internal/client/callback.go b/xds/internal/client/callback.go index da8e2f62d6c0..ac6f0151a5f9 100644 --- a/xds/internal/client/callback.go +++ b/xds/internal/client/callback.go @@ -18,6 +18,8 @@ package client +import "google.golang.org/grpc/internal/pretty" + type watcherInfoWithUpdate struct { wi *watchInfo update interface{} @@ -104,7 +106,7 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata Up wi.newUpdate(update) } // Sync cache. 
- c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, update) + c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) c.ldsCache[name] = update c.ldsMD[name] = metadata } @@ -163,7 +165,7 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metad wi.newUpdate(update) } // Sync cache. - c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, update) + c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) c.rdsCache[name] = update c.rdsMD[name] = metadata } @@ -205,7 +207,7 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata Upda wi.newUpdate(update) } // Sync cache. - c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, update) + c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) c.cdsCache[name] = update c.cdsMD[name] = metadata } @@ -264,7 +266,7 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata U wi.newUpdate(update) } // Sync cache. 
- c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, update) + c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) c.edsCache[name] = update c.edsMD[name] = metadata } diff --git a/xds/internal/client/v2/client.go b/xds/internal/client/v2/client.go index b6bc4908120d..b91482d13cf9 100644 --- a/xds/internal/client/v2/client.go +++ b/xds/internal/client/v2/client.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/version" @@ -125,7 +126,7 @@ func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rTyp if err := stream.Send(req); err != nil { return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) } - v2c.logger.Debugf("ADS request sent: %v", req) + v2c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) return nil } @@ -143,7 +144,7 @@ func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v2c.logger.Debugf("ADS response received: %v", resp) + v2c.logger.Debugf("ADS response received: %v", pretty.ToJSON(resp)) return resp, nil } diff --git a/xds/internal/client/v2/loadreport.go b/xds/internal/client/v2/loadreport.go index 69405fcd9ad3..17ea8c8d4c43 100644 --- a/xds/internal/client/v2/loadreport.go +++ b/xds/internal/client/v2/loadreport.go @@ -26,6 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/client/load" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -57,7 +58,7 @@ func (v2c *client) 
SendFirstLoadStatsRequest(s grpc.ClientStream) error { node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) req := &lrspb.LoadStatsRequest{Node: node} - v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", req) + v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) return stream.Send(req) } @@ -71,7 +72,7 @@ func (v2c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time. if err != nil { return nil, 0, fmt.Errorf("lrs: failed to receive first response: %v", err) } - v2c.logger.Infof("lrs: received first LoadStatsResponse: %+v", resp) + v2c.logger.Infof("lrs: received first LoadStatsResponse: %+v", pretty.ToJSON(resp)) interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) if err != nil { @@ -149,6 +150,6 @@ func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) } req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v2c.logger.Infof("lrs: sending LRS loads: %+v", req) + v2c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) return stream.Send(req) } diff --git a/xds/internal/client/v3/client.go b/xds/internal/client/v3/client.go index 55cae56d8cc6..200da2ac7d73 100644 --- a/xds/internal/client/v3/client.go +++ b/xds/internal/client/v3/client.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/version" @@ -125,7 +126,7 @@ func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rTyp if err := stream.Send(req); err != nil { return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) } - v3c.logger.Debugf("ADS request sent: %v", req) + v3c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) return nil } @@ -143,7 +144,7 @@ func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { 
return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v3c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v3c.logger.Debugf("ADS response received: %+v", resp) + v3c.logger.Debugf("ADS response received: %+v", pretty.ToJSON(resp)) return resp, nil } diff --git a/xds/internal/client/v3/loadreport.go b/xds/internal/client/v3/loadreport.go index 74e18632aa07..1de0ccf57503 100644 --- a/xds/internal/client/v3/loadreport.go +++ b/xds/internal/client/v3/loadreport.go @@ -26,6 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/client/load" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -57,7 +58,7 @@ func (v3c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) req := &lrspb.LoadStatsRequest{Node: node} - v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", req) + v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) return stream.Send(req) } @@ -71,7 +72,7 @@ func (v3c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time. 
if err != nil { return nil, 0, fmt.Errorf("lrs: failed to receive first response: %v", err) } - v3c.logger.Infof("lrs: received first LoadStatsResponse: %+v", resp) + v3c.logger.Infof("lrs: received first LoadStatsResponse: %+v", pretty.ToJSON(resp)) interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) if err != nil { @@ -148,6 +149,6 @@ func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) } req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v3c.logger.Infof("lrs: sending LRS loads: %+v", req) + v3c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) return stream.Send(req) } diff --git a/xds/internal/client/watchers.go b/xds/internal/client/watchers.go index 9fafe5a60f83..446f5cca9973 100644 --- a/xds/internal/client/watchers.go +++ b/xds/internal/client/watchers.go @@ -22,6 +22,8 @@ import ( "fmt" "sync" "time" + + "google.golang.org/grpc/internal/pretty" ) type watchInfoState int @@ -161,22 +163,22 @@ func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { switch wi.rType { case ListenerResource: if v, ok := c.ldsCache[resourceName]; ok { - c.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, v) + c.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } case RouteConfigResource: if v, ok := c.rdsCache[resourceName]; ok { - c.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, v) + c.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } case ClusterResource: if v, ok := c.cdsCache[resourceName]; ok { - c.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, v) + c.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } case EndpointsResource: if v, ok := c.edsCache[resourceName]; ok { - c.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, v) + 
c.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } } diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index 55bcc2e936ca..cf705587e24a 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -39,6 +39,7 @@ import ( v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/pretty" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/grpclog" @@ -72,7 +73,7 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri if err := proto.Unmarshal(r.GetValue(), lis); err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, lis) + logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis)) lu, err := processListener(lis, logger, v2) if err != nil { @@ -360,7 +361,7 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s if err := proto.Unmarshal(r.GetValue(), rc); err != nil { return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, rc) + logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, pretty.ToJSON(rc)) // TODO: Pass version.TransportAPI instead of relying upon the type URL v2 := r.GetTypeUrl() == version.V2RouteConfigURL @@ -572,7 +573,7 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", 
cluster.GetName(), cluster, cluster) + logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster)) cu, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { return cluster.GetName(), ClusterUpdate{}, err @@ -765,7 +766,7 @@ func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (str if err := proto.Unmarshal(r.GetValue(), cla); err != nil { return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, cla) + logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) u, err := parseEDSRespProto(cla) if err != nil { diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index ef7c37128c13..49ee8970d7d8 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -76,7 +76,7 @@ func (r *xdsResolver) pruneActiveClusters() { // serviceConfigJSON produces a service config in JSON format representing all // the clusters referenced in activeClusters. This includes clusters with zero // references, so they must be pruned first. -func serviceConfigJSON(activeClusters map[string]*clusterInfo) (string, error) { +func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { // Generate children (all entries in activeClusters). 
children := make(map[string]xdsChildConfig) for cluster := range activeClusters { @@ -93,9 +93,9 @@ func serviceConfigJSON(activeClusters map[string]*clusterInfo) (string, error) { bs, err := json.Marshal(sc) if err != nil { - return "", fmt.Errorf("failed to marshal json: %v", err) + return nil, fmt.Errorf("failed to marshal json: %v", err) } - return string(bs), nil + return bs, nil } type virtualHost struct { diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 7667592ccd6f..f29fb32832e0 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -25,6 +25,7 @@ import ( "time" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" xdsclient "google.golang.org/grpc/xds/internal/client" ) @@ -82,7 +83,7 @@ type serviceUpdateWatcher struct { } func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, err error) { - w.logger.Infof("received LDS update: %+v, err: %v", update, err) + w.logger.Infof("received LDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() if w.closed { @@ -163,7 +164,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteC } func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, err error) { - w.logger.Infof("received RDS update: %+v, err: %v", update, err) + w.logger.Infof("received RDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() if w.closed { diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index d8c09db69b5a..e995fa9fa8fe 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" 
"google.golang.org/grpc/xds/internal/client/bootstrap" @@ -171,11 +172,11 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { r.cc.ReportError(err) return false } - r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, sc) + r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, pretty.FormatJSON(sc)) // Send the update to the ClientConn. state := iresolver.SetConfigSelector(resolver.State{ - ServiceConfig: r.cc.ParseServiceConfig(sc), + ServiceConfig: r.cc.ParseServiceConfig(string(sc)), }, cs) r.cc.UpdateState(state) return true From 397adad6a0d1d12ddd9b7f0101e902da274c15c8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 12 May 2021 15:52:15 -0700 Subject: [PATCH 073/998] update go.mod and go.sum to point to latest go-control-plane (#4425) --- examples/go.mod | 4 +-- examples/go.sum | 39 ++++++++++++++---------- go.mod | 10 +++---- go.sum | 44 ++++++++++++++++++++++------ security/advancedtls/examples/go.mod | 2 +- security/advancedtls/examples/go.sum | 34 +++++++++++++-------- security/advancedtls/go.mod | 2 +- security/advancedtls/go.sum | 34 +++++++++++++-------- 8 files changed, 110 insertions(+), 59 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index 18c67afed969..143a8fc03f57 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -3,10 +3,10 @@ module google.golang.org/grpc/examples go 1.11 require ( - github.com/golang/protobuf v1.4.2 + github.com/golang/protobuf v1.4.3 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 - google.golang.org/grpc v1.31.0 + google.golang.org/grpc v1.36.0 google.golang.org/protobuf v1.25.0 ) diff --git a/examples/go.sum b/examples/go.sum index 1984770a80b3..a5b967336903 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1,26 +1,31 @@ 
cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -28,39 
+33,43 @@ github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 
h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -77,7 +86,7 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/go.mod b/go.mod index b177cfa66df5..6eed9370b462 100644 --- a/go.mod +++ b/go.mod @@ -4,14 +4,14 @@ go 1.11 require ( github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 - github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.2 + github.com/golang/protobuf v1.4.3 github.com/google/go-cmp v0.5.0 github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + golang.org/x/net v0.0.0-20200822124328-c89045814202 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 google.golang.org/protobuf v1.25.0 ) diff --git a/go.sum b/go.sum index 24d2976abbaf..51fd1436e38f 100644 --- a/go.sum +++ b/go.sum @@ -1,32 +1,42 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 
h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= -github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -35,49 +45,64 @@ github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202 
h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -90,7 +115,8 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index 936aa476893b..f141d4c05c81 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -3,7 +3,7 @@ module google.golang.org/grpc/security/advancedtls/examples go 1.15 require ( - google.golang.org/grpc v1.33.1 + google.golang.org/grpc v1.36.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b google.golang.org/grpc/security/advancedtls v0.0.0-20201112215255-90f1b3ee835b ) diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 519267dbc278..c24d070fa7f1 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,20 +1,24 
@@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -22,35 +26,39 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -67,5 +75,5 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index be35029503da..90dee4a46fd0 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -4,7 +4,7 @@ go 1.14 require ( github.com/google/go-cmp v0.5.1 // indirect - google.golang.org/grpc v1.31.0 + google.golang.org/grpc v1.36.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 519267dbc278..c24d070fa7f1 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,20 +1,24 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 
h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -22,35 +26,39 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -67,5 +75,5 @@ google.golang.org/protobuf 
v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From a712a738897ceebf3b6690d722006b61013572e0 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 12 May 2021 16:25:07 -0700 Subject: [PATCH 074/998] xds/cds: add separate fields for cluster name and eds service name (#4414) --- .../balancer/cdsbalancer/cdsbalancer.go | 3 +- .../cdsbalancer/cdsbalancer_security_test.go | 14 +-- .../balancer/cdsbalancer/cdsbalancer_test.go | 16 ++-- .../balancer/cdsbalancer/cluster_handler.go | 4 +- .../cdsbalancer/cluster_handler_test.go | 50 +++++------ xds/internal/balancer/edsbalancer/config.go | 8 +- xds/internal/balancer/edsbalancer/eds.go | 63 +++++++++---- .../balancer/edsbalancer/eds_impl_test.go | 11 +-- xds/internal/balancer/edsbalancer/eds_test.go | 53 +++++++++-- .../edsbalancer/load_store_wrapper.go | 88 ------------------- xds/internal/client/cds_test.go | 74 +++++++++++----- xds/internal/client/client.go | 15 ++-- xds/internal/client/client_test.go | 4 +- xds/internal/client/v2/cds_test.go | 4 +- xds/internal/client/watchers_cluster_test.go | 16 ++-- xds/internal/client/xds.go | 75 +++++++--------- xds/internal/testutils/fakeclient/client.go | 2 +- 17 files changed, 250 insertions(+), 250 deletions(-) delete mode 100644 xds/internal/balancer/edsbalancer/load_store_wrapper.go diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 
1134cf72570f..0e8b83481ac9 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -348,7 +348,8 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) } lbCfg := &edsbalancer.EDSConfig{ - EDSServiceName: update.cds.ServiceName, + ClusterName: update.cds.ClusterName, + EDSServiceName: update.cds.EDSServiceName, MaxConcurrentRequests: update.cds.MaxRequests, } if update.cds.EnableLRS { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index d1074f2a1c45..30c0d9105ed3 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -60,7 +60,7 @@ var ( fpb1, fpb2 *fakeProviderBuilder bootstrapConfig *bootstrap.Config cdsUpdateWithGoodSecurityCfg = xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default1", IdentityInstanceName: "default2", @@ -68,7 +68,7 @@ var ( }, } cdsUpdateWithMissingSecurityCfg = xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "not-default", }, @@ -256,7 +256,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. 
- cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -312,7 +312,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -572,7 +572,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // an update which contains bad security config. So, we expect the CDS // balancer to forward this error to the EDS balancer and eventually the // channel needs to be put in a bad state. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -678,7 +678,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default1", SubjectAltNameMatchers: testSANMatchers, @@ -703,7 +703,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // Push another update with a new security configuration. 
cdsUpdate = xdsclient.ClusterUpdate{ - ServiceName: serviceName, + ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ RootInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index e93df5a10aab..fba3ee1531e9 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -197,7 +197,7 @@ func cdsCCS(cluster string) balancer.ClientConnState { // cdsBalancer to the edsBalancer. func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { lbCfg := &edsbalancer.EDSConfig{ - EDSServiceName: service, + ClusterName: service, MaxConcurrentRequests: countMax, } if enableLRS { @@ -354,12 +354,12 @@ func (s) TestHandleClusterUpdate(t *testing.T) { }{ { name: "happy-case-with-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, + cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, wantCCS: edsCCS(serviceName, nil, true), }, { name: "happy-case-without-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName}, + cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName}, wantCCS: edsCCS(serviceName, nil, false), }, } @@ -427,7 +427,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -512,7 +512,7 @@ func (s) TestResolverError(t *testing.T) { // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -561,7 +561,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -596,7 +596,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. var maxRequests uint32 = 1 - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName, MaxRequests: &maxRequests} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName, MaxRequests: &maxRequests} wantCCS := edsCCS(serviceName, &maxRequests, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -629,7 +629,7 @@ func (s) TestClose(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newEDSBalancer function as part of test setup. 
- cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index 2dafb212f4c9..4c13b55594da 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -180,7 +180,7 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro // clusterUpdate fields (forgetting the children). This will be used to help // determine whether to pingClusterHandler at the end of this callback or // not. - deltaInClusterUpdateFields := clusterUpdate.ServiceName != c.clusterUpdate.ServiceName || clusterUpdate.ClusterType != c.clusterUpdate.ClusterType + deltaInClusterUpdateFields := clusterUpdate.ClusterName != c.clusterUpdate.ClusterName || clusterUpdate.ClusterType != c.clusterUpdate.ClusterType c.receivedUpdate = true c.clusterUpdate = clusterUpdate @@ -223,7 +223,7 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro // update received, will be used to construct the new child list. mapCurrentChildren := make(map[string]*clusterNode) for _, child := range c.children { - mapCurrentChildren[child.clusterUpdate.ServiceName] = child + mapCurrentChildren[child.clusterUpdate.ClusterName] = child } // Add and construct any new child nodes. 
diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index 96fca8a2696a..049cdf55ee6a 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -64,14 +64,14 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { clusterName: edsService, clusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService, + ClusterName: edsService, }}, { name: "test-update-root-cluster-Logical-DNS-success", clusterName: logicalDNSService, clusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService, + ClusterName: logicalDNSService, }}, } @@ -138,11 +138,11 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { clusterName: edsService, clusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService, + ClusterName: edsService, }, newClusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService2, + ClusterName: edsService2, }, }, { @@ -150,11 +150,11 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { clusterName: logicalDNSService, clusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService, + ClusterName: logicalDNSService, }, newClusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService2, + ClusterName: logicalDNSService2, }, }, } @@ -235,7 +235,7 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // EDS or LogicalDNS child clusters have received an update yet. 
fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeAggregate, - ServiceName: aggregateClusterService, + ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) @@ -283,7 +283,7 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // Send callback for the EDS child cluster. fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService, + ClusterName: edsService, }, nil) // EDS child cluster will ping the Cluster Handler, to try an update, which @@ -299,7 +299,7 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService, + ClusterName: logicalDNSService, }, nil) // Will Ping Cluster Handler, which will finally successfully build an @@ -313,10 +313,10 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { case chu := <-ch.updateChannel: if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService, + ClusterName: edsService, }, { ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService, + ClusterName: logicalDNSService, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } @@ -345,18 +345,18 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeAggregate, - ServiceName: aggregateClusterService, + ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) fakeClient.WaitForWatchCluster(ctx) fakeClient.WaitForWatchCluster(ctx) fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: 
edsService, + ClusterName: edsService, }, nil) fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService, + ClusterName: logicalDNSService, }, nil) select { @@ -367,7 +367,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeAggregate, - ServiceName: aggregateClusterService, + ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService2}, }, nil) @@ -407,7 +407,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { // tree with successful updates. fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService2, + ClusterName: logicalDNSService2, }, nil) // Behavior: This update make every node in the tree of cluster have @@ -420,10 +420,10 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { case chu := <-ch.updateChannel: if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService, + ClusterName: edsService, }, { ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService2, + ClusterName: logicalDNSService2, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } @@ -452,18 +452,18 @@ func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) { fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeAggregate, - ServiceName: aggregateClusterService, + ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) fakeClient.WaitForWatchCluster(ctx) fakeClient.WaitForWatchCluster(ctx) fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - 
ServiceName: edsService, + ClusterName: edsService, }, nil) fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, - ServiceName: logicalDNSService, + ClusterName: logicalDNSService, }, nil) select { @@ -556,7 +556,7 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { } fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService2, + ClusterName: edsService2, }, nil) select { case <-ch.updateChannel: @@ -567,7 +567,7 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // child watches to be created. fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeAggregate, - ServiceName: edsService2, + ClusterName: edsService2, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) @@ -625,7 +625,7 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // children to be deleted. fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService2, + ClusterName: edsService2, }, nil) // Should delete the two children (no guarantee of ordering deleted, which @@ -666,7 +666,7 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { case chu := <-ch.updateChannel: if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ ClusterType: xdsclient.ClusterTypeEDS, - ServiceName: edsService2, + ClusterName: edsService2, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } diff --git a/xds/internal/balancer/edsbalancer/config.go b/xds/internal/balancer/edsbalancer/config.go index 11c1338c81f7..d1583e2bf276 100644 --- a/xds/internal/balancer/edsbalancer/config.go +++ b/xds/internal/balancer/edsbalancer/config.go @@ -35,8 +35,10 @@ type EDSConfig struct { // FallBackPolicy represents the load balancing config for the // fallback. 
FallBackPolicy *loadBalancingConfig - // Name to use in EDS query. If not present, defaults to the server - // name from the target URI. + // ClusterName is the cluster name. + ClusterName string + // EDSServiceName is the name to use in EDS query. If not set, use + // ClusterName. EDSServiceName string // MaxConcurrentRequests is the max number of concurrent request allowed for // this service. If unset, default value 1024 is used. @@ -59,6 +61,7 @@ type EDSConfig struct { type edsConfigJSON struct { ChildPolicy []*loadBalancingConfig FallbackPolicy []*loadBalancingConfig + ClusterName string EDSServiceName string MaxConcurrentRequests *uint32 LRSLoadReportingServerName *string @@ -73,6 +76,7 @@ func (l *EDSConfig) UnmarshalJSON(data []byte) error { return err } + l.ClusterName = configJSON.ClusterName l.EDSServiceName = configJSON.EDSServiceName l.MaxConcurrentRequests = configJSON.MaxConcurrentRequests l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 5191dbd30db8..98b1dbaedd46 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -70,7 +71,7 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp grpcUpdate: make(chan interface{}), xdsClientUpdate: make(chan *edsUpdate), childPolicyUpdate: buffer.NewUnbounded(), - lsw: &loadStoreWrapper{}, + loadWrapper: loadstore.NewWrapper(), config: &EDSConfig{}, } x.logger = prefixLogger(x) @@ -82,7 +83,7 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp } x.xdsClient = client - x.edsImpl = 
newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.lsw, x.logger) + x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.loadWrapper, x.logger) x.logger.Infof("Created") go x.run() return x @@ -141,14 +142,14 @@ type edsBalancer struct { xdsClientUpdate chan *edsUpdate childPolicyUpdate *buffer.Unbounded - xdsClient xdsClientInterface - lsw *loadStoreWrapper - config *EDSConfig // may change when passed a different service config - edsImpl edsBalancerImplInterface + xdsClient xdsClientInterface + loadWrapper *loadstore.Wrapper + config *EDSConfig // may change when passed a different service config + edsImpl edsBalancerImplInterface - // edsServiceName is the edsServiceName currently being watched, not - // necessary the edsServiceName from service config. + clusterName string edsServiceName string + edsToWatch string // this is edsServiceName if it's set, otherwise, it's clusterName. cancelEndpointsWatch func() loadReportServer *string // LRS is disabled if loadReporterServer is nil. cancelLoadReport func() @@ -223,7 +224,7 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) { x.logger.Warningf("failed to update xDS client: %v", err) } - x.edsImpl.updateServiceRequestsConfig(cfg.EDSServiceName, cfg.MaxConcurrentRequests) + x.edsImpl.updateServiceRequestsConfig(cfg.ClusterName, cfg.MaxConcurrentRequests) // We will update the edsImpl with the new child policy, if we got a // different one. @@ -246,10 +247,35 @@ func (x *edsBalancer) handleGRPCUpdate(update interface{}) { // handleServiceConfigUpdate applies the service config update, watching a new // EDS service name and restarting LRS stream, as required. func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { - // Restart EDS watch when the edsServiceName has changed. 
+ var updateLoadClusterAndService bool + if x.clusterName != config.ClusterName { + updateLoadClusterAndService = true + x.clusterName = config.ClusterName + x.edsImpl.updateClusterName(x.clusterName) + } if x.edsServiceName != config.EDSServiceName { + updateLoadClusterAndService = true x.edsServiceName = config.EDSServiceName + } + + // If EDSServiceName is set, use it to watch EDS. Otherwise, use the cluster + // name. + newEDSToWatch := config.EDSServiceName + if newEDSToWatch == "" { + newEDSToWatch = config.ClusterName + } + var restartEDSWatch bool + if x.edsToWatch != newEDSToWatch { + restartEDSWatch = true + x.edsToWatch = newEDSToWatch + } + + // Restart EDS watch when the eds name has changed. + if restartEDSWatch { x.startEndpointsWatch() + } + + if updateLoadClusterAndService { // TODO: this update for the LRS service name is too early. It should // only apply to the new EDS response. But this is applied to the RPCs // before the new EDS response. To fully fix this, the EDS balancer @@ -257,14 +283,13 @@ func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { // // This is OK for now, because we don't actually expect edsServiceName // to change. Fix this (a bigger change) will happen later. - x.lsw.updateServiceName(x.edsServiceName) - x.edsImpl.updateClusterName(x.edsServiceName) + x.loadWrapper.UpdateClusterAndService(x.clusterName, x.edsServiceName) } // Restart load reporting when the loadReportServer name has changed. 
if !equalStringPointers(x.loadReportServer, config.LrsLoadReportingServerName) { loadStore := x.startLoadReport(config.LrsLoadReportingServerName) - x.lsw.updateLoadStore(loadStore) + x.loadWrapper.UpdateLoadStore(loadStore) } return nil @@ -278,14 +303,15 @@ func (x *edsBalancer) startEndpointsWatch() { if x.cancelEndpointsWatch != nil { x.cancelEndpointsWatch() } - cancelEDSWatch := x.xdsClient.WatchEndpoints(x.edsServiceName, func(update xdsclient.EndpointsUpdate, err error) { + edsToWatch := x.edsToWatch + cancelEDSWatch := x.xdsClient.WatchEndpoints(edsToWatch, func(update xdsclient.EndpointsUpdate, err error) { x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, pretty.ToJSON(update)) x.handleEDSUpdate(update, err) }) - x.logger.Infof("Watch started on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) + x.logger.Infof("Watch started on resource name %v with xds-client %p", edsToWatch, x.xdsClient) x.cancelEndpointsWatch = func() { cancelEDSWatch() - x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) + x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", edsToWatch, x.xdsClient) } } @@ -293,10 +319,12 @@ func (x *edsBalancer) cancelWatch() { x.loadReportServer = nil if x.cancelLoadReport != nil { x.cancelLoadReport() + x.cancelLoadReport = nil } - x.edsServiceName = "" if x.cancelEndpointsWatch != nil { + x.edsToWatch = "" x.cancelEndpointsWatch() + x.cancelEndpointsWatch = nil } } @@ -310,6 +338,7 @@ func (x *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { x.loadReportServer = loadReportServer if x.cancelLoadReport != nil { x.cancelLoadReport() + x.cancelLoadReport = nil } if loadReportServer == nil { return nil diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index c5e3071d10d8..3cfc620a2400 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go 
+++ b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -29,6 +29,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal" + "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" @@ -820,9 +821,9 @@ func (s) TestEDS_LoadReport(t *testing.T) { // implements the LoadStore() method to return the underlying load.Store to // be used. loadStore := load.NewStore() - lsWrapper := &loadStoreWrapper{} - lsWrapper.updateServiceName(testClusterNames[0]) - lsWrapper.updateLoadStore(loadStore) + lsWrapper := loadstore.NewWrapper() + lsWrapper.UpdateClusterAndService(testClusterNames[0], "") + lsWrapper.UpdateLoadStore(loadStore) cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, lsWrapper, nil) @@ -913,8 +914,8 @@ func (s) TestEDS_LoadReport(t *testing.T) { // TestEDS_LoadReportDisabled covers the case that LRS is disabled. It makes // sure the EDS implementation isn't broken (doesn't panic). func (s) TestEDS_LoadReportDisabled(t *testing.T) { - lsWrapper := &loadStoreWrapper{} - lsWrapper.updateServiceName(testClusterNames[0]) + lsWrapper := loadstore.NewWrapper() + lsWrapper.UpdateClusterAndService(testClusterNames[0], "") // Not calling lsWrapper.updateLoadStore(loadStore) because LRS is disabled. 
cc := testutils.NewTestClientConn(t) diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 5c9e5f0b1d53..3fe66098973c 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -354,6 +354,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgA, + ClusterName: testEDSClusterName, EDSServiceName: testServiceName, }, }); err != nil { @@ -367,7 +368,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsLB.waitForChildPolicy(ctx, lbCfgA); err != nil { t.Fatal(err) } - if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { + if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { t.Fatal(err) } if err := edsLB.waitForCountMaxUpdate(ctx, nil); err != nil { @@ -382,6 +383,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgB, + ClusterName: testEDSClusterName, EDSServiceName: testServiceName, MaxConcurrentRequests: &testCountMax, }, @@ -391,7 +393,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { if err := edsLB.waitForChildPolicy(ctx, lbCfgB); err != nil { t.Fatal(err) } - if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { + if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { // Counter is updated even though the service name didn't change. The // eds_impl will compare the service names, and skip if it didn't change. t.Fatal(err) @@ -510,6 +512,18 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { t.Fatalf("eds impl expecting empty update, got %v", err) } + + // An update with the same service name should not trigger a new watch. 
+ if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + }); err != nil { + t.Fatal(err) + } + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := xdsC.WaitForWatchEDS(sCtx); err != context.DeadlineExceeded { + t.Fatal("got unexpected new EDS watch") + } } // TestErrorFromResolver verifies that resolver errors are handled correctly. @@ -575,6 +589,17 @@ func (s) TestErrorFromResolver(t *testing.T) { if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { t.Fatalf("EDS impl got unexpected EDS response: %v", err) } + + // An update with the same service name should trigger a new watch, because + // the previous watch was canceled. + if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + }); err != nil { + t.Fatal(err) + } + if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { + t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) + } } // Given a list of resource names, verifies that EDS requests for the same are @@ -586,7 +611,7 @@ func verifyExpectedRequests(ctx context.Context, fc *fakeclient.Client, resource if err := fc.WaitForCancelEDSWatch(ctx); err != nil { return fmt.Errorf("timed out when expecting resource %q", name) } - return nil + continue } resName, err := fc.WaitForWatchEDS(ctx) @@ -615,6 +640,18 @@ func (s) TestClientWatchEDS(t *testing.T) { } defer edsB.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // If eds service name is not set, should watch for cluster name. 
+ if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ClusterName: "cluster-1"}, + }); err != nil { + t.Fatal(err) + } + if err := verifyExpectedRequests(ctx, xdsC, "cluster-1"); err != nil { + t.Fatal(err) + } + // Update with an non-empty edsServiceName should trigger an EDS watch for // the same. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ @@ -622,9 +659,7 @@ func (s) TestClientWatchEDS(t *testing.T) { }); err != nil { t.Fatal(err) } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := verifyExpectedRequests(ctx, xdsC, "foobar-1"); err != nil { + if err := verifyExpectedRequests(ctx, xdsC, "", "foobar-1"); err != nil { t.Fatal(err) } @@ -660,7 +695,7 @@ func (s) TestCounterUpdate(t *testing.T) { // Update should trigger counter update with provided service name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - EDSServiceName: "foobar-1", + ClusterName: "foobar-1", MaxConcurrentRequests: &testCountMax, }, }); err != nil { @@ -694,7 +729,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - EDSServiceName: "foobar-1", + ClusterName: "foobar-1", }, }); err != nil { t.Fatal(err) @@ -713,7 +748,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ - EDSServiceName: "foobar-2", + ClusterName: "foobar-2", }, }); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/edsbalancer/load_store_wrapper.go b/xds/internal/balancer/edsbalancer/load_store_wrapper.go deleted file mode 100644 index 18904e47a42e..000000000000 --- a/xds/internal/balancer/edsbalancer/load_store_wrapper.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package edsbalancer - -import ( - "sync" - - "google.golang.org/grpc/xds/internal/client/load" -) - -type loadStoreWrapper struct { - mu sync.RWMutex - service string - // Both store and perCluster will be nil if load reporting is disabled (EDS - // response doesn't have LRS server name). Note that methods on Store and - // perCluster all handle nil, so there's no need to check nil before calling - // them. 
- store *load.Store - perCluster load.PerClusterReporter -} - -func (lsw *loadStoreWrapper) updateServiceName(service string) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if lsw.service == service { - return - } - lsw.service = service - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) updateLoadStore(store *load.Store) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if store == lsw.store { - return - } - lsw.store = store - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) CallStarted(locality string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallStarted(locality) - } -} - -func (lsw *loadStoreWrapper) CallFinished(locality string, err error) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallFinished(locality, err) - } -} - -func (lsw *loadStoreWrapper) CallServerLoad(locality, name string, val float64) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallServerLoad(locality, name, val) - } -} - -func (lsw *loadStoreWrapper) CallDropped(category string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallDropped(category) - } -} diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index 627229de7ad0..bb526116bda6 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -28,6 +28,7 @@ import ( v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" 
v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" anypb "github.com/golang/protobuf/ptypes/any" @@ -45,7 +46,7 @@ const ( serviceName = "service" ) -var emptyUpdate = ClusterUpdate{ServiceName: "", EnableLRS: false} +var emptyUpdate = ClusterUpdate{ClusterName: clusterName, EnableLRS: false} func (s) TestValidateCluster_Failure(t *testing.T) { tests := []struct { @@ -141,24 +142,35 @@ func (s) TestValidateCluster_Success(t *testing.T) { { name: "happy-case-logical-dns", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, }, { name: "happy-case-aggregate-v3", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ - ClusterType: &v3clusterpb.Cluster_CustomClusterType{Name: "envoy.clusters.aggregate"}, + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: []string{"a", "b", "c"}, + }), + }, }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ServiceName: "", EnableLRS: false, ClusterType: ClusterTypeAggregate}, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, EnableLRS: false, ClusterType: ClusterTypeAggregate, + PrioritizedClusterNames: []string{"a", "b", "c"}, + }, }, { name: "happy-case-no-service-name-no-lrs", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -174,6 +186,7 @@ func (s) 
TestValidateCluster_Success(t *testing.T) { { name: "happy-case-no-lrs", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -185,7 +198,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: false}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: false}, }, { name: "happiest-case", @@ -207,7 +220,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true}, }, { name: "happiest-case-with-circuitbreakers", @@ -241,7 +254,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, }, } @@ -254,8 +267,8 @@ func (s) TestValidateCluster_Success(t *testing.T) { if err != nil { t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err) } - if !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) { - t.Errorf("validateClusterAndConstructClusterUpdate(%+v) = %v, want: %v", test.cluster, update, test.wantUpdate) + if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) } }) } @@ -268,6 +281,7 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { defer func() { 
env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() cluster := &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -295,8 +309,9 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { }, } wantUpdate := ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, } gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { @@ -318,6 +333,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { identityCertName = "identityCert" rootPluginInstance = "rootPluginInstance" rootCertName = "rootCert" + clusterName = "cluster" serviceName = "service" sanExact = "san-exact" sanPrefix = "san-prefix" @@ -608,6 +624,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-no-identity-certs", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -635,8 +652,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -646,6 +664,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-validation-context-provider-instance", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -677,8 +696,9 @@ 
func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -690,6 +710,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { { name: "happy-case-with-combined-validation-context", cluster: &v3clusterpb.Cluster{ + Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -735,8 +756,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -877,7 +899,8 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v2ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, + ClusterName: v2ClusterName, + EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, }, @@ -891,7 +914,8 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v3ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, + ClusterName: v3ClusterName, + EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, }, }, @@ -905,11 +929,13 @@ func (s) TestUnmarshalCluster(t *testing.T) { resources: []*anypb.Any{v2ClusterAny, v3ClusterAny}, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, + ClusterName: v2ClusterName, + EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, v3ClusterName: { - ServiceName: v3Service, EnableLRS: 
true, + ClusterName: v3ClusterName, + EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, }, }, @@ -932,11 +958,13 @@ func (s) TestUnmarshalCluster(t *testing.T) { }, wantUpdate: map[string]ClusterUpdate{ v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, + ClusterName: v2ClusterName, + EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, }, v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, + ClusterName: v3ClusterName, + EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, }, "bad": {}, diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 603632801b04..05093653c075 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -378,22 +378,23 @@ const ( // interest to the registered CDS watcher. type ClusterUpdate struct { ClusterType ClusterType - // ServiceName is the service name corresponding to the clusterName which - // is being watched for through CDS. - ServiceName string + // ClusterName is the clusterName being watched for through CDS. + ClusterName string + // EDSServiceName is an optional name for EDS. If it's not set, the balancer + // should watch ClusterName for the EDS resources. + EDSServiceName string // EnableLRS indicates whether or not load should be reported through LRS. EnableLRS bool // SecurityCfg contains security configuration sent by the control plane. SecurityCfg *SecurityConfig // MaxRequests for circuit breaking, if any (otherwise nil). MaxRequests *uint32 - - // Raw is the resource from the xds response. - Raw *anypb.Any - // PrioritizedClusterNames is used only for cluster type aggregate. It represents // a prioritized list of cluster names. PrioritizedClusterNames []string + + // Raw is the resource from the xds response. + Raw *anypb.Any } // OverloadDropConfig contains the config to drop overloads. 
diff --git a/xds/internal/client/client_test.go b/xds/internal/client/client_test.go index 69930557b26e..d57bc20e2c2c 100644 --- a/xds/internal/client/client_test.go +++ b/xds/internal/client/client_test.go @@ -185,13 +185,13 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { t.Fatal(err) } - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} + wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil { t.Fatal(err) diff --git a/xds/internal/client/v2/cds_test.go b/xds/internal/client/v2/cds_test.go index e627860d2a9f..ba7f627b25ff 100644 --- a/xds/internal/client/v2/cds_test.go +++ b/xds/internal/client/v2/cds_test.go @@ -151,7 +151,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: goodCDSResponse2, wantErr: false, wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName2: {ServiceName: serviceName2, Raw: marshaledCluster2}, + goodClusterName2: {ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -164,7 +164,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: goodCDSResponse1, wantErr: false, wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName1: {ServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, + goodClusterName1: {ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: 
xdsclient.ServiceStatusACKed, diff --git a/xds/internal/client/watchers_cluster_test.go b/xds/internal/client/watchers_cluster_test.go index 2d10c7f43b5f..c9837cd51978 100644 --- a/xds/internal/client/watchers_cluster_test.go +++ b/xds/internal/client/watchers_cluster_test.go @@ -64,7 +64,7 @@ func (s) TestClusterWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { t.Fatal(err) @@ -128,7 +128,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate); err != nil { @@ -200,8 +200,8 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} + wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{ testCDSName + "1": wantUpdate1, testCDSName + "2": wantUpdate2, @@ -245,7 +245,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) @@ -345,7 +345,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { 
t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ServiceName: testEDSName} + wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) @@ -402,8 +402,8 @@ func (s) TestClusterResourceRemoved(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} + wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{ testCDSName + "1": wantUpdate1, testCDSName + "2": wantUpdate2, diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index cf705587e24a..781eeb47a062 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -583,42 +583,9 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin return cluster.GetName(), cu, nil } -func clusterTypeFromCluster(cluster *v3clusterpb.Cluster) (ClusterType, string, []string, error) { - if cluster.GetType() == v3clusterpb.Cluster_EDS { - if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { - return 0, "", nil, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) - } - // If the Cluster message in the CDS response did not contain a - // serviceName, we will just use the clusterName for EDS. - if cluster.GetEdsClusterConfig().GetServiceName() == "" { - return ClusterTypeEDS, cluster.GetName(), nil, nil - } - return ClusterTypeEDS, cluster.GetEdsClusterConfig().GetServiceName(), nil, nil - } - - if cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS { - return ClusterTypeLogicalDNS, cluster.GetName(), nil, nil - } - - if cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate" { - // Loop through ClusterConfig here to get cluster names. 
- clusters := &v3aggregateclusterpb.ClusterConfig{} - if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { - return 0, "", nil, fmt.Errorf("failed to unmarshal resource: %v", err) - } - return ClusterTypeAggregate, cluster.GetName(), clusters.Clusters, nil - } - return 0, "", nil, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) -} - func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false} if cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN { - return emptyUpdate, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) - } - clusterType, serviceName, prioritizedClusters, err := clusterTypeFromCluster(cluster) - if err != nil { - return emptyUpdate, err + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } // Process security configuration received from the control plane iff the @@ -627,18 +594,40 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu if env.ClientSideSecuritySupport { var err error if sc, err = securityConfigFromCluster(cluster); err != nil { - return emptyUpdate, err + return ClusterUpdate{}, err } } - return ClusterUpdate{ - ClusterType: clusterType, - ServiceName: serviceName, - EnableLRS: cluster.GetLrsServer().GetSelf() != nil, - SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), - PrioritizedClusterNames: prioritizedClusters, - }, nil + ret := ClusterUpdate{ + ClusterName: cluster.GetName(), + EnableLRS: cluster.GetLrsServer().GetSelf() != nil, + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + } + + // Validate and set cluster type from the response. 
+ switch { + case cluster.GetType() == v3clusterpb.Cluster_EDS: + if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { + return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) + } + ret.ClusterType = ClusterTypeEDS + ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + return ret, nil + case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: + ret.ClusterType = ClusterTypeLogicalDNS + return ret, nil + case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": + clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + ret.ClusterType = ClusterTypeAggregate + ret.PrioritizedClusterNames = clusters.Clusters + return ret, nil + default: + return ClusterUpdate{}, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } } // securityConfigFromCluster extracts the relevant security configuration from diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index eb4c659e505a..f85f0ecbf3f3 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -156,7 +156,7 @@ func (xdsC *Client) InvokeWatchClusterCallback(update xdsclient.ClusterUpdate, e } else { // Have what callback you call with the update determined by the service name in the ClusterUpdate. Left up to the // caller to make sure the cluster update matches with a persisted callback. 
- xdsC.cdsCbs[update.ServiceName](update, err) + xdsC.cdsCbs[update.ClusterName](update, err) } } From 6fea90d7a884ad070a4f04863521eaf43e6c5d11 Mon Sep 17 00:00:00 2001 From: Mayank Singhal Date: Thu, 13 May 2021 05:45:47 +0530 Subject: [PATCH 075/998] benchmark: do not allow addition of values lower than the minimum allowed in histogram stats --- benchmark/stats/histogram.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/benchmark/stats/histogram.go b/benchmark/stats/histogram.go index f038d26ed0aa..461135f0125c 100644 --- a/benchmark/stats/histogram.go +++ b/benchmark/stats/histogram.go @@ -118,10 +118,6 @@ func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { } maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64)) - if maxBucketDigitLen < 3 { - // For "inf". - maxBucketDigitLen = 3 - } maxCountDigitLen := len(strconv.FormatInt(h.Count, 10)) percentMulti := 100 / float64(h.Count) @@ -131,9 +127,9 @@ func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { if i+1 < len(h.Buckets) { fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit) } else { - fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") + upperBound := float64(h.opts.MinValue) + (b.LowBound-float64(h.opts.MinValue))*(1.0+h.opts.GrowthFactor) + fmt.Fprintf(w, "%*f)", maxBucketDigitLen, upperBound/unit) } - accCount += b.Count fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti) @@ -188,6 +184,9 @@ func (h *Histogram) Add(value int64) error { func (h *Histogram) findBucket(value int64) (int, error) { delta := float64(value - h.opts.MinValue) + if delta < 0 { + return 0, fmt.Errorf("no bucket for value: %d", value) + } var b int if delta >= h.opts.BaseBucketSize { // b = log_{1+growthFactor} (delta / baseBucketSize) + 1 From a16b156e990b0fb4100a4694e1c6dda779b08f77 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 12 May 2021 
17:43:29 -0700 Subject: [PATCH 076/998] internal: fix flaky test KeepaliveClientStaysHealthyWithResponsiveServer (#4427) Server should allow `NoStream`, otherwise there's a small chance (5/1000) the connection will be closed due to `too many pings`. --- internal/transport/keepalive_test.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index 571cacca7e91..c4021925f325 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -346,12 +346,19 @@ func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { // responds to keepalive pings, and makes sure than a client transport stays // healthy without any active streams. func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { - server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, normal, ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }}) + server, client, cancel := setUpWithOptions(t, 0, + &ServerConfig{ + KeepalivePolicy: keepalive.EnforcementPolicy{ + PermitWithoutStream: true, + }, + }, + normal, + ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 1 * time.Second, + PermitWithoutStream: true, + }}) defer func() { client.Close(fmt.Errorf("closed manually by test")) server.stop() From dc77d7ffe311f78f2e577572d984af3c0a8df82b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 12 May 2021 18:03:52 -0700 Subject: [PATCH 077/998] xds: revert a workaround made in #4413 (#4428) --- xds/internal/test/xds_integration_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index c2bb6bc18f67..a41fec929762 100644 --- a/xds/internal/test/xds_integration_test.go +++ 
b/xds/internal/test/xds_integration_test.go @@ -83,11 +83,6 @@ func TestMain(m *testing.M) { // spawns the management server and is blocked on the call to `Serve()`. leakcheck.RegisterIgnoreGoroutine("e2e.StartManagementServer") - // Remove this once https://github.com/envoyproxy/go-control-plane/pull/430 - // is merged. For more information about this goroutine leak, see: - // https://github.com/envoyproxy/go-control-plane/issues/429. - leakcheck.RegisterIgnoreGoroutine("(*server).StreamHandler") - cancel, err := setupManagementServer() if err != nil { log.Printf("setupManagementServer() failed: %v", err) From 71a1ca6c7f859658e44f0073fb754c4698216202 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 14 May 2021 11:13:26 -0700 Subject: [PATCH 078/998] interop/xds: support xds credentials in interop client (#4436) --- interop/xds/client/client.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/interop/xds/client/client.go b/interop/xds/client/client.go index b028ec79228e..92999b2193ed 100644 --- a/interop/xds/client/client.go +++ b/interop/xds/client/client.go @@ -32,6 +32,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/admin" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -175,6 +177,7 @@ var ( rpcTimeout = flag.Duration("rpc_timeout", 20*time.Second, "Per RPC timeout") server = flag.String("server", "localhost:8080", "Address of server to connect to") statsPort = flag.Int("stats_port", 8081, "Port to expose peer distribution stats service") + secureMode = flag.Bool("secure_mode", false, "If true, retrieve security configuration from the management server. 
Else, use insecure credentials.") rpcCfgs atomic.Value @@ -375,14 +378,23 @@ func main() { reflection.Register(s) cleanup, err := admin.Register(s) if err != nil { - logger.Fatalf("failed to register admin: %v", err) + logger.Fatalf("Failed to register admin: %v", err) } defer cleanup() go s.Serve(lis) + creds := insecure.NewCredentials() + if *secureMode { + var err error + creds, err = xds.NewClientCredentials(xds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + logger.Fatalf("Failed to create xDS credentials: %v", err) + } + } + clients := make([]testgrpc.TestServiceClient, *numChannels) for i := 0; i < *numChannels; i++ { - conn, err := grpc.Dial(*server, grpc.WithInsecure()) + conn, err := grpc.Dial(*server, grpc.WithTransportCredentials(creds)) if err != nil { logger.Fatalf("Fail to dial: %v", err) } From b759b408e84fd5e990073fdaa28cd24d8eb2adad Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 14 May 2021 17:02:10 -0400 Subject: [PATCH 079/998] xds: moved shared matchers to internal/xds (#4441) * Moved shared matchers to internal/xds --- credentials/xds/xds_client_test.go | 6 +- internal/credentials/xds/handshake_info.go | 12 +- .../credentials/xds/handshake_info_test.go | 100 ++++---- internal/xds/bootstrap.go | 2 + internal/xds/matcher/matcher_header.go | 224 ++++++++++++++++++ .../xds/matcher}/matcher_header_test.go | 36 +-- internal/xds/{ => matcher}/string_matcher.go | 4 +- .../xds/{ => matcher}/string_matcher_test.go | 2 +- .../cdsbalancer/cdsbalancer_security_test.go | 14 +- xds/internal/client/cds_test.go | 14 +- xds/internal/client/client.go | 4 +- xds/internal/client/xds.go | 6 +- xds/internal/resolver/matcher.go | 25 +- xds/internal/resolver/matcher_header.go | 188 --------------- xds/internal/resolver/matcher_test.go | 17 +- 15 files changed, 347 insertions(+), 307 deletions(-) create mode 100644 internal/xds/matcher/matcher_header.go rename 
{xds/internal/resolver => internal/xds/matcher}/matcher_header_test.go (88%) rename internal/xds/{ => matcher}/string_matcher.go (98%) rename internal/xds/{ => matcher}/string_matcher_test.go (99%) delete mode 100644 xds/internal/resolver/matcher_header.go diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index 2c882be8a549..07a65b6e3cf2 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -38,7 +38,7 @@ import ( xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" ) @@ -223,7 +223,7 @@ func newTestContextWithHandshakeInfo(parent context.Context, root, identity cert // NewSubConn(). info := xdsinternal.NewHandshakeInfo(root, identity) if sanExactMatch != "" { - info.SetSANMatchers([]xds.StringMatcher{xds.StringMatcherForTesting(newStringP(sanExactMatch), nil, nil, nil, nil, false)}) + info.SetSANMatchers([]matcher.StringMatcher{matcher.StringMatcherForTesting(newStringP(sanExactMatch), nil, nil, nil, nil, false)}) } addr := xdsinternal.SetHandshakeInfo(resolver.Address{}, info) @@ -536,7 +536,7 @@ func (s) TestClientCredsProviderSwitch(t *testing.T) { // use the correct trust roots. 
root1 := makeRootProvider(t, "x509/client_ca_cert.pem") handshakeInfo := xdsinternal.NewHandshakeInfo(root1, nil) - handshakeInfo.SetSANMatchers([]xds.StringMatcher{xds.StringMatcherForTesting(newStringP(defaultTestCertSAN), nil, nil, nil, nil, false)}) + handshakeInfo.SetSANMatchers([]matcher.StringMatcher{matcher.StringMatcherForTesting(newStringP(defaultTestCertSAN), nil, nil, nil, nil, false)}) // We need to repeat most of what newTestContextWithHandshakeInfo() does // here because we need access to the underlying HandshakeInfo so that we diff --git a/internal/credentials/xds/handshake_info.go b/internal/credentials/xds/handshake_info.go index 6789a4cf2e51..6ef43cc89fa3 100644 --- a/internal/credentials/xds/handshake_info.go +++ b/internal/credentials/xds/handshake_info.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" ) @@ -66,8 +66,8 @@ type HandshakeInfo struct { mu sync.Mutex rootProvider certprovider.Provider identityProvider certprovider.Provider - sanMatchers []xdsinternal.StringMatcher // Only on the client side. - requireClientCert bool // Only on server side. + sanMatchers []matcher.StringMatcher // Only on the client side. + requireClientCert bool // Only on server side. } // SetRootCertProvider updates the root certificate provider. @@ -85,7 +85,7 @@ func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) } // SetSANMatchers updates the list of SAN matchers. 
-func (hi *HandshakeInfo) SetSANMatchers(sanMatchers []xdsinternal.StringMatcher) { +func (hi *HandshakeInfo) SetSANMatchers(sanMatchers []matcher.StringMatcher) { hi.mu.Lock() hi.sanMatchers = sanMatchers hi.mu.Unlock() @@ -113,10 +113,10 @@ func (hi *HandshakeInfo) UseFallbackCreds() bool { // GetSANMatchersForTesting returns the SAN matchers stored in HandshakeInfo. // To be used only for testing purposes. -func (hi *HandshakeInfo) GetSANMatchersForTesting() []xdsinternal.StringMatcher { +func (hi *HandshakeInfo) GetSANMatchersForTesting() []matcher.StringMatcher { hi.mu.Lock() defer hi.mu.Unlock() - return append([]xdsinternal.StringMatcher{}, hi.sanMatchers...) + return append([]matcher.StringMatcher{}, hi.sanMatchers...) } // ClientSideTLSConfig constructs a tls.Config to be used in a client-side diff --git a/internal/credentials/xds/handshake_info_test.go b/internal/credentials/xds/handshake_info_test.go index 81906fa758a1..91257a1925da 100644 --- a/internal/credentials/xds/handshake_info_test.go +++ b/internal/credentials/xds/handshake_info_test.go @@ -25,7 +25,7 @@ import ( "regexp" "testing" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" ) func TestDNSMatch(t *testing.T) { @@ -143,45 +143,45 @@ func TestMatchingSANExists_FailureCases(t *testing.T) { tests := []struct { desc string - sanMatchers []xdsinternal.StringMatcher + sanMatchers []matcher.StringMatcher }{ { desc: "exact match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP("abcd.test.com"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("http://golang"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("HTTP://GOLANG.ORG"), nil, nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP("abcd.test.com"), nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("http://golang"), 
nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("HTTP://GOLANG.ORG"), nil, nil, nil, nil, false), }, }, { desc: "prefix match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, newStringP("i-aint-the-one"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, newStringP("FOO.BAR"), nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, newStringP("i-aint-the-one"), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, newStringP("FOO.BAR"), nil, nil, nil, false), }, }, { desc: "suffix match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("i-aint-the-one"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("1::68"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP(".COM"), nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, newStringP("i-aint-the-one"), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP("1::68"), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP(".COM"), nil, nil, false), }, }, { desc: "regex match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.examples\.com`), false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.examples\.com`), false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), }, }, { desc: "contains match", - 
sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("i-aint-the-one"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("2001:db8:1:1::68"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("i-aint-the-one"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("2001:db8:1:1::68"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, false), }, }, } @@ -216,73 +216,73 @@ func TestMatchingSANExists_Success(t *testing.T) { tests := []struct { desc string - sanMatchers []xdsinternal.StringMatcher + sanMatchers []matcher.StringMatcher }{ { desc: "no san matchers", }, { desc: "exact match dns wildcard", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("abc.example.com"), nil, nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("abc.example.com"), nil, nil, nil, nil, false), }, }, { desc: "exact match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP("FOOBAR@EXAMPLE.COM"), nil, nil, nil, nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP("FOOBAR@EXAMPLE.COM"), nil, nil, nil, nil, true), }, }, { desc: "prefix match", - sanMatchers: []xdsinternal.StringMatcher{ - 
xdsinternal.StringMatcherForTesting(nil, nil, newStringP(".co.in"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, newStringP("baz.test"), nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, newStringP(".co.in"), nil, nil, false), + matcher.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, newStringP("baz.test"), nil, nil, nil, false), }, }, { desc: "prefix match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, newStringP("BAZ.test"), nil, nil, nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, newStringP("BAZ.test"), nil, nil, nil, true), }, }, { desc: "suffix match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("192.168.1.1"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("@test.com"), nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), + matcher.StringMatcherForTesting(nil, nil, newStringP("192.168.1.1"), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP("@test.com"), nil, nil, false), }, }, { desc: "suffix match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("@test.COM"), nil, nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, newStringP("@test.COM"), nil, nil, true), }, }, { desc: "regex match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, 
newStringP("https://github.com/grpc/grpc-java"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.test\.com`), false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("https://github.com/grpc/grpc-java"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.test\.com`), false), }, }, { desc: "contains match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("2001:68::db8"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("192.0.0"), nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("2001:68::db8"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("192.0.0"), nil, false), }, }, { desc: "contains match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, true), }, }, } diff --git a/internal/xds/bootstrap.go b/internal/xds/bootstrap.go index 97ec8e17208e..a3f80d8f2496 100644 --- a/internal/xds/bootstrap.go +++ b/internal/xds/bootstrap.go @@ -16,6 +16,8 @@ * */ +// Package xds contains types that need to be shared between code under +// google.golang.org/grpc/xds/... 
and the rest of gRPC. package xds import ( diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go new file mode 100644 index 000000000000..f9c0322179e8 --- /dev/null +++ b/internal/xds/matcher/matcher_header.go @@ -0,0 +1,224 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package matcher + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/metadata" +) + +// HeaderMatcherInterface is an interface for header matchers. These are +// documented in (EnvoyProxy link here?). These matchers will match on different +// aspects of HTTP header name/value pairs. +type HeaderMatcherInterface interface { + Match(metadata.MD) bool + String() string +} + +// mdValuesFromOutgoingCtx retrieves metadata from context. If there are +// multiple values, the values are concatenated with "," (comma and no space). +// +// All header matchers only match against the comma-concatenated string. +func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { + vs, ok := md[key] + if !ok { + return "", false + } + return strings.Join(vs, ","), true +} + +// HeaderExactMatcher matches on an exact match of the value of the header. +type HeaderExactMatcher struct { + key string + exact string +} + +// NewHeaderExactMatcher returns a new HeaderExactMatcher. 
+func NewHeaderExactMatcher(key, exact string) *HeaderExactMatcher { + return &HeaderExactMatcher{key: key, exact: exact} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderExactMatcher. +func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hem.key) + if !ok { + return false + } + return v == hem.exact +} + +func (hem *HeaderExactMatcher) String() string { + return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) +} + +// HeaderRegexMatcher matches on whether the entire request header value matches +// the regex. +type HeaderRegexMatcher struct { + key string + re *regexp.Regexp +} + +// NewHeaderRegexMatcher returns a new HeaderRegexMatcher. +func NewHeaderRegexMatcher(key string, re *regexp.Regexp) *HeaderRegexMatcher { + return &HeaderRegexMatcher{key: key, re: re} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRegexMatcher. +func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + return hrm.re.MatchString(v) +} + +func (hrm *HeaderRegexMatcher) String() string { + return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) +} + +// HeaderRangeMatcher matches on whether the request header value is within the +// range. The header value must be an integer in base 10 notation. +type HeaderRangeMatcher struct { + key string + start, end int64 // represents [start, end). +} + +// NewHeaderRangeMatcher returns a new HeaderRangeMatcher. +func NewHeaderRangeMatcher(key string, start, end int64) *HeaderRangeMatcher { + return &HeaderRangeMatcher{key: key, start: start, end: end} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRangeMatcher. 
+func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { + return true + } + return false +} + +func (hrm *HeaderRangeMatcher) String() string { + return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) +} + +// HeaderPresentMatcher will match based on whether the header is present in the +// whole request. +type HeaderPresentMatcher struct { + key string + present bool +} + +// NewHeaderPresentMatcher returns a new HeaderPresentMatcher. +func NewHeaderPresentMatcher(key string, present bool) *HeaderPresentMatcher { + return &HeaderPresentMatcher{key: key, present: present} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPresentMatcher. +func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { + vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) + present := ok && len(vs) > 0 + return present == hpm.present +} + +func (hpm *HeaderPresentMatcher) String() string { + return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) +} + +// HeaderPrefixMatcher matches on whether the prefix of the header value matches +// the prefix passed into this struct. +type HeaderPrefixMatcher struct { + key string + prefix string +} + +// NewHeaderPrefixMatcher returns a new HeaderPrefixMatcher. +func NewHeaderPrefixMatcher(key string, prefix string) *HeaderPrefixMatcher { + return &HeaderPrefixMatcher{key: key, prefix: prefix} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPrefixMatcher. 
+func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hpm.key) + if !ok { + return false + } + return strings.HasPrefix(v, hpm.prefix) +} + +func (hpm *HeaderPrefixMatcher) String() string { + return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix) +} + +// HeaderSuffixMatcher matches on whether the suffix of the header value matches +// the suffix passed into this struct. +type HeaderSuffixMatcher struct { + key string + suffix string +} + +// NewHeaderSuffixMatcher returns a new HeaderSuffixMatcher. +func NewHeaderSuffixMatcher(key string, suffix string) *HeaderSuffixMatcher { + return &HeaderSuffixMatcher{key: key, suffix: suffix} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderSuffixMatcher. +func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return strings.HasSuffix(v, hsm.suffix) +} + +func (hsm *HeaderSuffixMatcher) String() string { + return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) +} + +// InvertMatcher inverts the match result of the underlying header matcher. +type InvertMatcher struct { + m HeaderMatcherInterface +} + +// NewInvertMatcher returns a new InvertMatcher. +func NewInvertMatcher(m HeaderMatcherInterface) *InvertMatcher { + return &InvertMatcher{m: m} +} + +// Match returns whether the passed in HTTP Headers match according to the +// InvertMatcher. 
+func (i *InvertMatcher) Match(md metadata.MD) bool { + return !i.m.Match(md) +} + +func (i *InvertMatcher) String() string { + return fmt.Sprintf("invert{%s}", i.m) +} diff --git a/xds/internal/resolver/matcher_header_test.go b/internal/xds/matcher/matcher_header_test.go similarity index 88% rename from xds/internal/resolver/matcher_header_test.go rename to internal/xds/matcher/matcher_header_test.go index c83c3ec3923c..911e7bcfaca1 100644 --- a/xds/internal/resolver/matcher_header_test.go +++ b/internal/xds/matcher/matcher_header_test.go @@ -18,7 +18,7 @@ * */ -package resolver +package matcher import ( "regexp" @@ -66,8 +66,8 @@ func TestHeaderExactMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hem := newHeaderExactMatcher(tt.key, tt.exact) - if got := hem.match(tt.md); got != tt.want { + hem := NewHeaderExactMatcher(tt.key, tt.exact) + if got := hem.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -112,8 +112,8 @@ func TestHeaderRegexMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hrm := newHeaderRegexMatcher(tt.key, regexp.MustCompile(tt.regexStr)) - if got := hrm.match(tt.md); got != tt.want { + hrm := NewHeaderRegexMatcher(tt.key, regexp.MustCompile(tt.regexStr)) + if got := hrm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -159,8 +159,8 @@ func TestHeaderRangeMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hrm := newHeaderRangeMatcher(tt.key, tt.start, tt.end) - if got := hrm.match(tt.md); got != tt.want { + hrm := NewHeaderRangeMatcher(tt.key, tt.start, tt.end) + if got := hrm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -206,8 +206,8 @@ func TestHeaderPresentMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hpm := newHeaderPresentMatcher(tt.key, 
tt.present) - if got := hpm.match(tt.md); got != tt.want { + hpm := NewHeaderPresentMatcher(tt.key, tt.present) + if got := hpm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -252,8 +252,8 @@ func TestHeaderPrefixMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hpm := newHeaderPrefixMatcher(tt.key, tt.prefix) - if got := hpm.match(tt.md); got != tt.want { + hpm := NewHeaderPrefixMatcher(tt.key, tt.prefix) + if got := hpm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -298,8 +298,8 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hsm := newHeaderSuffixMatcher(tt.key, tt.suffix) - if got := hsm.match(tt.md); got != tt.want { + hsm := NewHeaderSuffixMatcher(tt.key, tt.suffix) + if got := hsm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -309,24 +309,24 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { func TestInvertMatcherMatch(t *testing.T) { tests := []struct { name string - m headerMatcherInterface + m HeaderMatcherInterface md metadata.MD }{ { name: "true->false", - m: newHeaderExactMatcher("th", "tv"), + m: NewHeaderExactMatcher("th", "tv"), md: metadata.Pairs("th", "tv"), }, { name: "false->true", - m: newHeaderExactMatcher("th", "abc"), + m: NewHeaderExactMatcher("th", "abc"), md: metadata.Pairs("th", "tv"), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := newInvertMatcher(tt.m).match(tt.md) - want := !tt.m.match(tt.md) + got := NewInvertMatcher(tt.m).Match(tt.md) + want := !tt.m.Match(tt.md) if got != want { t.Errorf("match() = %v, want %v", got, want) } diff --git a/internal/xds/string_matcher.go b/internal/xds/matcher/string_matcher.go similarity index 98% rename from internal/xds/string_matcher.go rename to internal/xds/matcher/string_matcher.go index 21f15aad1b88..d7df6a1e2b40 100644 --- 
a/internal/xds/string_matcher.go +++ b/internal/xds/matcher/string_matcher.go @@ -16,9 +16,9 @@ * */ -// Package xds contains types that need to be shared between code under +// Package matcher contains types that need to be shared between code under // google.golang.org/grpc/xds/... and the rest of gRPC. -package xds +package matcher import ( "errors" diff --git a/internal/xds/string_matcher_test.go b/internal/xds/matcher/string_matcher_test.go similarity index 99% rename from internal/xds/string_matcher_test.go rename to internal/xds/matcher/string_matcher_test.go index 7908ac974b23..b634aa041963 100644 --- a/internal/xds/string_matcher_test.go +++ b/internal/xds/matcher/string_matcher_test.go @@ -16,7 +16,7 @@ * */ -package xds +package matcher import ( "regexp" diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 30c0d9105ed3..52b1a05f1362 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -34,7 +34,7 @@ import ( "google.golang.org/grpc/internal" xdscredsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/testutils" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" @@ -50,12 +50,12 @@ const ( ) var ( - testSANMatchers = []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP(testSAN), nil, nil, nil, nil, true), - xdsinternal.StringMatcherForTesting(nil, newStringP(testSAN), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP(testSAN), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(testSAN), false), - xdsinternal.StringMatcherForTesting(nil, nil, 
nil, newStringP(testSAN), nil, false), + testSANMatchers = []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP(testSAN), nil, nil, nil, nil, true), + matcher.StringMatcherForTesting(nil, newStringP(testSAN), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP(testSAN), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(testSAN), false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP(testSAN), nil, false), } fpb1, fpb2 *fakeProviderBuilder bootstrapConfig *bootstrap.Config diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index bb526116bda6..7d8cf6a8670b 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -35,8 +35,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" - xdsinternal "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/version" "google.golang.org/protobuf/types/known/wrapperspb" ) @@ -764,12 +764,12 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { RootCertName: rootCertName, IdentityInstanceName: identityPluginInstance, IdentityCertName: identityCertName, - SubjectAltNameMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP(sanExact), nil, nil, nil, nil, true), - xdsinternal.StringMatcherForTesting(nil, newStringP(sanPrefix), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP(sanSuffix), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, sanRE, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP(sanContains), nil, false), + SubjectAltNameMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP(sanExact), nil, nil, nil, nil, true), + 
matcher.StringMatcherForTesting(nil, newStringP(sanPrefix), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP(sanSuffix), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, sanRE, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP(sanContains), nil, false), }, }, }, diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 05093653c075..8888e6214297 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -33,7 +33,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/client/load" "google.golang.org/grpc/xds/internal/httpfilter" @@ -351,7 +351,7 @@ type SecurityConfig struct { // - If the peer certificate contains a wildcard DNS SAN, and an `exact` // matcher is configured, a wildcard DNS match is performed instead of a // regular string comparison. - SubjectAltNameMatchers []xds.StringMatcher + SubjectAltNameMatchers []matcher.StringMatcher // RequireClientCert indicates if the server handshake process expects the // client to present a certificate. Set to true when performing mTLS. Used // only on the server-side. 
diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index 781eeb47a062..e9fe205a7ce2 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -40,10 +40,10 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/httpfilter" @@ -689,10 +689,10 @@ func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*Secu switch t := common.GetValidationContextType().(type) { case *v3tlspb.CommonTlsContext_CombinedValidationContext: combined := common.GetCombinedValidationContext() - var matchers []xds.StringMatcher + var matchers []matcher.StringMatcher if def := combined.GetDefaultValidationContext(); def != nil { for _, m := range def.GetMatchSubjectAltNames() { - matcher, err := xds.StringMatcherFromProto(m) + matcher, err := matcher.StringMatcherFromProto(m) if err != nil { return nil, err } diff --git a/xds/internal/resolver/matcher.go b/xds/internal/resolver/matcher.go index 06456a585573..e329944e1db1 100644 --- a/xds/internal/resolver/matcher.go +++ b/xds/internal/resolver/matcher.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/metadata" xdsclient "google.golang.org/grpc/xds/internal/client" ) @@ -42,27 +43,27 @@ func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { return nil, fmt.Errorf("illegal route: missing path_matcher") } - var headerMatchers []headerMatcherInterface + var headerMatchers []matcher.HeaderMatcherInterface for 
_, h := range r.Headers { - var matcherT headerMatcherInterface + var matcherT matcher.HeaderMatcherInterface switch { case h.ExactMatch != nil && *h.ExactMatch != "": - matcherT = newHeaderExactMatcher(h.Name, *h.ExactMatch) + matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch) case h.RegexMatch != nil: - matcherT = newHeaderRegexMatcher(h.Name, h.RegexMatch) + matcherT = matcher.NewHeaderRegexMatcher(h.Name, h.RegexMatch) case h.PrefixMatch != nil && *h.PrefixMatch != "": - matcherT = newHeaderPrefixMatcher(h.Name, *h.PrefixMatch) + matcherT = matcher.NewHeaderPrefixMatcher(h.Name, *h.PrefixMatch) case h.SuffixMatch != nil && *h.SuffixMatch != "": - matcherT = newHeaderSuffixMatcher(h.Name, *h.SuffixMatch) + matcherT = matcher.NewHeaderSuffixMatcher(h.Name, *h.SuffixMatch) case h.RangeMatch != nil: - matcherT = newHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End) + matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End) case h.PresentMatch != nil: - matcherT = newHeaderPresentMatcher(h.Name, *h.PresentMatch) + matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch) default: return nil, fmt.Errorf("illegal route: missing header_match_specifier") } if h.InvertMatch != nil && *h.InvertMatch { - matcherT = newInvertMatcher(matcherT) + matcherT = matcher.NewInvertMatcher(matcherT) } headerMatchers = append(headerMatchers, matcherT) } @@ -77,11 +78,11 @@ func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { // compositeMatcher.match returns true if all matchers return true. 
type compositeMatcher struct { pm pathMatcherInterface - hms []headerMatcherInterface + hms []matcher.HeaderMatcherInterface fm *fractionMatcher } -func newCompositeMatcher(pm pathMatcherInterface, hms []headerMatcherInterface, fm *fractionMatcher) *compositeMatcher { +func newCompositeMatcher(pm pathMatcherInterface, hms []matcher.HeaderMatcherInterface, fm *fractionMatcher) *compositeMatcher { return &compositeMatcher{pm: pm, hms: hms, fm: fm} } @@ -107,7 +108,7 @@ func (a *compositeMatcher) match(info iresolver.RPCInfo) bool { } } for _, m := range a.hms { - if !m.match(md) { + if !m.Match(md) { return false } } diff --git a/xds/internal/resolver/matcher_header.go b/xds/internal/resolver/matcher_header.go deleted file mode 100644 index 05a92788d7bf..000000000000 --- a/xds/internal/resolver/matcher_header.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package resolver - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "google.golang.org/grpc/metadata" -) - -type headerMatcherInterface interface { - match(metadata.MD) bool - String() string -} - -// mdValuesFromOutgoingCtx retrieves metadata from context. If there are -// multiple values, the values are concatenated with "," (comma and no space). -// -// All header matchers only match against the comma-concatenated string. 
-func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { - vs, ok := md[key] - if !ok { - return "", false - } - return strings.Join(vs, ","), true -} - -type headerExactMatcher struct { - key string - exact string -} - -func newHeaderExactMatcher(key, exact string) *headerExactMatcher { - return &headerExactMatcher{key: key, exact: exact} -} - -func (hem *headerExactMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hem.key) - if !ok { - return false - } - return v == hem.exact -} - -func (hem *headerExactMatcher) String() string { - return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) -} - -type headerRegexMatcher struct { - key string - re *regexp.Regexp -} - -func newHeaderRegexMatcher(key string, re *regexp.Regexp) *headerRegexMatcher { - return &headerRegexMatcher{key: key, re: re} -} - -func (hrm *headerRegexMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) - if !ok { - return false - } - return hrm.re.MatchString(v) -} - -func (hrm *headerRegexMatcher) String() string { - return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) -} - -type headerRangeMatcher struct { - key string - start, end int64 // represents [start, end). 
-} - -func newHeaderRangeMatcher(key string, start, end int64) *headerRangeMatcher { - return &headerRangeMatcher{key: key, start: start, end: end} -} - -func (hrm *headerRangeMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) - if !ok { - return false - } - if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { - return true - } - return false -} - -func (hrm *headerRangeMatcher) String() string { - return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) -} - -type headerPresentMatcher struct { - key string - present bool -} - -func newHeaderPresentMatcher(key string, present bool) *headerPresentMatcher { - return &headerPresentMatcher{key: key, present: present} -} - -func (hpm *headerPresentMatcher) match(md metadata.MD) bool { - vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) - present := ok && len(vs) > 0 - return present == hpm.present -} - -func (hpm *headerPresentMatcher) String() string { - return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) -} - -type headerPrefixMatcher struct { - key string - prefix string -} - -func newHeaderPrefixMatcher(key string, prefix string) *headerPrefixMatcher { - return &headerPrefixMatcher{key: key, prefix: prefix} -} - -func (hpm *headerPrefixMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hpm.key) - if !ok { - return false - } - return strings.HasPrefix(v, hpm.prefix) -} - -func (hpm *headerPrefixMatcher) String() string { - return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix) -} - -type headerSuffixMatcher struct { - key string - suffix string -} - -func newHeaderSuffixMatcher(key string, suffix string) *headerSuffixMatcher { - return &headerSuffixMatcher{key: key, suffix: suffix} -} - -func (hsm *headerSuffixMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hsm.key) - if !ok { - return false - } - return strings.HasSuffix(v, hsm.suffix) -} - -func (hsm 
*headerSuffixMatcher) String() string { - return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) -} - -type invertMatcher struct { - m headerMatcherInterface -} - -func newInvertMatcher(m headerMatcherInterface) *invertMatcher { - return &invertMatcher{m: m} -} - -func (i *invertMatcher) match(md metadata.MD) bool { - return !i.m.match(md) -} - -func (i *invertMatcher) String() string { - return fmt.Sprintf("invert{%s}", i.m) -} diff --git a/xds/internal/resolver/matcher_test.go b/xds/internal/resolver/matcher_test.go index 5c8dca5c9e5b..6f599b82da2a 100644 --- a/xds/internal/resolver/matcher_test.go +++ b/xds/internal/resolver/matcher_test.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/metadata" ) @@ -34,14 +35,14 @@ func TestAndMatcherMatch(t *testing.T) { tests := []struct { name string pm pathMatcherInterface - hm headerMatcherInterface + hm matcher.HeaderMatcherInterface info iresolver.RPCInfo want bool }{ { name: "both match", pm: newPathExactMatcher("/a/b", false), - hm: newHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv"), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -51,7 +52,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "both match with path case insensitive", pm: newPathExactMatcher("/A/B", true), - hm: newHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv"), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -61,7 +62,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "only one match", pm: newPathExactMatcher("/a/b", false), - hm: newHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv"), info: 
iresolver.RPCInfo{ Method: "/z/y", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -71,7 +72,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "both not match", pm: newPathExactMatcher("/z/y", false), - hm: newHeaderExactMatcher("th", "abc"), + hm: matcher.NewHeaderExactMatcher("th", "abc"), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -81,7 +82,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "fake header", pm: newPathPrefixMatcher("/", false), - hm: newHeaderExactMatcher("content-type", "fake"), + hm: matcher.NewHeaderExactMatcher("content-type", "fake"), info: iresolver.RPCInfo{ Method: "/a/b", Context: grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs( @@ -93,7 +94,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "binary header", pm: newPathPrefixMatcher("/", false), - hm: newHeaderPresentMatcher("t-bin", true), + hm: matcher.NewHeaderPresentMatcher("t-bin", true), info: iresolver.RPCInfo{ Method: "/a/b", Context: grpcutil.WithExtraMetadata( @@ -107,7 +108,7 @@ func TestAndMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - a := newCompositeMatcher(tt.pm, []headerMatcherInterface{tt.hm}, nil) + a := newCompositeMatcher(tt.pm, []matcher.HeaderMatcherInterface{tt.hm}, nil) if got := a.match(tt.info); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } From 50c071e9b5431dcb90be089c7159efc63edff4cb Mon Sep 17 00:00:00 2001 From: Zeke Lu Date: Sat, 15 May 2021 05:09:26 +0800 Subject: [PATCH 080/998] example: correct the default value for server_host_override (#4407) --- examples/features/encryption/README.md | 4 ++-- examples/route_guide/client/client.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/features/encryption/README.md b/examples/features/encryption/README.md index a00188d66a2d..2afca1d785f5 100644 --- 
a/examples/features/encryption/README.md +++ b/examples/features/encryption/README.md @@ -42,8 +42,8 @@ configure TLS and create the server credential using On client side, we provide the path to the "ca_cert.pem" to configure TLS and create the client credential using [`credentials.NewClientTLSFromFile`](https://godoc.org/google.golang.org/grpc/credentials#NewClientTLSFromFile). -Note that we override the server name with "x.test.youtube.com", as the server -certificate is valid for *.test.youtube.com but not localhost. It is solely for +Note that we override the server name with "x.test.example.com", as the server +certificate is valid for *.test.example.com but not localhost. It is solely for the convenience of making an example. Once the credentials have been created at both sides, we can start the server diff --git a/examples/route_guide/client/client.go b/examples/route_guide/client/client.go index 172f10fb308b..f18c10af8b14 100644 --- a/examples/route_guide/client/client.go +++ b/examples/route_guide/client/client.go @@ -40,7 +40,7 @@ var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") caFile = flag.String("ca_file", "", "The file containing the CA root cert file") serverAddr = flag.String("server_addr", "localhost:10000", "The server address in the format of host:port") - serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name used to verify the hostname returned by the TLS handshake") + serverHostOverride = flag.String("server_host_override", "x.test.example.com", "The server name used to verify the hostname returned by the TLS handshake") ) // printFeature gets the feature for the given point. 
From a12250e98f973530f34191d39f840ae435f00a91 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 14 May 2021 15:20:45 -0700 Subject: [PATCH 081/998] xds/cds: add env var for aggregated and DNS cluster (#4440) --- internal/xds/env/env.go | 20 +++++++++++++++----- xds/internal/client/cds_test.go | 6 ++++++ xds/internal/client/xds.go | 8 +++++++- 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 1110722a630b..db9ac93b968c 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -37,11 +37,13 @@ const ( // and kept in variable BootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + circuitBreakingSupportEnv = "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" timeoutSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT" faultInjectionSupportEnv = "GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION" clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -60,6 +62,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) + // CircuitBreakingSupport indicates whether circuit breaking support is // enabled, which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" to "false". @@ -71,10 +74,6 @@ var ( // FaultInjectionSupport is used to control both fault injection and HTTP // filter support. FaultInjectionSupport = !strings.EqualFold(os.Getenv(faultInjectionSupportEnv), "false") - // C2PResolverSupport indicates whether support for C2P resolver is enabled. 
- // This can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". - C2PResolverSupport = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") // ClientSideSecuritySupport is used to control processing of security // configuration on the client-side. // @@ -82,6 +81,17 @@ var ( // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. ClientSideSecuritySupport = strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "true") + // AggregateAndDNSSupportEnv indicates whether processing of aggregated + // cluster and DNS cluster is enabled, which can be enabled by setting the + // environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to + // "true". + AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + + // C2PResolverSupport indicates whether support for C2P resolver is enabled. + // This can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". + C2PResolverSupport = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
)
diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go
index 7d8cf6a8670b..5209105b430a 100644
--- a/xds/internal/client/cds_test.go
+++ b/xds/internal/client/cds_test.go
@@ -124,6 +124,9 @@ func (s) TestValidateCluster_Failure(t *testing.T) {
 		},
 	}
 
+	oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv
+	env.AggregateAndDNSSupportEnv = true
+	defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }()
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			if update, err := validateClusterAndConstructClusterUpdate(test.cluster); err == nil {
@@ -261,6 +264,9 @@ func (s) TestValidateCluster_Success(t *testing.T) {
 	origCircuitBreakingSupport := env.CircuitBreakingSupport
 	env.CircuitBreakingSupport = true
 	defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }()
+	oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv
+	env.AggregateAndDNSSupportEnv = true
+	defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }()
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			update, err := validateClusterAndConstructClusterUpdate(test.cluster)
diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go
index e9fe205a7ce2..7b4f4048ea58 100644
--- a/xds/internal/client/xds.go
+++ b/xds/internal/client/xds.go
@@ -615,9 +615,15 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu
 		ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName()
 		return ret, nil
 	case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS:
+		if !env.AggregateAndDNSSupportEnv {
+			return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster)
+		}
 		ret.ClusterType = ClusterTypeLogicalDNS
 		return ret, nil
 	case cluster.GetClusterType() != nil && cluster.GetClusterType().Name ==
"envoy.clusters.aggregate": + if !env.AggregateAndDNSSupportEnv { + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } clusters := &v3aggregateclusterpb.ClusterConfig{} if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) @@ -626,7 +632,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu ret.PrioritizedClusterNames = clusters.Clusters return ret, nil default: - return ClusterUpdate{}, fmt.Errorf("unexpected cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) } } From 78e8edf34d3649c7459e9cf88855f5bbf4f8e6f9 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 17 May 2021 14:13:32 -0700 Subject: [PATCH 082/998] interop/xds: dockerfile for the xds interop client (#4443) --- interop/xds/client/Dockerfile | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 interop/xds/client/Dockerfile diff --git a/interop/xds/client/Dockerfile b/interop/xds/client/Dockerfile new file mode 100644 index 000000000000..060f8a8c64aa --- /dev/null +++ b/interop/xds/client/Dockerfile @@ -0,0 +1,34 @@ +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for building the xDS interop client. To build the image, run the +# following command from grpc-go directory: +# docker build -t -f interop/xds/client/Dockerfile . + +FROM golang:1.16-alpine as build + +# Make a grpc-go directory and copy the repo into it. +WORKDIR /go/src/grpc-go +COPY . . + +# Build a static binary without cgo so that we can copy just the binary in the +# final image, and can get rid of Go compiler and gRPC-Go dependencies. +RUN go build -tags osusergo,netgo interop/xds/client/client.go + +# Second stage of the build which copies over only the client binary and skips +# the Go compiler and gRPC repo from the earlier stage. This significantly +# reduces the docker image size. +FROM alpine +COPY --from=build /go/src/grpc-go/client . +ENTRYPOINT ["./client"] From 9749a79336273a1957e338d519ac553f4885cee9 Mon Sep 17 00:00:00 2001 From: James Protzman Date: Mon, 17 May 2021 17:49:15 -0400 Subject: [PATCH 083/998] transport: remove decodeState from server to reduce allocations (#4423) --- internal/transport/http2_server.go | 121 +++++++++++----- internal/transport/http_util.go | 198 --------------------------- internal/transport/http_util_test.go | 65 --------- internal/transport/transport_test.go | 107 +++++++++++++++ 4 files changed, 195 insertions(+), 296 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 11be5599cd47..ffff382e8f16 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -304,37 +304,92 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // operateHeader takes action on the decoded headers. 
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if h2code, err := state.decodeHeader(frame); err != nil { - if _, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: h2code, - onWrite: func() {}, - }) - } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) return false } buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -347,14 +402,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. 
- if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) - } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) - } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } } t.mu.Lock() if t.state != reachable { @@ -383,10 +438,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID - if state.data.httpMethod != http.MethodPost { + if httpMethod != http.MethodPost { t.mu.Unlock() if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) } t.controlBuf.put(&cleanupStream{ streamID: streamID, @@ -399,7 +454,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: state.data.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() if logger.V(logLevel) { logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) @@ -437,7 +492,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(state.data.mdata).Copy(), + Header: 
metadata.MD(mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index 2771e224f7bf..15d775fca3cc 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -39,7 +39,6 @@ import ( spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -96,53 +95,6 @@ var ( logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - httpMethod string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. 
-type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -221,63 +173,6 @@ func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { return status.FromProto(st), nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return http2.ErrCodeProtocol, d.data.grpcErr - } - if d.serverSide { - return http2.ErrCodeNo, nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. 
- code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return http2.ErrCodeNo, nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return http2.ErrCodeProtocol, d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. -func (d *decodeState) constructHTTPErrMsg() string { - return constructHTTPErrMsg(d.data.httpStatus, d.data.contentTypeErr) -} - // constructErrMsg constructs error message to be returned in HTTP fallback mode. // Format: HTTP status code and its corresponding message + content-type error message. func constructHTTPErrMsg(httpStatus *int, contentTypeErr string) string { @@ -298,99 +193,6 @@ func constructHTTPErrMsg(httpStatus *int, contentTypeErr string) string { return strings.Join(errMsgs, "; ") } -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) - } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? 
- d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - case ":method": - d.data.httpMethod = f.Value - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err 
!= nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - } - return - } - d.addMetadata(f.Name, v) - } -} - type timeoutUnit uint8 const ( diff --git a/internal/transport/http_util_test.go b/internal/transport/http_util_test.go index 2205050acea0..bbd53180471e 100644 --- a/internal/transport/http_util_test.go +++ b/internal/transport/http_util_test.go @@ -23,9 +23,6 @@ import ( "reflect" "testing" "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" ) func (s) TestTimeoutDecode(t *testing.T) { @@ -189,68 +186,6 @@ func (s) TestDecodeMetadataHeader(t *testing.T) { } } -func (s) TestDecodeHeaderH2ErrCode(t *testing.T) { - for _, test := range []struct { - name string - // input - metaHeaderFrame *http2.MetaHeadersFrame - serverSide bool - // output - wantCode http2.ErrCode - }{ - { - name: "valid header", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/grpc"}, - }}, - wantCode: http2.ErrCodeNo, - }, - { - name: "valid header serverSide", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/grpc"}, - }}, - serverSide: true, - wantCode: http2.ErrCodeNo, - }, - { - name: "invalid grpc status header field", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/grpc"}, - {Name: "grpc-status", Value: "xxxx"}, - }}, - wantCode: http2.ErrCodeProtocol, - }, - { - name: "invalid http content type", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/json"}, - }}, - wantCode: http2.ErrCodeProtocol, - }, - { - name: "http fallback and invalid http status", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - // No content type provided then fallback into handling http error. 
- {Name: ":status", Value: "xxxx"}, - }}, - wantCode: http2.ErrCodeProtocol, - }, - { - name: "http2 frame size exceeds", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: nil, Truncated: true}, - wantCode: http2.ErrCodeFrameSize, - }, - } { - t.Run(test.name, func(t *testing.T) { - state := &decodeState{serverSide: test.serverSide} - if h2code, _ := state.decodeHeader(test.metaHeaderFrame); h2code != test.wantCode { - t.Fatalf("decodeState.decodeHeader(%v) = %v, want %v", test.metaHeaderFrame, h2code, test.wantCode) - } - }) - } -} - func (s) TestParseDialTarget(t *testing.T) { for _, test := range []struct { target, wantNet, wantAddr string diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index c3830a8fd0b1..3aef86277140 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1976,3 +1976,110 @@ func (s) TestClientHandshakeInfo(t *testing.T) { t.Fatalf("received attributes %v in creds, want %v", gotAttr, wantAttr) } } + +func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { + for _, test := range []struct { + name string + // input + metaHeaderFrame *http2.MetaHeadersFrame + // output + wantStatus *status.Status + }{ + { + name: "valid header", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + {Name: "grpc-status", Value: "0"}, + }, + }, + // no error + wantStatus: status.New(codes.OK, ""), + }, + { + name: "invalid grpc status header field", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + {Name: "grpc-status", Value: "xxxx"}, + }, + }, + wantStatus: status.New( + codes.Internal, + "transport: malformed grpc-status: strconv.ParseInt: parsing \"xxxx\": invalid syntax", + ), + }, + { + name: "invalid http content type", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", 
Value: "application/json"}, + }, + }, + wantStatus: status.New( + codes.Internal, + ": HTTP status code 0; transport: received the unexpected content-type \"application/json\"", + ), + }, + { + name: "http fallback and invalid http status", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + // No content type provided then fallback into handling http error. + {Name: ":status", Value: "xxxx"}, + }, + }, + wantStatus: status.New( + codes.Internal, + "transport: malformed http-status: strconv.ParseInt: parsing \"xxxx\": invalid syntax", + ), + }, + { + name: "http2 frame size exceeds", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: nil, + Truncated: true, + }, + wantStatus: status.New( + codes.Internal, + "peer header list size exceeded limit", + ), + }, + } { + t.Run(test.name, func(t *testing.T) { + ts := &Stream{ + done: make(chan struct{}), + headerChan: make(chan struct{}), + buf: &recvBuffer{ + c: make(chan recvMsg), + mu: sync.Mutex{}, + }, + } + s := &http2Client{ + mu: sync.Mutex{}, + activeStreams: map[uint32]*Stream{ + 0: ts, + }, + controlBuf: &controlBuffer{ + ch: make(chan struct{}), + done: make(chan struct{}), + list: &itemList{}, + }, + } + test.metaHeaderFrame.HeadersFrame = &http2.HeadersFrame{ + FrameHeader: http2.FrameHeader{ + StreamID: 0, + Flags: http2.FlagHeadersEndStream, + }, + } + + s.operateHeaders(test.metaHeaderFrame) + + got := ts.status + want := test.wantStatus + if got.Code() != want.Code() || got.Message() != want.Message() { + t.Fatalf("operateHeaders(%v); status = %v; want %v", test.metaHeaderFrame, got, want) + } + }) + } +} From 39015b9c5e190f8b687d8c79f1e6353568974104 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 17 May 2021 15:03:59 -0700 Subject: [PATCH 084/998] interop/xds: support xds security on interop server (#4444) --- interop/xds/server/Dockerfile | 34 ++++++++ interop/xds/server/server.go | 142 +++++++++++++++++++++++++++++----- 2 files changed, 158 insertions(+), 18 
deletions(-) create mode 100644 interop/xds/server/Dockerfile diff --git a/interop/xds/server/Dockerfile b/interop/xds/server/Dockerfile new file mode 100644 index 000000000000..259066e86c50 --- /dev/null +++ b/interop/xds/server/Dockerfile @@ -0,0 +1,34 @@ +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for building the xDS interop server. To build the image, run the +# following command from grpc-go directory: +# docker build -t -f interop/xds/server/Dockerfile . + +FROM golang:1.16-alpine as build + +# Make a grpc-go directory and copy the repo into it. +WORKDIR /go/src/grpc-go +COPY . . + +# Build a static binary without cgo so that we can copy just the binary in the +# final image, and can get rid of the Go compiler and gRPC-Go dependencies. +RUN go build -tags osusergo,netgo interop/xds/server/server.go + +# Second stage of the build which copies over only the client binary and skips +# the Go compiler and gRPC repo from the earlier stage. This significantly +# reduces the docker image size. +FROM alpine +COPY --from=build /go/src/grpc-go/server . +ENTRYPOINT ["./server"] diff --git a/interop/xds/server/server.go b/interop/xds/server/server.go index 4989eb728eec..2f33799f9404 100644 --- a/interop/xds/server/server.go +++ b/interop/xds/server/server.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,29 +16,37 @@ * */ -// Binary server for xDS interop tests. +// Binary server is the server used for xDS interop tests. package main import ( "context" "flag" + "fmt" "log" "net" "os" - "strconv" "google.golang.org/grpc" + "google.golang.org/grpc/admin" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/xds" + xdscreds "google.golang.org/grpc/credentials/xds" + healthpb "google.golang.org/grpc/health/grpc_health_v1" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( - port = flag.Int("port", 8080, "The server port") - serverID = flag.String("server_id", "go_server", "Server ID included in response") - hostname = getHostname() + port = flag.Int("port", 8080, "Listening port for test service") + maintenancePort = flag.Int("maintenance_port", 8081, "Listening port for maintenance services like health, reflection, channelz etc when -secure_mode is true. When -secure_mode is false, all these services will be registered on -port") + serverID = flag.String("server_id", "go_server", "Server ID included in response") + secureMode = flag.Bool("secure_mode", false, "If true, retrieve security configuration from the management server. Else, use insecure credentials.") logger = grpclog.Component("interop") ) @@ -51,28 +59,126 @@ func getHostname() string { return hostname } -type server struct { +// testServiceImpl provides an implementation of the TestService defined in +// grpc.testing package. 
+type testServiceImpl struct { testgrpc.UnimplementedTestServiceServer + hostname string } -func (s *server) EmptyCall(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { - grpc.SetHeader(ctx, metadata.Pairs("hostname", hostname)) +func (s *testServiceImpl) EmptyCall(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + grpc.SetHeader(ctx, metadata.Pairs("hostname", s.hostname)) return &testpb.Empty{}, nil } -func (s *server) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - grpc.SetHeader(ctx, metadata.Pairs("hostname", hostname)) - return &testpb.SimpleResponse{ServerId: *serverID, Hostname: hostname}, nil +func (s *testServiceImpl) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + grpc.SetHeader(ctx, metadata.Pairs("hostname", s.hostname)) + return &testpb.SimpleResponse{ServerId: *serverID, Hostname: s.hostname}, nil +} + +// xdsUpdateHealthServiceImpl provides an implementation of the +// XdsUpdateHealthService defined in grpc.testing package. 
+type xdsUpdateHealthServiceImpl struct { + testgrpc.UnimplementedXdsUpdateHealthServiceServer + healthServer *health.Server +} + +func (x *xdsUpdateHealthServiceImpl) SetServing(_ context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + x.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + return &testpb.Empty{}, nil + +} + +func (x *xdsUpdateHealthServiceImpl) SetNotServing(_ context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + x.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) + return &testpb.Empty{}, nil +} + +func xdsServingModeCallback(addr net.Addr, args xds.ServingModeChangeArgs) { + logger.Infof("Serving mode for xDS server at %s changed to %s", addr.String(), args.Mode) + if args.Err != nil { + logger.Infof("ServingModeCallback returned error: %v", args.Err) + } } func main() { flag.Parse() - p := strconv.Itoa(*port) - lis, err := net.Listen("tcp", ":"+p) + + if *secureMode && *port == *maintenancePort { + logger.Fatal("-port and -maintenance_port must be different when -secure_mode is set") + } + + testService := &testServiceImpl{hostname: getHostname()} + healthServer := health.NewServer() + updateHealthService := &xdsUpdateHealthServiceImpl{healthServer: healthServer} + + // If -secure_mode is not set, expose all services on -port with a regular + // gRPC server. 
+ if !*secureMode { + lis, err := net.Listen("tcp4", fmt.Sprintf(":%d", *port)) + if err != nil { + logger.Fatalf("net.Listen(%s) failed: %v", fmt.Sprintf(":%d", *port), err) + } + + server := grpc.NewServer() + testgrpc.RegisterTestServiceServer(server, testService) + healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthpb.RegisterHealthServer(server, healthServer) + testgrpc.RegisterXdsUpdateHealthServiceServer(server, updateHealthService) + reflection.Register(server) + cleanup, err := admin.Register(server) + if err != nil { + logger.Fatalf("Failed to register admin services: %v", err) + } + defer cleanup() + if err := server.Serve(lis); err != nil { + logger.Errorf("Serve() failed: %v", err) + } + return + } + + // Create a listener on -port to expose the test service. + testLis, err := net.Listen("tcp4", fmt.Sprintf(":%d", *port)) if err != nil { - logger.Fatalf("failed to listen: %v", err) + logger.Fatalf("net.Listen(%s) failed: %v", fmt.Sprintf(":%d", *port), err) + } + + // Create server-side xDS credentials with a plaintext fallback. + creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + logger.Fatalf("Failed to create xDS credentials: %v", err) + } + + // Create an xDS enabled gRPC server, register the test service + // implementation and start serving. + testServer := xds.NewGRPCServer(grpc.Creds(creds), xds.ServingModeCallback(xdsServingModeCallback)) + testgrpc.RegisterTestServiceServer(testServer, testService) + go func() { + if err := testServer.Serve(testLis); err != nil { + logger.Errorf("test server Serve() failed: %v", err) + } + }() + defer testServer.Stop() + + // Create a listener on -maintenance_port to expose other services. 
+ maintenanceLis, err := net.Listen("tcp4", fmt.Sprintf(":%d", *maintenancePort)) + if err != nil { + logger.Fatalf("net.Listen(%s) failed: %v", fmt.Sprintf(":%d", *maintenancePort), err) + } + + // Create a regular gRPC server and register the maintenance services on + // it and start serving. + maintenanceServer := grpc.NewServer() + healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthpb.RegisterHealthServer(maintenanceServer, healthServer) + testgrpc.RegisterXdsUpdateHealthServiceServer(maintenanceServer, updateHealthService) + reflection.Register(maintenanceServer) + cleanup, err := admin.Register(maintenanceServer) + if err != nil { + logger.Fatalf("Failed to register admin services: %v", err) + } + defer cleanup() + if err := maintenanceServer.Serve(maintenanceLis); err != nil { + logger.Errorf("maintenance server Serve() failed: %v", err) } - s := grpc.NewServer() - testgrpc.RegisterTestServiceServer(s, &server{}) - s.Serve(lis) } From 2713b77e85261254c628d9c61d00f582e6a20d08 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 17 May 2021 17:27:58 -0700 Subject: [PATCH 085/998] use depth logging from the e2e package (#4448) --- .../test/xds_server_serving_mode_test.go | 7 +++---- xds/internal/testutils/e2e/server.go | 20 +++++++++++++++---- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 414a559b0982..0b70f7b06aed 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -182,8 +182,8 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("rpc EmptyCall() failed: %v", err) } - // Update the management server to remove the second listener resource. This should - // push the only the second listener into "not-serving" mode. + // Update the management server to remove the second listener resource. 
This + // should push only the second listener into "not-serving" mode. if err := managementServer.Update(e2e.UpdateOptions{ NodeID: xdsClientNodeID, Listeners: []*v3listenerpb.Listener{listener1}, @@ -240,8 +240,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // short timeout since we expect this to fail. sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() - _, err = grpc.DialContext(sCtx, lis1.Addr().String(), grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err == nil { + if _, err := grpc.DialContext(sCtx, lis1.Addr().String(), grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())); err == nil { t.Fatal("successfully created clientConn to a server in \"not-serving\" state") } diff --git a/xds/internal/testutils/e2e/server.go b/xds/internal/testutils/e2e/server.go index 4a71a5054d7d..bc795b1b0cd0 100644 --- a/xds/internal/testutils/e2e/server.go +++ b/xds/internal/testutils/e2e/server.go @@ -45,10 +45,22 @@ var logger = grpclog.Component("xds-e2e") // envoyproxy/go-control-plane/pkg/log. This is passed to the Snapshot cache. type serverLogger struct{} -func (l serverLogger) Debugf(format string, args ...interface{}) { logger.Infof(format, args...) } -func (l serverLogger) Infof(format string, args ...interface{}) { logger.Infof(format, args...) } -func (l serverLogger) Warnf(format string, args ...interface{}) { logger.Warningf(format, args...) } -func (l serverLogger) Errorf(format string, args ...interface{}) { logger.Errorf(format, args...) } +func (l serverLogger) Debugf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.InfoDepth(1, msg) +} +func (l serverLogger) Infof(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.InfoDepth(1, msg) +} +func (l serverLogger) Warnf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) 
+ logger.WarningDepth(1, msg) +} +func (l serverLogger) Errorf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.ErrorDepth(1, msg) +} // ManagementServer is a thin wrapper around the xDS control plane // implementation provided by envoyproxy/go-control-plane. From 584fa418225e60652638b79c38a189be1ff00036 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 18 May 2021 10:30:43 -0700 Subject: [PATCH 086/998] xds/testing: export variables for testing (#4449) The exported variables will be used by tests (to be added in a future PR, in another package) that use these balancers as child balancer. --- .../balancer/clusterimpl/balancer_test.go | 2 +- xds/internal/balancer/clusterimpl/picker.go | 6 ++++-- .../balancer/priority/balancer_priority.go | 9 ++++++--- .../balancer/priority/balancer_test.go | 18 +++++++++--------- .../balancer/weightedtarget/weightedtarget.go | 6 +++--- 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index d1da371c27f3..cfce6f673913 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -66,7 +66,7 @@ func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { } func init() { - newRandomWRR = testutils.NewTestWRR + NewRandomWRR = testutils.NewTestWRR } // TestDropByCategory verifies that the balancer correctly drops the picks, and diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index 87faba13a746..7a31615510d3 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -28,7 +28,9 @@ import ( "google.golang.org/grpc/xds/internal/client/load" ) -var newRandomWRR = wrr.NewRandom +// NewRandomWRR is used when calculating drops. It's exported so that tests can +// override it. 
+var NewRandomWRR = wrr.NewRandom const million = 1000000 @@ -48,7 +50,7 @@ func gcd(a, b uint32) uint32 { } func newDropper(c DropConfig) *dropper { - w := newRandomWRR() + w := NewRandomWRR() gcdv := gcd(c.RequestsPerMillion, million) // Return true for RequestPerMillion, false for the rest. w.Add(true, int64(c.RequestsPerMillion/gcdv)) diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index ea2f4f04184c..672f17498b16 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -28,8 +28,11 @@ import ( ) var ( - errAllPrioritiesRemoved = errors.New("no locality is provided, all priorities are removed") - defaultPriorityInitTimeout = 10 * time.Second + errAllPrioritiesRemoved = errors.New("no locality is provided, all priorities are removed") + // DefaultPriorityInitTimeout is the timeout after which if a priority is + // not READY, the next will be started. It's exported to be overridden by + // tests. + DefaultPriorityInitTimeout = 10 * time.Second ) // syncPriority handles priority after a config update. It makes sure the @@ -162,7 +165,7 @@ func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { // to check the stopped boolean. 
timerW := &timerWrapper{} b.priorityInitTimer = timerW - timerW.timer = time.AfterFunc(defaultPriorityInitTimeout, func() { + timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { b.mu.Lock() defer b.mu.Unlock() if timerW.stopped { diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 61e3dee94d9a..03442671311b 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -690,10 +690,10 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { func (s) TestPriority_InitTimeout(t *testing.T) { const testPriorityInitTimeout = time.Second defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout + old := DefaultPriorityInitTimeout + DefaultPriorityInitTimeout = testPriorityInitTimeout return func() { - defaultPriorityInitTimeout = old + DefaultPriorityInitTimeout = old } }()() @@ -760,10 +760,10 @@ func (s) TestPriority_InitTimeout(t *testing.T) { func (s) TestPriority_RemovesAllPriorities(t *testing.T) { const testPriorityInitTimeout = time.Second defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout + old := DefaultPriorityInitTimeout + DefaultPriorityInitTimeout = testPriorityInitTimeout return func() { - defaultPriorityInitTimeout = old + DefaultPriorityInitTimeout = old } }()() @@ -1030,9 +1030,9 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { const testPriorityInitTimeout = time.Second defer func(t time.Duration) { - defaultPriorityInitTimeout = t - }(defaultPriorityInitTimeout) - defaultPriorityInitTimeout = testPriorityInitTimeout + DefaultPriorityInitTimeout = t + }(DefaultPriorityInitTimeout) + DefaultPriorityInitTimeout = testPriorityInitTimeout cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) diff --git 
a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index 89f5ec660a04..6c1b70f92235 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -37,9 +37,9 @@ import ( // Name is the name of the weighted_target balancer. const Name = "weighted_target_experimental" -// newRandomWRR is the WRR constructor used to pick sub-pickers from +// NewRandomWRR is the WRR constructor used to pick sub-pickers from // sub-balancers. It's to be modified in tests. -var newRandomWRR = wrr.NewRandom +var NewRandomWRR = wrr.NewRandom func init() { balancer.Register(&weightedTargetBB{}) @@ -50,7 +50,7 @@ type weightedTargetBB struct{} func (wt *weightedTargetBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &weightedTargetBalancer{} b.logger = prefixLogger(b) - b.stateAggregator = weightedaggregator.New(cc, b.logger, newRandomWRR) + b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) b.stateAggregator.Start() b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, nil, b.logger) b.bg.Start() From 74c40c963fefb22798e08e7cf13ef616786b2402 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 18 May 2021 10:31:27 -0700 Subject: [PATCH 087/998] xds/cds: fix LOGICAL_DNS cluster semantics (#4434) --- xds/internal/client/cds_test.go | 44 ++++++++++++++++++++++++++++++++- xds/internal/client/client.go | 3 +++ xds/internal/client/xds.go | 44 +++++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 1 deletion(-) diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index 5209105b430a..8bbf44012e3c 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -28,6 +28,7 @@ import ( v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3clusterpb 
"github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" @@ -122,6 +123,23 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantUpdate: emptyUpdate, wantErr: true, }, + { + name: "logical-dns-multiple-localities", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{ + // Invalid if there are more than one locality. 
+ {LbEndpoints: nil}, + {LbEndpoints: nil}, + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, } oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv @@ -148,8 +166,32 @@ func (s) TestValidateCluster_Success(t *testing.T) { Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ + LbEndpoints: []*v3endpointpb.LbEndpoint{{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "dns_host", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 8080, + }, + }, + }, + }, + }, + }, + }}, + }}, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + ClusterType: ClusterTypeLogicalDNS, + DNSHostName: "dns_host:8080", }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EnableLRS: false, ClusterType: ClusterTypeLogicalDNS}, }, { name: "happy-case-aggregate-v3", diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go index 8888e6214297..4ed1c3a5833c 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/client/client.go @@ -389,6 +389,9 @@ type ClusterUpdate struct { SecurityCfg *SecurityConfig // MaxRequests for circuit breaking, if any (otherwise nil). MaxRequests *uint32 + // DNSHostName is used only for cluster type DNS. It's the DNS name to + // resolve in "host:port" form + DNSHostName string // PrioritizedClusterNames is used only for cluster type aggregate. It represents // a prioritized list of cluster names. 
PrioritizedClusterNames []string diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index 7b4f4048ea58..65287a485a87 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -619,6 +619,11 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) } ret.ClusterType = ClusterTypeLogicalDNS + dnsHN, err := dnsHostNameFromCluster(cluster) + if err != nil { + return ClusterUpdate{}, err + } + ret.DNSHostName = dnsHN return ret, nil case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": if !env.AggregateAndDNSSupportEnv { @@ -636,6 +641,45 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } } +// dnsHostNameFromCluster extracts the DNS host name from the cluster's load +// assignment. +// +// There should be exactly one locality, with one endpoint, whose address +// contains the address and port. 
+func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { + loadAssignment := cluster.GetLoadAssignment() + if loadAssignment == nil { + return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster") + } + if len(loadAssignment.GetEndpoints()) != 1 { + return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment) + } + endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints() + if len(endpoints) != 1 { + return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints) + } + endpoint := endpoints[0].GetEndpoint() + if endpoint == nil { + return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set") + } + socketAddr := endpoint.GetAddress().GetSocketAddress() + if socketAddr == nil { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set") + } + if socketAddr.GetResolverName() != "" { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set has unexpected custom resolver name: %v", socketAddr.GetResolverName()) + } + host := socketAddr.GetAddress() + if host == "" { + return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set") + } + port := socketAddr.GetPortValue() + if port == 0 { + return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set") + } + return net.JoinHostPort(host, strconv.Itoa(int(port))), nil +} + // securityConfigFromCluster extracts the relevant security configuration from // the received Cluster resource. 
func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { From c9c9a7536f5756744347acaba907189e53c38468 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 18 May 2021 10:32:05 -0700 Subject: [PATCH 088/998] internal: fix test unset env var AggregateAndDNSSupportEnv (#4454) --- xds/internal/client/cds_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go index 8bbf44012e3c..82b92372afc3 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/client/cds_test.go @@ -144,7 +144,7 @@ func (s) TestValidateCluster_Failure(t *testing.T) { oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv env.AggregateAndDNSSupportEnv = true - defer func() { env.CircuitBreakingSupport = oldAggregateAndDNSSupportEnv }() + defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { if update, err := validateClusterAndConstructClusterUpdate(test.cluster); err == nil { @@ -308,7 +308,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv env.AggregateAndDNSSupportEnv = true - defer func() { env.CircuitBreakingSupport = oldAggregateAndDNSSupportEnv }() + defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { update, err := validateClusterAndConstructClusterUpdate(test.cluster) From 23a83dd097ec07fc7ddfb4a30c675763e4972ba4 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 18 May 2021 15:26:51 -0700 Subject: [PATCH 089/998] transport: various simplifications noticed during #4447 (#4455) --- internal/transport/controlbuf.go | 15 +++++++-------- internal/transport/handler_server.go | 3 +-- internal/transport/http2_client.go | 8 +++----- 
internal/transport/http2_server.go | 23 ++++++++++------------- internal/transport/transport.go | 2 +- 5 files changed, 22 insertions(+), 29 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index f63a0137622a..04ddaeaa33a1 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -296,7 +296,7 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // *chan struct{} + trfChan atomic.Value // chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { @@ -310,10 +310,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer { // throttle blocks if there are too many incomingSettings/cleanupStreams in the // controlbuf. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { select { - case <-*ch: + case <-ch: case <-c.done: } } @@ -347,8 +347,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } c.mu.Unlock() @@ -389,9 +388,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. 
- ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) } c.transportResponseFrames-- } diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index 05d3871e628d..1c3459c2b4c5 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -141,9 +141,8 @@ type serverHandlerTransport struct { stats stats.Handler } -func (ht *serverHandlerTransport) Close() error { +func (ht *serverHandlerTransport) Close() { ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil } func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 64ebd4a167f3..78c53ca70f87 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -399,11 +399,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) } } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() close(t.writerDone) }() return t, nil diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index ffff382e8f16..43a17833d860 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -102,11 +102,11 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. + // drainChan is initialized when Drain() is called the first time. // After which the server writes out the first GoAway(with ID 2^31-1) frame. 
// Then an independent goroutine will be launched to later send the second GoAway. // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is // already underway. drainChan chan struct{} state transportState @@ -1059,12 +1059,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1118,11 +1118,11 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close() { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return } t.state = closing streams := t.activeStreams @@ -1130,7 +1130,9 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) + } if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } @@ -1142,7 +1144,6 @@ func (t *http2Server) Close() error { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. 
@@ -1207,17 +1208,13 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.mu.Lock() defer t.mu.Unlock() if t.drainChan != nil { return } t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 6cc1031fd92f..3ab945aa4d20 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -694,7 +694,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close() // RemoteAddr returns the remote network address. 
RemoteAddr() net.Addr From 86ac0fbc4037c1e748a650002d34a8044fff59e6 Mon Sep 17 00:00:00 2001 From: Aaron Jheng Date: Thu, 20 May 2021 01:57:27 +0800 Subject: [PATCH 090/998] Documentation: Fix typo (#4445) --- Documentation/server-reflection-tutorial.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/server-reflection-tutorial.md b/Documentation/server-reflection-tutorial.md index b1781fa68dc9..9f26656f22b2 100644 --- a/Documentation/server-reflection-tutorial.md +++ b/Documentation/server-reflection-tutorial.md @@ -58,7 +58,7 @@ $ go run examples/features/reflection/server/main.go Open a new terminal and make sure you are in the directory where grpc_cli lives: ```sh -$ cd /bins/opt +$ cd /bins/opt ``` ### List services From 84d0920b59e5f138ffd4da11f7b2ab51e862b581 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 19 May 2021 11:05:26 -0700 Subject: [PATCH 091/998] transport: unblock read throttling when controlbuf exits (#4447) --- internal/transport/controlbuf.go | 9 +++- internal/transport/http2_client.go | 1 + internal/transport/http2_server.go | 1 + test/end2end_test.go | 72 ++++++++++++++++++++++++++++++ 4 files changed, 82 insertions(+), 1 deletion(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 04ddaeaa33a1..45532f8aeaab 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -406,7 +406,6 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - c.finish() return nil, ErrConnClosing } } @@ -431,6 +430,14 @@ func (c *controlBuffer) finish() { hdr.onOrphaned(ErrConnClosing) } } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. 
+ ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 78c53ca70f87..119f01e3ebcf 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -402,6 +402,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 43a17833d860..21a3c8526158 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -295,6 +295,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } } t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() go t.keepalive() diff --git a/test/end2end_test.go b/test/end2end_test.go index eb91d09afdf0..76ff07a27c22 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -71,6 +71,7 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" + "google.golang.org/grpc/test/bufconn" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" ) @@ -7524,3 +7525,74 @@ func (s) TestCanceledRPCCallOptionRace(t *testing.T) { } wg.Wait() } + +func (s) TestClientSettingsFloodCloseConn(t *testing.T) { + // Tests that the server properly closes its transport if the client floods + // settings frames and then closes the connection. + + // Minimize buffer sizes to stimulate failure condition more quickly. + s := grpc.NewServer(grpc.WriteBufferSize(20)) + l := bufconn.Listen(20) + go s.Serve(l) + + // Dial our server and handshake. 
+ conn, err := l.Dial() + if err != nil { + t.Fatalf("Error dialing bufconn: %v", err) + } + + n, err := conn.Write([]byte(http2.ClientPreface)) + if err != nil || n != len(http2.ClientPreface) { + t.Fatalf("Error writing client preface: %v, %v", n, err) + } + + fr := http2.NewFramer(conn, conn) + f, err := fr.ReadFrame() + if err != nil { + t.Fatalf("Error reading initial settings frame: %v", err) + } + if _, ok := f.(*http2.SettingsFrame); ok { + if err := fr.WriteSettingsAck(); err != nil { + t.Fatalf("Error writing settings ack: %v", err) + } + } else { + t.Fatalf("Error reading initial settings frame: type=%T", f) + } + + // Confirm settings can be written, and that an ack is read. + if err = fr.WriteSettings(); err != nil { + t.Fatalf("Error writing settings frame: %v", err) + } + if f, err = fr.ReadFrame(); err != nil { + t.Fatalf("Error reading frame: %v", err) + } + if sf, ok := f.(*http2.SettingsFrame); !ok || !sf.IsAck() { + t.Fatalf("Unexpected frame: %v", f) + } + + // Flood settings frames until a timeout occurs, indiciating the server has + // stopped reading from the connection, then close the conn. + for { + conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond)) + if err := fr.WriteSettings(); err != nil { + if to, ok := err.(interface{ Timeout() bool }); !ok || !to.Timeout() { + t.Fatalf("Received unexpected write error: %v", err) + } + break + } + } + conn.Close() + + // If the server does not handle this situation correctly, it will never + // close the transport. This is because its loopyWriter.run() will have + // exited, and thus not handle the goAway the draining process initiates. + // Also, we would see a goroutine leak in this case, as the reader would be + // blocked on the controlBuf's throttle() method indefinitely. 
+ + timer := time.AfterFunc(5*time.Second, func() { + t.Errorf("Timeout waiting for GracefulStop to return") + s.Stop() + }) + s.GracefulStop() + timer.Stop() +} From 3dd75a6888ce5d1b5195c5cf72241d9e36f68e42 Mon Sep 17 00:00:00 2001 From: AlphaBaby Date: Thu, 20 May 2021 02:18:52 +0800 Subject: [PATCH 092/998] xds_client/rds: weighted_cluster totalWeight default to 100 (#4439) --- xds/internal/client/rds_test.go | 90 +++++++++++++++++++++++++++++++++ xds/internal/client/xds.go | 10 +++- 2 files changed, 98 insertions(+), 2 deletions(-) diff --git a/xds/internal/client/rds_test.go b/xds/internal/client/rds_test.go index 10745e9e97da..33bcc2ad13f8 100644 --- a/xds/internal/client/rds_test.go +++ b/xds/internal/client/rds_test.go @@ -1077,6 +1077,96 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, wantErr: true, }, + { + name: "totalWeight is nil in weighted clusters action", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 20}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 30}}, + }, + }}}}, + }, + }, + wantErr: true, + }, + { + name: "The sum of all weighted clusters is not equal totalWeight", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 50}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 20}}, + }, + TotalWeight: 
&wrapperspb.UInt32Value{Value: 100}, + }}}}, + }, + }, + wantErr: true, + }, + { + name: "default totalWeight is 100 in weighted clusters action", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + }}, + wantErr: false, + }, + { + name: "default totalWeight is 100 in weighted clusters action", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 30}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 20}}, + }, + TotalWeight: &wrapperspb.UInt32Value{Value: 50}, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 20}, "B": {Weight: 30}}, + }}, + wantErr: false, + }, { name: "with custom HTTP filter config", routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go index 65287a485a87..b95224113237 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/client/xds.go @@ -522,8 +522,14 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger 
*grpclog.PrefixLogger, route.WeightedClusters[c.GetName()] = wc totalWeight += w } - if totalWeight != wcs.GetTotalWeight().GetValue() { - return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, want %v", r, a, wcs.GetTotalWeight().GetValue(), totalWeight) + // envoy xds doc + // default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight + wantTotalWeight := uint32(100) + if tw := wcs.GetTotalWeight(); tw != nil { + wantTotalWeight = tw.GetValue() + } + if totalWeight != wantTotalWeight { + return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) } if totalWeight == 0 { return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) From b1f7648a9fc72ce76cbcd42d8e2c60d9d9bed9fc Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 21 May 2021 15:15:58 -0700 Subject: [PATCH 093/998] client: ensure LB policy is closed before closing resolver (#4478) --- balancer_conn_wrappers.go | 19 ++++++++++++++----- clientconn.go | 6 +++--- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 4cc7f9159b16..f1bb6dd30737 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -44,6 +44,7 @@ type ccBalancerWrapper struct { balancerMu sync.Mutex // synchronizes calls to the balancer balancer balancer.Balancer scBuffer *buffer.Unbounded + closed *grpcsync.Event done *grpcsync.Event mu sync.Mutex @@ -54,6 +55,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui ccb := &ccBalancerWrapper{ cc: cc, scBuffer: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), subConns: 
make(map[*acBalancerWrapper]struct{}), } @@ -69,33 +71,40 @@ func (ccb *ccBalancerWrapper) watcher() { select { case t := <-ccb.scBuffer.Get(): ccb.scBuffer.Load() - if ccb.done.HasFired() { + if ccb.closed.HasFired() { break } ccb.balancerMu.Lock() su := t.(*scStateUpdate) ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + case <-ccb.closed.Done(): } - if ccb.done.HasFired() { + if ccb.closed.HasFired() { + ccb.balancerMu.Lock() ccb.balancer.Close() + ccb.balancerMu.Unlock() ccb.mu.Lock() scs := ccb.subConns ccb.subConns = nil ccb.mu.Unlock() + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + ccb.done.Fire() + // Fire done before removing the addr conns. We can safely unblock + // ccb.close and allow the removeAddrConns to happen + // asynchronously. for acbw := range scs { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) return } } } func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() + ccb.closed.Fire() + <-ccb.done.Done() } func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { diff --git a/clientconn.go b/clientconn.go index 24109264f557..0236c81c4d4a 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1046,12 +1046,12 @@ func (cc *ClientConn) Close() error { cc.blockingpicker.close() - if rWrapper != nil { - rWrapper.close() - } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) From a8e85e0d5704da1f5bd858a7b47621e77fe5035b Mon Sep 17 00:00:00 2001 From: Ehsan Afzali Date: Sat, 22 May 2021 01:54:24 +0300 Subject: [PATCH 094/998] server: allow PreparedMsgs to work for server streams (#3480) --- server.go | 2 ++ test/end2end_test.go | 55 
++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/server.go b/server.go index 0a151dee4fcb..2d8e005cd6f1 100644 --- a/server.go +++ b/server.go @@ -1521,6 +1521,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } } + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } diff --git a/test/end2end_test.go b/test/end2end_test.go index 76ff07a27c22..9a8099ca1e14 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -2249,6 +2249,61 @@ func testPreloaderClientSend(t *testing.T, e env) { } } +func (s) TestPreloaderSenderSend(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + for i := 0; i < 10; i++ { + preparedMsg := &grpc.PreparedMsg{} + err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + Body: []byte{'0' + uint8(i)}, + }, + }) + if err != nil { + return err + } + stream.SendMsg(preparedMsg) + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + var ngot int + var buf bytes.Buffer + for { + reply, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + ngot++ + if buf.Len() > 0 { + buf.WriteByte(',') + } + buf.Write(reply.GetPayload().GetBody()) + } + if want := 10; ngot != want { + t.Errorf("Got %d replies, want %d", ngot, want) + } + if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want { + t.Errorf("Got replies %q; want %q", got, want) + } +} + func (s) TestMaxMsgSizeClientDefault(t *testing.T) { for _, e := 
range listTestEnv() { testMaxMsgSizeClientDefault(t, e) From 359fdbb7b310c71882a354675949a4ca95957d75 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 21 May 2021 15:54:45 -0700 Subject: [PATCH 095/998] Delete .travis.yml file (#4462) --- .travis.yml | 42 ------------------------------------------ 1 file changed, 42 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5847d94e5512..000000000000 --- a/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: go - -matrix: - include: - - go: 1.14.x - env: VET=1 GO111MODULE=on - - go: 1.14.x - env: RACE=1 GO111MODULE=on - - go: 1.14.x - env: RUN386=1 - - go: 1.14.x - env: GRPC_GO_RETRY=on - - go: 1.14.x - env: TESTEXTRAS=1 - - go: 1.13.x - env: GO111MODULE=on - - go: 1.12.x - env: GO111MODULE=on - - go: 1.11.x # Keep until interop tests no longer require Go1.11 - env: GO111MODULE=on - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; security/advancedtls/examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - - if [[ -n "${VET}" ]]; then ./vet.sh; fi - - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - - make test From c4ed6360a98355b1ca6e772a73bd27ece15de3e9 Mon Sep 17 00:00:00 
2001 From: Easwar Swaminathan Date: Mon, 24 May 2021 17:30:29 -0700 Subject: [PATCH 096/998] transport: remove RequestURI field from requests in transport test (#4465) --- internal/transport/handler_server_test.go | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/internal/transport/handler_server_test.go b/internal/transport/handler_server_test.go index f9efdfb0716d..b08dcaaf3c4b 100644 --- a/internal/transport/handler_server_test.go +++ b/internal/transport/handler_server_test.go @@ -62,7 +62,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { ProtoMajor: 2, Method: "GET", Header: http.Header{}, - RequestURI: "/", }, wantErr: "invalid gRPC request method", }, @@ -74,7 +73,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { Header: http.Header{ "Content-Type": {"application/foo"}, }, - RequestURI: "/service/foo.bar", }, wantErr: "invalid gRPC request content-type", }, @@ -86,7 +84,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { Header: http.Header{ "Content-Type": {"application/grpc"}, }, - RequestURI: "/service/foo.bar", }, modrw: func(w http.ResponseWriter) http.ResponseWriter { // Return w without its Flush method @@ -109,7 +106,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, check: func(t *serverHandlerTransport, tt *testCase) error { if t.req != tt.req { @@ -133,7 +129,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, check: func(t *serverHandlerTransport, tt *testCase) error { if !t.timeoutSet { @@ -157,7 +152,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, wantErr: `rpc error: code = Internal desc = malformed time-out: 
transport: timeout unit is not recognized: "tomorrow"`, }, @@ -175,7 +169,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, check: func(ht *serverHandlerTransport, tt *testCase) error { want := metadata.MD{ @@ -247,8 +240,7 @@ func newHandleStreamTest(t *testing.T) *handleStreamTest { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", - Body: bodyr, + Body: bodyr, } rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) ht, err := NewServerHandlerTransport(rw, req, nil) @@ -359,8 +351,7 @@ func (s) TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", - Body: bodyr, + Body: bodyr, } rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) ht, err := NewServerHandlerTransport(rw, req, nil) From 728364accfb93cd52003fb38a6412c8e4965116b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 24 May 2021 17:30:40 -0700 Subject: [PATCH 097/998] server: return UNIMPLEMENTED on receipt of malformed method name (#4464) --- server.go | 2 +- test/end2end_test.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/server.go b/server.go index 2d8e005cd6f1..446995986c8b 100644 --- a/server.go +++ b/server.go @@ -1590,7 +1590,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() diff --git a/test/end2end_test.go b/test/end2end_test.go index 9a8099ca1e14..552f74e1b795 100644 --- a/test/end2end_test.go +++ 
b/test/end2end_test.go @@ -6347,6 +6347,23 @@ func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { } } +// TestMalformedStreamMethod starts a test server and sends an RPC with a +// malformed method name. The server should respond with an UNIMPLEMENTED status +// code in this case. +func (s) TestMalformedStreamMethod(t *testing.T) { + const testMethod = "a-method-name-without-any-slashes" + te := newTest(t, tcpClearRREnv) + te.startServer(nil) + defer te.tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err := te.clientConn().Invoke(ctx, testMethod, nil, nil) + if gotCode := status.Code(err); gotCode != codes.Unimplemented { + t.Fatalf("Invoke with method %q, got code %s, want %s", testMethod, gotCode, codes.Unimplemented) + } +} + func (s) TestMethodFromServerStream(t *testing.T) { const testMethod = "/package.service/method" e := tcpClearRREnv From 280df42a316deb7962dd49d32dedbea720806473 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 25 May 2021 09:16:23 -0700 Subject: [PATCH 098/998] mergeable: require RELEASE NOTES in PR description, milestone, and Type label (#4475) --- .github/mergeable.yml | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/.github/mergeable.yml b/.github/mergeable.yml index d647dafb7ab1..b42ef7b5e8a6 100644 --- a/.github/mergeable.yml +++ b/.github/mergeable.yml @@ -5,32 +5,9 @@ mergeable: - do: label must_include: regex: '^Type:' - fail: - - do: checks - status: 'failure' - payload: - title: 'Need an appropriate "Type:" label' - summary: 'Need an appropriate "Type:" label' - - when: pull_request.* - # This validator requires either the "no release notes" label OR a "Release" milestone - # to be considered successful. However, validators "pass" in mergeable only if all - # checks pass. So it is implemented in reverse. 
- # I.e.: !(!no_relnotes && !release_milestone) ==> no_relnotes || release_milestone - # If both validators pass, then it is considered a failure, and if either fails, it is - # considered a success. - validate: - - do: label - must_exclude: - regex: '^no release notes$' + - do: description + must_include: + regex: 'RELEASE NOTES:' - do: milestone - must_exclude: + must_include: regex: 'Release$' - pass: - - do: checks - status: 'failure' # fail on pass - payload: - title: 'Need Release milestone or "no release notes" label' - summary: 'Need Release milestone or "no release notes" label' - fail: - - do: checks - status: 'success' # pass on fail From 69da917ce95ec0c81e53647b43b6da5b184fdb88 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 25 May 2021 10:25:54 -0700 Subject: [PATCH 099/998] github: update stale bot to github actions (#4480) --- .github/stale.yml | 58 ------------------------------------- .github/workflows/stale.yml | 30 +++++++++++++++++++ 2 files changed, 30 insertions(+), 58 deletions(-) delete mode 100644 .github/stale.yml create mode 100644 .github/workflows/stale.yml diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index 8f69dbc4fe83..000000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,58 +0,0 @@ -# Configuration for probot-stale - https://github.com/probot/stale - -# Number of days of inactivity before an Issue or Pull Request becomes stale -daysUntilStale: 6 - -# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. -# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. -daysUntilClose: 7 - -# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) -onlyLabels: - - "Status: Requires Reporter Clarification" - -# Issues or Pull Requests with these labels will never be considered stale. 
Set to `[]` to disable -exemptLabels: [] - -# Set to true to ignore issues in a project (defaults to false) -exemptProjects: false - -# Set to true to ignore issues in a milestone (defaults to false) -exemptMilestones: false - -# Set to true to ignore issues with an assignee (defaults to false) -exemptAssignees: false - -# Label to use when marking as stale -staleLabel: "stale" - -# Comment to post when marking as stale. Set to `false` to disable -markComment: > - This issue is labeled as requiring an update from the reporter, and no update has been received - after 6 days. If no update is provided in the next 7 days, this issue will be automatically closed. - -# Comment to post when removing the stale label. -# unmarkComment: > -# Your comment here. - -# Comment to post when closing a stale Issue or Pull Request. -# closeComment: > -# Your comment here. - -# Limit the number of actions per hour, from 1-30. Default is 30 -limitPerRun: 1 - -# Limit to only `issues` or `pulls` -# only: issues - -# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': -# pulls: -# daysUntilStale: 30 -# markComment: > -# This pull request has been automatically marked as stale because it has not had -# recent activity. It will be closed if no further activity occurs. Thank you -# for your contributions. 
- -# issues: -# exemptLabels: -# - confirmed diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000000..c897101f3c9c --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,30 @@ +name: Stale bot + +on: + workflow_dispatch: + schedule: + - cron: "44 */2 * * *" + +jobs: + stale: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + days-before-stale: 6 + days-before-close: 7 + only-labels: 'Status: Requires Reporter Clarification' + stale-issue-label: 'stale' + stale-pr-label: 'stale' + operations-per-run: 1 + stale-issue-message: > + This issue is labeled as requiring an update from the reporter, and no update has been received + after 6 days. If no update is provided in the next 7 days, this issue will be automatically closed. + stale-pr-message: > + This PR is labeled as requiring an update from the reporter, and no update has been received + after 6 days. If no update is provided in the next 7 days, this issue will be automatically closed. 
From 4ecb61bedbdef3fb4c52e4f06247d504b54ace9b Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 25 May 2021 11:24:19 -0700 Subject: [PATCH 100/998] github: limit repo access of testing workflows (#4483) --- .github/workflows/codeql-analysis.yml | 6 ++++++ .github/workflows/testing.yml | 3 +++ 2 files changed, 9 insertions(+) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0c3806bdc23d..5251f01db088 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -9,6 +9,12 @@ on: schedule: - cron: '24 20 * * 3' +permissions: + contents: read + security-events: write + pull-requests: read + actions: read + jobs: analyze: name: Analyze diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index b6277ea3065f..c94c812f97b7 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -7,6 +7,9 @@ on: schedule: - cron: 0 0 * * * # daily at 00:00 +permissions: + contents: read + # Always force the use of Go modules env: GO111MODULE: on From 67b720630d6a61ae4fb38d190f16ca7685078a18 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 25 May 2021 11:45:53 -0700 Subject: [PATCH 101/998] github: increase stale bot ops per run to process everything (#4485) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index c897101f3c9c..807d97c813b9 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -21,7 +21,7 @@ jobs: only-labels: 'Status: Requires Reporter Clarification' stale-issue-label: 'stale' stale-pr-label: 'stale' - operations-per-run: 1 + operations-per-run: 60 stale-issue-message: > This issue is labeled as requiring an update from the reporter, and no update has been received after 6 days. If no update is provided in the next 7 days, this issue will be automatically closed. 
From 598e3f6a9dafe9f4da7b874f9ed8c8b3c0ff65ae Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 25 May 2021 11:46:30 -0700 Subject: [PATCH 102/998] github: update lock bot to github actions (#4484) --- .github/lock.yml | 2 -- .github/workflows/lock.yml | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) delete mode 100644 .github/lock.yml create mode 100644 .github/workflows/lock.yml diff --git a/.github/lock.yml b/.github/lock.yml deleted file mode 100644 index 78f7b19b71d3..000000000000 --- a/.github/lock.yml +++ /dev/null @@ -1,2 +0,0 @@ -daysUntilLock: 180 -lockComment: false diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml new file mode 100644 index 000000000000..5f49c7900a3d --- /dev/null +++ b/.github/workflows/lock.yml @@ -0,0 +1,20 @@ +name: 'Lock Threads' + +on: + workflow_dispatch: + schedule: + - cron: '22 1 * * *' + +permissions: + issues: write + pull-requests: write + +jobs: + lock: + runs-on: ubuntu-latest + steps: + - uses: dessant/lock-threads@v2 + with: + github-token: ${{ github.token }} + issue-lock-inactive-days: 180 + pr-lock-inactive-days: 180 From e26e756f13345dd19470073c5c2920b65a24ac3c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 25 May 2021 15:43:14 -0700 Subject: [PATCH 103/998] Enable logging in xds interop docker containers (#4482) --- interop/xds/client/Dockerfile | 2 ++ interop/xds/server/Dockerfile | 2 ++ 2 files changed, 4 insertions(+) diff --git a/interop/xds/client/Dockerfile b/interop/xds/client/Dockerfile index 060f8a8c64aa..59cacb94a26a 100644 --- a/interop/xds/client/Dockerfile +++ b/interop/xds/client/Dockerfile @@ -31,4 +31,6 @@ RUN go build -tags osusergo,netgo interop/xds/client/client.go # reduces the docker image size. FROM alpine COPY --from=build /go/src/grpc-go/client . 
+ENV GRPC_GO_LOG_VERBOSITY_LEVEL=2 +ENV GRPC_GO_LOG_SEVERITY_LEVEL="info" ENTRYPOINT ["./client"] diff --git a/interop/xds/server/Dockerfile b/interop/xds/server/Dockerfile index 259066e86c50..cd8dcb5ccaad 100644 --- a/interop/xds/server/Dockerfile +++ b/interop/xds/server/Dockerfile @@ -31,4 +31,6 @@ RUN go build -tags osusergo,netgo interop/xds/server/server.go # reduces the docker image size. FROM alpine COPY --from=build /go/src/grpc-go/server . +ENV GRPC_GO_LOG_VERBOSITY_LEVEL=2 +ENV GRPC_GO_LOG_SEVERITY_LEVEL="info" ENTRYPOINT ["./server"] From bbb542c3d9c07f587e0025c9bdf0768e9624951b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 25 May 2021 15:46:02 -0700 Subject: [PATCH 104/998] Kokoro build configs for PSM security interop tests (#4481) --- test/kokoro/xds_k8s.cfg | 13 + test/kokoro/xds_k8s.sh | 147 +++++++++ test/kokoro/xds_k8s_install_test_driver.sh | 362 +++++++++++++++++++++ 3 files changed, 522 insertions(+) create mode 100644 test/kokoro/xds_k8s.cfg create mode 100755 test/kokoro/xds_k8s.sh create mode 100755 test/kokoro/xds_k8s_install_test_driver.sh diff --git a/test/kokoro/xds_k8s.cfg b/test/kokoro/xds_k8s.cfg new file mode 100644 index 000000000000..e9a82b002b9e --- /dev/null +++ b/test/kokoro/xds_k8s.cfg @@ -0,0 +1,13 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. +build_file: "grpc-go/test/kokoro/xds-k8s.sh" +timeout_mins: 120 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*sponge_log.log" + strip_prefix: "artifacts" + } +} diff --git a/test/kokoro/xds_k8s.sh b/test/kokoro/xds_k8s.sh new file mode 100755 index 000000000000..4d1fda18a9f9 --- /dev/null +++ b/test/kokoro/xds_k8s.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +# Constants +readonly GITHUB_REPOSITORY_NAME="grpc-go" +# GKE Cluster +readonly GKE_CLUSTER_NAME="interop-test-psm-sec-v2-us-central1-a" +readonly GKE_CLUSTER_ZONE="us-central1-a" +## xDS test server/client Docker images +readonly SERVER_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-server" +readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" +readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" + +####################################### +# Builds test app Docker images and pushes them to GCR +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud builds submit` to stdout, stderr +####################################### +build_test_app_docker_images() { + echo "Building Go xDS interop test app Docker images" + docker build -f "${SRC_DIR}/interop/xds/client/Dockerfile" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + docker build -f "${SRC_DIR}/interop/xds/server/Dockerfile" -t "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + gcloud -q auth configure-docker + docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" + docker push "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" +} + +####################################### +# Builds test app and its docker images unless they already exist +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# 
FORCE_IMAGE_BUILD +# Arguments: +# None +# Outputs: +# Writes the output to stdout, stderr +####################################### +build_docker_images_if_needed() { + # Check if images already exist + server_tags="$(gcloud_gcr_list_image_tags "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Server image: %s:%s\n" "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${server_tags:-Server image not found}" + + client_tags="$(gcloud_gcr_list_image_tags "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Client image: %s:%s\n" "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${client_tags:-Client image not found}" + + # Build if any of the images are missing, or FORCE_IMAGE_BUILD=1 + if [[ "${FORCE_IMAGE_BUILD}" == "1" || -z "${server_tags}" || -z "${client_tags}" ]]; then + build_test_app_docker_images + else + echo "Skipping Go test app build" + fi +} + +####################################### +# Executes the test case +# Globals: +# TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile +# KUBE_CONTEXT: The name of kubectl context with GKE cluster access +# TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# Arguments: +# Test case name +# Outputs: +# Writes the output of test execution to stdout, stderr +# Test xUnit report to ${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml +####################################### +run_test() { + # Test driver usage: + # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage + local test_name="${1:?Usage: run_test test_name}" + set -x + python -m "tests.${test_name}" \ + --flagfile="${TEST_DRIVER_FLAGFILE}" \ + --kube_context="${KUBE_CONTEXT}" \ + --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ + --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + 
--xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ + --force_cleanup \ + --nocheck_local_certs + set +x +} + +####################################### +# Main function: provision software necessary to execute tests, and run them +# Globals: +# KOKORO_ARTIFACTS_DIR +# GITHUB_REPOSITORY_NAME +# SRC_DIR: Populated with absolute path to the source repo +# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing +# the test driver +# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code +# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile +# TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report +# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build +# GIT_COMMIT: Populated with the SHA-1 of git commit being built +# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built +# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access +# Arguments: +# None +# Outputs: +# Writes the output of test execution to stdout, stderr +####################################### +main() { + local script_dir + script_dir="$(dirname "$0")" + # shellcheck source=tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh + source "${script_dir}/xds_k8s_install_test_driver.sh" + set -x + if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then + kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}" + else + local_setup_test_driver "${script_dir}" + fi + build_docker_images_if_needed + # Run tests + cd "${TEST_DRIVER_FULL_DIR}" + run_test baseline_test + run_test security_test +} + +main "$@" diff --git a/test/kokoro/xds_k8s_install_test_driver.sh b/test/kokoro/xds_k8s_install_test_driver.sh new file mode 100755 index 000000000000..54d9127608c3 --- /dev/null +++ b/test/kokoro/xds_k8s_install_test_driver.sh @@ -0,0 +1,362 @@ +#!/usr/bin/env bash +# Copyright 2020 gRPC authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# TODO(sergiitk): move to grpc/grpc when implementing support of other languages +set -eo pipefail + +# Constants +readonly PYTHON_VERSION="3.6" +# Test driver +readonly TEST_DRIVER_REPO_NAME="grpc" +readonly TEST_DRIVER_REPO_URL="https://github.com/grpc/grpc.git" +readonly TEST_DRIVER_BRANCH="${TEST_DRIVER_BRANCH:-master}" +readonly TEST_DRIVER_PATH="tools/run_tests/xds_k8s_test_driver" +readonly TEST_DRIVER_PROTOS_PATH="src/proto/grpc/testing" + +####################################### +# Run command end report its exit code. Doesn't exit on non-zero exit code. +# Globals: +# None +# Arguments: +# Command to execute +# Outputs: +# Writes the output of given command to stdout, stderr +####################################### +run_ignore_exit_code() { + local exit_code=-1 + "$@" || exit_code=$? + echo "Exit code: ${exit_code}" +} + +####################################### +# Parses information about git repository at given path to global variables. 
+# Globals: +# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build +# GIT_COMMIT: Populated with the SHA-1 of git commit being built +# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built +# Arguments: +# Git source dir +####################################### +parse_src_repo_git_info() { + local src_dir="${SRC_DIR:?SRC_DIR must be set}" + readonly GIT_ORIGIN_URL=$(git -C "${src_dir}" remote get-url origin) + readonly GIT_COMMIT=$(git -C "${src_dir}" rev-parse HEAD) + readonly GIT_COMMIT_SHORT=$(git -C "${src_dir}" rev-parse --short HEAD) +} + +####################################### +# List GCR image tags matching given tag name. +# Arguments: +# Image name +# Tag name +# Outputs: +# Writes the table with the list of found tags to stdout. +# If no tags found, the output is an empty string. +####################################### +gcloud_gcr_list_image_tags() { + gcloud container images list-tags --format="table[box](tags,digest,timestamp.date())" --filter="tags:$2" "$1" +} + +####################################### +# A helper to execute `gcloud -q components update`. +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud` command to stdout, stderr +####################################### +gcloud_update() { + echo "Update gcloud components:" + gcloud -q components update +} + +####################################### +# Create kube context authenticated with GKE cluster, saves context name. 
+# to KUBE_CONTEXT +# Globals: +# GKE_CLUSTER_NAME +# GKE_CLUSTER_ZONE +# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud` command to stdout, stderr +# Writes authorization info $HOME/.kube/config +####################################### +gcloud_get_cluster_credentials() { + gcloud container clusters get-credentials "${GKE_CLUSTER_NAME}" --zone "${GKE_CLUSTER_ZONE}" + readonly KUBE_CONTEXT="$(kubectl config current-context)" +} + +####################################### +# Clone the source code of the test driver to $TEST_DRIVER_REPO_DIR, unless +# given folder exists. +# Globals: +# TEST_DRIVER_REPO_URL +# TEST_DRIVER_BRANCH +# TEST_DRIVER_REPO_DIR: path to the repo containing the test driver +# TEST_DRIVER_REPO_DIR_USE_EXISTING: set non-empty value to use exiting +# clone of the driver repo located at $TEST_DRIVER_REPO_DIR. +# Useful for debugging the build script locally. +# Arguments: +# None +# Outputs: +# Writes the output of `git` command to stdout, stderr +# Writes driver source code to $TEST_DRIVER_REPO_DIR +####################################### +test_driver_get_source() { + if [[ -n "${TEST_DRIVER_REPO_DIR_USE_EXISTING}" && -d "${TEST_DRIVER_REPO_DIR}" ]]; then + echo "Using exiting driver directory: ${TEST_DRIVER_REPO_DIR}." + else + echo "Cloning driver to ${TEST_DRIVER_REPO_URL} branch ${TEST_DRIVER_BRANCH} to ${TEST_DRIVER_REPO_DIR}" + git clone -b "${TEST_DRIVER_BRANCH}" --depth=1 "${TEST_DRIVER_REPO_URL}" "${TEST_DRIVER_REPO_DIR}" + fi +} + +####################################### +# Install Python modules from required in $TEST_DRIVER_FULL_DIR/requirements.txt +# to Python virtual environment. Creates and activates Python venv if necessary. 
+# Globals: +# TEST_DRIVER_FULL_DIR +# PYTHON_VERSION +# Arguments: +# None +# Outputs: +# Writes the output of `python`, `pip` commands to stdout, stderr +# Writes the list of installed modules to stdout +####################################### +test_driver_pip_install() { + echo "Install python dependencies" + cd "${TEST_DRIVER_FULL_DIR}" + + # Create and activate virtual environment unless already using one + if [[ -z "${VIRTUAL_ENV}" ]]; then + local venv_dir="${TEST_DRIVER_FULL_DIR}/venv" + if [[ -d "${venv_dir}" ]]; then + echo "Found python virtual environment directory: ${venv_dir}" + else + echo "Creating python virtual environment: ${venv_dir}" + "python${PYTHON_VERSION}" -m venv "${venv_dir}" + fi + # Intentional: No need to check python venv activate script. + # shellcheck source=/dev/null + source "${venv_dir}/bin/activate" + fi + + pip install -r requirements.txt + echo "Installed Python packages:" + pip list +} + +####################################### +# Compile proto-files needed for the test driver +# Globals: +# TEST_DRIVER_REPO_DIR +# TEST_DRIVER_FULL_DIR +# TEST_DRIVER_PROTOS_PATH +# Arguments: +# None +# Outputs: +# Writes the output of `python -m grpc_tools.protoc` to stdout, stderr +# Writes the list if compiled python code to stdout +# Writes compiled python code with proto messages and grpc services to +# $TEST_DRIVER_FULL_DIR/src/proto +####################################### +test_driver_compile_protos() { + declare -a protos + protos=( + "${TEST_DRIVER_PROTOS_PATH}/test.proto" + "${TEST_DRIVER_PROTOS_PATH}/messages.proto" + "${TEST_DRIVER_PROTOS_PATH}/empty.proto" + ) + echo "Generate python code from grpc.testing protos: ${protos[*]}" + cd "${TEST_DRIVER_REPO_DIR}" + python -m grpc_tools.protoc \ + --proto_path=. 
\ + --python_out="${TEST_DRIVER_FULL_DIR}" \ + --grpc_python_out="${TEST_DRIVER_FULL_DIR}" \ + "${protos[@]}" + local protos_out_dir="${TEST_DRIVER_FULL_DIR}/${TEST_DRIVER_PROTOS_PATH}" + echo "Generated files ${protos_out_dir}:" + ls -Fl "${protos_out_dir}" +} + +####################################### +# Installs the test driver and it's requirements. +# https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#installation +# Globals: +# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing +# the test driver +# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code +# Arguments: +# The directory for test driver's source code +# Outputs: +# Writes the output to stdout, stderr +####################################### +test_driver_install() { + readonly TEST_DRIVER_REPO_DIR="${1:?Usage test_driver_install TEST_DRIVER_REPO_DIR}" + readonly TEST_DRIVER_FULL_DIR="${TEST_DRIVER_REPO_DIR}/${TEST_DRIVER_PATH}" + test_driver_get_source + test_driver_pip_install + test_driver_compile_protos +} + +####################################### +# Outputs Kokoro image version and Ubuntu's lsb_release +# Arguments: +# None +# Outputs: +# Writes the output to stdout +####################################### +kokoro_print_version() { + echo "Kokoro VM version:" + if [[ -f /VERSION ]]; then + cat /VERSION + fi + run_ignore_exit_code lsb_release -a +} + +####################################### +# Report extra information about the job via sponge properties. 
+# Globals: +# KOKORO_ARTIFACTS_DIR +# GIT_ORIGIN_URL +# GIT_COMMIT_SHORT +# TESTGRID_EXCLUDE +# Arguments: +# None +# Outputs: +# Writes the output to stdout +# Writes job properties to $KOKORO_ARTIFACTS_DIR/custom_sponge_config.csv +####################################### +kokoro_write_sponge_properties() { + # CSV format: "property_name","property_value" + # Bump TESTS_FORMAT_VERSION when reported test name changed enough to when it + # makes more sense to discard previous test results from a testgrid board. + # Use GIT_ORIGIN_URL to exclude test runs executed against repo forks from + # testgrid reports. + cat >"${KOKORO_ARTIFACTS_DIR}/custom_sponge_config.csv" < Date: Tue, 25 May 2021 16:06:58 -0700 Subject: [PATCH 105/998] mergeable: update relnotes regex (#4488) --- .github/mergeable.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/mergeable.yml b/.github/mergeable.yml index b42ef7b5e8a6..187de98277b3 100644 --- a/.github/mergeable.yml +++ b/.github/mergeable.yml @@ -7,7 +7,15 @@ mergeable: regex: '^Type:' - do: description must_include: - regex: 'RELEASE NOTES:' + # Allow: + # RELEASE NOTES: none (case insensitive) + # + # RELEASE NOTES: N/A (case insensitive) + # + # RELEASE NOTES: + # * + regex: '^RELEASE NOTES:\s*([Nn][Oo][Nn][Ee]|[Nn]/[Aa]|\n(\*|-)\s*.+)$' + regex_flag: 'm' - do: milestone must_include: regex: 'Release$' From 194dcc921a94aa12fc04e2b3262ac3e4f69142b1 Mon Sep 17 00:00:00 2001 From: dkkb <82504881+dkkb@users.noreply.github.com> Date: Thu, 27 May 2021 02:17:27 +0800 Subject: [PATCH 106/998] example: improve hello world server with starting msg (#4468) --- examples/helloworld/greeter_server/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/helloworld/greeter_server/main.go b/examples/helloworld/greeter_server/main.go index 15604f9fc1f4..4d077db92cfb 100644 --- a/examples/helloworld/greeter_server/main.go +++ b/examples/helloworld/greeter_server/main.go @@ -50,6 +50,7 @@ 
func main() { } s := grpc.NewServer() pb.RegisterGreeterServer(s, &server{}) + log.Printf("server listening at %v", lis.Addr()) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } From 34bd6fbb8e3b570fdbda35e5537e389f7942b406 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 26 May 2021 14:20:25 -0400 Subject: [PATCH 107/998] xds: add RBAC Engine (#4471) * Added RBAC Engine --- internal/xds/matcher/matcher_header.go | 29 ++ internal/xds/rbac/matchers.go | 386 ++++++++++++++++ internal/xds/rbac/rbac_engine.go | 82 ++++ internal/xds/rbac/rbac_engine_test.go | 606 +++++++++++++++++++++++++ 4 files changed, 1103 insertions(+) create mode 100644 internal/xds/rbac/matchers.go create mode 100644 internal/xds/rbac/rbac_engine.go create mode 100644 internal/xds/rbac/rbac_engine_test.go diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go index f9c0322179e8..9ae6ffc2288b 100644 --- a/internal/xds/matcher/matcher_header.go +++ b/internal/xds/matcher/matcher_header.go @@ -203,6 +203,35 @@ func (hsm *HeaderSuffixMatcher) String() string { return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) } +// HeaderContainsMatcher matches on whether the header value contains the +// value passed into this struct. +type HeaderContainsMatcher struct { + key string + contains string +} + +// NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP +// Header key to match on, and contains is the value that the header should +// should contain for a successful match. An empty contains string does not +// work, use HeaderPresentMatcher in that case. +func NewHeaderContainsMatcher(key string, contains string) *HeaderContainsMatcher { + return &HeaderContainsMatcher{key: key, contains: contains} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderContainsMatcher. 
+func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hcm.key) + if !ok { + return false + } + return strings.Contains(v, hcm.contains) +} + +func (hcm *HeaderContainsMatcher) String() string { + return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains) +} + // InvertMatcher inverts the match result of the underlying header matcher. type InvertMatcher struct { m HeaderMatcherInterface diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go new file mode 100644 index 000000000000..12f6e6cd04b4 --- /dev/null +++ b/internal/xds/rbac/matchers.go @@ -0,0 +1,386 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "errors" + "fmt" + "net" + "regexp" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3route_componentspb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + internalmatcher "google.golang.org/grpc/internal/xds/matcher" +) + +// matcher is an interface that takes data about incoming RPC's and returns +// whether it matches with whatever matcher implements this interface. 
+type matcher interface { + match(data *RPCData) bool +} + +// policyMatcher helps determine whether an incoming RPC call matches a policy. +// A policy is a logical role (e.g. Service Admin), which is comprised of +// permissions and principals. A principal is an identity (or identities) for a +// downstream subject which are assigned the policy (role), and a permission is +// an action(s) that a principal(s) can take. A policy matches if both a +// permission and a principal match, which will be determined by the child or +// permissions and principal matchers. policyMatcher implements the matcher +// interface. +type policyMatcher struct { + permissions *orMatcher + principals *orMatcher +} + +func newPolicyMatcher(policy *v3rbacpb.Policy) (*policyMatcher, error) { + permissions, err := matchersFromPermissions(policy.Permissions) + if err != nil { + return nil, err + } + principals, err := matchersFromPrincipals(policy.Principals) + if err != nil { + return nil, err + } + return &policyMatcher{ + permissions: &orMatcher{matchers: permissions}, + principals: &orMatcher{matchers: principals}, + }, nil +} + +func (pm *policyMatcher) match(data *RPCData) bool { + // A policy matches if and only if at least one of its permissions match the + // action taking place AND at least one if its principals match the + // downstream peer. + return pm.permissions.match(data) && pm.principals.match(data) +} + +// matchersFromPermissions takes a list of permissions (can also be +// a single permission, e.g. from a not matcher which is logically !permission) +// and returns a list of matchers which correspond to that permission. This will +// be called in many instances throughout the initial construction of the RBAC +// engine from the AND and OR matchers and also from the NOT matcher. 
+func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, error) { + var matchers []matcher + for _, permission := range permissions { + switch permission.GetRule().(type) { + case *v3rbacpb.Permission_AndRules: + mList, err := matchersFromPermissions(permission.GetAndRules().Rules) + if err != nil { + return nil, err + } + matchers = append(matchers, &andMatcher{matchers: mList}) + case *v3rbacpb.Permission_OrRules: + mList, err := matchersFromPermissions(permission.GetOrRules().Rules) + if err != nil { + return nil, err + } + matchers = append(matchers, &orMatcher{matchers: mList}) + case *v3rbacpb.Permission_Any: + matchers = append(matchers, &alwaysMatcher{}) + case *v3rbacpb.Permission_Header: + m, err := newHeaderMatcher(permission.GetHeader()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_UrlPath: + m, err := newURLPathMatcher(permission.GetUrlPath()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_DestinationIp: + m, err := newDestinationIPMatcher(permission.GetDestinationIp()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Permission_DestinationPort: + matchers = append(matchers, newPortMatcher(permission.GetDestinationPort())) + case *v3rbacpb.Permission_NotRule: + mList, err := matchersFromPermissions([]*v3rbacpb.Permission{{Rule: permission.GetNotRule().Rule}}) + if err != nil { + return nil, err + } + matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) + case *v3rbacpb.Permission_Metadata: + // Not supported in gRPC RBAC currently - a permission typed as + // Metadata in the initial config will be a no-op. + case *v3rbacpb.Permission_RequestedServerName: + // Not supported in gRPC RBAC currently - a permission typed as + // requested server name in the initial config will be a no-op. 
+ } + } + return matchers, nil +} + +func matchersFromPrincipals(principals []*v3rbacpb.Principal) ([]matcher, error) { + var matchers []matcher + for _, principal := range principals { + switch principal.GetIdentifier().(type) { + case *v3rbacpb.Principal_AndIds: + mList, err := matchersFromPrincipals(principal.GetAndIds().Ids) + if err != nil { + return nil, err + } + matchers = append(matchers, &andMatcher{matchers: mList}) + case *v3rbacpb.Principal_OrIds: + mList, err := matchersFromPrincipals(principal.GetOrIds().Ids) + if err != nil { + return nil, err + } + matchers = append(matchers, &orMatcher{matchers: mList}) + case *v3rbacpb.Principal_Any: + matchers = append(matchers, &alwaysMatcher{}) + case *v3rbacpb.Principal_Authenticated_: + authenticatedMatcher, err := newAuthenticatedMatcher(principal.GetAuthenticated()) + if err != nil { + return nil, err + } + matchers = append(matchers, authenticatedMatcher) + case *v3rbacpb.Principal_DirectRemoteIp: + m, err := newSourceIPMatcher(principal.GetDirectRemoteIp()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Principal_Header: + // Do we need an error here? + m, err := newHeaderMatcher(principal.GetHeader()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Principal_UrlPath: + m, err := newURLPathMatcher(principal.GetUrlPath()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) + case *v3rbacpb.Principal_NotId: + mList, err := matchersFromPrincipals([]*v3rbacpb.Principal{{Identifier: principal.GetNotId().Identifier}}) + if err != nil { + return nil, err + } + matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) + case *v3rbacpb.Principal_SourceIp: + // The source ip principal identifier is deprecated. Thus, a + // principal typed as a source ip in the identifier will be a no-op. + // The config should use DirectRemoteIp instead. 
+ case *v3rbacpb.Principal_RemoteIp: + // Not supported in gRPC RBAC currently - a principal typed as + // Remote Ip in the initial config will be a no-op. + case *v3rbacpb.Principal_Metadata: + // Not supported in gRPC RBAC currently - a principal typed as + // Metadata in the initial config will be a no-op. + } + } + return matchers, nil +} + +// orMatcher is a matcher where it successfully matches if one of it's +// children successfully match. It also logically represents a principal or +// permission, but can also be it's own entity further down the tree of +// matchers. orMatcher implements the matcher interface. +type orMatcher struct { + matchers []matcher +} + +func (om *orMatcher) match(data *RPCData) bool { + // Range through child matchers and pass in data about incoming RPC, and + // only one child matcher has to match to be logically successful. + for _, m := range om.matchers { + if m.match(data) { + return true + } + } + return false +} + +// andMatcher is a matcher that is successful if every child matcher +// matches. andMatcher implements the matcher interface. +type andMatcher struct { + matchers []matcher +} + +func (am *andMatcher) match(data *RPCData) bool { + for _, m := range am.matchers { + if !m.match(data) { + return false + } + } + return true +} + +// alwaysMatcher is a matcher that will always match. This logically +// represents an any rule for a permission or a principal. alwaysMatcher +// implements the matcher interface. +type alwaysMatcher struct { +} + +func (am *alwaysMatcher) match(data *RPCData) bool { + return true +} + +// notMatcher is a matcher that nots an underlying matcher. notMatcher +// implements the matcher interface. +type notMatcher struct { + matcherToNot matcher +} + +func (nm *notMatcher) match(data *RPCData) bool { + return !nm.matcherToNot.match(data) +} + +// headerMatcher is a matcher that matches on incoming HTTP Headers present +// in the incoming RPC. headerMatcher implements the matcher interface. 
+type headerMatcher struct { + matcher internalmatcher.HeaderMatcherInterface +} + +func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) (*headerMatcher, error) { + var m internalmatcher.HeaderMatcherInterface + switch headerMatcherConfig.HeaderMatchSpecifier.(type) { + case *v3route_componentspb.HeaderMatcher_ExactMatch: + m = internalmatcher.NewHeaderExactMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetExactMatch()) + case *v3route_componentspb.HeaderMatcher_SafeRegexMatch: + regex, err := regexp.Compile(headerMatcherConfig.GetSafeRegexMatch().Regex) + if err != nil { + return nil, err + } + m = internalmatcher.NewHeaderRegexMatcher(headerMatcherConfig.Name, regex) + case *v3route_componentspb.HeaderMatcher_RangeMatch: + m = internalmatcher.NewHeaderRangeMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetRangeMatch().Start, headerMatcherConfig.GetRangeMatch().End) + case *v3route_componentspb.HeaderMatcher_PresentMatch: + m = internalmatcher.NewHeaderPresentMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPresentMatch()) + case *v3route_componentspb.HeaderMatcher_PrefixMatch: + m = internalmatcher.NewHeaderPrefixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPrefixMatch()) + case *v3route_componentspb.HeaderMatcher_SuffixMatch: + m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch()) + case *v3route_componentspb.HeaderMatcher_ContainsMatch: + m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch()) + default: + return nil, errors.New("unknown header matcher type") + } + if headerMatcherConfig.InvertMatch { + m = internalmatcher.NewInvertMatcher(m) + } + return &headerMatcher{matcher: m}, nil +} + +func (hm *headerMatcher) match(data *RPCData) bool { + return hm.matcher.Match(data.MD) +} + +// urlPathMatcher matches on the URL Path of the incoming RPC. 
In gRPC, this +// logically maps to the full method name the RPC is calling on the server side. +// urlPathMatcher implements the matcher interface. +type urlPathMatcher struct { + stringMatcher internalmatcher.StringMatcher +} + +func newURLPathMatcher(pathMatcher *v3matcherpb.PathMatcher) (*urlPathMatcher, error) { + stringMatcher, err := internalmatcher.StringMatcherFromProto(pathMatcher.GetPath()) + if err != nil { + return nil, err + } + return &urlPathMatcher{stringMatcher: stringMatcher}, nil +} + +func (upm *urlPathMatcher) match(data *RPCData) bool { + return upm.stringMatcher.Match(data.FullMethod) +} + +// sourceIPMatcher and destinationIPMatcher both are matchers that match against +// a CIDR Range. Two different matchers are needed as the source and ip address +// come from different parts of the data about incoming RPC's passed in. +// Matching a CIDR Range means to determine whether the IP Address falls within +// the CIDR Range or not. They both implement the matcher interface. +type sourceIPMatcher struct { + // ipNet represents the CidrRange that this matcher was configured with. + // This is what will source and destination IP's will be matched against. + ipNet *net.IPNet +} + +func newSourceIPMatcher(cidrRange *v3corepb.CidrRange) (*sourceIPMatcher, error) { + // Convert configuration to a cidrRangeString, as Go standard library has + // methods that parse cidr string. 
+ cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &sourceIPMatcher{ipNet: ipNet}, nil +} + +func (sim *sourceIPMatcher) match(data *RPCData) bool { + return sim.ipNet.Contains(net.IP(net.ParseIP(data.PeerInfo.Addr.String()))) +} + +type destinationIPMatcher struct { + ipNet *net.IPNet +} + +func newDestinationIPMatcher(cidrRange *v3corepb.CidrRange) (*destinationIPMatcher, error) { + cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &destinationIPMatcher{ipNet: ipNet}, nil +} + +func (dim *destinationIPMatcher) match(data *RPCData) bool { + return dim.ipNet.Contains(net.IP(net.ParseIP(data.DestinationAddr.String()))) +} + +// portMatcher matches on whether the destination port of the RPC matches the +// destination port this matcher was instantiated with. portMatcher +// implements the matcher interface. +type portMatcher struct { + destinationPort uint32 +} + +func newPortMatcher(destinationPort uint32) *portMatcher { + return &portMatcher{destinationPort: destinationPort} +} + +func (pm *portMatcher) match(data *RPCData) bool { + return data.DestinationPort == pm.destinationPort +} + +// authenticatedMatcher matches on the name of the Principal. If set, the URI +// SAN or DNS SAN in that order is used from the certificate, otherwise the +// subject field is used. If unset, it applies to any user that is +// authenticated. authenticatedMatcher implements the matcher interface. 
+type authenticatedMatcher struct { + stringMatcher internalmatcher.StringMatcher +} + +func newAuthenticatedMatcher(authenticatedMatcherConfig *v3rbacpb.Principal_Authenticated) (*authenticatedMatcher, error) { + stringMatcher, err := internalmatcher.StringMatcherFromProto(authenticatedMatcherConfig.PrincipalName) + if err != nil { + return nil, err + } + return &authenticatedMatcher{stringMatcher: stringMatcher}, nil +} + +func (am *authenticatedMatcher) match(data *RPCData) bool { + return am.stringMatcher.Match(data.PrincipalName) +} diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go new file mode 100644 index 000000000000..29c96d9fcf20 --- /dev/null +++ b/internal/xds/rbac/rbac_engine.go @@ -0,0 +1,82 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package rbac provides service-level and method-level access control for a +// service. +package rbac + +import ( + "net" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// Engine is used for matching incoming RPCs to policies. +type Engine struct { + policies map[string]*policyMatcher +} + +// NewEngine creates an RBAC Engine based on the contents of policy. If the +// config is invalid (and fails to build underlying tree of matchers), NewEngine +// will return an error. 
This created RBAC Engine will not persist the action +// present in the policy, and will leave up to caller to handle the action that +// is attached to the config. +func NewEngine(policy *v3rbacpb.RBAC) (*Engine, error) { + policies := make(map[string]*policyMatcher) + for name, config := range policy.Policies { + matcher, err := newPolicyMatcher(config) + if err != nil { + return nil, err + } + policies[name] = matcher + } + return &Engine{policies: policies}, nil +} + +// RPCData wraps data pulled from an incoming RPC that the RBAC engine needs to +// find a matching policy. +type RPCData struct { + // MD is the HTTP Headers that are present in the incoming RPC. + MD metadata.MD + // PeerInfo is information about the downstream peer. + PeerInfo *peer.Peer + // FullMethod is the method name being called on the upstream service. + FullMethod string + // DestinationPort is the port that the RPC is being sent to on the + // server. + DestinationPort uint32 + // DestinationAddr is the address that the RPC is being sent to. + DestinationAddr net.Addr + // PrincipalName is the name of the downstream principal. If set, the URI + // SAN or DNS SAN in that order is used from the certificate, otherwise the + // subject field is used. If unset, it applies to any user that is + // authenticated. + PrincipalName string +} + +// FindMatchingPolicy determines if an incoming RPC matches a policy. On a +// successful match, it returns the name of the matching policy and a true +// boolean to specify that there was a matching policy found. +func (r *Engine) FindMatchingPolicy(data *RPCData) (string, bool) { + for policy, matcher := range r.policies { + if matcher.match(data) { + return policy, true + } + } + return "", false +} diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go new file mode 100644 index 000000000000..47ed5a1342e0 --- /dev/null +++ b/internal/xds/rbac/rbac_engine_test.go @@ -0,0 +1,606 @@ +/* + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "testing" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/peer" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type addr struct { + ipAddress string +} + +func (addr) Network() string { return "" } +func (a *addr) String() string { return a.ipAddress } + +// TestRBACEngineConstruction tests the construction of the RBAC Engine. Due to +// some types of RBAC configuration being logically wrong and returning an error +// rather than successfully constructing the RBAC Engine, this test tests both +// RBAC Configurations deemed successful and also RBAC Configurations that will +// raise errors. 
+func (s) TestRBACEngineConstruction(t *testing.T) { + tests := []struct { + name string + rbacConfig *v3rbacpb.RBAC + wantErr bool + }{ + { + name: "TestSuccessCaseAnyMatch", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + { + name: "TestSuccessCaseSimplePolicy", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + { + name: "TestSuccessCaseEnvoyExample", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/admin"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/superuser"}}}}}, + }, + }, + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: 
&v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 80}}, + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 443}}, + }, + }}}, + }, + }, + }, + }, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + { + name: "TestSourceIpMatcherSuccess", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + }, + { + name: "TestSourceIpMatcherFailure", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "TestDestinationIpMatcherSuccess", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: 
&v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + { + name: "TestDestinationIpMatcherFailure", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "TestMatcherToNotPolicy", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "not-secret-content": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_NotRule{NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + { + name: "TestMatcherToNotPrincipal", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "not-from-certain-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_NotId{NotId: &v3rbacpb.Principal{Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}}}, + }, + }, + }, + }, + }, + { + name: "TestPrincipalProductViewer", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "product-viewer": { + Permissions: 
[]*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + { + Identifier: &v3rbacpb.Principal_AndIds{AndIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/books"}}}}}}, + {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/cars"}}}}}}, + }, + }}}, + }}}, + }, + }, + }, + }, + }, + }, + { + name: "TestCertainHeaders", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "certain-headers": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + { + Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "GET"}}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_RangeMatch{RangeMatch: &v3typepb.Int64Range{ + Start: 0, + End: 64, + }}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: 
&v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PresentMatch{PresentMatch: true}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ContainsMatch{ContainsMatch: "GET"}}}}, + }}}, + }, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if _, err := NewEngine(test.rbacConfig); (err != nil) != test.wantErr { + t.Fatalf("NewEngine(%+v) returned err: %v, wantErr: %v", test.rbacConfig, err, test.wantErr) + } + }) + } +} + +// TestRBACEngine tests the RBAC Engine by configuring the engine in different +// scenarios. After configuring the engine in a certain way, this test pings the +// engine with different kinds of data representing incoming RPC's, and verifies +// that it works as expected. +func (s) TestRBACEngine(t *testing.T) { + tests := []struct { + name string + rbacConfig *v3rbacpb.RBAC + rbacQueries []struct { + rpcData *RPCData + wantMatchingPolicyName string + wantMatch bool + } + }{ + // TestSuccessCaseAnyMatch tests an RBAC Engine instantiated with a + // config with a policy with any rules for both permissions and + // principals, meaning that any data about incoming RPC's that the RBAC + // Engine is queried with should match that policy. 
+ { + name: "TestSuccessCaseAnyMatch", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + rbacQueries: // Any incoming RPC should match with the anyone policy + []struct { + rpcData *RPCData + wantMatchingPolicyName string + wantMatch bool + }{ + { + rpcData: &RPCData{ + FullMethod: "some method", + }, + wantMatchingPolicyName: "anyone", + wantMatch: true, + }, + { + rpcData: &RPCData{ + DestinationPort: 100, + }, + wantMatchingPolicyName: "anyone", + wantMatch: true, + }, + }, + }, + // TestSuccessCaseSimplePolicy is a test that tests a simple policy that + // only allows an rpc to proceed if the rpc is calling a certain path + // and port. + { + name: "TestSuccessCaseSimplePolicy", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *RPCData + wantMatchingPolicyName string + wantMatch bool + }{ + // This RPC should match with the local host fan policy. + { + rpcData: &RPCData{ + MD: map[string][]string{ + ":path": {"localhost-fan-page"}, + }, + DestinationPort: 8080, + }, + wantMatchingPolicyName: "localhost-fan", + wantMatch: true}, + // This RPC shouldn't match with the local host fan policy. 
+ { + rpcData: &RPCData{ + DestinationPort: 100, + }, + wantMatchingPolicyName: ""}, + }, + }, + // TestSuccessCaseEnvoyExample is a test based on the example provided + // in the EnvoyProxy docs. The RBAC Config contains two policies, + // service admin and product viewer, that provides an example of a real + // RBAC Config that might be configured for a given for a given backend + // service. + { + name: "TestSuccessCaseEnvoyExample", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/admin"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/superuser"}}}}}, + }, + }, + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + { + Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 80}}, + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 443}}, + }, + }}}, + }, + }, + }, + }, + }, + 
Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *RPCData + wantMatchingPolicyName string + wantMatch bool + }{ + // This incoming RPC Call should match with the service admin + // policy. + { + rpcData: &RPCData{ + FullMethod: "some method", + PrincipalName: "cluster.local/ns/default/sa/admin", + }, + wantMatchingPolicyName: "service-admin", + wantMatch: true}, + // This incoming RPC Call should match with the product + // viewer policy. + { + rpcData: &RPCData{ + DestinationPort: 80, + MD: map[string][]string{ + ":method": {"GET"}, + }, + FullMethod: "/products", + }, + wantMatchingPolicyName: "product-viewer", + wantMatch: true}, + // These incoming RPC calls should not match any policy - + // represented by an empty matching policy name and false being + // returned. + { + rpcData: &RPCData{ + DestinationPort: 100, + }, + wantMatchingPolicyName: ""}, + { + rpcData: &RPCData{ + FullMethod: "get-product-list", + DestinationPort: 8080, + }, + wantMatchingPolicyName: ""}, + { + rpcData: &RPCData{ + PrincipalName: "localhost", + DestinationPort: 8080, + }, + wantMatchingPolicyName: ""}, + }, + }, + { + name: "TestNotMatcher", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "not-secret-content": { + Permissions: []*v3rbacpb.Permission{ + { + Rule: &v3rbacpb.Permission_NotRule{ + NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}, + }, + }, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *RPCData + wantMatchingPolicyName string + wantMatch bool + }{ + // This incoming RPC Call should match with the not-secret-content policy. 
+ { + rpcData: &RPCData{ + FullMethod: "/regular-content", + }, + wantMatchingPolicyName: "not-secret-content", + wantMatch: true, + }, + // This incoming RPC Call shouldn't match with the not-secret-content policy. + { + rpcData: &RPCData{ + FullMethod: "/secret-content", + }, + wantMatchingPolicyName: "", + wantMatch: false, + }, + }, + }, + { + name: "TestSourceIpMatcher", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *RPCData + wantMatchingPolicyName string + wantMatch bool + }{ + // This incoming RPC Call should match with the certain-source-ip policy. + { + rpcData: &RPCData{ + PeerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantMatchingPolicyName: "certain-source-ip", + wantMatch: true, + }, + // This incoming RPC Call shouldn't match with the certain-source-ip policy. 
+ { + rpcData: &RPCData{ + PeerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantMatchingPolicyName: "", + wantMatch: false, + }, + }, + }, + { + name: "TestDestinationIpMatcher", + rbacConfig: &v3rbacpb.RBAC{ + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *RPCData + wantMatchingPolicyName string + wantMatch bool + }{ + // This incoming RPC Call should match with the certain-destination-ip policy. + { + rpcData: &RPCData{ + DestinationAddr: &addr{ipAddress: "0.0.0.10"}, + }, + wantMatchingPolicyName: "certain-destination-ip", + wantMatch: true, + }, + // This incoming RPC Call shouldn't match with the certain-destination-ip policy. + { + rpcData: &RPCData{ + DestinationAddr: &addr{ipAddress: "10.0.0.0"}, + }, + wantMatchingPolicyName: "", + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Instantiate the rbacEngine with different configurations that + // interesting to test and to query. + rbacEngine, err := NewEngine(test.rbacConfig) + if err != nil { + t.Fatalf("Error constructing RBAC Engine: %v", err) + } + // Query that created RBAC Engine with different args to see if the + // RBAC Engine configured a certain way works as intended. + for _, queryToRBACEngine := range test.rbacQueries { + // The matchingPolicyName returned will be empty in the case of + // no matching policy. Thus, matchingPolicyName can also be used + // to test the "error" condition of no matching policies. 
+ matchingPolicyName, matchingPolicyFound := rbacEngine.FindMatchingPolicy(queryToRBACEngine.rpcData) + if matchingPolicyFound != queryToRBACEngine.wantMatch || matchingPolicyName != queryToRBACEngine.wantMatchingPolicyName { + t.Errorf("FindMatchingPolicy(%+v) returned (%v, %v), want (%v, %v)", queryToRBACEngine.rpcData, matchingPolicyName, matchingPolicyFound, queryToRBACEngine.wantMatchingPolicyName, queryToRBACEngine.wantMatch) + } + } + }) + } +} From 2de42fcbbce31dcdf14ee24836a713b65fc06dae Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 26 May 2021 15:35:27 -0700 Subject: [PATCH 108/998] kokoro: Specify the correct path to the build config (#4495) --- test/kokoro/xds_k8s.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/xds_k8s.cfg b/test/kokoro/xds_k8s.cfg index e9a82b002b9e..4d5e019991f6 100644 --- a/test/kokoro/xds_k8s.cfg +++ b/test/kokoro/xds_k8s.cfg @@ -1,7 +1,7 @@ # Config file for internal CI # Location of the continuous shell script in repository. -build_file: "grpc-go/test/kokoro/xds-k8s.sh" +build_file: "grpc-go/test/kokoro/xds_k8s.sh" timeout_mins: 120 action { From 8bdcb4c9ab8de15f6a60ebce93b6f4c8d86622ef Mon Sep 17 00:00:00 2001 From: Evan Jones Date: Tue, 1 Jun 2021 11:54:43 -0400 Subject: [PATCH 109/998] client: Clarify that WaitForReady will block for CONNECTING channels (#4477) --- rpc_util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc_util.go b/rpc_util.go index 6db356fa56a7..1831a73e73d3 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -258,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. 
Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if From e5cad3dcff812a49f39c8105ffb5cc4881230e60 Mon Sep 17 00:00:00 2001 From: laststem Date: Wed, 2 Jun 2021 08:50:35 +0900 Subject: [PATCH 110/998] doc: fix broken benchmark dashboard link in README.md (#4503) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3949a683fb58..0e6ae69a5846 100644 --- a/README.md +++ b/README.md @@ -136,6 +136,6 @@ errors. [Go module]: https://github.com/golang/go/wiki/Modules [gRPC]: https://grpc.io [Go gRPC docs]: https://grpc.io/docs/languages/go -[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 [quick start]: https://grpc.io/docs/languages/go/quickstart [go-releases]: https://golang.org/doc/devel/release.html From 3508452162f48011bf36f303f901f4efc50087ec Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 2 Jun 2021 10:48:18 -0700 Subject: [PATCH 111/998] xds: add test-only injection of xds config to client and server (#4476) --- internal/xds/bootstrap.go | 33 ++++++++---- .../balancer/cdsbalancer/cdsbalancer.go | 35 ++++++++++--- .../balancer/clusterimpl/clusterimpl.go | 31 ++++++++--- xds/internal/balancer/edsbalancer/eds.go | 27 +++++++--- xds/internal/balancer/lrs/balancer.go | 27 +++++++--- xds/internal/client/attributes.go | 36 +++++++++++++ xds/internal/client/bootstrap/bootstrap.go | 10 +++- xds/internal/client/singleton.go | 52 ++++++++++++++++++- xds/internal/resolver/xds_resolver.go | 27 +++++++++- .../test/xds_client_integration_test.go | 4 +- xds/internal/test/xds_integration_test.go | 29 +++++++---- .../test/xds_server_integration_test.go 
| 16 +++--- .../test/xds_server_serving_mode_test.go | 2 +- xds/server.go | 11 ++-- xds/server_options.go | 41 ++++++++------- xds/xds.go | 16 +++++- 16 files changed, 307 insertions(+), 90 deletions(-) create mode 100644 xds/internal/client/attributes.go diff --git a/internal/xds/bootstrap.go b/internal/xds/bootstrap.go index a3f80d8f2496..1d74ab46a114 100644 --- a/internal/xds/bootstrap.go +++ b/internal/xds/bootstrap.go @@ -65,11 +65,32 @@ type BootstrapOptions struct { // completed successfully. It is the responsibility of the caller to invoke the // cleanup function at the end of the test. func SetupBootstrapFile(opts BootstrapOptions) (func(), error) { + bootstrapContents, err := BootstrapContents(opts) + if err != nil { + return nil, err + } f, err := ioutil.TempFile("", "test_xds_bootstrap_*") if err != nil { return nil, fmt.Errorf("failed to created bootstrap file: %v", err) } + if err := ioutil.WriteFile(f.Name(), bootstrapContents, 0644); err != nil { + return nil, fmt.Errorf("failed to created bootstrap file: %v", err) + } + logger.Infof("Created bootstrap file at %q with contents: %s\n", f.Name(), bootstrapContents) + + origBootstrapFileName := env.BootstrapFileName + env.BootstrapFileName = f.Name() + return func() { + os.Remove(f.Name()) + env.BootstrapFileName = origBootstrapFileName + }, nil +} + +// BootstrapContents returns the contents to go into a bootstrap file, +// environment, or configuration passed to +// xds.NewXDSResolverWithConfigForTesting. 
+func BootstrapContents(opts BootstrapOptions) ([]byte, error) { cfg := &bootstrapConfig{ XdsServers: []server{ { @@ -100,17 +121,7 @@ func SetupBootstrapFile(opts BootstrapOptions) (func(), error) { if err != nil { return nil, fmt.Errorf("failed to created bootstrap file: %v", err) } - if err := ioutil.WriteFile(f.Name(), bootstrapContents, 0644); err != nil { - return nil, fmt.Errorf("failed to created bootstrap file: %v", err) - } - logger.Infof("Created bootstrap file at %q with contents: %s\n", f.Name(), bootstrapContents) - - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = f.Name() - return func() { - os.Remove(f.Name()) - env.BootstrapFileName = origBootstrapFileName - }, nil + return bootstrapContents, nil } type bootstrapConfig struct { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 0e8b83481ac9..1e6b7a2c08c2 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -59,7 +59,7 @@ var ( // not deal with subConns. return builder.Build(cc, opts), nil } - newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } + newXDSClient func() (xdsClientInterface, error) buildProvider = buildProviderFunc ) @@ -86,12 +86,15 @@ func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. 
b.logger = prefixLogger((b)) b.logger.Infof("Created") - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil + if newXDSClient != nil { + // For tests + client, err := newXDSClient() + if err != nil { + b.logger.Errorf("failed to create xds-client: %v", err) + return nil + } + b.xdsClient = client } - b.xdsClient = client var creds credentials.TransportCredentials switch { @@ -359,7 +362,15 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { lbCfg.LrsLoadReportingServerName = new(string) } + resolverState := resolver.State{} + // Include the xds client for the child LB policies to use. For unit + // tests, b.xdsClient may not be a full *xdsclient.Client, but it will + // always be in production. + if c, ok := b.xdsClient.(*xdsclient.Client); ok { + resolverState = xdsclient.SetClient(resolverState, c) + } ccState := balancer.ClientConnState{ + ResolverState: resolverState, BalancerConfig: lbCfg, } if err := b.edsLB.UpdateClientConnState(ccState); err != nil { @@ -397,7 +408,9 @@ func (b *cdsBalancer) run() { b.edsLB.Close() b.edsLB = nil } - b.xdsClient.Close() + if newXDSClient != nil { + b.xdsClient.Close() + } if b.cachedRoot != nil { b.cachedRoot.Close() } @@ -468,6 +481,14 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro return errBalancerClosed } + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(state.BalancerConfig)) // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 4bd29901d760..e72b867e2f25 100644 --- 
a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -52,7 +52,7 @@ func init() { balancer.Register(clusterImplBB{}) } -var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } +var newXDSClient func() (xdsClientInterface, error) type clusterImplBB struct{} @@ -61,18 +61,22 @@ func (clusterImplBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) ClientConn: cc, bOpts: bOpts, closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), loadWrapper: loadstore.NewWrapper(), pickerUpdateCh: buffer.NewUnbounded(), requestCountMax: defaultRequestCountMax, } b.logger = prefixLogger(b) - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil + if newXDSClient != nil { + // For tests + client, err := newXDSClient() + if err != nil { + b.logger.Errorf("failed to create xds-client: %v", err) + return nil + } + b.xdsC = client } - b.xdsC = client go b.run() b.logger.Infof("Created") @@ -107,6 +111,7 @@ type clusterImplBalancer struct { // synchronized with Close(). mu sync.Mutex closed *grpcsync.Event + done *grpcsync.Event bOpts balancer.BuildOptions logger *grpclog.PrefixLogger @@ -204,6 +209,14 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) } + if cib.xdsC == nil { + c := xdsclient.FromResolverState(s.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + cib.xdsC = c + } + // Update load reporting config. 
This needs to be done before updating the // child policy because we need the loadStore from the updated client to be // passed to the ccWrapper, so that the next picker from the child policy @@ -315,7 +328,10 @@ func (cib *clusterImplBalancer) Close() { cib.childLB.Close() cib.childLB = nil } - cib.xdsC.Close() + if newXDSClient != nil { + cib.xdsC.Close() + } + <-cib.done.Done() cib.logger.Infof("Shutdown") } @@ -363,6 +379,7 @@ type dropConfigs struct { } func (cib *clusterImplBalancer) run() { + defer cib.done.Fire() for { select { case update := <-cib.pickerUpdateCh.Get(): diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 98b1dbaedd46..21e3f43be74e 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -53,7 +53,7 @@ var ( newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lw load.PerClusterReporter, logger *grpclog.PrefixLogger) edsBalancerImplInterface { return newEDSBalancerImpl(cc, opts, enqueueState, lw, logger) } - newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } + newXDSClient func() (xdsClientInterface, error) ) func init() { @@ -76,13 +76,16 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp } x.logger = prefixLogger(x) - client, err := newXDSClient() - if err != nil { - x.logger.Errorf("xds: failed to create xds-client: %v", err) - return nil + if newXDSClient != nil { + // For tests + client, err := newXDSClient() + if err != nil { + x.logger.Errorf("xds: failed to create xds-client: %v", err) + return nil + } + x.xdsClient = client } - x.xdsClient = client x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.loadWrapper, x.logger) x.logger.Infof("Created") go x.run() @@ -172,7 +175,9 @@ func (x *edsBalancer) run() { x.edsImpl.updateState(u.priority, u.s) case <-x.closed.Done(): x.cancelWatch() - 
x.xdsClient.Close() + if newXDSClient != nil { + x.xdsClient.Close() + } x.edsImpl.close() x.logger.Infof("Shutdown") x.done.Fire() @@ -380,6 +385,14 @@ func (x *edsBalancer) ResolverError(err error) { } func (x *edsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + if x.xdsClient == nil { + c := xdsclient.FromResolverState(s.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + x.xdsClient = c + } + select { case x.grpcUpdate <- &s: case <-x.closed.Done(): diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index 0642c54ed111..5b044f480345 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -36,7 +36,7 @@ func init() { balancer.Register(&lrsBB{}) } -var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } +var newXDSClient func() (xdsClientInterface, error) // Name is the name of the LRS balancer. const Name = "lrs_experimental" @@ -51,12 +51,15 @@ func (l *lrsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balanc b.logger = prefixLogger(b) b.logger.Infof("Created") - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil + if newXDSClient != nil { + // For tests + client, err := newXDSClient() + if err != nil { + b.logger.Errorf("failed to create xds-client: %v", err) + return nil + } + b.client = newXDSClientWrapper(client) } - b.client = newXDSClientWrapper(client) return b } @@ -87,6 +90,14 @@ func (b *lrsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } + if b.client == nil { + c := xdsclient.FromResolverState(s.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.client = newXDSClientWrapper(c) + } + // Update load reporting config or xds client. 
This needs to be done before // updating the child policy because we need the loadStore from the updated // client to be passed to the ccWrapper. @@ -245,5 +256,7 @@ func (w *xdsClientWrapper) close() { w.cancelLoadReport() w.cancelLoadReport = nil } - w.c.Close() + if newXDSClient != nil { + w.c.Close() + } } diff --git a/xds/internal/client/attributes.go b/xds/internal/client/attributes.go new file mode 100644 index 000000000000..50b988245291 --- /dev/null +++ b/xds/internal/client/attributes.go @@ -0,0 +1,36 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package client + +import "google.golang.org/grpc/resolver" + +type clientKeyType string + +const clientKey = clientKeyType("grpc.xds.internal.client.Client") + +// FromResolverState returns the Client from state, or nil if not present. +func FromResolverState(state resolver.State) *Client { + cs, _ := state.Attributes.Value(clientKey).(*Client) + return cs +} + +// SetClient sets c in state and returns the new state. 
+func SetClient(state resolver.State, c *Client) resolver.State { + state.Attributes = state.Attributes.WithValues(clientKey, c) + return state +} diff --git a/xds/internal/client/bootstrap/bootstrap.go b/xds/internal/client/bootstrap/bootstrap.go index a3fb5c0816b4..dcf030631603 100644 --- a/xds/internal/client/bootstrap/bootstrap.go +++ b/xds/internal/client/bootstrap/bootstrap.go @@ -160,13 +160,19 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // fields left unspecified, in which case the caller should use some sane // defaults. func NewConfig() (*Config, error) { - config := &Config{} - data, err := bootstrapConfigFromEnvVariable() if err != nil { return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) } logger.Debugf("Bootstrap content: %s", data) + return NewConfigFromContents(data) +} + +// NewConfigFromContents returns a new Config using the specified bootstrap +// file contents instead of reading the environment variable. This is only +// suitable for testing purposes. +func NewConfigFromContents(data []byte) (*Config, error) { + config := &Config{} var jsonData map[string]json.RawMessage if err := json.Unmarshal(data, &jsonData); err != nil { diff --git a/xds/internal/client/singleton.go b/xds/internal/client/singleton.go index 99f195341acd..41dd16e26afe 100644 --- a/xds/internal/client/singleton.go +++ b/xds/internal/client/singleton.go @@ -19,6 +19,8 @@ package client import ( + "bytes" + "encoding/json" "fmt" "sync" "time" @@ -92,8 +94,8 @@ func New() (*Client, error) { // singleton. The following calls will return the singleton xds client without // checking or using the config. // -// This function is internal only, for c2p resolver to use. DO NOT use this -// elsewhere. Use New() instead. +// This function is internal only, for c2p resolver and testing to use. DO NOT +// use this elsewhere. Use New() instead. 
func NewWithConfig(config *bootstrap.Config) (*Client, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() @@ -141,3 +143,49 @@ func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.D } return &Client{clientImpl: cl, refCount: 1}, nil } + +// NewClientWithBootstrapContents returns an xds client for this config, +// separate from the global singleton. This should be used for testing +// purposes only. +func NewClientWithBootstrapContents(contents []byte) (*Client, error) { + // Normalize the contents + buf := bytes.Buffer{} + err := json.Indent(&buf, contents, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON: %v", err) + } + contents = bytes.TrimSpace(buf.Bytes()) + + clientsMu.Lock() + defer clientsMu.Unlock() + if c := clients[string(contents)]; c != nil { + c.mu.Lock() + // Since we don't remove the *Client from the map when it is closed, we + // need to recreate the impl if the ref count dropped to zero. + if c.refCount > 0 { + c.refCount++ + c.mu.Unlock() + return c, nil + } + c.mu.Unlock() + } + + bcfg, err := bootstrap.NewConfigFromContents(contents) + if err != nil { + return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) + } + + cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout) + if err != nil { + return nil, err + } + + c := &Client{clientImpl: cImpl, refCount: 1} + clients[string(contents)] = c + return c, nil +} + +var ( + clients = map[string]*Client{} + clientsMu sync.Mutex +) diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index e995fa9fa8fe..41e3899c285e 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -36,6 +36,17 @@ import ( const xdsScheme = "xds" +// NewBuilder creates a new xds resolver builder using a specific xds bootstrap +// config, so tests can use multiple xds clients in different ClientConns at +// the same time. 
+func NewBuilder(config []byte) (resolver.Builder, error) { + return &xdsResolverBuilder{ + newXDSClient: func() (xdsClientInterface, error) { + return xdsclient.NewClientWithBootstrapContents(config) + }, + }, nil +} + // For overriding in unittests. var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } @@ -43,7 +54,9 @@ func init() { resolver.Register(&xdsResolverBuilder{}) } -type xdsResolverBuilder struct{} +type xdsResolverBuilder struct { + newXDSClient func() (xdsClientInterface, error) +} // Build helps implement the resolver.Builder interface. // @@ -60,6 +73,11 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op r.logger = prefixLogger((r)) r.logger.Infof("Creating resolver for target: %+v", t) + newXDSClient := newXDSClient + if b.newXDSClient != nil { + newXDSClient = b.newXDSClient + } + client, err := newXDSClient() if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) @@ -178,6 +196,13 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { state := iresolver.SetConfigSelector(resolver.State{ ServiceConfig: r.cc.ParseServiceConfig(string(sc)), }, cs) + + // Include the xds client for the LB policies to use. For unit tests, + // r.client may not be a full *xdsclient.Client, but it will always be in + // production. 
+ if c, ok := r.client.(*xdsclient.Client); ok { + state = xdsclient.SetClient(state, c) + } r.cc.UpdateState(state) return true } diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 713331b325e0..e94b70c9fb64 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -68,7 +68,7 @@ func (s) TestClientSideXDS(t *testing.T) { port, cleanup := clientSetup(t) defer cleanup() - serviceName := xdsServiceName + "-client-side-xds" + const serviceName = "my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: xdsClientNodeID, @@ -81,7 +81,7 @@ func (s) TestClientSideXDS(t *testing.T) { } // Create a ClientConn and make a successful RPC. - cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials())) + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsResolverBuilder)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index a41fec929762..b66cdd59cafe 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -40,7 +40,9 @@ import ( "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" + "google.golang.org/grpc/xds" "google.golang.org/grpc/xds/internal/testutils/e2e" xdsinternal "google.golang.org/grpc/internal/xds" @@ -71,8 +73,10 @@ func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, er var ( // Globals corresponding to the single instance of the xDS management server // which is spawned for all the tests in 
this package. - managementServer *e2e.ManagementServer - xdsClientNodeID string + managementServer *e2e.ManagementServer + xdsClientNodeID string + bootstrapContents []byte + xdsResolverBuilder resolver.Builder ) // TestMain sets up an xDS management server, runs all tests, and stops the @@ -158,30 +162,33 @@ func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { // - sets up the global variables which refer to this management server and the // nodeID to be used when talking to this management server. // -// Returns a function to be invoked by the caller to stop the management server. -func setupManagementServer() (func(), error) { +// Returns a function to be invoked by the caller to stop the management +// server. +func setupManagementServer() (cleanup func(), err error) { // Turn on the env var protection for client-side security. origClientSideSecurityEnvVar := env.ClientSideSecuritySupport env.ClientSideSecuritySupport = true // Spin up an xDS management server on a local port. - var err error managementServer, err = e2e.StartManagementServer() if err != nil { return nil, err } + defer func() { + if err != nil { + managementServer.Stop() + } + }() // Create a directory to hold certs and key files used on the server side. serverDir, err := createTmpDirWithFiles("testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") if err != nil { - managementServer.Stop() return nil, err } // Create a directory to hold certs and key files used on the client side. clientDir, err := createTmpDirWithFiles("testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") if err != nil { - managementServer.Stop() return nil, err } @@ -194,7 +201,7 @@ func setupManagementServer() (func(), error) { // Create a bootstrap file in a temporary directory. 
xdsClientNodeID = uuid.New().String() - bootstrapCleanup, err := xdsinternal.SetupBootstrapFile(xdsinternal.BootstrapOptions{ + bootstrapContents, err = xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ Version: xdsinternal.TransportV3, NodeID: xdsClientNodeID, ServerURI: managementServer.Address, @@ -202,13 +209,15 @@ func setupManagementServer() (func(), error) { ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, }) if err != nil { - managementServer.Stop() + return nil, err + } + xdsResolverBuilder, err = xds.NewXDSResolverWithConfigForTesting(bootstrapContents) + if err != nil { return nil, err } return func() { managementServer.Stop() - bootstrapCleanup() env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }, nil } diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 6511a6134cf8..4bf13e9305be 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -46,8 +46,6 @@ const ( certFile = "cert.pem" keyFile = "key.pem" rootFile = "ca.pem" - - xdsServiceName = "my-service" ) // setupGRPCServer performs the following: @@ -70,7 +68,7 @@ func setupGRPCServer(t *testing.T) (net.Listener, func()) { } // Initialize an xDS-enabled gRPC server and register the stubServer on it. - server := xds.NewGRPCServer(grpc.Creds(creds)) + server := xds.NewGRPCServer(grpc.Creds(creds), xds.BootstrapContentsForTesting(bootstrapContents)) testpb.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). 
@@ -123,7 +121,7 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { if err != nil { t.Fatalf("failed to retrieve host and port of server: %v", err) } - serviceName := xdsServiceName + "-fallback" + const serviceName = "my-service-fallback" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: xdsClientNodeID, @@ -154,7 +152,7 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { // Create a ClientConn with the xds scheme and make a successful RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(xdsResolverBuilder)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -205,7 +203,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // Create xDS resources to be consumed on the client side. This // includes the listener, route configuration, cluster (with // security configuration) and endpoint resources. - serviceName := xdsServiceName + "-file-watcher-certs-" + test.name + serviceName := "my-service-file-watcher-certs-" + test.name resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: xdsClientNodeID, @@ -236,7 +234,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // Create a ClientConn with the xds scheme and make an RPC. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(xdsResolverBuilder)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -270,7 +268,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { if err != nil { t.Fatalf("failed to retrieve host and port of server: %v", err) } - serviceName := xdsServiceName + "-security-config-change" + const serviceName = "my-service-security-config-change" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: xdsClientNodeID, @@ -301,7 +299,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { // Create a ClientConn with the xds scheme and make a successful RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds)) + xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds), grpc.WithResolvers(xdsResolverBuilder)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 0b70f7b06aed..8fb346298abf 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -105,7 +105,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { }) // Initialize an xDS-enabled gRPC server and register the stubServer on it. 
- server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt) + server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) defer server.Stop() testpb.RegisterTestServiceServer(server, &testService{}) diff --git a/xds/server.go b/xds/server.go index 3a2b629ae986..b83a073ac1c5 100644 --- a/xds/server.go +++ b/xds/server.go @@ -131,8 +131,8 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { func handleServerOptions(opts []grpc.ServerOption) *serverOptions { so := &serverOptions{} for _, opt := range opts { - if o, ok := opt.(serverOption); ok { - o.applyServerOption(so) + if o, ok := opt.(*serverOption); ok { + o.apply(so) } } return so @@ -154,6 +154,12 @@ func (s *GRPCServer) initXDSClient() error { return nil } + newXDSClient := newXDSClient + if s.opts.bootstrapContents != nil { + newXDSClient = func() (xdsClientInterface, error) { + return xdsclient.NewClientWithBootstrapContents(s.opts.bootstrapContents) + } + } client, err := newXDSClient() if err != nil { return fmt.Errorf("xds: failed to create xds-client: %v", err) @@ -181,7 +187,6 @@ func (s *GRPCServer) Serve(lis net.Listener) error { if err := s.initXDSClient(); err != nil { return err } - cfg := s.xdsC.BootstrapConfig() if cfg == nil { return errors.New("bootstrap configuration is empty") diff --git a/xds/server_options.go b/xds/server_options.go index 44b7b374fd00..0918c097a3e5 100644 --- a/xds/server_options.go +++ b/xds/server_options.go @@ -25,31 +25,20 @@ import ( iserver "google.golang.org/grpc/xds/internal/server" ) -// ServingModeCallback returns a grpc.ServerOption which allows users to -// register a callback to get notified about serving mode changes. 
-func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { - return &smcOption{cb: cb} -} - -type serverOption interface { - applyServerOption(*serverOptions) +type serverOptions struct { + modeCallback ServingModeCallbackFunc + bootstrapContents []byte } -// smcOption is a server option containing a callback to be invoked when the -// serving mode changes. -type smcOption struct { - // Embedding the empty server option makes it safe to pass it to - // grpc.NewServer(). +type serverOption struct { grpc.EmptyServerOption - cb ServingModeCallbackFunc -} - -func (s *smcOption) applyServerOption(o *serverOptions) { - o.modeCallback = s.cb + apply func(*serverOptions) } -type serverOptions struct { - modeCallback ServingModeCallbackFunc +// ServingModeCallback returns a grpc.ServerOption which allows users to +// register a callback to get notified about serving mode changes. +func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.modeCallback = cb }} } // ServingMode indicates the current mode of operation of the server. @@ -82,3 +71,15 @@ type ServingModeChangeArgs struct { // not-serving mode. Err error } + +// BootstrapContentsForTesting returns a grpc.ServerOption which allows users +// to inject a bootstrap configuration used by only this server, instead of the +// global configuration from the environment variables. +// +// Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. 
+func BootstrapContentsForTesting(contents []byte) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.bootstrapContents = contents }} +} diff --git a/xds/xds.go b/xds/xds.go index 23c88903f40b..bbd3fe543212 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -38,6 +38,7 @@ import ( v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "google.golang.org/grpc" internaladmin "google.golang.org/grpc/internal/admin" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. @@ -45,7 +46,7 @@ import ( _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 xDS API client. _ "google.golang.org/grpc/xds/internal/client/v3" // Register the v3 xDS API client. _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + xdsresolver "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. ) func init() { @@ -76,3 +77,16 @@ func init() { return csdss.Close, nil }) } + +// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using +// the provided xds bootstrap config instead of the global configuration from +// the supported environment variables. The resolver.Builder is meant to be +// used in conjunction with the grpc.WithResolvers DialOption. +// +// Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. 
+func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { + return xdsresolver.NewBuilder(bootstrapConfig) +} From e7b12ef3b15f6c46da7c5c3c71f4ca06ba410c1c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 2 Jun 2021 15:58:39 -0700 Subject: [PATCH 112/998] cluster_resolver: add functions to build child balancer config (#4429) --- .../balancerconfig/configbuilder.go | 297 +++++++ .../balancerconfig/configbuilder_test.go | 839 ++++++++++++++++++ .../clusterresolver/balancerconfig/type.go | 95 ++ .../balancer/edsbalancer/configbuilder.go | 49 + .../edsbalancer/configbuilder_test.go | 126 +++ 5 files changed, 1406 insertions(+) create mode 100644 xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go create mode 100644 xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go create mode 100644 xds/internal/balancer/clusterresolver/balancerconfig/type.go create mode 100644 xds/internal/balancer/edsbalancer/configbuilder.go create mode 100644 xds/internal/balancer/edsbalancer/configbuilder_test.go diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go new file mode 100644 index 000000000000..2e67d54dadb6 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go @@ -0,0 +1,297 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancerconfig contains utility functions to build balancer config. +// The built config will generate a tree of balancers with priority, +// cluster_impl, weighted_target, lrs, and roundrobin. +// +// This is in a subpackage of cluster_resolver so that it can be used by the EDS +// balancer. Eventually we will delete the EDS balancer, and replace it with +// cluster_resolver, then we can move the functions to package cluster_resolver, +// and unexport them. +// +// TODO: move and unexport. Read above. +package balancerconfig + +import ( + "encoding/json" + "fmt" + "sort" + + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/hierarchy" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/lrs" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/weightedtarget" + xdsclient "google.golang.org/grpc/xds/internal/client" +) + +const million = 1000000 + +// PriorityConfig is config for one priority. For example, if there an EDS and a +// DNS, the priority list will be [priorityConfig{EDS}, PriorityConfig{DNS}]. +// +// Each PriorityConfig corresponds to one discovery mechanism from the LBConfig +// generated by the CDS balancer. The CDS balancer resolves the cluster name to +// an ordered list of discovery mechanisms (if the top cluster is an aggregated +// cluster), one for each underlying cluster. +type PriorityConfig struct { + Mechanism DiscoveryMechanism + // EDSResp is set only if type is EDS. + EDSResp xdsclient.EndpointsUpdate + // Addresses is set only if type is DNS. 
+ Addresses []string +} + +// BuildPriorityConfigJSON builds balancer config for the passed in +// priorities. +// +// The built tree of balancers (see test for the output struct). +// +// ┌────────┐ +// │priority│ +// └┬──────┬┘ +// │ │ +// ┌───────────▼┐ ┌▼───────────┐ +// │cluster_impl│ │cluster_impl│ +// └─┬──────────┘ └──────────┬─┘ +// │ │ +// ┌──────────────▼─┐ ┌─▼──────────────┐ +// │locality_picking│ │locality_picking│ +// └┬──────────────┬┘ └┬──────────────┬┘ +// │ │ │ │ +// ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ +// │LRS│ │LRS│ │LRS│ │LRS│ +// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘ +// │ │ │ │ +// ┌──────────▼─────┐ ┌─────▼──────────┐ ┌──────────▼─────┐ ┌─────▼──────────┐ +// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ +// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘ +// +// If endpointPickingPolicy is nil, roundrobin will be used. +// +// Custom locality picking policy isn't support, and weighted_target is always +// used. +// +// TODO: support setting locality picking policy, and add a parameter for +// locality picking policy. 
+func BuildPriorityConfigJSON(priorities []PriorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { + pc, addrs := buildPriorityConfig(priorities, endpointPickingPolicy) + ret, err := json.Marshal(pc) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err) + } + return ret, addrs, nil +} + +func buildPriorityConfig(priorities []PriorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address) { + var ( + retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} + retAddrs []resolver.Address + ) + for i, p := range priorities { + switch p.Mechanism.Type { + case DiscoveryMechanismTypeEDS: + names, configs, addrs := buildClusterImplConfigForEDS(i, p.EDSResp, p.Mechanism, endpointPickingPolicy) + retConfig.Priorities = append(retConfig.Priorities, names...) + for n, c := range configs { + retConfig.Children[n] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: c}, + // Ignore all re-resolution from EDS children. + IgnoreReresolutionRequests: true, + } + } + retAddrs = append(retAddrs, addrs...) + case DiscoveryMechanismTypeLogicalDNS: + name, config, addrs := buildClusterImplConfigForDNS(i, p.Addresses) + retConfig.Priorities = append(retConfig.Priorities, name) + retConfig.Children[name] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config}, + // Not ignore re-resolution from DNS children, they will trigger + // DNS to re-resolve. + IgnoreReresolutionRequests: false, + } + retAddrs = append(retAddrs, addrs...) + } + } + return retConfig, retAddrs +} + +func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string, *clusterimpl.LBConfig, []resolver.Address) { + // Endpoint picking policy for DNS is hardcoded to pick_first. 
+ const childPolicy = "pick_first" + var retAddrs []resolver.Address + pName := fmt.Sprintf("priority-%v", parentPriority) + for _, addrStr := range addrStrs { + retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) + } + return pName, &clusterimpl.LBConfig{ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}}, retAddrs +} + +// buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for +// each priority, sorted by priority, and the addresses for each priority (with +// hierarchy attributes set). +// +// For example, if there are two priorities, the returned values will be +// - ["p0", "p1"] +// - map{"p0":p0_config, "p1":p1_config} +// - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] +// - p0 addresses' hierarchy attributes are set to p0 +func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, endpointPickingPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address) { + var ( + retNames []string + retAddrs []resolver.Address + retConfigs = make(map[string]*clusterimpl.LBConfig) + ) + + if endpointPickingPolicy == nil { + endpointPickingPolicy = &internalserviceconfig.BalancerConfig{Name: roundrobin.Name} + } + + drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) + for _, d := range edsResp.Drops { + drops = append(drops, clusterimpl.DropConfig{ + Category: d.Category, + RequestsPerMillion: d.Numerator * million / d.Denominator, + }) + } + + priorityChildNames, priorities := groupLocalitiesByPriority(edsResp.Localities) + for _, priorityName := range priorityChildNames { + priorityLocalities := priorities[priorityName] + // Prepend parent priority to the priority names, to avoid duplicates. 
+ pName := fmt.Sprintf("priority-%v-%v", parentPriority, priorityName) + retNames = append(retNames, pName) + wtConfig, addrs := localitiesToWeightedTarget(priorityLocalities, pName, endpointPickingPolicy, mechanism.LoadReportingServerName, mechanism.Cluster, mechanism.EDSServiceName) + retConfigs[pName] = &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig}, + LoadReportingServerName: mechanism.LoadReportingServerName, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + DropCategories: drops, + } + retAddrs = append(retAddrs, addrs...) + } + + return retNames, retConfigs, retAddrs +} + +// groupLocalitiesByPriority returns the localities grouped by priority. +// +// It also returns a list of strings where each string represents a priority, +// and the list is sorted from higher priority to lower priority. +// +// For example, for L0-p0, L1-p0, L2-p1, results will be +// - ["p0", "p1"] +// - map{"p0":[L0, L1], "p1":[L2]} +func groupLocalitiesByPriority(localities []xdsclient.Locality) ([]string, map[string][]xdsclient.Locality) { + var priorityIntSlice []int + priorities := make(map[string][]xdsclient.Locality) + for _, locality := range localities { + if locality.Weight == 0 { + continue + } + priorityName := fmt.Sprintf("%v", locality.Priority) + priorities[priorityName] = append(priorities[priorityName], locality) + priorityIntSlice = append(priorityIntSlice, int(locality.Priority)) + } + // Sort the priorities based on the int value, deduplicate, and then turn + // the sorted list into a string list. This will be child names, in priority + // order. 
+ sort.Ints(priorityIntSlice) + priorityIntSliceDeduped := dedupSortedIntSlice(priorityIntSlice) + priorityNameSlice := make([]string, 0, len(priorityIntSliceDeduped)) + for _, p := range priorityIntSliceDeduped { + priorityNameSlice = append(priorityNameSlice, fmt.Sprintf("%v", p)) + } + return priorityNameSlice, priorities +} + +func dedupSortedIntSlice(a []int) []int { + if len(a) == 0 { + return a + } + i, j := 0, 1 + for ; j < len(a); j++ { + if a[i] == a[j] { + continue + } + i++ + if i != j { + a[i] = a[j] + } + } + return a[:i+1] +} + +// localitiesToWeightedTarget takes a list of localities (with the same +// priority), and generates a weighted target config, and list of addresses. +// +// The addresses have path hierarchy set to [priority-name, locality-name], so +// priority and weighted target know which child policy they are for. +func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig, lrsServer *string, cluster, edsService string) (*weightedtarget.LBConfig, []resolver.Address) { + weightedTargets := make(map[string]weightedtarget.Target) + var addrs []resolver.Address + for _, locality := range localities { + localityStr, err := locality.ID.ToString() + if err != nil { + localityStr = fmt.Sprintf("%+v", locality.ID) + } + + child := childPolicy + // If lrsServer is not set, we can skip this extra layer of the LRS + // policy. 
+ if lrsServer != nil { + localityID := locality.ID + child = &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: cluster, + EDSServiceName: edsService, + ChildPolicy: childPolicy, + LoadReportingServerName: *lrsServer, + Locality: &localityID, + }, + } + } + weightedTargets[localityStr] = weightedtarget.Target{Weight: locality.Weight, ChildPolicy: child} + + for _, endpoint := range locality.Endpoints { + // Filter out all "unhealthy" endpoints (unknown and healthy are + // both considered to be healthy: + // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). + if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { + continue + } + + addr := resolver.Address{Addr: endpoint.Address} + if childPolicy.Name == weightedroundrobin.Name && endpoint.Weight != 0 { + ai := weightedroundrobin.AddrInfo{Weight: endpoint.Weight} + addr = weightedroundrobin.SetAddrInfo(addr, ai) + } + addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addrs = append(addrs, addr) + } + } + return &weightedtarget.LBConfig{Targets: weightedTargets}, addrs +} diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go new file mode 100644 index 000000000000..273dc6233478 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go @@ -0,0 +1,839 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancerconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/hierarchy" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/lrs" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/weightedtarget" + xdsclient "google.golang.org/grpc/xds/internal/client" +) + +const ( + testClusterName = "test-cluster-name" + testLRSServer = "test-lrs-server" + testMaxRequests = 314 + testEDSServcie = "test-eds-service-name" + testEDSServiceName = "service-name-from-parent" + testDropCategory = "test-drops" + testDropOverMillion = 1 + + localityCount = 5 + addressPerLocality = 2 +) + +var ( + testLocalityIDs []internal.LocalityID + testAddressStrs [][]string + testEndpoints [][]xdsclient.Endpoint + + testLocalitiesP0, testLocalitiesP1 []xdsclient.Locality + + addrCmpOpts = cmp.Options{ + cmp.AllowUnexported(attributes.Attributes{}), + cmp.Transformer("SortAddrs", func(in []resolver.Address) []resolver.Address { + out := append([]resolver.Address(nil), in...) 
// Copy input to avoid mutating it + sort.Slice(out, func(i, j int) bool { + return out[i].Addr < out[j].Addr + }) + return out + })} +) + +func init() { + for i := 0; i < localityCount; i++ { + testLocalityIDs = append(testLocalityIDs, internal.LocalityID{Zone: fmt.Sprintf("test-zone-%d", i)}) + var ( + addrs []string + ends []xdsclient.Endpoint + ) + for j := 0; j < addressPerLocality; j++ { + addr := fmt.Sprintf("addr-%d-%d", i, j) + addrs = append(addrs, addr) + ends = append(ends, xdsclient.Endpoint{ + Address: addr, + HealthStatus: xdsclient.EndpointHealthStatusHealthy, + }) + } + testAddressStrs = append(testAddressStrs, addrs) + testEndpoints = append(testEndpoints, ends) + } + + testLocalitiesP0 = []xdsclient.Locality{ + { + Endpoints: testEndpoints[0], + ID: testLocalityIDs[0], + Weight: 20, + Priority: 0, + }, + { + Endpoints: testEndpoints[1], + ID: testLocalityIDs[1], + Weight: 80, + Priority: 0, + }, + } + testLocalitiesP1 = []xdsclient.Locality{ + { + Endpoints: testEndpoints[2], + ID: testLocalityIDs[2], + Weight: 20, + Priority: 1, + }, + { + Endpoints: testEndpoints[3], + ID: testLocalityIDs[3], + Weight: 80, + Priority: 1, + }, + } +} + +// TestBuildPriorityConfigJSON is a sanity check that the built balancer config +// can be parsed. The behavior test is covered by TestBuildPriorityConfig. 
+func TestBuildPriorityConfigJSON(t *testing.T) { + gotConfig, _, err := BuildPriorityConfigJSON([]PriorityConfig{ + { + Mechanism: DiscoveryMechanism{ + Cluster: testClusterName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, + }, + EDSResp: xdsclient.EndpointsUpdate{ + Drops: []xdsclient.OverloadDropConfig{ + { + Category: testDropCategory, + Numerator: testDropOverMillion, + Denominator: million, + }, + }, + Localities: []xdsclient.Locality{ + testLocalitiesP0[0], + testLocalitiesP0[1], + testLocalitiesP1[0], + testLocalitiesP1[1], + }, + }, + }, + { + Mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeLogicalDNS, + }, + Addresses: testAddressStrs[4], + }, + }, nil) + if err != nil { + t.Fatalf("buildPriorityConfigJSON(...) failed: %v", err) + } + + var prettyGot bytes.Buffer + if err := json.Indent(&prettyGot, gotConfig, ">>> ", " "); err != nil { + t.Fatalf("json.Indent() failed: %v", err) + } + // Print the indented json if this test fails. 
+ t.Log(prettyGot.String()) + + priorityB := balancer.Get(priority.Name) + if _, err = priorityB.(balancer.ConfigParser).ParseConfig(gotConfig); err != nil { + t.Fatalf("ParseConfig(%+v) failed: %v", gotConfig, err) + } +} + +func TestBuildPriorityConfig(t *testing.T) { + gotConfig, gotAddrs := buildPriorityConfig([]PriorityConfig{ + { + Mechanism: DiscoveryMechanism{ + Cluster: testClusterName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, + }, + EDSResp: xdsclient.EndpointsUpdate{ + Drops: []xdsclient.OverloadDropConfig{ + { + Category: testDropCategory, + Numerator: testDropOverMillion, + Denominator: million, + }, + }, + Localities: []xdsclient.Locality{ + testLocalitiesP0[0], + testLocalitiesP0[1], + testLocalitiesP1[0], + testLocalitiesP1[1], + }, + }, + }, + { + Mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeLogicalDNS, + }, + Addresses: testAddressStrs[4], + }, + }, nil) + + wantConfig := &priority.LBConfig{ + Children: map[string]*priority.Child{ + "priority-0-0": { + Config: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + DropCategories: []clusterimpl.DropConfig{ + { + Category: testDropCategory, + RequestsPerMillion: testDropOverMillion, + }, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(testLocalityIDs[0].ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: testLRSServer, + Locality: 
&testLocalityIDs[0], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + assertString(testLocalityIDs[1].ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: testLRSServer, + Locality: &testLocalityIDs[1], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + }, + }, + }, + IgnoreReresolutionRequests: true, + }, + "priority-0-1": { + Config: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + DropCategories: []clusterimpl.DropConfig{ + { + Category: testDropCategory, + RequestsPerMillion: testDropOverMillion, + }, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(testLocalityIDs[2].ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: testLRSServer, + Locality: &testLocalityIDs[2], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + assertString(testLocalityIDs[3].ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: testLRSServer, + Locality: &testLocalityIDs[3], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + }, + }, + }, + 
IgnoreReresolutionRequests: true, + }, + "priority-1": { + Config: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: "pick_first"}, + }, + }, + IgnoreReresolutionRequests: false, + }, + }, + Priorities: []string{"priority-0-0", "priority-0-1", "priority-1"}, + } + wantAddrs := []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][0]}, []string{"priority-0-0", assertString(testLocalityIDs[0].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][1]}, []string{"priority-0-0", assertString(testLocalityIDs[0].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][0]}, []string{"priority-0-0", assertString(testLocalityIDs[1].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][1]}, []string{"priority-0-0", assertString(testLocalityIDs[1].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][0]}, []string{"priority-0-1", assertString(testLocalityIDs[2].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][1]}, []string{"priority-0-1", assertString(testLocalityIDs[2].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][0]}, []string{"priority-0-1", assertString(testLocalityIDs[3].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][1]}, []string{"priority-0-1", assertString(testLocalityIDs[3].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[4][0]}, []string{"priority-1"}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[4][1]}, []string{"priority-1"}), + } + + if diff := cmp.Diff(gotConfig, wantConfig); diff != "" { + t.Errorf("buildPriorityConfig() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotAddrs, wantAddrs, addrCmpOpts); diff != "" { + t.Errorf("buildPriorityConfig() diff (-got +want) %v", diff) + } +} + +func TestBuildClusterImplConfigForDNS(t *testing.T) { + gotName, gotConfig, 
gotAddrs := buildClusterImplConfigForDNS(3, testAddressStrs[0]) + wantName := "priority-3" + wantConfig := &clusterimpl.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "pick_first", + }, + } + wantAddrs := []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][0]}, []string{"priority-3"}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][1]}, []string{"priority-3"}), + } + + if diff := cmp.Diff(gotName, wantName); diff != "" { + t.Errorf("buildClusterImplConfigForDNS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotConfig, wantConfig); diff != "" { + t.Errorf("buildClusterImplConfigForDNS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotAddrs, wantAddrs, addrCmpOpts); diff != "" { + t.Errorf("buildClusterImplConfigForDNS() diff (-got +want) %v", diff) + } +} + +func TestBuildClusterImplConfigForEDS(t *testing.T) { + gotNames, gotConfigs, gotAddrs := buildClusterImplConfigForEDS( + 2, + xdsclient.EndpointsUpdate{ + Drops: []xdsclient.OverloadDropConfig{ + { + Category: testDropCategory, + Numerator: testDropOverMillion, + Denominator: million, + }, + }, + Localities: []xdsclient.Locality{ + { + Endpoints: testEndpoints[3], + ID: testLocalityIDs[3], + Weight: 80, + Priority: 1, + }, { + Endpoints: testEndpoints[1], + ID: testLocalityIDs[1], + Weight: 80, + Priority: 0, + }, { + Endpoints: testEndpoints[2], + ID: testLocalityIDs[2], + Weight: 20, + Priority: 1, + }, { + Endpoints: testEndpoints[0], + ID: testLocalityIDs[0], + Weight: 20, + Priority: 0, + }, + }, + }, + DiscoveryMechanism{ + Cluster: testClusterName, + MaxConcurrentRequests: newUint32(testMaxRequests), + LoadReportingServerName: newString(testLRSServer), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, + }, + nil, + ) + + wantNames := []string{ + fmt.Sprintf("priority-%v-%v", 2, 0), + fmt.Sprintf("priority-%v-%v", 2, 1), + } + wantConfigs := map[string]*clusterimpl.LBConfig{ + "priority-2-0": { + 
Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + DropCategories: []clusterimpl.DropConfig{ + { + Category: testDropCategory, + RequestsPerMillion: testDropOverMillion, + }, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(testLocalityIDs[0].ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: testLRSServer, + Locality: &testLocalityIDs[0], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + assertString(testLocalityIDs[1].ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: testLRSServer, + Locality: &testLocalityIDs[1], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + }, + }, + "priority-2-1": { + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + DropCategories: []clusterimpl.DropConfig{ + { + Category: testDropCategory, + RequestsPerMillion: testDropOverMillion, + }, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(testLocalityIDs[2].ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + 
LoadReportingServerName: testLRSServer, + Locality: &testLocalityIDs[2], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + assertString(testLocalityIDs[3].ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServerName: testLRSServer, + Locality: &testLocalityIDs[3], + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + }, + }, + } + wantAddrs := []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][0]}, []string{"priority-2-0", assertString(testLocalityIDs[0].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][1]}, []string{"priority-2-0", assertString(testLocalityIDs[0].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][0]}, []string{"priority-2-0", assertString(testLocalityIDs[1].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][1]}, []string{"priority-2-0", assertString(testLocalityIDs[1].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][0]}, []string{"priority-2-1", assertString(testLocalityIDs[2].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][1]}, []string{"priority-2-1", assertString(testLocalityIDs[2].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][0]}, []string{"priority-2-1", assertString(testLocalityIDs[3].ToString)}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][1]}, []string{"priority-2-1", assertString(testLocalityIDs[3].ToString)}), + } + + if diff := cmp.Diff(gotNames, wantNames); diff != "" { + t.Errorf("buildClusterImplConfigForEDS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotConfigs, wantConfigs); diff != "" { + t.Errorf("buildClusterImplConfigForEDS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotAddrs, 
wantAddrs, addrCmpOpts); diff != "" { + t.Errorf("buildClusterImplConfigForEDS() diff (-got +want) %v", diff) + } + +} + +func TestGroupLocalitiesByPriority(t *testing.T) { + tests := []struct { + name string + localities []xdsclient.Locality + wantPriorities []string + wantLocalities map[string][]xdsclient.Locality + }{ + { + name: "1 locality 1 priority", + localities: []xdsclient.Locality{testLocalitiesP0[0]}, + wantPriorities: []string{"0"}, + wantLocalities: map[string][]xdsclient.Locality{ + "0": {testLocalitiesP0[0]}, + }, + }, + { + name: "2 locality 1 priority", + localities: []xdsclient.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, + wantPriorities: []string{"0"}, + wantLocalities: map[string][]xdsclient.Locality{ + "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, + }, + }, + { + name: "1 locality in each", + localities: []xdsclient.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, + wantPriorities: []string{"0", "1"}, + wantLocalities: map[string][]xdsclient.Locality{ + "0": {testLocalitiesP0[0]}, + "1": {testLocalitiesP1[0]}, + }, + }, + { + name: "2 localities in each sorted", + localities: []xdsclient.Locality{ + testLocalitiesP0[0], testLocalitiesP0[1], + testLocalitiesP1[0], testLocalitiesP1[1]}, + wantPriorities: []string{"0", "1"}, + wantLocalities: map[string][]xdsclient.Locality{ + "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, + "1": {testLocalitiesP1[0], testLocalitiesP1[1]}, + }, + }, + { + // The localities are given in order [p1, p0, p1, p0], but the + // returned priority list must be sorted [p0, p1], because the list + // order is the priority order. 
+ name: "2 localities in each needs to sort", + localities: []xdsclient.Locality{ + testLocalitiesP1[1], testLocalitiesP0[1], + testLocalitiesP1[0], testLocalitiesP0[0]}, + wantPriorities: []string{"0", "1"}, + wantLocalities: map[string][]xdsclient.Locality{ + "0": {testLocalitiesP0[1], testLocalitiesP0[0]}, + "1": {testLocalitiesP1[1], testLocalitiesP1[0]}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPriorities, gotLocalities := groupLocalitiesByPriority(tt.localities) + if diff := cmp.Diff(gotPriorities, tt.wantPriorities); diff != "" { + t.Errorf("groupLocalitiesByPriority() diff(-got +want) %v", diff) + } + if diff := cmp.Diff(gotLocalities, tt.wantLocalities); diff != "" { + t.Errorf("groupLocalitiesByPriority() diff(-got +want) %v", diff) + } + }) + } +} + +func TestDedupSortedIntSlice(t *testing.T) { + tests := []struct { + name string + a []int + want []int + }{ + { + name: "empty", + a: []int{}, + want: []int{}, + }, + { + name: "no dup", + a: []int{0, 1, 2, 3}, + want: []int{0, 1, 2, 3}, + }, + { + name: "with dup", + a: []int{0, 0, 1, 1, 1, 2, 3}, + want: []int{0, 1, 2, 3}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := dedupSortedIntSlice(tt.a); !cmp.Equal(got, tt.want) { + t.Errorf("dedupSortedIntSlice() = %v, want %v, diff %v", got, tt.want, cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestLocalitiesToWeightedTarget(t *testing.T) { + tests := []struct { + name string + localities []xdsclient.Locality + priorityName string + childPolicy *internalserviceconfig.BalancerConfig + lrsServer *string + cluster string + edsService string + wantConfig *weightedtarget.LBConfig + wantAddrs []resolver.Address + }{ + { + name: "roundrobin as child, with LRS", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: 
xdsclient.EndpointHealthStatusHealthy}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + childPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + lrsServer: newString("test-lrs-server"), + cluster: "test-cluster", + edsService: "test-eds-service", + wantConfig: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: "test-cluster", + EDSServiceName: "test-eds-service", + LoadReportingServerName: "test-lrs-server", + Locality: &internal.LocalityID{Zone: "test-zone-1"}, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: lrs.Name, + Config: &lrs.LBConfig{ + ClusterName: "test-cluster", + EDSServiceName: "test-eds-service", + LoadReportingServerName: "test-lrs-server", + Locality: &internal.LocalityID{Zone: "test-zone-2"}, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + wantAddrs: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: "addr-1-1"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), + hierarchy.Set(resolver.Address{Addr: "addr-1-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), + hierarchy.Set(resolver.Address{Addr: "addr-2-1"}, []string{"test-priority", 
assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + hierarchy.Set(resolver.Address{Addr: "addr-2-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + }, + }, + { + name: "roundrobin as child, no LRS", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + childPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + // lrsServer is nil, so LRS policy will not be used. 
+ wantConfig: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }, + }, + wantAddrs: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: "addr-1-1"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), + hierarchy.Set(resolver.Address{Addr: "addr-1-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), + hierarchy.Set(resolver.Address{Addr: "addr-2-1"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + hierarchy.Set(resolver.Address{Addr: "addr-2-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + }, + }, + { + name: "weighted round robin as child, no LRS", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + childPolicy: &internalserviceconfig.BalancerConfig{Name: weightedroundrobin.Name}, + // lrsServer is nil, so LRS policy will not be used. 
+ wantConfig: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedroundrobin.Name, + }, + }, + assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedroundrobin.Name, + }, + }, + }, + }, + wantAddrs: []resolver.Address{ + hierarchy.Set( + weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-1-1"}, weightedroundrobin.AddrInfo{Weight: 90}), + []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), + hierarchy.Set( + weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-1-2"}, weightedroundrobin.AddrInfo{Weight: 10}), + []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), + hierarchy.Set( + weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-2-1"}, weightedroundrobin.AddrInfo{Weight: 90}), + []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + hierarchy.Set( + weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-2-2"}, weightedroundrobin.AddrInfo{Weight: 10}), + []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1 := localitiesToWeightedTarget(tt.localities, tt.priorityName, tt.childPolicy, tt.lrsServer, tt.cluster, tt.edsService) + if diff := cmp.Diff(got, tt.wantConfig); diff != "" { + t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(got1, tt.wantAddrs, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { + t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) + } + }) + } +} + +func newString(s string) *string { + return &s +} + +func newUint32(i uint32) *uint32 
{ + return &i +} + +func assertString(f func() (string, error)) string { + s, err := f() + if err != nil { + panic(err.Error()) + } + return s +} diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/type.go b/xds/internal/balancer/clusterresolver/balancerconfig/type.go new file mode 100644 index 000000000000..eb149cd384dd --- /dev/null +++ b/xds/internal/balancer/clusterresolver/balancerconfig/type.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancerconfig + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// DiscoveryMechanismType is the type of discovery mechanism. +type DiscoveryMechanismType int + +const ( + // DiscoveryMechanismTypeEDS is eds. + DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:EDS` + // DiscoveryMechanismTypeLogicalDNS is DNS. + DiscoveryMechanismTypeLogicalDNS // `json:LOGICAL_DNS` +) + +// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. +// +// This is necessary to handle enum (as strings) from JSON. +func (t *DiscoveryMechanismType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch *t { + case DiscoveryMechanismTypeEDS: + buffer.WriteString("EDS") + case DiscoveryMechanismTypeLogicalDNS: + buffer.WriteString("LOGICAL_DNS") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. 
+func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + switch s { + case "EDS": + *t = DiscoveryMechanismTypeEDS + case "LOGICAL_DNS": + *t = DiscoveryMechanismTypeLogicalDNS + default: + return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) + } + return nil +} + +// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. +// +// For DNS, the ClientConn target will be used for name resolution. +// +// For EDS, if EDSServiceName is not empty, it will be used for watching. If +// EDSServiceName is empty, Cluster will be used. +type DiscoveryMechanism struct { + // Cluster is the cluster name. + Cluster string `json:"cluster,omitempty"` + // LoadReportingServerName is the LRS server to send load reports to. If + // not present, load reporting will be disabled. If set to the empty string, + // load reporting will be sent to the same server that we obtained CDS data + // from. + LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` + // MaxConcurrentRequests is the maximum number of outstanding requests can + // be made to the upstream cluster. Default is 1024. + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + // Type is the discovery mechanism type. + Type DiscoveryMechanismType `json:"type,omitempty"` + // EDSServiceName is the EDS service name, as returned in CDS. May be unset + // if not specified in CDS. For type EDS only. + // + // This is used for EDS watch if set. If unset, Cluster is used for EDS + // watch. + EDSServiceName string `json:"edsServiceName,omitempty"` +} diff --git a/xds/internal/balancer/edsbalancer/configbuilder.go b/xds/internal/balancer/edsbalancer/configbuilder.go new file mode 100644 index 000000000000..1e08a05e2048 --- /dev/null +++ b/xds/internal/balancer/edsbalancer/configbuilder.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package edsbalancer + +import ( + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" + xdsclient "google.golang.org/grpc/xds/internal/client" +) + +const million = 1000000 + +func buildPriorityConfigJSON(edsResp xdsclient.EndpointsUpdate, c *EDSConfig) ([]byte, []resolver.Address, error) { + var childConfig *internalserviceconfig.BalancerConfig + if c.ChildPolicy != nil { + childConfig = &internalserviceconfig.BalancerConfig{Name: c.ChildPolicy.Name} + } + return balancerconfig.BuildPriorityConfigJSON( + []balancerconfig.PriorityConfig{ + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Cluster: c.ClusterName, + LoadReportingServerName: c.LrsLoadReportingServerName, + MaxConcurrentRequests: c.MaxConcurrentRequests, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + EDSServiceName: c.EDSServiceName, + }, + EDSResp: edsResp, + }, + }, childConfig, + ) +} diff --git a/xds/internal/balancer/edsbalancer/configbuilder_test.go b/xds/internal/balancer/edsbalancer/configbuilder_test.go new file mode 100644 index 000000000000..9425b2363a52 --- /dev/null +++ b/xds/internal/balancer/edsbalancer/configbuilder_test.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package edsbalancer + +import ( + "fmt" + "testing" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/priority" + xdsclient "google.golang.org/grpc/xds/internal/client" + + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer +) + +const ( + localityCount = 4 + addressPerLocality = 2 +) + +var ( + testLocalityIDs []internal.LocalityID + testEndpoints [][]xdsclient.Endpoint +) + +func init() { + for i := 0; i < localityCount; i++ { + testLocalityIDs = append(testLocalityIDs, internal.LocalityID{Zone: fmt.Sprintf("test-zone-%d", i)}) + var ends []xdsclient.Endpoint + for j := 0; j < addressPerLocality; j++ { + addr := fmt.Sprintf("addr-%d-%d", i, j) + ends = append(ends, xdsclient.Endpoint{ + Address: addr, + HealthStatus: xdsclient.EndpointHealthStatusHealthy, + }) + } + testEndpoints = append(testEndpoints, ends) + } +} + +// TestBuildPriorityConfigJSON is a sanity check that the generated config bytes +// are valid (can be parsed back to a config struct). +// +// The correctness is covered by the unmarshalled version +// TestBuildPriorityConfig. 
+func TestBuildPriorityConfigJSON(t *testing.T) { + const ( + testClusterName = "cluster-name-for-watch" + testEDSServiceName = "service-name-from-parent" + testLRSServer = "lrs-addr-from-config" + testMaxReq = 314 + testDropCategory = "test-drops" + testDropOverMillion = 1 + ) + for _, lrsServer := range []*string{newString(testLRSServer), newString(""), nil} { + got, _, err := buildPriorityConfigJSON(xdsclient.EndpointsUpdate{ + Drops: []xdsclient.OverloadDropConfig{{ + Category: testDropCategory, + Numerator: testDropOverMillion, + Denominator: million, + }}, + Localities: []xdsclient.Locality{{ + Endpoints: testEndpoints[3], + ID: testLocalityIDs[3], + Weight: 80, + Priority: 1, + }, { + Endpoints: testEndpoints[1], + ID: testLocalityIDs[1], + Weight: 80, + Priority: 0, + }, { + Endpoints: testEndpoints[2], + ID: testLocalityIDs[2], + Weight: 20, + Priority: 1, + }, { + Endpoints: testEndpoints[0], + ID: testLocalityIDs[0], + Weight: 20, + Priority: 0, + }}}, + &EDSConfig{ + ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}, + ClusterName: testClusterName, + EDSServiceName: testEDSServiceName, + MaxConcurrentRequests: newUint32(testMaxReq), + LrsLoadReportingServerName: lrsServer, + }, + ) + if err != nil { + t.Fatalf("buildPriorityConfigJSON(...) 
failed: %v", err) + } + priorityB := balancer.Get(priority.Name) + if _, err = priorityB.(balancer.ConfigParser).ParseConfig(got); err != nil { + t.Fatalf("ParseConfig(%+v) failed: %v", got, err) + } + } +} + +func newString(s string) *string { + return &s +} + +func newUint32(i uint32) *uint32 { + return &i +} From 174b1c28afaa3c1ca3518c251deb53f014603bbd Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 2 Jun 2021 16:47:35 -0700 Subject: [PATCH 113/998] internal/transport: skip log on EOF when reading client preface (#4458) --- call_test.go | 2 +- internal/transport/http2_server.go | 18 +++++++++++++++--- internal/transport/transport.go | 6 ------ internal/transport/transport_test.go | 2 +- server.go | 16 +++++++++++----- 5 files changed, 28 insertions(+), 16 deletions(-) diff --git a/call_test.go b/call_test.go index abc4537ddb7d..8fdbc9b7eb7e 100644 --- a/call_test.go +++ b/call_test.go @@ -160,7 +160,7 @@ func (s *server) start(t *testing.T, port int, maxStreams uint32) { config := &transport.ServerConfig{ MaxStreams: maxStreams, } - st, err := transport.NewServerTransport("http2", conn, config) + st, err := transport.NewServerTransport(conn, config) if err != nil { continue } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 21a3c8526158..e3799d50aa71 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -125,9 +125,14 @@ type http2Server struct { connectionID uint64 } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a non-nil transport and a nil-error. 
For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -266,6 +271,13 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Skipping the error here will help + // reduce log clutter. + if err == io.EOF { + return nil, nil + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 3ab945aa4d20..14198126457b 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -532,12 +532,6 @@ type ServerConfig struct { HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. 
diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 3aef86277140..d50170cdf9b6 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -323,7 +323,7 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT if err != nil { return } - transport, err := NewServerTransport("http2", conn, serverConfig) + transport, err := NewServerTransport(conn, serverConfig) if err != nil { return } diff --git a/server.go b/server.go index 446995986c8b..d90f3fcd3bf6 100644 --- a/server.go +++ b/server.go @@ -844,10 +844,16 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { // ErrConnDispatched means that the connection was dispatched away from // gRPC; those connections should be left open. if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // In deployments where a gRPC server runs behind a cloud load + // balancer which performs regular TCP level health checks, the + // connection is closed immediately by the latter. Skipping the + // error here will help reduce log clutter. 
+ if err != io.EOF { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + } rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -897,7 +903,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) From 0956b12520b5d76fe9d43f7eda8ad51765c44ce1 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 2 Jun 2021 21:22:13 -0700 Subject: [PATCH 114/998] client: handle RemoveSubConn in goroutine to avoid deadlock (#4504) --- balancer_conn_wrappers.go | 44 +++++++++++++++++++----------------- balancer_switching_test.go | 46 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 20 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index f1bb6dd30737..dd8397963974 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -43,7 +43,7 @@ type ccBalancerWrapper struct { cc *ClientConn balancerMu sync.Mutex // synchronizes calls to the balancer balancer balancer.Balancer - scBuffer *buffer.Unbounded + updateCh *buffer.Unbounded closed *grpcsync.Event done *grpcsync.Event @@ -54,7 +54,7 @@ type ccBalancerWrapper struct { func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, - scBuffer: buffer.NewUnbounded(), + updateCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), subConns: make(map[*acBalancerWrapper]struct{}), @@ -69,15 +69,26 @@ func newCCBalancerWrapper(cc *ClientConn, b 
balancer.Builder, bopts balancer.Bui func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.scBuffer.Get(): - ccb.scBuffer.Load() + case t := <-ccb.updateCh.Get(): + ccb.updateCh.Load() if ccb.closed.HasFired() { break } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() + switch u := t.(type) { + case *scStateUpdate: + ccb.balancerMu.Lock() + ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) + ccb.balancerMu.Unlock() + case *acBalancerWrapper: + ccb.mu.Lock() + if ccb.subConns != nil { + delete(ccb.subConns, u) + ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) + } + ccb.mu.Unlock() + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + } case <-ccb.closed.Done(): } @@ -118,7 +129,7 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.scBuffer.Put(&scStateUpdate{ + ccb.updateCh.Put(&scStateUpdate{ sc: sc, state: s, err: err, @@ -159,17 +170,10 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock + // during switchBalancer() if the old balancer calls RemoveSubConn() in its + // Close(). 
+ ccb.updateCh.Put(sc) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/balancer_switching_test.go b/balancer_switching_test.go index 2c6ed576620f..e9fee87d8f81 100644 --- a/balancer_switching_test.go +++ b/balancer_switching_test.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" @@ -531,6 +532,51 @@ func (s) TestSwitchBalancerGRPCLBWithGRPCLBNotRegistered(t *testing.T) { } } +const inlineRemoveSubConnBalancerName = "test-inline-remove-subconn-balancer" + +func init() { + stub.Register(inlineRemoveSubConnBalancerName, stub.BalancerFuncs{ + Close: func(data *stub.BalancerData) { + data.ClientConn.RemoveSubConn(&acBalancerWrapper{}) + }, + }) +} + +// Test that when switching to balancers, the old balancer calls RemoveSubConn +// in Close. +// +// This test is to make sure this close doesn't cause a deadlock. +func (s) TestSwitchBalancerOldRemoveSubConn(t *testing.T) { + r := manual.NewBuilderWithScheme("whatever") + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, fmt.Sprintf(`{"loadBalancingPolicy": "%v"}`, inlineRemoveSubConnBalancerName))}, nil) + // This service config update will switch balancer from + // "test-inline-remove-subconn-balancer" to "pick_first". The test balancer + // will be closed, which will call cc.RemoveSubConn() inline (this + // RemoveSubConn is not required by the API, but some balancers might do + // it). + // + // This is to make sure the cc.RemoveSubConn() from Close() doesn't cause a + // deadlock (e.g. 
trying to grab a mutex while it's already locked). + // + // Do it in a goroutine so this test will fail with a helpful message + // (though the goroutine will still leak). + done := make(chan struct{}) + go func() { + cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`)}, nil) + close(done) + }() + select { + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for updateResolverState to finish") + case <-done: + } +} + func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { scpr := r.CC.ParseServiceConfig(s) if scpr.Err != nil { From a3715292f8de67482ffe707076b000a15747815e Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 3 Jun 2021 13:59:37 -0700 Subject: [PATCH 115/998] csds: return empty response if xds client is not set (#4505) --- xds/csds/csds.go | 31 ++++++++++++++---------- xds/csds/csds_test.go | 56 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 72 insertions(+), 15 deletions(-) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 73b92e9443ce..468580f0b6a7 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -25,7 +25,6 @@ package csds import ( "context" - "fmt" "io" "time" @@ -59,14 +58,23 @@ type xdsClientInterface interface { var ( logger = grpclog.Component("xds") - newXDSClient = func() (xdsClientInterface, error) { - return client.New() + newXDSClient = func() xdsClientInterface { + c, err := client.New() + if err != nil { + // If err is not nil, c is a typed nil (of type *xdsclient.Client). + // If c is returned and assigned to the xdsClient field in the CSDS + // server, the nil checks in the handlers will not handle it + // properly. + logger.Warningf("failed to create xds client: %v", err) + return nil + } + return c } ) // ClientStatusDiscoveryServer implementations interface ClientStatusDiscoveryServiceServer. type ClientStatusDiscoveryServer struct { - // xdsClient will always be the same in practise. 
But we keep a copy in each + // xdsClient will always be the same in practice. But we keep a copy in each // server instance for testing. xdsClient xdsClientInterface } @@ -74,13 +82,7 @@ type ClientStatusDiscoveryServer struct { // NewClientStatusDiscoveryServer returns an implementation of the CSDS server that can be // registered on a gRPC server. func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - xdsC, err := newXDSClient() - if err != nil { - return nil, fmt.Errorf("failed to create xds client: %v", err) - } - return &ClientStatusDiscoveryServer{ - xdsClient: xdsC, - }, nil + return &ClientStatusDiscoveryServer{xdsClient: newXDSClient()}, nil } // StreamClientStatus implementations interface ClientStatusDiscoveryServiceServer. @@ -113,6 +115,9 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req * // // If it returns an error, the error is a status error. func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + if s.xdsClient == nil { + return &v3statuspb.ClientStatusResponse{}, nil + } // Field NodeMatchers is unsupported, by design // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. if len(req.NodeMatchers) != 0 { @@ -137,7 +142,9 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp // Close cleans up the resources. func (s *ClientStatusDiscoveryServer) Close() { - s.xdsClient.Close() + if s.xdsClient != nil { + s.xdsClient.Close() + } } // nodeProtoToV3 converts the given proto into a v3.Node. 
n is from bootstrap diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 6cf88f6d3942..a051092a840b 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -275,9 +275,7 @@ func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServ t.Fatalf("failed to create xds client: %v", err) } oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { - return xdsC, nil - } + newXDSClient = func() xdsClientInterface { return xdsC } // Initialize an gRPC server and register CSDS on it. server := grpc.NewServer() @@ -635,6 +633,58 @@ func protoToJSON(p proto.Message) string { return ret } +func TestCSDSNoXDSClient(t *testing.T) { + oldNewXDSClient := newXDSClient + newXDSClient = func() xdsClientInterface { return nil } + defer func() { newXDSClient = oldNewXDSClient }() + + // Initialize an gRPC server and register CSDS on it. + server := grpc.NewServer() + csdss, err := NewClientStatusDiscoveryServer() + if err != nil { + t.Fatal(err) + } + defer csdss.Close() + v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) + // Create a local listener and pass it to Serve(). + lis, err := xtestutils.LocalTCPListener() + if err != nil { + t.Fatalf("xtestutils.LocalTCPListener() failed: %v", err) + } + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + defer server.Stop() + + // Create CSDS client. 
+ conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("cannot connect to server: %v", err) + } + defer conn.Close() + c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("cannot get ServerReflectionInfo: %v", err) + } + + if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { + t.Fatalf("failed to send: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("failed to recv response: %v", err) + } + if n := len(r.Config); n != 0 { + t.Fatalf("got %d configs, want 0: %v", n, proto.MarshalTextString(r)) + } +} + func Test_nodeProtoToV3(t *testing.T) { const ( testID = "test-id" From c67c056bee6a3a40a36a8d42f91fe997442a2d07 Mon Sep 17 00:00:00 2001 From: "Jerry Y. Chen" Date: Fri, 4 Jun 2021 05:28:32 +0800 Subject: [PATCH 116/998] doc: fix typo in package networktype (#4508) --- internal/transport/networktype/networktype.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/transport/networktype/networktype.go b/internal/transport/networktype/networktype.go index 96967428b515..7bb53cff1011 100644 --- a/internal/transport/networktype/networktype.go +++ b/internal/transport/networktype/networktype.go @@ -17,7 +17,7 @@ */ // Package networktype declares the network type to be used in the default -// dailer. Attribute of a resolver.Address. +// dialer. Attribute of a resolver.Address. 
package networktype import ( From 32d5490aee8dd29a6fbfe75dc8caade5b6aa5d87 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 3 Jun 2021 15:23:46 -0700 Subject: [PATCH 117/998] metadata: convert keys to lowercase in FromContext() (#4416) --- metadata/metadata.go | 69 +++++++++++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/metadata/metadata.go b/metadata/metadata.go index e4cbea917498..8d9686375a13 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -93,12 +93,16 @@ func (md MD) Copy() MD { } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -107,7 +111,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -117,8 +124,9 @@ func (md MD) Append(k string, vals ...string) { } // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -145,8 +153,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. 
Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -159,20 +167,34 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + return out, true } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. 
+// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). // -// This is intended for gRPC-internal use ONLY. +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -182,16 +204,23 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - out := raw.md.Copy() + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+ key := strings.ToLower(k) + out[key] = v + } for _, added := range raw.added { if len(added)%2 == 1 { panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) From 5c164e2b8f227a29f4aa6b2de3afb2afa38880ba Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 3 Jun 2021 16:10:21 -0700 Subject: [PATCH 118/998] xds: rename xds/internal/client package to xdsclient (#4511) --- xds/csds/csds.go | 32 +++++++++--------- xds/csds/csds_test.go | 20 +++++------ xds/googledirectpath/googlec2p.go | 4 +-- xds/googledirectpath/googlec2p_test.go | 2 +- .../balancer/balancergroup/balancergroup.go | 2 +- .../balancergroup/balancergroup_test.go | 2 +- .../balancer/cdsbalancer/cdsbalancer.go | 4 +-- .../cdsbalancer/cdsbalancer_security_test.go | 4 +-- .../balancer/cdsbalancer/cdsbalancer_test.go | 5 ++- .../balancer/cdsbalancer/cluster_handler.go | 2 +- .../cdsbalancer/cluster_handler_test.go | 2 +- .../balancer/clusterimpl/balancer_test.go | 14 ++++---- .../balancer/clusterimpl/clusterimpl.go | 4 +-- xds/internal/balancer/clusterimpl/picker.go | 6 ++-- .../balancerconfig/configbuilder.go | 2 +- .../balancerconfig/configbuilder_test.go | 2 +- .../balancer/edsbalancer/configbuilder.go | 2 +- .../edsbalancer/configbuilder_test.go | 2 +- xds/internal/balancer/edsbalancer/eds.go | 4 +-- xds/internal/balancer/edsbalancer/eds_impl.go | 13 ++++---- .../balancer/edsbalancer/eds_impl_test.go | 7 ++-- xds/internal/balancer/edsbalancer/eds_test.go | 6 ++-- .../balancer/edsbalancer/eds_testutil.go | 2 +- xds/internal/balancer/edsbalancer/util.go | 2 +- .../balancer/edsbalancer/util_test.go | 2 +- .../balancer/loadstore/load_store_wrapper.go | 2 +- xds/internal/balancer/lrs/balancer.go | 4 +-- xds/internal/httpfilter/fault/fault_test.go | 6 ++-- xds/internal/resolver/matcher.go | 2 +- xds/internal/resolver/serviceconfig.go | 2 +- xds/internal/resolver/watch_service.go | 2 +- xds/internal/resolver/watch_service_test.go | 2 +- 
xds/internal/resolver/xds_resolver.go | 4 +-- xds/internal/resolver/xds_resolver_test.go | 33 +++++++++---------- xds/internal/server/conn_wrapper.go | 2 +- xds/internal/server/listener_wrapper.go | 4 +-- xds/internal/server/listener_wrapper_test.go | 2 +- xds/internal/testutils/fakeclient/client.go | 6 ++-- .../{client => xdsclient}/attributes.go | 2 +- .../bootstrap/bootstrap.go | 0 .../bootstrap/bootstrap_test.go | 0 .../bootstrap/logging.go | 0 .../{client => xdsclient}/callback.go | 2 +- .../{client => xdsclient}/cds_test.go | 2 +- xds/internal/{client => xdsclient}/client.go | 10 +++--- .../{client => xdsclient}/client_test.go | 4 +-- xds/internal/{client => xdsclient}/dump.go | 2 +- .../{client => xdsclient}/eds_test.go | 2 +- xds/internal/{client => xdsclient}/errors.go | 2 +- .../{client => xdsclient}/filter_chain.go | 2 +- .../filter_chain_test.go | 2 +- .../{client => xdsclient}/lds_test.go | 2 +- .../{client => xdsclient}/load/reporter.go | 0 .../{client => xdsclient}/load/store.go | 0 .../{client => xdsclient}/load/store_test.go | 0 .../{client => xdsclient}/loadreport.go | 4 +-- xds/internal/{client => xdsclient}/logging.go | 2 +- .../{client => xdsclient}/rds_test.go | 2 +- .../{client => xdsclient}/requests_counter.go | 2 +- .../requests_counter_test.go | 2 +- .../{client => xdsclient}/singleton.go | 4 +-- .../{client => xdsclient}/tests/README.md | 0 .../tests/client_test.go | 6 ++-- .../{client => xdsclient}/tests/dump_test.go | 4 +-- .../tests/loadreport_test.go | 8 ++--- .../{client => xdsclient}/transport_helper.go | 4 +-- .../{client => xdsclient}/v2/ack_test.go | 2 +- .../{client => xdsclient}/v2/cds_test.go | 2 +- .../{client => xdsclient}/v2/client.go | 2 +- .../{client => xdsclient}/v2/client_test.go | 2 +- .../{client => xdsclient}/v2/eds_test.go | 2 +- .../{client => xdsclient}/v2/lds_test.go | 2 +- .../{client => xdsclient}/v2/loadreport.go | 2 +- .../{client => xdsclient}/v2/rds_test.go | 2 +- .../{client => xdsclient}/v3/client.go | 2 
+- .../{client => xdsclient}/v3/loadreport.go | 2 +- .../{client => xdsclient}/watchers.go | 2 +- .../watchers_cluster_test.go | 2 +- .../watchers_endpoints_test.go | 2 +- .../watchers_listener_test.go | 2 +- .../watchers_route_test.go | 2 +- xds/internal/{client => xdsclient}/xds.go | 2 +- xds/server.go | 4 +-- xds/server_test.go | 4 +-- xds/xds.go | 4 +-- 85 files changed, 164 insertions(+), 168 deletions(-) rename xds/internal/{client => xdsclient}/attributes.go (98%) rename xds/internal/{client => xdsclient}/bootstrap/bootstrap.go (100%) rename xds/internal/{client => xdsclient}/bootstrap/bootstrap_test.go (100%) rename xds/internal/{client => xdsclient}/bootstrap/logging.go (100%) rename xds/internal/{client => xdsclient}/callback.go (99%) rename xds/internal/{client => xdsclient}/cds_test.go (99%) rename xds/internal/{client => xdsclient}/client.go (98%) rename xds/internal/{client => xdsclient}/client_test.go (99%) rename xds/internal/{client => xdsclient}/dump.go (99%) rename xds/internal/{client => xdsclient}/eds_test.go (99%) rename xds/internal/{client => xdsclient}/errors.go (98%) rename xds/internal/{client => xdsclient}/filter_chain.go (99%) rename xds/internal/{client => xdsclient}/filter_chain_test.go (99%) rename xds/internal/{client => xdsclient}/lds_test.go (99%) rename xds/internal/{client => xdsclient}/load/reporter.go (100%) rename xds/internal/{client => xdsclient}/load/store.go (100%) rename xds/internal/{client => xdsclient}/load/store_test.go (100%) rename xds/internal/{client => xdsclient}/loadreport.go (98%) rename xds/internal/{client => xdsclient}/logging.go (98%) rename xds/internal/{client => xdsclient}/rds_test.go (99%) rename xds/internal/{client => xdsclient}/requests_counter.go (99%) rename xds/internal/{client => xdsclient}/requests_counter_test.go (99%) rename xds/internal/{client => xdsclient}/singleton.go (98%) rename xds/internal/{client => xdsclient}/tests/README.md (100%) rename xds/internal/{client => 
xdsclient}/tests/client_test.go (93%) rename xds/internal/{client => xdsclient}/tests/dump_test.go (99%) rename xds/internal/{client => xdsclient}/tests/loadreport_test.go (94%) rename xds/internal/{client => xdsclient}/transport_helper.go (99%) rename xds/internal/{client => xdsclient}/v2/ack_test.go (99%) rename xds/internal/{client => xdsclient}/v2/cds_test.go (99%) rename xds/internal/{client => xdsclient}/v2/client.go (99%) rename xds/internal/{client => xdsclient}/v2/client_test.go (99%) rename xds/internal/{client => xdsclient}/v2/eds_test.go (99%) rename xds/internal/{client => xdsclient}/v2/lds_test.go (99%) rename xds/internal/{client => xdsclient}/v2/loadreport.go (98%) rename xds/internal/{client => xdsclient}/v2/rds_test.go (99%) rename xds/internal/{client => xdsclient}/v3/client.go (99%) rename xds/internal/{client => xdsclient}/v3/loadreport.go (98%) rename xds/internal/{client => xdsclient}/watchers.go (99%) rename xds/internal/{client => xdsclient}/watchers_cluster_test.go (99%) rename xds/internal/{client => xdsclient}/watchers_endpoints_test.go (99%) rename xds/internal/{client => xdsclient}/watchers_listener_test.go (99%) rename xds/internal/{client => xdsclient}/watchers_route_test.go (99%) rename xds/internal/{client => xdsclient}/xds.go (99%) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 468580f0b6a7..d6a20a2f15c0 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -37,21 +37,21 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/types/known/timestamppb" - _ "google.golang.org/grpc/xds/internal/client/v2" // Register v2 xds_client. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register v3 xds_client. 
+ _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register v2 xds_client. + _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register v3 xds_client. ) // xdsClientInterface contains methods from xdsClient.Client which are used by // the server. This is useful for overriding in unit tests. type xdsClientInterface interface { - DumpLDS() (string, map[string]client.UpdateWithMD) - DumpRDS() (string, map[string]client.UpdateWithMD) - DumpCDS() (string, map[string]client.UpdateWithMD) - DumpEDS() (string, map[string]client.UpdateWithMD) + DumpLDS() (string, map[string]xdsclient.UpdateWithMD) + DumpRDS() (string, map[string]xdsclient.UpdateWithMD) + DumpCDS() (string, map[string]xdsclient.UpdateWithMD) + DumpEDS() (string, map[string]xdsclient.UpdateWithMD) BootstrapConfig() *bootstrap.Config Close() } @@ -59,7 +59,7 @@ type xdsClientInterface interface { var ( logger = grpclog.Component("xds") newXDSClient = func() xdsClientInterface { - c, err := client.New() + c, err := xdsclient.New() if err != nil { // If err is not nil, c is a typed nil (of type *xdsclient.Client). // If c is returned and assigned to the xdsClient field in the CSDS @@ -111,7 +111,7 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req * } // buildClientStatusRespForReq fetches the status from the client, and returns -// the response to be sent back to client. +// the response to be sent back to xdsclient. // // If it returns an error, the error is a status error. 
func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { @@ -303,17 +303,17 @@ func (s *ClientStatusDiscoveryServer) buildEDSPerXDSConfig() *v3statuspb.PerXdsC } } -func serviceStatusToProto(serviceStatus client.ServiceStatus) v3adminpb.ClientResourceStatus { +func serviceStatusToProto(serviceStatus xdsclient.ServiceStatus) v3adminpb.ClientResourceStatus { switch serviceStatus { - case client.ServiceStatusUnknown: + case xdsclient.ServiceStatusUnknown: return v3adminpb.ClientResourceStatus_UNKNOWN - case client.ServiceStatusRequested: + case xdsclient.ServiceStatusRequested: return v3adminpb.ClientResourceStatus_REQUESTED - case client.ServiceStatusNotExist: + case xdsclient.ServiceStatusNotExist: return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST - case client.ServiceStatusACKed: + case xdsclient.ServiceStatusACKed: return v3adminpb.ClientResourceStatus_ACKED - case client.ServiceStatusNACKed: + case xdsclient.ServiceStatusNACKed: return v3adminpb.ClientResourceStatus_NACKED default: return v3adminpb.ClientResourceStatus_UNKNOWN diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index a051092a840b..202a86db1851 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -36,10 +36,10 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/xds/internal/client" _ "google.golang.org/grpc/xds/internal/httpfilter/router" xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" + "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/timestamppb" @@ -60,10 +60,10 @@ const ( ) type xdsClientInterfaceWithWatch interface { - WatchListener(string, func(client.ListenerUpdate, error)) func() - 
WatchRouteConfig(string, func(client.RouteConfigUpdate, error)) func() - WatchCluster(string, func(client.ClusterUpdate, error)) func() - WatchEndpoints(string, func(client.EndpointsUpdate, error)) func() + WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func() + WatchCluster(string, func(xdsclient.ClusterUpdate, error)) func() + WatchEndpoints(string, func(xdsclient.EndpointsUpdate, error)) func() } var cmpOpts = cmp.Options{ @@ -174,16 +174,16 @@ func TestCSDS(t *testing.T) { defer cleanup() for _, target := range ldsTargets { - xdsC.WatchListener(target, func(client.ListenerUpdate, error) {}) + xdsC.WatchListener(target, func(xdsclient.ListenerUpdate, error) {}) } for _, target := range rdsTargets { - xdsC.WatchRouteConfig(target, func(client.RouteConfigUpdate, error) {}) + xdsC.WatchRouteConfig(target, func(xdsclient.RouteConfigUpdate, error) {}) } for _, target := range cdsTargets { - xdsC.WatchCluster(target, func(client.ClusterUpdate, error) {}) + xdsC.WatchCluster(target, func(xdsclient.ClusterUpdate, error) {}) } for _, target := range edsTargets { - xdsC.WatchEndpoints(target, func(client.EndpointsUpdate, error) {}) + xdsC.WatchEndpoints(target, func(xdsclient.EndpointsUpdate, error) {}) } for i := 0; i < retryCount; i++ { @@ -270,7 +270,7 @@ func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServ t.Fatal(err) } // Create xds_client. - xdsC, err := client.New() + xdsC, err := xdsclient.New() if err != nil { t.Fatalf("failed to create xds client: %v", err) } diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 4ccec4ec4120..ccf9f152ca7b 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -38,9 +38,9 @@ import ( "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. 
- xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/types/known/structpb" ) diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 524bb82e0f39..ba6167e5b6c1 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -31,8 +31,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" ) diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/xds/internal/balancer/balancergroup/balancergroup.go index 5b6d42a25e44..b86dea50e807 100644 --- a/xds/internal/balancer/balancergroup/balancergroup.go +++ b/xds/internal/balancer/balancergroup/balancergroup.go @@ -24,7 +24,7 @@ import ( "time" orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go index 1ba9195ab1d0..cf9a228ec1fc 100644 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ b/xds/internal/balancer/balancergroup/balancergroup_test.go @@ -44,8 +44,8 @@ import ( "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" - 
"google.golang.org/grpc/xds/internal/client/load" "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) var ( diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 1e6b7a2c08c2..401d990885c1 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -35,8 +35,8 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/edsbalancer" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const ( diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 52b1a05f1362..3cf98dcc3f54 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -36,10 +36,10 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const ( diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index fba3ee1531e9..b0ee84d1fcc7 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -37,10 +37,9 @@ import ( "google.golang.org/grpc/resolver" 
"google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/edsbalancer" - "google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( @@ -606,7 +605,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // Since the counter's max requests was set to 1, the first request should // succeed and the second should fail. - counter := client.GetServiceRequestsCounter(serviceName) + counter := xdsclient.GetServiceRequestsCounter(serviceName) if err := counter.StartRequest(maxRequests); err != nil { t.Fatal(err) } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index 4c13b55594da..dbdcdedf9501 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -20,7 +20,7 @@ import ( "errors" "sync" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index 049cdf55ee6a..886dba416437 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -24,8 +24,8 @@ import ( "testing" "github.com/google/go-cmp/cmp" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 
cfce6f673913..1eea0babdeaa 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -34,10 +34,10 @@ import ( "google.golang.org/grpc/internal" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) const ( @@ -72,7 +72,7 @@ func init() { // TestDropByCategory verifies that the balancer correctly drops the picks, and // that the drops are reported. func TestDropByCategory(t *testing.T) { - defer client.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -230,7 +230,7 @@ func TestDropByCategory(t *testing.T) { // TestDropCircuitBreaking verifies that the balancer correctly drops the picks // due to circuit breaking, and that the drops are reported. func TestDropCircuitBreaking(t *testing.T) { - defer client.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -342,7 +342,7 @@ func TestDropCircuitBreaking(t *testing.T) { // picker after it's closed. Because picker updates are sent in the run() // goroutine. 
func TestPickerUpdateAfterClose(t *testing.T) { - defer client.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -387,7 +387,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { // TestClusterNameInAddressAttributes covers the case that cluster name is // attached to the subconn address attributes. func TestClusterNameInAddressAttributes(t *testing.T) { - defer client.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } @@ -478,7 +478,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { // TestReResolution verifies that when a SubConn turns transient failure, // re-resolution is triggered. func TestReResolution(t *testing.T) { - defer client.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index e72b867e2f25..7671fcff17df 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -38,8 +38,8 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/loadstore" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) const ( diff --git a/xds/internal/balancer/clusterimpl/picker.go 
b/xds/internal/balancer/clusterimpl/picker.go index 7a31615510d3..a03b89179ee4 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -24,8 +24,8 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) // NewRandomWRR is used when calculating drops. It's exported so that tests can @@ -75,7 +75,7 @@ type dropPicker struct { drops []*dropper s balancer.State loadStore loadReporter - counter *client.ServiceRequestsCounter + counter *xdsclient.ServiceRequestsCounter countMax uint32 } diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go index 2e67d54dadb6..c75ecddd76d8 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go @@ -42,7 +42,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/lrs" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) const million = 1000000 diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go index 273dc6233478..389db349b21d 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/lrs" "google.golang.org/grpc/xds/internal/balancer/priority" 
"google.golang.org/grpc/xds/internal/balancer/weightedtarget" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( diff --git a/xds/internal/balancer/edsbalancer/configbuilder.go b/xds/internal/balancer/edsbalancer/configbuilder.go index 1e08a05e2048..7418a9de8024 100644 --- a/xds/internal/balancer/edsbalancer/configbuilder.go +++ b/xds/internal/balancer/edsbalancer/configbuilder.go @@ -22,7 +22,7 @@ import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) const million = 1000000 diff --git a/xds/internal/balancer/edsbalancer/configbuilder_test.go b/xds/internal/balancer/edsbalancer/configbuilder_test.go index 9425b2363a52..f7d2955d2ad4 100644 --- a/xds/internal/balancer/edsbalancer/configbuilder_test.go +++ b/xds/internal/balancer/edsbalancer/configbuilder_test.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/priority" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 21e3f43be74e..3a38bdf1abab 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -35,8 +35,8 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/serviceconfig" - xdsclient 
"google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) const edsName = "eds_experimental" diff --git a/xds/internal/balancer/edsbalancer/eds_impl.go b/xds/internal/balancer/edsbalancer/eds_impl.go index 63b75caae8f9..db11dec6f2f8 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl.go +++ b/xds/internal/balancer/edsbalancer/eds_impl.go @@ -37,9 +37,8 @@ import ( xdsi "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" - "google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) // TODO: make this a environment variable? @@ -102,7 +101,7 @@ type edsBalancerImpl struct { dropConfig []xdsclient.OverloadDropConfig drops []*dropper innerState balancer.State // The state of the picker without drop support. 
- serviceRequestsCounter *client.ServiceRequestsCounter + serviceRequestsCounter *xdsclient.ServiceRequestsCounter serviceRequestCountMax uint32 clusterNameMu sync.Mutex @@ -425,7 +424,7 @@ func (edsImpl *edsBalancerImpl) updateServiceRequestsConfig(serviceName string, edsImpl.pickerMu.Lock() var updatePicker bool if edsImpl.serviceRequestsCounter == nil || edsImpl.serviceRequestsCounter.ServiceName != serviceName { - edsImpl.serviceRequestsCounter = client.GetServiceRequestsCounter(serviceName) + edsImpl.serviceRequestsCounter = xdsclient.GetServiceRequestsCounter(serviceName) updatePicker = true } @@ -540,11 +539,11 @@ type dropPicker struct { drops []*dropper p balancer.Picker loadStore load.PerClusterReporter - counter *client.ServiceRequestsCounter + counter *xdsclient.ServiceRequestsCounter countMax uint32 } -func newDropPicker(p balancer.Picker, drops []*dropper, loadStore load.PerClusterReporter, counter *client.ServiceRequestsCounter, countMax uint32) *dropPicker { +func newDropPicker(p balancer.Picker, drops []*dropper, loadStore load.PerClusterReporter, counter *xdsclient.ServiceRequestsCounter, countMax uint32) *dropPicker { return &dropPicker{ drops: drops, p: p, diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index 3cfc620a2400..ec60dc1b1e60 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -38,10 +38,9 @@ import ( "google.golang.org/grpc/internal/xds/env" xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) var ( @@ -835,7 +834,7 @@ func (s) 
TestEDS_LoadReport(t *testing.T) { ) var maxRequestsTemp uint32 = cbMaxRequests edsb.updateServiceRequestsConfig(testServiceName, &maxRequestsTemp) - defer client.ClearCounterForTesting(testServiceName) + defer xdsclient.ClearCounterForTesting(testServiceName) backendToBalancerID := make(map[balancer.SubConn]xdsinternal.LocalityID) diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 3fe66098973c..b7ec9e9c4526 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -41,11 +41,11 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" - _ "google.golang.org/grpc/xds/internal/client/v2" // V2 client registration. + _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // V2 client registration. ) const ( diff --git a/xds/internal/balancer/edsbalancer/eds_testutil.go b/xds/internal/balancer/edsbalancer/eds_testutil.go index 5e37cdcb47c7..f6d86e950d9a 100644 --- a/xds/internal/balancer/edsbalancer/eds_testutil.go +++ b/xds/internal/balancer/edsbalancer/eds_testutil.go @@ -26,7 +26,7 @@ import ( endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" typepb "github.com/envoyproxy/go-control-plane/envoy/type" "google.golang.org/grpc/xds/internal" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) // parseEDSRespProtoForTesting parses EDS response, and panic if parsing fails. 
diff --git a/xds/internal/balancer/edsbalancer/util.go b/xds/internal/balancer/edsbalancer/util.go index 132950426466..fa9ada6c9dd6 100644 --- a/xds/internal/balancer/edsbalancer/util.go +++ b/xds/internal/balancer/edsbalancer/util.go @@ -18,7 +18,7 @@ package edsbalancer import ( "google.golang.org/grpc/internal/wrr" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) var newRandomWRR = wrr.NewRandom diff --git a/xds/internal/balancer/edsbalancer/util_test.go b/xds/internal/balancer/edsbalancer/util_test.go index b94905d49889..2d08091fdfb5 100644 --- a/xds/internal/balancer/edsbalancer/util_test.go +++ b/xds/internal/balancer/edsbalancer/util_test.go @@ -21,8 +21,8 @@ package edsbalancer import ( "testing" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" ) func init() { diff --git a/xds/internal/balancer/loadstore/load_store_wrapper.go b/xds/internal/balancer/loadstore/load_store_wrapper.go index 88fa344118cc..8ce958d71ca8 100644 --- a/xds/internal/balancer/loadstore/load_store_wrapper.go +++ b/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -22,7 +22,7 @@ package loadstore import ( "sync" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) // NewWrapper creates a Wrapper. 
diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index 5b044f480345..916bd0939b88 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -28,8 +28,8 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/loadstore" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) func init() { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index c132e912f92a..1c6db707c6a3 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -55,9 +55,9 @@ import ( tpb "github.com/envoyproxy/go-control-plane/envoy/type/v3" testpb "google.golang.org/grpc/test/grpc_testing" - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register the v3 xDS API client. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register the v3 xDS API client. 
) type s struct { diff --git a/xds/internal/resolver/matcher.go b/xds/internal/resolver/matcher.go index e329944e1db1..42a843deecaa 100644 --- a/xds/internal/resolver/matcher.go +++ b/xds/internal/resolver/matcher.go @@ -27,7 +27,7 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/metadata" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 49ee8970d7d8..31941fdc022c 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -31,9 +31,9 @@ import ( "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/clustermanager" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index f29fb32832e0..71c2bbf70842 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) // serviceUpdate contains information received from the LDS/RDS responses which diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 421e5345a9d2..31c45bf3977f 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -29,8 +29,8 @@ import ( "github.com/google/go-cmp/cmp" 
"github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/protobuf/proto" ) diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 41e3899c285e..1519158a96c7 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -28,10 +28,10 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" iresolver "google.golang.org/grpc/internal/resolver" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) const xdsScheme = "xds" diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index eca561f369c5..c7bf7c0a1092 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -44,13 +44,12 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config "google.golang.org/grpc/xds/internal/balancer/clustermanager" - "google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const ( @@ -272,7 +271,7 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t 
*testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -331,7 +330,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantClusters map[string]bool }{ { - routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, wantJSON: `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ @@ -343,7 +342,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantClusters: map[string]bool{"test-cluster-1": true}, }, { - routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }}}, @@ -367,7 +366,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true}, }, { - routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }}}, @@ -464,7 +463,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: 
[]*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -524,7 +523,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -635,7 +634,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ "A": {Weight: 5}, "B": {Weight: 10}, }}}, @@ -696,7 +695,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{ + Routes: []*xdsclient.Route{{ Prefix: newStringP("/foo"), WeightedClusters: map[string]xdsclient.WeightedCluster{"A": {Weight: 1}}, MaxStreamDuration: newDurationP(5 * time.Second), @@ -796,7 +795,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -846,7 +845,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), 
WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) @@ -856,7 +855,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) @@ -895,7 +894,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) @@ -954,7 +953,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -1229,7 +1228,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{ + Routes: []*xdsclient.Route{{ Prefix: newStringP("1"), WeightedClusters: map[string]xdsclient.WeightedCluster{ "A": {Weight: 1}, "B": {Weight: 1}, diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index a02d75b21445..0618a6cd4a4c 100644 --- a/xds/internal/server/conn_wrapper.go 
+++ b/xds/internal/server/conn_wrapper.go @@ -27,7 +27,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" xdsinternal "google.golang.org/grpc/internal/credentials/xds" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) // connWrapper is a thin wrapper around a net.Conn returned by Accept(). It diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 17f31f28f576..7e5f7071a808 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -31,8 +31,8 @@ import ( internalbackoff "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) var ( diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index b22f647a93ca..848793ccdb7f 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -34,8 +34,8 @@ import ( wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index f85f0ecbf3f3..ce0996cb8e98 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -23,9 +23,9 @@ import ( "context" "google.golang.org/grpc/internal/testutils" - xdsclient 
"google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) // Client is a fake implementation of an xds client. It exposes a bunch of diff --git a/xds/internal/client/attributes.go b/xds/internal/xdsclient/attributes.go similarity index 98% rename from xds/internal/client/attributes.go rename to xds/internal/xdsclient/attributes.go index 50b988245291..99060177e1e3 100644 --- a/xds/internal/client/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -15,7 +15,7 @@ * */ -package client +package xdsclient import "google.golang.org/grpc/resolver" diff --git a/xds/internal/client/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go similarity index 100% rename from xds/internal/client/bootstrap/bootstrap.go rename to xds/internal/xdsclient/bootstrap/bootstrap.go diff --git a/xds/internal/client/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go similarity index 100% rename from xds/internal/client/bootstrap/bootstrap_test.go rename to xds/internal/xdsclient/bootstrap/bootstrap_test.go diff --git a/xds/internal/client/bootstrap/logging.go b/xds/internal/xdsclient/bootstrap/logging.go similarity index 100% rename from xds/internal/client/bootstrap/logging.go rename to xds/internal/xdsclient/bootstrap/logging.go diff --git a/xds/internal/client/callback.go b/xds/internal/xdsclient/callback.go similarity index 99% rename from xds/internal/client/callback.go rename to xds/internal/xdsclient/callback.go index ac6f0151a5f9..64b7d6794c40 100644 --- a/xds/internal/client/callback.go +++ b/xds/internal/xdsclient/callback.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import "google.golang.org/grpc/internal/pretty" diff --git 
a/xds/internal/client/cds_test.go b/xds/internal/xdsclient/cds_test.go similarity index 99% rename from xds/internal/client/cds_test.go rename to xds/internal/xdsclient/cds_test.go index 82b92372afc3..83b9071ad134 100644 --- a/xds/internal/client/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "regexp" diff --git a/xds/internal/client/client.go b/xds/internal/xdsclient/client.go similarity index 98% rename from xds/internal/client/client.go rename to xds/internal/xdsclient/client.go index 4ed1c3a5833c..ac832f205d59 100644 --- a/xds/internal/client/client.go +++ b/xds/internal/xdsclient/client.go @@ -16,9 +16,9 @@ * */ -// Package client implements a full fledged gRPC client for the xDS API used by -// the xds resolver and balancer implementations. -package client +// Package xdsclient implements a full fledged gRPC client for the xDS API used +// by the xds resolver and balancer implementations. +package xdsclient import ( "context" @@ -34,8 +34,8 @@ import ( "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/xds/matcher" - "google.golang.org/grpc/xds/internal/client/load" "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc" "google.golang.org/grpc/internal/backoff" @@ -44,8 +44,8 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) var ( diff --git a/xds/internal/client/client_test.go b/xds/internal/xdsclient/client_test.go similarity index 99% rename from xds/internal/client/client_test.go rename to xds/internal/xdsclient/client_test.go index d57bc20e2c2c..c1d4b38e576a 100644 --- a/xds/internal/client/client_test.go +++ 
b/xds/internal/xdsclient/client_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "context" @@ -34,9 +34,9 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/client/bootstrap" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" ) diff --git a/xds/internal/client/dump.go b/xds/internal/xdsclient/dump.go similarity index 99% rename from xds/internal/client/dump.go rename to xds/internal/xdsclient/dump.go index 3fd18f6103b3..db9b474f370d 100644 --- a/xds/internal/client/dump.go +++ b/xds/internal/xdsclient/dump.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import anypb "github.com/golang/protobuf/ptypes/any" diff --git a/xds/internal/client/eds_test.go b/xds/internal/xdsclient/eds_test.go similarity index 99% rename from xds/internal/client/eds_test.go rename to xds/internal/xdsclient/eds_test.go index 467f25269cdf..ebd26a40f957 100644 --- a/xds/internal/client/eds_test.go +++ b/xds/internal/xdsclient/eds_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/xds/internal/client/errors.go b/xds/internal/xdsclient/errors.go similarity index 98% rename from xds/internal/client/errors.go rename to xds/internal/xdsclient/errors.go index 34ae2738db00..4d6cdaaf9b4a 100644 --- a/xds/internal/client/errors.go +++ b/xds/internal/xdsclient/errors.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import "fmt" diff --git a/xds/internal/client/filter_chain.go b/xds/internal/xdsclient/filter_chain.go similarity index 99% rename from xds/internal/client/filter_chain.go rename to xds/internal/xdsclient/filter_chain.go index 66d26d03b634..7303ebd3ce70 100644 --- 
a/xds/internal/client/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "errors" diff --git a/xds/internal/client/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go similarity index 99% rename from xds/internal/client/filter_chain_test.go rename to xds/internal/xdsclient/filter_chain_test.go index c68e22286763..25390b7f9248 100644 --- a/xds/internal/client/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/xds/internal/client/lds_test.go b/xds/internal/xdsclient/lds_test.go similarity index 99% rename from xds/internal/client/lds_test.go rename to xds/internal/xdsclient/lds_test.go index ad9af4c885a2..1667698bd578 100644 --- a/xds/internal/client/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/xds/internal/client/load/reporter.go b/xds/internal/xdsclient/load/reporter.go similarity index 100% rename from xds/internal/client/load/reporter.go rename to xds/internal/xdsclient/load/reporter.go diff --git a/xds/internal/client/load/store.go b/xds/internal/xdsclient/load/store.go similarity index 100% rename from xds/internal/client/load/store.go rename to xds/internal/xdsclient/load/store.go diff --git a/xds/internal/client/load/store_test.go b/xds/internal/xdsclient/load/store_test.go similarity index 100% rename from xds/internal/client/load/store_test.go rename to xds/internal/xdsclient/load/store_test.go diff --git a/xds/internal/client/loadreport.go b/xds/internal/xdsclient/loadreport.go similarity index 98% rename from xds/internal/client/loadreport.go rename to xds/internal/xdsclient/loadreport.go index be42a6e0c383..32a71dada7fb 100644 --- a/xds/internal/client/loadreport.go +++ b/xds/internal/xdsclient/loadreport.go @@ -15,13 +15,13 @@ * limitations under the License. 
*/ -package client +package xdsclient import ( "context" "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) // ReportLoad starts an load reporting stream to the given server. If the server diff --git a/xds/internal/client/logging.go b/xds/internal/xdsclient/logging.go similarity index 98% rename from xds/internal/client/logging.go rename to xds/internal/xdsclient/logging.go index bff3fb1d3df3..e28ea0d04103 100644 --- a/xds/internal/client/logging.go +++ b/xds/internal/xdsclient/logging.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/xds/internal/client/rds_test.go b/xds/internal/xdsclient/rds_test.go similarity index 99% rename from xds/internal/client/rds_test.go rename to xds/internal/xdsclient/rds_test.go index 33bcc2ad13f8..660a2f29d21a 100644 --- a/xds/internal/client/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/xds/internal/client/requests_counter.go b/xds/internal/xdsclient/requests_counter.go similarity index 99% rename from xds/internal/client/requests_counter.go rename to xds/internal/xdsclient/requests_counter.go index f033e1920991..dacbc8fadb60 100644 --- a/xds/internal/client/requests_counter.go +++ b/xds/internal/xdsclient/requests_counter.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/xds/internal/client/requests_counter_test.go b/xds/internal/xdsclient/requests_counter_test.go similarity index 99% rename from xds/internal/client/requests_counter_test.go rename to xds/internal/xdsclient/requests_counter_test.go index 30892fc747a0..f444e8f163e6 100644 --- a/xds/internal/client/requests_counter_test.go +++ b/xds/internal/xdsclient/requests_counter_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "sync" diff --git a/xds/internal/client/singleton.go 
b/xds/internal/xdsclient/singleton.go similarity index 98% rename from xds/internal/client/singleton.go rename to xds/internal/xdsclient/singleton.go index 41dd16e26afe..8d0e10f2c31a 100644 --- a/xds/internal/client/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "bytes" @@ -25,7 +25,7 @@ import ( "sync" "time" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const defaultWatchExpiryTimeout = 15 * time.Second diff --git a/xds/internal/client/tests/README.md b/xds/internal/xdsclient/tests/README.md similarity index 100% rename from xds/internal/client/tests/README.md rename to xds/internal/xdsclient/tests/README.md diff --git a/xds/internal/client/tests/client_test.go b/xds/internal/xdsclient/tests/client_test.go similarity index 93% rename from xds/internal/client/tests/client_test.go rename to xds/internal/xdsclient/tests/client_test.go index 755f0e05ea45..26c64bacd034 100644 --- a/xds/internal/client/tests/client_test.go +++ b/xds/internal/xdsclient/tests/client_test.go @@ -27,11 +27,11 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 API client. "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 API client. 
) type s struct { diff --git a/xds/internal/client/tests/dump_test.go b/xds/internal/xdsclient/tests/dump_test.go similarity index 99% rename from xds/internal/client/tests/dump_test.go rename to xds/internal/xdsclient/tests/dump_test.go index 815850973e3d..541f5901c121 100644 --- a/xds/internal/client/tests/dump_test.go +++ b/xds/internal/xdsclient/tests/dump_test.go @@ -39,9 +39,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const defaultTestWatchExpiryTimeout = 500 * time.Millisecond diff --git a/xds/internal/client/tests/loadreport_test.go b/xds/internal/xdsclient/tests/loadreport_test.go similarity index 94% rename from xds/internal/client/tests/loadreport_test.go rename to xds/internal/xdsclient/tests/loadreport_test.go index b1ec37294771..1815f8a0ab43 100644 --- a/xds/internal/client/tests/loadreport_test.go +++ b/xds/internal/xdsclient/tests/loadreport_test.go @@ -34,13 +34,13 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" - _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 xDS API client. + _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 xDS API client. 
) const ( @@ -56,7 +56,7 @@ func (s) TestLRSClient(t *testing.T) { } defer sCleanup() - xdsC, err := client.NewWithConfigForTesting(&bootstrap.Config{ + xdsC, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ BalancerName: fs.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: &v2corepb.Node{}, diff --git a/xds/internal/client/transport_helper.go b/xds/internal/xdsclient/transport_helper.go similarity index 99% rename from xds/internal/client/transport_helper.go rename to xds/internal/xdsclient/transport_helper.go index 671e5b3220f1..1e3caa606d1d 100644 --- a/xds/internal/client/transport_helper.go +++ b/xds/internal/xdsclient/transport_helper.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "context" @@ -24,7 +24,7 @@ import ( "time" "github.com/golang/protobuf/proto" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc" "google.golang.org/grpc/internal/buffer" diff --git a/xds/internal/client/v2/ack_test.go b/xds/internal/xdsclient/v2/ack_test.go similarity index 99% rename from xds/internal/client/v2/ack_test.go rename to xds/internal/xdsclient/v2/ack_test.go index 53c8cef189d5..1c41cd9d1ad8 100644 --- a/xds/internal/client/v2/ack_test.go +++ b/xds/internal/xdsclient/v2/ack_test.go @@ -33,9 +33,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( diff --git a/xds/internal/client/v2/cds_test.go b/xds/internal/xdsclient/v2/cds_test.go similarity index 99% rename from xds/internal/client/v2/cds_test.go rename to xds/internal/xdsclient/v2/cds_test.go index ba7f627b25ff..da4738460930 100644 --- a/xds/internal/client/v2/cds_test.go +++ 
b/xds/internal/xdsclient/v2/cds_test.go @@ -28,8 +28,8 @@ import ( corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" anypb "github.com/golang/protobuf/ptypes/any" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( diff --git a/xds/internal/client/v2/client.go b/xds/internal/xdsclient/v2/client.go similarity index 99% rename from xds/internal/client/v2/client.go rename to xds/internal/xdsclient/v2/client.go index b91482d13cf9..311621f0405c 100644 --- a/xds/internal/client/v2/client.go +++ b/xds/internal/xdsclient/v2/client.go @@ -28,8 +28,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" diff --git a/xds/internal/client/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go similarity index 99% rename from xds/internal/client/v2/client_test.go rename to xds/internal/xdsclient/v2/client_test.go index 371375f3ee5c..efa228a8c6e3 100644 --- a/xds/internal/client/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -37,9 +37,9 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/protobuf/testing/protocmp" xdspb 
"github.com/envoyproxy/go-control-plane/envoy/api/v2" diff --git a/xds/internal/client/v2/eds_test.go b/xds/internal/xdsclient/v2/eds_test.go similarity index 99% rename from xds/internal/client/v2/eds_test.go rename to xds/internal/xdsclient/v2/eds_test.go index 08e75d373017..7338ec80a5e5 100644 --- a/xds/internal/client/v2/eds_test.go +++ b/xds/internal/xdsclient/v2/eds_test.go @@ -28,9 +28,9 @@ import ( anypb "github.com/golang/protobuf/ptypes/any" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" - xdsclient "google.golang.org/grpc/xds/internal/client" xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" ) var ( diff --git a/xds/internal/client/v2/lds_test.go b/xds/internal/xdsclient/v2/lds_test.go similarity index 99% rename from xds/internal/client/v2/lds_test.go rename to xds/internal/xdsclient/v2/lds_test.go index 22fa35d5e51e..e6f1d8f1cf02 100644 --- a/xds/internal/client/v2/lds_test.go +++ b/xds/internal/xdsclient/v2/lds_test.go @@ -26,7 +26,7 @@ import ( v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) // TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it, diff --git a/xds/internal/client/v2/loadreport.go b/xds/internal/xdsclient/v2/loadreport.go similarity index 98% rename from xds/internal/client/v2/loadreport.go rename to xds/internal/xdsclient/v2/loadreport.go index 17ea8c8d4c43..77db5eb9d8d6 100644 --- a/xds/internal/client/v2/loadreport.go +++ b/xds/internal/xdsclient/v2/loadreport.go @@ -27,7 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/client/load" + 
"google.golang.org/grpc/xds/internal/xdsclient/load" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v2endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" diff --git a/xds/internal/client/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go similarity index 99% rename from xds/internal/client/v2/rds_test.go rename to xds/internal/xdsclient/v2/rds_test.go index 12495428bf95..745308c4e5b6 100644 --- a/xds/internal/client/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -27,8 +27,8 @@ import ( xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils/fakeserver" + "google.golang.org/grpc/xds/internal/xdsclient" ) // doLDS makes a LDS watch, and waits for the response and ack to finish. diff --git a/xds/internal/client/v3/client.go b/xds/internal/xdsclient/v3/client.go similarity index 99% rename from xds/internal/client/v3/client.go rename to xds/internal/xdsclient/v3/client.go index 200da2ac7d73..be8ff7720d89 100644 --- a/xds/internal/client/v3/client.go +++ b/xds/internal/xdsclient/v3/client.go @@ -29,8 +29,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" diff --git a/xds/internal/client/v3/loadreport.go b/xds/internal/xdsclient/v3/loadreport.go similarity index 98% rename from xds/internal/client/v3/loadreport.go rename to xds/internal/xdsclient/v3/loadreport.go index 1de0ccf57503..147751baab03 100644 --- a/xds/internal/client/v3/loadreport.go +++ 
b/xds/internal/xdsclient/v3/loadreport.go @@ -27,7 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" diff --git a/xds/internal/client/watchers.go b/xds/internal/xdsclient/watchers.go similarity index 99% rename from xds/internal/client/watchers.go rename to xds/internal/xdsclient/watchers.go index 446f5cca9973..249db4de91e4 100644 --- a/xds/internal/client/watchers.go +++ b/xds/internal/xdsclient/watchers.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/xds/internal/client/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go similarity index 99% rename from xds/internal/client/watchers_cluster_test.go rename to xds/internal/xdsclient/watchers_cluster_test.go index c9837cd51978..8c33486fa017 100644 --- a/xds/internal/client/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "context" diff --git a/xds/internal/client/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go similarity index 99% rename from xds/internal/client/watchers_endpoints_test.go rename to xds/internal/xdsclient/watchers_endpoints_test.go index bff4544d2679..70f06514d9f6 100644 --- a/xds/internal/client/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "context" diff --git a/xds/internal/client/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go similarity index 99% rename from 
xds/internal/client/watchers_listener_test.go rename to xds/internal/xdsclient/watchers_listener_test.go index fdd4ebd163fa..79ef997a72df 100644 --- a/xds/internal/client/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "context" diff --git a/xds/internal/client/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go similarity index 99% rename from xds/internal/client/watchers_route_test.go rename to xds/internal/xdsclient/watchers_route_test.go index 41640b85b574..08b035a0b0a3 100644 --- a/xds/internal/client/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -18,7 +18,7 @@ * */ -package client +package xdsclient import ( "context" diff --git a/xds/internal/client/xds.go b/xds/internal/xdsclient/xds.go similarity index 99% rename from xds/internal/client/xds.go rename to xds/internal/xdsclient/xds.go index b95224113237..44fd883e3f36 100644 --- a/xds/internal/client/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "errors" diff --git a/xds/server.go b/xds/server.go index b83a073ac1c5..86a40fcc8892 100644 --- a/xds/server.go +++ b/xds/server.go @@ -33,9 +33,9 @@ import ( "google.golang.org/grpc/internal/buffer" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/server" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const serverPrefix = "[xds-server %p] " diff --git a/xds/server_test.go b/xds/server_test.go index e16ac36b01f2..7e6aa7917a26 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -39,10 +39,10 @@ import ( "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/grpctest" 
"google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const ( diff --git a/xds/xds.go b/xds/xds.go index bbd3fe543212..864d2e6a2e3a 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -43,10 +43,10 @@ import ( _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 xDS API client. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register the v3 xDS API client. _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. xdsresolver "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 xDS API client. + _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register the v3 xDS API client. 
) func init() { From 7beddeea913bd74a9d3b4e7ec49f0265a0ac7b88 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 4 Jun 2021 08:58:26 -0700 Subject: [PATCH 119/998] cleanup: remove "Interface" as suffix of (almost all) interface names (#4512) --- internal/xds/matcher/matcher_header.go | 8 ++--- internal/xds/matcher/matcher_header_test.go | 2 +- internal/xds/rbac/matchers.go | 4 +-- xds/csds/csds.go | 8 ++--- xds/csds/csds_test.go | 8 ++--- xds/googledirectpath/googlec2p.go | 6 ++-- xds/googledirectpath/googlec2p_test.go | 2 +- .../balancer/cdsbalancer/cdsbalancer.go | 8 ++--- .../cdsbalancer/cdsbalancer_security_test.go | 2 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 2 +- .../balancer/cdsbalancer/cluster_handler.go | 4 +-- .../balancer/clusterimpl/balancer_test.go | 10 +++--- .../balancer/clusterimpl/clusterimpl.go | 8 ++--- xds/internal/balancer/edsbalancer/eds.go | 8 ++--- .../balancer/edsbalancer/eds_impl_test.go | 2 +- xds/internal/balancer/edsbalancer/eds_test.go | 2 +- .../balancer/edsbalancer/xds_lrs_test.go | 2 +- xds/internal/balancer/lrs/balancer.go | 10 +++--- xds/internal/balancer/lrs/balancer_test.go | 2 +- xds/internal/resolver/matcher.go | 20 +++++------ xds/internal/resolver/matcher_path.go | 2 +- xds/internal/resolver/matcher_test.go | 6 ++-- xds/internal/resolver/watch_service.go | 4 +-- xds/internal/resolver/xds_resolver.go | 12 +++---- xds/internal/resolver/xds_resolver_test.go | 34 +++++++++---------- xds/internal/server/conn_wrapper.go | 2 +- xds/internal/server/listener_wrapper.go | 18 +++++----- xds/internal/server/listener_wrapper_test.go | 2 +- xds/server.go | 18 +++++----- xds/server_test.go | 14 ++++---- 30 files changed, 115 insertions(+), 115 deletions(-) diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go index 9ae6ffc2288b..35a22adadcf2 100644 --- a/internal/xds/matcher/matcher_header.go +++ b/internal/xds/matcher/matcher_header.go @@ -27,10 +27,10 @@ import ( 
"google.golang.org/grpc/metadata" ) -// HeaderMatcherInterface is an interface for header matchers. These are +// HeaderMatcher is an interface for header matchers. These are // documented in (EnvoyProxy link here?). These matchers will match on different // aspects of HTTP header name/value pairs. -type HeaderMatcherInterface interface { +type HeaderMatcher interface { Match(metadata.MD) bool String() string } @@ -234,11 +234,11 @@ func (hcm *HeaderContainsMatcher) String() string { // InvertMatcher inverts the match result of the underlying header matcher. type InvertMatcher struct { - m HeaderMatcherInterface + m HeaderMatcher } // NewInvertMatcher returns a new InvertMatcher. -func NewInvertMatcher(m HeaderMatcherInterface) *InvertMatcher { +func NewInvertMatcher(m HeaderMatcher) *InvertMatcher { return &InvertMatcher{m: m} } diff --git a/internal/xds/matcher/matcher_header_test.go b/internal/xds/matcher/matcher_header_test.go index 911e7bcfaca1..902b8112e889 100644 --- a/internal/xds/matcher/matcher_header_test.go +++ b/internal/xds/matcher/matcher_header_test.go @@ -309,7 +309,7 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { func TestInvertMatcherMatch(t *testing.T) { tests := []struct { name string - m HeaderMatcherInterface + m HeaderMatcher md metadata.MD }{ { diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 12f6e6cd04b4..47be35c1d0d7 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -251,11 +251,11 @@ func (nm *notMatcher) match(data *RPCData) bool { // headerMatcher is a matcher that matches on incoming HTTP Headers present // in the incoming RPC. headerMatcher implements the matcher interface. 
type headerMatcher struct { - matcher internalmatcher.HeaderMatcherInterface + matcher internalmatcher.HeaderMatcher } func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) (*headerMatcher, error) { - var m internalmatcher.HeaderMatcherInterface + var m internalmatcher.HeaderMatcher switch headerMatcherConfig.HeaderMatchSpecifier.(type) { case *v3route_componentspb.HeaderMatcher_ExactMatch: m = internalmatcher.NewHeaderExactMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetExactMatch()) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index d6a20a2f15c0..d32bebac81bc 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -45,9 +45,9 @@ import ( _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register v3 xds_client. ) -// xdsClientInterface contains methods from xdsClient.Client which are used by +// xdsClient contains methods from xdsClient.Client which are used by // the server. This is useful for overriding in unit tests. -type xdsClientInterface interface { +type xdsClient interface { DumpLDS() (string, map[string]xdsclient.UpdateWithMD) DumpRDS() (string, map[string]xdsclient.UpdateWithMD) DumpCDS() (string, map[string]xdsclient.UpdateWithMD) @@ -58,7 +58,7 @@ type xdsClientInterface interface { var ( logger = grpclog.Component("xds") - newXDSClient = func() xdsClientInterface { + newXDSClient = func() xdsClient { c, err := xdsclient.New() if err != nil { // If err is not nil, c is a typed nil (of type *xdsclient.Client). @@ -76,7 +76,7 @@ var ( type ClientStatusDiscoveryServer struct { // xdsClient will always be the same in practice. But we keep a copy in each // server instance for testing. 
- xdsClient xdsClientInterface + xdsClient xdsClient } // NewClientStatusDiscoveryServer returns an implementation of the CSDS server that can be diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 202a86db1851..7f0e90bebc1e 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -59,7 +59,7 @@ const ( defaultTestTimeout = 10 * time.Second ) -type xdsClientInterfaceWithWatch interface { +type xdsClientWithWatch interface { WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func() WatchCluster(string, func(xdsclient.ClusterUpdate, error)) func() @@ -250,7 +250,7 @@ func TestCSDS(t *testing.T) { } } -func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { +func commonSetup(t *testing.T) (xdsClientWithWatch, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { t.Helper() // Spin up a xDS management server on a local port. @@ -275,7 +275,7 @@ func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServ t.Fatalf("failed to create xds client: %v", err) } oldNewXDSClient := newXDSClient - newXDSClient = func() xdsClientInterface { return xdsC } + newXDSClient = func() xdsClient { return xdsC } // Initialize an gRPC server and register CSDS on it. server := grpc.NewServer() @@ -635,7 +635,7 @@ func protoToJSON(p proto.Message) string { func TestCSDSNoXDSClient(t *testing.T) { oldNewXDSClient := newXDSClient - newXDSClient = func() xdsClientInterface { return nil } + newXDSClient = func() xdsClient { return nil } defer func() { newXDSClient = oldNewXDSClient }() // Initialize an gRPC server and register CSDS on it. 
diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index ccf9f152ca7b..b514f03bfbfa 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -61,7 +61,7 @@ const ( dnsName, xdsName = "dns", "xds" ) -type xdsClientInterface interface { +type xdsClient interface { Close() } @@ -69,7 +69,7 @@ type xdsClientInterface interface { var ( onGCE = googlecloud.OnGCE - newClientWithConfig = func(config *bootstrap.Config) (xdsClientInterface, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsClient, error) { return xdsclient.NewWithConfig(config) } @@ -138,7 +138,7 @@ func (c2pResolverBuilder) Scheme() string { type c2pResolver struct { resolver.Resolver - client xdsClientInterface + client xdsClient } func (r *c2pResolver) Close() { diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index ba6167e5b6c1..5b8085ef34c3 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -177,7 +177,7 @@ func TestBuildXDS(t *testing.T) { configCh := make(chan *bootstrap.Config, 1) oldNewClient := newClientWithConfig - newClientWithConfig = func(config *bootstrap.Config) (xdsClientInterface, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsClient, error) { configCh <- config return tXDSClient, nil } diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 401d990885c1..d8c4f5eaac26 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -59,7 +59,7 @@ var ( // not deal with subConns. 
return builder.Build(cc, opts), nil } - newXDSClient func() (xdsClientInterface, error) + newXDSClient func() (xdsClient, error) buildProvider = buildProviderFunc ) @@ -138,9 +138,9 @@ func (cdsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, return &cfg, nil } -// xdsClientInterface contains methods from xdsClient.Client which are used by +// xdsClient contains methods from xdsClient.Client which are used by // the cdsBalancer. This will be faked out in unittests. -type xdsClientInterface interface { +type xdsClient interface { WatchCluster(string, func(xdsclient.ClusterUpdate, error)) func() BootstrapConfig() *bootstrap.Config Close() @@ -185,7 +185,7 @@ type cdsBalancer struct { ccw *ccWrapper // ClientConn interface passed to child LB. bOpts balancer.BuildOptions // BuildOptions passed to child LB. updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. - xdsClient xdsClientInterface // xDS client to watch Cluster resource. + xdsClient xdsClient // xDS client to watch Cluster resource. cancelWatch func() // Cluster watch cancel func. edsLB balancer.Balancer // EDS child policy. 
clusterToWatch string diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 3cf98dcc3f54..9964b9de925c 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -136,7 +136,7 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } builder := balancer.Get(cdsName) if builder == nil { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index b0ee84d1fcc7..5c5161807be3 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -214,7 +214,7 @@ func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *x xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } builder := balancer.Get(cdsName) if builder == nil { diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index dbdcdedf9501..c38d1a6c31a6 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -40,7 +40,7 @@ type clusterHandler struct { // CDS Balancer cares about is the most recent update. updateChannel chan clusterHandlerUpdate - xdsClient xdsClientInterface + xdsClient xdsClient } func (ch *clusterHandler) updateRootCluster(rootClusterName string) { @@ -112,7 +112,7 @@ type clusterNode struct { // CreateClusterNode creates a cluster node from a given clusterName. 
This will // also start the watch for that cluster. -func createClusterNode(clusterName string, xdsClient xdsClientInterface, topLevelHandler *clusterHandler) *clusterNode { +func createClusterNode(clusterName string, xdsClient xdsClient, topLevelHandler *clusterHandler) *clusterNode { c := &clusterNode{ clusterHandler: topLevelHandler, } diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 1eea0babdeaa..404dfb22d005 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -75,7 +75,7 @@ func TestDropByCategory(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() builder := balancer.Get(Name) @@ -233,7 +233,7 @@ func TestDropCircuitBreaking(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() builder := balancer.Get(Name) @@ -345,7 +345,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() builder := balancer.Get(Name) @@ -390,7 +390,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := 
newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() builder := balancer.Get(Name) @@ -481,7 +481,7 @@ func TestReResolution(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() builder := balancer.Get(Name) diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 7671fcff17df..ae32caa5c759 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -52,7 +52,7 @@ func init() { balancer.Register(clusterImplBB{}) } -var newXDSClient func() (xdsClientInterface, error) +var newXDSClient func() (xdsClient, error) type clusterImplBB struct{} @@ -91,9 +91,9 @@ func (clusterImplBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancing return parseConfig(c) } -// xdsClientInterface contains only the xds_client methods needed by LRS +// xdsClient contains only the xds_client methods needed by LRS // balancer. It's defined so we can override xdsclient in tests. 
-type xdsClientInterface interface { +type xdsClient interface { ReportLoad(server string) (*load.Store, func()) Close() } @@ -115,7 +115,7 @@ type clusterImplBalancer struct { bOpts balancer.BuildOptions logger *grpclog.PrefixLogger - xdsC xdsClientInterface + xdsC xdsClient config *LBConfig childLB balancer.Balancer diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 3a38bdf1abab..594d0b05edce 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -41,9 +41,9 @@ import ( const edsName = "eds_experimental" -// xdsClientInterface contains only the xds_client methods needed by EDS +// xdsClient contains only the xds_client methods needed by EDS // balancer. It's defined so we can override xdsclient.New function in tests. -type xdsClientInterface interface { +type xdsClient interface { WatchEndpoints(clusterName string, edsCb func(xdsclient.EndpointsUpdate, error)) (cancel func()) ReportLoad(server string) (loadStore *load.Store, cancel func()) Close() @@ -53,7 +53,7 @@ var ( newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lw load.PerClusterReporter, logger *grpclog.PrefixLogger) edsBalancerImplInterface { return newEDSBalancerImpl(cc, opts, enqueueState, lw, logger) } - newXDSClient func() (xdsClientInterface, error) + newXDSClient func() (xdsClient, error) ) func init() { @@ -145,7 +145,7 @@ type edsBalancer struct { xdsClientUpdate chan *edsUpdate childPolicyUpdate *buffer.Unbounded - xdsClient xdsClientInterface + xdsClient xdsClient loadWrapper *loadstore.Wrapper config *EDSConfig // may change when passed a different service config edsImpl edsBalancerImplInterface diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index ec60dc1b1e60..2c1498dd3f78 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ 
b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -816,7 +816,7 @@ func (s) TestEDS_LoadReport(t *testing.T) { env.CircuitBreakingSupport = true defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() - // We create an xdsClientWrapper with a dummy xdsClientInterface which only + // We create an xdsClientWrapper with a dummy xdsClient which only // implements the LoadStore() method to return the underlying load.Store to // be used. loadStore := load.NewStore() diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index b7ec9e9c4526..0fda80373fbc 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -260,7 +260,7 @@ func waitForNewEDSLB(ctx context.Context, ch *testutils.Channel) (*fakeEDSBalanc func setup(edsLBCh *testutils.Channel) (*fakeclient.Client, func()) { xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } origNewEDSBalancer := newEDSBalancer newEDSBalancer = func(cc balancer.ClientConn, _ balancer.BuildOptions, _ func(priorityType, balancer.State), _ load.PerClusterReporter, _ *grpclog.PrefixLogger) edsBalancerImplInterface { diff --git a/xds/internal/balancer/edsbalancer/xds_lrs_test.go b/xds/internal/balancer/edsbalancer/xds_lrs_test.go index 8b7aab657667..d5b40dd98d32 100644 --- a/xds/internal/balancer/edsbalancer/xds_lrs_test.go +++ b/xds/internal/balancer/edsbalancer/xds_lrs_test.go @@ -34,7 +34,7 @@ import ( func (s) TestXDSLoadReporting(t *testing.T) { xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() builder := balancer.Get(edsName) diff 
--git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index 916bd0939b88..7e88c0f49712 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -36,7 +36,7 @@ func init() { balancer.Register(&lrsBB{}) } -var newXDSClient func() (xdsClientInterface, error) +var newXDSClient func() (xdsClient, error) // Name is the name of the LRS balancer. const Name = "lrs_experimental" @@ -169,15 +169,15 @@ func (ccw *ccWrapper) UpdateState(s balancer.State) { ccw.ClientConn.UpdateState(s) } -// xdsClientInterface contains only the xds_client methods needed by LRS +// xdsClient contains only the xds_client methods needed by LRS // balancer. It's defined so we can override xdsclient in tests. -type xdsClientInterface interface { +type xdsClient interface { ReportLoad(server string) (*load.Store, func()) Close() } type xdsClientWrapper struct { - c xdsClientInterface + c xdsClient cancelLoadReport func() clusterName string edsServiceName string @@ -187,7 +187,7 @@ type xdsClientWrapper struct { loadWrapper *loadstore.Wrapper } -func newXDSClientWrapper(c xdsClientInterface) *xdsClientWrapper { +func newXDSClientWrapper(c xdsClient) *xdsClientWrapper { return &xdsClientWrapper{ c: c, loadWrapper: loadstore.NewWrapper(), diff --git a/xds/internal/balancer/lrs/balancer_test.go b/xds/internal/balancer/lrs/balancer_test.go index f91937385a92..9ffa2894dad8 100644 --- a/xds/internal/balancer/lrs/balancer_test.go +++ b/xds/internal/balancer/lrs/balancer_test.go @@ -56,7 +56,7 @@ var ( func TestLoadReporting(t *testing.T) { xdsC := fakeclient.NewClient() oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } + newXDSClient = func() (xdsClient, error) { return xdsC, nil } defer func() { newXDSClient = oldNewXDSClient }() builder := balancer.Get(Name) diff --git a/xds/internal/resolver/matcher.go b/xds/internal/resolver/matcher.go index 42a843deecaa..6e09d93afa78 100644 
--- a/xds/internal/resolver/matcher.go +++ b/xds/internal/resolver/matcher.go @@ -31,21 +31,21 @@ import ( ) func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { - var pathMatcher pathMatcherInterface + var pm pathMatcher switch { case r.Regex != nil: - pathMatcher = newPathRegexMatcher(r.Regex) + pm = newPathRegexMatcher(r.Regex) case r.Path != nil: - pathMatcher = newPathExactMatcher(*r.Path, r.CaseInsensitive) + pm = newPathExactMatcher(*r.Path, r.CaseInsensitive) case r.Prefix != nil: - pathMatcher = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) + pm = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) default: return nil, fmt.Errorf("illegal route: missing path_matcher") } - var headerMatchers []matcher.HeaderMatcherInterface + var headerMatchers []matcher.HeaderMatcher for _, h := range r.Headers { - var matcherT matcher.HeaderMatcherInterface + var matcherT matcher.HeaderMatcher switch { case h.ExactMatch != nil && *h.ExactMatch != "": matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch) @@ -72,17 +72,17 @@ func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { if r.Fraction != nil { fractionMatcher = newFractionMatcher(*r.Fraction) } - return newCompositeMatcher(pathMatcher, headerMatchers, fractionMatcher), nil + return newCompositeMatcher(pm, headerMatchers, fractionMatcher), nil } // compositeMatcher.match returns true if all matchers return true. 
type compositeMatcher struct { - pm pathMatcherInterface - hms []matcher.HeaderMatcherInterface + pm pathMatcher + hms []matcher.HeaderMatcher fm *fractionMatcher } -func newCompositeMatcher(pm pathMatcherInterface, hms []matcher.HeaderMatcherInterface, fm *fractionMatcher) *compositeMatcher { +func newCompositeMatcher(pm pathMatcher, hms []matcher.HeaderMatcher, fm *fractionMatcher) *compositeMatcher { return &compositeMatcher{pm: pm, hms: hms, fm: fm} } diff --git a/xds/internal/resolver/matcher_path.go b/xds/internal/resolver/matcher_path.go index 011d1a94c49c..88a04f6d7bef 100644 --- a/xds/internal/resolver/matcher_path.go +++ b/xds/internal/resolver/matcher_path.go @@ -23,7 +23,7 @@ import ( "strings" ) -type pathMatcherInterface interface { +type pathMatcher interface { match(path string) bool String() string } diff --git a/xds/internal/resolver/matcher_test.go b/xds/internal/resolver/matcher_test.go index 6f599b82da2a..f7d5486cc136 100644 --- a/xds/internal/resolver/matcher_test.go +++ b/xds/internal/resolver/matcher_test.go @@ -34,8 +34,8 @@ import ( func TestAndMatcherMatch(t *testing.T) { tests := []struct { name string - pm pathMatcherInterface - hm matcher.HeaderMatcherInterface + pm pathMatcher + hm matcher.HeaderMatcher info iresolver.RPCInfo want bool }{ @@ -108,7 +108,7 @@ func TestAndMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - a := newCompositeMatcher(tt.pm, []matcher.HeaderMatcherInterface{tt.hm}, nil) + a := newCompositeMatcher(tt.pm, []matcher.HeaderMatcher{tt.hm}, nil) if got := a.match(tt.info); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 71c2bbf70842..591cc3833937 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -54,7 +54,7 @@ type ldsConfig struct { // Note that during race (e.g. 
an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func watchService(c xdsClientInterface, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { +func watchService(c xdsClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { w := &serviceUpdateWatcher{ logger: logger, c: c, @@ -70,7 +70,7 @@ func watchService(c xdsClientInterface, serviceName string, cb func(serviceUpdat // callback at the right time. type serviceUpdateWatcher struct { logger *grpclog.PrefixLogger - c xdsClientInterface + c xdsClient serviceName string ldsCancel func() serviceCb func(serviceUpdate, error) diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 1519158a96c7..a6a013698ac4 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -41,21 +41,21 @@ const xdsScheme = "xds" // the same time. func NewBuilder(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ - newXDSClient: func() (xdsClientInterface, error) { + newXDSClient: func() (xdsClient, error) { return xdsclient.NewClientWithBootstrapContents(config) }, }, nil } // For overriding in unittests. -var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } +var newXDSClient = func() (xdsClient, error) { return xdsclient.New() } func init() { resolver.Register(&xdsResolverBuilder{}) } type xdsResolverBuilder struct { - newXDSClient func() (xdsClientInterface, error) + newXDSClient func() (xdsClient, error) } // Build helps implement the resolver.Builder interface. 
@@ -119,9 +119,9 @@ func (*xdsResolverBuilder) Scheme() string { return xdsScheme } -// xdsClientInterface contains methods from xdsClient.Client which are used by +// xdsClient contains methods from xdsClient.Client which are used by // the resolver. This will be faked out in unittests. -type xdsClientInterface interface { +type xdsClient interface { WatchListener(serviceName string, cb func(xdsclient.ListenerUpdate, error)) func() WatchRouteConfig(routeName string, cb func(xdsclient.RouteConfigUpdate, error)) func() BootstrapConfig() *bootstrap.Config @@ -149,7 +149,7 @@ type xdsResolver struct { logger *grpclog.PrefixLogger // The underlying xdsClient which performs all xDS requests and responses. - client xdsClientInterface + client xdsClient // A channel for the watch API callback to write service updates on to. The // updates are read by the run goroutine and passed on to the ClientConn. updateCh chan suWithError diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index c7bf7c0a1092..d3a66595a358 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -114,19 +114,19 @@ func newTestClientConn() *testClientConn { func (s) TestResolverBuilder(t *testing.T) { tests := []struct { name string - xdsClientFunc func() (xdsClientInterface, error) + xdsClientFunc func() (xdsClient, error) wantErr bool }{ { name: "simple-good", - xdsClientFunc: func() (xdsClientInterface, error) { + xdsClientFunc: func() (xdsClient, error) { return fakeclient.NewClient(), nil }, wantErr: false, }, { name: "newXDSClient-throws-error", - xdsClientFunc: func() (xdsClientInterface, error) { + xdsClientFunc: func() (xdsClient, error) { return nil, errors.New("newXDSClient-throws-error") }, wantErr: true, @@ -167,7 +167,7 @@ func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { // Fake out the xdsClient creation process by providing a fake, which does // not have any 
certificate provider configuration. oldClientMaker := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsClient, error) { fc := fakeclient.NewClient() fc.SetBootstrapConfig(&bootstrap.Config{}) return fc, nil @@ -194,7 +194,7 @@ func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { } type setupOpts struct { - xdsClientFunc func() (xdsClientInterface, error) + xdsClientFunc func() (xdsClient, error) } func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *testClientConn, func()) { @@ -254,7 +254,7 @@ func waitForWatchRouteConfig(ctx context.Context, t *testing.T, xdsC *fakeclient func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer cancel() @@ -286,7 +286,7 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -312,7 +312,7 @@ func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -446,7 +446,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { 
return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer cancel() defer xdsR.Close() @@ -506,7 +506,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { func (s) TestXDSResolverRemovedResource(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer cancel() defer xdsR.Close() @@ -614,7 +614,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { func (s) TestXDSResolverWRR(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -675,7 +675,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { defer func(old bool) { env.TimeoutSupport = old }(env.TimeoutSupport) xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -778,7 +778,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -927,7 +927,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) 
{ return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -981,7 +981,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -1027,7 +1027,7 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -1202,7 +1202,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { t.Run(tc.name, func(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index 0618a6cd4a4c..43be4673655a 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -102,7 +102,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs // Identity provider name is mandatory on the server-side, and this is - // enforced when the resource is received at the xdsClient layer. + // enforced when the resource is received at the XDSClient layer. 
secCfg := c.filterChain.SecurityCfg ip, err := buildProviderFunc(cpc, secCfg.IdentityInstanceName, secCfg.IdentityCertName, true, false) if err != nil { diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 7e5f7071a808..727e95b94f13 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -88,9 +88,9 @@ func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", p)) } -// XDSClientInterface wraps the methods on the xdsClient which are required by +// XDSClient wraps the methods on the XDSClient which are required by // the listenerWrapper. -type XDSClientInterface interface { +type XDSClient interface { WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() BootstrapConfig() *bootstrap.Config } @@ -104,8 +104,8 @@ type ListenerWrapperParams struct { // XDSCredsInUse specifies whether or not the user expressed interest to // receive security configuration from the control plane. XDSCredsInUse bool - // XDSClient provides the functionality from the xdsClient required here. - XDSClient XDSClientInterface + // XDSClient provides the functionality from the XDSClient required here. + XDSClient XDSClient // ModeCallback is the callback to invoke when the serving mode changes. ModeCallback ServingModeCallback } @@ -152,7 +152,7 @@ type listenerWrapper struct { name string xdsCredsInUse bool - xdsC XDSClientInterface + xdsC XDSClient cancelWatch func() modeCallback ServingModeCallback @@ -168,7 +168,7 @@ type listenerWrapper struct { // instead of a vanilla channel simplifies the update handler as it need not // keep track of whether the received update is the first one or not. 
goodUpdate *grpcsync.Event - // A small race exists in the xdsClient code between the receipt of an xDS + // A small race exists in the XDSClient code between the receipt of an xDS // response and the user cancelling the associated watch. In this window, // the registered callback may be invoked after the watch is canceled, and // the user is expected to work around this. This event signifies that the @@ -299,14 +299,14 @@ func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, // Make sure that the socket address on the received Listener resource // matches the address of the net.Listener passed to us by the user. This - // check is done here instead of at the xdsClient layer because of the + // check is done here instead of at the XDSClient layer because of the // following couple of reasons: - // - xdsClient cannot know the listening address of every listener in the + // - XDSClient cannot know the listening address of every listener in the // system, and hence cannot perform this check. // - this is a very context-dependent check and only the server has the // appropriate context to perform this check. // - // What this means is that the xdsClient has ACKed a resource which can push + // What this means is that the XDSClient has ACKed a resource which can push // the server into a "not serving" mode. This is not ideal, but this is // what we have decided to do. See gRPC A36 for more details. 
ilc := update.InboundListenerCfg diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 848793ccdb7f..bef2ad56e18f 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -156,7 +156,7 @@ func (fc *fakeConn) Close() error { func newListenerWrapper(t *testing.T) (*listenerWrapper, <-chan struct{}, *fakeclient.Client, *fakeListener, func()) { t.Helper() - // Create a listener wrapper with a fake listener and fake xdsClient and + // Create a listener wrapper with a fake listener and fake XDSClient and // verify that it extracts the host and port from the passed in listener. lis := &fakeListener{ acceptCh: make(chan connAndErr, 1), diff --git a/xds/server.go b/xds/server.go index 86a40fcc8892..989859bc65c8 100644 --- a/xds/server.go +++ b/xds/server.go @@ -42,10 +42,10 @@ const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsClient, error) { return xdsclient.New() } - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return grpc.NewServer(opts...) } @@ -58,17 +58,17 @@ func prefixLogger(p *GRPCServer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, p)) } -// xdsClientInterface contains methods from xdsClient.Client which are used by +// xdsClient contains methods from xdsClient.Client which are used by // the server. This is useful for overriding in unit tests. 
-type xdsClientInterface interface { +type xdsClient interface { WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() BootstrapConfig() *bootstrap.Config Close() } -// grpcServerInterface contains methods from grpc.Server which are used by the +// grpcServer contains methods from grpc.Server which are used by the // GRPCServer type here. This is useful for overriding in unit tests. -type grpcServerInterface interface { +type grpcServer interface { RegisterService(*grpc.ServiceDesc, interface{}) Serve(net.Listener) error Stop() @@ -80,7 +80,7 @@ type grpcServerInterface interface { // grpc.ServiceRegistrar interface and can be passed to service registration // functions in IDL generated code. type GRPCServer struct { - gs grpcServerInterface + gs grpcServer quit *grpcsync.Event logger *internalgrpclog.PrefixLogger xdsCredsInUse bool @@ -90,7 +90,7 @@ type GRPCServer struct { // beginning of Serve(), where we have to decide if we have to create a // client or use an existing one. clientMu sync.Mutex - xdsC xdsClientInterface + xdsC xdsClient } // NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. 
@@ -156,7 +156,7 @@ func (s *GRPCServer) initXDSClient() error { newXDSClient := newXDSClient if s.opts.bootstrapContents != nil { - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsClient, error) { return xdsclient.NewClientWithBootstrapContents(s.opts.bootstrapContents) } } diff --git a/xds/server_test.go b/xds/server_test.go index 7e6aa7917a26..27a33da091d0 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -133,7 +133,7 @@ func (s) TestNewServer(t *testing.T) { wantServerOpts := len(test.serverOpts) + 2 origNewGRPCServer := newGRPCServer - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { if got := len(opts); got != wantServerOpts { t.Fatalf("%d ServerOptions passed to grpc.Server, want %d", got, wantServerOpts) } @@ -161,7 +161,7 @@ func (s) TestRegisterService(t *testing.T) { fs := newFakeGRPCServer() origNewGRPCServer := newGRPCServer - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { return fs } + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return fs } defer func() { newGRPCServer = origNewGRPCServer }() s := NewGRPCServer() @@ -247,7 +247,7 @@ func (p *fakeProvider) Close() { func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsClient, error) { c := fakeclient.NewClient() c.SetBootstrapConfig(&bootstrap.Config{ BalancerName: "dummyBalancer", @@ -262,7 +262,7 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { fs := newFakeGRPCServer() origNewGRPCServer := newGRPCServer - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { return fs } + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return fs } return fs, clientCh, func() { newXDSClient = origNewXDSClient @@ -277,7 +277,7 @@ func 
setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsClient, error) { c := fakeclient.NewClient() bc := &bootstrap.Config{ BalancerName: "dummyBalancer", @@ -544,7 +544,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // xdsClient with the specified bootstrap configuration. clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsClient, error) { c := fakeclient.NewClient() c.SetBootstrapConfig(test.bootstrapConfig) clientCh.Send(c) @@ -587,7 +587,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // verifies that Server() exits with a non-nil error. func (s) TestServeNewClientFailure(t *testing.T) { origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsClient, error) { return nil, errors.New("xdsClient creation failed") } defer func() { newXDSClient = origNewXDSClient }() From 7f9eeeae36417349a8d33f515a2cac04afceb30e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 4 Jun 2021 11:40:23 -0700 Subject: [PATCH 120/998] xds: standardize builder type names (bb) and balancer receiver names (b) (#4517) --- .../balancer/cdsbalancer/cdsbalancer.go | 13 +- .../balancer/clusterimpl/clusterimpl.go | 210 +++++++++--------- .../balancer/clustermanager/clustermanager.go | 10 +- xds/internal/balancer/edsbalancer/eds.go | 193 ++++++++-------- xds/internal/balancer/edsbalancer/eds_test.go | 7 +- xds/internal/balancer/edsbalancer/xds_old.go | 6 +- xds/internal/balancer/lrs/balancer.go | 10 +- xds/internal/balancer/priority/balancer.go | 10 +- .../balancer/weightedtarget/weightedtarget.go | 56 ++--- 9 files changed, 254 insertions(+), 261 deletions(-) diff 
--git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index d8c4f5eaac26..7278c624361d 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -64,17 +64,16 @@ var ( ) func init() { - balancer.Register(cdsBB{}) + balancer.Register(bb{}) } -// cdsBB (short for cdsBalancerBuilder) implements the balancer.Builder -// interface to help build a cdsBalancer. +// bb implements the balancer.Builder interface to help build a cdsBalancer. // It also implements the balancer.ConfigParser interface to help parse the // JSON service config, to be passed to the cdsBalancer. -type cdsBB struct{} +type bb struct{} // Build creates a new CDS balancer with the ClientConn. -func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &cdsBalancer{ bOpts: opts, updateCh: buffer.NewUnbounded(), @@ -117,7 +116,7 @@ func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. } // Name returns the name of balancers built by this builder. -func (cdsBB) Name() string { +func (bb) Name() string { return cdsName } @@ -130,7 +129,7 @@ type lbConfig struct { // ParseConfig parses the JSON load balancer config provided into an // internal form or returns an error if the config is invalid. 
-func (cdsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg lbConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("xds: unable to unmarshal lbconfig: %s, error: %v", string(c), err) diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index ae32caa5c759..359ec8505017 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -49,14 +49,14 @@ const ( ) func init() { - balancer.Register(clusterImplBB{}) + balancer.Register(bb{}) } var newXDSClient func() (xdsClient, error) -type clusterImplBB struct{} +type bb struct{} -func (clusterImplBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &clusterImplBalancer{ ClientConn: cc, bOpts: bOpts, @@ -83,11 +83,11 @@ func (clusterImplBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) return b } -func (clusterImplBB) Name() string { +func (bb) Name() string { return Name } -func (clusterImplBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } @@ -139,20 +139,20 @@ type clusterImplBalancer struct { // updateLoadStore checks the config for load store, and decides whether it // needs to restart the load reporting stream. -func (cib *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { +func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { var updateLoadClusterAndService bool // ClusterName is different, restart. ClusterName is from ClusterName and // EDSServiceName. 
- clusterName := cib.getClusterName() + clusterName := b.getClusterName() if clusterName != newConfig.Cluster { updateLoadClusterAndService = true - cib.setClusterName(newConfig.Cluster) + b.setClusterName(newConfig.Cluster) clusterName = newConfig.Cluster } - if cib.edsServiceName != newConfig.EDSServiceName { + if b.edsServiceName != newConfig.EDSServiceName { updateLoadClusterAndService = true - cib.edsServiceName = newConfig.EDSServiceName + b.edsServiceName = newConfig.EDSServiceName } if updateLoadClusterAndService { // This updates the clusterName and serviceName that will be reported @@ -163,7 +163,7 @@ func (cib *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { // On the other hand, this will almost never happen. Each LRS policy // shouldn't get updated config. The parent should do a graceful switch // when the clusterName or serviceName is changed. - cib.loadWrapper.UpdateClusterAndService(clusterName, cib.edsServiceName) + b.loadWrapper.UpdateClusterAndService(clusterName, b.edsServiceName) } // Check if it's necessary to restart load report. @@ -171,31 +171,31 @@ func (cib *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { if newConfig.LoadReportingServerName != nil { newLRSServerName = *newConfig.LoadReportingServerName } - if cib.lrsServerName != newLRSServerName { + if b.lrsServerName != newLRSServerName { // LoadReportingServerName is different, load should be report to a // different server, restart. 
- cib.lrsServerName = newLRSServerName - if cib.cancelLoadReport != nil { - cib.cancelLoadReport() - cib.cancelLoadReport = nil + b.lrsServerName = newLRSServerName + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil } var loadStore *load.Store - if cib.xdsC != nil { - loadStore, cib.cancelLoadReport = cib.xdsC.ReportLoad(cib.lrsServerName) + if b.xdsC != nil { + loadStore, b.cancelLoadReport = b.xdsC.ReportLoad(b.lrsServerName) } - cib.loadWrapper.UpdateLoadStore(loadStore) + b.loadWrapper.UpdateLoadStore(loadStore) } return nil } -func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - if cib.closed.HasFired() { - cib.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s) +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + if b.closed.HasFired() { + b.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s) return nil } - cib.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) @@ -209,36 +209,36 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) } - if cib.xdsC == nil { + if b.xdsC == nil { c := xdsclient.FromResolverState(s.ResolverState) if c == nil { return balancer.ErrBadResolverState } - cib.xdsC = c + b.xdsC = c } // Update load reporting config. This needs to be done before updating the // child policy because we need the loadStore from the updated client to be // passed to the ccWrapper, so that the next picker from the child policy // will pick up the new loadStore. 
- if err := cib.updateLoadStore(newConfig); err != nil { + if err := b.updateLoadStore(newConfig); err != nil { return err } // Compare new drop config. And update picker if it's changed. var updatePicker bool - if cib.config == nil || !equalDropCategories(cib.config.DropCategories, newConfig.DropCategories) { - cib.drops = make([]*dropper, 0, len(newConfig.DropCategories)) + if b.config == nil || !equalDropCategories(b.config.DropCategories, newConfig.DropCategories) { + b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) for _, c := range newConfig.DropCategories { - cib.drops = append(cib.drops, newDropper(c)) + b.drops = append(b.drops, newDropper(c)) } updatePicker = true } // Compare cluster name. And update picker if it's changed, because circuit // breaking's stream counter will be different. - if cib.config == nil || cib.config.Cluster != newConfig.Cluster { - cib.requestCounter = xdsclient.GetServiceRequestsCounter(newConfig.Cluster) + if b.config == nil || b.config.Cluster != newConfig.Cluster { + b.requestCounter = xdsclient.GetServiceRequestsCounter(newConfig.Cluster) updatePicker = true } // Compare upper bound of stream count. And update picker if it's changed. @@ -247,29 +247,29 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState if newConfig.MaxConcurrentRequests != nil { newRequestCountMax = *newConfig.MaxConcurrentRequests } - if cib.requestCountMax != newRequestCountMax { - cib.requestCountMax = newRequestCountMax + if b.requestCountMax != newRequestCountMax { + b.requestCountMax = newRequestCountMax updatePicker = true } if updatePicker { - cib.pickerUpdateCh.Put(&dropConfigs{ - drops: cib.drops, - requestCounter: cib.requestCounter, - requestCountMax: cib.requestCountMax, + b.pickerUpdateCh.Put(&dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, }) } // If child policy is a different type, recreate the sub-balancer. 
- if cib.config == nil || cib.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { - if cib.childLB != nil { - cib.childLB.Close() + if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { + if b.childLB != nil { + b.childLB.Close() } - cib.childLB = bb.Build(cib, cib.bOpts) + b.childLB = bb.Build(b, b.bOpts) } - cib.config = newConfig + b.config = newConfig - if cib.childLB == nil { + if b.childLB == nil { // This is not an expected situation, and should be super rare in // practice. // @@ -280,26 +280,26 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState } // Addresses and sub-balancer config are sent to sub-balancer. - return cib.childLB.UpdateClientConnState(balancer.ClientConnState{ + return b.childLB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, - BalancerConfig: cib.config.ChildPolicy.Config, + BalancerConfig: b.config.ChildPolicy.Config, }) } -func (cib *clusterImplBalancer) ResolverError(err error) { - if cib.closed.HasFired() { - cib.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) +func (b *clusterImplBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) return } - if cib.childLB != nil { - cib.childLB.ResolverError(err) + if b.childLB != nil { + b.childLB.ResolverError(err) } } -func (cib *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if cib.closed.HasFired() { - cib.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) +func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) return } @@ -311,65 +311,65 @@ func (cib 
*clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balanc // knows). The parent priority policy is configured to ignore re-resolution // signal from the EDS children. if s.ConnectivityState == connectivity.TransientFailure { - cib.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) + b.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) } - if cib.childLB != nil { - cib.childLB.UpdateSubConnState(sc, s) + if b.childLB != nil { + b.childLB.UpdateSubConnState(sc, s) } } -func (cib *clusterImplBalancer) Close() { - cib.mu.Lock() - cib.closed.Fire() - cib.mu.Unlock() +func (b *clusterImplBalancer) Close() { + b.mu.Lock() + b.closed.Fire() + b.mu.Unlock() - if cib.childLB != nil { - cib.childLB.Close() - cib.childLB = nil + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil } if newXDSClient != nil { - cib.xdsC.Close() + b.xdsC.Close() } - <-cib.done.Done() - cib.logger.Infof("Shutdown") + <-b.done.Done() + b.logger.Infof("Shutdown") } // Override methods to accept updates from the child LB. -func (cib *clusterImplBalancer) UpdateState(state balancer.State) { +func (b *clusterImplBalancer) UpdateState(state balancer.State) { // Instead of updating parent ClientConn inline, send state to run(). 
- cib.pickerUpdateCh.Put(state) + b.pickerUpdateCh.Put(state) } -func (cib *clusterImplBalancer) setClusterName(n string) { - cib.clusterNameMu.Lock() - defer cib.clusterNameMu.Unlock() - cib.clusterName = n +func (b *clusterImplBalancer) setClusterName(n string) { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + b.clusterName = n } -func (cib *clusterImplBalancer) getClusterName() string { - cib.clusterNameMu.Lock() - defer cib.clusterNameMu.Unlock() - return cib.clusterName +func (b *clusterImplBalancer) getClusterName() string { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + return b.clusterName } -func (cib *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - clusterName := cib.getClusterName() +func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + clusterName := b.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) for i, addr := range addrs { newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) } - return cib.ClientConn.NewSubConn(newAddrs, opts) + return b.ClientConn.NewSubConn(newAddrs, opts) } -func (cib *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - clusterName := cib.getClusterName() +func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + clusterName := b.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) for i, addr := range addrs { newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) } - cib.ClientConn.UpdateAddresses(sc, newAddrs) + b.ClientConn.UpdateAddresses(sc, newAddrs) } type dropConfigs struct { @@ -378,40 +378,40 @@ type dropConfigs struct { requestCountMax uint32 } -func (cib *clusterImplBalancer) run() { - defer cib.done.Fire() +func (b *clusterImplBalancer) run() { + defer b.done.Fire() for { select { - case update := 
<-cib.pickerUpdateCh.Get(): - cib.pickerUpdateCh.Load() - cib.mu.Lock() - if cib.closed.HasFired() { - cib.mu.Unlock() + case update := <-b.pickerUpdateCh.Get(): + b.pickerUpdateCh.Load() + b.mu.Lock() + if b.closed.HasFired() { + b.mu.Unlock() return } switch u := update.(type) { case balancer.State: - cib.childState = u - cib.ClientConn.UpdateState(balancer.State{ - ConnectivityState: cib.childState.ConnectivityState, - Picker: newDropPicker(cib.childState, &dropConfigs{ - drops: cib.drops, - requestCounter: cib.requestCounter, - requestCountMax: cib.requestCountMax, - }, cib.loadWrapper), + b.childState = u + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: newDropPicker(b.childState, &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + }, b.loadWrapper), }) case *dropConfigs: - cib.drops = u.drops - cib.requestCounter = u.requestCounter - if cib.childState.Picker != nil { - cib.ClientConn.UpdateState(balancer.State{ - ConnectivityState: cib.childState.ConnectivityState, - Picker: newDropPicker(cib.childState, u, cib.loadWrapper), + b.drops = u.drops + b.requestCounter = u.requestCounter + if b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: newDropPicker(b.childState, u, b.loadWrapper), }) } } - cib.mu.Unlock() - case <-cib.closed.Done(): + b.mu.Unlock() + case <-b.closed.Done(): return } } diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index c00a9a16f458..211133d384e8 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -36,12 +36,12 @@ import ( const balancerName = "xds_cluster_manager_experimental" func init() { - balancer.Register(builder{}) + balancer.Register(bb{}) } -type builder struct{} +type bb struct{} 
-func (builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &bal{} b.logger = prefixLogger(b) b.stateAggregator = newBalancerStateAggregator(cc, b.logger) @@ -52,11 +52,11 @@ func (builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balance return b } -func (builder) Name() string { +func (bb) Name() string { return balancerName } -func (builder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index 594d0b05edce..ffc46cea469b 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -57,13 +57,12 @@ var ( ) func init() { - balancer.Register(&edsBalancerBuilder{}) + balancer.Register(bb{}) } -type edsBalancerBuilder struct{} +type bb struct{} -// Build helps implement the balancer.Builder interface. 
-func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { x := &edsBalancer{ cc: cc, closed: grpcsync.NewEvent(), @@ -92,11 +91,11 @@ func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOp return x } -func (b *edsBalancerBuilder) Name() string { +func (bb) Name() string { return edsName } -func (b *edsBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg EDSConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("unable to unmarshal balancer config %s into EDSConfig, error: %v", string(c), err) @@ -162,25 +161,25 @@ type edsBalancer struct { // updates from grpc, xdsClient and load balancer. It synchronizes the // operations that happen inside edsBalancer. It exits when edsBalancer is // closed. 
-func (x *edsBalancer) run() { +func (b *edsBalancer) run() { for { select { - case update := <-x.grpcUpdate: - x.handleGRPCUpdate(update) - case update := <-x.xdsClientUpdate: - x.handleXDSClientUpdate(update) - case update := <-x.childPolicyUpdate.Get(): - x.childPolicyUpdate.Load() + case update := <-b.grpcUpdate: + b.handleGRPCUpdate(update) + case update := <-b.xdsClientUpdate: + b.handleXDSClientUpdate(update) + case update := <-b.childPolicyUpdate.Get(): + b.childPolicyUpdate.Load() u := update.(*balancerStateWithPriority) - x.edsImpl.updateState(u.priority, u.s) - case <-x.closed.Done(): - x.cancelWatch() + b.edsImpl.updateState(u.priority, u.s) + case <-b.closed.Done(): + b.cancelWatch() if newXDSClient != nil { - x.xdsClient.Close() + b.xdsClient.Close() } - x.edsImpl.close() - x.logger.Infof("Shutdown") - x.done.Fire() + b.edsImpl.close() + b.logger.Infof("Shutdown") + b.done.Fire() return } } @@ -199,68 +198,68 @@ func (x *edsBalancer) run() { // watcher should keep watching. // In both cases, the sub-balancers will be closed, and the future picks will // fail. -func (x *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - x.logger.Warningf("Received error: %v", err) +func (b *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { + b.logger.Warningf("Received error: %v", err) if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { if fromParent { // This is an error from the parent ClientConn (can be the parent // CDS balancer), and is a resource-not-found error. This means the // resource (can be either LDS or CDS) was removed. Stop the EDS // watch. 
- x.cancelWatch() + b.cancelWatch() } - x.edsImpl.handleEDSResponse(xdsclient.EndpointsUpdate{}) + b.edsImpl.handleEDSResponse(xdsclient.EndpointsUpdate{}) } } -func (x *edsBalancer) handleGRPCUpdate(update interface{}) { +func (b *edsBalancer) handleGRPCUpdate(update interface{}) { switch u := update.(type) { case *subConnStateUpdate: - x.edsImpl.handleSubConnStateChange(u.sc, u.state.ConnectivityState) + b.edsImpl.handleSubConnStateChange(u.sc, u.state.ConnectivityState) case *balancer.ClientConnState: - x.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(u.BalancerConfig)) + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(u.BalancerConfig)) cfg, _ := u.BalancerConfig.(*EDSConfig) if cfg == nil { // service config parsing failed. should never happen. return } - if err := x.handleServiceConfigUpdate(cfg); err != nil { - x.logger.Warningf("failed to update xDS client: %v", err) + if err := b.handleServiceConfigUpdate(cfg); err != nil { + b.logger.Warningf("failed to update xDS client: %v", err) } - x.edsImpl.updateServiceRequestsConfig(cfg.ClusterName, cfg.MaxConcurrentRequests) + b.edsImpl.updateServiceRequestsConfig(cfg.ClusterName, cfg.MaxConcurrentRequests) // We will update the edsImpl with the new child policy, if we got a // different one. 
- if !cmp.Equal(cfg.ChildPolicy, x.config.ChildPolicy, cmpopts.EquateEmpty()) { + if !cmp.Equal(cfg.ChildPolicy, b.config.ChildPolicy, cmpopts.EquateEmpty()) { if cfg.ChildPolicy != nil { - x.edsImpl.handleChildPolicy(cfg.ChildPolicy.Name, cfg.ChildPolicy.Config) + b.edsImpl.handleChildPolicy(cfg.ChildPolicy.Name, cfg.ChildPolicy.Config) } else { - x.edsImpl.handleChildPolicy(roundrobin.Name, nil) + b.edsImpl.handleChildPolicy(roundrobin.Name, nil) } } - x.config = cfg + b.config = cfg case error: - x.handleErrorFromUpdate(u, true) + b.handleErrorFromUpdate(u, true) default: // unreachable path - x.logger.Errorf("wrong update type: %T", update) + b.logger.Errorf("wrong update type: %T", update) } } // handleServiceConfigUpdate applies the service config update, watching a new // EDS service name and restarting LRS stream, as required. -func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { +func (b *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { var updateLoadClusterAndService bool - if x.clusterName != config.ClusterName { + if b.clusterName != config.ClusterName { updateLoadClusterAndService = true - x.clusterName = config.ClusterName - x.edsImpl.updateClusterName(x.clusterName) + b.clusterName = config.ClusterName + b.edsImpl.updateClusterName(b.clusterName) } - if x.edsServiceName != config.EDSServiceName { + if b.edsServiceName != config.EDSServiceName { updateLoadClusterAndService = true - x.edsServiceName = config.EDSServiceName + b.edsServiceName = config.EDSServiceName } // If EDSServiceName is set, use it to watch EDS. Otherwise, use the cluster @@ -270,14 +269,14 @@ func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { newEDSToWatch = config.ClusterName } var restartEDSWatch bool - if x.edsToWatch != newEDSToWatch { + if b.edsToWatch != newEDSToWatch { restartEDSWatch = true - x.edsToWatch = newEDSToWatch + b.edsToWatch = newEDSToWatch } // Restart EDS watch when the eds name has changed. 
if restartEDSWatch { - x.startEndpointsWatch() + b.startEndpointsWatch() } if updateLoadClusterAndService { @@ -288,13 +287,13 @@ func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { // // This is OK for now, because we don't actually expect edsServiceName // to change. Fix this (a bigger change) will happen later. - x.loadWrapper.UpdateClusterAndService(x.clusterName, x.edsServiceName) + b.loadWrapper.UpdateClusterAndService(b.clusterName, b.edsServiceName) } // Restart load reporting when the loadReportServer name has changed. - if !equalStringPointers(x.loadReportServer, config.LrsLoadReportingServerName) { - loadStore := x.startLoadReport(config.LrsLoadReportingServerName) - x.loadWrapper.UpdateLoadStore(loadStore) + if !equalStringPointers(b.loadReportServer, config.LrsLoadReportingServerName) { + loadStore := b.startLoadReport(config.LrsLoadReportingServerName) + b.loadWrapper.UpdateLoadStore(loadStore) } return nil @@ -304,32 +303,32 @@ func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { // // This usually means load report needs to be restarted, but this function does // NOT do that. Caller needs to call startLoadReport separately. 
-func (x *edsBalancer) startEndpointsWatch() { - if x.cancelEndpointsWatch != nil { - x.cancelEndpointsWatch() +func (b *edsBalancer) startEndpointsWatch() { + if b.cancelEndpointsWatch != nil { + b.cancelEndpointsWatch() } - edsToWatch := x.edsToWatch - cancelEDSWatch := x.xdsClient.WatchEndpoints(edsToWatch, func(update xdsclient.EndpointsUpdate, err error) { - x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, pretty.ToJSON(update)) - x.handleEDSUpdate(update, err) + edsToWatch := b.edsToWatch + cancelEDSWatch := b.xdsClient.WatchEndpoints(edsToWatch, func(update xdsclient.EndpointsUpdate, err error) { + b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, pretty.ToJSON(update)) + b.handleEDSUpdate(update, err) }) - x.logger.Infof("Watch started on resource name %v with xds-client %p", edsToWatch, x.xdsClient) - x.cancelEndpointsWatch = func() { + b.logger.Infof("Watch started on resource name %v with xds-client %p", edsToWatch, b.xdsClient) + b.cancelEndpointsWatch = func() { cancelEDSWatch() - x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", edsToWatch, x.xdsClient) + b.logger.Infof("Watch cancelled on resource name %v with xds-client %p", edsToWatch, b.xdsClient) } } -func (x *edsBalancer) cancelWatch() { - x.loadReportServer = nil - if x.cancelLoadReport != nil { - x.cancelLoadReport() - x.cancelLoadReport = nil +func (b *edsBalancer) cancelWatch() { + b.loadReportServer = nil + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil } - if x.cancelEndpointsWatch != nil { - x.edsToWatch = "" - x.cancelEndpointsWatch() - x.cancelEndpointsWatch = nil + if b.cancelEndpointsWatch != nil { + b.edsToWatch = "" + b.cancelEndpointsWatch() + b.cancelEndpointsWatch = nil } } @@ -339,26 +338,26 @@ func (x *edsBalancer) cancelWatch() { // Caller can cal this when the loadReportServer name changes, but // edsServiceName doesn't (so we only need to restart load reporting, 
not EDS // watch). -func (x *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { - x.loadReportServer = loadReportServer - if x.cancelLoadReport != nil { - x.cancelLoadReport() - x.cancelLoadReport = nil +func (b *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { + b.loadReportServer = loadReportServer + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil } if loadReportServer == nil { return nil } - ls, cancel := x.xdsClient.ReportLoad(*loadReportServer) - x.cancelLoadReport = cancel + ls, cancel := b.xdsClient.ReportLoad(*loadReportServer) + b.cancelLoadReport = cancel return ls } -func (x *edsBalancer) handleXDSClientUpdate(update *edsUpdate) { +func (b *edsBalancer) handleXDSClientUpdate(update *edsUpdate) { if err := update.err; err != nil { - x.handleErrorFromUpdate(err, false) + b.handleErrorFromUpdate(err, false) return } - x.edsImpl.handleEDSResponse(update.resp) + b.edsImpl.handleEDSResponse(update.resp) } type subConnStateUpdate struct { @@ -366,36 +365,36 @@ type subConnStateUpdate struct { state balancer.SubConnState } -func (x *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +func (b *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { update := &subConnStateUpdate{ sc: sc, state: state, } select { - case x.grpcUpdate <- update: - case <-x.closed.Done(): + case b.grpcUpdate <- update: + case <-b.closed.Done(): } } -func (x *edsBalancer) ResolverError(err error) { +func (b *edsBalancer) ResolverError(err error) { select { - case x.grpcUpdate <- err: - case <-x.closed.Done(): + case b.grpcUpdate <- err: + case <-b.closed.Done(): } } -func (x *edsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - if x.xdsClient == nil { +func (b *edsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + if b.xdsClient == nil { c := xdsclient.FromResolverState(s.ResolverState) if c == nil { return 
balancer.ErrBadResolverState } - x.xdsClient = c + b.xdsClient = c } select { - case x.grpcUpdate <- &s: - case <-x.closed.Done(): + case b.grpcUpdate <- &s: + case <-b.closed.Done(): } return nil } @@ -405,10 +404,10 @@ type edsUpdate struct { err error } -func (x *edsBalancer) handleEDSUpdate(resp xdsclient.EndpointsUpdate, err error) { +func (b *edsBalancer) handleEDSUpdate(resp xdsclient.EndpointsUpdate, err error) { select { - case x.xdsClientUpdate <- &edsUpdate{resp: resp, err: err}: - case <-x.closed.Done(): + case b.xdsClientUpdate <- &edsUpdate{resp: resp, err: err}: + case <-b.closed.Done(): } } @@ -417,16 +416,16 @@ type balancerStateWithPriority struct { s balancer.State } -func (x *edsBalancer) enqueueChildBalancerState(p priorityType, s balancer.State) { - x.childPolicyUpdate.Put(&balancerStateWithPriority{ +func (b *edsBalancer) enqueueChildBalancerState(p priorityType, s balancer.State) { + b.childPolicyUpdate.Put(&balancerStateWithPriority{ priority: p, s: s, }) } -func (x *edsBalancer) Close() { - x.closed.Fire() - <-x.done.Done() +func (b *edsBalancer) Close() { + b.closed.Fire() + <-b.done.Done() } // equalStringPointers returns true if diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 0fda80373fbc..7e16076751ab 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -70,10 +70,6 @@ var ( } ) -func init() { - balancer.Register(&edsBalancerBuilder{}) -} - func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { return func() balancer.SubConn { scst, _ := p.Pick(balancer.PickInfo{}) @@ -890,8 +886,7 @@ func (s) TestBalancerConfigParsing(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - b := &edsBalancerBuilder{} - got, err := b.ParseConfig(tt.js) + got, err := bb{}.ParseConfig(tt.js) if (err != nil) != tt.wantErr { t.Fatalf("edsBalancerBuilder.ParseConfig() error = %v, wantErr %v", 
err, tt.wantErr) } diff --git a/xds/internal/balancer/edsbalancer/xds_old.go b/xds/internal/balancer/edsbalancer/xds_old.go index 6729e6801f15..363ce7c9dd78 100644 --- a/xds/internal/balancer/edsbalancer/xds_old.go +++ b/xds/internal/balancer/edsbalancer/xds_old.go @@ -32,15 +32,15 @@ import "google.golang.org/grpc/balancer" const xdsName = "xds_experimental" func init() { - balancer.Register(&xdsBalancerBuilder{}) + balancer.Register(xdsBalancerBuilder{}) } // xdsBalancerBuilder register edsBalancerBuilder (now with name // "eds_experimental") under the old name "xds_experimental". type xdsBalancerBuilder struct { - edsBalancerBuilder + bb } -func (b *xdsBalancerBuilder) Name() string { +func (xdsBalancerBuilder) Name() string { return xdsName } diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index 7e88c0f49712..313b4b78a237 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -33,7 +33,7 @@ import ( ) func init() { - balancer.Register(&lrsBB{}) + balancer.Register(bb{}) } var newXDSClient func() (xdsClient, error) @@ -41,9 +41,9 @@ var newXDSClient func() (xdsClient, error) // Name is the name of the LRS balancer. 
const Name = "lrs_experimental" -type lrsBB struct{} +type bb struct{} -func (l *lrsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &lrsBalancer{ cc: cc, buildOpts: opts, @@ -64,11 +64,11 @@ func (l *lrsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balanc return b } -func (l *lrsBB) Name() string { +func (bb) Name() string { return Name } -func (l *lrsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index d760a58fa4ec..7475145c612b 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -44,12 +44,12 @@ import ( const Name = "priority_experimental" func init() { - balancer.Register(priorityBB{}) + balancer.Register(bb{}) } -type priorityBB struct{} +type bb struct{} -func (priorityBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &priorityBalancer{ cc: cc, done: grpcsync.NewEvent(), @@ -66,11 +66,11 @@ func (priorityBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) bal return b } -func (b priorityBB) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (b bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(s) } -func (priorityBB) Name() string { +func (bb) Name() string { return Name } diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index 6c1b70f92235..fd9da9c59d71 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ 
b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -42,12 +42,12 @@ const Name = "weighted_target_experimental" var NewRandomWRR = wrr.NewRandom func init() { - balancer.Register(&weightedTargetBB{}) + balancer.Register(bb{}) } -type weightedTargetBB struct{} +type bb struct{} -func (wt *weightedTargetBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &weightedTargetBalancer{} b.logger = prefixLogger(b) b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) @@ -58,11 +58,11 @@ func (wt *weightedTargetBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOp return b } -func (wt *weightedTargetBB) Name() string { +func (bb) Name() string { return Name } -func (wt *weightedTargetBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } @@ -83,8 +83,8 @@ type weightedTargetBalancer struct { // UpdateClientConnState takes the new targets in balancer group, // creates/deletes sub-balancers and sends them update. Addresses are split into // groups based on hierarchy path. -func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - w.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) +func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) @@ -94,10 +94,10 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat var rebuildStateAndPicker bool // Remove sub-pickers and sub-balancers that are not in the new config. 
- for name := range w.targets { + for name := range b.targets { if _, ok := newConfig.Targets[name]; !ok { - w.stateAggregator.Remove(name) - w.bg.Remove(name) + b.stateAggregator.Remove(name) + b.bg.Remove(name) // Trigger a state/picker update, because we don't want `ClientConn` // to pick this sub-balancer anymore. rebuildStateAndPicker = true @@ -110,27 +110,27 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat // // For all sub-balancers, forward the address/balancer config update. for name, newT := range newConfig.Targets { - oldT, ok := w.targets[name] + oldT, ok := b.targets[name] if !ok { // If this is a new sub-balancer, add weights to the picker map. - w.stateAggregator.Add(name, newT.Weight) + b.stateAggregator.Add(name, newT.Weight) // Then add to the balancer group. - w.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) // Not trigger a state/picker update. Wait for the new sub-balancer // to send its updates. } else if newT.ChildPolicy.Name != oldT.ChildPolicy.Name { // If the child policy name is differet, remove from balancer group // and re-add. - w.stateAggregator.Remove(name) - w.bg.Remove(name) - w.stateAggregator.Add(name, newT.Weight) - w.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + b.stateAggregator.Remove(name) + b.bg.Remove(name) + b.stateAggregator.Add(name, newT.Weight) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) // Trigger a state/picker update, because we don't want `ClientConn` // to pick this sub-balancer anymore. rebuildStateAndPicker = true } else if newT.Weight != oldT.Weight { // If this is an existing sub-balancer, update weight if necessary. - w.stateAggregator.UpdateWeight(name, newT.Weight) + b.stateAggregator.UpdateWeight(name, newT.Weight) // Trigger a state/picker update, because we don't want `ClientConn` // should do picks with the new weights now. 
rebuildStateAndPicker = true @@ -142,7 +142,7 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat // - Balancer config comes from the targets map. // // TODO: handle error? How to aggregate errors and return? - _ = w.bg.UpdateClientConnState(name, balancer.ClientConnState{ + _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, @@ -152,23 +152,23 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat }) } - w.targets = newConfig.Targets + b.targets = newConfig.Targets if rebuildStateAndPicker { - w.stateAggregator.BuildAndUpdate() + b.stateAggregator.BuildAndUpdate() } return nil } -func (w *weightedTargetBalancer) ResolverError(err error) { - w.bg.ResolverError(err) +func (b *weightedTargetBalancer) ResolverError(err error) { + b.bg.ResolverError(err) } -func (w *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - w.bg.UpdateSubConnState(sc, state) +func (b *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.bg.UpdateSubConnState(sc, state) } -func (w *weightedTargetBalancer) Close() { - w.stateAggregator.Stop() - w.bg.Close() +func (b *weightedTargetBalancer) Close() { + b.stateAggregator.Stop() + b.bg.Close() } From 656cad9ae5cf6ac93dc06669f308d29be7118481 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 4 Jun 2021 12:00:13 -0700 Subject: [PATCH 121/998] xds: standardize xds client field name (xdsClient) (#4518) --- .../balancer/clusterimpl/clusterimpl.go | 18 +++++++++--------- xds/internal/balancer/lrs/balancer.go | 16 ++++++++-------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 359ec8505017..9f3acafbc92b 100644 --- 
a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -75,7 +75,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba b.logger.Errorf("failed to create xds-client: %v", err) return nil } - b.xdsC = client + b.xdsClient = client } go b.run() @@ -113,9 +113,9 @@ type clusterImplBalancer struct { closed *grpcsync.Event done *grpcsync.Event - bOpts balancer.BuildOptions - logger *grpclog.PrefixLogger - xdsC xdsClient + bOpts balancer.BuildOptions + logger *grpclog.PrefixLogger + xdsClient xdsClient config *LBConfig childLB balancer.Balancer @@ -180,8 +180,8 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { b.cancelLoadReport = nil } var loadStore *load.Store - if b.xdsC != nil { - loadStore, b.cancelLoadReport = b.xdsC.ReportLoad(b.lrsServerName) + if b.xdsClient != nil { + loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(b.lrsServerName) } b.loadWrapper.UpdateLoadStore(loadStore) } @@ -209,12 +209,12 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) } - if b.xdsC == nil { + if b.xdsClient == nil { c := xdsclient.FromResolverState(s.ResolverState) if c == nil { return balancer.ErrBadResolverState } - b.xdsC = c + b.xdsClient = c } // Update load reporting config. 
This needs to be done before updating the @@ -329,7 +329,7 @@ func (b *clusterImplBalancer) Close() { b.childLB = nil } if newXDSClient != nil { - b.xdsC.Close() + b.xdsClient.Close() } <-b.done.Done() b.logger.Infof("Shutdown") diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index 313b4b78a237..75a8cbb0dd7b 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -58,7 +58,7 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.logger.Errorf("failed to create xds-client: %v", err) return nil } - b.client = newXDSClientWrapper(client) + b.xdsClient = newXDSClientWrapper(client) } return b @@ -76,8 +76,8 @@ type lrsBalancer struct { cc balancer.ClientConn buildOpts balancer.BuildOptions - logger *grpclog.PrefixLogger - client *xdsClientWrapper + logger *grpclog.PrefixLogger + xdsClient *xdsClientWrapper config *LBConfig lb balancer.Balancer // The sub balancer. @@ -90,18 +90,18 @@ func (b *lrsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - if b.client == nil { + if b.xdsClient == nil { c := xdsclient.FromResolverState(s.ResolverState) if c == nil { return balancer.ErrBadResolverState } - b.client = newXDSClientWrapper(c) + b.xdsClient = newXDSClientWrapper(c) } // Update load reporting config or xds client. This needs to be done before // updating the child policy because we need the loadStore from the updated // client to be passed to the ccWrapper. 
- if err := b.client.update(newConfig); err != nil { + if err := b.xdsClient.update(newConfig); err != nil { return err } @@ -118,7 +118,7 @@ func (b *lrsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { if err != nil { return fmt.Errorf("failed to marshal LocalityID: %#v", newConfig.Locality) } - ccWrapper := newCCWrapper(b.cc, b.client.loadStore(), lidJSON) + ccWrapper := newCCWrapper(b.cc, b.xdsClient.loadStore(), lidJSON) b.lb = bb.Build(ccWrapper, b.buildOpts) } b.config = newConfig @@ -147,7 +147,7 @@ func (b *lrsBalancer) Close() { b.lb.Close() b.lb = nil } - b.client.close() + b.xdsClient.close() } type ccWrapper struct { From d30e2c91a0545bd393774c3775cd9f9c5f5a5673 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 7 Jun 2021 17:13:48 -0700 Subject: [PATCH 122/998] xds/resolver: test xds client closed by resolver Close (#4509) --- xds/internal/resolver/xds_resolver_test.go | 14 ++++++++++++++ xds/internal/testutils/fakeclient/client.go | 17 ++++++----------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index d3a66595a358..d588ff157cd6 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -281,6 +281,20 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { } } +// TestXDSResolverCloseClosesXDSClient tests that the XDS resolver's Close +// method closes the XDS client. +func (s) TestXDSResolverCloseClosesXDSClient(t *testing.T) { + xdsC := fakeclient.NewClient() + xdsR, _, cancel := testSetup(t, setupOpts{ + xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + }) + defer cancel() + xdsR.Close() + if !xdsC.Closed.HasFired() { + t.Fatalf("xds client not closed by xds resolver Close method") + } +} + // TestXDSResolverBadServiceUpdate tests the case the xdsClient returns a bad // service update. 
func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index ce0996cb8e98..37e84f998b99 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -22,6 +22,7 @@ package fakeclient import ( "context" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -41,7 +42,6 @@ type Client struct { cdsCancelCh *testutils.Channel edsCancelCh *testutils.Channel loadReportCh *testutils.Channel - closeCh *testutils.Channel loadStore *load.Store bootstrapCfg *bootstrap.Config @@ -49,6 +49,8 @@ type Client struct { rdsCb func(xdsclient.RouteConfigUpdate, error) cdsCbs map[string]func(xdsclient.ClusterUpdate, error) edsCb func(xdsclient.EndpointsUpdate, error) + + Closed *grpcsync.Event // fired when Close is called. } // WatchListener registers a LDS watch. @@ -228,16 +230,9 @@ func (xdsC *Client) WaitForReportLoad(ctx context.Context) (ReportLoadArgs, erro return val.(ReportLoadArgs), err } -// Close closes the xds client. +// Close fires xdsC.Closed, indicating it was called. func (xdsC *Client) Close() { - xdsC.closeCh.Send(nil) -} - -// WaitForClose waits for Close to be invoked on this client and returns -// context.DeadlineExceeded otherwise. -func (xdsC *Client) WaitForClose(ctx context.Context) error { - _, err := xdsC.closeCh.Receive(ctx) - return err + xdsC.Closed.Fire() } // BootstrapConfig returns the bootstrap config. 
@@ -275,8 +270,8 @@ func NewClientWithName(name string) *Client { cdsCancelCh: testutils.NewChannelWithSize(10), edsCancelCh: testutils.NewChannel(), loadReportCh: testutils.NewChannel(), - closeCh: testutils.NewChannel(), loadStore: load.NewStore(), cdsCbs: make(map[string]func(xdsclient.ClusterUpdate, error)), + Closed: grpcsync.NewEvent(), } } From 7301a311748ce82f30d8bd8076fad23ec4c7c1df Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 7 Jun 2021 21:57:17 -0700 Subject: [PATCH 123/998] c2p: add random number to xDS node ID in google-c2p resolver (#4519) --- internal/grpcrand/grpcrand.go | 22 +++++++++++++--------- xds/googledirectpath/googlec2p.go | 5 ++++- xds/googledirectpath/googlec2p_test.go | 2 +- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/internal/grpcrand/grpcrand.go b/internal/grpcrand/grpcrand.go index 200b115ca209..7bc3583b5fcc 100644 --- a/internal/grpcrand/grpcrand.go +++ b/internal/grpcrand/grpcrand.go @@ -31,26 +31,30 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) } // Float64 implements rand.Float64 on the grpcrand global source. 
func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() } diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index b514f03bfbfa..af487ec4a736 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/googlecloud" internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. @@ -152,13 +153,15 @@ var ipv6EnabledMetadata = &structpb.Struct{ }, } +var id = fmt.Sprintf("C2P-%d", grpcrand.Int()) + // newNode makes a copy of defaultNode, and populate it's Metadata and // Locality fields. func newNode(zone string, ipv6Capable bool) *v3corepb.Node { ret := &v3corepb.Node{ // Not all required fields are set in defaultNote. Metadata will be set // if ipv6 is enabled. Locality will be set to the value from metadata. 
- Id: "C2P", + Id: id, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, ClientFeatures: []string{clientFeatureNoOverprovisioning}, diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 5b8085ef34c3..fb68fa23a1d0 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -194,7 +194,7 @@ func TestBuildXDS(t *testing.T) { } wantNode := &v3corepb.Node{ - Id: "C2P", + Id: id, Metadata: nil, Locality: &v3corepb.Locality{Zone: testZone}, UserAgentName: gRPCUserAgentName, From b1418a6e74bc6bed7dad82588b6d817b5417b20b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 8 Jun 2021 16:05:50 -0700 Subject: [PATCH 124/998] xds: export XDSClient interface and use it in balancer tests (#4510) - xdsclient.New returns the interface now - xdsclient.SetClient and xdsclient.FromResolverState take and return the interface now - cleanup xds balancer tests to pass xds_client in resolver state --- xds/csds/csds.go | 20 +------- xds/csds/csds_test.go | 13 ++--- xds/googledirectpath/googlec2p.go | 8 +--- xds/googledirectpath/googlec2p_test.go | 4 +- .../balancer/cdsbalancer/cdsbalancer.go | 35 +------------- .../cdsbalancer/cdsbalancer_security_test.go | 8 +--- .../balancer/cdsbalancer/cdsbalancer_test.go | 31 ++++++------ .../balancer/cdsbalancer/cluster_handler.go | 4 +- .../balancer/clusterimpl/balancer_test.go | 48 +++++-------------- .../balancer/clusterimpl/clusterimpl.go | 25 +--------- xds/internal/balancer/edsbalancer/eds.go | 25 +--------- xds/internal/balancer/edsbalancer/eds_test.go | 19 ++++++-- .../balancer/edsbalancer/xds_lrs_test.go | 7 +-- xds/internal/balancer/lrs/balancer.go | 27 +---------- xds/internal/balancer/lrs/balancer_test.go | 9 ++-- xds/internal/resolver/watch_service.go | 4 +- xds/internal/resolver/xds_resolver.go | 30 +++--------- xds/internal/resolver/xds_resolver_test.go | 36 +++++++------- 
xds/internal/testutils/fakeclient/client.go | 5 ++ xds/internal/xdsclient/attributes.go | 31 ++++++++++-- xds/internal/xdsclient/client.go | 2 +- xds/internal/xdsclient/client_test.go | 6 +-- xds/internal/xdsclient/singleton.go | 37 ++++++++------ xds/internal/xdsclient/tests/dump_test.go | 20 ++++---- xds/server.go | 15 ++---- xds/server_test.go | 8 ++-- 26 files changed, 173 insertions(+), 304 deletions(-) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index d32bebac81bc..1b54a3a4c6e3 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -38,33 +38,17 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/types/known/timestamppb" _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register v2 xds_client. _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register v3 xds_client. ) -// xdsClient contains methods from xdsClient.Client which are used by -// the server. This is useful for overriding in unit tests. -type xdsClient interface { - DumpLDS() (string, map[string]xdsclient.UpdateWithMD) - DumpRDS() (string, map[string]xdsclient.UpdateWithMD) - DumpCDS() (string, map[string]xdsclient.UpdateWithMD) - DumpEDS() (string, map[string]xdsclient.UpdateWithMD) - BootstrapConfig() *bootstrap.Config - Close() -} - var ( logger = grpclog.Component("xds") - newXDSClient = func() xdsClient { + newXDSClient = func() xdsclient.XDSClient { c, err := xdsclient.New() if err != nil { - // If err is not nil, c is a typed nil (of type *xdsclient.Client). - // If c is returned and assigned to the xdsClient field in the CSDS - // server, the nil checks in the handlers will not handle it - // properly. logger.Warningf("failed to create xds client: %v", err) return nil } @@ -76,7 +60,7 @@ var ( type ClientStatusDiscoveryServer struct { // xdsClient will always be the same in practice. 
But we keep a copy in each // server instance for testing. - xdsClient xdsClient + xdsClient xdsclient.XDSClient } // NewClientStatusDiscoveryServer returns an implementation of the CSDS server that can be diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 7f0e90bebc1e..98dc93e86713 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -59,13 +59,6 @@ const ( defaultTestTimeout = 10 * time.Second ) -type xdsClientWithWatch interface { - WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() - WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func() - WatchCluster(string, func(xdsclient.ClusterUpdate, error)) func() - WatchEndpoints(string, func(xdsclient.EndpointsUpdate, error)) func() -} - var cmpOpts = cmp.Options{ cmpopts.EquateEmpty(), cmp.Comparer(func(a, b *timestamppb.Timestamp) bool { return true }), @@ -250,7 +243,7 @@ func TestCSDS(t *testing.T) { } } -func commonSetup(t *testing.T) (xdsClientWithWatch, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { +func commonSetup(t *testing.T) (xdsclient.XDSClient, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { t.Helper() // Spin up a xDS management server on a local port. @@ -275,7 +268,7 @@ func commonSetup(t *testing.T) (xdsClientWithWatch, *e2e.ManagementServer, strin t.Fatalf("failed to create xds client: %v", err) } oldNewXDSClient := newXDSClient - newXDSClient = func() xdsClient { return xdsC } + newXDSClient = func() xdsclient.XDSClient { return xdsC } // Initialize an gRPC server and register CSDS on it. 
server := grpc.NewServer() @@ -635,7 +628,7 @@ func protoToJSON(p proto.Message) string { func TestCSDSNoXDSClient(t *testing.T) { oldNewXDSClient := newXDSClient - newXDSClient = func() xdsClient { return nil } + newXDSClient = func() xdsclient.XDSClient { return nil } defer func() { newXDSClient = oldNewXDSClient }() // Initialize an gRPC server and register CSDS on it. diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index af487ec4a736..0c2f984fbcb1 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -62,15 +62,11 @@ const ( dnsName, xdsName = "dns", "xds" ) -type xdsClient interface { - Close() -} - // For overriding in unittests. var ( onGCE = googlecloud.OnGCE - newClientWithConfig = func(config *bootstrap.Config) (xdsClient, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, error) { return xdsclient.NewWithConfig(config) } @@ -139,7 +135,7 @@ func (c2pResolverBuilder) Scheme() string { type c2pResolver struct { resolver.Resolver - client xdsClient + client xdsclient.XDSClient } func (r *c2pResolver) Close() { diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index fb68fa23a1d0..8f98d3159d3a 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" @@ -130,6 +131,7 @@ func TestBuildNotOnGCE(t *testing.T) { } type testXDSClient struct { + xdsclient.XDSClient closed chan struct{} } @@ -177,7 +179,7 @@ func TestBuildXDS(t *testing.T) { configCh := make(chan *bootstrap.Config, 1) oldNewClient := newClientWithConfig - 
newClientWithConfig = func(config *bootstrap.Config) (xdsClient, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, error) { configCh <- config return tXDSClient, nil } diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 7278c624361d..a710e4983161 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -36,7 +36,6 @@ import ( "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/edsbalancer" "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const ( @@ -59,7 +58,6 @@ var ( // not deal with subConns. return builder.Build(cc, opts), nil } - newXDSClient func() (xdsClient, error) buildProvider = buildProviderFunc ) @@ -84,17 +82,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal } b.logger = prefixLogger((b)) b.logger.Infof("Created") - - if newXDSClient != nil { - // For tests - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil - } - b.xdsClient = client - } - var creds credentials.TransportCredentials switch { case opts.DialCreds != nil: @@ -137,14 +124,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err return &cfg, nil } -// xdsClient contains methods from xdsClient.Client which are used by -// the cdsBalancer. This will be faked out in unittests. -type xdsClient interface { - WatchCluster(string, func(xdsclient.ClusterUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - // ccUpdate wraps a clientConn update received from gRPC (pushed from the // xdsResolver). 
A valid clusterName causes the cdsBalancer to register a CDS // watcher with the xdsClient, while a non-nil error causes it to cancel the @@ -184,7 +163,7 @@ type cdsBalancer struct { ccw *ccWrapper // ClientConn interface passed to child LB. bOpts balancer.BuildOptions // BuildOptions passed to child LB. updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. - xdsClient xdsClient // xDS client to watch Cluster resource. + xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource. cancelWatch func() // Cluster watch cancel func. edsLB balancer.Balancer // EDS child policy. clusterToWatch string @@ -361,15 +340,8 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { lbCfg.LrsLoadReportingServerName = new(string) } - resolverState := resolver.State{} - // Include the xds client for the child LB policies to use. For unit - // tests, b.xdsClient may not be a full *xdsclient.Client, but it will - // always be in production. - if c, ok := b.xdsClient.(*xdsclient.Client); ok { - resolverState = xdsclient.SetClient(resolverState, c) - } ccState := balancer.ClientConnState{ - ResolverState: resolverState, + ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), BalancerConfig: lbCfg, } if err := b.edsLB.UpdateClientConnState(ccState); err != nil { @@ -407,9 +379,6 @@ func (b *cdsBalancer) run() { b.edsLB.Close() b.edsLB = nil } - if newXDSClient != nil { - b.xdsClient.Close() - } if b.cachedRoot != nil { b.cachedRoot.Close() } diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 9964b9de925c..067bc2b05369 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -133,11 +133,7 @@ func (p *fakeProvider) Close() { // xDSCredentials. 
func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { t.Helper() - xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - builder := balancer.Get(cdsName) if builder == nil { t.Fatalf("balancer.Get(%q) returned nil", cdsName) @@ -164,7 +160,7 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS } // Push a ClientConnState update to the CDS balancer with a cluster name. - if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != nil { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } @@ -181,8 +177,8 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS } return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { - newXDSClient = oldNewXDSClient newEDSBalancer = oldEDSBalancerBuilder + xdsC.Close() } } diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 5c5161807be3..f36117620e68 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -28,7 +28,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" @@ -129,7 +128,10 @@ func (tb *testEDSBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS return err } gotCCS := ccs.(balancer.ClientConnState) - if !cmp.Equal(gotCCS, wantCCS, cmpopts.IgnoreUnexported(attributes.Attributes{})) { + if xdsclient.FromResolverState(gotCCS.ResolverState) == nil { + return fmt.Errorf("want resolver state with XDSClient attached, got one without") + } + if 
!cmp.Equal(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes")) { return fmt.Errorf("received ClientConnState: %+v, want %+v", gotCCS, wantCCS) } return nil @@ -173,7 +175,7 @@ func (tb *testEDSBalancer) waitForClose(ctx context.Context) error { // cdsCCS is a helper function to construct a good update passed from the // xdsResolver to the cdsBalancer. -func cdsCCS(cluster string) balancer.ClientConnState { +func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { const cdsLBConfig = `{ "loadBalancingConfig":[ { @@ -185,9 +187,9 @@ func cdsCCS(cluster string) balancer.ClientConnState { }` jsonSC := fmt.Sprintf(cdsLBConfig, cluster) return balancer.ClientConnState{ - ResolverState: resolver.State{ + ResolverState: xdsclient.SetClient(resolver.State{ ServiceConfig: internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(jsonSC), - }, + }, xdsC), BalancerConfig: &lbConfig{ClusterName: clusterName}, } } @@ -211,11 +213,7 @@ func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientCon // newEDSBalancer function to return it), and also returns a cleanup function. 
func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { t.Helper() - xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - builder := balancer.Get(cdsName) if builder == nil { t.Fatalf("balancer.Get(%q) returned nil", cdsName) @@ -232,7 +230,7 @@ func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *x return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { newEDSBalancer = oldEDSBalancerBuilder - newXDSClient = oldNewXDSClient + xdsC.Close() } } @@ -242,7 +240,7 @@ func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBal t.Helper() xdsC, cdsB, edsB, tcc, cancel := setup(t) - if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != nil { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } @@ -262,6 +260,9 @@ func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBal // cdsBalancer with different inputs and verifies that the CDS watch API on the // provided xdsClient is invoked appropriately. func (s) TestUpdateClientConnState(t *testing.T) { + xdsC := fakeclient.NewClient() + defer xdsC.Close() + tests := []struct { name string ccs balancer.ClientConnState @@ -280,14 +281,14 @@ func (s) TestUpdateClientConnState(t *testing.T) { }, { name: "happy-good-case", - ccs: cdsCCS(clusterName), + ccs: cdsCCS(clusterName, xdsC), wantCluster: clusterName, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - xdsC, cdsB, _, _, cancel := setup(t) + _, cdsB, _, _, cancel := setup(t) defer func() { cancel() cdsB.Close() @@ -324,7 +325,7 @@ func (s) TestUpdateClientConnStateWithSameState(t *testing.T) { }() // This is the same clientConn update sent in setupWithWatch(). 
- if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != nil { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } // The above update should not result in a new watch being registered. @@ -660,7 +661,7 @@ func (s) TestClose(t *testing.T) { // Make sure that the UpdateClientConnState() method on the CDS balancer // returns error. - if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != errBalancerClosed { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != errBalancerClosed { t.Fatalf("UpdateClientConnState() after close returned %v, want %v", err, errBalancerClosed) } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index c38d1a6c31a6..09d945cd0c37 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -40,7 +40,7 @@ type clusterHandler struct { // CDS Balancer cares about is the most recent update. updateChannel chan clusterHandlerUpdate - xdsClient xdsClient + xdsClient xdsclient.XDSClient } func (ch *clusterHandler) updateRootCluster(rootClusterName string) { @@ -112,7 +112,7 @@ type clusterNode struct { // CreateClusterNode creates a cluster node from a given clusterName. This will // also start the watch for that cluster. 
-func createClusterNode(clusterName string, xdsClient xdsClient, topLevelHandler *clusterHandler) *clusterNode { +func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler) *clusterNode { c := &clusterNode{ clusterHandler: topLevelHandler, } diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 404dfb22d005..ab3613bec31d 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -74,9 +74,7 @@ func init() { func TestDropByCategory(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() + defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -89,9 +87,7 @@ func TestDropByCategory(t *testing.T) { dropDenominator = 2 ) if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -176,9 +172,7 @@ func TestDropByCategory(t *testing.T) { dropDenominator2 = 4 ) if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -232,9 +226,7 @@ func TestDropByCategory(t *testing.T) { func TestDropCircuitBreaking(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { 
return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() + defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -243,9 +235,7 @@ func TestDropCircuitBreaking(t *testing.T) { var maxRequest uint32 = 50 if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -344,9 +334,7 @@ func TestDropCircuitBreaking(t *testing.T) { func TestPickerUpdateAfterClose(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() + defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -354,9 +342,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { var maxRequest uint32 = 50 if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -389,9 +375,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { func TestClusterNameInAddressAttributes(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() + defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -399,9 +383,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { defer b.Close() if err := b.UpdateClientConnState(balancer.ClientConnState{ - 
ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -450,9 +432,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { const testClusterName2 = "test-cluster-2" var addr2 = resolver.Address{Addr: "2.2.2.2"} if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: []resolver.Address{addr2}, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: []resolver.Address{addr2}}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName2, EDSServiceName: testServiceName, @@ -480,9 +460,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { func TestReResolution(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName) xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() + defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -490,9 +468,7 @@ func TestReResolution(t *testing.T) { defer b.Close() if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 9f3acafbc92b..f5fa7c12589b 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -52,8 +52,6 @@ func init() { balancer.Register(bb{}) } -var newXDSClient func() (xdsClient, error) - type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) 
balancer.Balancer { @@ -67,18 +65,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba requestCountMax: defaultRequestCountMax, } b.logger = prefixLogger(b) - - if newXDSClient != nil { - // For tests - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil - } - b.xdsClient = client - } go b.run() - b.logger.Infof("Created") return b } @@ -91,13 +78,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err return parseConfig(c) } -// xdsClient contains only the xds_client methods needed by LRS -// balancer. It's defined so we can override xdsclient in tests. -type xdsClient interface { - ReportLoad(server string) (*load.Store, func()) - Close() -} - type clusterImplBalancer struct { balancer.ClientConn @@ -115,7 +95,7 @@ type clusterImplBalancer struct { bOpts balancer.BuildOptions logger *grpclog.PrefixLogger - xdsClient xdsClient + xdsClient xdsclient.XDSClient config *LBConfig childLB balancer.Balancer @@ -328,9 +308,6 @@ func (b *clusterImplBalancer) Close() { b.childLB.Close() b.childLB = nil } - if newXDSClient != nil { - b.xdsClient.Close() - } <-b.done.Done() b.logger.Infof("Shutdown") } diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index ffc46cea469b..ea11b2f8a257 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -41,19 +41,10 @@ import ( const edsName = "eds_experimental" -// xdsClient contains only the xds_client methods needed by EDS -// balancer. It's defined so we can override xdsclient.New function in tests. 
-type xdsClient interface { - WatchEndpoints(clusterName string, edsCb func(xdsclient.EndpointsUpdate, error)) (cancel func()) - ReportLoad(server string) (loadStore *load.Store, cancel func()) - Close() -} - var ( newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lw load.PerClusterReporter, logger *grpclog.PrefixLogger) edsBalancerImplInterface { return newEDSBalancerImpl(cc, opts, enqueueState, lw, logger) } - newXDSClient func() (xdsClient, error) ) func init() { @@ -74,17 +65,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal config: &EDSConfig{}, } x.logger = prefixLogger(x) - - if newXDSClient != nil { - // For tests - client, err := newXDSClient() - if err != nil { - x.logger.Errorf("xds: failed to create xds-client: %v", err) - return nil - } - x.xdsClient = client - } - x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.loadWrapper, x.logger) x.logger.Infof("Created") go x.run() @@ -144,7 +124,7 @@ type edsBalancer struct { xdsClientUpdate chan *edsUpdate childPolicyUpdate *buffer.Unbounded - xdsClient xdsClient + xdsClient xdsclient.XDSClient loadWrapper *loadstore.Wrapper config *EDSConfig // may change when passed a different service config edsImpl edsBalancerImplInterface @@ -174,9 +154,6 @@ func (b *edsBalancer) run() { b.edsImpl.updateState(u.priority, u.s) case <-b.closed.Done(): b.cancelWatch() - if newXDSClient != nil { - b.xdsClient.Close() - } b.edsImpl.close() b.logger.Infof("Shutdown") b.done.Fire() diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index 7e16076751ab..c20e8206b9ec 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -255,8 +255,6 @@ func waitForNewEDSLB(ctx context.Context, ch *testutils.Channel) (*fakeEDSBalanc // cleanup. 
func setup(edsLBCh *testutils.Channel) (*fakeclient.Client, func()) { xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } origNewEDSBalancer := newEDSBalancer newEDSBalancer = func(cc balancer.ClientConn, _ balancer.BuildOptions, _ func(priorityType, balancer.State), _ load.PerClusterReporter, _ *grpclog.PrefixLogger) edsBalancerImplInterface { @@ -266,7 +264,7 @@ func setup(edsLBCh *testutils.Channel) (*fakeclient.Client, func()) { } return xdsC, func() { newEDSBalancer = origNewEDSBalancer - newXDSClient = oldNewXDSClient + xdsC.Close() } } @@ -348,6 +346,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { Config: json.RawMessage("{}"), } if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgA, ClusterName: testEDSClusterName, @@ -377,6 +376,7 @@ func (s) TestConfigChildPolicyUpdate(t *testing.T) { Config: json.RawMessage("{}"), } if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{ ChildPolicy: lbCfgB, ClusterName: testEDSClusterName, @@ -421,6 +421,7 @@ func (s) TestSubConnStateChange(t *testing.T) { } if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, }); err != nil { t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) @@ -467,6 +468,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { } if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, }); err != nil { t.Fatal(err) @@ -511,6 +513,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { // An update with the 
same service name should not trigger a new watch. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, }); err != nil { t.Fatal(err) @@ -549,6 +552,7 @@ func (s) TestErrorFromResolver(t *testing.T) { } if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, }); err != nil { t.Fatal(err) @@ -589,6 +593,7 @@ func (s) TestErrorFromResolver(t *testing.T) { // An update with the same service name should trigger a new watch, because // the previous watch was canceled. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, }); err != nil { t.Fatal(err) @@ -640,6 +645,7 @@ func (s) TestClientWatchEDS(t *testing.T) { defer cancel() // If eds service name is not set, should watch for cluster name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{ClusterName: "cluster-1"}, }); err != nil { t.Fatal(err) @@ -651,6 +657,7 @@ func (s) TestClientWatchEDS(t *testing.T) { // Update with an non-empty edsServiceName should trigger an EDS watch for // the same. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: "foobar-1"}, }); err != nil { t.Fatal(err) @@ -664,6 +671,7 @@ func (s) TestClientWatchEDS(t *testing.T) { // registered watch will be cancelled, which will result in an EDS request // with no resource names being sent to the server. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: "foobar-2"}, }); err != nil { t.Fatal(err) @@ -677,7 +685,7 @@ func (s) TestClientWatchEDS(t *testing.T) { // service name from an update's config. func (s) TestCounterUpdate(t *testing.T) { edsLBCh := testutils.NewChannel() - _, cleanup := setup(edsLBCh) + xdsC, cleanup := setup(edsLBCh) defer cleanup() builder := balancer.Get(edsName) @@ -690,6 +698,7 @@ func (s) TestCounterUpdate(t *testing.T) { var testCountMax uint32 = 100 // Update should trigger counter update with provided service name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{ ClusterName: "foobar-1", MaxConcurrentRequests: &testCountMax, @@ -724,6 +733,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{ ClusterName: "foobar-1", }, @@ -743,6 +753,7 @@ func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { // Update should trigger counter update with provided service name. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{ ClusterName: "foobar-2", }, diff --git a/xds/internal/balancer/edsbalancer/xds_lrs_test.go b/xds/internal/balancer/edsbalancer/xds_lrs_test.go index d5b40dd98d32..3dcbf5e259c7 100644 --- a/xds/internal/balancer/edsbalancer/xds_lrs_test.go +++ b/xds/internal/balancer/edsbalancer/xds_lrs_test.go @@ -25,7 +25,9 @@ import ( "testing" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" ) // TestXDSLoadReporting verifies that the edsBalancer starts the loadReport @@ -33,9 +35,7 @@ import ( // server (empty string). func (s) TestXDSLoadReporting(t *testing.T) { xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() + defer xdsC.Close() builder := balancer.Get(edsName) edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) @@ -45,6 +45,7 @@ func (s) TestXDSLoadReporting(t *testing.T) { defer edsB.Close() if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{ EDSServiceName: testEDSClusterName, LrsLoadReportingServerName: new(string), diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go index 75a8cbb0dd7b..ed7fb38c8545 100644 --- a/xds/internal/balancer/lrs/balancer.go +++ b/xds/internal/balancer/lrs/balancer.go @@ -36,8 +36,6 @@ func init() { balancer.Register(bb{}) } -var newXDSClient func() (xdsClient, error) - // Name is the name of the LRS balancer. 
const Name = "lrs_experimental" @@ -50,17 +48,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal } b.logger = prefixLogger(b) b.logger.Infof("Created") - - if newXDSClient != nil { - // For tests - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil - } - b.xdsClient = newXDSClientWrapper(client) - } - return b } @@ -169,15 +156,8 @@ func (ccw *ccWrapper) UpdateState(s balancer.State) { ccw.ClientConn.UpdateState(s) } -// xdsClient contains only the xds_client methods needed by LRS -// balancer. It's defined so we can override xdsclient in tests. -type xdsClient interface { - ReportLoad(server string) (*load.Store, func()) - Close() -} - type xdsClientWrapper struct { - c xdsClient + c xdsclient.XDSClient cancelLoadReport func() clusterName string edsServiceName string @@ -187,7 +167,7 @@ type xdsClientWrapper struct { loadWrapper *loadstore.Wrapper } -func newXDSClientWrapper(c xdsClient) *xdsClientWrapper { +func newXDSClientWrapper(c xdsclient.XDSClient) *xdsClientWrapper { return &xdsClientWrapper{ c: c, loadWrapper: loadstore.NewWrapper(), @@ -256,7 +236,4 @@ func (w *xdsClientWrapper) close() { w.cancelLoadReport() w.cancelLoadReport = nil } - if newXDSClient != nil { - w.c.Close() - } } diff --git a/xds/internal/balancer/lrs/balancer_test.go b/xds/internal/balancer/lrs/balancer_test.go index 9ffa2894dad8..c0ec9cc41dd3 100644 --- a/xds/internal/balancer/lrs/balancer_test.go +++ b/xds/internal/balancer/lrs/balancer_test.go @@ -35,6 +35,7 @@ import ( xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" ) const defaultTestTimeout = 1 * time.Second @@ -55,9 +56,7 @@ var ( // server (empty string). 
func TestLoadReporting(t *testing.T) { xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() + defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -65,9 +64,7 @@ func TestLoadReporting(t *testing.T) { defer lrsB.Close() if err := lrsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ ClusterName: testClusterName, EDSServiceName: testServiceName, diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 591cc3833937..bea5bbcda140 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -54,7 +54,7 @@ type ldsConfig struct { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func watchService(c xdsClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { +func watchService(c xdsclient.XDSClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { w := &serviceUpdateWatcher{ logger: logger, c: c, @@ -70,7 +70,7 @@ func watchService(c xdsClient, serviceName string, cb func(serviceUpdate, error) // callback at the right time. 
type serviceUpdateWatcher struct { logger *grpclog.PrefixLogger - c xdsClient + c xdsclient.XDSClient serviceName string ldsCancel func() serviceCb func(serviceUpdate, error) diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index a6a013698ac4..19ee01773e8c 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -27,10 +27,8 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -41,21 +39,21 @@ const xdsScheme = "xds" // the same time. func NewBuilder(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ - newXDSClient: func() (xdsClient, error) { + newXDSClient: func() (xdsclient.XDSClient, error) { return xdsclient.NewClientWithBootstrapContents(config) }, }, nil } // For overriding in unittests. -var newXDSClient = func() (xdsClient, error) { return xdsclient.New() } +var newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.New() } func init() { resolver.Register(&xdsResolverBuilder{}) } type xdsResolverBuilder struct { - newXDSClient func() (xdsClient, error) + newXDSClient func() (xdsclient.XDSClient, error) } // Build helps implement the resolver.Builder interface. @@ -119,15 +117,6 @@ func (*xdsResolverBuilder) Scheme() string { return xdsScheme } -// xdsClient contains methods from xdsClient.Client which are used by -// the resolver. This will be faked out in unittests. 
-type xdsClient interface { - WatchListener(serviceName string, cb func(xdsclient.ListenerUpdate, error)) func() - WatchRouteConfig(routeName string, cb func(xdsclient.RouteConfigUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - // suWithError wraps the ServiceUpdate and error received through a watch API // callback, so that it can pushed onto the update channel as a single entity. type suWithError struct { @@ -149,7 +138,7 @@ type xdsResolver struct { logger *grpclog.PrefixLogger // The underlying xdsClient which performs all xDS requests and responses. - client xdsClient + client xdsclient.XDSClient // A channel for the watch API callback to write service updates on to. The // updates are read by the run goroutine and passed on to the ClientConn. updateCh chan suWithError @@ -196,14 +185,7 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { state := iresolver.SetConfigSelector(resolver.State{ ServiceConfig: r.cc.ParseServiceConfig(string(sc)), }, cs) - - // Include the xds client for the LB policies to use. For unit tests, - // r.client may not be a full *xdsclient.Client, but it will always be in - // production. 
- if c, ok := r.client.(*xdsclient.Client); ok { - state = xdsclient.SetClient(state, c) - } - r.cc.UpdateState(state) + r.cc.UpdateState(xdsclient.SetClient(state, r.client)) return true } diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index d588ff157cd6..a41920998272 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -114,19 +114,19 @@ func newTestClientConn() *testClientConn { func (s) TestResolverBuilder(t *testing.T) { tests := []struct { name string - xdsClientFunc func() (xdsClient, error) + xdsClientFunc func() (xdsclient.XDSClient, error) wantErr bool }{ { name: "simple-good", - xdsClientFunc: func() (xdsClient, error) { + xdsClientFunc: func() (xdsclient.XDSClient, error) { return fakeclient.NewClient(), nil }, wantErr: false, }, { name: "newXDSClient-throws-error", - xdsClientFunc: func() (xdsClient, error) { + xdsClientFunc: func() (xdsclient.XDSClient, error) { return nil, errors.New("newXDSClient-throws-error") }, wantErr: true, @@ -167,7 +167,7 @@ func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { // Fake out the xdsClient creation process by providing a fake, which does // not have any certificate provider configuration. 
oldClientMaker := newXDSClient - newXDSClient = func() (xdsClient, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { fc := fakeclient.NewClient() fc.SetBootstrapConfig(&bootstrap.Config{}) return fc, nil @@ -194,7 +194,7 @@ func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { } type setupOpts struct { - xdsClientFunc func() (xdsClient, error) + xdsClientFunc func() (xdsclient.XDSClient, error) } func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *testClientConn, func()) { @@ -254,7 +254,7 @@ func waitForWatchRouteConfig(ctx context.Context, t *testing.T, xdsC *fakeclient func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer cancel() @@ -286,7 +286,7 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { func (s) TestXDSResolverCloseClosesXDSClient(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, _, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer cancel() xdsR.Close() @@ -300,7 +300,7 @@ func (s) TestXDSResolverCloseClosesXDSClient(t *testing.T) { func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -326,7 +326,7 @@ func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() 
(xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -460,7 +460,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer cancel() defer xdsR.Close() @@ -520,7 +520,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { func (s) TestXDSResolverRemovedResource(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer cancel() defer xdsR.Close() @@ -628,7 +628,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { func (s) TestXDSResolverWRR(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -689,7 +689,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { defer func(old bool) { env.TimeoutSupport = old }(env.TimeoutSupport) xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -792,7 +792,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer 
xdsR.Close() defer cancel() @@ -941,7 +941,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -995,7 +995,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -1041,7 +1041,7 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() @@ -1216,7 +1216,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { t.Run(tc.name, func(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClient, error) { return xdsC, nil }, + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, }) defer xdsR.Close() defer cancel() diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 37e84f998b99..2538b59255cf 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -32,6 +32,11 @@ import ( // Client is a fake implementation of an xds client. It exposes a bunch of // channels to signal the occurrence of various events. 
type Client struct { + // Embed XDSClient so this fake client implements the interface, but it's + // never set (it's always nil). This may cause nil panic since not all the + // methods are implemented. + xdsclient.XDSClient + name string ldsWatchCh *testutils.Channel rdsWatchCh *testutils.Channel diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index 99060177e1e3..d2357df0727c 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -17,20 +17,43 @@ package xdsclient -import "google.golang.org/grpc/resolver" +import ( + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) type clientKeyType string const clientKey = clientKeyType("grpc.xds.internal.client.Client") +// XDSClient is a full fledged gRPC client which queries a set of discovery APIs +// (collectively termed as xDS) on a remote management server, to discover +// various dynamic resources. +type XDSClient interface { + WatchListener(string, func(ListenerUpdate, error)) func() + WatchRouteConfig(string, func(RouteConfigUpdate, error)) func() + WatchCluster(string, func(ClusterUpdate, error)) func() + WatchEndpoints(clusterName string, edsCb func(EndpointsUpdate, error)) (cancel func()) + ReportLoad(server string) (*load.Store, func()) + + DumpLDS() (string, map[string]UpdateWithMD) + DumpRDS() (string, map[string]UpdateWithMD) + DumpCDS() (string, map[string]UpdateWithMD) + DumpEDS() (string, map[string]UpdateWithMD) + + BootstrapConfig() *bootstrap.Config + Close() +} + // FromResolverState returns the Client from state, or nil if not present. 
-func FromResolverState(state resolver.State) *Client { - cs, _ := state.Attributes.Value(clientKey).(*Client) +func FromResolverState(state resolver.State) XDSClient { + cs, _ := state.Attributes.Value(clientKey).(XDSClient) return cs } // SetClient sets c in state and returns the new state. -func SetClient(state resolver.State, c *Client) resolver.State { +func SetClient(state resolver.State, c XDSClient) resolver.State { state.Attributes = state.Attributes.WithValues(clientKey, c) return state } diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index ac832f205d59..13ef973807ce 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -579,7 +579,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( // BootstrapConfig returns the configuration read from the bootstrap file. // Callers must treat the return value as read-only. -func (c *Client) BootstrapConfig() *bootstrap.Config { +func (c *clientRefCounted) BootstrapConfig() *bootstrap.Config { return c.config } diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index c1d4b38e576a..12590408e6ca 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -263,7 +263,7 @@ func (s) TestClientNewSingleton(t *testing.T) { defer cleanup() // The first New(). Should create a Client and a new APIClient. - client, err := New() + client, err := newRefCounted() if err != nil { t.Fatalf("failed to create client: %v", err) } @@ -280,7 +280,7 @@ func (s) TestClientNewSingleton(t *testing.T) { // and should not create new API client. const count = 9 for i := 0; i < count; i++ { - tc, terr := New() + tc, terr := newRefCounted() if terr != nil { client.Close() t.Fatalf("%d-th call to New() failed with error: %v", i, terr) @@ -324,7 +324,7 @@ func (s) TestClientNewSingleton(t *testing.T) { // Call New() again after the previous Client is actually closed. 
Should // create a Client and a new APIClient. - client2, err2 := New() + client2, err2 := newRefCounted() if err2 != nil { t.Fatalf("failed to create client: %v", err) } diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index 8d0e10f2c31a..f045790e2a40 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -32,18 +32,14 @@ const defaultWatchExpiryTimeout = 15 * time.Second // This is the Client returned by New(). It contains one client implementation, // and maintains the refcount. -var singletonClient = &Client{} +var singletonClient = &clientRefCounted{} // To override in tests. var bootstrapNewConfig = bootstrap.NewConfig -// Client is a full fledged gRPC client which queries a set of discovery APIs -// (collectively termed as xDS) on a remote management server, to discover -// various dynamic resources. -// -// The xds client is a singleton. It will be shared by the xds resolver and +// clientRefCounted is ref-counted, and to be shared by the xds resolver and // balancer implementations, across multiple ClientConns and Servers. -type Client struct { +type clientRefCounted struct { *clientImpl // This mu protects all the fields, including the embedded clientImpl above. @@ -60,7 +56,18 @@ type Client struct { // Note that the first invocation of New() or NewWithConfig() sets the client // singleton. The following calls will return the singleton xds client without // checking or using the config. -func New() (*Client, error) { +func New() (XDSClient, error) { + // This cannot just return newRefCounted(), because in error cases, the + // returned nil is a typed nil (*clientRefCounted), which may cause nil + // checks fail. 
+ c, err := newRefCounted() + if err != nil { + return nil, err + } + return c, nil +} + +func newRefCounted() (*clientRefCounted, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() // If the client implementation was created, increment ref count and return @@ -96,7 +103,7 @@ func New() (*Client, error) { // // This function is internal only, for c2p resolver and testing to use. DO NOT // use this elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (*Client, error) { +func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() // If the client implementation was created, increment ref count and return @@ -120,7 +127,7 @@ func NewWithConfig(config *bootstrap.Config) (*Client, error) { // Close closes the client. It does ref count of the xds client implementation, // and closes the gRPC connection to the management server when ref count // reaches 0. -func (c *Client) Close() { +func (c *clientRefCounted) Close() { c.mu.Lock() defer c.mu.Unlock() c.refCount-- @@ -136,18 +143,18 @@ func (c *Client) Close() { // // Note that this function doesn't set the singleton, so that the testing states // don't leak. -func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (*Client, error) { +func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { cl, err := newWithConfig(config, watchExpiryTimeout) if err != nil { return nil, err } - return &Client{clientImpl: cl, refCount: 1}, nil + return &clientRefCounted{clientImpl: cl, refCount: 1}, nil } // NewClientWithBootstrapContents returns an xds client for this config, // separate from the global singleton. This should be used for testing // purposes only. 
-func NewClientWithBootstrapContents(contents []byte) (*Client, error) { +func NewClientWithBootstrapContents(contents []byte) (XDSClient, error) { // Normalize the contents buf := bytes.Buffer{} err := json.Indent(&buf, contents, "", "") @@ -180,12 +187,12 @@ func NewClientWithBootstrapContents(contents []byte) (*Client, error) { return nil, err } - c := &Client{clientImpl: cImpl, refCount: 1} + c := &clientRefCounted{clientImpl: cImpl, refCount: 1} clients[string(contents)] = c return c, nil } var ( - clients = map[string]*Client{} + clients = map[string]*clientRefCounted{} clientsMu sync.Mutex ) diff --git a/xds/internal/xdsclient/tests/dump_test.go b/xds/internal/xdsclient/tests/dump_test.go index 541f5901c121..64c78f672858 100644 --- a/xds/internal/xdsclient/tests/dump_test.go +++ b/xds/internal/xdsclient/tests/dump_test.go @@ -85,6 +85,7 @@ func (s) TestLDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() + updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. if err := compareDump(client.DumpLDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { @@ -111,7 +112,7 @@ func (s) TestLDSConfigDump(t *testing.T) { Raw: r, } } - client.NewListeners(update0, xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewListeners(update0, xdsclient.UpdateMetadata{Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpLDS, testVersion, want0); err != nil { @@ -120,7 +121,7 @@ func (s) TestLDSConfigDump(t *testing.T) { const nackVersion = "lds-version-nack" var nackErr = fmt.Errorf("lds nack error") - client.NewListeners( + updateHandler.NewListeners( map[string]xdsclient.ListenerUpdate{ ldsTargets[0]: {}, }, @@ -195,6 +196,7 @@ func (s) TestRDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() + updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. 
if err := compareDump(client.DumpRDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { @@ -221,7 +223,7 @@ func (s) TestRDSConfigDump(t *testing.T) { Raw: r, } } - client.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpRDS, testVersion, want0); err != nil { @@ -230,7 +232,7 @@ func (s) TestRDSConfigDump(t *testing.T) { const nackVersion = "rds-version-nack" var nackErr = fmt.Errorf("rds nack error") - client.NewRouteConfigs( + updateHandler.NewRouteConfigs( map[string]xdsclient.RouteConfigUpdate{ rdsTargets[0]: {}, }, @@ -305,6 +307,7 @@ func (s) TestCDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() + updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. if err := compareDump(client.DumpCDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { @@ -331,7 +334,7 @@ func (s) TestCDSConfigDump(t *testing.T) { Raw: r, } } - client.NewClusters(update0, xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewClusters(update0, xdsclient.UpdateMetadata{Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpCDS, testVersion, want0); err != nil { @@ -340,7 +343,7 @@ func (s) TestCDSConfigDump(t *testing.T) { const nackVersion = "cds-version-nack" var nackErr = fmt.Errorf("cds nack error") - client.NewClusters( + updateHandler.NewClusters( map[string]xdsclient.ClusterUpdate{ cdsTargets[0]: {}, }, @@ -401,6 +404,7 @@ func (s) TestEDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() + updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. 
if err := compareDump(client.DumpEDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { @@ -427,7 +431,7 @@ func (s) TestEDSConfigDump(t *testing.T) { Raw: r, } } - client.NewEndpoints(update0, xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewEndpoints(update0, xdsclient.UpdateMetadata{Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpEDS, testVersion, want0); err != nil { @@ -436,7 +440,7 @@ func (s) TestEDSConfigDump(t *testing.T) { const nackVersion = "eds-version-nack" var nackErr = fmt.Errorf("eds nack error") - client.NewEndpoints( + updateHandler.NewEndpoints( map[string]xdsclient.EndpointsUpdate{ edsTargets[0]: {}, }, diff --git a/xds/server.go b/xds/server.go index 989859bc65c8..cfbea1a1bca2 100644 --- a/xds/server.go +++ b/xds/server.go @@ -35,14 +35,13 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/server" "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func() (xdsClient, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.New() } newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { @@ -58,14 +57,6 @@ func prefixLogger(p *GRPCServer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, p)) } -// xdsClient contains methods from xdsClient.Client which are used by -// the server. This is useful for overriding in unit tests. -type xdsClient interface { - WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - // grpcServer contains methods from grpc.Server which are used by the // GRPCServer type here. This is useful for overriding in unit tests. 
type grpcServer interface { @@ -90,7 +81,7 @@ type GRPCServer struct { // beginning of Serve(), where we have to decide if we have to create a // client or use an existing one. clientMu sync.Mutex - xdsC xdsClient + xdsC xdsclient.XDSClient } // NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. @@ -156,7 +147,7 @@ func (s *GRPCServer) initXDSClient() error { newXDSClient := newXDSClient if s.opts.bootstrapContents != nil { - newXDSClient = func() (xdsClient, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.NewClientWithBootstrapContents(s.opts.bootstrapContents) } } diff --git a/xds/server_test.go b/xds/server_test.go index 27a33da091d0..45df8b76fca9 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -247,7 +247,7 @@ func (p *fakeProvider) Close() { func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { c := fakeclient.NewClient() c.SetBootstrapConfig(&bootstrap.Config{ BalancerName: "dummyBalancer", @@ -277,7 +277,7 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { c := fakeclient.NewClient() bc := &bootstrap.Config{ BalancerName: "dummyBalancer", @@ -544,7 +544,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // xdsClient with the specified bootstrap configuration. 
clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { c := fakeclient.NewClient() c.SetBootstrapConfig(test.bootstrapConfig) clientCh.Send(c) @@ -587,7 +587,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // verifies that Server() exits with a non-nil error. func (s) TestServeNewClientFailure(t *testing.T) { origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClient, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { return nil, errors.New("xdsClient creation failed") } defer func() { newXDSClient = origNewXDSClient }() From aa1169ab7c3b34a8ed665b16ce9cfc5343306807 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 9 Jun 2021 10:01:40 -0700 Subject: [PATCH 125/998] vet: remove support for non-module-aware Go versions (#4530) --- vet.sh | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/vet.sh b/vet.sh index 1a0dbd7ee5ab..5eaa8b05d6d3 100755 --- a/vet.sh +++ b/vet.sh @@ -32,26 +32,14 @@ PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. 
+ pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then PROTOBUF_VERSION=3.14.0 From 95e48a892d6c51e95d2aa77742da72c2df14dc28 Mon Sep 17 00:00:00 2001 From: Aliaksandr Mianzhynski Date: Wed, 9 Jun 2021 21:05:17 +0300 Subject: [PATCH 126/998] Add GetServiceInfo to xds.GRPCServer (#4507) --- xds/server.go | 7 +++++++ xds/server_test.go | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/xds/server.go b/xds/server.go index cfbea1a1bca2..aad05d81d116 100644 --- a/xds/server.go +++ b/xds/server.go @@ -64,6 +64,7 @@ type grpcServer interface { Serve(net.Listener) error Stop() GracefulStop() + GetServiceInfo() map[string]grpc.ServiceInfo } // GRPCServer wraps a gRPC server and provides server-side xDS functionality, by @@ -136,6 +137,12 @@ func (s *GRPCServer) RegisterService(sd *grpc.ServiceDesc, ss interface{}) { s.gs.RegisterService(sd, ss) } +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *GRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { + return s.gs.GetServiceInfo() +} + // initXDSClient creates a new xdsClient if there is no existing one available. 
func (s *GRPCServer) initXDSClient() error { s.clientMu.Lock() diff --git a/xds/server_test.go b/xds/server_test.go index 45df8b76fca9..68e0d85c692d 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -86,6 +86,10 @@ func (f *fakeGRPCServer) GracefulStop() { f.gracefulStopCh.Send(nil) } +func (f *fakeGRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { + panic("implement me") +} + func newFakeGRPCServer() *fakeGRPCServer { return &fakeGRPCServer{ done: make(chan struct{}), From 6351a55c3895e5658b2c59769c81109d962d0e04 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 10 Jun 2021 09:33:06 -0700 Subject: [PATCH 127/998] xds: remove env var protetion of advanced routing features (#4529) --- internal/xds/env/env.go | 14 ------- xds/internal/balancer/edsbalancer/eds_impl.go | 4 -- .../balancer/edsbalancer/eds_impl_test.go | 9 ----- xds/internal/httpfilter/fault/fault.go | 5 +-- xds/internal/httpfilter/fault/fault_test.go | 9 ----- xds/internal/resolver/serviceconfig.go | 3 +- xds/internal/resolver/xds_resolver_test.go | 36 ++++++------------ xds/internal/xdsclient/cds_test.go | 3 -- xds/internal/xdsclient/lds_test.go | 38 ------------------- xds/internal/xdsclient/rds_test.go | 30 --------------- xds/internal/xdsclient/xds.go | 9 +---- 11 files changed, 15 insertions(+), 145 deletions(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index db9ac93b968c..05a017f2611b 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -39,9 +39,6 @@ const ( // When both bootstrap FileName and FileContent are set, FileName is used. 
BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - circuitBreakingSupportEnv = "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" - timeoutSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT" - faultInjectionSupportEnv = "GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION" clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" @@ -63,17 +60,6 @@ var ( // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) - // CircuitBreakingSupport indicates whether circuit breaking support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" to "false". - CircuitBreakingSupport = !strings.EqualFold(os.Getenv(circuitBreakingSupportEnv), "false") - // TimeoutSupport indicates whether support for max_stream_duration in - // route actions is enabled. This can be disabled by setting the - // environment variable "GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT" to "false". - TimeoutSupport = !strings.EqualFold(os.Getenv(timeoutSupportEnv), "false") - // FaultInjectionSupport is used to control both fault injection and HTTP - // filter support. - FaultInjectionSupport = !strings.EqualFold(os.Getenv(faultInjectionSupportEnv), "false") // ClientSideSecuritySupport is used to control processing of security // configuration on the client-side. 
// diff --git a/xds/internal/balancer/edsbalancer/eds_impl.go b/xds/internal/balancer/edsbalancer/eds_impl.go index db11dec6f2f8..c4dfd6dd6d0b 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl.go +++ b/xds/internal/balancer/edsbalancer/eds_impl.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" xdsi "google.golang.org/grpc/xds/internal" @@ -418,9 +417,6 @@ func (edsImpl *edsBalancerImpl) handleSubConnStateChange(sc balancer.SubConn, s // updateServiceRequestsConfig handles changes to the circuit breaking configuration. func (edsImpl *edsBalancerImpl) updateServiceRequestsConfig(serviceName string, max *uint32) { - if !env.CircuitBreakingSupport { - return - } edsImpl.pickerMu.Lock() var updatePicker bool if edsImpl.serviceRequestsCounter == nil || edsImpl.serviceRequestsCounter.ServiceName != serviceName { diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/edsbalancer/eds_impl_test.go index 2c1498dd3f78..8599573a2917 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_test.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/internal/xds/env" xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/testutils" @@ -575,10 +574,6 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { } func (s) TestEDS_CircuitBreaking(t *testing.T) { - origCircuitBreakingSupport := env.CircuitBreakingSupport - env.CircuitBreakingSupport = true - defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() - cc := testutils.NewTestClientConn(t) 
edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState @@ -812,10 +807,6 @@ func (s) TestDropPicker(t *testing.T) { } func (s) TestEDS_LoadReport(t *testing.T) { - origCircuitBreakingSupport := env.CircuitBreakingSupport - env.CircuitBreakingSupport = true - defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() - // We create an xdsClientWrapper with a dummy xdsClient which only // implements the LoadStore() method to return the underlying load.Store to // be used. diff --git a/xds/internal/httpfilter/fault/fault.go b/xds/internal/httpfilter/fault/fault.go index ee2ed9fd4922..42f8e70af93b 100644 --- a/xds/internal/httpfilter/fault/fault.go +++ b/xds/internal/httpfilter/fault/fault.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/httpfilter" @@ -63,9 +62,7 @@ var statusMap = map[int]codes.Code{ } func init() { - if env.FaultInjectionSupport { - httpfilter.Register(builder{}) - } + httpfilter.Register(builder{}) } type builder struct { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 1c6db707c6a3..46606449cb9d 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -40,10 +40,8 @@ import ( "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/httpfilter" xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" 
"google.golang.org/protobuf/types/known/wrapperspb" @@ -140,13 +138,6 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { } } -func init() { - env.FaultInjectionSupport = true - // Manually register to avoid a race between this init and the one that - // check the env var to register the fault injection filter. - httpfilter.Register(builder{}) -} - func (s) TestFaultInjection_Unary(t *testing.T) { type subcase struct { name string diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 31941fdc022c..2521b0d0193e 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -28,7 +28,6 @@ import ( "google.golang.org/grpc/codes" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/wrr" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/clustermanager" "google.golang.org/grpc/xds/internal/httpfilter" @@ -179,7 +178,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP Interceptor: interceptor, } - if env.TimeoutSupport && rt.maxStreamDuration != 0 { + if rt.maxStreamDuration != 0 { config.MethodConfig.Timeout = &rt.maxStreamDuration } diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index a41920998272..a519557df6ac 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -38,7 +38,6 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/wrr" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" @@ -686,7 +685,6 @@ func (s) TestXDSResolverWRR(t *testing.T) { } func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { - defer func(old bool) { 
env.TimeoutSupport = old }(env.TimeoutSupport) xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, @@ -740,35 +738,25 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { } testCases := []struct { - name string - method string - timeoutSupport bool - want *time.Duration + name string + method string + want *time.Duration }{{ - name: "RDS setting", - method: "/foo/method", - timeoutSupport: true, - want: newDurationP(5 * time.Second), + name: "RDS setting", + method: "/foo/method", + want: newDurationP(5 * time.Second), }, { - name: "timeout support disabled", - method: "/foo/method", - timeoutSupport: false, - want: nil, + name: "explicit zero in RDS; ignore LDS", + method: "/bar/method", + want: nil, }, { - name: "explicit zero in RDS; ignore LDS", - method: "/bar/method", - timeoutSupport: true, - want: nil, - }, { - name: "no config in RDS; fallback to LDS", - method: "/baz/method", - timeoutSupport: true, - want: newDurationP(time.Second), + name: "no config in RDS; fallback to LDS", + method: "/baz/method", + want: newDurationP(time.Second), }} for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - env.TimeoutSupport = tc.timeoutSupport req := iresolver.RPCInfo{ Method: tc.method, Context: context.Background(), diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index 83b9071ad134..88bfe21a7bdb 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -303,9 +303,6 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, } - origCircuitBreakingSupport := env.CircuitBreakingSupport - env.CircuitBreakingSupport = true - defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv env.AggregateAndDNSSupportEnv = true defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }() diff 
--git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index 1667698bd578..ebebde84b6a1 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -34,7 +34,6 @@ import ( "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" @@ -186,7 +185,6 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { wantUpdate map[string]ListenerUpdate wantMD UpdateMetadata wantErr bool - disableFI bool // disable fault injection }{ { name: "non-listener resource", @@ -358,18 +356,6 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Version: testVersion, }, }, - { - name: "v3 with custom filter, fault injection disabled", - resources: []*anypb.Any{v3LisWithFilters(customFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(customFilter)}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - disableFI: true, - }, { name: "v3 with two filters with same name", resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter)}, @@ -477,22 +463,6 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Version: testVersion, }, }, - { - name: "v3 with error filter, fault injection disabled", - resources: []*anypb.Any{v3LisWithFilters(errFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, - MaxStreamDuration: time.Second, - Raw: v3LisWithFilters(errFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - disableFI: true, - }, { name: "v2 listener resource", resources: []*anypb.Any{v2Lis}, @@ -572,9 +542,6 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { for _, test := range tests { 
t.Run(test.name, func(t *testing.T) { - oldFI := env.FaultInjectionSupport - env.FaultInjectionSupport = !test.disableFI - update, md, err := UnmarshalListener(testVersion, test.resources, nil) if (err != nil) != test.wantErr { t.Fatalf("UnmarshalListener(), got err: %v, wantErr: %v", err, test.wantErr) @@ -585,7 +552,6 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) } - env.FaultInjectionSupport = oldFI }) } } @@ -1287,10 +1253,6 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - oldFI := env.FaultInjectionSupport - env.FaultInjectionSupport = true - defer func() { env.FaultInjectionSupport = oldFI }() - gotUpdate, md, err := UnmarshalListener(testVersion, test.resources, nil) if (err != nil) != (test.wantErr != "") { t.Fatalf("UnmarshalListener(), got err: %v, wantErr: %v", err, test.wantErr) diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index 660a2f29d21a..adabe5f41671 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -29,7 +29,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" "google.golang.org/protobuf/types/known/durationpb" @@ -88,7 +87,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { rc *v3routepb.RouteConfiguration wantUpdate RouteConfigUpdate wantError bool - disableFI bool // disable fault injection }{ { name: "default-route-match-field-is-nil", @@ -474,19 +472,10 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { rc: 
goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("unknown.custom.filter")}), wantUpdate: goodUpdateWithFilterConfigs(nil), }, - { - name: "good-route-config-with-http-err-filter-config-fi-disabled", - disableFI: true, - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), - wantUpdate: goodUpdateWithFilterConfigs(nil), - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - oldFI := env.FaultInjectionSupport - env.FaultInjectionSupport = !test.disableFI - gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, nil, false) if (gotError != nil) != test.wantError || !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty(), @@ -494,8 +483,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { return fmt.Sprint(fc) })) { t.Errorf("generateRDSUpdateFromRouteConfiguration(%+v, %v) returned unexpected, diff (-want +got):\\n%s", test.rc, ldsTarget, cmp.Diff(test.wantUpdate, gotUpdate, cmpopts.EquateEmpty())) - - env.FaultInjectionSupport = oldFI } }) } @@ -815,7 +802,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { routes []*v3routepb.Route wantRoutes []*Route wantErr bool - disableFI bool // disable fault injection }{ { name: "no path", @@ -1182,12 +1168,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("custom.filter")}), wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), }, - { - name: "with custom HTTP filter config, FI disabled", - disableFI: true, - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), - wantRoutes: goodUpdateWithFilterConfigs(nil), - }, { name: "with erroring custom HTTP filter config", routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), @@ -1198,12 +1178,6 @@ func (s) TestRoutesProtoToSlice(t 
*testing.T) { routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("err.custom.filter")}), wantErr: true, }, - { - name: "with erroring custom HTTP filter config, FI disabled", - disableFI: true, - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), - wantRoutes: goodUpdateWithFilterConfigs(nil), - }, { name: "with unknown custom HTTP filter config", routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": unknownFilterConfig}), @@ -1226,10 +1200,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - oldFI := env.FaultInjectionSupport - env.FaultInjectionSupport = !tt.disableFI - defer func() { env.FaultInjectionSupport = oldFI }() - got, err := routesProtoToSlice(tt.routes, nil, false) if (err != nil) != tt.wantErr { t.Fatalf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 44fd883e3f36..dd68f6c68bc8 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -178,7 +178,7 @@ func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Fi } func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { - if !env.FaultInjectionSupport || len(cfgs) == 0 { + if len(cfgs) == 0 { return nil, nil } m := make(map[string]httpfilter.FilterConfig) @@ -207,10 +207,6 @@ func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilt } func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { - if !env.FaultInjectionSupport { - return nil, nil - } - ret := make([]HTTPFilter, 0, len(filters)) seenNames := make(map[string]bool, len(filters)) for _, filter := range filters { @@ -776,9 +772,6 @@ func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*Secu // the received cluster resource. 
Returns nil if no CircuitBreakers or no // Thresholds in CircuitBreakers. func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { - if !env.CircuitBreakingSupport { - return nil - } for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { continue From 2d3b1f900edcb0f08915526e01adb17d1c829180 Mon Sep 17 00:00:00 2001 From: Dustin Ward Date: Fri, 11 Jun 2021 12:48:03 -0400 Subject: [PATCH 128/998] grpc: prevent deadlock in Test/ClientUpdatesParamsAfterGoAway on failure (#4534) --- clientconn_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clientconn_test.go b/clientconn_test.go index 6c61666b7efb..a50db9419c2a 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -735,16 +735,15 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { time.Sleep(10 * time.Millisecond) cc.mu.RLock() v := cc.mkp.Time + cc.mu.RUnlock() if v == 20*time.Second { // Success - cc.mu.RUnlock() return } if ctx.Err() != nil { // Timeout t.Fatalf("cc.dopts.copts.Keepalive.Time = %v , want 20s", v) } - cc.mu.RUnlock() } } From 45549242f79aacb850de77336a76777bef8bbe01 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 11 Jun 2021 13:14:09 -0700 Subject: [PATCH 129/998] internal: fix deadlock during switch_balancer and NewSubConn() (#4536) --- clientconn.go | 5 +++++ resolver_conn_wrapper.go | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/clientconn.go b/clientconn.go index 0236c81c4d4a..5cef39295bdc 100644 --- a/clientconn.go +++ b/clientconn.go @@ -711,7 +711,12 @@ func (cc *ClientConn) switchBalancer(name string) { return } if cc.balancerWrapper != nil { + // Don't hold cc.mu while closing the balancers. The balancers may call + // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex + // would cause a deadlock in that case. 
+ cc.mu.Unlock() cc.balancerWrapper.close() + cc.mu.Lock() } builder := balancer.Get(name) diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index 4118de571ab5..2c47cd54f07c 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -39,6 +39,8 @@ type ccResolverWrapper struct { resolver resolver.Resolver done *grpcsync.Event curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -90,6 +92,8 @@ func (ccr *ccResolverWrapper) close() { } func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return nil } @@ -105,6 +109,8 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { } func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -114,6 +120,8 @@ func (ccr *ccResolverWrapper) ReportError(err error) { // NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -128,6 +136,8 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } From 22c535818725b54cc34ccbc4b953318f19bc13a6 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 14 Jun 2021 15:02:50 -0400 Subject: [PATCH 130/998] xds: add HashPolicy fields to RDS update (#4521) * Add HashPolicy fields to RDS update --- internal/xds/env/env.go | 6 +- xds/internal/xdsclient/client.go | 24 ++++ xds/internal/xdsclient/rds_test.go | 173 ++++++++++++++++++++++++++++- xds/internal/xdsclient/xds.go | 41 +++++++ 4 files changed, 242 insertions(+), 2 deletions(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 05a017f2611b..e90c7ffc465c 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -39,6 +39,7 @@ const ( // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" @@ -59,7 +60,10 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) - + // RingHashSupport indicates whether ring hash support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "true". + RingHashSupport = strings.EqualFold(os.Getenv(ringHashSupportEnv), "true") // ClientSideSecuritySupport is used to control processing of security // configuration on the client-side. 
// diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 13ef973807ce..cb0d93bd98ec 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -269,6 +269,28 @@ type VirtualHost struct { HTTPFilterConfigOverride map[string]httpfilter.FilterConfig } +// HashPolicyType specifies the type of HashPolicy from a received RDS Response. +type HashPolicyType int + +const ( + // HashPolicyTypeHeader specifies to hash a Header in the incoming request. + HashPolicyTypeHeader HashPolicyType = iota + // HashPolicyTypeChannelID specifies to hash a unique Identifier of the + // Channel. In grpc-go, this will be done using the ClientConn pointer. + HashPolicyTypeChannelID +) + +// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing +// load balancer. +type HashPolicy struct { + HashPolicyType HashPolicyType + Terminal bool + // Fields used for type HEADER. + HeaderName string + Regex *regexp.Regexp + RegexSubstitution string +} + // Route is both a specification of how to match a request as well as an // indication of the action to take upon match. type Route struct { @@ -281,6 +303,8 @@ type Route struct { Headers []*HeaderMatcher Fraction *uint32 + HashPolicies []*HashPolicy + // If the matchers above indicate a match, the below configuration is used. 
WeightedClusters map[string]WeightedCluster // If MaxStreamDuration is nil, it indicates neither of the route action's diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index adabe5f41671..b78a49a3e31a 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -29,6 +29,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" "google.golang.org/protobuf/types/known/durationpb" @@ -1153,6 +1154,61 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }}, wantErr: false, }, + { + name: "good-with-channel-id-hash-policy", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{ + PrefixMatch: "tv", + }, + InvertMatch: true, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + TotalWeight: &wrapperspb.UInt32Value{Value: 100}, + }}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "io.grpc.channel_id"}}}, + }, + }}, + }, + }, + wantRoutes: []*Route{{ + Prefix: 
newStringP("/a/"), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(true), + PrefixMatch: newStringP("tv"), + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + HashPolicies: []*HashPolicy{ + {HashPolicyType: HashPolicyTypeChannelID}, + }, + }}, + wantErr: false, + }, { name: "with custom HTTP filter config", routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), @@ -1197,7 +1253,9 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { return fmt.Sprint(fc) }), } - + oldRingHashSupport := env.RingHashSupport + env.RingHashSupport = true + defer func() { env.RingHashSupport = oldRingHashSupport }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := routesProtoToSlice(tt.routes, nil, false) @@ -1211,6 +1269,119 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { } } +func (s) TestHashPoliciesProtoToSlice(t *testing.T) { + tests := []struct { + name string + hashPolicies []*v3routepb.RouteAction_HashPolicy + wantHashPolicies []*HashPolicy + wantErr bool + }{ + // header-hash-policy tests a basic hash policy that specifies to hash a + // certain header. + { + name: "header-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + { + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: ":path", + RegexRewrite: &v3matcherpb.RegexMatchAndSubstitute{ + Pattern: &v3matcherpb.RegexMatcher{Regex: "/products"}, + Substitution: "/products", + }, + }, + }, + }, + }, + wantHashPolicies: []*HashPolicy{ + { + HashPolicyType: HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), + RegexSubstitution: "/products", + }, + }, + }, + // channel-id-hash-policy tests a basic hash policy that specifies to + // hash a unique identifier of the channel. 
+ { + name: "channel-id-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "io.grpc.channel_id"}}}, + }, + wantHashPolicies: []*HashPolicy{ + {HashPolicyType: HashPolicyTypeChannelID}, + }, + }, + // unsupported-filter-state-key tests that an unsupported key in the + // filter state hash policy are treated as a no-op. + { + name: "wrong-filter-state-key", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "unsupported key"}}}, + }, + }, + // no-op-hash-policy tests that hash policies that are not supported by + // grpc are treated as a no-op. + { + name: "no-op-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{}}, + }, + }, + // header-and-channel-id-hash-policy test that a list of header and + // channel id hash policies are successfully converted to an internal + // struct. 
+ { + name: "header-and-channel-id-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + { + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: ":path", + RegexRewrite: &v3matcherpb.RegexMatchAndSubstitute{ + Pattern: &v3matcherpb.RegexMatcher{Regex: "/products"}, + Substitution: "/products", + }, + }, + }, + }, + { + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "io.grpc.channel_id"}}, + Terminal: true, + }, + }, + wantHashPolicies: []*HashPolicy{ + { + HashPolicyType: HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), + RegexSubstitution: "/products", + }, + { + HashPolicyType: HashPolicyTypeChannelID, + Terminal: true, + }, + }, + }, + } + + oldRingHashSupport := env.RingHashSupport + env.RingHashSupport = true + defer func() { env.RingHashSupport = oldRingHashSupport }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := hashPoliciesProtoToSlice(tt.hashPolicies, nil) + if (err != nil) != tt.wantErr { + t.Fatalf("hashPoliciesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.wantHashPolicies, cmp.AllowUnexported(regexp.Regexp{})); diff != "" { + t.Fatalf("hashPoliciesProtoToSlice() returned unexpected diff (-got +want):\n%s", diff) + } + }) + } +} + func newStringP(s string) *string { return &s } diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index dd68f6c68bc8..b4b0dd1a45c1 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -496,6 +496,16 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, route.WeightedClusters = make(map[string]WeightedCluster) action := r.GetRoute() + + // Hash Policies are only applicable for a Ring Hash LB. 
+ if env.RingHashSupport { + hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) + if err != nil { + return nil, err + } + route.HashPolicies = hp + } + switch a := action.GetClusterSpecifier().(type) { case *v3routepb.RouteAction_Cluster: route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} @@ -557,6 +567,37 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, return routesRet, nil } +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { + var hashPoliciesRet []*HashPolicy + for _, p := range policies { + policy := HashPolicy{Terminal: p.Terminal} + switch p.GetPolicySpecifier().(type) { + case *v3routepb.RouteAction_HashPolicy_Header_: + policy.HashPolicyType = HashPolicyTypeHeader + policy.HeaderName = p.GetHeader().GetHeaderName() + regex := p.GetHeader().GetRegexRewrite().GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = p.GetHeader().GetRegexRewrite().GetSubstitution() + case *v3routepb.RouteAction_HashPolicy_FilterState_: + if p.GetFilterState().GetKey() != "io.grpc.channel_id" { + logger.Infof("hash policy %+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + continue + } + policy.HashPolicyType = HashPolicyTypeChannelID + default: + logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier()) + continue + } + + hashPoliciesRet = append(hashPoliciesRet, &policy) + } + return hashPoliciesRet, nil +} + // UnmarshalCluster processes resources received in an CDS response, validates // them, and transforms them into a native struct which contains only fields we // are interested in. 
From f06e0060c6567a63a687be461f905268b9cc193d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 15 Jun 2021 10:49:54 -0700 Subject: [PATCH 131/998] Change version to 1.40.0-dev (#4543) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 4e26aec6ac18..54e4ea43bd44 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.39.0-dev" +const Version = "1.40.0-dev" From cd9f53ac49fe8d2ae979dd94cb0eb2a5e5b9660c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 15 Jun 2021 11:09:10 -0700 Subject: [PATCH 132/998] xds/cds: update CDS balancer to partially handle aggregated cluster (#4539) --- .../balancer/cdsbalancer/cdsbalancer.go | 88 +++++++------------ .../balancer/cdsbalancer/cluster_handler.go | 86 ++++++++++++------ .../cdsbalancer/cluster_handler_test.go | 14 +-- 3 files changed, 96 insertions(+), 92 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index a710e4983161..cb97353ff46a 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -73,12 +73,11 @@ type bb struct{} // Build creates a new CDS balancer with the ClientConn. func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &cdsBalancer{ - bOpts: opts, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - cancelWatch: func() {}, // No-op at this point. 
- xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), } b.logger = prefixLogger((b)) b.logger.Infof("Created") @@ -93,7 +92,7 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.xdsCredsInUse = true } b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse) - + b.clusterHandler = newClusterHandler(b) b.ccw = &ccWrapper{ ClientConn: cc, xdsHI: b.xdsHI, @@ -133,11 +132,6 @@ type ccUpdate struct { err error } -type clusterHandlerUpdate struct { - chu []xdsclient.ClusterUpdate - err error -} - // scUpdate wraps a subConn update received from gRPC. This is directly passed // on to the edsBalancer. type scUpdate struct { @@ -145,15 +139,6 @@ type scUpdate struct { state balancer.SubConnState } -// watchUpdate wraps the information received from a registered CDS watcher. A -// non-nil error is propagated to the underlying edsBalancer. A valid update -// results in creating a new edsBalancer (if one doesn't already exist) and -// pushing the update to it. -type watchUpdate struct { - cds xdsclient.ClusterUpdate - err error -} - // cdsBalancer implements a CDS based LB policy. It instantiates an EDS based // LB policy to further resolve the serviceName received from CDS, into // localities and endpoints. Implements the balancer.Balancer interface which @@ -164,9 +149,8 @@ type cdsBalancer struct { bOpts balancer.BuildOptions // BuildOptions passed to child LB. updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource. - cancelWatch func() // Cluster watch cancel func. + clusterHandler *clusterHandler // To watch the clusters. edsLB balancer.Balancer // EDS child policy. 
- clusterToWatch string logger *grpclog.PrefixLogger closed *grpcsync.Event done *grpcsync.Event @@ -188,19 +172,9 @@ func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { // update, only if the status quo has changed. if err := update.err; err != nil { b.handleErrorFromUpdate(err, true) - } - if b.clusterToWatch == update.clusterName { return } - if update.clusterName != "" { - cancelWatch := b.xdsClient.WatchCluster(update.clusterName, b.handleClusterUpdate) - b.logger.Infof("Watch started on resource name %v with xds-client %p", update.clusterName, b.xdsClient) - b.cancelWatch = func() { - cancelWatch() - b.logger.Infof("Watch cancelled on resource name %v with xds-client %p", update.clusterName, b.xdsClient) - } - b.clusterToWatch = update.clusterName - } + b.clusterHandler.updateRootCluster(update.clusterName) } // handleSecurityConfig processes the security configuration received from the @@ -293,21 +267,21 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying edsBalancer. -func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { +func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { if err := update.err; err != nil { b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) b.handleErrorFromUpdate(err, false) return } - b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, pretty.ToJSON(update.cds)) + b.logger.Infof("Watch update from xds-client %p, content: %+v, security config: %v", b.xdsClient, pretty.ToJSON(update.chu), pretty.ToJSON(update.securityCfg)) // Process the security config from the received update before building the // child policy or forwarding the update to it. We do this because the child // policy may try to create a new subConn inline. 
Processing the security // configuration here and setting up the handshakeInfo will make sure that // such attempts are handled properly. - if err := b.handleSecurityConfig(update.cds.SecurityCfg); err != nil { + if err := b.handleSecurityConfig(update.securityCfg); err != nil { // If the security config is invalid, for example, if the provider // instance is not found in the bootstrap config, we need to put the // channel in transient failure. @@ -328,12 +302,24 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { b.edsLB = edsLB b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) } + + if len(update.chu) == 0 { + b.logger.Infof("got update with 0 cluster updates, should never happen. There should be at least one cluster") + } + // TODO: this function is currently only handling the cluster with higher + // priority. This should work in most cases (e.g. if the cluster is not a + // aggregated cluster, or if the higher priority cluster works fine so + // there's no need to fallback). This quick fix is to unblock the testing + // work before the full fallback support is complete. Once the EDS balancer + // is updated to cluster_resolver, which has the fallback functionality, we + // will fix this to handle all the clusters in list. + cds := update.chu[0] lbCfg := &edsbalancer.EDSConfig{ - ClusterName: update.cds.ClusterName, - EDSServiceName: update.cds.EDSServiceName, - MaxConcurrentRequests: update.cds.MaxRequests, + ClusterName: cds.ClusterName, + EDSServiceName: cds.EDSServiceName, + MaxConcurrentRequests: cds.MaxRequests, } - if update.cds.EnableLRS { + if cds.EnableLRS { // An empty string here indicates that the edsBalancer should use the // same xDS server for load reporting as it does for EDS // requests/responses. 
@@ -368,13 +354,11 @@ func (b *cdsBalancer) run() { break } b.edsLB.UpdateSubConnState(update.subConn, update.state) - case *watchUpdate: - b.handleWatchUpdate(update) } + case u := <-b.clusterHandler.updateChannel: + b.handleWatchUpdate(u) case <-b.closed.Done(): - b.cancelWatch() - b.cancelWatch = func() {} - + b.clusterHandler.close() if b.edsLB != nil { b.edsLB.Close() b.edsLB = nil @@ -416,7 +400,7 @@ func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { // This is not necessary today, because xds client never sends connection // errors. if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - b.cancelWatch() + b.clusterHandler.close() } if b.edsLB != nil { b.edsLB.ResolverError(err) @@ -430,16 +414,6 @@ func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { } } -// handleClusterUpdate is the CDS watch API callback. It simply pushes the -// received information on to the update channel for run() to pick it up. -func (b *cdsBalancer) handleClusterUpdate(cu xdsclient.ClusterUpdate, err error) { - if b.closed.HasFired() { - b.logger.Warningf("xds: received cluster update {%+v} after cdsBalancer was closed", cu) - return - } - b.updateCh.Put(&watchUpdate{cds: cu, err: err}) -} - // UpdateClientConnState receives the serviceConfig (which contains the // clusterName to watch for in CDS) and the xdsClient object from the // xdsResolver. diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index 09d945cd0c37..b0760c7630ab 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -25,10 +25,24 @@ import ( var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") +// clusterHandlerUpdate wraps the information received from the registered CDS +// watcher. 
A non-nil error is propagated to the underlying cluster_resolver +// balancer. A valid update results in creating a new cluster_resolver balancer +// (if one doesn't already exist) and pushing the update to it. +type clusterHandlerUpdate struct { + // securityCfg is the Security Config from the top (root) cluster. + securityCfg *xdsclient.SecurityConfig + // chu is a list of ClusterUpdates from all the leaf clusters. + chu []xdsclient.ClusterUpdate + err error +} + // clusterHandler will be given a name representing a cluster. It will then // update the CDS policy constantly with a list of Clusters to pass down to // XdsClusterResolverLoadBalancingPolicyConfig in a stream like fashion. type clusterHandler struct { + parent *cdsBalancer + // A mutex to protect entire tree of clusters. clusterMutex sync.Mutex root *clusterNode @@ -39,8 +53,13 @@ type clusterHandler struct { // update or from a child with an error. Capacity of one as the only update // CDS Balancer cares about is the most recent update. updateChannel chan clusterHandlerUpdate +} - xdsClient xdsclient.XDSClient +func newClusterHandler(parent *cdsBalancer) *clusterHandler { + return &clusterHandler{ + parent: parent, + updateChannel: make(chan clusterHandlerUpdate, 1), + } } func (ch *clusterHandler) updateRootCluster(rootClusterName string) { @@ -48,7 +67,7 @@ func (ch *clusterHandler) updateRootCluster(rootClusterName string) { defer ch.clusterMutex.Unlock() if ch.root == nil { // Construct a root node on first update. - ch.root = createClusterNode(rootClusterName, ch.xdsClient, ch) + ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) ch.rootClusterName = rootClusterName return } @@ -56,24 +75,33 @@ func (ch *clusterHandler) updateRootCluster(rootClusterName string) { // new one, if not do nothing. 
if rootClusterName != ch.rootClusterName { ch.root.delete() - ch.root = createClusterNode(rootClusterName, ch.xdsClient, ch) + ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) ch.rootClusterName = rootClusterName } } // This function tries to construct a cluster update to send to CDS. func (ch *clusterHandler) constructClusterUpdate() { - // If there was an error received no op, as this simply means one of the - // children hasn't received an update yet. - if clusterUpdate, err := ch.root.constructClusterUpdate(); err == nil { - // For a ClusterUpdate, the only update CDS cares about is the most - // recent one, so opportunistically drain the update channel before - // sending the new update. - select { - case <-ch.updateChannel: - default: - } - ch.updateChannel <- clusterHandlerUpdate{chu: clusterUpdate, err: nil} + if ch.root == nil { + // If root is nil, this handler is closed, ignore the update. + return + } + clusterUpdate, err := ch.root.constructClusterUpdate() + if err != nil { + // If there was an error received no op, as this simply means one of the + // children hasn't received an update yet. + return + } + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-ch.updateChannel: + default: + } + ch.updateChannel <- clusterHandlerUpdate{ + securityCfg: ch.root.clusterUpdate.SecurityCfg, + chu: clusterUpdate, } } @@ -82,6 +110,9 @@ func (ch *clusterHandler) constructClusterUpdate() { func (ch *clusterHandler) close() { ch.clusterMutex.Lock() defer ch.clusterMutex.Unlock() + if ch.root == nil { + return + } ch.root.delete() ch.root = nil ch.rootClusterName = "" @@ -117,7 +148,12 @@ func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLev clusterHandler: topLevelHandler, } // Communicate with the xds client here. 
- c.cancelFunc = xdsClient.WatchCluster(clusterName, c.handleResp) + topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) + cancel := xdsClient.WatchCluster(clusterName, c.handleResp) + c.cancelFunc = func() { + topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) + cancel() + } return c } @@ -172,15 +208,10 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro case <-c.clusterHandler.updateChannel: default: } - c.clusterHandler.updateChannel <- clusterHandlerUpdate{chu: nil, err: err} + c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} return } - // deltaInClusterUpdateFields determines whether there was a delta in the - // clusterUpdate fields (forgetting the children). This will be used to help - // determine whether to pingClusterHandler at the end of this callback or - // not. - deltaInClusterUpdateFields := clusterUpdate.ClusterName != c.clusterUpdate.ClusterName || clusterUpdate.ClusterType != c.clusterUpdate.ClusterType c.receivedUpdate = true c.clusterUpdate = clusterUpdate @@ -195,9 +226,14 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro child.delete() } c.children = nil - if deltaInClusterUpdateFields { - c.clusterHandler.constructClusterUpdate() - } + // This is an update in the one leaf node, should try to send an update + // to the parent CDS balancer. + // + // Note that this update might be a duplicate from the previous one. + // Because the update contains not only the cluster name to watch, but + // also the extra fields (e.g. security config). There's no good way to + // compare all the fields. 
+ c.clusterHandler.constructClusterUpdate() return } @@ -230,7 +266,7 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro for child := range newChildren { if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { createdChild = true - mapCurrentChildren[child] = createClusterNode(child, c.clusterHandler.xdsClient, c.clusterHandler) + mapCurrentChildren[child] = createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler) } } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index 886dba416437..216592f9200e 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -40,13 +40,7 @@ const ( // xds client. func setupTests(t *testing.T) (*clusterHandler, *fakeclient.Client) { xdsC := fakeclient.NewClient() - ch := &clusterHandler{ - xdsClient: xdsC, - // This is will be how the update channel is created in cds. It will be - // a separate channel to the buffer.Unbounded. This channel will also be - // read from to test any cluster updates. - updateChannel: make(chan clusterHandlerUpdate, 1), - } + ch := newClusterHandler(&cdsBalancer{xdsClient: xdsC}) return ch, xdsC } @@ -176,15 +170,15 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { t.Fatal("Timed out waiting for update from updateChannel.") } - // Check that sending the same cluster update does not induce a - // update to be written to update buffer. + // Check that sending the same cluster update also induces an update + // to be written to update buffer. 
fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer shouldNotHappenCtxCancel() select { case <-ch.updateChannel: - t.Fatal("Should not have written an update to update buffer, as cluster update did not change.") case <-shouldNotHappenCtx.Done(): + t.Fatal("Timed out waiting for update from updateChannel.") } // Above represents same thing as the simple From 549c53a90c2a61a4bbe4e067b21f709ead03e2de Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 15 Jun 2021 14:03:10 -0700 Subject: [PATCH 133/998] xds/eds: rewrite EDS policy using child policies (#4457) --- xds/internal/balancer/balancer.go | 2 + .../balancer/balancergroup/balancergroup.go | 2 +- .../edsbalancer/configbuilder_test.go | 3 - xds/internal/balancer/edsbalancer/eds.go | 519 ++++++------ xds/internal/balancer/edsbalancer/eds_impl.go | 596 -------------- .../balancer/edsbalancer/eds_impl_priority.go | 358 --------- .../edsbalancer/eds_impl_priority_test.go | 503 +++++------- .../balancer/edsbalancer/eds_impl_test.go | 741 +++++------------- xds/internal/balancer/edsbalancer/eds_test.go | 511 +++--------- .../{eds_testutil.go => eds_testutil_test.go} | 73 ++ .../balancer/edsbalancer/eds_watcher.go | 87 ++ xds/internal/balancer/edsbalancer/util.go | 44 -- .../balancer/edsbalancer/util_test.go | 90 --- .../balancer/edsbalancer/xds_lrs_test.go | 74 -- xds/internal/balancer/edsbalancer/xds_old.go | 46 -- .../balancer/priority/balancer_priority.go | 5 +- .../balancer/priority/balancer_test.go | 8 +- xds/internal/xdsclient/requests_counter.go | 8 + 18 files changed, 925 insertions(+), 2745 deletions(-) delete mode 100644 xds/internal/balancer/edsbalancer/eds_impl.go delete mode 100644 xds/internal/balancer/edsbalancer/eds_impl_priority.go rename xds/internal/balancer/edsbalancer/{eds_testutil.go => eds_testutil_test.go} (62%) create mode 100644 
xds/internal/balancer/edsbalancer/eds_watcher.go delete mode 100644 xds/internal/balancer/edsbalancer/util.go delete mode 100644 xds/internal/balancer/edsbalancer/util_test.go delete mode 100644 xds/internal/balancer/edsbalancer/xds_lrs_test.go delete mode 100644 xds/internal/balancer/edsbalancer/xds_old.go diff --git a/xds/internal/balancer/balancer.go b/xds/internal/balancer/balancer.go index 5883027a2c52..0da15b9b9dbc 100644 --- a/xds/internal/balancer/balancer.go +++ b/xds/internal/balancer/balancer.go @@ -21,7 +21,9 @@ package balancer import ( _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer _ "google.golang.org/grpc/xds/internal/balancer/edsbalancer" // Register the EDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer ) diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/xds/internal/balancer/balancergroup/balancergroup.go index b86dea50e807..6d54728dc919 100644 --- a/xds/internal/balancer/balancergroup/balancergroup.go +++ b/xds/internal/balancer/balancergroup/balancergroup.go @@ -183,7 +183,7 @@ type BalancerGroup struct { cc balancer.ClientConn buildOpts balancer.BuildOptions logger *grpclog.PrefixLogger - loadStore load.PerClusterReporter + loadStore load.PerClusterReporter // TODO: delete this, no longer needed. It was used by EDS. // stateAggregator is where the state/picker updates will be sent to. 
It's // provided by the parent balancer, to build a picker with all the diff --git a/xds/internal/balancer/edsbalancer/configbuilder_test.go b/xds/internal/balancer/edsbalancer/configbuilder_test.go index f7d2955d2ad4..b9e76dc1cb00 100644 --- a/xds/internal/balancer/edsbalancer/configbuilder_test.go +++ b/xds/internal/balancer/edsbalancer/configbuilder_test.go @@ -27,9 +27,6 @@ import ( "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/xdsclient" - - _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer ) const ( diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go index ea11b2f8a257..cc486fe7b8c8 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/edsbalancer/eds.go @@ -21,29 +21,29 @@ package edsbalancer import ( "encoding/json" + "errors" "fmt" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/balancer/loadstore" - + "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/load" ) const edsName = "eds_experimental" var ( - newEDSBalancer = func(cc balancer.ClientConn, opts 
balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lw load.PerClusterReporter, logger *grpclog.PrefixLogger) edsBalancerImplInterface { - return newEDSBalancerImpl(cc, opts, enqueueState, lw, logger) + errBalancerClosed = errors.New("cdsBalancer is closed") + newChildBalancer = func(bb balancer.Builder, cc balancer.ClientConn, o balancer.BuildOptions) balancer.Balancer { + return bb.Build(cc, o) } ) @@ -53,22 +53,38 @@ func init() { type bb struct{} +// Build helps implement the balancer.Builder interface. func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - x := &edsBalancer{ - cc: cc, - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - grpcUpdate: make(chan interface{}), - xdsClientUpdate: make(chan *edsUpdate), - childPolicyUpdate: buffer.NewUnbounded(), - loadWrapper: loadstore.NewWrapper(), - config: &EDSConfig{}, + priorityBuilder := balancer.Get(priority.Name) + if priorityBuilder == nil { + logger.Errorf("priority balancer is needed but not registered") + return nil } - x.logger = prefixLogger(x) - x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.loadWrapper, x.logger) - x.logger.Infof("Created") - go x.run() - return x + priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) + if !ok { + logger.Errorf("priority balancer builder is not a config parser") + return nil + } + + b := &edsBalancer{ + cc: cc, + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + + priorityBuilder: priorityBuilder, + priorityConfigParser: priorityConfigParser, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + b.edsWatcher = &edsWatcher{ + parent: b, + updateChannel: make(chan *watchUpdate, 1), + } + + go b.run() + return b } func (bb) Name() string { @@ -83,29 +99,18 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err return &cfg, nil } -// edsBalancerImplInterface defines the 
interface that edsBalancerImpl must -// implement to communicate with edsBalancer. -// -// It's implemented by the real eds balancer and a fake testing eds balancer. -type edsBalancerImplInterface interface { - // handleEDSResponse passes the received EDS message from traffic director - // to eds balancer. - handleEDSResponse(edsResp xdsclient.EndpointsUpdate) - // handleChildPolicy updates the eds balancer the intra-cluster load - // balancing policy to use. - handleChildPolicy(name string, config json.RawMessage) - // handleSubConnStateChange handles state change for SubConn. - handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) - // updateState handle a balancer state update from the priority. - updateState(priority priorityType, s balancer.State) - // updateServiceRequestsConfig updates the service requests counter to the - // one for the given service name. - updateServiceRequestsConfig(serviceName string, max *uint32) - // updateClusterName updates the cluster name that will be attached to the - // address attributes. - updateClusterName(name string) - // close closes the eds balancer. - close() +// ccUpdate wraps a clientConn update received from gRPC (pushed from the +// xdsResolver). +type ccUpdate struct { + state balancer.ClientConnState + err error +} + +// scUpdate wraps a subConn update received from gRPC. This is directly passed +// on to the child balancer. +type scUpdate struct { + subConn balancer.SubConn + state balancer.SubConnState } // edsBalancer manages xdsClient and the actual EDS balancer implementation that @@ -113,307 +118,229 @@ type edsBalancerImplInterface interface { // // It currently has only an edsBalancer. Later, we may add fallback. type edsBalancer struct { - cc balancer.ClientConn - closed *grpcsync.Event - done *grpcsync.Event - logger *grpclog.PrefixLogger - - // edsBalancer continuously monitors the channels below, and will handle - // events from them in sync. 
- grpcUpdate chan interface{} - xdsClientUpdate chan *edsUpdate - childPolicyUpdate *buffer.Unbounded - - xdsClient xdsclient.XDSClient - loadWrapper *loadstore.Wrapper - config *EDSConfig // may change when passed a different service config - edsImpl edsBalancerImplInterface - - clusterName string - edsServiceName string - edsToWatch string // this is edsServiceName if it's set, otherwise, it's clusterName. - cancelEndpointsWatch func() - loadReportServer *string // LRS is disabled if loadReporterServer is nil. - cancelLoadReport func() + cc balancer.ClientConn + bOpts balancer.BuildOptions + updateCh *buffer.Unbounded // Channel for updates from gRPC. + edsWatcher *edsWatcher + logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event + + priorityBuilder balancer.Builder + priorityConfigParser balancer.ConfigParser + + config *EDSConfig + configRaw *serviceconfig.ParseResult + xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. + attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. + + child balancer.Balancer + edsResp xdsclient.EndpointsUpdate + edsRespReceived bool } -// run gets executed in a goroutine once edsBalancer is created. It monitors -// updates from grpc, xdsClient and load balancer. It synchronizes the -// operations that happen inside edsBalancer. It exits when edsBalancer is -// closed. -func (b *edsBalancer) run() { - for { - select { - case update := <-b.grpcUpdate: - b.handleGRPCUpdate(update) - case update := <-b.xdsClientUpdate: - b.handleXDSClientUpdate(update) - case update := <-b.childPolicyUpdate.Get(): - b.childPolicyUpdate.Load() - u := update.(*balancerStateWithPriority) - b.edsImpl.updateState(u.priority, u.s) - case <-b.closed.Done(): - b.cancelWatch() - b.edsImpl.close() - b.logger.Infof("Shutdown") - b.done.Fire() - return - } +// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. 
Good +// updates lead to registration of an EDS watch. Updates with error lead to +// cancellation of existing watch and propagation of the same error to the +// child balancer. +func (b *edsBalancer) handleClientConnUpdate(update *ccUpdate) { + // We first handle errors, if any, and then proceed with handling the + // update, only if the status quo has changed. + if err := update.err; err != nil { + b.handleErrorFromUpdate(err, true) } -} -// handleErrorFromUpdate handles both the error from parent ClientConn (from CDS -// balancer) and the error from xds client (from the watcher). fromParent is -// true if error is from parent ClientConn. -// -// If the error is connection error, it should be handled for fallback purposes. -// -// If the error is resource-not-found: -// - If it's from CDS balancer (shows as a resolver error), it means LDS or CDS -// resources were removed. The EDS watch should be canceled. -// - If it's from xds client, it means EDS resource were removed. The EDS -// watcher should keep watching. -// In both cases, the sub-balancers will be closed, and the future picks will -// fail. -func (b *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - b.logger.Warningf("Received error: %v", err) - if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - if fromParent { - // This is an error from the parent ClientConn (can be the parent - // CDS balancer), and is a resource-not-found error. This means the - // resource (can be either LDS or CDS) was removed. Stop the EDS - // watch. 
- b.cancelWatch() - } - b.edsImpl.handleEDSResponse(xdsclient.EndpointsUpdate{}) + b.logger.Infof("Receive update from resolver, balancer config: %+v", update.state.BalancerConfig) + cfg, _ := update.state.BalancerConfig.(*EDSConfig) + if cfg == nil { + b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", update.state.BalancerConfig) + return } -} -func (b *edsBalancer) handleGRPCUpdate(update interface{}) { - switch u := update.(type) { - case *subConnStateUpdate: - b.edsImpl.handleSubConnStateChange(u.sc, u.state.ConnectivityState) - case *balancer.ClientConnState: - b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(u.BalancerConfig)) - cfg, _ := u.BalancerConfig.(*EDSConfig) - if cfg == nil { - // service config parsing failed. should never happen. - return - } - - if err := b.handleServiceConfigUpdate(cfg); err != nil { - b.logger.Warningf("failed to update xDS client: %v", err) - } + b.config = cfg + b.configRaw = update.state.ResolverState.ServiceConfig + b.edsWatcher.updateConfig(cfg) - b.edsImpl.updateServiceRequestsConfig(cfg.ClusterName, cfg.MaxConcurrentRequests) - - // We will update the edsImpl with the new child policy, if we got a - // different one. - if !cmp.Equal(cfg.ChildPolicy, b.config.ChildPolicy, cmpopts.EquateEmpty()) { - if cfg.ChildPolicy != nil { - b.edsImpl.handleChildPolicy(cfg.ChildPolicy.Name, cfg.ChildPolicy.Config) - } else { - b.edsImpl.handleChildPolicy(roundrobin.Name, nil) - } - } - b.config = cfg - case error: - b.handleErrorFromUpdate(u, true) - default: - // unreachable path - b.logger.Errorf("wrong update type: %T", update) - } -} - -// handleServiceConfigUpdate applies the service config update, watching a new -// EDS service name and restarting LRS stream, as required. 
-func (b *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { - var updateLoadClusterAndService bool - if b.clusterName != config.ClusterName { - updateLoadClusterAndService = true - b.clusterName = config.ClusterName - b.edsImpl.updateClusterName(b.clusterName) + if !b.edsRespReceived { + // If eds resp was not received, wait for it. + return } - if b.edsServiceName != config.EDSServiceName { - updateLoadClusterAndService = true - b.edsServiceName = config.EDSServiceName + // If eds resp was received before this, the child policy was created. We + // need to generate a new balancer config and send it to the child, because + // certain fields (unrelated to EDS watch) might have changed. + if err := b.updateChildConfig(); err != nil { + b.logger.Warningf("failed to update child policy config: %v", err) } +} - // If EDSServiceName is set, use it to watch EDS. Otherwise, use the cluster - // name. - newEDSToWatch := config.EDSServiceName - if newEDSToWatch == "" { - newEDSToWatch = config.ClusterName - } - var restartEDSWatch bool - if b.edsToWatch != newEDSToWatch { - restartEDSWatch = true - b.edsToWatch = newEDSToWatch +// handleWatchUpdate handles a watch update from the xDS Client. Good updates +// lead to clientConn updates being invoked on the underlying child balancer. +func (b *edsBalancer) handleWatchUpdate(update *watchUpdate) { + if err := update.err; err != nil { + b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) + b.handleErrorFromUpdate(err, false) + return } - // Restart EDS watch when the eds name has changed. - if restartEDSWatch { - b.startEndpointsWatch() - } + b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, pretty.ToJSON(update.eds)) + b.edsRespReceived = true + b.edsResp = update.eds - if updateLoadClusterAndService { - // TODO: this update for the LRS service name is too early. It should - // only apply to the new EDS response. 
But this is applied to the RPCs - // before the new EDS response. To fully fix this, the EDS balancer - // needs to do a graceful switch to another EDS implementation. - // - // This is OK for now, because we don't actually expect edsServiceName - // to change. Fix this (a bigger change) will happen later. - b.loadWrapper.UpdateClusterAndService(b.clusterName, b.edsServiceName) + // A new EDS update triggers new child configs (e.g. different priorities + // for the priority balancer), and new addresses (the endpoints come from + // the EDS response). + if err := b.updateChildConfig(); err != nil { + b.logger.Warningf("failed to update child policy's balancer config: %v", err) } - - // Restart load reporting when the loadReportServer name has changed. - if !equalStringPointers(b.loadReportServer, config.LrsLoadReportingServerName) { - loadStore := b.startLoadReport(config.LrsLoadReportingServerName) - b.loadWrapper.UpdateLoadStore(loadStore) - } - - return nil } -// startEndpointsWatch starts the EDS watch. +// updateChildConfig builds a balancer config from eb's cached eds resp and +// service config, and sends that to the child balancer. Note that it also +// generates the addresses, because the endpoints come from the EDS resp. // -// This usually means load report needs to be restarted, but this function does -// NOT do that. Caller needs to call startLoadReport separately. 
-func (b *edsBalancer) startEndpointsWatch() { - if b.cancelEndpointsWatch != nil { - b.cancelEndpointsWatch() - } - edsToWatch := b.edsToWatch - cancelEDSWatch := b.xdsClient.WatchEndpoints(edsToWatch, func(update xdsclient.EndpointsUpdate, err error) { - b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, pretty.ToJSON(update)) - b.handleEDSUpdate(update, err) - }) - b.logger.Infof("Watch started on resource name %v with xds-client %p", edsToWatch, b.xdsClient) - b.cancelEndpointsWatch = func() { - cancelEDSWatch() - b.logger.Infof("Watch cancelled on resource name %v with xds-client %p", edsToWatch, b.xdsClient) +// If child balancer doesn't already exist, one will be created. +func (b *edsBalancer) updateChildConfig() error { + // Child was build when the first EDS resp was received, so we just build + // the config and addresses. + if b.child == nil { + b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } -} -func (b *edsBalancer) cancelWatch() { - b.loadReportServer = nil - if b.cancelLoadReport != nil { - b.cancelLoadReport() - b.cancelLoadReport = nil + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.edsResp, b.config) + if err != nil { + return fmt.Errorf("failed to build priority balancer config: %v", err) } - if b.cancelEndpointsWatch != nil { - b.edsToWatch = "" - b.cancelEndpointsWatch() - b.cancelEndpointsWatch = nil + childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) + if err != nil { + return fmt.Errorf("failed to parse generated priority balancer config, this should never happen because the config is generated: %v", err) } + b.logger.Infof("build balancer config: %v", pretty.ToJSON(childCfg)) + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addrs, + ServiceConfig: b.configRaw, + Attributes: b.attrsWithClient, + }, + BalancerConfig: childCfg, + }) } -// startLoadReport starts load reporting. 
If there's already a load reporting in -// progress, it cancels that. +// handleErrorFromUpdate handles both the error from parent ClientConn (from CDS +// balancer) and the error from xds client (from the watcher). fromParent is +// true if error is from parent ClientConn. +// +// If the error is connection error, it should be handled for fallback purposes. // -// Caller can cal this when the loadReportServer name changes, but -// edsServiceName doesn't (so we only need to restart load reporting, not EDS -// watch). -func (b *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { - b.loadReportServer = loadReportServer - if b.cancelLoadReport != nil { - b.cancelLoadReport() - b.cancelLoadReport = nil +// If the error is resource-not-found: +// - If it's from CDS balancer (shows as a resolver error), it means LDS or CDS +// resources were removed. The EDS watch should be canceled. +// - If it's from xds client, it means EDS resource were removed. The EDS +// watcher should keep watching. +// In both cases, the sub-balancers will be receive the error. +func (b *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { + b.logger.Warningf("Received error: %v", err) + if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { + // This is an error from the parent ClientConn (can be the parent CDS + // balancer), and is a resource-not-found error. This means the resource + // (can be either LDS or CDS) was removed. Stop the EDS watch. + b.edsWatcher.stopWatch() } - if loadReportServer == nil { - return nil + if b.child != nil { + b.child.ResolverError(err) + } else { + // If eds balancer was never created, fail the RPCs with errors. 
+ b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) } - ls, cancel := b.xdsClient.ReportLoad(*loadReportServer) - b.cancelLoadReport = cancel - return ls -} -func (b *edsBalancer) handleXDSClientUpdate(update *edsUpdate) { - if err := update.err; err != nil { - b.handleErrorFromUpdate(err, false) - return - } - b.edsImpl.handleEDSResponse(update.resp) } -type subConnStateUpdate struct { - sc balancer.SubConn - state balancer.SubConnState -} +// run is a long-running goroutine which handles all updates from gRPC and +// xdsClient. All methods which are invoked directly by gRPC or xdsClient simply +// push an update onto a channel which is read and acted upon right here. +func (b *edsBalancer) run() { + for { + select { + case u := <-b.updateCh.Get(): + b.updateCh.Load() + switch update := u.(type) { + case *ccUpdate: + b.handleClientConnUpdate(update) + case *scUpdate: + // SubConn updates are simply handed over to the underlying + // child balancer. + if b.child == nil { + b.logger.Errorf("xds: received scUpdate {%+v} with no child balancer", update) + break + } + b.child.UpdateSubConnState(update.subConn, update.state) + } + case u := <-b.edsWatcher.updateChannel: + b.handleWatchUpdate(u) -func (b *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - update := &subConnStateUpdate{ - sc: sc, - state: state, - } - select { - case b.grpcUpdate <- update: - case <-b.closed.Done(): + // Close results in cancellation of the EDS watch and closing of the + // underlying child policy and is the only way to exit this goroutine. + case <-b.closed.Done(): + b.edsWatcher.stopWatch() + + if b.child != nil { + b.child.Close() + b.child = nil + } + // This is the *ONLY* point of return from this function. 
+ b.logger.Infof("Shutdown") + b.done.Fire() + return + } } } -func (b *edsBalancer) ResolverError(err error) { - select { - case b.grpcUpdate <- err: - case <-b.closed.Done(): +// Following are methods to implement the balancer interface. + +// UpdateClientConnState receives the serviceConfig (which contains the +// clusterName to watch for in CDS) and the xdsClient object from the +// xdsResolver. +func (b *edsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if b.closed.HasFired() { + b.logger.Warningf("xds: received ClientConnState {%+v} after edsBalancer was closed", state) + return errBalancerClosed } -} -func (b *edsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { if b.xdsClient == nil { - c := xdsclient.FromResolverState(s.ResolverState) + c := xdsclient.FromResolverState(state.ResolverState) if c == nil { return balancer.ErrBadResolverState } b.xdsClient = c + b.attrsWithClient = state.ResolverState.Attributes } - select { - case b.grpcUpdate <- &s: - case <-b.closed.Done(): - } + b.updateCh.Put(&ccUpdate{state: state}) return nil } -type edsUpdate struct { - resp xdsclient.EndpointsUpdate - err error -} - -func (b *edsBalancer) handleEDSUpdate(resp xdsclient.EndpointsUpdate, err error) { - select { - case b.xdsClientUpdate <- &edsUpdate{resp: resp, err: err}: - case <-b.closed.Done(): +// ResolverError handles errors reported by the xdsResolver. +func (b *edsBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received resolver error {%v} after edsBalancer was closed", err) + return } + b.updateCh.Put(&ccUpdate{err: err}) } -type balancerStateWithPriority struct { - priority priorityType - s balancer.State -} - -func (b *edsBalancer) enqueueChildBalancerState(p priorityType, s balancer.State) { - b.childPolicyUpdate.Put(&balancerStateWithPriority{ - priority: p, - s: s, - }) +// UpdateSubConnState handles subConn updates from gRPC. 
+func (b *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received subConn update {%v, %v} after edsBalancer was closed", sc, state) + return + } + b.updateCh.Put(&scUpdate{subConn: sc, state: state}) } +// Close closes the cdsBalancer and the underlying child balancer. func (b *edsBalancer) Close() { b.closed.Fire() <-b.done.Done() } - -// equalStringPointers returns true if -// - a and b are both nil OR -// - *a == *b (and a and b are both non-nil) -func equalStringPointers(a, b *string) bool { - if a == nil && b == nil { - return true - } - if a == nil || b == nil { - return false - } - return *a == *b -} diff --git a/xds/internal/balancer/edsbalancer/eds_impl.go b/xds/internal/balancer/edsbalancer/eds_impl.go deleted file mode 100644 index c4dfd6dd6d0b..000000000000 --- a/xds/internal/balancer/edsbalancer/eds_impl.go +++ /dev/null @@ -1,596 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package edsbalancer - -import ( - "encoding/json" - "reflect" - "sync" - "time" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/balancer/weightedroundrobin" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" - xdsi "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/load" -) - -// TODO: make this a environment variable? -var defaultPriorityInitTimeout = 10 * time.Second - -const defaultServiceRequestCountMax = 1024 - -type localityConfig struct { - weight uint32 - addrs []resolver.Address -} - -// balancerGroupWithConfig contains the localities with the same priority. It -// manages all localities using a balancerGroup. -type balancerGroupWithConfig struct { - bg *balancergroup.BalancerGroup - stateAggregator *weightedaggregator.Aggregator - configs map[xdsi.LocalityID]*localityConfig -} - -// edsBalancerImpl does load balancing based on the EDS responses. Note that it -// doesn't implement the balancer interface. It's intended to be used by a high -// level balancer implementation. -// -// The localities are picked as weighted round robin. A configurable child -// policy is used to manage endpoints in each locality. 
-type edsBalancerImpl struct { - cc balancer.ClientConn - buildOpts balancer.BuildOptions - logger *grpclog.PrefixLogger - loadReporter load.PerClusterReporter - - enqueueChildBalancerStateUpdate func(priorityType, balancer.State) - - subBalancerBuilder balancer.Builder - priorityToLocalities map[priorityType]*balancerGroupWithConfig - respReceived bool - - // There's no need to hold any mutexes at the same time. The order to take - // mutex should be: priorityMu > subConnMu, but this is implicit via - // balancers (starting balancer with next priority while holding priorityMu, - // and the balancer may create new SubConn). - - priorityMu sync.Mutex - // priorities are pointers, and will be nil when EDS returns empty result. - priorityInUse priorityType - priorityLowest priorityType - priorityToState map[priorityType]*balancer.State - // The timer to give a priority 10 seconds to connect. And if the priority - // doesn't go into Ready/Failure, start the next priority. - // - // One timer is enough because there can be at most one priority in init - // state. - priorityInitTimer *time.Timer - - subConnMu sync.Mutex - subConnToPriority map[balancer.SubConn]priorityType - - pickerMu sync.Mutex - dropConfig []xdsclient.OverloadDropConfig - drops []*dropper - innerState balancer.State // The state of the picker without drop support. - serviceRequestsCounter *xdsclient.ServiceRequestsCounter - serviceRequestCountMax uint32 - - clusterNameMu sync.Mutex - clusterName string -} - -// newEDSBalancerImpl create a new edsBalancerImpl. 
-func newEDSBalancerImpl(cc balancer.ClientConn, bOpts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lr load.PerClusterReporter, logger *grpclog.PrefixLogger) *edsBalancerImpl { - edsImpl := &edsBalancerImpl{ - cc: cc, - buildOpts: bOpts, - logger: logger, - subBalancerBuilder: balancer.Get(roundrobin.Name), - loadReporter: lr, - - enqueueChildBalancerStateUpdate: enqueueState, - - priorityToLocalities: make(map[priorityType]*balancerGroupWithConfig), - priorityToState: make(map[priorityType]*balancer.State), - subConnToPriority: make(map[balancer.SubConn]priorityType), - serviceRequestCountMax: defaultServiceRequestCountMax, - } - // Don't start balancer group here. Start it when handling the first EDS - // response. Otherwise the balancer group will be started with round-robin, - // and if users specify a different sub-balancer, all balancers in balancer - // group will be closed and recreated when sub-balancer update happens. - return edsImpl -} - -// handleChildPolicy updates the child balancers handling endpoints. Child -// policy is roundrobin by default. If the specified balancer is not installed, -// the old child balancer will be used. -// -// HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. 
-func (edsImpl *edsBalancerImpl) handleChildPolicy(name string, config json.RawMessage) { - if edsImpl.subBalancerBuilder.Name() == name { - return - } - newSubBalancerBuilder := balancer.Get(name) - if newSubBalancerBuilder == nil { - edsImpl.logger.Infof("edsBalancerImpl: failed to find balancer with name %q, keep using %q", name, edsImpl.subBalancerBuilder.Name()) - return - } - edsImpl.subBalancerBuilder = newSubBalancerBuilder - for _, bgwc := range edsImpl.priorityToLocalities { - if bgwc == nil { - continue - } - for lid, config := range bgwc.configs { - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - // TODO: (eds) add support to balancer group to support smoothly - // switching sub-balancers (keep old balancer around until new - // balancer becomes ready). - bgwc.bg.Remove(lidJSON) - bgwc.bg.Add(lidJSON, edsImpl.subBalancerBuilder) - bgwc.bg.UpdateClientConnState(lidJSON, balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: config.addrs}, - }) - // This doesn't need to manually update picker, because the new - // sub-balancer will send it's picker later. - } - } -} - -// updateDrops compares new drop policies with the old. If they are different, -// it updates the drop policies and send ClientConn an updated picker. -func (edsImpl *edsBalancerImpl) updateDrops(dropConfig []xdsclient.OverloadDropConfig) { - if cmp.Equal(dropConfig, edsImpl.dropConfig) { - return - } - edsImpl.pickerMu.Lock() - edsImpl.dropConfig = dropConfig - var newDrops []*dropper - for _, c := range edsImpl.dropConfig { - newDrops = append(newDrops, newDropper(c)) - } - edsImpl.drops = newDrops - if edsImpl.innerState.Picker != nil { - // Update picker with old inner picker, new drops. 
- edsImpl.cc.UpdateState(balancer.State{ - ConnectivityState: edsImpl.innerState.ConnectivityState, - Picker: newDropPicker(edsImpl.innerState.Picker, newDrops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}, - ) - } - edsImpl.pickerMu.Unlock() -} - -// handleEDSResponse handles the EDS response and creates/deletes localities and -// SubConns. It also handles drops. -// -// HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. -func (edsImpl *edsBalancerImpl) handleEDSResponse(edsResp xdsclient.EndpointsUpdate) { - // TODO: Unhandled fields from EDS response: - // - edsResp.GetPolicy().GetOverprovisioningFactor() - // - locality.GetPriority() - // - lbEndpoint.GetMetadata(): contains BNS name, send to sub-balancers - // - as service config or as resolved address - // - if socketAddress is not ip:port - // - socketAddress.GetNamedPort(), socketAddress.GetResolverName() - // - resolve endpoint's name with another resolver - - // If the first EDS update is an empty update, nothing is changing from the - // previous update (which is the default empty value). We need to explicitly - // handle first update being empty, and send a transient failure picker. - // - // TODO: define Equal() on type EndpointUpdate to avoid DeepEqual. And do - // the same for the other types. - if !edsImpl.respReceived && reflect.DeepEqual(edsResp, xdsclient.EndpointsUpdate{}) { - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(errAllPrioritiesRemoved)}) - } - edsImpl.respReceived = true - - edsImpl.updateDrops(edsResp.Drops) - - // Filter out all localities with weight 0. - // - // Locality weighted load balancer can be enabled by setting an option in - // CDS, and the weight of each locality. Currently, without the guarantee - // that CDS is always sent, we assume locality weighted load balance is - // always enabled, and ignore all weight 0 localities. 
- // - // In the future, we should look at the config in CDS response and decide - // whether locality weight matters. - newLocalitiesWithPriority := make(map[priorityType][]xdsclient.Locality) - for _, locality := range edsResp.Localities { - if locality.Weight == 0 { - continue - } - priority := newPriorityType(locality.Priority) - newLocalitiesWithPriority[priority] = append(newLocalitiesWithPriority[priority], locality) - } - - var ( - priorityLowest priorityType - priorityChanged bool - ) - - for priority, newLocalities := range newLocalitiesWithPriority { - if !priorityLowest.isSet() || priorityLowest.higherThan(priority) { - priorityLowest = priority - } - - bgwc, ok := edsImpl.priorityToLocalities[priority] - if !ok { - // Create balancer group if it's never created (this is the first - // time this priority is received). We don't start it here. It may - // be started when necessary (e.g. when higher is down, or if it's a - // new lowest priority). - ccPriorityWrapper := edsImpl.ccWrapperWithPriority(priority) - stateAggregator := weightedaggregator.New(ccPriorityWrapper, edsImpl.logger, newRandomWRR) - bgwc = &balancerGroupWithConfig{ - bg: balancergroup.New(ccPriorityWrapper, edsImpl.buildOpts, stateAggregator, edsImpl.loadReporter, edsImpl.logger), - stateAggregator: stateAggregator, - configs: make(map[xdsi.LocalityID]*localityConfig), - } - edsImpl.priorityToLocalities[priority] = bgwc - priorityChanged = true - edsImpl.logger.Infof("New priority %v added", priority) - } - edsImpl.handleEDSResponsePerPriority(bgwc, newLocalities) - } - edsImpl.priorityLowest = priorityLowest - - // Delete priorities that are removed in the latest response, and also close - // the balancer group. 
- for p, bgwc := range edsImpl.priorityToLocalities { - if _, ok := newLocalitiesWithPriority[p]; !ok { - delete(edsImpl.priorityToLocalities, p) - bgwc.bg.Close() - delete(edsImpl.priorityToState, p) - priorityChanged = true - edsImpl.logger.Infof("Priority %v deleted", p) - } - } - - // If priority was added/removed, it may affect the balancer group to use. - // E.g. priorityInUse was removed, or all priorities are down, and a new - // lower priority was added. - if priorityChanged { - edsImpl.handlePriorityChange() - } -} - -func (edsImpl *edsBalancerImpl) handleEDSResponsePerPriority(bgwc *balancerGroupWithConfig, newLocalities []xdsclient.Locality) { - // newLocalitiesSet contains all names of localities in the new EDS response - // for the same priority. It's used to delete localities that are removed in - // the new EDS response. - newLocalitiesSet := make(map[xdsi.LocalityID]struct{}) - var rebuildStateAndPicker bool - for _, locality := range newLocalities { - // One balancer for each locality. - - lid := locality.ID - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - newLocalitiesSet[lid] = struct{}{} - - newWeight := locality.Weight - var newAddrs []resolver.Address - for _, lbEndpoint := range locality.Endpoints { - // Filter out all "unhealthy" endpoints (unknown and - // healthy are both considered to be healthy: - // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). 
- if lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && - lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { - continue - } - - address := resolver.Address{ - Addr: lbEndpoint.Address, - } - if edsImpl.subBalancerBuilder.Name() == weightedroundrobin.Name && lbEndpoint.Weight != 0 { - ai := weightedroundrobin.AddrInfo{Weight: lbEndpoint.Weight} - address = weightedroundrobin.SetAddrInfo(address, ai) - // Metadata field in resolver.Address is deprecated. The - // attributes field should be used to specify arbitrary - // attributes about the address. We still need to populate the - // Metadata field here to allow users of this field to migrate - // to the new one. - // TODO(easwars): Remove this once all users have migrated. - // See https://github.com/grpc/grpc-go/issues/3563. - address.Metadata = &ai - } - newAddrs = append(newAddrs, address) - } - var weightChanged, addrsChanged bool - config, ok := bgwc.configs[lid] - if !ok { - // A new balancer, add it to balancer group and balancer map. - bgwc.stateAggregator.Add(lidJSON, newWeight) - bgwc.bg.Add(lidJSON, edsImpl.subBalancerBuilder) - config = &localityConfig{ - weight: newWeight, - } - bgwc.configs[lid] = config - - // weightChanged is false for new locality, because there's no need - // to update weight in bg. - addrsChanged = true - edsImpl.logger.Infof("New locality %v added", lid) - } else { - // Compare weight and addrs. 
- if config.weight != newWeight { - weightChanged = true - } - if !cmp.Equal(config.addrs, newAddrs) { - addrsChanged = true - } - edsImpl.logger.Infof("Locality %v updated, weightedChanged: %v, addrsChanged: %v", lid, weightChanged, addrsChanged) - } - - if weightChanged { - config.weight = newWeight - bgwc.stateAggregator.UpdateWeight(lidJSON, newWeight) - rebuildStateAndPicker = true - } - - if addrsChanged { - config.addrs = newAddrs - bgwc.bg.UpdateClientConnState(lidJSON, balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: newAddrs}, - }) - } - } - - // Delete localities that are removed in the latest response. - for lid := range bgwc.configs { - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - if _, ok := newLocalitiesSet[lid]; !ok { - bgwc.stateAggregator.Remove(lidJSON) - bgwc.bg.Remove(lidJSON) - delete(bgwc.configs, lid) - edsImpl.logger.Infof("Locality %v deleted", lid) - rebuildStateAndPicker = true - } - } - - if rebuildStateAndPicker { - bgwc.stateAggregator.BuildAndUpdate() - } -} - -// handleSubConnStateChange handles the state change and update pickers accordingly. -func (edsImpl *edsBalancerImpl) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - edsImpl.subConnMu.Lock() - var bgwc *balancerGroupWithConfig - if p, ok := edsImpl.subConnToPriority[sc]; ok { - if s == connectivity.Shutdown { - // Only delete sc from the map when state changed to Shutdown. - delete(edsImpl.subConnToPriority, sc) - } - bgwc = edsImpl.priorityToLocalities[p] - } - edsImpl.subConnMu.Unlock() - if bgwc == nil { - edsImpl.logger.Infof("edsBalancerImpl: priority not found for sc state change") - return - } - if bg := bgwc.bg; bg != nil { - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) - } -} - -// updateServiceRequestsConfig handles changes to the circuit breaking configuration. 
-func (edsImpl *edsBalancerImpl) updateServiceRequestsConfig(serviceName string, max *uint32) { - edsImpl.pickerMu.Lock() - var updatePicker bool - if edsImpl.serviceRequestsCounter == nil || edsImpl.serviceRequestsCounter.ServiceName != serviceName { - edsImpl.serviceRequestsCounter = xdsclient.GetServiceRequestsCounter(serviceName) - updatePicker = true - } - - var newMax uint32 = defaultServiceRequestCountMax - if max != nil { - newMax = *max - } - if edsImpl.serviceRequestCountMax != newMax { - edsImpl.serviceRequestCountMax = newMax - updatePicker = true - } - if updatePicker && edsImpl.innerState.Picker != nil { - // Update picker with old inner picker, new counter and counterMax. - edsImpl.cc.UpdateState(balancer.State{ - ConnectivityState: edsImpl.innerState.ConnectivityState, - Picker: newDropPicker(edsImpl.innerState.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}, - ) - } - edsImpl.pickerMu.Unlock() -} - -func (edsImpl *edsBalancerImpl) updateClusterName(name string) { - edsImpl.clusterNameMu.Lock() - defer edsImpl.clusterNameMu.Unlock() - edsImpl.clusterName = name -} - -func (edsImpl *edsBalancerImpl) getClusterName() string { - edsImpl.clusterNameMu.Lock() - defer edsImpl.clusterNameMu.Unlock() - return edsImpl.clusterName -} - -// updateState first handles priority, and then wraps picker in a drop picker -// before forwarding the update. -func (edsImpl *edsBalancerImpl) updateState(priority priorityType, s balancer.State) { - _, ok := edsImpl.priorityToLocalities[priority] - if !ok { - edsImpl.logger.Infof("eds: received picker update from unknown priority") - return - } - - if edsImpl.handlePriorityWithNewState(priority, s) { - edsImpl.pickerMu.Lock() - defer edsImpl.pickerMu.Unlock() - edsImpl.innerState = s - // Don't reset drops when it's a state change. 
- edsImpl.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: newDropPicker(s.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}) - } -} - -func (edsImpl *edsBalancerImpl) ccWrapperWithPriority(priority priorityType) *edsBalancerWrapperCC { - return &edsBalancerWrapperCC{ - ClientConn: edsImpl.cc, - priority: priority, - parent: edsImpl, - } -} - -// edsBalancerWrapperCC implements the balancer.ClientConn API and get passed to -// each balancer group. It contains the locality priority. -type edsBalancerWrapperCC struct { - balancer.ClientConn - priority priorityType - parent *edsBalancerImpl -} - -func (ebwcc *edsBalancerWrapperCC) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - clusterName := ebwcc.parent.getClusterName() - newAddrs := make([]resolver.Address, len(addrs)) - for i, addr := range addrs { - newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) - } - return ebwcc.parent.newSubConn(ebwcc.priority, newAddrs, opts) -} - -func (ebwcc *edsBalancerWrapperCC) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - clusterName := ebwcc.parent.getClusterName() - newAddrs := make([]resolver.Address, len(addrs)) - for i, addr := range addrs { - newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) - } - ebwcc.ClientConn.UpdateAddresses(sc, newAddrs) -} - -func (ebwcc *edsBalancerWrapperCC) UpdateState(state balancer.State) { - ebwcc.parent.enqueueChildBalancerStateUpdate(ebwcc.priority, state) -} - -func (edsImpl *edsBalancerImpl) newSubConn(priority priorityType, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - sc, err := edsImpl.cc.NewSubConn(addrs, opts) - if err != nil { - return nil, err - } - edsImpl.subConnMu.Lock() - edsImpl.subConnToPriority[sc] = priority - edsImpl.subConnMu.Unlock() - return sc, nil -} - -// close closes the balancer. 
-func (edsImpl *edsBalancerImpl) close() { - for _, bgwc := range edsImpl.priorityToLocalities { - if bg := bgwc.bg; bg != nil { - bgwc.stateAggregator.Stop() - bg.Close() - } - } -} - -type dropPicker struct { - drops []*dropper - p balancer.Picker - loadStore load.PerClusterReporter - counter *xdsclient.ServiceRequestsCounter - countMax uint32 -} - -func newDropPicker(p balancer.Picker, drops []*dropper, loadStore load.PerClusterReporter, counter *xdsclient.ServiceRequestsCounter, countMax uint32) *dropPicker { - return &dropPicker{ - drops: drops, - p: p, - loadStore: loadStore, - counter: counter, - countMax: countMax, - } -} - -func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - var ( - drop bool - category string - ) - for _, dp := range d.drops { - if dp.drop() { - drop = true - category = dp.c.Category - break - } - } - if drop { - if d.loadStore != nil { - d.loadStore.CallDropped(category) - } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") - } - if d.counter != nil { - if err := d.counter.StartRequest(d.countMax); err != nil { - // Drops by circuit breaking are reported with empty category. They - // will be reported only in total drops, but not in per category. - if d.loadStore != nil { - d.loadStore.CallDropped("") - } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) - } - pr, err := d.p.Pick(info) - if err != nil { - d.counter.EndRequest() - return pr, err - } - oldDone := pr.Done - pr.Done = func(doneInfo balancer.DoneInfo) { - d.counter.EndRequest() - if oldDone != nil { - oldDone(doneInfo) - } - } - return pr, err - } - // TODO: (eds) don't drop unless the inner picker is READY. Similar to - // https://github.com/grpc/grpc-go/issues/2622. 
- return d.p.Pick(info) -} diff --git a/xds/internal/balancer/edsbalancer/eds_impl_priority.go b/xds/internal/balancer/edsbalancer/eds_impl_priority.go deleted file mode 100644 index 53ac6ef5e873..000000000000 --- a/xds/internal/balancer/edsbalancer/eds_impl_priority.go +++ /dev/null @@ -1,358 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "errors" - "fmt" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/connectivity" -) - -var errAllPrioritiesRemoved = errors.New("eds: no locality is provided, all priorities are removed") - -// handlePriorityChange handles priority after EDS adds/removes a -// priority. -// -// - If all priorities were deleted, unset priorityInUse, and set parent -// ClientConn to TransientFailure -// - If priorityInUse wasn't set, this is either the first EDS resp, or the -// previous EDS resp deleted everything. Set priorityInUse to 0, and start 0. -// - If priorityInUse was deleted, send the picker from the new lowest priority -// to parent ClientConn, and set priorityInUse to the new lowest. -// - If priorityInUse has a non-Ready state, and also there's a priority lower -// than priorityInUse (which means a lower priority was added), set the next -// priority as new priorityInUse, and start the bg. 
-func (edsImpl *edsBalancerImpl) handlePriorityChange() { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - - // Everything was removed by EDS. - if !edsImpl.priorityLowest.isSet() { - edsImpl.priorityInUse = newPriorityTypeUnset() - // Stop the init timer. This can happen if the only priority is removed - // shortly after it's added. - if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(errAllPrioritiesRemoved)}) - return - } - - // priorityInUse wasn't set, use 0. - if !edsImpl.priorityInUse.isSet() { - edsImpl.logger.Infof("Switching priority from unset to %v", 0) - edsImpl.startPriority(newPriorityType(0)) - return - } - - // priorityInUse was deleted, use the new lowest. - if _, ok := edsImpl.priorityToLocalities[edsImpl.priorityInUse]; !ok { - oldP := edsImpl.priorityInUse - edsImpl.priorityInUse = edsImpl.priorityLowest - edsImpl.logger.Infof("Switching priority from %v to %v, because former was deleted", oldP, edsImpl.priorityInUse) - if s, ok := edsImpl.priorityToState[edsImpl.priorityLowest]; ok { - edsImpl.cc.UpdateState(*s) - } else { - // If state for priorityLowest is not found, this means priorityLowest was - // started, but never sent any update. The init timer fired and - // triggered the next priority. The old_priorityInUse (that was just - // deleted EDS) was picked later. - // - // We don't have an old state to send to parent, but we also don't - // want parent to keep using picker from old_priorityInUse. Send an - // update to trigger block picks until a new picker is ready. - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) - } - return - } - - // priorityInUse is not ready, look for next priority, and use if found. 
- if s, ok := edsImpl.priorityToState[edsImpl.priorityInUse]; ok && s.ConnectivityState != connectivity.Ready { - pNext := edsImpl.priorityInUse.nextLower() - if _, ok := edsImpl.priorityToLocalities[pNext]; ok { - edsImpl.logger.Infof("Switching priority from %v to %v, because latter was added, and former wasn't Ready") - edsImpl.startPriority(pNext) - } - } -} - -// startPriority sets priorityInUse to p, and starts the balancer group for p. -// It also starts a timer to fall to next priority after timeout. -// -// Caller must hold priorityMu, priority must exist, and edsImpl.priorityInUse -// must be non-nil. -func (edsImpl *edsBalancerImpl) startPriority(priority priorityType) { - edsImpl.priorityInUse = priority - p := edsImpl.priorityToLocalities[priority] - // NOTE: this will eventually send addresses to sub-balancers. If the - // sub-balancer tries to update picker, it will result in a deadlock on - // priorityMu in the update is handled synchronously. The deadlock is - // currently avoided by handling balancer update in a goroutine (the run - // goroutine in the parent eds balancer). When priority balancer is split - // into its own, this asynchronous state handling needs to be copied. - p.stateAggregator.Start() - p.bg.Start() - // startPriority can be called when - // 1. first EDS resp, start p0 - // 2. a high priority goes Failure, start next - // 3. a high priority init timeout, start next - // - // In all the cases, the existing init timer is either closed, also already - // expired. There's no need to close the old timer. 
- edsImpl.priorityInitTimer = time.AfterFunc(defaultPriorityInitTimeout, func() { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - if !edsImpl.priorityInUse.isSet() || !edsImpl.priorityInUse.equal(priority) { - return - } - edsImpl.priorityInitTimer = nil - pNext := priority.nextLower() - if _, ok := edsImpl.priorityToLocalities[pNext]; ok { - edsImpl.startPriority(pNext) - } - }) -} - -// handlePriorityWithNewState start/close priorities based on the connectivity -// state. It returns whether the state should be forwarded to parent ClientConn. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewState(priority priorityType, s balancer.State) bool { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - - if !edsImpl.priorityInUse.isSet() { - edsImpl.logger.Infof("eds: received picker update when no priority is in use (EDS returned an empty list)") - return false - } - - if edsImpl.priorityInUse.higherThan(priority) { - // Lower priorities should all be closed, this is an unexpected update. - edsImpl.logger.Infof("eds: received picker update from priority lower then priorityInUse") - return false - } - - bState, ok := edsImpl.priorityToState[priority] - if !ok { - bState = &balancer.State{} - edsImpl.priorityToState[priority] = bState - } - oldState := bState.ConnectivityState - *bState = s - - switch s.ConnectivityState { - case connectivity.Ready: - return edsImpl.handlePriorityWithNewStateReady(priority) - case connectivity.TransientFailure: - return edsImpl.handlePriorityWithNewStateTransientFailure(priority) - case connectivity.Connecting: - return edsImpl.handlePriorityWithNewStateConnecting(priority, oldState) - default: - // New state is Idle, should never happen. Don't forward. - return false - } -} - -// handlePriorityWithNewStateReady handles state Ready and decides whether to -// forward update or not. 
-// -// An update with state Ready: -// - If it's from higher priority: -// - Forward the update -// - Set the priority as priorityInUse -// - Close all priorities lower than this one -// - If it's from priorityInUse: -// - Forward and do nothing else -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateReady(priority priorityType) bool { - // If one priority higher or equal to priorityInUse goes Ready, stop the - // init timer. If update is from higher than priorityInUse, - // priorityInUse will be closed, and the init timer will become useless. - if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - - if edsImpl.priorityInUse.lowerThan(priority) { - edsImpl.logger.Infof("Switching priority from %v to %v, because latter became Ready", edsImpl.priorityInUse, priority) - edsImpl.priorityInUse = priority - for i := priority.nextLower(); !i.lowerThan(edsImpl.priorityLowest); i = i.nextLower() { - bgwc := edsImpl.priorityToLocalities[i] - bgwc.stateAggregator.Stop() - bgwc.bg.Close() - } - return true - } - return true -} - -// handlePriorityWithNewStateTransientFailure handles state TransientFailure and -// decides whether to forward update or not. -// -// An update with state Failure: -// - If it's from a higher priority: -// - Do not forward, and do nothing -// - If it's from priorityInUse: -// - If there's no lower: -// - Forward and do nothing else -// - If there's a lower priority: -// - Forward -// - Set lower as priorityInUse -// - Start lower -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateTransientFailure(priority priorityType) bool { - if edsImpl.priorityInUse.lowerThan(priority) { - return false - } - // priorityInUse sends a failure. Stop its init timer. 
- if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - pNext := priority.nextLower() - if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { - return true - } - edsImpl.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, pNext) - edsImpl.startPriority(pNext) - return true -} - -// handlePriorityWithNewStateConnecting handles state Connecting and decides -// whether to forward update or not. -// -// An update with state Connecting: -// - If it's from a higher priority -// - Do nothing -// - If it's from priorityInUse, the behavior depends on previous state. -// -// When new state is Connecting, the behavior depends on previous state. If the -// previous state was Ready, this is a transition out from Ready to Connecting. -// Assuming there are multiple backends in the same priority, this mean we are -// in a bad situation and we should failover to the next priority (Side note: -// the current connectivity state aggregating algorhtim (e.g. round-robin) is -// not handling this right, because if many backends all go from Ready to -// Connecting, the overall situation is more like TransientFailure, not -// Connecting). -// -// If the previous state was Idle, we don't do anything special with failure, -// and simply forward the update. The init timer should be in process, will -// handle failover if it timeouts. If the previous state was TransientFailure, -// we do not forward, because the lower priority is in use. -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. 
-func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateConnecting(priority priorityType, oldState connectivity.State) bool { - if edsImpl.priorityInUse.lowerThan(priority) { - return false - } - - switch oldState { - case connectivity.Ready: - pNext := priority.nextLower() - if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { - return true - } - edsImpl.logger.Infof("Switching priority from %v to %v, because former became Connecting from Ready", priority, pNext) - edsImpl.startPriority(pNext) - return true - case connectivity.Idle: - return true - case connectivity.TransientFailure: - return false - default: - // Old state is Connecting or Shutdown. Don't forward. - return false - } -} - -// priorityType represents the priority from EDS response. -// -// 0 is the highest priority. The bigger the number, the lower the priority. -type priorityType struct { - set bool - p uint32 -} - -func newPriorityType(p uint32) priorityType { - return priorityType{ - set: true, - p: p, - } -} - -func newPriorityTypeUnset() priorityType { - return priorityType{} -} - -func (p priorityType) isSet() bool { - return p.set -} - -func (p priorityType) equal(p2 priorityType) bool { - if !p.isSet() && !p2.isSet() { - return true - } - if !p.isSet() || !p2.isSet() { - return false - } - return p == p2 -} - -func (p priorityType) higherThan(p2 priorityType) bool { - if !p.isSet() || !p2.isSet() { - // TODO(menghanl): return an appropriate value instead of panic. - panic("priority unset") - } - return p.p < p2.p -} - -func (p priorityType) lowerThan(p2 priorityType) bool { - if !p.isSet() || !p2.isSet() { - // TODO(menghanl): return an appropriate value instead of panic. 
- panic("priority unset") - } - return p.p > p2.p -} - -func (p priorityType) nextLower() priorityType { - if !p.isSet() { - panic("priority unset") - } - return priorityType{ - set: true, - p: p.p + 1, - } -} - -func (p priorityType) String() string { - if !p.set { - return "Nil" - } - return fmt.Sprint(p.p) -} diff --git a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go b/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go index 51b35f22f09d..5674411159b8 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go +++ b/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go @@ -28,6 +28,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/testutils" ) @@ -36,15 +37,14 @@ import ( // // Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0. func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, with priorities [0, 1], each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { @@ -53,22 +53,20 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { sc1 := <-cc.NewSubConnCh // p0 is ready. 
- edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + t.Fatal(err) } - // Add p2, it shouldn't cause any udpates. + // Add p2, it shouldn't cause any updates. clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) select { case <-cc.NewPickerCh: @@ -84,7 +82,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) select { case <-cc.NewPickerCh: @@ -102,15 +100,14 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { // Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is // down, use 2; remove 2, use 1. 
func (s) TestEDSPriority_SwitchPriority(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, with priorities [0, 1], each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { @@ -119,41 +116,35 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { sc0 := <-cc.NewSubConnCh // p0 is ready. - edsb.handleSubConnStateChange(sc0, connectivity.Connecting) - edsb.handleSubConnStateChange(sc0, connectivity.Ready) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + t.Fatal(err) } // Turn down 0, 1 is used. 
- edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + t.Fatal(err) } - // Add p2, it shouldn't cause any udpates. + // Add p2, it shouldn't cause any updates. 
clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) select { case <-cc.NewPickerCh: @@ -166,29 +157,25 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { } // Turn down 1, use 2 - edsb.handleSubConnStateChange(sc1, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + t.Fatal(err) } // Remove 2, use 1. 
clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) // p2 SubConns are removed. scToRemove := <-cc.RemoveSubConnCh @@ -197,28 +184,23 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { } // Should get an update with 1's old picker, to override 2's old picker. - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } + if err := testErrPickerFromCh(cc.NewPickerCh, balancer.ErrTransientFailure); err != nil { + t.Fatal(err) } + } // Add a lower priority while the higher priority is down. // // Init 0 and 1; 0 and 1 both down; add 2, use 2. func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, with different priorities, each with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -226,21 +208,18 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { sc0 := <-cc.NewSubConnCh // Turn down 0, 1 is used. - edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh // Turn down 1, pick should error. - edsb.handleSubConnStateChange(sc1, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Test pick failure. - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } + if err := testErrPickerFromCh(cc.NewPickerCh, balancer.ErrTransientFailure); err != nil { + t.Fatal(err) } // Add p2, it should create a new SubConn. 
@@ -248,41 +227,34 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + t.Fatal(err) } + } // When a higher priority becomes available, all lower priorities are closed. // // Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2. func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, with priorities [0,1,2], each with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -290,39 +262,55 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { sc0 := <-cc.NewSubConnCh // Turn down 0, 1 is used. - edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh // Turn down 1, 2 is used. - edsb.handleSubConnStateChange(sc1, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. 
- p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + t.Fatal(err) } // When 0 becomes ready, 0 should be used, 1 and 2 should all be closed. - edsb.handleSubConnStateChange(sc0, connectivity.Ready) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + var ( + scToRemove []balancer.SubConn + scToRemoveMap = make(map[balancer.SubConn]struct{}) + ) + // Each subconn is removed twice. This is OK in production, but it makes + // testing harder. + // + // The sub-balancer to be closed is priority's child, clusterimpl, who has + // weightedtarget as children. + // + // - When clusterimpl is removed from priority's balancergroup, all its + // subconns are removed once. + // - When clusterimpl is closed, it closes weightedtarget, and this + // weightedtarget's balancer removes all the same subconns again. + for i := 0; i < 4; i++ { + // We expect 2 subconns, so we recv from channel 4 times. + scToRemoveMap[<-cc.RemoveSubConnCh] = struct{}{} + } + for sc := range scToRemoveMap { + scToRemove = append(scToRemove, sc) + } // sc1 and sc2 should be removed. // // With localities caching, the lower priorities are closed after a timeout, // in goroutines. The order is no longer guaranteed. - scToRemove := []balancer.SubConn{<-cc.RemoveSubConnCh, <-cc.RemoveSubConnCh} if !(cmp.Equal(scToRemove[0], sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && cmp.Equal(scToRemove[1], sc2, cmp.AllowUnexported(testutils.TestSubConn{}))) && !(cmp.Equal(scToRemove[0], sc2, cmp.AllowUnexported(testutils.TestSubConn{})) && @@ -331,12 +319,8 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { } // Test pick with 0. 
- p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p0.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + t.Fatal(err) } } @@ -347,23 +331,20 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { func (s) TestEDSPriority_InitTimeout(t *testing.T) { const testPriorityInitTimeout = time.Second defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout + old := priority.DefaultPriorityInitTimeout + priority.DefaultPriorityInitTimeout = testPriorityInitTimeout return func() { - defaultPriorityInitTimeout = old + priority.DefaultPriorityInitTimeout = old } }()() - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, with different priorities, each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -371,7 +352,7 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { sc0 := <-cc.NewSubConnCh // Keep 0 in connecting, 1 will be used after init timeout. 
- edsb.handleSubConnStateChange(sc0, connectivity.Connecting) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // Make sure new SubConn is created before timeout. select { @@ -386,16 +367,12 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { } sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + t.Fatal(err) } } @@ -404,51 +381,44 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { // - start with 2 locality with p0 and p1 // - add localities to existing p0 and p1 func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, with different priorities, each with one backend. 
clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab0.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab0.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc0 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc0, connectivity.Connecting) - edsb.handleSubConnStateChange(sc0, connectivity.Ready) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + t.Fatal(err) } // Turn down p0 subconns, p1 subconns will be created. - edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. 
- p1 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + t.Fatal(err) } // Reconnect p0 subconns, p1 subconn will be closed. - edsb.handleSubConnStateChange(sc0, connectivity.Ready) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { @@ -456,10 +426,8 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { } // Test roundrobin with only p0 subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + t.Fatal(err) } // Add two localities, with two priorities, with one backend. 
@@ -468,39 +436,34 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) clab1.AddLocality(testSubZones[3], 1, 1, testEndpointAddrs[3:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only two p0 subconns. - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc0, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0, sc2}); err != nil { + t.Fatal(err) } // Turn down p0 subconns, p1 subconns will be created. 
- edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) - edsb.handleSubConnStateChange(sc2, connectivity.TransientFailure) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) sc3 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc3, connectivity.Connecting) - edsb.handleSubConnStateChange(sc3, connectivity.Ready) + edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) sc4 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc4, connectivity.Connecting) - edsb.handleSubConnStateChange(sc4, connectivity.Ready) + edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. 
- p4 := <-cc.NewPickerCh - want = []balancer.SubConn{sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3, sc4}); err != nil { + t.Fatal(err) } } @@ -508,62 +471,55 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { const testPriorityInitTimeout = time.Second defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout + old := priority.DefaultPriorityInitTimeout + priority.DefaultPriorityInitTimeout = testPriorityInitTimeout return func() { - defaultPriorityInitTimeout = old + priority.DefaultPriorityInitTimeout = old } }()() - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, with different priorities, each with one backend. 
clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab0.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab0.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc0 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc0, connectivity.Connecting) - edsb.handleSubConnStateChange(sc0, connectivity.Ready) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + t.Fatal(err) } // Remove all priorities. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) // p0 subconn should be removed. scToRemove := <-cc.RemoveSubConnCh + <-cc.RemoveSubConnCh // Drain the duplicate subconn removed. if !cmp.Equal(scToRemove, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc0, scToRemove) } + // time.Sleep(time.Second) + // Test pick return TransientFailure. 
- pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != errAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", errAllPrioritiesRemoved, err) - } + if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { + t.Fatal(err) } // Re-add two localities, with previous priorities, but different backends. clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[3:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) addrs01 := <-cc.NewSubConnAddrsCh if got, want := addrs01[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -580,45 +536,39 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc11 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc11, connectivity.Connecting) - edsb.handleSubConnStateChange(sc11, connectivity.Ready) + edsb.UpdateSubConnState(sc11, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc11, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p1 := <-cc.NewPickerCh - want = []balancer.SubConn{sc11} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc11}); err != nil { + t.Fatal(err) } // Remove p1 from EDS, to fallback to p0. 
clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) // p1 subconn should be removed. scToRemove1 := <-cc.RemoveSubConnCh + <-cc.RemoveSubConnCh // Drain the duplicate subconn removed. if !cmp.Equal(scToRemove1, sc11, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc11, scToRemove1) } // Test pick return TransientFailure. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if scst, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error _, %v, got %v, _ ,%v", balancer.ErrTransientFailure, scst, err) - } + if err := testErrPickerFromCh(cc.NewPickerCh, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err) } // Send an ready update for the p0 sc that was received when re-adding // localities to EDS. - edsb.handleSubConnStateChange(sc01, connectivity.Connecting) - edsb.handleSubConnStateChange(sc01, connectivity.Ready) + edsb.UpdateSubConnState(sc01, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc01, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc01} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc01}); err != nil { + t.Fatal(err) } select { @@ -632,83 +582,16 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { } } -func (s) TestPriorityType(t *testing.T) { - p0 := newPriorityType(0) - p1 := newPriorityType(1) - p2 := newPriorityType(2) - - if !p0.higherThan(p1) || !p0.higherThan(p2) { - t.Errorf("want p0 to be higher than p1 and p2, got p0>p1: %v, p0>p2: %v", !p0.higherThan(p1), !p0.higherThan(p2)) - } - if !p1.lowerThan(p0) || !p1.higherThan(p2) { - t.Errorf("want p1 to be between p0 and p2, got p1p2: %v", !p1.lowerThan(p0), !p1.higherThan(p2)) - } - if !p2.lowerThan(p0) || !p2.lowerThan(p1) { - t.Errorf("want p2 to be lower than p0 and p1, got p2") - } else if i > 50 && err != nil { - t.Errorf("The second 50%% picks should be non-drops, got error %v", err) + if err := testPickerFromCh(cc.NewPickerCh, func(p balancer.Picker) error { + for i := 0; i < 100; i++ { + _, err := p.Pick(balancer.PickInfo{}) + // TODO: the dropping algorithm needs a design. When the dropping algorithm + // is fixed, this test also needs fix. + if i%2 == 0 && err == nil { + return fmt.Errorf("%d - the even number picks should be drops, got error ", i) + } else if i%2 != 0 && err != nil { + return fmt.Errorf("%d - the odd number picks should be non-drops, got error %v", i, err) + } } + return nil + }); err != nil { + t.Fatal(err) } // The same locality, remove drops. clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab6.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab6.Build()), nil) // Pick without drops. 
- p6 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p6.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3}); err != nil { + t.Fatal(err) } } @@ -184,32 +198,29 @@ func (s) TestEDS_OneLocality(t *testing.T) { // - address change for the locality // - update locality weight func (s) TestEDS_TwoLocalities(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Add the second locality later to make sure sc2 belongs to the second // locality. Otherwise the test is flaky because of a map is used in EDS to // keep localities. 
clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with two subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1, sc2}); err != nil { + t.Fatal(err) } // Add another locality, with one backend. @@ -217,79 +228,73 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) sc3 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc3, connectivity.Connecting) - edsb.handleSubConnStateChange(sc3, connectivity.Ready) + edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with three subconns. 
- p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1, sc2, sc3}); err != nil { + t.Fatal(err) } // Remove first locality. clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab3.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) } - edsb.handleSubConnStateChange(scToRemove, connectivity.Shutdown) + edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) // Test pick with two subconns (without the first one). - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2, sc3}); err != nil { + t.Fatal(err) } // Add a backend to the last locality. 
clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab4.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab4.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab4.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab4.Build()), nil) sc4 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc4, connectivity.Connecting) - edsb.handleSubConnStateChange(sc4, connectivity.Ready) + edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with two subconns (without the first one). - p4 := <-cc.NewPickerCh + // // Locality-1 will be picked twice, and locality-2 will be picked twice. // Locality-1 contains only sc2, locality-2 contains sc3 and sc4. So expect // two sc2's and sc3, sc4. - want = []balancer.SubConn{sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2, sc2, sc3, sc4}); err != nil { + t.Fatal(err) } // Change weight of the locality[1]. clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab5.AddLocality(testSubZones[1], 2, 0, testEndpointAddrs[1:2], nil) clab5.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab5.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab5.Build()), nil) // Test pick with two subconns different locality weight. - p5 := <-cc.NewPickerCh + // // Locality-1 will be picked four times, and locality-2 will be picked twice // (weight 2 and 1). Locality-1 contains only sc2, locality-2 contains sc3 and // sc4. So expect four sc2's and sc3, sc4. 
- want = []balancer.SubConn{sc2, sc2, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p5)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2, sc2, sc2, sc2, sc3, sc4}); err != nil { + t.Fatal(err) } // Change weight of the locality[1] to 0, it should never be picked. clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[1], 0, 0, testEndpointAddrs[1:2], nil) clab6.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab6.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab6.Build()), nil) // Changing weight of locality[1] to 0 caused it to be removed. It's subconn // should also be removed. @@ -303,21 +308,19 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { } // Test pick with two subconns different locality weight. - p6 := <-cc.NewPickerCh + // // Locality-1 will be not be picked, and locality-2 will be picked. // Locality-2 contains sc3 and sc4. So expect sc3, sc4. - want = []balancer.SubConn{sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p6)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3, sc4}); err != nil { + t.Fatal(err) } } // The EDS balancer gets EDS resp with unhealthy endpoints. Test that only // healthy ones are used. func (s) TestEDS_EndpointsHealth(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() // Two localities, each 3 backend, one Healthy, one Unhealthy, one Unknown. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -341,7 +344,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { corepb.HealthStatus_DEGRADED, }, }) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) var ( readySCs []balancer.SubConn @@ -351,8 +354,8 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { addr := <-cc.NewSubConnAddrsCh newSubConnAddrStrs = append(newSubConnAddrStrs, addr[0].Addr) sc := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc, connectivity.Connecting) - edsb.handleSubConnStateChange(sc, connectivity.Ready) + edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) readySCs = append(readySCs, sc) } @@ -380,81 +383,69 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { } // Test roundrobin with the subconns. - p1 := <-cc.NewPickerCh - want := readySCs - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, readySCs); err != nil { + t.Fatal(err) } } -func (s) TestClose(t *testing.T) { - edsb := newEDSBalancerImpl(nil, balancer.BuildOptions{}, nil, nil, nil) - // This is what could happen when switching between fallback and eds. This - // make sure it doesn't panic. - edsb.close() -} - // TestEDS_EmptyUpdate covers the cases when eds impl receives an empty update. // // It should send an error picker with transient failure to the parent. 
func (s) TestEDS_EmptyUpdate(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() + + const cacheTimeout = 100 * time.Microsecond + oldCacheTimeout := balancergroup.DefaultSubBalancerCloseTimeout + balancergroup.DefaultSubBalancerCloseTimeout = cacheTimeout + defer func() { balancergroup.DefaultSubBalancerCloseTimeout = oldCacheTimeout }() // The first update is an empty update. - edsb.handleEDSResponse(xdsclient.EndpointsUpdate{}) + xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. - perr0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := perr0.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(err, errAllPrioritiesRemoved) { - t.Fatalf("picker.Pick, got error %v, want error %v", err, errAllPrioritiesRemoved) - } + if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { + t.Fatal(err) } // One locality with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Pick with only the first backend. 
- p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(gotSCSt.SubConn, sc1) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + t.Fatal(err) } - edsb.handleEDSResponse(xdsclient.EndpointsUpdate{}) + xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. - perr1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := perr1.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(err, errAllPrioritiesRemoved) { - t.Fatalf("picker.Pick, got error %v, want error %v", err, errAllPrioritiesRemoved) - } + if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { + t.Fatal(err) } + // Wait for the old SubConn to be removed (which happens when the child + // policy is closed), so a new update would trigger a new SubConn (we need + // this new SubConn to tell if the next picker is newly created). + scToRemove := <-cc.RemoveSubConnCh + if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) + } + edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + // Handle another update with priorities and localities. - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Pick with only the first backend. 
- p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(gotSCSt.SubConn, sc2) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + t.Fatal(err) } } @@ -464,7 +455,6 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { const balancerName = "stubBalancer-TestEDS_UpdateSubBalancerName" - cc := testutils.NewTestClientConn(t) stub.Register(balancerName, stub.BalancerFuncs{ UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { if len(s.ResolverState.Addresses) == 0 { @@ -481,54 +471,59 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { }, }) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() t.Logf("update sub-balancer to stub-balancer") - edsb.handleChildPolicy(balancerName, nil) + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: &loadBalancingConfig{Name: balancerName}}, + }); err != nil { + t.Fatal(err) + } // Two localities, each with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) for i := 0; i < 2; i++ { sc := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc, connectivity.Ready) + edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) } - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != testutils.ErrTestConstPicker { - t.Fatalf("picker.Pick, got err %+v, want err %+v", err, testutils.ErrTestConstPicker) - } + if err := testErrPickerFromCh(cc.NewPickerCh, testutils.ErrTestConstPicker); err != nil { + t.Fatal(err) } t.Logf("update sub-balancer to round-robin") - edsb.handleChildPolicy(roundrobin.Name, nil) + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}}, + }); err != nil { + t.Fatal(err) + } for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - // Test roundrobin with two subconns. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1, sc2}); err != nil { + t.Fatal(err) } t.Logf("update sub-balancer to stub-balancer") - edsb.handleChildPolicy(balancerName, nil) + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: &loadBalancingConfig{Name: balancerName}}, + }); err != nil { + t.Fatal(err) + } for i := 0; i < 2; i++ { scToRemove := <-cc.RemoveSubConnCh @@ -536,57 +531,63 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want (%v or %v), got %v", sc1, sc2, scToRemove) } - edsb.handleSubConnStateChange(scToRemove, connectivity.Shutdown) + edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) } for i := 0; i < 2; i++ { sc := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc, connectivity.Ready) + edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) } - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := p2.Pick(balancer.PickInfo{}) - if err != testutils.ErrTestConstPicker { - t.Fatalf("picker.Pick, got err %q, want err %q", err, testutils.ErrTestConstPicker) - } + if err := testErrPickerFromCh(cc.NewPickerCh, testutils.ErrTestConstPicker); err != nil { + t.Fatal(err) } t.Logf("update sub-balancer to round-robin") - edsb.handleChildPolicy(roundrobin.Name, nil) + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}}, + }); err != nil { + t.Fatal(err) + } for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } sc3 := <-cc.NewSubConnCh - 
edsb.handleSubConnStateChange(sc3, connectivity.Connecting) - edsb.handleSubConnStateChange(sc3, connectivity.Ready) + edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) sc4 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc4, connectivity.Connecting) - edsb.handleSubConnStateChange(sc4, connectivity.Ready) + edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3, sc4}); err != nil { + t.Fatal(err) } } func (s) TestEDS_CircuitBreaking(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState + edsb, cc, xdsC, cleanup := setupTestEDS(t) + defer cleanup() + var maxRequests uint32 = 50 - edsb.updateServiceRequestsConfig("test", &maxRequests) + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ + ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}, + ClusterName: testClusterName, + MaxConcurrentRequests: &maxRequests, + }, + }); err != nil { + t.Fatal(err) + } // One locality with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) + xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Picks with drops. dones := []func(){} @@ -631,7 +632,15 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { // Send another update, with only circuit breaking update (and no picker // update afterwards). Make sure the new picker uses the new configs. var maxRequests2 uint32 = 10 - edsb.updateServiceRequestsConfig("test", &maxRequests2) + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &EDSConfig{ + ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}, + ClusterName: testClusterName, + MaxConcurrentRequests: &maxRequests2, + }, + }); err != nil { + t.Fatal(err) + } // Picks with drops. dones = []func(){} @@ -673,327 +682,3 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { done() } } - -func init() { - balancer.Register(&testInlineUpdateBalancerBuilder{}) -} - -// A test balancer that updates balancer.State inline when handling ClientConn -// state. 
-type testInlineUpdateBalancerBuilder struct{} - -func (*testInlineUpdateBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return &testInlineUpdateBalancer{cc: cc} -} - -func (*testInlineUpdateBalancerBuilder) Name() string { - return "test-inline-update-balancer" -} - -type testInlineUpdateBalancer struct { - cc balancer.ClientConn -} - -func (tb *testInlineUpdateBalancer) ResolverError(error) { - panic("not implemented") -} - -func (tb *testInlineUpdateBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { -} - -var errTestInlineStateUpdate = fmt.Errorf("don't like addresses, empty or not") - -func (tb *testInlineUpdateBalancer) UpdateClientConnState(balancer.ClientConnState) error { - tb.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &testutils.TestConstPicker{Err: errTestInlineStateUpdate}, - }) - return nil -} - -func (*testInlineUpdateBalancer) Close() { -} - -// When the child policy update picker inline in a handleClientUpdate call -// (e.g., roundrobin handling empty addresses). There could be deadlock caused -// by acquiring a locked mutex. -func (s) TestEDS_ChildPolicyUpdatePickerInline(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = func(p priorityType, state balancer.State) { - // For this test, euqueue needs to happen asynchronously (like in the - // real implementation). 
- go edsb.updateState(p, state) - } - - edsb.handleChildPolicy("test-inline-update-balancer", nil) - - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != errTestInlineStateUpdate { - t.Fatalf("picker.Pick, got err %q, want err %q", err, errTestInlineStateUpdate) - } - } -} - -func (s) TestDropPicker(t *testing.T) { - const pickCount = 12 - var constPicker = &testutils.TestConstPicker{ - SC: testutils.TestSubConns[0], - } - - tests := []struct { - name string - drops []*dropper - }{ - { - name: "no drop", - drops: nil, - }, - { - name: "one drop", - drops: []*dropper{ - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), - }, - }, - { - name: "two drops", - drops: []*dropper{ - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 3}), - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), - }, - }, - { - name: "three drops", - drops: []*dropper{ - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 3}), - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 4}), - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - p := newDropPicker(constPicker, tt.drops, nil, nil, defaultServiceRequestCountMax) - - // scCount is the number of sc's returned by pick. The opposite of - // drop-count. 
- var ( - scCount int - wantCount = pickCount - ) - for _, dp := range tt.drops { - wantCount = wantCount * int(dp.c.Denominator-dp.c.Numerator) / int(dp.c.Denominator) - } - - for i := 0; i < pickCount; i++ { - _, err := p.Pick(balancer.PickInfo{}) - if err == nil { - scCount++ - } - } - - if scCount != (wantCount) { - t.Errorf("drops: %+v, scCount %v, wantCount %v", tt.drops, scCount, wantCount) - } - }) - } -} - -func (s) TestEDS_LoadReport(t *testing.T) { - // We create an xdsClientWrapper with a dummy xdsClient which only - // implements the LoadStore() method to return the underlying load.Store to - // be used. - loadStore := load.NewStore() - lsWrapper := loadstore.NewWrapper() - lsWrapper.UpdateClusterAndService(testClusterNames[0], "") - lsWrapper.UpdateLoadStore(loadStore) - - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, lsWrapper, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - const ( - testServiceName = "test-service" - cbMaxRequests = 20 - ) - var maxRequestsTemp uint32 = cbMaxRequests - edsb.updateServiceRequestsConfig(testServiceName, &maxRequestsTemp) - defer xdsclient.ClearCounterForTesting(testServiceName) - - backendToBalancerID := make(map[balancer.SubConn]xdsinternal.LocalityID) - - const testDropCategory = "test-drop" - // Two localities, each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], map[string]uint32{testDropCategory: 50}) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - locality1 := xdsinternal.LocalityID{SubZone: testSubZones[0]} - backendToBalancerID[sc1] = locality1 - - // Add the second locality later to make sure sc2 belongs to the second - // locality. 
Otherwise the test is flaky because of a map is used in EDS to - // keep localities. - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - locality2 := xdsinternal.LocalityID{SubZone: testSubZones[1]} - backendToBalancerID[sc2] = locality2 - - // Test roundrobin with two subconns. - p1 := <-cc.NewPickerCh - // We expect the 10 picks to be split between the localities since they are - // of equal weight. And since we only mark the picks routed to sc2 as done, - // the picks on sc1 should show up as inProgress. - locality1JSON, _ := locality1.ToString() - locality2JSON, _ := locality2.ToString() - const ( - rpcCount = 100 - // 50% will be dropped with category testDropCategory. - dropWithCategory = rpcCount / 2 - // In the remaining RPCs, only cbMaxRequests are allowed by circuit - // breaking. Others will be dropped by CB. - dropWithCB = rpcCount - dropWithCategory - cbMaxRequests - - rpcInProgress = cbMaxRequests / 2 // 50% of RPCs will be never done. - rpcSucceeded = cbMaxRequests / 2 // 50% of RPCs will succeed. - ) - wantStoreData := []*load.Data{{ - Cluster: testClusterNames[0], - Service: "", - LocalityStats: map[string]load.LocalityData{ - locality1JSON: {RequestStats: load.RequestData{InProgress: rpcInProgress}}, - locality2JSON: {RequestStats: load.RequestData{Succeeded: rpcSucceeded}}, - }, - TotalDrops: dropWithCategory + dropWithCB, - Drops: map[string]uint64{ - testDropCategory: dropWithCategory, - }, - }} - - var rpcsToBeDone []balancer.PickResult - // Run the picks, but only pick with sc1 will be done later. - for i := 0; i < rpcCount; i++ { - scst, _ := p1.Pick(balancer.PickInfo{}) - if scst.Done != nil && scst.SubConn != sc1 { - rpcsToBeDone = append(rpcsToBeDone, scst) - } - } - // Call done on those sc1 picks. 
- for _, scst := range rpcsToBeDone { - scst.Done(balancer.DoneInfo{}) - } - - gotStoreData := loadStore.Stats(testClusterNames[0:1]) - if diff := cmp.Diff(wantStoreData, gotStoreData, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(load.Data{}, "ReportInterval")); diff != "" { - t.Errorf("store.stats() returned unexpected diff (-want +got):\n%s", diff) - } -} - -// TestEDS_LoadReportDisabled covers the case that LRS is disabled. It makes -// sure the EDS implementation isn't broken (doesn't panic). -func (s) TestEDS_LoadReportDisabled(t *testing.T) { - lsWrapper := loadstore.NewWrapper() - lsWrapper.UpdateClusterAndService(testClusterNames[0], "") - // Not calling lsWrapper.updateLoadStore(loadStore) because LRS is disabled. - - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, lsWrapper, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // One localities, with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Test roundrobin with two subconns. - p1 := <-cc.NewPickerCh - // We call picks to make sure they don't panic. - for i := 0; i < 10; i++ { - p1.Pick(balancer.PickInfo{}) - } -} - -// TestEDS_ClusterNameInAddressAttributes covers the case that cluster name is -// attached to the subconn address attributes. -func (s) TestEDS_ClusterNameInAddressAttributes(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - const clusterName1 = "cluster-name-1" - edsb.updateClusterName(clusterName1) - - // One locality with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - cn, ok := internal.GetXDSHandshakeClusterName(addrs1[0].Attributes) - if !ok || cn != clusterName1 { - t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn, ok, clusterName1) - } - - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Pick with only the first backend. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } - - // Change cluster name. - const clusterName2 = "cluster-name-2" - edsb.updateClusterName(clusterName2) - - // Change backend. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - // New addresses should have the new cluster name. 
- cn2, ok := internal.GetXDSHandshakeClusterName(addrs2[0].Attributes) - if !ok || cn2 != clusterName2 { - t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn2, ok, clusterName1) - } - - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Test roundrobin with two subconns. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } - } -} diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go index c20e8206b9ec..15cc10ba89df 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/edsbalancer/eds_test.go @@ -25,7 +25,6 @@ import ( "context" "encoding/json" "fmt" - "reflect" "testing" "time" @@ -34,7 +33,6 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpctest" scpb "google.golang.org/grpc/internal/proto/grpc_service_config" "google.golang.org/grpc/internal/testutils" @@ -43,7 +41,6 @@ import ( "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/load" _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // V2 client registration. 
) @@ -52,7 +49,7 @@ const ( defaultTestTimeout = 1 * time.Second defaultTestShortTimeout = 10 * time.Millisecond testServiceName = "test/foo" - testEDSClusterName = "test/service/eds" + testClusterName = "test/cluster" ) var ( @@ -70,15 +67,22 @@ var ( } ) -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } +func init() { + balancer.Register(bb{}) } type s struct { grpctest.Tester + + cleanup func() +} + +func (ss s) Teardown(t *testing.T) { + xdsclient.ClearAllCountersForTesting() + ss.Tester.Teardown(t) + if ss.cleanup != nil { + ss.cleanup() + } } func Test(t *testing.T) { @@ -105,132 +109,65 @@ func (noopTestClientConn) Target() string { return testServiceName } type scStateChange struct { sc balancer.SubConn - state connectivity.State -} - -type fakeEDSBalancer struct { - cc balancer.ClientConn - childPolicy *testutils.Channel - subconnStateChange *testutils.Channel - edsUpdate *testutils.Channel - serviceName *testutils.Channel - serviceRequestMax *testutils.Channel - clusterName *testutils.Channel + state balancer.SubConnState } -func (f *fakeEDSBalancer) handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { - f.subconnStateChange.Send(&scStateChange{sc: sc, state: state}) +type fakeChildBalancer struct { + cc balancer.ClientConn + subConnState *testutils.Channel + clientConnState *testutils.Channel + resolverError *testutils.Channel } -func (f *fakeEDSBalancer) handleChildPolicy(name string, config json.RawMessage) { - f.childPolicy.Send(&loadBalancingConfig{Name: name, Config: config}) -} - -func (f *fakeEDSBalancer) handleEDSResponse(edsResp xdsclient.EndpointsUpdate) { - f.edsUpdate.Send(edsResp) +func (f *fakeChildBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + f.clientConnState.Send(state) + return nil } -func (f *fakeEDSBalancer) updateState(priority priorityType, s balancer.State) {} - 
-func (f *fakeEDSBalancer) updateServiceRequestsConfig(serviceName string, max *uint32) { - f.serviceName.Send(serviceName) - f.serviceRequestMax.Send(max) +func (f *fakeChildBalancer) ResolverError(err error) { + f.resolverError.Send(err) } -func (f *fakeEDSBalancer) updateClusterName(name string) { - f.clusterName.Send(name) +func (f *fakeChildBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + f.subConnState.Send(&scStateChange{sc: sc, state: state}) } -func (f *fakeEDSBalancer) close() {} +func (f *fakeChildBalancer) Close() {} -func (f *fakeEDSBalancer) waitForChildPolicy(ctx context.Context, wantPolicy *loadBalancingConfig) error { - val, err := f.childPolicy.Receive(ctx) +func (f *fakeChildBalancer) waitForClientConnStateChange(ctx context.Context) error { + _, err := f.clientConnState.Receive(ctx) if err != nil { return err } - gotPolicy := val.(*loadBalancingConfig) - if !cmp.Equal(gotPolicy, wantPolicy) { - return fmt.Errorf("got childPolicy %v, want %v", gotPolicy, wantPolicy) - } return nil } -func (f *fakeEDSBalancer) waitForSubConnStateChange(ctx context.Context, wantState *scStateChange) error { - val, err := f.subconnStateChange.Receive(ctx) +func (f *fakeChildBalancer) waitForResolverError(ctx context.Context) error { + _, err := f.resolverError.Receive(ctx) if err != nil { return err } - gotState := val.(*scStateChange) - if !cmp.Equal(gotState, wantState, cmp.AllowUnexported(scStateChange{})) { - return fmt.Errorf("got subconnStateChange %v, want %v", gotState, wantState) - } return nil } -func (f *fakeEDSBalancer) waitForEDSResponse(ctx context.Context, wantUpdate xdsclient.EndpointsUpdate) error { - val, err := f.edsUpdate.Receive(ctx) - if err != nil { - return err - } - gotUpdate := val.(xdsclient.EndpointsUpdate) - if !reflect.DeepEqual(gotUpdate, wantUpdate) { - return fmt.Errorf("got edsUpdate %+v, want %+v", gotUpdate, wantUpdate) - } - return nil -} - -func (f *fakeEDSBalancer) waitForCounterUpdate(ctx 
context.Context, wantServiceName string) error { - val, err := f.serviceName.Receive(ctx) - if err != nil { - return err - } - gotServiceName := val.(string) - if gotServiceName != wantServiceName { - return fmt.Errorf("got serviceName %v, want %v", gotServiceName, wantServiceName) - } - return nil -} - -func (f *fakeEDSBalancer) waitForCountMaxUpdate(ctx context.Context, want *uint32) error { - val, err := f.serviceRequestMax.Receive(ctx) - if err != nil { - return err - } - got := val.(*uint32) - - if got == nil && want == nil { - return nil - } - if got != nil && want != nil { - if *got != *want { - return fmt.Errorf("got countMax %v, want %v", *got, *want) - } - return nil - } - return fmt.Errorf("got countMax %+v, want %+v", got, want) -} - -func (f *fakeEDSBalancer) waitForClusterNameUpdate(ctx context.Context, wantClusterName string) error { - val, err := f.clusterName.Receive(ctx) +func (f *fakeChildBalancer) waitForSubConnStateChange(ctx context.Context, wantState *scStateChange) error { + val, err := f.subConnState.Receive(ctx) if err != nil { return err } - gotServiceName := val.(string) - if gotServiceName != wantClusterName { - return fmt.Errorf("got clusterName %v, want %v", gotServiceName, wantClusterName) + gotState := val.(*scStateChange) + if !cmp.Equal(gotState, wantState, cmp.AllowUnexported(scStateChange{})) { + return fmt.Errorf("got subconnStateChange %v, want %v", gotState, wantState) } return nil } -func newFakeEDSBalancer(cc balancer.ClientConn) edsBalancerImplInterface { - return &fakeEDSBalancer{ - cc: cc, - childPolicy: testutils.NewChannelWithSize(10), - subconnStateChange: testutils.NewChannelWithSize(10), - edsUpdate: testutils.NewChannelWithSize(10), - serviceName: testutils.NewChannelWithSize(10), - serviceRequestMax: testutils.NewChannelWithSize(10), - clusterName: testutils.NewChannelWithSize(10), +func newFakeChildBalancer(cc balancer.ClientConn) balancer.Balancer { + return &fakeChildBalancer{ + cc: cc, + subConnState: 
testutils.NewChannelWithSize(10), + clientConnState: testutils.NewChannelWithSize(10), + resolverError: testutils.NewChannelWithSize(10), } } @@ -239,168 +176,37 @@ type fakeSubConn struct{} func (*fakeSubConn) UpdateAddresses([]resolver.Address) { panic("implement me") } func (*fakeSubConn) Connect() { panic("implement me") } -// waitForNewEDSLB makes sure that a new edsLB is created by the top-level +// waitForNewChildLB makes sure that a new child LB is created by the top-level // edsBalancer. -func waitForNewEDSLB(ctx context.Context, ch *testutils.Channel) (*fakeEDSBalancer, error) { +func waitForNewChildLB(ctx context.Context, ch *testutils.Channel) (*fakeChildBalancer, error) { val, err := ch.Receive(ctx) if err != nil { return nil, fmt.Errorf("error when waiting for a new edsLB: %v", err) } - return val.(*fakeEDSBalancer), nil + return val.(*fakeChildBalancer), nil } // setup overrides the functions which are used to create the xdsClient and the // edsLB, creates fake version of them and makes them available on the provided // channels. The returned cancel function should be called by the test for // cleanup. 
-func setup(edsLBCh *testutils.Channel) (*fakeclient.Client, func()) { +func setup(childLBCh *testutils.Channel) (*fakeclient.Client, func()) { xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) - origNewEDSBalancer := newEDSBalancer - newEDSBalancer = func(cc balancer.ClientConn, _ balancer.BuildOptions, _ func(priorityType, balancer.State), _ load.PerClusterReporter, _ *grpclog.PrefixLogger) edsBalancerImplInterface { - edsLB := newFakeEDSBalancer(cc) - defer func() { edsLBCh.Send(edsLB) }() - return edsLB + origNewChildBalancer := newChildBalancer + newChildBalancer = func(_ balancer.Builder, cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + childLB := newFakeChildBalancer(cc) + defer func() { childLBCh.Send(childLB) }() + return childLB } return xdsC, func() { - newEDSBalancer = origNewEDSBalancer + newChildBalancer = origNewChildBalancer xdsC.Close() } } -const ( - fakeBalancerA = "fake_balancer_A" - fakeBalancerB = "fake_balancer_B" -) - -// Install two fake balancers for service config update tests. -// -// ParseConfig only accepts the json if the balancer specified is registered. 
-func init() { - balancer.Register(&fakeBalancerBuilder{name: fakeBalancerA}) - balancer.Register(&fakeBalancerBuilder{name: fakeBalancerB}) -} - -type fakeBalancerBuilder struct { - name string -} - -func (b *fakeBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return &fakeBalancer{cc: cc} -} - -func (b *fakeBalancerBuilder) Name() string { - return b.name -} - -type fakeBalancer struct { - cc balancer.ClientConn -} - -func (b *fakeBalancer) ResolverError(error) { - panic("implement me") -} - -func (b *fakeBalancer) UpdateClientConnState(balancer.ClientConnState) error { - panic("implement me") -} - -func (b *fakeBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { - panic("implement me") -} - -func (b *fakeBalancer) Close() {} - -// TestConfigChildPolicyUpdate verifies scenarios where the childPolicy -// section of the lbConfig is updated. -// -// The test does the following: -// * Builds a new EDS balancer. -// * Pushes a new ClientConnState with a childPolicy set to fakeBalancerA. -// Verifies that an EDS watch is registered. It then pushes a new edsUpdate -// through the fakexds client. Verifies that a new edsLB is created and it -// receives the expected childPolicy. -// * Pushes a new ClientConnState with a childPolicy set to fakeBalancerB. -// Verifies that the existing edsLB receives the new child policy. 
-func (s) TestConfigChildPolicyUpdate(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - - lbCfgA := &loadBalancingConfig{ - Name: fakeBalancerA, - Config: json.RawMessage("{}"), - } - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ - ChildPolicy: lbCfgA, - ClusterName: testEDSClusterName, - EDSServiceName: testServiceName, - }, - }); err != nil { - t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) - } - - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback(defaultEndpointsUpdate, nil) - if err := edsLB.waitForChildPolicy(ctx, lbCfgA); err != nil { - t.Fatal(err) - } - if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { - t.Fatal(err) - } - if err := edsLB.waitForCountMaxUpdate(ctx, nil); err != nil { - t.Fatal(err) - } - - var testCountMax uint32 = 100 - lbCfgB := &loadBalancingConfig{ - Name: fakeBalancerB, - Config: json.RawMessage("{}"), - } - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ - ChildPolicy: lbCfgB, - ClusterName: testEDSClusterName, - EDSServiceName: testServiceName, - MaxConcurrentRequests: &testCountMax, - }, - }); err != nil { - t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) - } - if err := 
edsLB.waitForChildPolicy(ctx, lbCfgB); err != nil { - t.Fatal(err) - } - if err := edsLB.waitForCounterUpdate(ctx, testEDSClusterName); err != nil { - // Counter is updated even though the service name didn't change. The - // eds_impl will compare the service names, and skip if it didn't change. - t.Fatal(err) - } - if err := edsLB.waitForCountMaxUpdate(ctx, &testCountMax); err != nil { - t.Fatal(err) - } -} - // TestSubConnStateChange verifies if the top-level edsBalancer passes on -// the subConnStateChange to appropriate child balancer. +// the subConnState to appropriate child balancer. func (s) TestSubConnStateChange(t *testing.T) { edsLBCh := testutils.NewChannel() xdsC, cleanup := setup(edsLBCh) @@ -413,13 +219,6 @@ func (s) TestSubConnStateChange(t *testing.T) { } defer edsB.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, @@ -427,14 +226,20 @@ func (s) TestSubConnStateChange(t *testing.T) { t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } xdsC.InvokeWatchEDSCallback(defaultEndpointsUpdate, nil) + edsLB, err := waitForNewChildLB(ctx, edsLBCh) + if err != nil { + t.Fatal(err) + } fsc := &fakeSubConn{} - state := connectivity.Ready - edsB.UpdateSubConnState(fsc, balancer.SubConnState{ConnectivityState: state}) + state := balancer.SubConnState{ConnectivityState: connectivity.Ready} + edsB.UpdateSubConnState(fsc, state) if err := edsLB.waitForSubConnStateChange(ctx, &scStateChange{sc: fsc, state: state}); err != nil { 
t.Fatal(err) } @@ -462,24 +267,22 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, }); err != nil { t.Fatal(err) } - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("EDS impl got unexpected EDS response: %v", err) + edsLB, err := waitForNewChildLB(ctx, edsLBCh) + if err != nil { + t.Fatal(err) + } + if err := edsLB.waitForClientConnStateChange(ctx); err != nil { + t.Fatalf("EDS impl got unexpected update: %v", err) } connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") @@ -493,9 +296,12 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := edsLB.waitForEDSResponse(sCtx, xdsclient.EndpointsUpdate{}); err != context.DeadlineExceeded { + if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { t.Fatal(err) } + if err := edsLB.waitForResolverError(ctx); err != nil { + t.Fatalf("want resolver error, got %v", err) + } resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error") xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, resourceErr) @@ -507,8 +313,11 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { if err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("watch was canceled, want not canceled (timeout error)") 
} - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("eds impl expecting empty update, got %v", err) + if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { + t.Fatal(err) + } + if err := edsLB.waitForResolverError(ctx); err != nil { + t.Fatalf("want resolver error, got %v", err) } // An update with the same service name should not trigger a new watch. @@ -546,11 +355,6 @@ func (s) TestErrorFromResolver(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, @@ -562,8 +366,12 @@ func (s) TestErrorFromResolver(t *testing.T) { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("EDS impl got unexpected EDS response: %v", err) + edsLB, err := waitForNewChildLB(ctx, edsLBCh) + if err != nil { + t.Fatal(err) + } + if err := edsLB.waitForClientConnStateChange(ctx); err != nil { + t.Fatalf("EDS impl got unexpected update: %v", err) } connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") @@ -577,17 +385,23 @@ func (s) TestErrorFromResolver(t *testing.T) { sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := edsLB.waitForEDSResponse(sCtx, xdsclient.EndpointsUpdate{}); err != context.DeadlineExceeded { + if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { t.Fatal("eds impl got EDS resp, want timeout error") } + if err := edsLB.waitForResolverError(ctx); err != nil { + t.Fatalf("want resolver 
error, got %v", err) + } resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error") edsB.ResolverError(resourceErr) if err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { t.Fatalf("want watch to be canceled, waitForCancel failed: %v", err) } - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("EDS impl got unexpected EDS response: %v", err) + if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { + t.Fatal(err) + } + if err := edsLB.waitForResolverError(ctx); err != nil { + t.Fatalf("want resolver error, got %v", err) } // An update with the same service name should trigger a new watch, because @@ -681,97 +495,49 @@ func (s) TestClientWatchEDS(t *testing.T) { } } -// TestCounterUpdate verifies that the counter update is triggered with the -// service name from an update's config. -func (s) TestCounterUpdate(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() +const ( + fakeBalancerA = "fake_balancer_A" + fakeBalancerB = "fake_balancer_B" +) - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() +// Install two fake balancers for service config update tests. +// +// ParseConfig only accepts the json if the balancer specified is registered. +func init() { + balancer.Register(&fakeBalancerBuilder{name: fakeBalancerA}) + balancer.Register(&fakeBalancerBuilder{name: fakeBalancerB}) +} - var testCountMax uint32 = 100 - // Update should trigger counter update with provided service name. 
- if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ - ClusterName: "foobar-1", - MaxConcurrentRequests: &testCountMax, - }, - }); err != nil { - t.Fatal(err) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsI := edsB.(*edsBalancer).edsImpl.(*fakeEDSBalancer) - if err := edsI.waitForCounterUpdate(ctx, "foobar-1"); err != nil { - t.Fatal(err) - } - if err := edsI.waitForCountMaxUpdate(ctx, &testCountMax); err != nil { - t.Fatal(err) - } +type fakeBalancerBuilder struct { + name string } -// TestClusterNameUpdateInAddressAttributes verifies that cluster name update in -// edsImpl is triggered with the update from a new service config. -func (s) TestClusterNameUpdateInAddressAttributes(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() +func (b *fakeBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &fakeBalancer{cc: cc} +} - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() +func (b *fakeBalancerBuilder) Name() string { + return b.name +} - // Update should trigger counter update with provided service name. 
- if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ - ClusterName: "foobar-1", - }, - }); err != nil { - t.Fatal(err) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - gotCluster, err := xdsC.WaitForWatchEDS(ctx) - if err != nil || gotCluster != "foobar-1" { - t.Fatalf("unexpected EDS watch: %v, %v", gotCluster, err) - } - edsI := edsB.(*edsBalancer).edsImpl.(*fakeEDSBalancer) - if err := edsI.waitForClusterNameUpdate(ctx, "foobar-1"); err != nil { - t.Fatal(err) - } +type fakeBalancer struct { + cc balancer.ClientConn +} - // Update should trigger counter update with provided service name. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ - ClusterName: "foobar-2", - }, - }); err != nil { - t.Fatal(err) - } - if err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { - t.Fatalf("failed to wait for EDS cancel: %v", err) - } - gotCluster2, err := xdsC.WaitForWatchEDS(ctx) - if err != nil || gotCluster2 != "foobar-2" { - t.Fatalf("unexpected EDS watch: %v, %v", gotCluster2, err) - } - if err := edsI.waitForClusterNameUpdate(ctx, "foobar-2"); err != nil { - t.Fatal(err) - } +func (b *fakeBalancer) ResolverError(error) { + panic("implement me") +} + +func (b *fakeBalancer) UpdateClientConnState(balancer.ClientConnState) error { + panic("implement me") } +func (b *fakeBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { + panic("implement me") +} + +func (b *fakeBalancer) Close() {} + func (s) TestBalancerConfigParsing(t *testing.T) { const testEDSName = "eds.service" var testLRSName = "lrs.server" @@ -910,30 +676,3 @@ func (s) TestBalancerConfigParsing(t *testing.T) { }) } } - -func (s) TestEqualStringPointers(t *testing.T) { - var ( - ta1 = "test-a" - ta2 = "test-a" - tb = "test-b" - ) - tests 
:= []struct { - name string - a *string - b *string - want bool - }{ - {"both-nil", nil, nil, true}, - {"a-non-nil", &ta1, nil, false}, - {"b-non-nil", nil, &tb, false}, - {"equal", &ta1, &ta2, true}, - {"different", &ta1, &tb, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := equalStringPointers(tt.a, tt.b); got != tt.want { - t.Errorf("equalStringPointers() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/xds/internal/balancer/edsbalancer/eds_testutil.go b/xds/internal/balancer/edsbalancer/eds_testutil_test.go similarity index 62% rename from xds/internal/balancer/edsbalancer/eds_testutil.go rename to xds/internal/balancer/edsbalancer/eds_testutil_test.go index f6d86e950d9a..c8be545d8b50 100644 --- a/xds/internal/balancer/edsbalancer/eds_testutil.go +++ b/xds/internal/balancer/edsbalancer/eds_testutil_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * Copyright 2020 gRPC authors. * @@ -19,13 +21,17 @@ package edsbalancer import ( "fmt" "net" + "reflect" "strconv" + "time" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" typepb "github.com/envoyproxy/go-control-plane/envoy/type" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -111,3 +117,70 @@ func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsclient.Endpoint { } return endpoints } + +// testPickerFromCh receives pickers from the channel, and check if their +// behaviors are as expected (that the given function returns nil err). +// +// It returns nil if one picker has the correct behavior. 
+// +// It returns error when there's no picker from channel after 1 second timeout, +// and the error returned is the mismatch error from the previous picker. +func testPickerFromCh(ch chan balancer.Picker, f func(balancer.Picker) error) error { + var ( + p balancer.Picker + err error + ) + for { + select { + case p = <-ch: + case <-time.After(defaultTestTimeout): + // TODO: this function should take a context, and use the context + // here, instead of making a new timer. + return fmt.Errorf("timeout waiting for picker with expected behavior, error from previous picker: %v", err) + } + + err = f(p) + if err == nil { + return nil + } + } +} + +func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { + return func() balancer.SubConn { + scst, _ := p.Pick(balancer.PickInfo{}) + return scst.SubConn + } +} + +// testRoundRobinPickerFromCh receives pickers from the channel, and check if +// their behaviors are round-robin of want. +// +// It returns nil if one picker has the correct behavior. +// +// It returns error when there's no picker from channel after 1 second timeout, +// and the error returned is the mismatch error from the previous picker. +func testRoundRobinPickerFromCh(ch chan balancer.Picker, want []balancer.SubConn) error { + return testPickerFromCh(ch, func(p balancer.Picker) error { + return testutils.IsRoundRobin(want, subConnFromPicker(p)) + }) +} + +// testErrPickerFromCh receives pickers from the channel, and check if they +// return the wanted error. +// +// It returns nil if one picker has the correct behavior. +// +// It returns error when there's no picker from channel after 1 second timeout, +// and the error returned is the mismatch error from the previous picker. 
+func testErrPickerFromCh(ch chan balancer.Picker, want error) error { + return testPickerFromCh(ch, func(p balancer.Picker) error { + for i := 0; i < 5; i++ { + _, err := p.Pick(balancer.PickInfo{}) + if !reflect.DeepEqual(err, want) { + return fmt.Errorf("picker.Pick, got err %q, want err %q", err, want) + } + } + return nil + }) +} diff --git a/xds/internal/balancer/edsbalancer/eds_watcher.go b/xds/internal/balancer/edsbalancer/eds_watcher.go new file mode 100644 index 000000000000..447c040580fd --- /dev/null +++ b/xds/internal/balancer/edsbalancer/eds_watcher.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package edsbalancer + +import ( + "google.golang.org/grpc/xds/internal/xdsclient" +) + +// watchUpdate wraps the information received from a registered EDS watcher. A +// non-nil error is propagated to the underlying child balancer. A valid update +// results in creating a new child balancer (priority balancer, if one doesn't +// already exist) and pushing the updated balancer config to it. +type watchUpdate struct { + eds xdsclient.EndpointsUpdate + err error +} + +// edsWatcher takes an EDS balancer config, and use the xds_client to watch EDS +// updates. The EDS updates are passed back to the balancer via a channel. 
+type edsWatcher struct { + parent *edsBalancer + + updateChannel chan *watchUpdate + + edsToWatch string + edsCancel func() +} + +func (ew *edsWatcher) updateConfig(config *EDSConfig) { + // If EDSServiceName is set, use it to watch EDS. Otherwise, use the cluster + // name. + newEDSToWatch := config.EDSServiceName + if newEDSToWatch == "" { + newEDSToWatch = config.ClusterName + } + + if ew.edsToWatch == newEDSToWatch { + return + } + + // Restart EDS watch when the eds name to watch has changed. + ew.edsToWatch = newEDSToWatch + + if ew.edsCancel != nil { + ew.edsCancel() + } + cancelEDSWatch := ew.parent.xdsClient.WatchEndpoints(newEDSToWatch, func(update xdsclient.EndpointsUpdate, err error) { + select { + case <-ew.updateChannel: + default: + } + ew.updateChannel <- &watchUpdate{eds: update, err: err} + }) + ew.parent.logger.Infof("Watch started on resource name %v with xds-client %p", newEDSToWatch, ew.parent.xdsClient) + ew.edsCancel = func() { + cancelEDSWatch() + ew.parent.logger.Infof("Watch cancelled on resource name %v with xds-client %p", newEDSToWatch, ew.parent.xdsClient) + } + +} + +// stopWatch stops the EDS watch. +// +// Call to updateConfig will restart the watch with the new name. +func (ew *edsWatcher) stopWatch() { + if ew.edsCancel != nil { + ew.edsCancel() + ew.edsCancel = nil + } + ew.edsToWatch = "" +} diff --git a/xds/internal/balancer/edsbalancer/util.go b/xds/internal/balancer/edsbalancer/util.go deleted file mode 100644 index fa9ada6c9dd6..000000000000 --- a/xds/internal/balancer/edsbalancer/util.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "google.golang.org/grpc/internal/wrr" - "google.golang.org/grpc/xds/internal/xdsclient" -) - -var newRandomWRR = wrr.NewRandom - -type dropper struct { - c xdsclient.OverloadDropConfig - w wrr.WRR -} - -func newDropper(c xdsclient.OverloadDropConfig) *dropper { - w := newRandomWRR() - w.Add(true, int64(c.Numerator)) - w.Add(false, int64(c.Denominator-c.Numerator)) - - return &dropper{ - c: c, - w: w, - } -} - -func (d *dropper) drop() (ret bool) { - return d.w.Next().(bool) -} diff --git a/xds/internal/balancer/edsbalancer/util_test.go b/xds/internal/balancer/edsbalancer/util_test.go deleted file mode 100644 index 2d08091fdfb5..000000000000 --- a/xds/internal/balancer/edsbalancer/util_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build go1.12 - -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package edsbalancer - -import ( - "testing" - - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient" -) - -func init() { - newRandomWRR = testutils.NewTestWRR -} - -func (s) TestDropper(t *testing.T) { - const repeat = 2 - - type args struct { - numerator uint32 - denominator uint32 - } - tests := []struct { - name string - args args - }{ - { - name: "2_3", - args: args{ - numerator: 2, - denominator: 3, - }, - }, - { - name: "4_8", - args: args{ - numerator: 4, - denominator: 8, - }, - }, - { - name: "7_20", - args: args{ - numerator: 7, - denominator: 20, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - d := newDropper(xdsclient.OverloadDropConfig{ - Category: "", - Numerator: tt.args.numerator, - Denominator: tt.args.denominator, - }) - - var ( - dCount int - wantCount = int(tt.args.numerator) * repeat - loopCount = int(tt.args.denominator) * repeat - ) - for i := 0; i < loopCount; i++ { - if d.drop() { - dCount++ - } - } - - if dCount != (wantCount) { - t.Errorf("with numerator %v, denominator %v repeat %v, got drop count: %v, want %v", - tt.args.numerator, tt.args.denominator, repeat, dCount, wantCount) - } - }) - } -} diff --git a/xds/internal/balancer/edsbalancer/xds_lrs_test.go b/xds/internal/balancer/edsbalancer/xds_lrs_test.go deleted file mode 100644 index 3dcbf5e259c7..000000000000 --- a/xds/internal/balancer/edsbalancer/xds_lrs_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package edsbalancer - -import ( - "context" - "testing" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" -) - -// TestXDSLoadReporting verifies that the edsBalancer starts the loadReport -// stream when the lbConfig passed to it contains a valid value for the LRS -// server (empty string). -func (s) TestXDSLoadReporting(t *testing.T) { - xdsC := fakeclient.NewClient() - defer xdsC.Close() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ - EDSServiceName: testEDSClusterName, - LrsLoadReportingServerName: new(string), - }, - }); err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - gotCluster, err := xdsC.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - if gotCluster != testEDSClusterName { - t.Fatalf("xdsClient.WatchEndpoints() called with cluster: %v, want %v", gotCluster, testEDSClusterName) - } - - got, err := xdsC.WaitForReportLoad(ctx) - if err != nil { - t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) - } - if got.Server != 
"" { - t.Fatalf("xdsClient.ReportLoad called with {%v}: want {\"\"}", got.Server) - } -} diff --git a/xds/internal/balancer/edsbalancer/xds_old.go b/xds/internal/balancer/edsbalancer/xds_old.go deleted file mode 100644 index 363ce7c9dd78..000000000000 --- a/xds/internal/balancer/edsbalancer/xds_old.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import "google.golang.org/grpc/balancer" - -// The old xds balancer implements logic for both CDS and EDS. With the new -// design, CDS is split and moved to a separate balancer, and the xds balancer -// becomes the EDS balancer. -// -// To keep the existing tests working, this file regisger EDS balancer under the -// old xds balancer name. -// -// TODO: delete this file when migration to new workflow (LDS, RDS, CDS, EDS) is -// done. - -const xdsName = "xds_experimental" - -func init() { - balancer.Register(xdsBalancerBuilder{}) -} - -// xdsBalancerBuilder register edsBalancerBuilder (now with name -// "eds_experimental") under the old name "xds_experimental". 
-type xdsBalancerBuilder struct { - bb -} - -func (xdsBalancerBuilder) Name() string { - return xdsName -} diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 672f17498b16..3ba0b9e929d7 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -28,7 +28,8 @@ import ( ) var ( - errAllPrioritiesRemoved = errors.New("no locality is provided, all priorities are removed") + // ErrAllPrioritiesRemoved is returned by the picker when there's no priority available. + ErrAllPrioritiesRemoved = errors.New("no priority is provided, all priorities are removed") // DefaultPriorityInitTimeout is the timeout after which if a priority is // not READY, the next will be started. It's exported to be overridden by // tests. @@ -73,7 +74,7 @@ func (b *priorityBalancer) syncPriority() { b.stopPriorityInitTimer() b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(errAllPrioritiesRemoved), + Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), }) return } diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 03442671311b..bc43a74557e6 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -828,8 +828,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // Test pick return TransientFailure. 
pFail := <-cc.NewPickerCh for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != errAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", errAllPrioritiesRemoved, err) + if _, err := pFail.Pick(balancer.PickInfo{}); err != ErrAllPrioritiesRemoved { + t.Fatalf("want pick error %v, got %v", ErrAllPrioritiesRemoved, err) } } @@ -1436,8 +1436,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { pFail := <-cc.NewPickerCh for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != errAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", errAllPrioritiesRemoved, err) + if _, err := pFail.Pick(balancer.PickInfo{}); err != ErrAllPrioritiesRemoved { + t.Fatalf("want pick error %v, got %v", ErrAllPrioritiesRemoved, err) } } diff --git a/xds/internal/xdsclient/requests_counter.go b/xds/internal/xdsclient/requests_counter.go index dacbc8fadb60..b7f94332da24 100644 --- a/xds/internal/xdsclient/requests_counter.go +++ b/xds/internal/xdsclient/requests_counter.go @@ -84,3 +84,11 @@ func ClearCounterForTesting(serviceName string) { } c.numRequests = 0 } + +// ClearAllCountersForTesting clears all the counters. Should be only used in +// tests. 
+func ClearAllCountersForTesting() { + src.mu.Lock() + defer src.mu.Unlock() + src.services = make(map[string]*ServiceRequestsCounter) +} From 4c651eda23d0bc60edc6c932ce60f1246a2a2034 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 16 Jun 2021 11:04:33 -0700 Subject: [PATCH 134/998] xds: move eds package to cluster_resolver (#4545) --- xds/internal/balancer/balancer.go | 12 ++-- .../balancer/cdsbalancer/cdsbalancer.go | 4 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 4 +- .../clusterresolver.go} | 60 +++++++++---------- .../clusterresolver_test.go} | 26 ++++---- .../config.go | 2 +- .../configbuilder.go | 2 +- .../configbuilder_test.go | 2 +- .../eds_impl_test.go | 14 ++--- .../eds_watcher.go | 4 +- .../logging.go | 6 +- .../priority_test.go} | 2 +- .../testutil_test.go} | 2 +- 13 files changed, 68 insertions(+), 72 deletions(-) rename xds/internal/balancer/{edsbalancer/eds.go => clusterresolver/clusterresolver.go} (86%) rename xds/internal/balancer/{edsbalancer/eds_test.go => clusterresolver/clusterresolver_test.go} (97%) rename xds/internal/balancer/{edsbalancer => clusterresolver}/config.go (99%) rename xds/internal/balancer/{edsbalancer => clusterresolver}/configbuilder.go (98%) rename xds/internal/balancer/{edsbalancer => clusterresolver}/configbuilder_test.go (99%) rename xds/internal/balancer/{edsbalancer => clusterresolver}/eds_impl_test.go (99%) rename xds/internal/balancer/{edsbalancer => clusterresolver}/eds_watcher.go (97%) rename xds/internal/balancer/{edsbalancer => clusterresolver}/logging.go (84%) rename xds/internal/balancer/{edsbalancer/eds_impl_priority_test.go => clusterresolver/priority_test.go} (99%) rename xds/internal/balancer/{edsbalancer/eds_testutil_test.go => clusterresolver/testutil_test.go} (99%) diff --git a/xds/internal/balancer/balancer.go b/xds/internal/balancer/balancer.go index 0da15b9b9dbc..86656736a61b 100644 --- a/xds/internal/balancer/balancer.go +++ b/xds/internal/balancer/balancer.go @@ -20,10 +20,10 @@ package 
balancer import ( - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer - _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer - _ "google.golang.org/grpc/xds/internal/balancer/edsbalancer" // Register the EDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer + _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer ) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index cb97353ff46a..b04d150a3110 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -34,7 +34,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/edsbalancer" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -314,7 +314,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { // is updated to cluster_resolver, which has the fallback functionality, we // will 
fix this to handle all the clusters in list. cds := update.chu[0] - lbCfg := &edsbalancer.EDSConfig{ + lbCfg := &clusterresolver.EDSConfig{ ClusterName: cds.ClusterName, EDSServiceName: cds.EDSServiceName, MaxConcurrentRequests: cds.MaxRequests, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index f36117620e68..206918a37d95 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -35,7 +35,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/edsbalancer" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" @@ -197,7 +197,7 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { // edsCCS is a helper function to construct a good update passed from the // cdsBalancer to the edsBalancer. func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { - lbCfg := &edsbalancer.EDSConfig{ + lbCfg := &clusterresolver.EDSConfig{ ClusterName: service, MaxConcurrentRequests: countMax, } diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/clusterresolver/clusterresolver.go similarity index 86% rename from xds/internal/balancer/edsbalancer/eds.go rename to xds/internal/balancer/clusterresolver/clusterresolver.go index cc486fe7b8c8..f61b56b9a2cf 100644 --- a/xds/internal/balancer/edsbalancer/eds.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -16,8 +16,8 @@ * */ -// Package edsbalancer contains EDS balancer implementation. -package edsbalancer +// Package clusterresolver contains EDS balancer implementation. 
+package clusterresolver import ( "encoding/json" @@ -38,7 +38,8 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" ) -const edsName = "eds_experimental" +// Name is the name of the cluster_resolver balancer. +const Name = "eds_experimental" var ( errBalancerClosed = errors.New("cdsBalancer is closed") @@ -66,7 +67,7 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal return nil } - b := &edsBalancer{ + b := &clusterResolverBalancer{ cc: cc, bOpts: opts, updateCh: buffer.NewUnbounded(), @@ -88,7 +89,7 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal } func (bb) Name() string { - return edsName + return Name } func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { @@ -113,11 +114,9 @@ type scUpdate struct { state balancer.SubConnState } -// edsBalancer manages xdsClient and the actual EDS balancer implementation that -// does load balancing. -// -// It currently has only an edsBalancer. Later, we may add fallback. -type edsBalancer struct { +// clusterResolverBalancer manages xdsClient and the actual EDS balancer +// implementation that does load balancing. +type clusterResolverBalancer struct { cc balancer.ClientConn bOpts balancer.BuildOptions updateCh *buffer.Unbounded // Channel for updates from gRPC. @@ -134,23 +133,24 @@ type edsBalancer struct { xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. - child balancer.Balancer - edsResp xdsclient.EndpointsUpdate - edsRespReceived bool + child balancer.Balancer + edsResp xdsclient.EndpointsUpdate + watchUpdateReceived bool } // handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good // updates lead to registration of an EDS watch. Updates with error lead to // cancellation of existing watch and propagation of the same error to the // child balancer. 
-func (b *edsBalancer) handleClientConnUpdate(update *ccUpdate) { +func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { // We first handle errors, if any, and then proceed with handling the // update, only if the status quo has changed. if err := update.err; err != nil { b.handleErrorFromUpdate(err, true) + return } - b.logger.Infof("Receive update from resolver, balancer config: %+v", update.state.BalancerConfig) + b.logger.Infof("Receive update from resolver, balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) cfg, _ := update.state.BalancerConfig.(*EDSConfig) if cfg == nil { b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", update.state.BalancerConfig) @@ -161,8 +161,8 @@ func (b *edsBalancer) handleClientConnUpdate(update *ccUpdate) { b.configRaw = update.state.ResolverState.ServiceConfig b.edsWatcher.updateConfig(cfg) - if !b.edsRespReceived { - // If eds resp was not received, wait for it. + if !b.watchUpdateReceived { + // If update was not received, wait for it. return } // If eds resp was received before this, the child policy was created. We @@ -175,15 +175,15 @@ func (b *edsBalancer) handleClientConnUpdate(update *ccUpdate) { // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying child balancer. -func (b *edsBalancer) handleWatchUpdate(update *watchUpdate) { +func (b *clusterResolverBalancer) handleWatchUpdate(update *watchUpdate) { if err := update.err; err != nil { b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) b.handleErrorFromUpdate(err, false) return } - b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, pretty.ToJSON(update.eds)) - b.edsRespReceived = true + b.logger.Infof("resource update: %+v", pretty.ToJSON(update.eds)) + b.watchUpdateReceived = true b.edsResp = update.eds // A new EDS update triggers new child configs (e.g. 
different priorities @@ -199,7 +199,7 @@ func (b *edsBalancer) handleWatchUpdate(update *watchUpdate) { // generates the addresses, because the endpoints come from the EDS resp. // // If child balancer doesn't already exist, one will be created. -func (b *edsBalancer) updateChildConfig() error { +func (b *clusterResolverBalancer) updateChildConfig() error { // Child was build when the first EDS resp was received, so we just build // the config and addresses. if b.child == nil { @@ -237,7 +237,7 @@ func (b *edsBalancer) updateChildConfig() error { // - If it's from xds client, it means EDS resource were removed. The EDS // watcher should keep watching. // In both cases, the sub-balancers will be receive the error. -func (b *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { +func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { b.logger.Warningf("Received error: %v", err) if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { // This is an error from the parent ClientConn (can be the parent CDS @@ -260,7 +260,7 @@ func (b *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { // run is a long-running goroutine which handles all updates from gRPC and // xdsClient. All methods which are invoked directly by gRPC or xdsClient simply // push an update onto a channel which is read and acted upon right here. -func (b *edsBalancer) run() { +func (b *clusterResolverBalancer) run() { for { select { case u := <-b.updateCh.Get(): @@ -302,9 +302,9 @@ func (b *edsBalancer) run() { // UpdateClientConnState receives the serviceConfig (which contains the // clusterName to watch for in CDS) and the xdsClient object from the // xdsResolver. 
-func (b *edsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { +func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after edsBalancer was closed", state) + b.logger.Warningf("xds: received ClientConnState {%+v} after clusterResolverBalancer was closed", state) return errBalancerClosed } @@ -322,25 +322,25 @@ func (b *edsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro } // ResolverError handles errors reported by the xdsResolver. -func (b *edsBalancer) ResolverError(err error) { +func (b *clusterResolverBalancer) ResolverError(err error) { if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%v} after edsBalancer was closed", err) + b.logger.Warningf("xds: received resolver error {%v} after clusterResolverBalancer was closed", err) return } b.updateCh.Put(&ccUpdate{err: err}) } // UpdateSubConnState handles subConn updates from gRPC. -func (b *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { if b.closed.HasFired() { - b.logger.Warningf("xds: received subConn update {%v, %v} after edsBalancer was closed", sc, state) + b.logger.Warningf("xds: received subConn update {%v, %v} after clusterResolverBalancer was closed", sc, state) return } b.updateCh.Put(&scUpdate{subConn: sc, state: state}) } // Close closes the cdsBalancer and the underlying child balancer. 
-func (b *edsBalancer) Close() { +func (b *clusterResolverBalancer) Close() { b.closed.Fire() <-b.done.Done() } diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go similarity index 97% rename from xds/internal/balancer/edsbalancer/eds_test.go rename to xds/internal/balancer/clusterresolver/clusterresolver_test.go index 15cc10ba89df..ea8ac4e419f4 100644 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -18,7 +18,7 @@ * */ -package edsbalancer +package clusterresolver import ( "bytes" @@ -177,7 +177,7 @@ func (*fakeSubConn) UpdateAddresses([]resolver.Address) { panic("implement me") func (*fakeSubConn) Connect() { panic("implement me") } // waitForNewChildLB makes sure that a new child LB is created by the top-level -// edsBalancer. +// clusterResolverBalancer. func waitForNewChildLB(ctx context.Context, ch *testutils.Channel) (*fakeChildBalancer, error) { val, err := ch.Receive(ctx) if err != nil { @@ -205,17 +205,17 @@ func setup(childLBCh *testutils.Channel) (*fakeclient.Client, func()) { } } -// TestSubConnStateChange verifies if the top-level edsBalancer passes on +// TestSubConnStateChange verifies if the top-level clusterResolverBalancer passes on // the subConnState to appropriate child balancer. 
func (s) TestSubConnStateChange(t *testing.T) { edsLBCh := testutils.NewChannel() xdsC, cleanup := setup(edsLBCh) defer cleanup() - builder := balancer.Get(edsName) + builder := balancer.Get(Name) edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) + t.Fatalf("builder.Build(%s) failed and returned nil", Name) } defer edsB.Close() @@ -258,10 +258,10 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { xdsC, cleanup := setup(edsLBCh) defer cleanup() - builder := balancer.Get(edsName) + builder := balancer.Get(Name) edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) + t.Fatalf("builder.Build(%s) failed and returned nil", Name) } defer edsB.Close() @@ -303,7 +303,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { t.Fatalf("want resolver error, got %v", err) } - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error") + resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, resourceErr) // Even if error is resource not found, watch shouldn't be canceled, because // this is an EDS resource removed (and xds client actually never sends this @@ -346,10 +346,10 @@ func (s) TestErrorFromResolver(t *testing.T) { xdsC, cleanup := setup(edsLBCh) defer cleanup() - builder := balancer.Get(edsName) + builder := balancer.Get(Name) edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) + t.Fatalf("builder.Build(%s) failed and returned nil", Name) } 
defer edsB.Close() @@ -392,7 +392,7 @@ func (s) TestErrorFromResolver(t *testing.T) { t.Fatalf("want resolver error, got %v", err) } - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error") + resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") edsB.ResolverError(resourceErr) if err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { t.Fatalf("want watch to be canceled, waitForCancel failed: %v", err) @@ -448,10 +448,10 @@ func (s) TestClientWatchEDS(t *testing.T) { xdsC, cleanup := setup(edsLBCh) defer cleanup() - builder := balancer.Get(edsName) + builder := balancer.Get(Name) edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) + t.Fatalf("builder.Build(%s) failed and returned nil", Name) } defer edsB.Close() diff --git a/xds/internal/balancer/edsbalancer/config.go b/xds/internal/balancer/clusterresolver/config.go similarity index 99% rename from xds/internal/balancer/edsbalancer/config.go rename to xds/internal/balancer/clusterresolver/config.go index d1583e2bf276..0741d6586ae3 100644 --- a/xds/internal/balancer/edsbalancer/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package edsbalancer +package clusterresolver import ( "encoding/json" diff --git a/xds/internal/balancer/edsbalancer/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go similarity index 98% rename from xds/internal/balancer/edsbalancer/configbuilder.go rename to xds/internal/balancer/clusterresolver/configbuilder.go index 7418a9de8024..3dd3b5309248 100644 --- a/xds/internal/balancer/edsbalancer/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -16,7 +16,7 @@ * */ -package edsbalancer +package clusterresolver import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" diff --git a/xds/internal/balancer/edsbalancer/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go similarity index 99% rename from xds/internal/balancer/edsbalancer/configbuilder_test.go rename to xds/internal/balancer/clusterresolver/configbuilder_test.go index b9e76dc1cb00..31f17fde7a74 100644 --- a/xds/internal/balancer/edsbalancer/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -16,7 +16,7 @@ * */ -package edsbalancer +package clusterresolver import ( "fmt" diff --git a/xds/internal/balancer/edsbalancer/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go similarity index 99% rename from xds/internal/balancer/edsbalancer/eds_impl_test.go rename to xds/internal/balancer/clusterresolver/eds_impl_test.go index 79d34bf8af9e..fa2d26cb08c2 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package edsbalancer +package clusterresolver import ( "context" @@ -62,10 +62,10 @@ func init() { func setupTestEDS(t *testing.T) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) cc := testutils.NewTestClientConn(t) - builder := balancer.Get(edsName) + builder := balancer.Get(Name) edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) if edsb == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) + t.Fatalf("builder.Build(%s) failed and returned nil", Name) } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -74,10 +74,12 @@ func setupTestEDS(t *testing.T) (balancer.Balancer, *testutils.TestClientConn, * BalancerConfig: &EDSConfig{ClusterName: testClusterName}, }); err != nil { edsb.Close() + xdsC.Close() t.Fatal(err) } if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { edsb.Close() + xdsC.Close() t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } return edsb, cc, xdsC, func() { @@ -480,7 +482,6 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { }); err != nil { t.Fatal(err) } - // Two localities, each with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) @@ -502,7 +503,6 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { }); err != nil { t.Fatal(err) } - for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } @@ -524,7 +524,6 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { }); err != nil { t.Fatal(err) } - for i := 0; i < 2; i++ { scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && @@ -549,7 +548,6 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { }); err != nil { t.Fatal(err) } - for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } @@ -580,7 +578,6 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { }); err != nil { t.Fatal(err) } - // One locality with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) @@ -641,7 +638,6 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { }); err != nil { t.Fatal(err) } - // Picks with drops. dones = []func(){} p2 := <-cc.NewPickerCh diff --git a/xds/internal/balancer/edsbalancer/eds_watcher.go b/xds/internal/balancer/clusterresolver/eds_watcher.go similarity index 97% rename from xds/internal/balancer/edsbalancer/eds_watcher.go rename to xds/internal/balancer/clusterresolver/eds_watcher.go index 447c040580fd..02186702c0ec 100644 --- a/xds/internal/balancer/edsbalancer/eds_watcher.go +++ b/xds/internal/balancer/clusterresolver/eds_watcher.go @@ -16,7 +16,7 @@ * */ -package edsbalancer +package clusterresolver import ( "google.golang.org/grpc/xds/internal/xdsclient" @@ -34,7 +34,7 @@ type watchUpdate struct { // edsWatcher takes an EDS balancer config, and use the xds_client to watch EDS // updates. The EDS updates are passed back to the balancer via a channel. 
type edsWatcher struct { - parent *edsBalancer + parent *clusterResolverBalancer updateChannel chan *watchUpdate diff --git a/xds/internal/balancer/edsbalancer/logging.go b/xds/internal/balancer/clusterresolver/logging.go similarity index 84% rename from xds/internal/balancer/edsbalancer/logging.go rename to xds/internal/balancer/clusterresolver/logging.go index be4d0a512d16..728f1f709c28 100644 --- a/xds/internal/balancer/edsbalancer/logging.go +++ b/xds/internal/balancer/clusterresolver/logging.go @@ -16,7 +16,7 @@ * */ -package edsbalancer +package clusterresolver import ( "fmt" @@ -25,10 +25,10 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const prefix = "[eds-lb %p] " +const prefix = "[xds-cluster-resolver-lb %p] " var logger = grpclog.Component("xds") -func prefixLogger(p *edsBalancer) *internalgrpclog.PrefixLogger { +func prefixLogger(p *clusterResolverBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } diff --git a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go similarity index 99% rename from xds/internal/balancer/edsbalancer/eds_impl_priority_test.go rename to xds/internal/balancer/clusterresolver/priority_test.go index 5674411159b8..c7c648a2c0fa 100644 --- a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -package edsbalancer +package clusterresolver import ( "context" diff --git a/xds/internal/balancer/edsbalancer/eds_testutil_test.go b/xds/internal/balancer/clusterresolver/testutil_test.go similarity index 99% rename from xds/internal/balancer/edsbalancer/eds_testutil_test.go rename to xds/internal/balancer/clusterresolver/testutil_test.go index c8be545d8b50..0025846c07c9 100644 --- a/xds/internal/balancer/edsbalancer/eds_testutil_test.go +++ b/xds/internal/balancer/clusterresolver/testutil_test.go @@ -16,7 +16,7 @@ * limitations under the License. */ -package edsbalancer +package clusterresolver import ( "fmt" From 7e3535650101d07525dbbfe398caf82f4ea1a6c8 Mon Sep 17 00:00:00 2001 From: Konrad Reiche Date: Wed, 16 Jun 2021 16:56:04 -0700 Subject: [PATCH 135/998] metadata: add Delete method to MD (#4549) --- metadata/metadata.go | 7 +++++++ metadata/metadata_test.go | 29 +++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/metadata/metadata.go b/metadata/metadata.go index 8d9686375a13..3604c7819fdc 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -123,6 +123,13 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. 
// // The order of values for each key is determined by the order in which the mds diff --git a/metadata/metadata_test.go b/metadata/metadata_test.go index f1fb5f6d324e..89be06eaada0 100644 --- a/metadata/metadata_test.go +++ b/metadata/metadata_test.go @@ -169,6 +169,35 @@ func (s) TestAppend(t *testing.T) { } } +func (s) TestDelete(t *testing.T) { + for _, test := range []struct { + md MD + deleteKey string + want MD + }{ + { + md: Pairs("My-Optional-Header", "42"), + deleteKey: "My-Optional-Header", + want: Pairs(), + }, + { + md: Pairs("My-Optional-Header", "42"), + deleteKey: "Other-Key", + want: Pairs("my-optional-header", "42"), + }, + { + md: Pairs("My-Optional-Header", "42"), + deleteKey: "my-OptIoNal-HeAder", + want: Pairs(), + }, + } { + test.md.Delete(test.deleteKey) + if !reflect.DeepEqual(test.md, test.want) { + t.Errorf("value of metadata is %v, want %v", test.md, test.want) + } + } +} + func (s) TestAppendToOutgoingContext(t *testing.T) { // Pre-existing metadata tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) From 633fbe4dfee2289937bafe9c08ccb46d045c0310 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 17 Jun 2021 09:00:05 -0400 Subject: [PATCH 136/998] xds: generate per-request hash config selector (#4525) * xds: generate per-request hash in config selector --- examples/go.sum | 6 ++ go.mod | 1 + go.sum | 6 ++ internal/grpcrand/grpcrand.go | 7 ++ security/advancedtls/examples/go.sum | 3 + security/advancedtls/go.sum | 3 + xds/internal/balancer/ringhash/util.go | 41 ++++++++++++ xds/internal/resolver/serviceconfig.go | 74 ++++++++++++++++++++- xds/internal/resolver/serviceconfig_test.go | 74 +++++++++++++++++++++ xds/internal/resolver/xds_resolver_test.go | 69 +++++++++++++++++++ xds/internal/xdsclient/rds_test.go | 58 ++++++++++++++++ xds/internal/xdsclient/xds.go | 14 ++-- 12 files changed, 348 insertions(+), 8 deletions(-) create mode 100644 
xds/internal/balancer/ringhash/util.go diff --git a/examples/go.sum b/examples/go.sum index a5b967336903..4b9be0bebf85 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1,8 +1,12 @@ cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= @@ -38,6 +42,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= diff --git a/go.mod b/go.mod index 6eed9370b462..2f2cf1eb7669 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module google.golang.org/grpc go 1.11 require ( + github.com/cespare/xxhash v1.1.0 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b diff --git a/go.sum b/go.sum index 51fd1436e38f..372b4ea3d201 100644 --- a/go.sum +++ b/go.sum @@ -2,9 +2,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= @@ -50,6 +54,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= diff --git a/internal/grpcrand/grpcrand.go b/internal/grpcrand/grpcrand.go index 7bc3583b5fcc..740f83c2b766 100644 --- a/internal/grpcrand/grpcrand.go +++ b/internal/grpcrand/grpcrand.go @@ -58,3 +58,10 @@ func Float64() float64 { defer mu.Unlock() return r.Float64() } + +// Uint64 implements rand.Uint64 on the grpcrand global source. 
+func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index c24d070fa7f1..a08a6ac80cd7 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,6 +1,8 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -30,6 +32,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index c24d070fa7f1..a08a6ac80cd7 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,6 +1,8 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -30,6 +32,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= go.opentelemetry.io/proto/otlp 
v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= diff --git a/xds/internal/balancer/ringhash/util.go b/xds/internal/balancer/ringhash/util.go new file mode 100644 index 000000000000..848b20844d89 --- /dev/null +++ b/xds/internal/balancer/ringhash/util.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package ringhash contains the functionality to support Ring Hash in grpc. +package ringhash + +import "context" + +type clusterKey struct{} + +func getRequestHash(ctx context.Context) uint64 { + requestHash, _ := ctx.Value(clusterKey{}).(uint64) + return requestHash +} + +// GetRequestHashForTesting returns the request hash in the context; to be used +// for testing only. +func GetRequestHashForTesting(ctx context.Context) uint64 { + return getRequestHash(ctx) +} + +// SetRequestHash adds the request hash to the context for use in Ring Hash Load +// Balancing. 
+func SetRequestHash(ctx context.Context, requestHash uint64) context.Context { + return context.WithValue(ctx, clusterKey{}, requestHash) +} diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 2521b0d0193e..9eaff52dbcce 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -22,14 +22,21 @@ import ( "context" "encoding/json" "fmt" + "math/bits" + "strings" "sync/atomic" "time" + "github.com/cespare/xxhash" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/clustermanager" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/xdsclient" @@ -115,6 +122,7 @@ type route struct { maxStreamDuration time.Duration // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig + hashPolicies []*xdsclient.HashPolicy } func (r route) String() string { @@ -160,9 +168,15 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP return nil, err } + lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name) + // Request Hashes are only applicable for a Ring Hash LB. + if env.RingHashSupport { + lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies)) + } + config := &iresolver.RPCConfig{ - // Communicate to the LB policy the chosen cluster. - Context: clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name), + // Communicate to the LB policy the chosen cluster and request hash, if Ring Hash LB policy. 
+ Context: lbCtx, OnCommitted: func() { // When the RPC is committed, the cluster is no longer required. // Decrease its ref. @@ -185,6 +199,61 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP return config, nil } +func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsclient.HashPolicy) uint64 { + var hash uint64 + var generatedHash bool + for _, policy := range hashPolicies { + var policyHash uint64 + var generatedPolicyHash bool + switch policy.HashPolicyType { + case xdsclient.HashPolicyTypeHeader: + md, ok := metadata.FromIncomingContext(rpcInfo.Context) + if !ok { + continue + } + values := md.Get(policy.HeaderName) + // If the header isn't present, no-op. + if len(values) == 0 { + continue + } + joinedValues := strings.Join(values, ",") + if policy.Regex != nil { + joinedValues = policy.Regex.ReplaceAllString(joinedValues, policy.RegexSubstitution) + } + policyHash = xxhash.Sum64String(joinedValues) + generatedHash = true + generatedPolicyHash = true + case xdsclient.HashPolicyTypeChannelID: + // Hash the ClientConn pointer which logically uniquely + // identifies the client. + policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)) + generatedHash = true + generatedPolicyHash = true + } + + // Deterministically combine the hash policies. Rotating prevents + // duplicate hash policies from cancelling each other out and preserves + // the 64 bits of entropy. + if generatedPolicyHash { + hash = bits.RotateLeft64(hash, 1) + hash = hash ^ policyHash + } + + // If terminal policy and a hash has already been generated, ignore the + // rest of the policies and use that hash already generated. + if policy.Terminal && generatedHash { + break + } + } + + if generatedHash { + return hash + } + // If no generated hash return a random long. In the grand scheme of things + // this logically will map to choosing a random backend to route request to. 
+ return grpcrand.Uint64() +} + func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { if len(cs.httpFilterConfig) == 0 { return nil, nil @@ -292,6 +361,7 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro } cs.routes[i].httpFilterConfigOverride = rt.HTTPFilterConfigOverride + cs.routes[i].hashPolicies = rt.HashPolicies } // Account for this config selector's clusters. Do this after no further diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index 7fe8218160f5..b0bc86c882b3 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -21,10 +21,17 @@ package resolver import ( + "context" + "fmt" + "regexp" "testing" + "github.com/cespare/xxhash" "github.com/google/go-cmp/cmp" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/metadata" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config + "google.golang.org/grpc/xds/internal/xdsclient" ) func (s) TestPruneActiveClusters(t *testing.T) { @@ -43,3 +50,70 @@ func (s) TestPruneActiveClusters(t *testing.T) { t.Fatalf("r.activeClusters = %v; want %v\nDiffs: %v", r.activeClusters, want, d) } } + +func (s) TestGenerateRequestHash(t *testing.T) { + cs := &configSelector{ + r: &xdsResolver{ + cc: &testClientConn{}, + }, + } + tests := []struct { + name string + hashPolicies []*xdsclient.HashPolicy + requestHashWant uint64 + rpcInfo iresolver.RPCInfo + }{ + // TestGenerateRequestHashHeaders tests generating request hashes for + // hash policies that specify to hash headers. 
+ { + name: "test-generate-request-hash-headers", + hashPolicies: []*xdsclient.HashPolicy{{ + HashPolicyType: xdsclient.HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), // Will replace /products with /new-products, to test find and replace functionality. + RegexSubstitution: "/new-products", + }}, + requestHashWant: xxhash.Sum64String("/new-products"), + rpcInfo: iresolver.RPCInfo{ + Context: metadata.NewIncomingContext(context.Background(), metadata.Pairs(":path", "/products")), + Method: "/some-method", + }, + }, + // TestGenerateHashChannelID tests generating request hashes for hash + // policies that specify to hash something that uniquely identifies the + // ClientConn (the pointer). + { + name: "test-generate-request-hash-channel-id", + hashPolicies: []*xdsclient.HashPolicy{{ + HashPolicyType: xdsclient.HashPolicyTypeChannelID, + }}, + requestHashWant: xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)), + rpcInfo: iresolver.RPCInfo{}, + }, + // TestGenerateRequestHashEmptyString tests generating request hashes + // for hash policies that specify to hash headers and replace empty + // strings in the headers. 
+ { + name: "test-generate-request-hash-empty-string", + hashPolicies: []*xdsclient.HashPolicy{{ + HashPolicyType: xdsclient.HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("") }(), + RegexSubstitution: "e", + }}, + requestHashWant: xxhash.Sum64String("eaebece"), + rpcInfo: iresolver.RPCInfo{ + Context: metadata.NewIncomingContext(context.Background(), metadata.Pairs(":path", "abc")), + Method: "/some-method", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + requestHashGot := cs.generateHash(test.rpcInfo, test.hashPolicies) + if requestHashGot != test.requestHashWant { + t.Fatalf("requestHashGot = %v, requestHashWant = %v", requestHashGot, test.requestHashWant) + } + }) + } +} diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index a519557df6ac..9bce8ffe8bf6 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "github.com/cespare/xxhash" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" @@ -38,11 +39,14 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config "google.golang.org/grpc/xds/internal/balancer/clustermanager" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" xdstestutils "google.golang.org/grpc/xds/internal/testutils" @@ -454,6 +458,71 @@ func (s) 
TestXDSResolverGoodServiceUpdate(t *testing.T) { } } +// TestXDSResolverRequestHash tests a case where a resolver receives a RouteConfig update +// with a HashPolicy specifying to generate a hash. The configSelector generated should +// successfully generate a Hash. +func (s) TestXDSResolverRequestHash(t *testing.T) { + oldRH := env.RingHashSupport + env.RingHashSupport = true + defer func() { env.RingHashSupport = oldRH }() + + xdsC := fakeclient.NewClient() + xdsR, tcc, cancel := testSetup(t, setupOpts{ + xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, + }) + defer xdsR.Close() + defer cancel() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + waitForWatchListener(ctx, t, xdsC, targetStr) + xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + // Invoke watchAPI callback with a good service update (with hash policies + // specified) and wait for UpdateState method to be called on ClientConn. 
+ xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + VirtualHosts: []*xdsclient.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsclient.Route{{ + Prefix: newStringP(""), + WeightedClusters: map[string]xdsclient.WeightedCluster{ + "cluster_1": {Weight: 75}, + "cluster_2": {Weight: 25}, + }, + HashPolicies: []*xdsclient.HashPolicy{{ + HashPolicyType: xdsclient.HashPolicyTypeHeader, + HeaderName: ":path", + }}, + }}, + }, + }, + }, nil) + + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotState, err := tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for UpdateState to be called: %v", err) + } + rState := gotState.(resolver.State) + cs := iresolver.GetConfigSelector(rState) + if cs == nil { + t.Error("received nil config selector") + } + // Selecting a config when there was a hash policy specified in the route + // that will be selected should put a request hash in the config's context. + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: metadata.NewIncomingContext(context.Background(), metadata.Pairs(":path", "/products"))}) + if err != nil { + t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + } + requestHashGot := ringhash.GetRequestHashForTesting(res.Context) + requestHashWant := xxhash.Sum64String("/products") + if requestHashGot != requestHashWant { + t.Fatalf("requestHashGot = %v, requestHashWant = %v", requestHashGot, requestHashWant) + } +} + // TestXDSResolverRemovedWithRPCs tests the case where a config selector sends // an empty update to the resolver after the resource is removed. 
func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index b78a49a3e31a..15a2afee1f7f 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -1209,6 +1209,64 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }}, wantErr: false, }, + // This tests that policy.Regex ends up being nil if RegexRewrite is not + // set in xds response. + { + name: "good-with-header-hash-policy-no-regex-specified", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{ + PrefixMatch: "tv", + }, + InvertMatch: true, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + TotalWeight: &wrapperspb.UInt32Value{Value: 100}, + }}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{Header: &v3routepb.RouteAction_HashPolicy_Header{HeaderName: ":path"}}}, + }, + }}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(true), + PrefixMatch: newStringP("tv"), + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + HashPolicies: []*HashPolicy{ + {HashPolicyType: HashPolicyTypeHeader, + 
HeaderName: ":path"}, + }, + }}, + wantErr: false, + }, { name: "with custom HTTP filter config", routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index b4b0dd1a45c1..8dfb573f335f 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -575,13 +575,15 @@ func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logg case *v3routepb.RouteAction_HashPolicy_Header_: policy.HashPolicyType = HashPolicyTypeHeader policy.HeaderName = p.GetHeader().GetHeaderName() - regex := p.GetHeader().GetRegexRewrite().GetPattern().GetRegex() - re, err := regexp.Compile(regex) - if err != nil { - return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + if rr := p.GetHeader().GetRegexRewrite(); rr != nil { + regex := rr.GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = rr.GetSubstitution() } - policy.Regex = re - policy.RegexSubstitution = p.GetHeader().GetRegexRewrite().GetSubstitution() case *v3routepb.RouteAction_HashPolicy_FilterState_: if p.GetFilterState().GetKey() != "io.grpc.channel_id" { logger.Infof("hash policy %+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) From 00ae0c57cc0a418f5208906d4f68c4b682dc662c Mon Sep 17 00:00:00 2001 From: Aliaksandr Mianzhynski Date: Thu, 17 Jun 2021 20:23:18 +0300 Subject: [PATCH 137/998] xds: require router filter when filters are empty (#4553) --- xds/internal/resolver/serviceconfig.go | 3 --- xds/internal/resolver/xds_resolver_test.go | 5 +++++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 9eaff52dbcce..bf1c53fc93b8 100644 --- 
a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -255,9 +255,6 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ } func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { - if len(cs.httpFilterConfig) == 0 { - return nil, nil - } interceptors := make([]iresolver.ClientInterceptor, 0, len(cs.httpFilterConfig)) for _, filter := range cs.httpFilterConfig { if router.IsRouterFilter(filter.Filter) { diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 9bce8ffe8bf6..46229823580c 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -1197,6 +1197,11 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { selectErr string newStreamErr string }{ + { + name: "empty filters", + ldsFilters: []xdsclient.HTTPFilter{}, + selectErr: "no router filter present", + }, { name: "no router filter", ldsFilters: []xdsclient.HTTPFilter{ From 151c8b770a05e77528859076e2869405ac403d1a Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 17 Jun 2021 11:14:00 -0700 Subject: [PATCH 138/998] xds/clusterimpl: fix race between picker update and ClientConn state update (#4551) --- .../balancer/clusterimpl/clusterimpl.go | 108 ++++++++++-------- 1 file changed, 61 insertions(+), 47 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index f5fa7c12589b..34e7171d2c70 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -107,14 +107,17 @@ type clusterImplBalancer struct { clusterNameMu sync.Mutex clusterName string - // childState/drops/requestCounter can only be accessed in run(). And run() - // is the only goroutine that sends picker to the parent ClientConn. 
All + // childState/drops/requestCounter keeps the state used by the most recently + // generated picker. All fields can only be accessed in run(). And run() is + // the only goroutine that sends picker to the parent ClientConn. All // requests to update picker need to be sent to pickerUpdateCh. - childState balancer.State - drops []*dropper - requestCounter *xdsclient.ServiceRequestsCounter - requestCountMax uint32 - pickerUpdateCh *buffer.Unbounded + childState balancer.State + dropCategories []DropConfig // The categories for drops. + drops []*dropper + requestCounterCluster string // The cluster name for the request counter. + requestCounter *xdsclient.ServiceRequestsCounter + requestCountMax uint32 + pickerUpdateCh *buffer.Unbounded } // updateLoadStore checks the config for load store, and decides whether it @@ -205,41 +208,6 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) return err } - // Compare new drop config. And update picker if it's changed. - var updatePicker bool - if b.config == nil || !equalDropCategories(b.config.DropCategories, newConfig.DropCategories) { - b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) - for _, c := range newConfig.DropCategories { - b.drops = append(b.drops, newDropper(c)) - } - updatePicker = true - } - - // Compare cluster name. And update picker if it's changed, because circuit - // breaking's stream counter will be different. - if b.config == nil || b.config.Cluster != newConfig.Cluster { - b.requestCounter = xdsclient.GetServiceRequestsCounter(newConfig.Cluster) - updatePicker = true - } - // Compare upper bound of stream count. And update picker if it's changed. - // This is also for circuit breaking. 
- var newRequestCountMax uint32 = 1024 - if newConfig.MaxConcurrentRequests != nil { - newRequestCountMax = *newConfig.MaxConcurrentRequests - } - if b.requestCountMax != newRequestCountMax { - b.requestCountMax = newRequestCountMax - updatePicker = true - } - - if updatePicker { - b.pickerUpdateCh.Put(&dropConfigs{ - drops: b.drops, - requestCounter: b.requestCounter, - requestCountMax: b.requestCountMax, - }) - } - // If child policy is a different type, recreate the sub-balancer. if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { if b.childLB != nil { @@ -259,6 +227,10 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) return fmt.Errorf("child policy is nil, this means balancer %q's Build() returned nil", newConfig.ChildPolicy.Name) } + // Notify run() of this new config, in case drop and request counter need + // update (which means a new picker needs to be generated). + b.pickerUpdateCh.Put(newConfig) + // Addresses and sub-balancer config are sent to sub-balancer. return b.childLB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, @@ -355,6 +327,49 @@ type dropConfigs struct { requestCountMax uint32 } +// handleDropAndRequestCount compares drop and request counter in newConfig with +// the one currently used by picker. It returns a new dropConfigs if a new +// picker needs to be generated, otherwise it returns nil. +func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dropConfigs { + // Compare new drop config. And update picker if it's changed. + var updatePicker bool + if !equalDropCategories(b.dropCategories, newConfig.DropCategories) { + b.dropCategories = newConfig.DropCategories + b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) + for _, c := range newConfig.DropCategories { + b.drops = append(b.drops, newDropper(c)) + } + updatePicker = true + } + + // Compare cluster name. 
And update picker if it's changed, because circuit + // breaking's stream counter will be different. + if b.requestCounterCluster != newConfig.Cluster { + b.requestCounterCluster = newConfig.Cluster + b.requestCounter = xdsclient.GetServiceRequestsCounter(newConfig.Cluster) + updatePicker = true + } + // Compare upper bound of stream count. And update picker if it's changed. + // This is also for circuit breaking. + var newRequestCountMax uint32 = 1024 + if newConfig.MaxConcurrentRequests != nil { + newRequestCountMax = *newConfig.MaxConcurrentRequests + } + if b.requestCountMax != newRequestCountMax { + b.requestCountMax = newRequestCountMax + updatePicker = true + } + + if !updatePicker { + return nil + } + return &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + } +} + func (b *clusterImplBalancer) run() { defer b.done.Fire() for { @@ -377,13 +392,12 @@ func (b *clusterImplBalancer) run() { requestCountMax: b.requestCountMax, }, b.loadWrapper), }) - case *dropConfigs: - b.drops = u.drops - b.requestCounter = u.requestCounter - if b.childState.Picker != nil { + case *LBConfig: + dc := b.handleDropAndRequestCount(u) + if dc != nil && b.childState.Picker != nil { b.ClientConn.UpdateState(balancer.State{ ConnectivityState: b.childState.ConnectivityState, - Picker: newDropPicker(b.childState, u, b.loadWrapper), + Picker: newDropPicker(b.childState, dc, b.loadWrapper), }) } } From 1c1e3f88d343d53aa7be5712e21d42d46892bc32 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 17 Jun 2021 11:29:17 -0700 Subject: [PATCH 139/998] xds: fix test race in cluster_resolver (#4555) There's a race between update sub-balancer and the first EDS resp. If sub-balancer is updated after the first EDS resp, the old balancers (round_robin) will create two lingering SubConns that are not handled, which will mess up the following SubConn state updates. 
--- .../balancer/clusterresolver/eds_impl_test.go | 28 +++++++++---------- .../balancer/clusterresolver/priority_test.go | 20 ++++++------- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index fa2d26cb08c2..f0eae0afc3da 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -59,7 +59,7 @@ func init() { balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond * 100 } -func setupTestEDS(t *testing.T) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { +func setupTestEDS(t *testing.T, initChild *loadBalancingConfig) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) cc := testutils.NewTestClientConn(t) builder := balancer.Get(Name) @@ -70,8 +70,11 @@ func setupTestEDS(t *testing.T) (balancer.Balancer, *testutils.TestClientConn, * ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ClusterName: testClusterName}, + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), + BalancerConfig: &EDSConfig{ + ClusterName: testClusterName, + ChildPolicy: initChild, + }, }); err != nil { edsb.Close() xdsC.Close() @@ -94,7 +97,7 @@ func setupTestEDS(t *testing.T) (balancer.Balancer, *testutils.TestClientConn, * // - replace backend // - change drop rate func (s) TestEDS_OneLocality(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // One locality with one backend. 
@@ -200,7 +203,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // - address change for the locality // - update locality weight func (s) TestEDS_TwoLocalities(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, each with one backend. @@ -321,7 +324,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { // The EDS balancer gets EDS resp with unhealthy endpoints. Test that only // healthy ones are used. func (s) TestEDS_EndpointsHealth(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, each 3 backend, one Healthy, one Unhealthy, one Unknown. @@ -394,7 +397,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { // // It should send an error picker with transient failure to the parent. func (s) TestEDS_EmptyUpdate(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() const cacheTimeout = 100 * time.Microsecond @@ -473,15 +476,10 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { }, }) - edsb, cc, xdsC, cleanup := setupTestEDS(t) + t.Logf("initialize with sub-balancer: stub-balancer") + edsb, cc, xdsC, cleanup := setupTestEDS(t, &loadBalancingConfig{Name: balancerName}) defer cleanup() - t.Logf("update sub-balancer to stub-balancer") - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: &loadBalancingConfig{Name: balancerName}}, - }); err != nil { - t.Fatal(err) - } // Two localities, each with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) @@ -565,7 +563,7 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { } func (s) TestEDS_CircuitBreaking(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() var maxRequests uint32 = 50 diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index c7c648a2c0fa..711c3eeed0d3 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -37,7 +37,7 @@ import ( // // Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0. func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0, 1], each with one backend. @@ -100,7 +100,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { // Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is // down, use 2; remove 2, use 1. func (s) TestEDSPriority_SwitchPriority(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0, 1], each with one backend. @@ -194,7 +194,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { // // Init 0 and 1; 0 and 1 both down; add 2, use 2. func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -247,7 +247,7 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { // // Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2. func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0,1,2], each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -338,7 +338,7 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { } }()() - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -381,7 +381,7 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { // - start with 2 locality with p0 and p1 // - add localities to existing p0 and p1 func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -478,7 +478,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { } }()() - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -585,7 +585,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { // Test the case where the high priority contains no backends. The low priority // will be used. 
func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0, 1], each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -636,7 +636,7 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { // Test the case where the high priority contains no healthy backends. The low // priority will be used. func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t) + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0, 1], each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) @@ -697,7 +697,7 @@ func (s) TestEDSPriority_FirstPriorityRemoved(t *testing.T) { } }()() - _, cc, xdsC, cleanup := setupTestEDS(t) + _, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // One localities, with priorities [0], each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) From 74fe073e9acce820ff3815b78e49aadd10439d59 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 17 Jun 2021 16:53:52 -0700 Subject: [PATCH 140/998] Revert "xds: require router filter when filters are empty" (#4556) This reverts commit 00ae0c57cc0a418f5208906d4f68c4b682dc662c. 
--- xds/internal/resolver/serviceconfig.go | 3 +++ xds/internal/resolver/xds_resolver_test.go | 5 ----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index bf1c53fc93b8..9eaff52dbcce 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -255,6 +255,9 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ } func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { + if len(cs.httpFilterConfig) == 0 { + return nil, nil + } interceptors := make([]iresolver.ClientInterceptor, 0, len(cs.httpFilterConfig)) for _, filter := range cs.httpFilterConfig { if router.IsRouterFilter(filter.Filter) { diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 46229823580c..9bce8ffe8bf6 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -1197,11 +1197,6 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { selectErr string newStreamErr string }{ - { - name: "empty filters", - ldsFilters: []xdsclient.HTTPFilter{}, - selectErr: "no router filter present", - }, { name: "no router filter", ldsFilters: []xdsclient.HTTPFilter{ From 4faa31f0a5809a5064ee128c9d855c0bedc1c783 Mon Sep 17 00:00:00 2001 From: Iskandarov Lev Date: Fri, 18 Jun 2021 23:21:07 +0300 Subject: [PATCH 141/998] stats: add stream info inside stats.Begin (#4533) --- server.go | 8 ++++-- stats/stats.go | 4 +++ stats/stats_test.go | 70 +++++++++++++++++++++++++++++++-------------- stream.go | 8 ++++-- 4 files changed, 63 insertions(+), 27 deletions(-) diff --git a/server.go b/server.go index d90f3fcd3bf6..e72029bf1472 100644 --- a/server.go +++ b/server.go @@ -1144,7 +1144,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1424,7 +1426,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, } sh.HandleRPC(stream.Context(), statsBegin) } diff --git a/stats/stats.go b/stats/stats.go index 63e476ee7ff8..a5ebeeb6932d 100644 --- a/stats/stats.go +++ b/stats/stats.go @@ -45,6 +45,10 @@ type Begin struct { BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool } // IsClient indicates if the stats information is from client side. 
diff --git a/stats/stats_test.go b/stats/stats_test.go index 306f2f6b8e90..dfc6edfc3d36 100644 --- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -407,15 +407,17 @@ func (te *test) doServerStreamCall(c *rpcConfig) (*testpb.StreamingOutputCallReq } type expectedData struct { - method string - serverAddr string - compression string - reqIdx int - requests []proto.Message - respIdx int - responses []proto.Message - err error - failfast bool + method string + isClientStream bool + isServerStream bool + serverAddr string + compression string + reqIdx int + requests []proto.Message + respIdx int + responses []proto.Message + err error + failfast bool } type gotData struct { @@ -456,6 +458,12 @@ func checkBegin(t *testing.T, d *gotData, e *expectedData) { t.Fatalf("st.FailFast = %v, want %v", st.FailFast, e.failfast) } } + if st.IsClientStream != e.isClientStream { + t.Fatalf("st.IsClientStream = %v, want %v", st.IsClientStream, e.isClientStream) + } + if st.IsServerStream != e.isServerStream { + t.Fatalf("st.IsServerStream = %v, want %v", st.IsServerStream, e.isServerStream) + } } func checkInHeader(t *testing.T, d *gotData, e *expectedData) { @@ -847,6 +855,9 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f err error method string + isClientStream bool + isServerStream bool + req proto.Message resp proto.Message e error @@ -864,14 +875,18 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f reqs, resp, e = te.doClientStreamCall(cc) resps = []proto.Message{resp} err = e + isClientStream = true case serverStreamRPC: method = "/grpc.testing.TestService/StreamingOutputCall" req, resps, e = te.doServerStreamCall(cc) reqs = []proto.Message{req} err = e + isServerStream = true case fullDuplexStreamRPC: method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) + isClientStream = true + isServerStream = true } if cc.success != (err == nil) { t.Fatalf("cc.success: 
%v, got error: %v", cc.success, err) @@ -900,12 +915,14 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f } expect := &expectedData{ - serverAddr: te.srvAddr, - compression: tc.compress, - method: method, - requests: reqs, - responses: resps, - err: err, + serverAddr: te.srvAddr, + compression: tc.compress, + method: method, + requests: reqs, + responses: resps, + err: err, + isClientStream: isClientStream, + isServerStream: isServerStream, } h.mu.Lock() @@ -1138,6 +1155,9 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map method string err error + isClientStream bool + isServerStream bool + req proto.Message resp proto.Message e error @@ -1154,14 +1174,18 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map reqs, resp, e = te.doClientStreamCall(cc) resps = []proto.Message{resp} err = e + isClientStream = true case serverStreamRPC: method = "/grpc.testing.TestService/StreamingOutputCall" req, resps, e = te.doServerStreamCall(cc) reqs = []proto.Message{req} err = e + isServerStream = true case fullDuplexStreamRPC: method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) + isClientStream = true + isServerStream = true } if cc.success != (err == nil) { t.Fatalf("cc.success: %v, got error: %v", cc.success, err) @@ -1194,13 +1218,15 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map } expect := &expectedData{ - serverAddr: te.srvAddr, - compression: tc.compress, - method: method, - requests: reqs, - responses: resps, - failfast: cc.failfast, - err: err, + serverAddr: te.srvAddr, + compression: tc.compress, + method: method, + requests: reqs, + responses: resps, + failfast: cc.failfast, + err: err, + isClientStream: isClientStream, + isServerStream: isServerStream, } h.mu.Lock() diff --git a/stream.go b/stream.go index 1f3e70d2c440..ed6af683d209 100644 --- a/stream.go +++ b/stream.go @@ -295,9 
+295,11 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) beginTime = time.Now() begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + IsClientStream: desc.ClientStreams, + IsServerStream: desc.ServerStreams, } sh.HandleRPC(ctx, begin) } From 50328cf800a44d78199311c2d93f5856e4b699c1 Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Mon, 21 Jun 2021 15:11:57 -0400 Subject: [PATCH 142/998] buildscripts: add option to use xds-k8s test driver from a fork (#4548) --- test/kokoro/xds_k8s_install_test_driver.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/xds_k8s_install_test_driver.sh b/test/kokoro/xds_k8s_install_test_driver.sh index 54d9127608c3..a342bd995f92 100755 --- a/test/kokoro/xds_k8s_install_test_driver.sh +++ b/test/kokoro/xds_k8s_install_test_driver.sh @@ -19,7 +19,7 @@ set -eo pipefail readonly PYTHON_VERSION="3.6" # Test driver readonly TEST_DRIVER_REPO_NAME="grpc" -readonly TEST_DRIVER_REPO_URL="https://github.com/grpc/grpc.git" +readonly TEST_DRIVER_REPO_URL="https://github.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc.git" readonly TEST_DRIVER_BRANCH="${TEST_DRIVER_BRANCH:-master}" readonly TEST_DRIVER_PATH="tools/run_tests/xds_k8s_test_driver" readonly TEST_DRIVER_PROTOS_PATH="src/proto/grpc/testing" From 14c7ed60ad7655f522345032f0c0c7ae05303816 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 22 Jun 2021 11:03:12 -0700 Subject: [PATCH 143/998] xds/circuit_breaking: counters should be keyed by {cluster, EDS service name} pair (#4560) --- .../balancer/cdsbalancer/cdsbalancer_test.go | 6 +-- .../balancer/clusterimpl/balancer_test.go | 10 ++-- .../balancer/clusterimpl/clusterimpl.go | 10 ++-- xds/internal/balancer/clusterimpl/picker.go | 2 +- 
xds/internal/xdsclient/requests_counter.go | 53 ++++++++++++------- .../xdsclient/requests_counter_test.go | 34 ++++++------ 6 files changed, 66 insertions(+), 49 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 206918a37d95..8b103143ff76 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -596,8 +596,8 @@ func (s) TestCircuitBreaking(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. var maxRequests uint32 = 1 - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName, MaxRequests: &maxRequests} - wantCCS := edsCCS(serviceName, &maxRequests, false) + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} + wantCCS := edsCCS(clusterName, &maxRequests, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -606,7 +606,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // Since the counter's max requests was set to 1, the first request should // succeed and the second should fail. - counter := xdsclient.GetServiceRequestsCounter(serviceName) + counter := xdsclient.GetClusterRequestsCounter(clusterName, "") if err := counter.StartRequest(maxRequests); err != nil { t.Fatal(err) } diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index ab3613bec31d..3cb34200b62e 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -72,7 +72,7 @@ func init() { // TestDropByCategory verifies that the balancer correctly drops the picks, and // that the drops are reported. 
func TestDropByCategory(t *testing.T) { - defer xdsclient.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -224,7 +224,7 @@ func TestDropByCategory(t *testing.T) { // TestDropCircuitBreaking verifies that the balancer correctly drops the picks // due to circuit breaking, and that the drops are reported. func TestDropCircuitBreaking(t *testing.T) { - defer xdsclient.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -332,7 +332,7 @@ func TestDropCircuitBreaking(t *testing.T) { // picker after it's closed. Because picker updates are sent in the run() // goroutine. func TestPickerUpdateAfterClose(t *testing.T) { - defer xdsclient.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -373,7 +373,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { // TestClusterNameInAddressAttributes covers the case that cluster name is // attached to the subconn address attributes. func TestClusterNameInAddressAttributes(t *testing.T) { - defer xdsclient.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -458,7 +458,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { // TestReResolution verifies that when a SubConn turns transient failure, // re-resolution is triggered. 
func TestReResolution(t *testing.T) { - defer xdsclient.ClearCounterForTesting(testClusterName) + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 34e7171d2c70..64b175d3c958 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -115,7 +115,8 @@ type clusterImplBalancer struct { dropCategories []DropConfig // The categories for drops. drops []*dropper requestCounterCluster string // The cluster name for the request counter. - requestCounter *xdsclient.ServiceRequestsCounter + requestCounterService string // The service name for the request counter. + requestCounter *xdsclient.ClusterRequestsCounter requestCountMax uint32 pickerUpdateCh *buffer.Unbounded } @@ -323,7 +324,7 @@ func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resol type dropConfigs struct { drops []*dropper - requestCounter *xdsclient.ServiceRequestsCounter + requestCounter *xdsclient.ClusterRequestsCounter requestCountMax uint32 } @@ -344,9 +345,10 @@ func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dr // Compare cluster name. And update picker if it's changed, because circuit // breaking's stream counter will be different. - if b.requestCounterCluster != newConfig.Cluster { + if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { b.requestCounterCluster = newConfig.Cluster - b.requestCounter = xdsclient.GetServiceRequestsCounter(newConfig.Cluster) + b.requestCounterService = newConfig.EDSServiceName + b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) updatePicker = true } // Compare upper bound of stream count. And update picker if it's changed. 
diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index a03b89179ee4..c2693258e120 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -75,7 +75,7 @@ type dropPicker struct { drops []*dropper s balancer.State loadStore loadReporter - counter *xdsclient.ServiceRequestsCounter + counter *xdsclient.ClusterRequestsCounter countMax uint32 } diff --git a/xds/internal/xdsclient/requests_counter.go b/xds/internal/xdsclient/requests_counter.go index b7f94332da24..beed2e9d0add 100644 --- a/xds/internal/xdsclient/requests_counter.go +++ b/xds/internal/xdsclient/requests_counter.go @@ -24,44 +24,53 @@ import ( "sync/atomic" ) -type servicesRequestsCounter struct { +type clusterNameAndServiceName struct { + clusterName, edsServcieName string +} + +type clusterRequestsCounter struct { mu sync.Mutex - services map[string]*ServiceRequestsCounter + clusters map[clusterNameAndServiceName]*ClusterRequestsCounter } -var src = &servicesRequestsCounter{ - services: make(map[string]*ServiceRequestsCounter), +var src = &clusterRequestsCounter{ + clusters: make(map[clusterNameAndServiceName]*ClusterRequestsCounter), } -// ServiceRequestsCounter is used to track the total inflight requests for a +// ClusterRequestsCounter is used to track the total inflight requests for a // service with the provided name. -type ServiceRequestsCounter struct { - ServiceName string - numRequests uint32 +type ClusterRequestsCounter struct { + ClusterName string + EDSServiceName string + numRequests uint32 } -// GetServiceRequestsCounter returns the ServiceRequestsCounter with the +// GetClusterRequestsCounter returns the ClusterRequestsCounter with the // provided serviceName. If one does not exist, it creates it. 
-func GetServiceRequestsCounter(serviceName string) *ServiceRequestsCounter { +func GetClusterRequestsCounter(clusterName, edsServiceName string) *ClusterRequestsCounter { src.mu.Lock() defer src.mu.Unlock() - c, ok := src.services[serviceName] + k := clusterNameAndServiceName{ + clusterName: clusterName, + edsServcieName: edsServiceName, + } + c, ok := src.clusters[k] if !ok { - c = &ServiceRequestsCounter{ServiceName: serviceName} - src.services[serviceName] = c + c = &ClusterRequestsCounter{ClusterName: clusterName} + src.clusters[k] = c } return c } -// StartRequest starts a request for a service, incrementing its number of +// StartRequest starts a request for a cluster, incrementing its number of // requests by 1. Returns an error if the max number of requests is exceeded. -func (c *ServiceRequestsCounter) StartRequest(max uint32) error { +func (c *ClusterRequestsCounter) StartRequest(max uint32) error { // Note that during race, the limits could be exceeded. This is allowed: // "Since the implementation is eventually consistent, races between threads // may allow limits to be potentially exceeded." // https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking#arch-overview-circuit-break. if atomic.LoadUint32(&c.numRequests) >= max { - return fmt.Errorf("max requests %v exceeded on service %v", max, c.ServiceName) + return fmt.Errorf("max requests %v exceeded on service %v", max, c.ClusterName) } atomic.AddUint32(&c.numRequests, 1) return nil @@ -69,16 +78,20 @@ func (c *ServiceRequestsCounter) StartRequest(max uint32) error { // EndRequest ends a request for a service, decrementing its number of requests // by 1. -func (c *ServiceRequestsCounter) EndRequest() { +func (c *ClusterRequestsCounter) EndRequest() { atomic.AddUint32(&c.numRequests, ^uint32(0)) } // ClearCounterForTesting clears the counter for the service. Should be only // used in tests. 
-func ClearCounterForTesting(serviceName string) { +func ClearCounterForTesting(clusterName, edsServiceName string) { src.mu.Lock() defer src.mu.Unlock() - c, ok := src.services[serviceName] + k := clusterNameAndServiceName{ + clusterName: clusterName, + edsServcieName: edsServiceName, + } + c, ok := src.clusters[k] if !ok { return } @@ -90,5 +103,5 @@ func ClearCounterForTesting(serviceName string) { func ClearAllCountersForTesting() { src.mu.Lock() defer src.mu.Unlock() - src.services = make(map[string]*ServiceRequestsCounter) + src.clusters = make(map[clusterNameAndServiceName]*ClusterRequestsCounter) } diff --git a/xds/internal/xdsclient/requests_counter_test.go b/xds/internal/xdsclient/requests_counter_test.go index f444e8f163e6..cd95aeaf82e0 100644 --- a/xds/internal/xdsclient/requests_counter_test.go +++ b/xds/internal/xdsclient/requests_counter_test.go @@ -26,6 +26,8 @@ import ( "testing" ) +const testService = "test-service-name" + type counterTest struct { name string maxRequests uint32 @@ -51,9 +53,9 @@ var tests = []counterTest{ }, } -func resetServiceRequestsCounter() { - src = &servicesRequestsCounter{ - services: make(map[string]*ServiceRequestsCounter), +func resetClusterRequestsCounter() { + src = &clusterRequestsCounter{ + clusters: make(map[clusterNameAndServiceName]*ClusterRequestsCounter), } } @@ -67,7 +69,7 @@ func testCounter(t *testing.T, test counterTest) { var successes, errors uint32 for i := 0; i < int(test.numRequests); i++ { go func() { - counter := GetServiceRequestsCounter(test.name) + counter := GetClusterRequestsCounter(test.name, testService) defer requestsDone.Done() err := counter.StartRequest(test.maxRequests) if err == nil { @@ -103,7 +105,7 @@ func testCounter(t *testing.T, test counterTest) { } func (s) TestRequestsCounter(t *testing.T) { - defer resetServiceRequestsCounter() + defer resetClusterRequestsCounter() for _, test := range tests { t.Run(test.name, func(t *testing.T) { testCounter(t, test) @@ -111,18 +113,18 @@ 
func (s) TestRequestsCounter(t *testing.T) { } } -func (s) TestGetServiceRequestsCounter(t *testing.T) { - defer resetServiceRequestsCounter() +func (s) TestGetClusterRequestsCounter(t *testing.T) { + defer resetClusterRequestsCounter() for _, test := range tests { - counterA := GetServiceRequestsCounter(test.name) - counterB := GetServiceRequestsCounter(test.name) + counterA := GetClusterRequestsCounter(test.name, testService) + counterB := GetClusterRequestsCounter(test.name, testService) if counterA != counterB { t.Errorf("counter %v %v != counter %v %v", counterA, *counterA, counterB, *counterB) } } } -func startRequests(t *testing.T, n uint32, max uint32, counter *ServiceRequestsCounter) { +func startRequests(t *testing.T, n uint32, max uint32, counter *ClusterRequestsCounter) { for i := uint32(0); i < n; i++ { if err := counter.StartRequest(max); err != nil { t.Fatalf("error starting initial request: %v", err) @@ -131,11 +133,11 @@ func startRequests(t *testing.T, n uint32, max uint32, counter *ServiceRequestsC } func (s) TestSetMaxRequestsIncreased(t *testing.T) { - defer resetServiceRequestsCounter() - const serviceName string = "set-max-requests-increased" + defer resetClusterRequestsCounter() + const clusterName string = "set-max-requests-increased" var initialMax uint32 = 16 - counter := GetServiceRequestsCounter(serviceName) + counter := GetClusterRequestsCounter(clusterName, testService) startRequests(t, initialMax, initialMax, counter) if err := counter.StartRequest(initialMax); err == nil { t.Fatal("unexpected success on start request after max met") @@ -148,11 +150,11 @@ func (s) TestSetMaxRequestsIncreased(t *testing.T) { } func (s) TestSetMaxRequestsDecreased(t *testing.T) { - defer resetServiceRequestsCounter() - const serviceName string = "set-max-requests-decreased" + defer resetClusterRequestsCounter() + const clusterName string = "set-max-requests-decreased" var initialMax uint32 = 16 - counter := GetServiceRequestsCounter(serviceName) + 
counter := GetClusterRequestsCounter(clusterName, testService) startRequests(t, initialMax-1, initialMax, counter) newMax := initialMax - 1 From 4440c3b8306d28f4af5833bdf12ac54866dc1423 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 22 Jun 2021 14:57:05 -0700 Subject: [PATCH 144/998] cluster_resolver: fix DiscoveryMechanismType marshal JSON (#4532) --- .../clusterresolver/balancerconfig/type.go | 11 ++- .../balancerconfig/type_test.go | 88 +++++++++++++++++++ 2 files changed, 95 insertions(+), 4 deletions(-) create mode 100644 xds/internal/balancer/clusterresolver/balancerconfig/type_test.go diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/type.go b/xds/internal/balancer/clusterresolver/balancerconfig/type.go index eb149cd384dd..2f9ba68fe59d 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/type.go +++ b/xds/internal/balancer/clusterresolver/balancerconfig/type.go @@ -29,17 +29,20 @@ type DiscoveryMechanismType int const ( // DiscoveryMechanismTypeEDS is eds. - DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:EDS` + DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` // DiscoveryMechanismTypeLogicalDNS is DNS. - DiscoveryMechanismTypeLogicalDNS // `json:LOGICAL_DNS` + DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` ) // MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. // // This is necessary to handle enum (as strings) from JSON. -func (t *DiscoveryMechanismType) MarshalJSON() ([]byte, error) { +// +// Note that this needs to be defined on the type not pointer, otherwise the +// variables of this type will marshal to int not string. 
+func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { buffer := bytes.NewBufferString(`"`) - switch *t { + switch t { case DiscoveryMechanismTypeEDS: buffer.WriteString("EDS") case DiscoveryMechanismTypeLogicalDNS: diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/type_test.go b/xds/internal/balancer/clusterresolver/balancerconfig/type_test.go new file mode 100644 index 000000000000..1adbc7c5d230 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/balancerconfig/type_test.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package balancerconfig + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestDiscoveryMechanismTypeMarshalJSON(t *testing.T) { + tests := []struct { + name string + typ DiscoveryMechanismType + want string + }{ + { + name: "eds", + typ: DiscoveryMechanismTypeEDS, + want: `"EDS"`, + }, + { + name: "dns", + typ: DiscoveryMechanismTypeLogicalDNS, + want: `"LOGICAL_DNS"`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, err := json.Marshal(tt.typ); err != nil || string(got) != tt.want { + t.Fatalf("DiscoveryMechanismTypeEDS.MarshalJSON() = (%v, %v), want (%s, nil)", string(got), err, tt.want) + } + }) + } +} +func TestDiscoveryMechanismTypeUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + js string + want DiscoveryMechanismType + wantErr bool + }{ + { + name: "eds", + js: `"EDS"`, + want: DiscoveryMechanismTypeEDS, + }, + { + name: "dns", + js: `"LOGICAL_DNS"`, + want: DiscoveryMechanismTypeLogicalDNS, + }, + { + name: "error", + js: `"1234"`, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got DiscoveryMechanismType + err := json.Unmarshal([]byte(tt.js), &got) + if (err != nil) != tt.wantErr { + t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} From b9270c3a7f163541823e37485aae70fcf043d406 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 23 Jun 2021 16:36:24 -0400 Subject: [PATCH 145/998] client: add deadline for TransportCredentials handshaker (#4559) * Add deadline on connection for TransportCredentials handshake --- internal/transport/http2_client.go | 10 +++- test/end2end_test.go | 86 ++++++++++++++++++++++++++++++ 2 
files changed, 95 insertions(+), 1 deletion(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 119f01e3ebcf..5b2493130fff 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -241,7 +241,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // and passed to the credential handshaker. This makes it possible for // address specific arbitrary data to reach the credential handshaker. connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + rawConn := conn + // Pull the deadline from the connectCtx, which will be used for + // timeouts in the authentication protocol handshake. Can ignore the + // boolean as the deadline will return the zero value, which will make + // the conn not timeout on I/O operations. + deadline, _ := connectCtx.Deadline() + rawConn.SetDeadline(deadline) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) + rawConn.SetDeadline(time.Time{}) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } diff --git a/test/end2end_test.go b/test/end2end_test.go index 552f74e1b795..1b839529c58b 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7668,3 +7668,89 @@ func (s) TestClientSettingsFloodCloseConn(t *testing.T) { s.GracefulStop() timer.Stop() } + +// TestDeadlineSetOnConnectionOnClientCredentialHandshake tests that there is a deadline +// set on the net.Conn when a credential handshake happens in http2_client. 
+func (s) TestDeadlineSetOnConnectionOnClientCredentialHandshake(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + connCh := make(chan net.Conn, 1) + go func() { + defer close(connCh) + conn, err := lis.Accept() + if err != nil { + t.Errorf("Error accepting connection: %v", err) + return + } + connCh <- conn + }() + defer func() { + conn := <-connCh + if conn != nil { + conn.Close() + } + }() + deadlineCh := testutils.NewChannel() + cvd := &credentialsVerifyDeadline{ + deadlineCh: deadlineCh, + } + dOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", addr) + if err != nil { + return nil, err + } + return &infoConn{Conn: conn}, nil + }) + cc, err := grpc.Dial(lis.Addr().String(), dOpt, grpc.WithTransportCredentials(cvd)) + if err != nil { + t.Fatalf("Failed to dial: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + deadline, err := deadlineCh.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from credsInvoked: %v", err) + } + // Default connection timeout is 20 seconds, so if the deadline exceeds now + // + 18 seconds it should be valid. 
+ if !deadline.(time.Time).After(time.Now().Add(time.Second * 18)) { + t.Fatalf("Connection did not have deadline set.") + } +} + +type infoConn struct { + net.Conn + deadline time.Time +} + +func (c *infoConn) SetDeadline(t time.Time) error { + c.deadline = t + return c.Conn.SetDeadline(t) +} + +type credentialsVerifyDeadline struct { + deadlineCh *testutils.Channel +} + +func (cvd *credentialsVerifyDeadline) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} + +func (cvd *credentialsVerifyDeadline) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + cvd.deadlineCh.Send(rawConn.(*infoConn).deadline) + return rawConn, nil, nil +} + +func (cvd *credentialsVerifyDeadline) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{} +} +func (cvd *credentialsVerifyDeadline) Clone() credentials.TransportCredentials { + return cvd +} +func (cvd *credentialsVerifyDeadline) OverrideServerName(s string) error { + return nil +} From d9eb12feed7a0f45d4acbf478e83171f4c00210a Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 23 Jun 2021 14:15:56 -0700 Subject: [PATCH 146/998] xdsclient: move tests out of tests directory (#4535) --- xds/internal/xdsclient/{tests => }/dump_test.go | 2 +- xds/internal/xdsclient/{tests => }/loadreport_test.go | 2 +- xds/internal/xdsclient/tests/README.md | 1 - .../xdsclient/{tests/client_test.go => xdsclient_test.go} | 2 +- 4 files changed, 3 insertions(+), 4 deletions(-) rename xds/internal/xdsclient/{tests => }/dump_test.go (99%) rename xds/internal/xdsclient/{tests => }/loadreport_test.go (99%) delete mode 100644 xds/internal/xdsclient/tests/README.md rename xds/internal/xdsclient/{tests/client_test.go => xdsclient_test.go} (99%) diff --git a/xds/internal/xdsclient/tests/dump_test.go b/xds/internal/xdsclient/dump_test.go similarity index 99% rename from xds/internal/xdsclient/tests/dump_test.go rename to 
xds/internal/xdsclient/dump_test.go index 64c78f672858..5c31b183a6bc 100644 --- a/xds/internal/xdsclient/tests/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -18,7 +18,7 @@ * */ -package tests_test +package xdsclient_test import ( "fmt" diff --git a/xds/internal/xdsclient/tests/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go similarity index 99% rename from xds/internal/xdsclient/tests/loadreport_test.go rename to xds/internal/xdsclient/loadreport_test.go index 1815f8a0ab43..0f29e96fc1eb 100644 --- a/xds/internal/xdsclient/tests/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -18,7 +18,7 @@ * */ -package tests_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/tests/README.md b/xds/internal/xdsclient/tests/README.md deleted file mode 100644 index 6dc940c103f7..000000000000 --- a/xds/internal/xdsclient/tests/README.md +++ /dev/null @@ -1 +0,0 @@ -This package contains tests which cannot live in the `client` package because they need to import one of the API client packages (which itself has a dependency on the `client` package). 
diff --git a/xds/internal/xdsclient/tests/client_test.go b/xds/internal/xdsclient/xdsclient_test.go similarity index 99% rename from xds/internal/xdsclient/tests/client_test.go rename to xds/internal/xdsclient/xdsclient_test.go index 26c64bacd034..7b33c145f3bd 100644 --- a/xds/internal/xdsclient/tests/client_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -18,7 +18,7 @@ * */ -package tests_test +package xdsclient_test import ( "testing" From e24ede593630782a7718aeb27f116446e0284f90 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 24 Jun 2021 16:20:11 -0700 Subject: [PATCH 147/998] xds: delete LRS policy and move the functionality to xds_cluster_impl (#4528) - (cluster_resolver) attach locality ID to addresses - (cluster_impl) wrap SubConn - (lrs) delete --- .../balancer/clusterimpl/balancer_test.go | 125 +++++++++ .../balancer/clusterimpl/clusterimpl.go | 93 ++++++- xds/internal/balancer/clusterimpl/picker.go | 76 +++++- .../balancerconfig/configbuilder.go | 27 +- .../balancerconfig/configbuilder_test.go | 215 +++++----------- xds/internal/balancer/lrs/balancer.go | 239 ------------------ xds/internal/balancer/lrs/balancer_test.go | 143 ----------- xds/internal/balancer/lrs/config.go | 53 ---- xds/internal/balancer/lrs/config_test.go | 114 --------- xds/internal/balancer/lrs/logging.go | 34 --- xds/internal/balancer/lrs/picker.go | 85 ------- .../weightedtarget_config_test.go | 26 +- xds/internal/internal.go | 18 ++ 13 files changed, 382 insertions(+), 866 deletions(-) delete mode 100644 xds/internal/balancer/lrs/balancer.go delete mode 100644 xds/internal/balancer/lrs/balancer_test.go delete mode 100644 xds/internal/balancer/lrs/config.go delete mode 100644 xds/internal/balancer/lrs/config_test.go delete mode 100644 xds/internal/balancer/lrs/logging.go delete mode 100644 xds/internal/balancer/lrs/picker.go diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 
3cb34200b62e..09194a54d0e1 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -22,6 +22,7 @@ package clusterimpl import ( "context" + "fmt" "strings" "testing" "time" @@ -34,6 +35,7 @@ import ( "google.golang.org/grpc/internal" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" + xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" @@ -158,6 +160,9 @@ func TestDropByCategory(t *testing.T) { Service: testServiceName, TotalDrops: dropCount, Drops: map[string]uint64{dropReason: dropCount}, + LocalityStats: map[string]load.LocalityData{ + assertString(xdsinternal.LocalityID{}.ToString): {RequestStats: load.RequestData{Succeeded: rpcCount - dropCount}}, + }, }} gotStatsData0 := loadStore.Stats([]string{testClusterName}) @@ -213,6 +218,9 @@ func TestDropByCategory(t *testing.T) { Service: testServiceName, TotalDrops: dropCount2, Drops: map[string]uint64{dropReason2: dropCount2}, + LocalityStats: map[string]load.LocalityData{ + assertString(xdsinternal.LocalityID{}.ToString): {RequestStats: load.RequestData{Succeeded: rpcCount - dropCount2}}, + }, }} gotStatsData1 := loadStore.Stats([]string{testClusterName}) @@ -320,6 +328,9 @@ func TestDropCircuitBreaking(t *testing.T) { Cluster: testClusterName, Service: testServiceName, TotalDrops: uint64(maxRequest), + LocalityStats: map[string]load.LocalityData{ + assertString(xdsinternal.LocalityID{}.ToString): {RequestStats: load.RequestData{Succeeded: uint64(rpcCount - maxRequest + 50)}}, + }, }} gotStatsData0 := loadStore.Stats([]string{testClusterName}) @@ -533,3 +544,117 @@ func TestReResolution(t *testing.T) { t.Fatalf("timeout waiting for ResolveNow()") } } + +func TestLoadReporting(t *testing.T) { + var testLocality = xdsinternal.LocalityID{ + Region: 
"test-region", + Zone: "test-zone", + SubZone: "test-sub-zone", + } + + xdsC := fakeclient.NewClient() + defer xdsC.Close() + + builder := balancer.Get(Name) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + addrs := make([]resolver.Address, len(testBackendAddrs)) + for i, a := range testBackendAddrs { + addrs[i] = xdsinternal.SetLocalityID(a, testLocality) + } + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: newString(testLRSServerName), + // Locality: testLocality, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + got, err := xdsC.WaitForReportLoad(ctx) + if err != nil { + t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) + } + if got.Server != testLRSServerName { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) + } + + sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // This should get the connecting picker. + p0 := <-cc.NewPickerCh + for i := 0; i < 10; i++ { + _, err := p0.Pick(balancer.PickInfo{}) + if err != balancer.ErrNoSubConnAvailable { + t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) + } + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Test pick with one backend. 
+ p1 := <-cc.NewPickerCh + const successCount = 5 + for i := 0; i < successCount; i++ { + gotSCSt, err := p1.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + gotSCSt.Done(balancer.DoneInfo{}) + } + const errorCount = 5 + for i := 0; i < errorCount; i++ { + gotSCSt, err := p1.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + gotSCSt.Done(balancer.DoneInfo{Err: fmt.Errorf("error")}) + } + + // Dump load data from the store and compare with expected counts. + loadStore := xdsC.LoadStore() + if loadStore == nil { + t.Fatal("loadStore is nil in xdsClient") + } + sds := loadStore.Stats([]string{testClusterName}) + if len(sds) == 0 { + t.Fatalf("loads for cluster %v not found in store", testClusterName) + } + sd := sds[0] + if sd.Cluster != testClusterName || sd.Service != testServiceName { + t.Fatalf("got unexpected load for %q, %q, want %q, %q", sd.Cluster, sd.Service, testClusterName, testServiceName) + } + testLocalityJSON, _ := testLocality.ToString() + localityData, ok := sd.LocalityStats[testLocalityJSON] + if !ok { + t.Fatalf("loads for %v not found in store", testLocality) + } + reqStats := localityData.RequestStats + if reqStats.Succeeded != successCount { + t.Errorf("got succeeded %v, want %v", reqStats.Succeeded, successCount) + } + if reqStats.Errored != errorCount { + t.Errorf("got errord %v, want %v", reqStats.Errored, errorCount) + } + if reqStats.InProgress != 0 { + t.Errorf("got inProgress %v, want %v", reqStats.InProgress, 0) + } +} + +func assertString(f func() (string, error)) string { + s, err := f() + if err != nil { + panic(err.Error()) + } + return s +} diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go 
index 64b175d3c958..32c97757ded0 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -27,6 +27,7 @@ import ( "encoding/json" "fmt" "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -37,6 +38,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/load" @@ -61,6 +63,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), loadWrapper: loadstore.NewWrapper(), + scWrappers: make(map[balancer.SubConn]*scWrapper), pickerUpdateCh: buffer.NewUnbounded(), requestCountMax: defaultRequestCountMax, } @@ -107,6 +110,18 @@ type clusterImplBalancer struct { clusterNameMu sync.Mutex clusterName string + scWrappersMu sync.Mutex + // The SubConns passed to the child policy are wrapped in a wrapper, to keep + // locality ID. But when the parent ClientConn sends updates, it's going to + // give the original SubConn, not the wrapper. But the child policies only + // know about the wrapper, so when forwarding SubConn updates, they must be + // sent for the wrappers. + // + // This keeps a map from original SubConn to wrapper, so that when + // forwarding the SubConn state update, the child policy will get the + // wrappers. + scWrappers map[balancer.SubConn]*scWrapper + // childState/drops/requestCounter keeps the state used by the most recently // generated picker. All fields can only be accessed in run(). And run() is // the only goroutine that sends picker to the parent ClientConn. 
All @@ -267,6 +282,15 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer b.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) } + b.scWrappersMu.Lock() + if scw, ok := b.scWrappers[sc]; ok { + sc = scw + if s.ConnectivityState == connectivity.Shutdown { + // Remove this SubConn from the map on Shutdown. + delete(b.scWrappers, scw.SubConn) + } + } + b.scWrappersMu.Unlock() if b.childLB != nil { b.childLB.UpdateSubConnState(sc, s) } @@ -304,20 +328,83 @@ func (b *clusterImplBalancer) getClusterName() string { return b.clusterName } +// scWrapper is a wrapper of SubConn with locality ID. The locality ID can be +// retrieved from the addresses when creating SubConn. +// +// All SubConns passed to the child policies are wrapped in this, so that the +// picker can get the localityID from the picked SubConn, and do load reporting. +// +// After wrapping, all SubConns to and from the parent ClientConn (e.g. for +// SubConn state update, update/remove SubConn) must be the original SubConns. +// All SubConns to and from the child policy (NewSubConn, forwarding SubConn +// state update) must be the wrapper. The balancer keeps a map from the original +// SubConn to the wrapper for this purpose. +type scWrapper struct { + balancer.SubConn + // locality needs to be atomic because it can be updated while being read by + // the picker. 
+ locality atomic.Value // type xdsinternal.LocalityID +} + +func (scw *scWrapper) updateLocalityID(lID xdsinternal.LocalityID) { + scw.locality.Store(lID) +} + +func (scw *scWrapper) localityID() xdsinternal.LocalityID { + lID, _ := scw.locality.Load().(xdsinternal.LocalityID) + return lID +} + func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { clusterName := b.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) + var lID xdsinternal.LocalityID for i, addr := range addrs { newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) + lID = xdsinternal.GetLocalityID(newAddrs[i]) + } + sc, err := b.ClientConn.NewSubConn(newAddrs, opts) + if err != nil { + return nil, err + } + // Wrap this SubConn in a wrapper, and add it to the map. + b.scWrappersMu.Lock() + ret := &scWrapper{SubConn: sc} + ret.updateLocalityID(lID) + b.scWrappers[sc] = ret + b.scWrappersMu.Unlock() + return ret, nil +} + +func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { + scw, ok := sc.(*scWrapper) + if !ok { + b.ClientConn.RemoveSubConn(sc) + return } - return b.ClientConn.NewSubConn(newAddrs, opts) + // Remove the original SubConn from the parent ClientConn. + // + // Note that we don't remove this SubConn from the scWrappers map. We will + // need it to forward the final SubConn state Shutdown to the child policy. + // + // This entry is kept in the map until it's state is changes to Shutdown, + // and will be deleted in UpdateSubConnState(). 
+ b.ClientConn.RemoveSubConn(scw.SubConn) } func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { clusterName := b.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) + var lID xdsinternal.LocalityID for i, addr := range addrs { newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) + lID = xdsinternal.GetLocalityID(newAddrs[i]) + } + if scw, ok := sc.(*scWrapper); ok { + scw.updateLocalityID(lID) + // Need to get the original SubConn from the wrapper before calling + // parent ClientConn. + sc = scw.SubConn } b.ClientConn.UpdateAddresses(sc, newAddrs) } @@ -388,7 +475,7 @@ func (b *clusterImplBalancer) run() { b.childState = u b.ClientConn.UpdateState(balancer.State{ ConnectivityState: b.childState.ConnectivityState, - Picker: newDropPicker(b.childState, &dropConfigs{ + Picker: newPicker(b.childState, &dropConfigs{ drops: b.drops, requestCounter: b.requestCounter, requestCountMax: b.requestCountMax, @@ -399,7 +486,7 @@ func (b *clusterImplBalancer) run() { if dc != nil && b.childState.Picker != nil { b.ClientConn.UpdateState(balancer.State{ ConnectivityState: b.childState.ConnectivityState, - Picker: newDropPicker(b.childState, dc, b.loadWrapper), + Picker: newPicker(b.childState, dc, b.loadWrapper), }) } } diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index c2693258e120..db29c550be11 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -19,6 +19,7 @@ package clusterimpl import ( + orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -66,12 +67,21 @@ func (d *dropper) drop() (ret bool) { return d.w.Next().(bool) } +const ( + serverLoadCPUName = "cpu_utilization" + serverLoadMemoryName = "mem_utilization" +) + // loadReporter wraps the methods from the loadStore that 
are used here. type loadReporter interface { + CallStarted(locality string) + CallFinished(locality string, err error) + CallServerLoad(locality, name string, val float64) CallDropped(locality string) } -type dropPicker struct { +// Picker implements RPC drop, circuit breaking drop and load reporting. +type picker struct { drops []*dropper s balancer.State loadStore loadReporter @@ -79,8 +89,8 @@ type dropPicker struct { countMax uint32 } -func newDropPicker(s balancer.State, config *dropConfigs, loadStore load.PerClusterReporter) *dropPicker { - return &dropPicker{ +func newPicker(s balancer.State, config *dropConfigs, loadStore load.PerClusterReporter) *picker { + return &picker{ drops: config.drops, s: s, loadStore: loadStore, @@ -89,13 +99,14 @@ func newDropPicker(s balancer.State, config *dropConfigs, loadStore load.PerClus } } -func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // Don't drop unless the inner picker is READY. Similar to // https://github.com/grpc/grpc-go/issues/2622. if d.s.ConnectivityState != connectivity.Ready { return d.s.Picker.Pick(info) } + // Check if this RPC should be dropped by category. for _, dp := range d.drops { if dp.drop() { if d.loadStore != nil { @@ -105,6 +116,7 @@ func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { } } + // Check if this RPC should be dropped by circuit breaking. if d.counter != nil { if err := d.counter.StartRequest(d.countMax); err != nil { // Drops by circuit breaking are reported with empty category. 
They @@ -114,11 +126,58 @@ func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { } return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) } - pr, err := d.s.Picker.Pick(info) - if err != nil { + } + + var lIDStr string + pr, err := d.s.Picker.Pick(info) + if scw, ok := pr.SubConn.(*scWrapper); ok { + // This OK check also covers the case err!=nil, because SubConn will be + // nil. + pr.SubConn = scw.SubConn + var e error + // If locality ID isn't found in the wrapper, an empty locality ID will + // be used. + lIDStr, e = scw.localityID().ToString() + if e != nil { + logger.Infof("failed to marshal LocalityID: %#v, loads won't be reported", scw.localityID()) + } + } + + if err != nil { + if d.counter != nil { + // Release one request count if this pick fails. d.counter.EndRequest() - return pr, err } + return pr, err + } + + if d.loadStore != nil { + d.loadStore.CallStarted(lIDStr) + oldDone := pr.Done + pr.Done = func(info balancer.DoneInfo) { + if oldDone != nil { + oldDone(info) + } + d.loadStore.CallFinished(lIDStr, info.Err) + + load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) + if !ok { + return + } + d.loadStore.CallServerLoad(lIDStr, serverLoadCPUName, load.CpuUtilization) + d.loadStore.CallServerLoad(lIDStr, serverLoadMemoryName, load.MemUtilization) + for n, c := range load.RequestCost { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + for n, c := range load.Utilization { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + } + } + + if d.counter != nil { + // Update Done() so that when the RPC finishes, the request count will + // be released. 
oldDone := pr.Done pr.Done = func(doneInfo balancer.DoneInfo) { d.counter.EndRequest() @@ -126,8 +185,7 @@ func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { oldDone(doneInfo) } } - return pr, err } - return d.s.Picker.Pick(info) + return pr, err } diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go index c75ecddd76d8..4f96cc61f1ef 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go @@ -38,8 +38,8 @@ import ( "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" - "google.golang.org/grpc/xds/internal/balancer/lrs" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/xdsclient" @@ -181,7 +181,7 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.Endpoint // Prepend parent priority to the priority names, to avoid duplicates. 
pName := fmt.Sprintf("priority-%v-%v", parentPriority, priorityName) retNames = append(retNames, pName) - wtConfig, addrs := localitiesToWeightedTarget(priorityLocalities, pName, endpointPickingPolicy, mechanism.LoadReportingServerName, mechanism.Cluster, mechanism.EDSServiceName) + wtConfig, addrs := localitiesToWeightedTarget(priorityLocalities, pName, endpointPickingPolicy, mechanism.Cluster, mechanism.EDSServiceName) retConfigs[pName] = &clusterimpl.LBConfig{ Cluster: mechanism.Cluster, EDSServiceName: mechanism.EDSServiceName, @@ -249,7 +249,7 @@ func dedupSortedIntSlice(a []int) []int { // // The addresses have path hierarchy set to [priority-name, locality-name], so // priority and weighted target know which child policy they are for. -func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig, lrsServer *string, cluster, edsService string) (*weightedtarget.LBConfig, []resolver.Address) { +func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig, cluster, edsService string) (*weightedtarget.LBConfig, []resolver.Address) { weightedTargets := make(map[string]weightedtarget.Target) var addrs []resolver.Address for _, locality := range localities { @@ -257,25 +257,7 @@ func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName st if err != nil { localityStr = fmt.Sprintf("%+v", locality.ID) } - - child := childPolicy - // If lrsServer is not set, we can skip this extra layer of the LRS - // policy. 
- if lrsServer != nil { - localityID := locality.ID - child = &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: cluster, - EDSServiceName: edsService, - ChildPolicy: childPolicy, - LoadReportingServerName: *lrsServer, - Locality: &localityID, - }, - } - } - weightedTargets[localityStr] = weightedtarget.Target{Weight: locality.Weight, ChildPolicy: child} - + weightedTargets[localityStr] = weightedtarget.Target{Weight: locality.Weight, ChildPolicy: childPolicy} for _, endpoint := range locality.Endpoints { // Filter out all "unhealthy" endpoints (unknown and healthy are // both considered to be healthy: @@ -290,6 +272,7 @@ func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName st addr = weightedroundrobin.SetAddrInfo(addr, ai) } addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addr = internal.SetLocalityID(addr, locality.ID) addrs = append(addrs, addr) } } diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go index 389db349b21d..95ded6019c57 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" - "google.golang.org/grpc/xds/internal/balancer/lrs" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/xdsclient" @@ -228,30 +227,12 @@ func TestBuildPriorityConfig(t *testing.T) { Config: &weightedtarget.LBConfig{ Targets: map[string]weightedtarget.Target{ assertString(testLocalityIDs[0].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - 
ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[0], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, assertString(testLocalityIDs[1].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[1], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, }, }, @@ -279,30 +260,12 @@ func TestBuildPriorityConfig(t *testing.T) { Config: &weightedtarget.LBConfig{ Targets: map[string]weightedtarget.Target{ assertString(testLocalityIDs[2].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[2], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, assertString(testLocalityIDs[3].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[3], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, }, }, @@ -324,16 +287,16 @@ func TestBuildPriorityConfig(t *testing.T) { 
Priorities: []string{"priority-0-0", "priority-0-1", "priority-1"}, } wantAddrs := []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][0]}, []string{"priority-0-0", assertString(testLocalityIDs[0].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][1]}, []string{"priority-0-0", assertString(testLocalityIDs[0].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][0]}, []string{"priority-0-0", assertString(testLocalityIDs[1].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][1]}, []string{"priority-0-0", assertString(testLocalityIDs[1].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][0]}, []string{"priority-0-1", assertString(testLocalityIDs[2].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][1]}, []string{"priority-0-1", assertString(testLocalityIDs[2].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][0]}, []string{"priority-0-1", assertString(testLocalityIDs[3].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][1]}, []string{"priority-0-1", assertString(testLocalityIDs[3].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[4][0]}, []string{"priority-1"}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[4][1]}, []string{"priority-1"}), + testAddrWithAttrs(testAddressStrs[0][0], nil, "priority-0-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[0][1], nil, "priority-0-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[1][0], nil, "priority-0-0", &testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[1][1], nil, "priority-0-0", &testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[2][0], nil, "priority-0-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[2][1], nil, "priority-0-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[3][0], nil, "priority-0-1", &testLocalityIDs[3]), + testAddrWithAttrs(testAddressStrs[3][1], nil, 
"priority-0-1", &testLocalityIDs[3]), + testAddrWithAttrs(testAddressStrs[4][0], nil, "priority-1", nil), + testAddrWithAttrs(testAddressStrs[4][1], nil, "priority-1", nil), } if diff := cmp.Diff(gotConfig, wantConfig); diff != "" { @@ -434,30 +397,12 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { Config: &weightedtarget.LBConfig{ Targets: map[string]weightedtarget.Target{ assertString(testLocalityIDs[0].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[0], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, assertString(testLocalityIDs[1].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[1], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, }, }, @@ -479,30 +424,12 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { Config: &weightedtarget.LBConfig{ Targets: map[string]weightedtarget.Target{ assertString(testLocalityIDs[2].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[2], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, 
assertString(testLocalityIDs[3].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: testLRSServer, - Locality: &testLocalityIDs[3], - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, }, }, @@ -510,14 +437,14 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { }, } wantAddrs := []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][0]}, []string{"priority-2-0", assertString(testLocalityIDs[0].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][1]}, []string{"priority-2-0", assertString(testLocalityIDs[0].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][0]}, []string{"priority-2-0", assertString(testLocalityIDs[1].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[1][1]}, []string{"priority-2-0", assertString(testLocalityIDs[1].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][0]}, []string{"priority-2-1", assertString(testLocalityIDs[2].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[2][1]}, []string{"priority-2-1", assertString(testLocalityIDs[2].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][0]}, []string{"priority-2-1", assertString(testLocalityIDs[3].ToString)}), - hierarchy.Set(resolver.Address{Addr: testAddressStrs[3][1]}, []string{"priority-2-1", assertString(testLocalityIDs[3].ToString)}), + testAddrWithAttrs(testAddressStrs[0][0], nil, "priority-2-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[0][1], nil, "priority-2-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[1][0], nil, "priority-2-0", &testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[1][1], nil, "priority-2-0", 
&testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[2][0], nil, "priority-2-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[2][1], nil, "priority-2-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[3][0], nil, "priority-2-1", &testLocalityIDs[3]), + testAddrWithAttrs(testAddressStrs[3][1], nil, "priority-2-1", &testLocalityIDs[3]), } if diff := cmp.Diff(gotNames, wantNames); diff != "" { @@ -674,38 +601,20 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { wantConfig: &weightedtarget.LBConfig{ Targets: map[string]weightedtarget.Target{ assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: "test-cluster", - EDSServiceName: "test-eds-service", - LoadReportingServerName: "test-lrs-server", - Locality: &internal.LocalityID{Zone: "test-zone-1"}, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrs.Name, - Config: &lrs.LBConfig{ - ClusterName: "test-cluster", - EDSServiceName: "test-eds-service", - LoadReportingServerName: "test-lrs-server", - Locality: &internal.LocalityID{Zone: "test-zone-2"}, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, }, }, wantAddrs: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: "addr-1-1"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), - hierarchy.Set(resolver.Address{Addr: "addr-1-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), - 
hierarchy.Set(resolver.Address{Addr: "addr-2-1"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), - hierarchy.Set(resolver.Address{Addr: "addr-2-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + testAddrWithAttrs("addr-1-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), }, }, { @@ -748,10 +657,10 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }, }, wantAddrs: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: "addr-1-1"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), - hierarchy.Set(resolver.Address{Addr: "addr-1-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), - hierarchy.Set(resolver.Address{Addr: "addr-2-1"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), - hierarchy.Set(resolver.Address{Addr: "addr-2-2"}, []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + testAddrWithAttrs("addr-1-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), }, }, { @@ -794,24 +703,16 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }, }, wantAddrs: []resolver.Address{ - hierarchy.Set( - weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-1-1"}, 
weightedroundrobin.AddrInfo{Weight: 90}), - []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), - hierarchy.Set( - weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-1-2"}, weightedroundrobin.AddrInfo{Weight: 10}), - []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString)}), - hierarchy.Set( - weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-2-1"}, weightedroundrobin.AddrInfo{Weight: 90}), - []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), - hierarchy.Set( - weightedroundrobin.SetAddrInfo(resolver.Address{Addr: "addr-2-2"}, weightedroundrobin.AddrInfo{Weight: 10}), - []string{"test-priority", assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString)}), + testAddrWithAttrs("addr-1-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, got1 := localitiesToWeightedTarget(tt.localities, tt.priorityName, tt.childPolicy, tt.lrsServer, tt.cluster, tt.edsService) + got, got1 := localitiesToWeightedTarget(tt.localities, tt.priorityName, tt.childPolicy, tt.cluster, tt.edsService) if diff := cmp.Diff(got, tt.wantConfig); diff != "" { t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) } @@ -837,3 +738,17 @@ func assertString(f func() (string, error)) string { } return s } + +func testAddrWithAttrs(addrStr string, weight *uint32, priority string, lID *internal.LocalityID) resolver.Address { + addr := resolver.Address{Addr: addrStr} + if weight != nil { + addr = weightedroundrobin.SetAddrInfo(addr, 
weightedroundrobin.AddrInfo{Weight: *weight}) + } + path := []string{priority} + if lID != nil { + path = append(path, assertString(lID.ToString)) + addr = internal.SetLocalityID(addr, *lID) + } + addr = hierarchy.Set(addr, path) + return addr +} diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go deleted file mode 100644 index ed7fb38c8545..000000000000 --- a/xds/internal/balancer/lrs/balancer.go +++ /dev/null @@ -1,239 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package lrs implements load reporting balancer for xds. -package lrs - -import ( - "encoding/json" - "fmt" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/loadstore" - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/load" -) - -func init() { - balancer.Register(bb{}) -} - -// Name is the name of the LRS balancer. 
-const Name = "lrs_experimental" - -type bb struct{} - -func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - b := &lrsBalancer{ - cc: cc, - buildOpts: opts, - } - b.logger = prefixLogger(b) - b.logger.Infof("Created") - return b -} - -func (bb) Name() string { - return Name -} - -func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - return parseConfig(c) -} - -type lrsBalancer struct { - cc balancer.ClientConn - buildOpts balancer.BuildOptions - - logger *grpclog.PrefixLogger - xdsClient *xdsClientWrapper - - config *LBConfig - lb balancer.Balancer // The sub balancer. -} - -func (b *lrsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) - newConfig, ok := s.BalancerConfig.(*LBConfig) - if !ok { - return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) - } - - if b.xdsClient == nil { - c := xdsclient.FromResolverState(s.ResolverState) - if c == nil { - return balancer.ErrBadResolverState - } - b.xdsClient = newXDSClientWrapper(c) - } - - // Update load reporting config or xds client. This needs to be done before - // updating the child policy because we need the loadStore from the updated - // client to be passed to the ccWrapper. - if err := b.xdsClient.update(newConfig); err != nil { - return err - } - - // If child policy is a different type, recreate the sub-balancer. 
- if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { - bb := balancer.Get(newConfig.ChildPolicy.Name) - if bb == nil { - return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) - } - if b.lb != nil { - b.lb.Close() - } - lidJSON, err := newConfig.Locality.ToString() - if err != nil { - return fmt.Errorf("failed to marshal LocalityID: %#v", newConfig.Locality) - } - ccWrapper := newCCWrapper(b.cc, b.xdsClient.loadStore(), lidJSON) - b.lb = bb.Build(ccWrapper, b.buildOpts) - } - b.config = newConfig - - // Addresses and sub-balancer config are sent to sub-balancer. - return b.lb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: s.ResolverState, - BalancerConfig: b.config.ChildPolicy.Config, - }) -} - -func (b *lrsBalancer) ResolverError(err error) { - if b.lb != nil { - b.lb.ResolverError(err) - } -} - -func (b *lrsBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if b.lb != nil { - b.lb.UpdateSubConnState(sc, s) - } -} - -func (b *lrsBalancer) Close() { - if b.lb != nil { - b.lb.Close() - b.lb = nil - } - b.xdsClient.close() -} - -type ccWrapper struct { - balancer.ClientConn - loadStore load.PerClusterReporter - localityIDJSON string -} - -func newCCWrapper(cc balancer.ClientConn, loadStore load.PerClusterReporter, localityIDJSON string) *ccWrapper { - return &ccWrapper{ - ClientConn: cc, - loadStore: loadStore, - localityIDJSON: localityIDJSON, - } -} - -func (ccw *ccWrapper) UpdateState(s balancer.State) { - s.Picker = newLoadReportPicker(s.Picker, ccw.localityIDJSON, ccw.loadStore) - ccw.ClientConn.UpdateState(s) -} - -type xdsClientWrapper struct { - c xdsclient.XDSClient - cancelLoadReport func() - clusterName string - edsServiceName string - lrsServerName *string - // loadWrapper is a wrapper with loadOriginal, with clusterName and - // edsServiceName. It's used children to report loads. 
- loadWrapper *loadstore.Wrapper -} - -func newXDSClientWrapper(c xdsclient.XDSClient) *xdsClientWrapper { - return &xdsClientWrapper{ - c: c, - loadWrapper: loadstore.NewWrapper(), - } -} - -// update checks the config and xdsclient, and decides whether it needs to -// restart the load reporting stream. -func (w *xdsClientWrapper) update(newConfig *LBConfig) error { - var ( - restartLoadReport bool - updateLoadClusterAndService bool - ) - - // ClusterName is different, restart. ClusterName is from ClusterName and - // EDSServiceName. - if w.clusterName != newConfig.ClusterName { - updateLoadClusterAndService = true - w.clusterName = newConfig.ClusterName - } - if w.edsServiceName != newConfig.EDSServiceName { - updateLoadClusterAndService = true - w.edsServiceName = newConfig.EDSServiceName - } - - if updateLoadClusterAndService { - // This updates the clusterName and serviceName that will reported for the - // loads. The update here is too early, the perfect timing is when the - // picker is updated with the new connection. But from this balancer's point - // of view, it's impossible to tell. - // - // On the other hand, this will almost never happen. Each LRS policy - // shouldn't get updated config. The parent should do a graceful switch when - // the clusterName or serviceName is changed. - w.loadWrapper.UpdateClusterAndService(w.clusterName, w.edsServiceName) - } - - if w.lrsServerName == nil || *w.lrsServerName != newConfig.LoadReportingServerName { - // LoadReportingServerName is different, load should be report to a - // different server, restart. 
- restartLoadReport = true - w.lrsServerName = &newConfig.LoadReportingServerName - } - - if restartLoadReport { - if w.cancelLoadReport != nil { - w.cancelLoadReport() - w.cancelLoadReport = nil - } - var loadStore *load.Store - if w.c != nil { - loadStore, w.cancelLoadReport = w.c.ReportLoad(*w.lrsServerName) - } - w.loadWrapper.UpdateLoadStore(loadStore) - } - - return nil -} - -func (w *xdsClientWrapper) loadStore() load.PerClusterReporter { - return w.loadWrapper -} - -func (w *xdsClientWrapper) close() { - if w.cancelLoadReport != nil { - w.cancelLoadReport() - w.cancelLoadReport = nil - } -} diff --git a/xds/internal/balancer/lrs/balancer_test.go b/xds/internal/balancer/lrs/balancer_test.go deleted file mode 100644 index c0ec9cc41dd3..000000000000 --- a/xds/internal/balancer/lrs/balancer_test.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package lrs - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/resolver" - xdsinternal "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" -) - -const defaultTestTimeout = 1 * time.Second - -var ( - testBackendAddrs = []resolver.Address{ - {Addr: "1.1.1.1:1"}, - } - testLocality = &xdsinternal.LocalityID{ - Region: "test-region", - Zone: "test-zone", - SubZone: "test-sub-zone", - } -) - -// TestLoadReporting verifies that the lrs balancer starts the loadReport -// stream when the LBConfig passed to it contains a valid value for the LRS -// server (empty string). -func TestLoadReporting(t *testing.T) { - xdsC := fakeclient.NewClient() - defer xdsC.Close() - - builder := balancer.Get(Name) - cc := testutils.NewTestClientConn(t) - lrsB := builder.Build(cc, balancer.BuildOptions{}) - defer lrsB.Close() - - if err := lrsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), - BalancerConfig: &LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: testLRSServerName, - Locality: testLocality, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - }); err != nil { - t.Fatalf("unexpected error from UpdateClientConnState: %v", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - got, err := xdsC.WaitForReportLoad(ctx) - if err != nil { - t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) - } - if got.Server != 
testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) - } - - sc1 := <-cc.NewSubConnCh - lrsB.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - lrsB.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with one backend. - p1 := <-cc.NewPickerCh - const successCount = 5 - for i := 0; i < successCount; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - gotSCSt.Done(balancer.DoneInfo{}) - } - const errorCount = 5 - for i := 0; i < errorCount; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - gotSCSt.Done(balancer.DoneInfo{Err: fmt.Errorf("error")}) - } - - // Dump load data from the store and compare with expected counts. 
- loadStore := xdsC.LoadStore() - if loadStore == nil { - t.Fatal("loadStore is nil in xdsClient") - } - sds := loadStore.Stats([]string{testClusterName}) - if len(sds) == 0 { - t.Fatalf("loads for cluster %v not found in store", testClusterName) - } - sd := sds[0] - if sd.Cluster != testClusterName || sd.Service != testServiceName { - t.Fatalf("got unexpected load for %q, %q, want %q, %q", sd.Cluster, sd.Service, testClusterName, testServiceName) - } - testLocalityJSON, _ := testLocality.ToString() - localityData, ok := sd.LocalityStats[testLocalityJSON] - if !ok { - t.Fatalf("loads for %v not found in store", testLocality) - } - reqStats := localityData.RequestStats - if reqStats.Succeeded != successCount { - t.Errorf("got succeeded %v, want %v", reqStats.Succeeded, successCount) - } - if reqStats.Errored != errorCount { - t.Errorf("got errord %v, want %v", reqStats.Errored, errorCount) - } - if reqStats.InProgress != 0 { - t.Errorf("got inProgress %v, want %v", reqStats.InProgress, 0) - } -} diff --git a/xds/internal/balancer/lrs/config.go b/xds/internal/balancer/lrs/config.go deleted file mode 100644 index e0e30bbb8821..000000000000 --- a/xds/internal/balancer/lrs/config.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package lrs - -import ( - "encoding/json" - "fmt" - - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal" -) - -// LBConfig is the balancer config for lrs balancer. -type LBConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - ClusterName string `json:"clusterName,omitempty"` - EDSServiceName string `json:"edsServiceName,omitempty"` - LoadReportingServerName string `json:"lrsLoadReportingServerName,omitempty"` - Locality *internal.LocalityID `json:"locality,omitempty"` - ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` -} - -func parseConfig(c json.RawMessage) (*LBConfig, error) { - var cfg LBConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, err - } - if cfg.ClusterName == "" { - return nil, fmt.Errorf("required ClusterName is not set in %+v", cfg) - } - if cfg.Locality == nil { - return nil, fmt.Errorf("required Locality is not set in %+v", cfg) - } - return &cfg, nil -} diff --git a/xds/internal/balancer/lrs/config_test.go b/xds/internal/balancer/lrs/config_test.go deleted file mode 100644 index eaf902ac535d..000000000000 --- a/xds/internal/balancer/lrs/config_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package lrs - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer/roundrobin" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - xdsinternal "google.golang.org/grpc/xds/internal" -) - -const ( - testClusterName = "test-cluster" - testServiceName = "test-eds-service" - testLRSServerName = "test-lrs-name" -) - -func TestParseConfig(t *testing.T) { - tests := []struct { - name string - js string - want *LBConfig - wantErr bool - }{ - { - name: "no cluster name", - js: `{ - "edsServiceName": "test-eds-service", - "lrsLoadReportingServerName": "test-lrs-name", - "locality": { - "region": "test-region", - "zone": "test-zone", - "subZone": "test-sub-zone" - }, - "childPolicy":[{"round_robin":{}}] -} - `, - wantErr: true, - }, - { - name: "no locality", - js: `{ - "clusterName": "test-cluster", - "edsServiceName": "test-eds-service", - "lrsLoadReportingServerName": "test-lrs-name", - "childPolicy":[{"round_robin":{}}] -} - `, - wantErr: true, - }, - { - name: "good", - js: `{ - "clusterName": "test-cluster", - "edsServiceName": "test-eds-service", - "lrsLoadReportingServerName": "test-lrs-name", - "locality": { - "region": "test-region", - "zone": "test-zone", - "subZone": "test-sub-zone" - }, - "childPolicy":[{"round_robin":{}}] -} - `, - want: &LBConfig{ - ClusterName: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: testLRSServerName, - Locality: &xdsinternal.LocalityID{ - Region: "test-region", - Zone: "test-zone", - SubZone: "test-sub-zone", - }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - Config: nil, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := parseConfig([]byte(tt.js)) - if (err != nil) != tt.wantErr { - t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) - return - } - if diff := cmp.Diff(got, tt.want); diff != "" { - 
t.Errorf("parseConfig() got = %v, want %v, diff: %s", got, tt.want, diff) - } - }) - } -} diff --git a/xds/internal/balancer/lrs/logging.go b/xds/internal/balancer/lrs/logging.go deleted file mode 100644 index 602dac099597..000000000000 --- a/xds/internal/balancer/lrs/logging.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package lrs - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" - internalgrpclog "google.golang.org/grpc/internal/grpclog" -) - -const prefix = "[lrs-lb %p] " - -var logger = grpclog.Component("xds") - -func prefixLogger(p *lrsBalancer) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) -} diff --git a/xds/internal/balancer/lrs/picker.go b/xds/internal/balancer/lrs/picker.go deleted file mode 100644 index 1e4ad156e5b7..000000000000 --- a/xds/internal/balancer/lrs/picker.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package lrs - -import ( - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "google.golang.org/grpc/balancer" -) - -const ( - serverLoadCPUName = "cpu_utilization" - serverLoadMemoryName = "mem_utilization" -) - -// loadReporter wraps the methods from the loadStore that are used here. -type loadReporter interface { - CallStarted(locality string) - CallFinished(locality string, err error) - CallServerLoad(locality, name string, val float64) -} - -type loadReportPicker struct { - p balancer.Picker - - locality string - loadStore loadReporter -} - -func newLoadReportPicker(p balancer.Picker, id string, loadStore loadReporter) *loadReportPicker { - return &loadReportPicker{ - p: p, - locality: id, - loadStore: loadStore, - } -} - -func (lrp *loadReportPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - res, err := lrp.p.Pick(info) - if err != nil { - return res, err - } - - if lrp.loadStore == nil { - return res, err - } - - lrp.loadStore.CallStarted(lrp.locality) - oldDone := res.Done - res.Done = func(info balancer.DoneInfo) { - if oldDone != nil { - oldDone(info) - } - lrp.loadStore.CallFinished(lrp.locality, info.Err) - - load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) - if !ok { - return - } - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadCPUName, load.CpuUtilization) - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadMemoryName, load.MemUtilization) - for n, d := range load.RequestCost { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) - } - for n, d := range load.Utilization { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) - } - } - return res, err -} diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go index 351a13553e4e..3cd6f74df723 100644 --- 
a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go @@ -26,8 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - - _ "google.golang.org/grpc/xds/internal/balancer/lrs" // Register LRS balancer, so we can use it as child policy in the config tests. + "google.golang.org/grpc/xds/internal/balancer/priority" ) const ( @@ -35,23 +34,22 @@ const ( "targets": { "cluster_1" : { "weight":75, - "childPolicy":[{"lrs_experimental":{"clusterName":"cluster_1","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-1"}}}] + "childPolicy":[{"priority_experimental":{"priorities": ["child-1"], "children": {"child-1": {"config": [{"round_robin":{}}]}}}}] }, "cluster_2" : { "weight":25, - "childPolicy":[{"lrs_experimental":{"clusterName":"cluster_2","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-2"}}}] + "childPolicy":[{"priority_experimental":{"priorities": ["child-2"], "children": {"child-2": {"config": [{"round_robin":{}}]}}}}] } } }` - lrsBalancerName = "lrs_experimental" ) var ( - lrsConfigParser = balancer.Get(lrsBalancerName).(balancer.ConfigParser) - lrsConfigJSON1 = `{"clusterName":"cluster_1","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-1"}}` - lrsConfig1, _ = lrsConfigParser.ParseConfig([]byte(lrsConfigJSON1)) - lrsConfigJSON2 = `{"clusterName":"cluster_2","lrsLoadReportingServerName":"lrs.server","locality":{"zone":"test-zone-2"}}` - lrsConfig2, _ = lrsConfigParser.ParseConfig([]byte(lrsConfigJSON2)) + testConfigParser = balancer.Get(priority.Name).(balancer.ConfigParser) + testConfigJSON1 = `{"priorities": ["child-1"], "children": {"child-1": {"config": [{"round_robin":{}}]}}}` + testConfig1, _ = testConfigParser.ParseConfig([]byte(testConfigJSON1)) + testConfigJSON2 = `{"priorities": ["child-2"], 
"children": {"child-2": {"config": [{"round_robin":{}}]}}}` + testConfig2, _ = testConfigParser.ParseConfig([]byte(testConfigJSON2)) ) func Test_parseConfig(t *testing.T) { @@ -75,15 +73,15 @@ func Test_parseConfig(t *testing.T) { "cluster_1": { Weight: 75, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrsBalancerName, - Config: lrsConfig1, + Name: priority.Name, + Config: testConfig1, }, }, "cluster_2": { Weight: 25, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: lrsBalancerName, - Config: lrsConfig2, + Name: priority.Name, + Config: testConfig2, }, }, }, diff --git a/xds/internal/internal.go b/xds/internal/internal.go index e4284ee02e0c..0cccd3824101 100644 --- a/xds/internal/internal.go +++ b/xds/internal/internal.go @@ -22,6 +22,8 @@ package internal import ( "encoding/json" "fmt" + + "google.golang.org/grpc/resolver" ) // LocalityID is xds.Locality without XXX fields, so it can be used as map @@ -53,3 +55,19 @@ func LocalityIDFromString(s string) (ret LocalityID, _ error) { } return ret, nil } + +type localityKeyType string + +const localityKey = localityKeyType("grpc.xds.internal.address.locality") + +// GetLocalityID returns the locality ID of addr. +func GetLocalityID(addr resolver.Address) LocalityID { + path, _ := addr.Attributes.Value(localityKey).(LocalityID) + return path +} + +// SetLocalityID sets locality ID in addr to l. 
+func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(localityKey, l) + return addr +} From 9b2fa9f8d3caed4aae28242f6ac7cd27c790806c Mon Sep 17 00:00:00 2001 From: Aliaksandr Mianzhynski Date: Fri, 25 Jun 2021 08:11:47 +0300 Subject: [PATCH 148/998] server: improve chained interceptors performance (#4524) --- server.go | 48 +++++++++++++++++++++++------------------- server_test.go | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 22 deletions(-) diff --git a/server.go b/server.go index e72029bf1472..de1708306417 100644 --- a/server.go +++ b/server.go @@ -1115,22 +1115,24 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(ctx context.Context, req interface{}) (interface{}, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + var i int + var next UnaryHandler + next = func(ctx context.Context, req interface{}) (interface{}, error) { + if i == len(interceptors)-1 { + return interceptors[i](ctx, req, info, handler) + } + i++ + 
return interceptors[i-1](ctx, req, info, next) + } + return next(ctx, req) } } @@ -1398,22 +1400,24 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + var i int + var next StreamHandler + next = func(srv interface{}, ss ServerStream) error { + if i == len(interceptors)-1 { + return interceptors[i](srv, ss, info, handler) + } + i++ + return interceptors[i-1](srv, ss, info, next) + } + return next(srv, ss) } } diff --git a/server_test.go b/server_test.go index fcfde30706c3..b15939160144 100644 --- a/server_test.go +++ b/server_test.go @@ -22,6 +22,7 @@ import ( "context" "net" "reflect" + "strconv" "strings" "testing" "time" @@ -130,3 +131,59 @@ func (s) TestStreamContext(t *testing.T) { t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, stream, ok, expectedStream) } } + +func BenchmarkChainUnaryInterceptor(b *testing.B) { + for _, n := range []int{1, 3, 5, 10} { + n := n + b.Run(strconv.Itoa(n), func(b *testing.B) { + interceptors := 
make([]UnaryServerInterceptor, 0, n) + for i := 0; i < n; i++ { + interceptors = append(interceptors, func( + ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler, + ) (interface{}, error) { + return handler(ctx, req) + }) + } + + s := NewServer(ChainUnaryInterceptor(interceptors...)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := s.opts.unaryInt(context.Background(), nil, nil, + func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, nil + }, + ); err != nil { + b.Fatal(err) + } + } + }) + } +} + +func BenchmarkChainStreamInterceptor(b *testing.B) { + for _, n := range []int{1, 3, 5, 10} { + n := n + b.Run(strconv.Itoa(n), func(b *testing.B) { + interceptors := make([]StreamServerInterceptor, 0, n) + for i := 0; i < n; i++ { + interceptors = append(interceptors, func( + srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler, + ) error { + return handler(srv, ss) + }) + } + + s := NewServer(ChainStreamInterceptor(interceptors...)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := s.opts.streamInt(nil, nil, nil, func(srv interface{}, stream ServerStream) error { + return nil + }); err != nil { + b.Fatal(err) + } + } + }) + } +} From 83f9def5feb388c4fd7e6586bd55cf6bf6d46a01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicent=20Mart=C3=AD?= <42793+vmg@users.noreply.github.com> Date: Mon, 28 Jun 2021 18:51:21 +0200 Subject: [PATCH 149/998] internal/transport: do not mask ConnectionError (#4561) --- internal/transport/http2_client.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 5b2493130fff..9227b80150ff 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -885,12 +885,18 @@ func (t *http2Client) Close(err error) { // Append info about previous goaways if there were any, since 
this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status if len(goAwayDebugMessage) > 0 { - err = fmt.Errorf("closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. for _, s := range streams { - t.closeStream(s, err, false, http2.ErrCodeNo, status.New(codes.Unavailable, err.Error()), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ From b3f274c2babaeab7802d98e21a66209846437ff5 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 29 Jun 2021 11:45:16 -0700 Subject: [PATCH 150/998] xds/cluster_impl: fix cluster_impl not correctly starting LoadReport stream (#4566) --- .../balancer/clusterimpl/balancer_test.go | 113 +++++++++++++++++- .../balancer/clusterimpl/clusterimpl.go | 50 ++++++-- xds/internal/testutils/fakeclient/client.go | 18 ++- 3 files changed, 165 insertions(+), 16 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 09194a54d0e1..e583b647473d 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -43,10 +43,12 @@ import ( ) const ( - defaultTestTimeout = 1 * time.Second - testClusterName = "test-cluster" - testServiceName = "test-eds-service" - testLRSServerName = "test-lrs-name" + defaultTestTimeout = 1 * time.Second + defaultShortTestTimeout = 100 * time.Microsecond + + testClusterName = "test-cluster" + testServiceName = "test-eds-service" + testLRSServerName = "test-lrs-name" ) var ( @@ -649,6 +651,109 @@ func TestLoadReporting(t *testing.T) { if 
reqStats.InProgress != 0 { t.Errorf("got inProgress %v, want %v", reqStats.InProgress, 0) } + + b.Close() + if err := xdsC.WaitForCancelReportLoad(ctx); err != nil { + t.Fatalf("unexpected error waiting form load report to be canceled: %v", err) + } +} + +// TestUpdateLRSServer covers the cases +// - the init config specifies "" as the LRS server +// - config modifies LRS server to a different string +// - config sets LRS server to nil to stop load reporting +func TestUpdateLRSServer(t *testing.T) { + var testLocality = xdsinternal.LocalityID{ + Region: "test-region", + Zone: "test-zone", + SubZone: "test-sub-zone", + } + + xdsC := fakeclient.NewClient() + defer xdsC.Close() + + builder := balancer.Get(Name) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + addrs := make([]resolver.Address, len(testBackendAddrs)) + for i, a := range testBackendAddrs { + addrs[i] = xdsinternal.SetLocalityID(a, testLocality) + } + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: newString(""), + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + got, err := xdsC.WaitForReportLoad(ctx) + if err != nil { + t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) + } + if got.Server != "" { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, "") + } + + // Update LRS server to a different name. 
+ if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: newString(testLRSServerName), + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + if err := xdsC.WaitForCancelReportLoad(ctx); err != nil { + t.Fatalf("unexpected error waiting form load report to be canceled: %v", err) + } + got2, err2 := xdsC.WaitForReportLoad(ctx) + if err2 != nil { + t.Fatalf("xdsClient.ReportLoad failed with error: %v", err2) + } + if got2.Server != testLRSServerName { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got2.Server, testLRSServerName) + } + + // Update LRS server to nil, to disable LRS. + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServerName: nil, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + if err := xdsC.WaitForCancelReportLoad(ctx); err != nil { + t.Fatalf("unexpected error waiting form load report to be canceled: %v", err) + } + + shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultShortTestTimeout) + defer shortCancel() + if s, err := xdsC.WaitForReportLoad(shortCtx); err != context.DeadlineExceeded { + t.Fatalf("unexpected load report to server: %q", s) + } } func assertString(f func() (string, error)) string { diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 32c97757ded0..1b49dccbc633 100644 --- 
a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -104,7 +104,7 @@ type clusterImplBalancer struct { childLB balancer.Balancer cancelLoadReport func() edsServiceName string - lrsServerName string + lrsServerName *string loadWrapper *loadstore.Wrapper clusterNameMu sync.Mutex @@ -165,22 +165,48 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { b.loadWrapper.UpdateClusterAndService(clusterName, b.edsServiceName) } + var ( + stopOldLoadReport bool + startNewLoadReport bool + ) + // Check if it's necessary to restart load report. - var newLRSServerName string - if newConfig.LoadReportingServerName != nil { - newLRSServerName = *newConfig.LoadReportingServerName - } - if b.lrsServerName != newLRSServerName { - // LoadReportingServerName is different, load should be report to a - // different server, restart. - b.lrsServerName = newLRSServerName + if b.lrsServerName == nil { + if newConfig.LoadReportingServerName != nil { + // Old is nil, new is not nil, start new LRS. + b.lrsServerName = newConfig.LoadReportingServerName + startNewLoadReport = true + } + // Old is nil, new is nil, do nothing. + } else if newConfig.LoadReportingServerName == nil { + // Old is not nil, new is nil, stop old, don't start new. + b.lrsServerName = newConfig.LoadReportingServerName + stopOldLoadReport = true + } else { + // Old is not nil, new is not nil, compare string values, if + // different, stop old and start new. + if *b.lrsServerName != *newConfig.LoadReportingServerName { + b.lrsServerName = newConfig.LoadReportingServerName + stopOldLoadReport = true + startNewLoadReport = true + } + } + + if stopOldLoadReport { if b.cancelLoadReport != nil { b.cancelLoadReport() b.cancelLoadReport = nil + if !startNewLoadReport { + // If a new LRS stream will be started later, no need to update + // it to nil here. 
+ b.loadWrapper.UpdateLoadStore(nil) + } } + } + if startNewLoadReport { var loadStore *load.Store if b.xdsClient != nil { - loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(b.lrsServerName) + loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(*b.lrsServerName) } b.loadWrapper.UpdateLoadStore(loadStore) } @@ -492,6 +518,10 @@ func (b *clusterImplBalancer) run() { } b.mu.Unlock() case <-b.closed.Done(): + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } return } } diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 2538b59255cf..f3cfb9401e38 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -47,6 +47,7 @@ type Client struct { cdsCancelCh *testutils.Channel edsCancelCh *testutils.Channel loadReportCh *testutils.Channel + lrsCancelCh *testutils.Channel loadStore *load.Store bootstrapCfg *bootstrap.Config @@ -220,7 +221,16 @@ type ReportLoadArgs struct { // ReportLoad starts reporting load about clusterName to server. func (xdsC *Client) ReportLoad(server string) (loadStore *load.Store, cancel func()) { xdsC.loadReportCh.Send(ReportLoadArgs{Server: server}) - return xdsC.loadStore, func() {} + return xdsC.loadStore, func() { + xdsC.lrsCancelCh.Send(nil) + } +} + +// WaitForCancelReportLoad waits for a load report to be cancelled and returns +// context.DeadlineExceeded otherwise. +func (xdsC *Client) WaitForCancelReportLoad(ctx context.Context) error { + _, err := xdsC.lrsCancelCh.Receive(ctx) + return err } // LoadStore returns the underlying load data store. @@ -232,7 +242,10 @@ func (xdsC *Client) LoadStore() *load.Store { // returns the arguments passed to it. 
func (xdsC *Client) WaitForReportLoad(ctx context.Context) (ReportLoadArgs, error) { val, err := xdsC.loadReportCh.Receive(ctx) - return val.(ReportLoadArgs), err + if err != nil { + return ReportLoadArgs{}, err + } + return val.(ReportLoadArgs), nil } // Close fires xdsC.Closed, indicating it was called. @@ -275,6 +288,7 @@ func NewClientWithName(name string) *Client { cdsCancelCh: testutils.NewChannelWithSize(10), edsCancelCh: testutils.NewChannel(), loadReportCh: testutils.NewChannel(), + lrsCancelCh: testutils.NewChannel(), loadStore: load.NewStore(), cdsCbs: make(map[string]func(xdsclient.ClusterUpdate, error)), Closed: grpcsync.NewEvent(), From 52546c5d89b7e362064f2a21c9d10803b44af15f Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Wed, 30 Jun 2021 11:14:57 -0700 Subject: [PATCH 151/998] authorization: translate SDK policy to Envoy RBAC proto (#4523) * Translates SDK authorization policy to Envoy RBAC proto. --- authz/rbac_translator.go | 301 ++++++++++++++++++++++++++++++++++ authz/rbac_translator_test.go | 228 +++++++++++++++++++++++++ 2 files changed, 529 insertions(+) create mode 100644 authz/rbac_translator.go create mode 100644 authz/rbac_translator_test.go diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go new file mode 100644 index 000000000000..8dc764896053 --- /dev/null +++ b/authz/rbac_translator.go @@ -0,0 +1,301 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package authz exposes methods to manage authorization within gRPC. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed +// in a later release. +package authz + +import ( + "encoding/json" + "fmt" + "strings" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" +) + +type header struct { + Key string + Values []string +} + +type peer struct { + Principals []string +} + +type request struct { + Paths []string + Headers []header +} + +type rule struct { + Name string + Source peer + Request request +} + +// Represents the SDK authorization policy provided by user. +type authorizationPolicy struct { + Name string + DenyRules []rule `json:"deny_rules"` + AllowRules []rule `json:"allow_rules"` +} + +func principalOr(principals []*v3rbacpb.Principal) *v3rbacpb.Principal { + return &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_OrIds{ + OrIds: &v3rbacpb.Principal_Set{ + Ids: principals, + }, + }, + } +} + +func permissionOr(permission []*v3rbacpb.Permission) *v3rbacpb.Permission { + return &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_OrRules{ + OrRules: &v3rbacpb.Permission_Set{ + Rules: permission, + }, + }, + } +} + +func permissionAnd(permission []*v3rbacpb.Permission) *v3rbacpb.Permission { + return &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_AndRules{ + AndRules: &v3rbacpb.Permission_Set{ + Rules: permission, + }, + }, + } +} + +func getStringMatcher(value string) *v3matcherpb.StringMatcher { + switch { + case value == "*": + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{}, + } + case strings.HasSuffix(value, "*"): + prefix := 
strings.TrimSuffix(value, "*") + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: prefix}, + } + case strings.HasPrefix(value, "*"): + suffix := strings.TrimPrefix(value, "*") + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: suffix}, + } + default: + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: value}, + } + } +} + +func getHeaderMatcher(key, value string) *v3routepb.HeaderMatcher { + switch { + case value == "*": + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{}, + } + case strings.HasSuffix(value, "*"): + prefix := strings.TrimSuffix(value, "*") + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: prefix}, + } + case strings.HasPrefix(value, "*"): + suffix := strings.TrimPrefix(value, "*") + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: suffix}, + } + default: + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: value}, + } + } +} + +func parsePrincipalNames(principalNames []string) []*v3rbacpb.Principal { + var ps []*v3rbacpb.Principal + for _, principalName := range principalNames { + newPrincipalName := &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{ + PrincipalName: getStringMatcher(principalName), + }, + }} + ps = append(ps, newPrincipalName) + } + return ps +} + +func parsePeer(source peer) (*v3rbacpb.Principal, error) { + if len(source.Principals) > 0 { + return principalOr(parsePrincipalNames(source.Principals)), nil + } + return &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_Any{ + Any: true, + }, + }, nil +} + +func parsePaths(paths []string) []*v3rbacpb.Permission { + var 
ps []*v3rbacpb.Permission + for _, path := range paths { + newPath := &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{ + Rule: &v3matcherpb.PathMatcher_Path{Path: getStringMatcher(path)}}}} + ps = append(ps, newPath) + } + return ps +} + +func parseHeaderValues(key string, values []string) []*v3rbacpb.Permission { + var vs []*v3rbacpb.Permission + for _, value := range values { + newHeader := &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_Header{ + Header: getHeaderMatcher(key, value)}} + vs = append(vs, newHeader) + } + return vs +} + +var unsupportedHeaders = map[string]bool{ + "host": true, + "connection": true, + "keep-alive": true, + "proxy-authenticate": true, + "proxy-authorization": true, + "te": true, + "trailer": true, + "transfer-encoding": true, + "upgrade": true, +} + +func unsupportedHeader(key string) bool { + return key[0] == ':' || strings.HasPrefix(key, "grpc-") || unsupportedHeaders[key] +} + +func parseHeaders(headers []header) ([]*v3rbacpb.Permission, error) { + var hs []*v3rbacpb.Permission + for i, header := range headers { + if header.Key == "" { + return nil, fmt.Errorf(`"headers" %d: "key" is not present`, i) + } + header.Key = strings.ToLower(header.Key) + if unsupportedHeader(header.Key) { + return nil, fmt.Errorf(`"headers" %d: unsupported "key" %s`, i, header.Key) + } + if len(header.Values) == 0 { + return nil, fmt.Errorf(`"headers" %d: "values" is not present`, i) + } + values := parseHeaderValues(header.Key, header.Values) + hs = append(hs, permissionOr(values)) + } + return hs, nil +} + +func parseRequest(request request) (*v3rbacpb.Permission, error) { + var and []*v3rbacpb.Permission + if len(request.Paths) > 0 { + and = append(and, permissionOr(parsePaths(request.Paths))) + } + if len(request.Headers) > 0 { + headers, err := parseHeaders(request.Headers) + if err != nil { + return nil, err + } + and = append(and, permissionAnd(headers)) + } + if len(and) > 0 { + return 
permissionAnd(and), nil + } + return &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_Any{ + Any: true, + }, + }, nil +} + +func parseRules(rules []rule, prefixName string) (map[string]*v3rbacpb.Policy, error) { + policies := make(map[string]*v3rbacpb.Policy) + for i, rule := range rules { + if rule.Name == "" { + return policies, fmt.Errorf(`%d: "name" is not present`, i) + } + principal, err := parsePeer(rule.Source) + if err != nil { + return nil, fmt.Errorf("%d: %v", i, err) + } + permission, err := parseRequest(rule.Request) + if err != nil { + return nil, fmt.Errorf("%d: %v", i, err) + } + policyName := prefixName + "_" + rule.Name + policies[policyName] = &v3rbacpb.Policy{ + Principals: []*v3rbacpb.Principal{principal}, + Permissions: []*v3rbacpb.Permission{permission}, + } + } + return policies, nil +} + +// translatePolicy translates SDK authorization policy in JSON format to two +// Envoy RBAC polices (deny and allow policy). If the policy cannot be parsed +// or is invalid, an error will be returned. 
+func translatePolicy(policyStr string) (*v3rbacpb.RBAC, *v3rbacpb.RBAC, error) { + var policy authorizationPolicy + if err := json.Unmarshal([]byte(policyStr), &policy); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal policy: %v", err) + } + if policy.Name == "" { + return nil, nil, fmt.Errorf(`"name" is not present`) + } + if len(policy.AllowRules) == 0 { + return nil, nil, fmt.Errorf(`"allow_rules" is not present`) + } + allowPolicies, err := parseRules(policy.AllowRules, policy.Name) + if err != nil { + return nil, nil, fmt.Errorf(`"allow_rules" %v`, err) + } + allowRBAC := &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: allowPolicies} + var denyRBAC *v3rbacpb.RBAC + if len(policy.DenyRules) > 0 { + denyPolicies, err := parseRules(policy.DenyRules, policy.Name) + if err != nil { + return nil, nil, fmt.Errorf(`"deny_rules" %v`, err) + } + denyRBAC = &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_DENY, + Policies: denyPolicies, + } + } + return denyRBAC, allowRBAC, nil +} diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go new file mode 100644 index 000000000000..425cae85b037 --- /dev/null +++ b/authz/rbac_translator_test.go @@ -0,0 +1,228 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package authz + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/testing/protocmp" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" +) + +func TestTranslatePolicy(t *testing.T) { + tests := map[string]struct { + authzPolicy string + wantErr string + wantDenyPolicy *v3rbacpb.RBAC + wantAllowPolicy *v3rbacpb.RBAC + }{ + "valid policy": { + authzPolicy: `{ + "name": "authz", + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc", + "spiffe://bar*", + "*baz", + "spiffe://abc.*.com" + ] + } + }], + "allow_rules": [ + { + "name": "allow_policy_1", + "source": { + "principals":["*"] + }, + "request": { + "paths": ["path-foo*"] + } + }, + { + "name": "allow_policy_2", + "request": { + "paths": [ + "path-bar", + "*baz" + ], + "headers": [ + { + "key": "key-1", + "values": ["foo", "*bar"] + }, + { + "key": "key-2", + "values": ["baz*"] + } + ] + } + }] + }`, + wantDenyPolicy: &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_DENY, Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "spiffe://bar"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: 
&v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://abc.*.com"}}}}}, + }}}}}, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}}, + }, + }}, + wantAllowPolicy: &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: ""}}}}}, + }}}}}, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "path-foo"}}}}}}, + }}}}}}}}}, + }, + "authz_allow_policy_2": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}}, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: 
&v3matcherpb.StringMatcher_Exact{Exact: "path-bar"}}}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}}}}}}, + }}}}, + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "foo"}}}}, + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "bar"}}}}, + }}}}, + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-2", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "baz"}}}}, + }}}}}}}}}}}}}, + }, + }}, + }, + "missing name field": { + authzPolicy: `{}`, + wantErr: `"name" is not present`, + }, + "invalid field type": { + authzPolicy: `{"name": 123}`, + wantErr: "failed to unmarshal policy", + }, + "missing allow rules field": { + authzPolicy: `{"name": "authz-foo"}`, + wantErr: `"allow_rules" is not present`, + wantDenyPolicy: nil, + wantAllowPolicy: nil, + }, + "missing rule name field": { + authzPolicy: `{ + "name": "authz-foo", + "allow_rules": [{}] + }`, + wantErr: `"allow_rules" 0: "name" is not present`, + }, + "missing header key": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [{ + "name": "allow_policy_1", + "request": {"headers":[{"key":"key-a", "values": ["value-a"]}, {}]} + }] + }`, + wantErr: `"allow_rules" 0: "headers" 1: "key" is not present`, + }, + "missing header values": { + authzPolicy: `{ + "name": 
"authz", + "allow_rules": [{ + "name": "allow_policy_1", + "request": {"headers":[{"key":"key-a"}]} + }] + }`, + wantErr: `"allow_rules" 0: "headers" 0: "values" is not present`, + }, + "unsupported header": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [{ + "name": "allow_policy_1", + "request": {"headers":[{"key":":method", "values":["GET"]}]} + }] + }`, + wantErr: `"allow_rules" 0: "headers" 0: unsupported "key" :method`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotDenyPolicy, gotAllowPolicy, gotErr := translatePolicy(test.authzPolicy) + if gotErr != nil && !strings.HasPrefix(gotErr.Error(), test.wantErr) { + t.Fatalf("unexpected error\nwant:%v\ngot:%v", test.wantErr, gotErr) + } + if diff := cmp.Diff(gotDenyPolicy, test.wantDenyPolicy, protocmp.Transform()); diff != "" { + t.Fatalf("unexpected deny policy\ndiff (-want +got):\n%s", diff) + } + if diff := cmp.Diff(gotAllowPolicy, test.wantAllowPolicy, protocmp.Transform()); diff != "" { + t.Fatalf("unexpected allow policy\ndiff (-want +got):\n%s", diff) + } + }) + } +} From dd589923e1a17f5cc7c667359ae12d56bc1d3113 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 2 Jul 2021 16:21:46 -0700 Subject: [PATCH 152/998] clientconn: stop automatically connecting to idle subchannels returned by picker (#4579) --- clientconn.go | 24 ++++++------------------ picker_wrapper.go | 2 +- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/clientconn.go b/clientconn.go index 5cef39295bdc..b2bccfed136e 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1429,26 +1429,14 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. 
+func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - return nil, false + return nil } // tearDown starts to tear down the addrConn. diff --git a/picker_wrapper.go b/picker_wrapper.go index a58174b6f436..0878ada9dbb2 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -147,7 +147,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } From 91e0aeb192456225adf27966d04ada4cf8599915 Mon Sep 17 00:00:00 2001 From: Jille Timmermans Date: Thu, 8 Jul 2021 01:37:57 +0200 Subject: [PATCH 153/998] binarylog: Don't leak the flusher goroutine when closing a Sink (#4583) time.Ticker.Stop() doesn't close the ticker channel, so we need to signal the goroutine to die some other way --- internal/binarylog/sink.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index 7d7a3056b71e..f7c25104da5c 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -92,6 +92,7 @@ type bufferedSink struct { writeStartOnce sync.Once writeTicker *time.Ticker + done chan struct{} } func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { @@ -113,7 +114,12 @@ const ( func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + 
select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -127,6 +133,7 @@ func (fs *bufferedSink) Close() error { if fs.writeTicker != nil { fs.writeTicker.Stop() } + close(fs.done) fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -155,5 +162,6 @@ func NewBufferedSink(o io.WriteCloser) Sink { closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } From afad37618961fd1123d6582661895c6c533852ea Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 8 Jul 2021 09:20:15 -0700 Subject: [PATCH 154/998] Fix bootstrap format in comment (#4586) --- xds/internal/xdsclient/bootstrap/bootstrap.go | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index dcf030631603..fa229d99593e 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -127,16 +127,18 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // // The format of the bootstrap file will be as follows: // { -// "xds_server": { -// "server_uri": , -// "channel_creds": [ -// { -// "type": , -// "config": -// } -// ], -// "server_features": [ ... ], -// }, +// "xds_servers": [ +// { +// "server_uri": , +// "channel_creds": [ +// { +// "type": , +// "config": +// } +// ], +// "server_features": [ ... 
], +// } +// ], // "node": , // "certificate_providers" : { // "default": { From 51e780ce00959f0a2ba16ca7c65f3b99a91c3c61 Mon Sep 17 00:00:00 2001 From: Jille Timmermans Date: Thu, 8 Jul 2021 19:06:11 +0200 Subject: [PATCH 155/998] internal/binarylog: Use defer to unlock mutexes (#4590) --- internal/binarylog/sink.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index f7c25104da5c..b6881fffd594 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -99,11 +99,10 @@ func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { // Start the write loop when Write is called. fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() + defer fs.mu.Unlock() if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -135,6 +134,7 @@ func (fs *bufferedSink) Close() error { } close(fs.done) fs.mu.Lock() + defer fs.mu.Unlock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } @@ -144,7 +144,6 @@ func (fs *bufferedSink) Close() error { if err := fs.out.Close(); err != nil { grpclogLogger.Warningf("failed to close the Sink: %v", err) } - fs.mu.Unlock() return nil } From 30dfb4b933a50fd366d7ed36ed4f71dbba2d382e Mon Sep 17 00:00:00 2001 From: Jille Timmermans Date: Thu, 8 Jul 2021 19:06:55 +0200 Subject: [PATCH 156/998] binarylog: Don't continue after failing to marshal the proto (#4582) --- internal/binarylog/sink.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index b6881fffd594..2ae71da3e880 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -69,7 +69,8 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + 
grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) From ebfe3be62a82434bc83fd7b36410141a603a96be Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 12 Jul 2021 16:42:02 -0700 Subject: [PATCH 157/998] cluster_resolver: implement resource resolver to resolve EDS and DNS (#4531) --- resolver/manual/manual.go | 20 +- .../clusterresolver/balancerconfig/type.go | 42 + .../clusterresolver/clusterresolver_test.go | 20 +- .../balancer/clusterresolver/eds_impl_test.go | 40 +- .../balancer/clusterresolver/priority_test.go | 44 +- .../clusterresolver/resource_resolver.go | 248 +++++ .../clusterresolver/resource_resolver_dns.go | 114 +++ .../clusterresolver/resource_resolver_test.go | 873 ++++++++++++++++++ xds/internal/testutils/fakeclient/client.go | 34 +- 9 files changed, 1371 insertions(+), 64 deletions(-) create mode 100644 xds/internal/balancer/clusterresolver/resource_resolver.go create mode 100644 xds/internal/balancer/clusterresolver/resource_resolver_dns.go create mode 100644 xds/internal/balancer/clusterresolver/resource_resolver_test.go diff --git a/resolver/manual/manual.go b/resolver/manual/manual.go index 3679d702ab96..f6e7b5ae3581 100644 --- a/resolver/manual/manual.go +++ b/resolver/manual/manual.go @@ -27,7 +27,9 @@ import ( // NewBuilderWithScheme creates a new test resolver builder with the given scheme. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, scheme: scheme, } } @@ -35,11 +37,17 @@ func NewBuilderWithScheme(scheme string) *Resolver { // Resolver is also a resolver builder. // It's build() function always returns itself. type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. 
Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) // ResolveNowCallback is called when the ResolveNow method is called on the // resolver. Must not be nil. Must not be changed after the resolver may // be built. ResolveNowCallback func(resolver.ResolveNowOptions) - scheme string + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string // Fields actually belong to the resolver. CC resolver.ClientConn @@ -54,6 +62,7 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) r.CC = cc if r.bootstrapState != nil { r.UpdateState(*r.bootstrapState) @@ -72,9 +81,16 @@ func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { } // Close is a noop for Resolver. -func (*Resolver) Close() {} +func (r *Resolver) Close() { + r.CloseCallback() +} // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { r.CC.UpdateState(s) } + +// ReportError calls CC.ReportError. +func (r *Resolver) ReportError(err error) { + r.CC.ReportError(err) +} diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/type.go b/xds/internal/balancer/clusterresolver/balancerconfig/type.go index 2f9ba68fe59d..3e47b8234e33 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/type.go +++ b/xds/internal/balancer/clusterresolver/balancerconfig/type.go @@ -95,4 +95,46 @@ type DiscoveryMechanism struct { // This is used for EDS watch if set. If unset, Cluster is used for EDS // watch. EDSServiceName string `json:"edsServiceName,omitempty"` + // DNSHostname is the DNS name to resolve in "host:port" form. 
For type + // LOGICAL_DNS only. + DNSHostname string `json:"dnsHostname,omitempty"` +} + +// Equal returns whether the DiscoveryMechanism is the same with the parameter. +func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + switch { + case dm.Cluster != b.Cluster: + return false + case !equalStringP(dm.LoadReportingServerName, b.LoadReportingServerName): + return false + case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): + return false + case dm.Type != b.Type: + return false + case dm.EDSServiceName != b.EDSServiceName: + return false + case dm.DNSHostname != b.DNSHostname: + return false + } + return true +} + +func equalStringP(a, b *string) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func equalUint32P(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b } diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index ea8ac4e419f4..8f3644d08bed 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -231,7 +231,7 @@ func (s) TestSubConnStateChange(t *testing.T) { if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } - xdsC.InvokeWatchEDSCallback(defaultEndpointsUpdate, nil) + xdsC.InvokeWatchEDSCallback("", defaultEndpointsUpdate, nil) edsLB, err := waitForNewChildLB(ctx, edsLBCh) if err != nil { t.Fatal(err) @@ -276,7 +276,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) edsLB, err := 
waitForNewChildLB(ctx, edsLBCh) if err != nil { t.Fatal(err) @@ -286,11 +286,11 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { } connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, connectionErr) + xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, connectionErr) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("watch was canceled, want not canceled (timeout error)") } @@ -304,13 +304,13 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { } resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, resourceErr) + xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, resourceErr) // Even if error is resource not found, watch shouldn't be canceled, because // this is an EDS resource removed (and xds client actually never sends this // error, but we still handles it). 
sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("watch was canceled, want not canceled (timeout error)") } if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { @@ -365,7 +365,7 @@ func (s) TestErrorFromResolver(t *testing.T) { if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) edsLB, err := waitForNewChildLB(ctx, edsLBCh) if err != nil { t.Fatal(err) @@ -379,7 +379,7 @@ func (s) TestErrorFromResolver(t *testing.T) { sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("watch was canceled, want not canceled (timeout error)") } @@ -394,7 +394,7 @@ func (s) TestErrorFromResolver(t *testing.T) { resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") edsB.ResolverError(resourceErr) - if err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { t.Fatalf("want watch to be canceled, waitForCancel failed: %v", err) } if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { @@ -423,7 +423,7 @@ func verifyExpectedRequests(ctx context.Context, fc *fakeclient.Client, resource for _, name := range resourceNames { if name == "" { // ResourceName empty string indicates a cancel. 
- if err := fc.WaitForCancelEDSWatch(ctx); err != nil { + if _, err := fc.WaitForCancelEDSWatch(ctx); err != nil { return fmt.Errorf("timed out when expecting resource %q", name) } continue diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index f0eae0afc3da..9a41fa9e2b33 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -103,7 +103,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // One locality with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) @@ -117,7 +117,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // The same locality, add one more backend. clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) sc2 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) @@ -131,7 +131,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // The same locality, delete first backend. 
clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { @@ -147,7 +147,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // The same locality, replace backend. clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab4.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab4.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab4.Build()), nil) sc3 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) @@ -166,7 +166,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // The same locality, different drop rate, dropping 50%. clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], map[string]uint32{"test-drop": 50}) clab5.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab5.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab5.Build()), nil) // Picks with drops. if err := testPickerFromCh(cc.NewPickerCh, func(p balancer.Picker) error { @@ -188,7 +188,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // The same locality, remove drops. clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab6.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab6.Build()), nil) // Pick without drops. 
if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3}); err != nil { @@ -209,7 +209,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { // Two localities, each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -218,7 +218,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { // locality. Otherwise the test is flaky because of a map is used in EDS to // keep localities. clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc2 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -233,7 +233,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) sc3 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) @@ -248,7 +248,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { clab3 := 
testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab3.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { @@ -265,7 +265,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab4.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab4.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab4.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab4.Build()), nil) sc4 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) @@ -284,7 +284,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab5.AddLocality(testSubZones[1], 2, 0, testEndpointAddrs[1:2], nil) clab5.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab5.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab5.Build()), nil) // Test pick with two subconns different locality weight. 
// @@ -299,7 +299,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[1], 0, 0, testEndpointAddrs[1:2], nil) clab6.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab6.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab6.Build()), nil) // Changing weight of locality[1] to 0 caused it to be removed. It's subconn // should also be removed. @@ -349,7 +349,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { corepb.HealthStatus_DEGRADED, }, }) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) var ( readySCs []balancer.SubConn @@ -406,7 +406,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { defer func() { balancergroup.DefaultSubBalancerCloseTimeout = oldCacheTimeout }() // The first update is an empty update. - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { t.Fatal(err) @@ -415,7 +415,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { // One locality with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) @@ -426,7 +426,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { t.Fatal(err) } - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { t.Fatal(err) @@ -442,7 +442,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) // Handle another update with priorities and localities. - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc2 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) @@ -484,7 +484,7 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) for i := 0; i < 2; i++ { sc := <-cc.NewSubConnCh @@ -579,7 +579,7 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { // One locality with one backend. 
clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 711c3eeed0d3..a4c6d5b1c658 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -44,7 +44,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { @@ -66,7 +66,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) select { case <-cc.NewPickerCh: @@ -82,7 +82,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) 
clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) select { case <-cc.NewPickerCh: @@ -107,7 +107,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { @@ -144,7 +144,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) select { case <-cc.NewPickerCh: @@ -175,7 +175,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) // p2 SubConns are removed. 
scToRemove := <-cc.RemoveSubConnCh @@ -200,7 +200,7 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -227,7 +227,7 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -254,7 +254,7 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -344,7 +344,7 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { clab1 := 
testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -387,7 +387,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab0.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab0.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -436,7 +436,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) clab1.AddLocality(testSubZones[3], 1, 1, testEndpointAddrs[3:4], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -484,7 +484,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) 
clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab0.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab0.Build()), nil) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -500,7 +500,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { // Remove all priorities. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) // p0 subconn should be removed. scToRemove := <-cc.RemoveSubConnCh <-cc.RemoveSubConnCh // Drain the duplicate subconn removed. @@ -519,7 +519,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[3:4], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) addrs01 := <-cc.NewSubConnAddrsCh if got, want := addrs01[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -547,7 +547,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { // Remove p1 from EDS, to fallback to p0. clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab3.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) // p1 subconn should be removed. 
scToRemove1 := <-cc.RemoveSubConnCh @@ -591,7 +591,7 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -611,7 +611,7 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, nil, nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) // p0 will remove the subconn, and ClientConn will send a sc update to // shutdown. 
scToRemove := <-cc.RemoveSubConnCh @@ -642,7 +642,7 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -664,7 +664,7 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { Health: []corepb.HealthStatus{corepb.HealthStatus_UNHEALTHY}, }) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) // p0 will remove the subconn, and ClientConn will send a sc update to // transient failure. scToRemove := <-cc.RemoveSubConnCh @@ -702,10 +702,10 @@ func (s) TestEDSPriority_FirstPriorityRemoved(t *testing.T) { // One localities, with priorities [0], each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab1.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) // Remove the only localities. 
clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - xdsC.InvokeWatchEDSCallback(parseEDSRespProtoForTesting(clab2.Build()), nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if err := cc.WaitForErrPicker(ctx); err != nil { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go new file mode 100644 index 000000000000..29aed0e72f4a --- /dev/null +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -0,0 +1,248 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" + "google.golang.org/grpc/xds/internal/xdsclient" +) + +// resourceUpdate is a combined update from all the resources, in the order of +// priority. For example, it can be {EDS, EDS, DNS}. +type resourceUpdate struct { + p []balancerconfig.PriorityConfig + err error +} + +type discoveryMechanism interface { + lastUpdate() (interface{}, bool) + resolveNow() + stop() +} + +// discoveryMechanismKey is {type+resource_name}, it's used as the map key, so +// that the same resource resolver can be reused (e.g. 
when there are two +// mechanisms, both for the same EDS resource, but has different circuit +// breaking config. +type discoveryMechanismKey struct { + typ balancerconfig.DiscoveryMechanismType + name string +} + +// resolverMechanismTuple is needed to keep the resolver and the discovery +// mechanism together, because resolvers can be shared. And we need the +// mechanism for fields like circuit breaking, LRS etc when generating the +// balancer config. +type resolverMechanismTuple struct { + dm balancerconfig.DiscoveryMechanism + dmKey discoveryMechanismKey + r discoveryMechanism +} + +type resourceResolver struct { + parent *clusterResolverBalancer + updateChannel chan *resourceUpdate + + // mu protects the slice and map, and content of the resolvers in the slice. + mu sync.Mutex + mechanisms []balancerconfig.DiscoveryMechanism + children []resolverMechanismTuple + childrenMap map[discoveryMechanismKey]discoveryMechanism +} + +func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver { + return &resourceResolver{ + parent: parent, + updateChannel: make(chan *resourceUpdate, 1), + childrenMap: make(map[discoveryMechanismKey]discoveryMechanism), + } +} + +func equalDiscoveryMechanisms(a, b []balancerconfig.DiscoveryMechanism) bool { + if len(a) != len(b) { + return false + } + for i, aa := range a { + bb := b[i] + if !aa.Equal(bb) { + return false + } + } + return true +} + +func (rr *resourceResolver) updateMechanisms(mechanisms []balancerconfig.DiscoveryMechanism) { + rr.mu.Lock() + defer rr.mu.Unlock() + if equalDiscoveryMechanisms(rr.mechanisms, mechanisms) { + return + } + rr.mechanisms = mechanisms + rr.children = make([]resolverMechanismTuple, len(mechanisms)) + newDMs := make(map[discoveryMechanismKey]bool) + + // Start one watch for each new discover mechanism {type+resource_name}. 
+ for i, dm := range mechanisms { + switch dm.Type { + case balancerconfig.DiscoveryMechanismTypeEDS: + // If EDSServiceName is not set, use the cluster name as EDS service + // name to watch. + nameToWatch := dm.EDSServiceName + if nameToWatch == "" { + nameToWatch = dm.Cluster + } + dmKey := discoveryMechanismKey{typ: dm.Type, name: nameToWatch} + newDMs[dmKey] = true + + r := rr.childrenMap[dmKey] + if r == nil { + r = newEDSResolver(nameToWatch, rr.parent.xdsClient, rr) + rr.childrenMap[dmKey] = r + } + rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r} + case balancerconfig.DiscoveryMechanismTypeLogicalDNS: + // Name to resolve in DNS is the hostname, not the ClientConn + // target. + dmKey := discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} + newDMs[dmKey] = true + + r := rr.childrenMap[dmKey] + if r == nil { + r = newDNSResolver(dm.DNSHostname, rr) + rr.childrenMap[dmKey] = r + } + rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r} + } + } + // Stop the resources that were removed. + for dm, r := range rr.childrenMap { + if !newDMs[dm] { + delete(rr.childrenMap, dm) + r.stop() + } + } + // Regenerate even if there's no change in discovery mechanism, in case + // priority order changed. + rr.generate() +} + +// resolveNow is typically called to trigger re-resolve of DNS. The EDS +// resolveNow() is a noop. +func (rr *resourceResolver) resolveNow() { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, r := range rr.childrenMap { + r.resolveNow() + } +} + +func (rr *resourceResolver) stop() { + rr.mu.Lock() + defer rr.mu.Unlock() + for dm, r := range rr.childrenMap { + delete(rr.childrenMap, dm) + r.stop() + } + rr.mechanisms = nil + rr.children = nil +} + +// generate collects all the updates from all the resolvers, and push the +// combined result into the update channel. It only pushes the update when all +// the child resolvers have received at least one update, otherwise it will +// wait. 
+// +// caller must hold rr.mu. +func (rr *resourceResolver) generate() { + var ret []balancerconfig.PriorityConfig + for _, rDM := range rr.children { + r, ok := rr.childrenMap[rDM.dmKey] + if !ok { + rr.parent.logger.Infof("resolver for %+v not found, should never happen", rDM.dmKey) + continue + } + + u, ok := r.lastUpdate() + if !ok { + // Don't send updates to parent until all resolvers have update to + // send. + return + } + switch uu := u.(type) { + case xdsclient.EndpointsUpdate: + ret = append(ret, balancerconfig.PriorityConfig{Mechanism: rDM.dm, EDSResp: uu}) + case []string: + ret = append(ret, balancerconfig.PriorityConfig{Mechanism: rDM.dm, Addresses: uu}) + } + } + select { + case <-rr.updateChannel: + default: + } + rr.updateChannel <- &resourceUpdate{p: ret} +} + +type edsDiscoveryMechanism struct { + cancel func() + + update xdsclient.EndpointsUpdate + updateReceived bool +} + +func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + if !er.updateReceived { + return nil, false + } + return er.update, true +} + +func (er *edsDiscoveryMechanism) resolveNow() { +} + +func (er *edsDiscoveryMechanism) stop() { + er.cancel() +} + +// newEDSResolver starts the EDS watch on the given xds client. 
+func newEDSResolver(nameToWatch string, xdsc xdsclient.XDSClient, topLevelResolver *resourceResolver) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{} + topLevelResolver.parent.logger.Infof("EDS watch started on %v", nameToWatch) + cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsclient.EndpointsUpdate, err error) { + topLevelResolver.mu.Lock() + defer topLevelResolver.mu.Unlock() + if err != nil { + select { + case <-topLevelResolver.updateChannel: + default: + } + topLevelResolver.updateChannel <- &resourceUpdate{err: err} + return + } + ret.update = update + ret.updateReceived = true + topLevelResolver.generate() + }) + ret.cancel = func() { + topLevelResolver.parent.logger.Infof("EDS watch canceled on %v", nameToWatch) + cancel() + } + return ret +} diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go new file mode 100644 index 000000000000..7a639f51a5d9 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + // The dns resolver is registered by the grpc package. So, this call to + // resolver.Get() is never expected to return nil. + return resolver.Get("dns").Build(target, cc, opts) + } +) + +// dnsDiscoveryMechanism watches updates for the given DNS hostname. +// +// It implements resolver.ClientConn interface to work with the DNS resolver. +type dnsDiscoveryMechanism struct { + target string + topLevelResolver *resourceResolver + r resolver.Resolver + + addrs []string + updateReceived bool +} + +func newDNSResolver(target string, topLevelResolver *resourceResolver) *dnsDiscoveryMechanism { + ret := &dnsDiscoveryMechanism{ + target: target, + topLevelResolver: topLevelResolver, + } + r, err := newDNS(resolver.Target{Scheme: "dns", Endpoint: target}, ret, resolver.BuildOptions{}) + if err != nil { + select { + case <-topLevelResolver.updateChannel: + default: + } + topLevelResolver.updateChannel <- &resourceUpdate{err: err} + } + ret.r = r + return ret +} + +func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + if !dr.updateReceived { + return nil, false + } + return dr.addrs, true +} + +func (dr *dnsDiscoveryMechanism) resolveNow() { + dr.r.ResolveNow(resolver.ResolveNowOptions{}) +} + +func (dr *dnsDiscoveryMechanism) stop() { + dr.r.Close() +} + +// dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive +// updates from the real DNS resolver. 
+ +func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { + dr.topLevelResolver.mu.Lock() + defer dr.topLevelResolver.mu.Unlock() + addrs := make([]string, len(state.Addresses)) + for i, a := range state.Addresses { + addrs[i] = a.Addr + } + dr.addrs = addrs + dr.updateReceived = true + dr.topLevelResolver.generate() + return nil +} + +func (dr *dnsDiscoveryMechanism) ReportError(err error) { + select { + case <-dr.topLevelResolver.updateChannel: + default: + } + dr.topLevelResolver.updateChannel <- &resourceUpdate{err: err} +} + +func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { + dr.UpdateState(resolver.State{Addresses: addresses}) +} + +func (dr *dnsDiscoveryMechanism) NewServiceConfig(string) { + // This method is deprecated, and service config isn't supported. +} + +func (dr *dnsDiscoveryMechanism) ParseServiceConfig(string) *serviceconfig.ParseResult { + return &serviceconfig.ParseResult{Err: fmt.Errorf("service config not supported")} +} diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go new file mode 100644 index 000000000000..9a9438155098 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -0,0 +1,873 @@ +// +build go1.12 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterresolver + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" + "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/fakeclient" + xdsclient "google.golang.org/grpc/xds/internal/xdsclient" +) + +const ( + testDNSTarget = "dns.com" +) + +var ( + testEDSUpdates []xdsclient.EndpointsUpdate +) + +func init() { + clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) + testEDSUpdates = append(testEDSUpdates, parseEDSRespProtoForTesting(clab1.Build())) + clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) + testEDSUpdates = append(testEDSUpdates, parseEDSRespProtoForTesting(clab2.Build())) +} + +// Test the simple case with one EDS resource to watch. +func (s) TestResourceResolverOneEDSResource(t *testing.T) { + for _, test := range []struct { + name string + clusterName, edsName string + wantName string + edsUpdate xdsclient.EndpointsUpdate + want []balancerconfig.PriorityConfig + }{ + {name: "watch EDS", + clusterName: testClusterName, + edsName: testServiceName, + wantName: testServiceName, + edsUpdate: testEDSUpdates[0], + want: []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + EDSServiceName: testServiceName, + }, + EDSResp: testEDSUpdates[0], + }}, + }, + { + name: "watch EDS no EDS name", // Will watch for cluster name. 
+ clusterName: testClusterName, + wantName: testClusterName, + edsUpdate: testEDSUpdates[1], + want: []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }, + EDSResp: testEDSUpdates[1], + }}, + }, + } { + t.Run(test.name, func(t *testing.T) { + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: test.clusterName, + EDSServiceName: test.edsName, + }}) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotEDSName, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName != test.wantName { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName, test.wantName) + } + + // Invoke callback, should get an update. + fakeClient.InvokeWatchEDSCallback("", test.edsUpdate, nil) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, test.want); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + // Close the resource resolver. Should stop EDS watch. 
+ rr.stop() + edsNameCanceled, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled != test.wantName { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, testServiceName) + } + }) + } +} + +func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOptions, *manual.Resolver, func()) { + dnsTargetCh := make(chan resolver.Target, 1) + dnsCloseCh := make(chan struct{}, 1) + resolveNowCh := make(chan resolver.ResolveNowOptions, 1) + + mr := manual.NewBuilderWithScheme("dns") + mr.BuildCallback = func(target resolver.Target, _ resolver.ClientConn, _ resolver.BuildOptions) { dnsTargetCh <- target } + mr.CloseCallback = func() { dnsCloseCh <- struct{}{} } + mr.ResolveNowCallback = func(opts resolver.ResolveNowOptions) { resolveNowCh <- opts } + oldNewDNS := newDNS + newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + return mr.Build(target, cc, opts) + } + return dnsTargetCh, dnsCloseCh, resolveNowCh, mr, func() { newDNS = oldNewDNS } +} + +// Test the simple case of one DNS resolver. 
+func (s) TestResourceResolverOneDNSResource(t *testing.T) { + for _, test := range []struct { + name string + target string + wantTarget resolver.Target + addrs []resolver.Address + want []balancerconfig.PriorityConfig + }{ + { + name: "watch DNS", + target: testDNSTarget, + wantTarget: resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}, + addrs: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}, + want: []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }, + Addresses: []string{"1.1.1.1", "2.2.2.2"}, + }}, + }, + } { + t.Run(test.name, func(t *testing.T) { + dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() + defer cleanup() + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: test.target, + }}) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + select { + case target := <-dnsTargetCh: + if diff := cmp.Diff(target, test.wantTarget); diff != "" { + t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for building DNS resolver") + } + + // Invoke callback, should get an update. + dnsR.UpdateState(resolver.State{Addresses: test.addrs}) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, test.want); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + // Close the resource resolver. Should close the underlying resolver. 
+ rr.stop() + select { + case <-dnsCloseCh: + case <-ctx.Done(): + t.Fatal("Timed out waiting for closing DNS resolver") + } + }) + } +} + +// Test that changing EDS name would cause a cancel and a new watch. +// +// Also, changes that don't actually change EDS names (e.g. changing cluster +// name but not service name, or change circuit breaking count) doesn't do +// anything. +// +// - update DiscoveryMechanism +// - same EDS name to watch, but different MaxCurrentCount: no new watch +// - different cluster name, but same EDS name: no new watch +func (s) TestResourceResolverChangeEDSName(t *testing.T) { + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + EDSServiceName: testServiceName, + }}) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName1 != testServiceName { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testServiceName) + } + + // Invoke callback, should get an update. + fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + EDSServiceName: testServiceName, + }, + EDSResp: testEDSUpdates[0], + }}); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Change name to watch. 
+ rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }}) + edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled1 != gotEDSName1 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, testServiceName) + } + gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName2 != testClusterName { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName2, testClusterName) + } + // Shouldn't get any update, because the new resource hasn't received any + // update. + shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + select { + case u := <-rr.updateChannel: + t.Fatalf("get unexpected update %+v", u) + case <-shortCtx.Done(): + } + + // Invoke callback, should get an update. + fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }, + EDSResp: testEDSUpdates[1], + }}); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Change circuit breaking count, should get an update with new circuit + // breaking count, but shouldn't trigger new watch. 
+ rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + MaxConcurrentRequests: newUint32(123), + }}) + shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + if n, err := fakeClient.WaitForWatchEDS(shortCtx); err == nil { + t.Fatalf("unexpected watch started for EDS: %v", n) + } + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + MaxConcurrentRequests: newUint32(123), + }, + EDSResp: testEDSUpdates[1], + }}); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Close the resource resolver. Should stop EDS watch. + rr.stop() + edsNameCanceled, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled != gotEDSName2 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, gotEDSName2) + } +} + +// Test the case that same resources with the same priority should not add new +// EDS watch, and also should not trigger an update. 
+func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[0], + }, + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[1], + MaxConcurrentRequests: newUint32(100), + }, + }) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName1 != testClusterNames[0] { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterNames[0]) + } + gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName2 != testClusterNames[1] { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName2, testClusterNames[1]) + } + + // Invoke callback, should get an update. + fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) + // Shouldn't send update, because only one resource received an update. 
+ shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + select { + case u := <-rr.updateChannel: + t.Fatalf("get unexpected update %+v", u) + case <-shortCtx.Done(): + } + fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[0], + }, + EDSResp: testEDSUpdates[0], + }, + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[1], + MaxConcurrentRequests: newUint32(100), + }, + EDSResp: testEDSUpdates[1], + }, + }); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Send the same resources with the same priorities, shouldn't any change. + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[0], + }, + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[1], + MaxConcurrentRequests: newUint32(100), + }, + }) + shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + if n, err := fakeClient.WaitForWatchEDS(shortCtx); err == nil { + t.Fatalf("unexpected watch started for EDS: %v", n) + } + shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + select { + case u := <-rr.updateChannel: + t.Fatalf("unexpected update: %+v", u) + case <-shortCtx.Done(): + } + + // Close the resource resolver. Should stop EDS watch. 
+ rr.stop() + edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled1 != gotEDSName1 && edsNameCanceled1 != gotEDSName2 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled1, gotEDSName1, gotEDSName2) + } + edsNameCanceled2, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled2 != gotEDSName2 && edsNameCanceled2 != gotEDSName1 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled2, gotEDSName1, gotEDSName2) + } +} + +// Test the case that same resources are watched, but with different priority. +// Should not add new EDS watch, but should trigger an update with the new +// priorities. +func (s) TestResourceResolverChangePriority(t *testing.T) { + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[0], + }, + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[1], + }, + }) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName1 != testClusterNames[0] { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterNames[0]) + } + gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName2 != testClusterNames[1] { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName2, testClusterNames[1]) + } + + // Invoke callback, should get an update. 
+ fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) + // Shouldn't send update, because only one resource received an update. + shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + select { + case u := <-rr.updateChannel: + t.Fatalf("get unexpected update %+v", u) + case <-shortCtx.Done(): + } + fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[0], + }, + EDSResp: testEDSUpdates[0], + }, + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[1], + }, + EDSResp: testEDSUpdates[1], + }, + }); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Send the same resources with different priorities, shouldn't trigger + // watch, but should trigger an update with the new priorities. 
+ rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[1], + }, + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[0], + }, + }) + shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + if n, err := fakeClient.WaitForWatchEDS(shortCtx); err == nil { + t.Fatalf("unexpected watch started for EDS: %v", n) + } + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[1], + }, + EDSResp: testEDSUpdates[1], + }, + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterNames[0], + }, + EDSResp: testEDSUpdates[0], + }, + }); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Close the resource resolver. Should stop EDS watch. + rr.stop() + edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled1 != gotEDSName1 && edsNameCanceled1 != gotEDSName2 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled1, gotEDSName1, gotEDSName2) + } + edsNameCanceled2, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled2 != gotEDSName2 && edsNameCanceled2 != gotEDSName1 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled2, gotEDSName1, gotEDSName2) + } +} + +// Test the case that covers resource for both EDS and DNS. 
+func (s) TestResourceResolverEDSAndDNS(t *testing.T) { + dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() + defer cleanup() + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }, + { + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }, + }) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName1 != testClusterName { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterName) + } + select { + case target := <-dnsTargetCh: + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for building DNS resolver") + } + + fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) + // Shouldn't send update, because only one resource received an update. + shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + select { + case u := <-rr.updateChannel: + t.Fatalf("get unexpected update %+v", u) + case <-shortCtx.Done(): + } + // Invoke DNS, should get an update. 
+ dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }, + EDSResp: testEDSUpdates[0], + }, + { + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }, + Addresses: []string{"1.1.1.1", "2.2.2.2"}, + }, + }); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Close the resource resolver. Should stop EDS watch. + rr.stop() + edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled1 != gotEDSName1 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, gotEDSName1) + } + select { + case <-dnsCloseCh: + case <-ctx.Done(): + t.Fatal("Timed out waiting for closing DNS resolver") + } +} + +// Test the case that covers resource changing between EDS and DNS. 
+func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { + dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() + defer cleanup() + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }}) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName1 != testClusterName { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterName) + } + + // Invoke callback, should get an update. + fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }, + EDSResp: testEDSUpdates[0], + }}); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Update to watch DNS instead. Should cancel EDS, and start DNS. 
+ rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }}) + select { + case target := <-dnsTargetCh: + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for building DNS resolver") + } + edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled1 != gotEDSName1 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, gotEDSName1) + } + + dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }, + Addresses: []string{"1.1.1.1", "2.2.2.2"}, + }}); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Close the resource resolver. Should stop DNS. + rr.stop() + select { + case <-dnsCloseCh: + case <-ctx.Done(): + t.Fatal("Timed out waiting for closing DNS resolver") + } +} + +// Test the case that covers errors for both EDS and DNS. 
+func (s) TestResourceResolverError(t *testing.T) { + dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() + defer cleanup() + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }, + { + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }, + }) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotEDSName1 != testClusterName { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterName) + } + select { + case target := <-dnsTargetCh: + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for building DNS resolver") + } + + // Invoke callback with an error, should get an update. + edsErr := fmt.Errorf("EDS error") + fakeClient.InvokeWatchEDSCallback(gotEDSName1, xdsclient.EndpointsUpdate{}, edsErr) + select { + case u := <-rr.updateChannel: + if u.err != edsErr { + t.Fatalf("got unexpected error from update, want %v, got %v", edsErr, u.err) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Invoke DNS with an error, should get an update. + dnsErr := fmt.Errorf("DNS error") + dnsR.ReportError(dnsErr) + select { + case u := <-rr.updateChannel: + if u.err != dnsErr { + t.Fatalf("got unexpected error from update, want %v, got %v", dnsErr, u.err) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + // Close the resource resolver. 
Should stop EDS watch. + rr.stop() + edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if edsNameCanceled1 != gotEDSName1 { + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, gotEDSName1) + } + select { + case <-dnsCloseCh: + case <-ctx.Done(): + t.Fatal("Timed out waiting for closing DNS resolver") + } +} + +// Test re-resolve of the DNS resolver. +func (s) TestResourceResolverDNSResolveNow(t *testing.T) { + dnsTargetCh, dnsCloseCh, resolveNowCh, dnsR, cleanup := setupDNS() + defer cleanup() + fakeClient := fakeclient.NewClient() + rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) + rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }}) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + select { + case target := <-dnsTargetCh: + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for building DNS resolver") + } + + // Invoke callback, should get an update. 
+ dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) + select { + case u := <-rr.updateChannel: + if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + Mechanism: balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }, + Addresses: []string{"1.1.1.1", "2.2.2.2"}, + }}); diff != "" { + t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + rr.resolveNow() + select { + case <-resolveNowCh: + case <-ctx.Done(): + t.Fatal("Timed out waiting for re-resolve") + } + // Close the resource resolver. Should close the underlying resolver. + rr.stop() + select { + case <-dnsCloseCh: + case <-ctx.Done(): + t.Fatal("Timed out waiting for closing DNS resolver") + } +} diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index f3cfb9401e38..255454080360 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -54,7 +54,7 @@ type Client struct { ldsCb func(xdsclient.ListenerUpdate, error) rdsCb func(xdsclient.RouteConfigUpdate, error) cdsCbs map[string]func(xdsclient.ClusterUpdate, error) - edsCb func(xdsclient.EndpointsUpdate, error) + edsCbs map[string]func(xdsclient.EndpointsUpdate, error) Closed *grpcsync.Event // fired when Close is called. } @@ -180,10 +180,10 @@ func (xdsC *Client) WaitForCancelClusterWatch(ctx context.Context) (string, erro // WatchEndpoints registers an EDS watch for provided clusterName. 
func (xdsC *Client) WatchEndpoints(clusterName string, callback func(xdsclient.EndpointsUpdate, error)) (cancel func()) { - xdsC.edsCb = callback + xdsC.edsCbs[clusterName] = callback xdsC.edsWatchCh.Send(clusterName) return func() { - xdsC.edsCancelCh.Send(nil) + xdsC.edsCancelCh.Send(clusterName) } } @@ -201,15 +201,28 @@ func (xdsC *Client) WaitForWatchEDS(ctx context.Context) (string, error) { // // Not thread safe with WatchEndpoints. Only call this after // WaitForWatchEDS. -func (xdsC *Client) InvokeWatchEDSCallback(update xdsclient.EndpointsUpdate, err error) { - xdsC.edsCb(update, err) +func (xdsC *Client) InvokeWatchEDSCallback(name string, update xdsclient.EndpointsUpdate, err error) { + if len(xdsC.edsCbs) != 1 { + // This may panic if name isn't found. But it's fine for tests. + xdsC.edsCbs[name](update, err) + return + } + // Keeps functionality with previous usage of this, if single callback call + // that callback. + for n := range xdsC.edsCbs { + name = n + } + xdsC.edsCbs[name](update, err) } // WaitForCancelEDSWatch waits for a EDS watch to be cancelled and returns // context.DeadlineExceeded otherwise. -func (xdsC *Client) WaitForCancelEDSWatch(ctx context.Context) error { - _, err := xdsC.edsCancelCh.Receive(ctx) - return err +func (xdsC *Client) WaitForCancelEDSWatch(ctx context.Context) (string, error) { + edsNameReceived, err := xdsC.edsCancelCh.Receive(ctx) + if err != nil { + return "", err + } + return edsNameReceived.(string), err } // ReportLoadArgs wraps the arguments passed to ReportLoad. 
@@ -282,15 +295,16 @@ func NewClientWithName(name string) *Client { ldsWatchCh: testutils.NewChannel(), rdsWatchCh: testutils.NewChannel(), cdsWatchCh: testutils.NewChannelWithSize(10), - edsWatchCh: testutils.NewChannel(), + edsWatchCh: testutils.NewChannelWithSize(10), ldsCancelCh: testutils.NewChannel(), rdsCancelCh: testutils.NewChannel(), cdsCancelCh: testutils.NewChannelWithSize(10), - edsCancelCh: testutils.NewChannel(), + edsCancelCh: testutils.NewChannelWithSize(10), loadReportCh: testutils.NewChannel(), lrsCancelCh: testutils.NewChannel(), loadStore: load.NewStore(), cdsCbs: make(map[string]func(xdsclient.ClusterUpdate, error)), + edsCbs: make(map[string]func(xdsclient.EndpointsUpdate, error)), Closed: grpcsync.NewEvent(), } } From ba41bbac225e6e1a9b822fe636c40c3b7d977894 Mon Sep 17 00:00:00 2001 From: James Protzman Date: Wed, 14 Jul 2021 13:54:58 -0400 Subject: [PATCH 158/998] transport: validate http 200 status for responses (#4474) --- internal/status/status.go | 14 ++-- internal/transport/http2_client.go | 85 +++++++++++++--------- internal/transport/http_util.go | 20 ----- internal/transport/transport_test.go | 105 ++++++++++++++++++++++----- test/end2end_test.go | 22 ++++-- 5 files changed, 164 insertions(+), 82 deletions(-) diff --git a/internal/status/status.go b/internal/status/status.go index 710223b8ded0..e5c6513edd13 100644 --- a/internal/status/status.go +++ b/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return &Error{e: s.Proto()} + return &Error{s: s} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} { return details } +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + // Error wraps a pointer of a status proto. 
It implements error and Status, // and a nil *Error should never be returned by this package. type Error struct { - e *spb.Status + s *Status } func (e *Error) Error() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) + return e.s.String() } // GRPCStatus returns the Status represented by se. func (e *Error) GRPCStatus() *Status { - return FromProto(e.e) + return e.s } // Is implements future error.Is functionality. @@ -158,5 +162,5 @@ func (e *Error) Is(target error) bool { if !ok { return false } - return proto.Equal(e.e, tse.e) + return proto.Equal(e.s.s, tse.s.s) } diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 9227b80150ff..9592d443d3e1 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -24,6 +24,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "strings" "sync" @@ -1280,29 +1281,40 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // that the peer is speaking gRPC and we are in gRPC mode. 
isGRPC = !initialHeader mdata = make(map[string][]string) - contentTypeErr string + contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string statusGen *status.Status - - httpStatus string - rawStatus string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown // headerError is set if an error is encountered while parsing the headers headerError string ) + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + for _, hf := range frame.Fields { switch hf.Name { case "content-type": if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { - contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", hf.Value) + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) break } + contentTypeErr = "" mdata[hf.Name] = append(mdata[hf.Name], hf.Value) isGRPC = true case "grpc-encoding": s.recvCompress = hf.Value case "grpc-status": - rawStatus = hf.Value + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) case "grpc-status-details-bin": @@ -1312,7 +1324,27 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) } case ":status": - httpStatus = hf.Value + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := 
int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -1327,30 +1359,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - if !isGRPC { - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - httpStatusCode int - ) - - if httpStatus != "" { - c, err := strconv.ParseInt(httpStatus, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - httpStatusCode = int(c) + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + if httpStatusCode != nil { var ok bool - code, ok = HTTPStatusConvTab[httpStatusCode] + code, ok = HTTPStatusConvTab[*httpStatusCode] if !ok { code = codes.Unknown } } - + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } // Verify the HTTP response is a 200. 
- se := status.New(code, constructHTTPErrMsg(&httpStatusCode, contentTypeErr)) + se := status.New(code, strings.Join(errs, "; ")) t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1407,16 +1434,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } if statusGen == nil { - rawStatusCode := codes.Unknown - if rawStatus != "" { - code, err := strconv.ParseInt(rawStatus, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - rawStatusCode = codes.Code(uint32(code)) - } statusGen = status.New(rawStatusCode, grpcMessage) } diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index 15d775fca3cc..d8247bcdf692 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -173,26 +173,6 @@ func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { return status.FromProto(st), nil } -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. 
-func constructHTTPErrMsg(httpStatus *int, contentTypeErr string) string { - var errMsgs []string - - if httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(httpStatus)), *httpStatus)) - } - - if contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - type timeoutUnit uint8 const ( diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index d50170cdf9b6..92990eaf7b27 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1978,6 +1978,31 @@ func (s) TestClientHandshakeInfo(t *testing.T) { } func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { + testStream := func() *Stream { + return &Stream{ + done: make(chan struct{}), + headerChan: make(chan struct{}), + buf: &recvBuffer{ + c: make(chan recvMsg), + mu: sync.Mutex{}, + }, + } + } + + testClient := func(ts *Stream) *http2Client { + return &http2Client{ + mu: sync.Mutex{}, + activeStreams: map[uint32]*Stream{ + 0: ts, + }, + controlBuf: &controlBuffer{ + ch: make(chan struct{}), + done: make(chan struct{}), + list: &itemList{}, + }, + } + } + for _, test := range []struct { name string // input @@ -1991,17 +2016,32 @@ func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { Fields: []hpack.HeaderField{ {Name: "content-type", Value: "application/grpc"}, {Name: "grpc-status", Value: "0"}, + {Name: ":status", Value: "200"}, }, }, // no error wantStatus: status.New(codes.OK, ""), }, + { + name: "missing content-type header", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "grpc-status", Value: "0"}, + {Name: ":status", Value: "200"}, + }, + }, + wantStatus: status.New( + codes.Unknown, + "malformed header: missing HTTP content-type", + ), 
+ }, { name: "invalid grpc status header field", metaHeaderFrame: &http2.MetaHeadersFrame{ Fields: []hpack.HeaderField{ {Name: "content-type", Value: "application/grpc"}, {Name: "grpc-status", Value: "xxxx"}, + {Name: ":status", Value: "200"}, }, }, wantStatus: status.New( @@ -2018,7 +2058,7 @@ func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { }, wantStatus: status.New( codes.Internal, - ": HTTP status code 0; transport: received the unexpected content-type \"application/json\"", + "malformed header: missing HTTP status; transport: received unexpected content-type \"application/json\"", ), }, { @@ -2045,27 +2085,56 @@ func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { "peer header list size exceeded limit", ), }, + { + name: "bad status in grpc mode", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + {Name: "grpc-status", Value: "0"}, + {Name: ":status", Value: "504"}, + }, + }, + wantStatus: status.New( + codes.Unavailable, + "unexpected HTTP status code received from server: 504 (Gateway Timeout)", + ), + }, + { + name: "missing http status", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + }, + }, + wantStatus: status.New( + codes.Internal, + "malformed header: missing HTTP status", + ), + }, } { + t.Run(test.name, func(t *testing.T) { - ts := &Stream{ - done: make(chan struct{}), - headerChan: make(chan struct{}), - buf: &recvBuffer{ - c: make(chan recvMsg), - mu: sync.Mutex{}, + ts := testStream() + s := testClient(ts) + + test.metaHeaderFrame.HeadersFrame = &http2.HeadersFrame{ + FrameHeader: http2.FrameHeader{ + StreamID: 0, }, } - s := &http2Client{ - mu: sync.Mutex{}, - activeStreams: map[uint32]*Stream{ - 0: ts, - }, - controlBuf: &controlBuffer{ - ch: make(chan struct{}), - done: make(chan struct{}), - list: &itemList{}, - }, + + s.operateHeaders(test.metaHeaderFrame) + + got := 
ts.status + want := test.wantStatus + if got.Code() != want.Code() || got.Message() != want.Message() { + t.Fatalf("operateHeaders(%v); status = \ngot: %s\nwant: %s", test.metaHeaderFrame, got, want) } + }) + t.Run(fmt.Sprintf("%s-end_stream", test.name), func(t *testing.T) { + ts := testStream() + s := testClient(ts) + test.metaHeaderFrame.HeadersFrame = &http2.HeadersFrame{ FrameHeader: http2.FrameHeader{ StreamID: 0, @@ -2078,7 +2147,7 @@ func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { got := ts.status want := test.wantStatus if got.Code() != want.Code() || got.Message() != want.Message() { - t.Fatalf("operateHeaders(%v); status = %v; want %v", test.metaHeaderFrame, got, want) + t.Fatalf("operateHeaders(%v); status = \ngot: %s\nwant: %s", test.metaHeaderFrame, got, want) } }) } diff --git a/test/end2end_test.go b/test/end2end_test.go index 1b839529c58b..fc112bf21c34 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7254,7 +7254,7 @@ func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { ":status", "403", "content-type", "application/grpc", }, - errCode: codes.Unknown, + errCode: codes.PermissionDenied, }, { // malformed grpc-status. @@ -7273,7 +7273,7 @@ func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { "grpc-status", "0", "grpc-tags-bin", "???", }, - errCode: codes.Internal, + errCode: codes.Unavailable, }, { // gRPC status error. 
@@ -7282,14 +7282,14 @@ func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { "content-type", "application/grpc", "grpc-status", "3", }, - errCode: codes.InvalidArgument, + errCode: codes.Unavailable, }, } { doHTTPHeaderTest(t, test.errCode, test.header) } } -// Testing non-Trailers-only Trailers (delievered in second HEADERS frame) +// Testing non-Trailers-only Trailers (delivered in second HEADERS frame) func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { for _, test := range []struct { responseHeader []string @@ -7305,7 +7305,7 @@ func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { // trailer missing grpc-status ":status", "502", }, - errCode: codes.Unknown, + errCode: codes.Unavailable, }, { responseHeader: []string{ @@ -7317,6 +7317,18 @@ func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { "grpc-status", "0", "grpc-status-details-bin", "????", }, + errCode: codes.Unimplemented, + }, + { + responseHeader: []string{ + ":status", "200", + "content-type", "application/grpc", + }, + trailer: []string{ + // malformed grpc-status-details-bin field + "grpc-status", "0", + "grpc-status-details-bin", "????", + }, errCode: codes.Internal, }, } { From bfe1d0dc23ac33e7c8ebf125753e5fb0698a4bde Mon Sep 17 00:00:00 2001 From: Jille Timmermans Date: Wed, 14 Jul 2021 20:34:40 +0200 Subject: [PATCH 159/998] binarylog: Use a simple boolean rather than a sync.Once (#4581) --- internal/binarylog/sink.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index 2ae71da3e880..936cfc007df9 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -86,21 +86,24 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. 
- buf *bufio.Writer // buf is kept for flush. - - writeStartOnce sync.Once - writeTicker *time.Ticker - done chan struct{} + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} } func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. + fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { return err } From b586e9215896c69206b29af00f30bc34d483b6fc Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 14 Jul 2021 13:10:19 -0700 Subject: [PATCH 160/998] xds/client: notify the resource watchers of xDS errors (#4564) --- .../balancer/cdsbalancer/cdsbalancer.go | 11 ++-- xds/internal/xdsclient/callback.go | 37 ++++++++--- xds/internal/xdsclient/client.go | 3 + xds/internal/xdsclient/client_test.go | 38 ++++++++--- xds/internal/xdsclient/v2/client.go | 2 +- xds/internal/xdsclient/v2/client_test.go | 2 + xds/internal/xdsclient/v3/client.go | 2 +- xds/internal/xdsclient/watchers.go | 11 ++++ .../xdsclient/watchers_cluster_test.go | 63 +++++++++++++++---- .../xdsclient/watchers_endpoints_test.go | 51 ++++++++++++--- .../xdsclient/watchers_listener_test.go | 61 ++++++++++++++---- xds/internal/xdsclient/watchers_route_test.go | 49 +++++++++++++-- 12 files changed, 269 insertions(+), 61 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index b04d150a3110..c9014247a767 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -392,18 +392,17 @@ func (b *cdsBalancer) run() { // In both cases, the error will be forwarded to EDS balancer. 
And if error is // resource-not-found, the child EDS balancer will stop watching EDS. func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - // TODO: connection errors will be sent to the eds balancers directly, and - // also forwarded by the parent balancers/resolvers. So the eds balancer may - // see the same error multiple times. We way want to only forward the error - // to eds if it's not a connection error. - // // This is not necessary today, because xds client never sends connection // errors. if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { b.clusterHandler.close() } if b.edsLB != nil { - b.edsLB.ResolverError(err) + if xdsclient.ErrType(err) != xdsclient.ErrorTypeConnection { + // Connection errors will be sent to the child balancers directly. + // There's no need to forward them. + b.edsLB.ResolverError(err) + } } else { // If eds balancer was never created, fail the RPCs with // errors. diff --git a/xds/internal/xdsclient/callback.go b/xds/internal/xdsclient/callback.go index 64b7d6794c40..b8ad0ec76362 100644 --- a/xds/internal/xdsclient/callback.go +++ b/xds/internal/xdsclient/callback.go @@ -84,14 +84,16 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata Up // On NACK, update overall version to the NACKed resp. c.ldsVersion = metadata.ErrState.Version for name := range updates { - if _, ok := c.ldsWatchers[name]; ok { + if s, ok := c.ldsWatchers[name]; ok { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.ldsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.ldsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + wi.newError(metadata.ErrState.Err) + } } } return @@ -143,14 +145,16 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metad // On NACK, update overall version to the NACKed resp. 
c.rdsVersion = metadata.ErrState.Version for name := range updates { - if _, ok := c.rdsWatchers[name]; ok { + if s, ok := c.rdsWatchers[name]; ok { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.rdsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.rdsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + wi.newError(metadata.ErrState.Err) + } } } return @@ -185,14 +189,16 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata Upda // On NACK, update overall version to the NACKed resp. c.cdsVersion = metadata.ErrState.Version for name := range updates { - if _, ok := c.cdsWatchers[name]; ok { + if s, ok := c.cdsWatchers[name]; ok { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.cdsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.cdsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + wi.newError(metadata.ErrState.Err) + } } } return @@ -244,14 +250,16 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata U // On NACK, update overall version to the NACKed resp. c.edsVersion = metadata.ErrState.Version for name := range updates { - if _, ok := c.edsWatchers[name]; ok { + if s, ok := c.edsWatchers[name]; ok { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.edsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.edsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + wi.newError(metadata.ErrState.Err) + } } } return @@ -272,3 +280,16 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata U } } } + +// NewConnectionError is called by the underlying xdsAPIClient when it receives +// a connection error. 
The error will be forwarded to all the resource watchers. +func (c *clientImpl) NewConnectionError(err error) { + c.mu.Lock() + defer c.mu.Unlock() + + for _, s := range c.edsWatchers { + for wi := range s { + wi.newError(NewErrorf(ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + } + } +} diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index cb0d93bd98ec..a7de226cd292 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -141,6 +141,9 @@ type UpdateHandler interface { // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely // referred to as Endpoints) resources. NewEndpoints(map[string]EndpointsUpdate, UpdateMetadata) + // NewConnectionError handles connection errors from the xDS stream. The + // error will be reported to all the resource watchers. + NewConnectionError(err error) } // ServiceStatus is the status of the update. diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 12590408e6ca..abf51c45b83a 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -187,59 +187,83 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2}, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil { t.Fatal(err) } } -func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ListenerUpdate) 
error { +func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ListenerUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for listener update: %v", err) } gotUpdate := u.(ldsUpdateErr) + if wantErr != nil { + if gotUpdate.err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + } + return nil + } if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) { return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) } return nil } -func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate RouteConfigUpdate) error { +func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate RouteConfigUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for route configuration update: %v", err) } gotUpdate := u.(rdsUpdateErr) + if wantErr != nil { + if gotUpdate.err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + } + return nil + } if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) { return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) } return nil } -func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ClusterUpdate) error { +func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ClusterUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for cluster update: %v", err) } gotUpdate := u.(clusterUpdateErr) - if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) { + if wantErr != nil { + if gotUpdate.err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + } + return 
nil + } + if !cmp.Equal(gotUpdate.u, wantUpdate) { return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) } return nil } -func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate EndpointsUpdate) error { +func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate EndpointsUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for endpoints update: %v", err) } gotUpdate := u.(endpointsUpdateErr) + if wantErr != nil { + if gotUpdate.err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + } + return nil + } if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate, cmpopts.EquateEmpty()) { return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) } diff --git a/xds/internal/xdsclient/v2/client.go b/xds/internal/xdsclient/v2/client.go index 311621f0405c..766db2564b8d 100644 --- a/xds/internal/xdsclient/v2/client.go +++ b/xds/internal/xdsclient/v2/client.go @@ -140,7 +140,7 @@ func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { resp, err := stream.Recv() if err != nil { - // TODO: call watch callbacks with error when stream is broken. 
+ v2c.parent.NewConnectionError(err) return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index efa228a8c6e3..138c9a161695 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -339,6 +339,8 @@ func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdate t.newUpdate(xdsclient.EndpointsResource, dd, metadata) } +func (t *testUpdateReceiver) NewConnectionError(error) {} + func (t *testUpdateReceiver) newUpdate(rType xdsclient.ResourceType, d map[string]interface{}, metadata xdsclient.UpdateMetadata) { t.f(rType, d, metadata) } diff --git a/xds/internal/xdsclient/v3/client.go b/xds/internal/xdsclient/v3/client.go index be8ff7720d89..6088189f97f1 100644 --- a/xds/internal/xdsclient/v3/client.go +++ b/xds/internal/xdsclient/v3/client.go @@ -140,7 +140,7 @@ func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { resp, err := stream.Recv() if err != nil { - // TODO: call watch callbacks with error when stream is broken. 
+ v3c.parent.NewConnectionError(err) return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v3c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) diff --git a/xds/internal/xdsclient/watchers.go b/xds/internal/xdsclient/watchers.go index 249db4de91e4..e26ed360308a 100644 --- a/xds/internal/xdsclient/watchers.go +++ b/xds/internal/xdsclient/watchers.go @@ -66,6 +66,17 @@ func (wi *watchInfo) newUpdate(update interface{}) { wi.c.scheduleCallback(wi, update, nil) } +func (wi *watchInfo) newError(err error) { + wi.mu.Lock() + defer wi.mu.Unlock() + if wi.state == watchInfoStateCanceled { + return + } + wi.state = watchInfoStateRespReceived + wi.expiryTimer.Stop() + wi.sendErrorLocked(err) +} + func (wi *watchInfo) resourceNotFound() { wi.mu.Lock() defer wi.mu.Unlock() diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index 8c33486fa017..0ded36445994 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -22,6 +22,7 @@ package xdsclient import ( "context" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -66,7 +67,7 @@ func (s) TestClusterWatch(t *testing.T) { wantUpdate := ClusterUpdate{ClusterName: testEDSName} client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -75,7 +76,7 @@ func (s) TestClusterWatch(t *testing.T) { testCDSName: wantUpdate, "randomName": {}, }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -131,7 +132,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { wantUpdate := ClusterUpdate{ClusterName: testEDSName} 
client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -140,7 +141,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { cancelLastWatch() client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count-1; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -208,11 +209,11 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { }, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate1); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate1, nil); err != nil { t.Fatal(err) } } - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } } @@ -249,7 +250,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -265,7 +266,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { } // New watch should receives the update. 
- if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -349,7 +350,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { client.NewClusters(map[string]ClusterUpdate{ testCDSName: wantUpdate, }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -408,10 +409,10 @@ func (s) TestClusterResourceRemoved(t *testing.T) { testCDSName + "1": wantUpdate1, testCDSName + "2": wantUpdate2, }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) } - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } @@ -424,7 +425,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { } // Watcher 2 should get the same update again. - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } @@ -439,7 +440,43 @@ func (s) TestClusterResourceRemoved(t *testing.T) { } // Watcher 2 should get the same update again. - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { + t.Fatal(err) + } +} + +// TestClusterWatchNACKError covers the case that an update is NACK'ed, and the +// watcher should also receive the error. 
+func (s) TestClusterWatchNACKError(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + clusterUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { + clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) + }) + defer cancelWatch() + if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + + wantError := fmt.Errorf("testing error") + client.NewClusters(map[string]ClusterUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + if err := verifyClusterUpdate(ctx, clusterUpdateCh, ClusterUpdate{}, nil); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index 70f06514d9f6..9b5000b8bb9f 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -22,6 +22,7 @@ package xdsclient import ( "context" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -84,7 +85,7 @@ func (s) TestEndpointsWatch(t *testing.T) { wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -150,7 
+151,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -159,7 +160,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { cancelLastWatch() client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count-1; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -227,11 +228,11 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { }, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate1); err != nil { + if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate1, nil); err != nil { t.Fatal(err) } } - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate2); err != nil { + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } } @@ -266,7 +267,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -282,7 +283,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { } // New watch should receives the update. 
- if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -332,3 +333,39 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.u, gotUpdate.err) } } + +// TestEndpointsWatchNACKError covers the case that an update is NACK'ed, and +// the watcher should also receive the error. +func (s) TestEndpointsWatchNACKError(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + endpointsUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { + endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) + }) + defer cancelWatch() + if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + + wantError := fmt.Errorf("testing error") + client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, EndpointsUpdate{}, wantError); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index 79ef997a72df..f7a1169bee39 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ 
-22,6 +22,7 @@ package xdsclient import ( "context" + "fmt" "testing" "google.golang.org/grpc/internal/testutils" @@ -64,7 +65,7 @@ func (s) TestLDSWatch(t *testing.T) { wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -73,7 +74,7 @@ func (s) TestLDSWatch(t *testing.T) { testLDSName: wantUpdate, "randomName": {}, }, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -132,7 +133,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -141,7 +142,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { cancelLastWatch() client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count-1; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -210,11 +211,11 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { }, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate1); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate1, nil); err != nil { t.Fatal(err) } } - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, 
wantUpdate2); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } } @@ -249,7 +250,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -265,7 +266,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { } // New watch should receive the update. - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -323,10 +324,10 @@ func (s) TestLDSResourceRemoved(t *testing.T) { testLDSName + "1": wantUpdate1, testLDSName + "2": wantUpdate2, }, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) } - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } @@ -339,7 +340,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { } // Watcher 2 should get the same update again. - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } @@ -354,7 +355,43 @@ func (s) TestLDSResourceRemoved(t *testing.T) { } // Watcher 2 should get the same update again. 
- if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { + t.Fatal(err) + } +} + +// TestListenerWatchNACKError covers the case that an update is NACK'ed, and the +// watcher should also receive the error. +func (s) TestListenerWatchNACKError(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + ldsUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { + ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) + }) + defer cancelWatch() + if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + + wantError := fmt.Errorf("testing error") + client.NewListeners(map[string]ListenerUpdate{testLDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + if err := verifyListenerUpdate(ctx, ldsUpdateCh, ListenerUpdate{}, wantError); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index 08b035a0b0a3..5e9b51da9d22 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -22,6 +22,7 @@ package xdsclient import ( "context" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -73,7 +74,7 @@ func (s) TestRDSWatch(t *testing.T) { }, } client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: 
wantUpdate}, UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate); err != nil { + if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -146,7 +147,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { } client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate); err != nil { + if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -155,7 +156,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { cancelLastWatch() client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) for i := 0; i < count-1; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate); err != nil { + if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) } } @@ -237,11 +238,11 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { }, UpdateMetadata{}) for i := 0; i < count; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate1); err != nil { + if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate1, nil); err != nil { t.Fatal(err) } } - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh2, wantUpdate2); err != nil { + if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } } @@ -283,7 +284,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { }, } client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate); err != nil { + if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -310,3 +311,39 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { t.Errorf("unexpected RouteConfigUpdate: %v, %v, 
want channel recv timeout", u, err) } } + +// TestRouteWatchNACKError covers the case that an update is NACK'ed, and the +// watcher should also receive the error. +func (s) TestRouteWatchNACKError(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + rdsUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchRouteConfig(testCDSName, func(update RouteConfigUpdate, err error) { + rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) + }) + defer cancelWatch() + if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + + wantError := fmt.Errorf("testing error") + client.NewRouteConfigs(map[string]RouteConfigUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, RouteConfigUpdate{}, wantError); err != nil { + t.Fatal(err) + } +} From 0103ea2d6c98f59ddd6ff09aa93f963936157213 Mon Sep 17 00:00:00 2001 From: John Howard Date: Wed, 14 Jul 2021 13:59:50 -0700 Subject: [PATCH 161/998] client: improve GOAWAY debug messages (#4587) --- internal/transport/http2_client.go | 6 +++++- test/end2end_test.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 9592d443d3e1..8b6254b5bdc7 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1235,7 +1235,11 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { 
t.goAwayReason = GoAwayTooManyPings } } - t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %v", f.ErrCode, string(f.DebugData())) + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { diff --git a/test/end2end_test.go b/test/end2end_test.go index fc112bf21c34..3d941b187bf9 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -1453,7 +1453,7 @@ func (s) TestDetailedGoawayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) if err != nil { t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) } - const expectedErrorMessageSubstring = "received prior goaway: code: ENHANCE_YOUR_CALM, debug data: too_many_pings" + const expectedErrorMessageSubstring = `received prior goaway: code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"` _, err = stream.Recv() close(rpcDoneOnClient) if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { From ce7bdf50abb1f7c7a5ba1a54890e6dac46eb87f7 Mon Sep 17 00:00:00 2001 From: Matt Jones Date: Thu, 15 Jul 2021 09:53:31 -0700 Subject: [PATCH 162/998] advancedtls: CRL checking for golang gRPC (#4489) * Code for CRL checking for golang gRPC. 
--- security/advancedtls/crl.go | 499 ++++++++++++ security/advancedtls/crl_test.go | 718 ++++++++++++++++++ security/advancedtls/examples/go.mod | 2 +- security/advancedtls/examples/go.sum | 2 + security/advancedtls/go.mod | 3 +- security/advancedtls/go.sum | 2 + security/advancedtls/testdata/crl/0b35a562.r0 | 1 + security/advancedtls/testdata/crl/0b35a562.r1 | 1 + security/advancedtls/testdata/crl/1.crl | 10 + security/advancedtls/testdata/crl/1ab871c8.r0 | 1 + security/advancedtls/testdata/crl/2.crl | 10 + security/advancedtls/testdata/crl/3.crl | 11 + security/advancedtls/testdata/crl/4.crl | 10 + security/advancedtls/testdata/crl/5.crl | 10 + security/advancedtls/testdata/crl/6.crl | 11 + security/advancedtls/testdata/crl/71eac5a2.r0 | 1 + security/advancedtls/testdata/crl/7a1799af.r0 | 1 + security/advancedtls/testdata/crl/8828a7e6.r0 | 1 + security/advancedtls/testdata/crl/README.md | 48 ++ security/advancedtls/testdata/crl/deee447d.r0 | 1 + .../advancedtls/testdata/crl/revokedInt.pem | 58 ++ .../advancedtls/testdata/crl/revokedLeaf.pem | 59 ++ .../advancedtls/testdata/crl/unrevoked.pem | 58 ++ 23 files changed, 1516 insertions(+), 2 deletions(-) create mode 100644 security/advancedtls/crl.go create mode 100644 security/advancedtls/crl_test.go create mode 120000 security/advancedtls/testdata/crl/0b35a562.r0 create mode 120000 security/advancedtls/testdata/crl/0b35a562.r1 create mode 100644 security/advancedtls/testdata/crl/1.crl create mode 120000 security/advancedtls/testdata/crl/1ab871c8.r0 create mode 100644 security/advancedtls/testdata/crl/2.crl create mode 100644 security/advancedtls/testdata/crl/3.crl create mode 100644 security/advancedtls/testdata/crl/4.crl create mode 100644 security/advancedtls/testdata/crl/5.crl create mode 100644 security/advancedtls/testdata/crl/6.crl create mode 120000 security/advancedtls/testdata/crl/71eac5a2.r0 create mode 120000 security/advancedtls/testdata/crl/7a1799af.r0 create mode 120000 
security/advancedtls/testdata/crl/8828a7e6.r0 create mode 100644 security/advancedtls/testdata/crl/README.md create mode 120000 security/advancedtls/testdata/crl/deee447d.r0 create mode 100644 security/advancedtls/testdata/crl/revokedInt.pem create mode 100644 security/advancedtls/testdata/crl/revokedLeaf.pem create mode 100644 security/advancedtls/testdata/crl/unrevoked.pem diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go new file mode 100644 index 000000000000..5b3f90127ee7 --- /dev/null +++ b/security/advancedtls/crl.go @@ -0,0 +1,499 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package advancedtls + +import ( + "bytes" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "google.golang.org/grpc/grpclog" +) + +var grpclogLogger = grpclog.Component("advancedtls") + +// Cache is an interface to cache CRL files. +// The cache implemetation must be concurrency safe. +// A fixed size lru cache from golang-lru is recommended. +type Cache interface { + // Add adds a value to the cache. + Add(key, value interface{}) bool + // Get looks up a key's value from the cache. + Get(key interface{}) (value interface{}, ok bool) +} + +// RevocationConfig contains options for CRL lookup. 
+type RevocationConfig struct { + // RootDir is the directory to search for CRL files. + // Directory format must match OpenSSL X509_LOOKUP_hash_dir(3). + RootDir string + // AllowUndetermined controls if certificate chains with RevocationUndetermined + // revocation status are allowed to complete. + AllowUndetermined bool + // Cache will store CRL files if not nil, otherwise files are reloaded for every lookup. + Cache Cache +} + +// RevocationStatus is the revocation status for a certificate or chain. +type RevocationStatus int + +const ( + // RevocationUndetermined means we couldn't find or verify a CRL for the cert. + RevocationUndetermined RevocationStatus = iota + // RevocationUnrevoked means we found the CRL for the cert and the cert is not revoked. + RevocationUnrevoked + // RevocationRevoked means we found the CRL and the cert is revoked. + RevocationRevoked +) + +func (s RevocationStatus) String() string { + return [...]string{"RevocationUndetermined", "RevocationUnrevoked", "RevocationRevoked"}[s] +} + +// certificateListExt contains a pkix.CertificateList and parsed +// extensions that aren't provided by the golang CRL parser. +type certificateListExt struct { + CertList *pkix.CertificateList + // RFC5280, 5.2.1, all conforming CRLs must have a AKID with the ID method. 
+ AuthorityKeyID []byte +} + +const tagDirectoryName = 4 + +var ( + // RFC5280, 5.2.4 id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } + oidDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27} + // RFC5280, 5.2.5 id-ce-issuingDistributionPoint OBJECT IDENTIFIER ::= { id-ce 28 } + oidIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28} + // RFC5280, 5.3.3 id-ce-certificateIssuer OBJECT IDENTIFIER ::= { id-ce 29 } + oidCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29} + // RFC5290, 4.2.1.1 id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 } + oidAuthorityKeyIdentifier = asn1.ObjectIdentifier{2, 5, 29, 35} +) + +// x509NameHash implements the OpenSSL X509_NAME_hash function for hashed directory lookups. +func x509NameHash(r pkix.RDNSequence) string { + var canonBytes []byte + // First, canonicalize all the strings. + for _, rdnSet := range r { + for i, rdn := range rdnSet { + value, ok := rdn.Value.(string) + if !ok { + continue + } + // OpenSSL trims all whitespace, does a tolower, and removes extra spaces between words. + // Implemented in x509_name_canon in OpenSSL + canonStr := strings.Join(strings.Fields( + strings.TrimSpace(strings.ToLower(value))), " ") + // Then it changes everything to UTF8 strings + rdnSet[i].Value = asn1.RawValue{Tag: asn1.TagUTF8String, Bytes: []byte(canonStr)} + + } + } + + // Finally, OpenSSL drops the initial sequence tag + // so we marshal all the RDNs separately instead of as a group. + for _, canonRdn := range r { + b, err := asn1.Marshal(canonRdn) + if err != nil { + continue + } + canonBytes = append(canonBytes, b...) + } + + issuerHash := sha1.Sum(canonBytes) + // Openssl takes the first 4 bytes and encodes them as a little endian + // uint32 and then uses the hex to make the file name. 
+ // In C++, this would be: + // (((unsigned long)md[0]) | ((unsigned long)md[1] << 8L) | + // ((unsigned long)md[2] << 16L) | ((unsigned long)md[3] << 24L) + // ) & 0xffffffffL; + fileHash := binary.LittleEndian.Uint32(issuerHash[0:4]) + return fmt.Sprintf("%08x", fileHash) +} + +// CheckRevocation checks the connection for revoked certificates based on RFC5280. +// This implementation has the following major limitations: +// * Indirect CRL files are not supported. +// * CRL loading is only supported from directories in the X509_LOOKUP_hash_dir format. +// * OnlySomeReasons is not supported. +// * Delta CRL files are not supported. +// * Certificate CRLDistributionPoint must be URLs, but are then ignored and converted into a file path. +// * CRL checks are done after path building, which goes against RFC4158. +func CheckRevocation(conn tls.ConnectionState, cfg RevocationConfig) error { + return CheckChainRevocation(conn.VerifiedChains, cfg) +} + +// CheckChainRevocation checks the verified certificate chain +// for revoked certificates based on RFC5280. +func CheckChainRevocation(verifiedChains [][]*x509.Certificate, cfg RevocationConfig) error { + // Iterate the verified chains looking for one that is RevocationUnrevoked. + // A single RevocationUnrevoked chain is enough to allow the connection, and a single RevocationRevoked + // chain does not mean the connection should fail. + count := make(map[RevocationStatus]int) + for _, chain := range verifiedChains { + switch checkChain(chain, cfg) { + case RevocationUnrevoked: + // If any chain is RevocationUnrevoked then return no error. + return nil + case RevocationRevoked: + // If this chain is revoked, keep looking for another chain. 
+ count[RevocationRevoked]++ + continue + case RevocationUndetermined: + if cfg.AllowUndetermined { + return nil + } + count[RevocationUndetermined]++ + continue + } + } + return fmt.Errorf("no unrevoked chains found: %v", count) +} + +// checkChain will determine and check all certificates in chain against the CRL +// defined in the certificate with the following rules: +// 1. If any certificate is RevocationRevoked, return RevocationRevoked. +// 2. If any certificate is RevocationUndetermined, return RevocationUndetermined. +// 3. If all certificates are RevocationUnrevoked, return RevocationUnrevoked. +func checkChain(chain []*x509.Certificate, cfg RevocationConfig) RevocationStatus { + chainStatus := RevocationUnrevoked + for _, c := range chain { + switch checkCert(c, chain, cfg) { + case RevocationRevoked: + // Easy case, if a cert in the chain is revoked, the chain is revoked. + return RevocationRevoked + case RevocationUndetermined: + // If we couldn't find the revocation status for a cert, the chain is at best RevocationUndetermined + // keep looking to see if we find a cert in the chain that's RevocationRevoked, + // but return RevocationUndetermined at a minimum. + chainStatus = RevocationUndetermined + case RevocationUnrevoked: + // Continue iterating up the cert chain. + continue + } + } + return chainStatus +} + +func cachedCrl(rawIssuer []byte, cache Cache) (*certificateListExt, bool) { + val, ok := cache.Get(hex.EncodeToString(rawIssuer)) + if !ok { + return nil, false + } + crl, ok := val.(*certificateListExt) + if !ok { + return nil, false + } + // If the CRL is expired, force a reload. + if crl.CertList.HasExpired(time.Now()) { + return nil, false + } + return crl, true +} + +// fetchIssuerCRL fetches and verifies the CRL for rawIssuer from disk or cache if configured in cfg. 
+func fetchIssuerCRL(crlDistributionPoint string, rawIssuer []byte, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) (*certificateListExt, error) { + if cfg.Cache != nil { + if crl, ok := cachedCrl(rawIssuer, cfg.Cache); ok { + return crl, nil + } + } + + crl, err := fetchCRL(crlDistributionPoint, rawIssuer, cfg) + if err != nil { + return nil, fmt.Errorf("fetchCRL(%v) failed err = %v", crlDistributionPoint, err) + } + + if err := verifyCRL(crl, rawIssuer, crlVerifyCrt); err != nil { + return nil, fmt.Errorf("verifyCRL(%v) failed err = %v", crlDistributionPoint, err) + } + if cfg.Cache != nil { + cfg.Cache.Add(hex.EncodeToString(rawIssuer), crl) + } + return crl, nil +} + +// checkCert checks a single certificate against the CRL defined in the certificate. +// It will fetch and verify the CRL(s) defined by CRLDistributionPoints. +// If we can't load any authoritative CRL files, the status is RevocationUndetermined. +// c is the certificate to check. +// crlVerifyCrt is the group of possible certificates to verify the crl. +func checkCert(c *x509.Certificate, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) RevocationStatus { + if len(c.CRLDistributionPoints) == 0 { + return RevocationUnrevoked + } + // Iterate through CRL distribution points to check for status + for _, dp := range c.CRLDistributionPoints { + crl, err := fetchIssuerCRL(dp, c.RawIssuer, crlVerifyCrt, cfg) + if err != nil { + grpclogLogger.Warningf("getIssuerCRL(%v) err = %v", c.Issuer, err) + continue + } + revocation, err := checkCertRevocation(c, crl) + if err != nil { + grpclogLogger.Warningf("checkCertRevocation(CRL %v) failed %v", crl.CertList.TBSCertList.Issuer, err) + // We couldn't check the CRL file for some reason, so continue + // to the next file + continue + } + // Here we've gotten a CRL that loads and verifies. + // We only handle all-reasons CRL files, so this file + // is authoritative for the certificate. 
+ return revocation + + } + // We couldn't load any CRL files for the certificate, so we don't know if it's RevocationUnrevoked or not. + return RevocationUndetermined +} + +func checkCertRevocation(c *x509.Certificate, crl *certificateListExt) (RevocationStatus, error) { + // Per section 5.3.3 we prime the certificate issuer with the CRL issuer. + // Subsequent entries use the previous entry's issuer. + rawEntryIssuer, err := asn1.Marshal(crl.CertList.TBSCertList.Issuer) + if err != nil { + return RevocationUndetermined, err + } + + // Loop through all the revoked certificates. + for _, revCert := range crl.CertList.TBSCertList.RevokedCertificates { + // 5.3 Loop through CRL entry extensions for needed information. + for _, ext := range revCert.Extensions { + if oidCertificateIssuer.Equal(ext.Id) { + extIssuer, err := parseCertIssuerExt(ext) + if err != nil { + grpclogLogger.Info(err) + if ext.Critical { + return RevocationUndetermined, err + } + // Since this is a non-critical extension, we can skip it even though + // there was a parsing failure. + continue + } + rawEntryIssuer = extIssuer + } else if ext.Critical { + return RevocationUndetermined, fmt.Errorf("checkCertRevocation: Unhandled critical extension: %v", ext.Id) + } + } + + // If the issuer and serial number appear in the CRL, the certificate is revoked. + if bytes.Equal(c.RawIssuer, rawEntryIssuer) && c.SerialNumber.Cmp(revCert.SerialNumber) == 0 { + // CRL contains the serial, so return revoked. + return RevocationRevoked, nil + } + } + // We did not find the serial in the CRL file that was valid for the cert + // so the certificate is not revoked. 
+ return RevocationUnrevoked, nil +} + +func parseCertIssuerExt(ext pkix.Extension) ([]byte, error) { + // 5.3.3 Certificate Issuer + // CertificateIssuer ::= GeneralNames + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + var generalNames []asn1.RawValue + if rest, err := asn1.Unmarshal(ext.Value, &generalNames); err != nil || len(rest) != 0 { + return nil, fmt.Errorf("asn1.Unmarshal failed err = %v", err) + } + + for _, generalName := range generalNames { + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + if generalName.Tag == tagDirectoryName { + return generalName.Bytes, nil + } + } + // Conforming CRL issuers MUST include in this extension the + // distinguished name (DN) from the issuer field of the certificate that + // corresponds to this CRL entry. + // If we couldn't get a directoryName, we can't reason about this file so cert status is + // RevocationUndetermined. + return nil, errors.New("no DN found in certificate issuer") +} + +// RFC 5280, 4.2.1.1 +type authKeyID struct { + ID []byte `asn1:"optional,tag:0"` +} + +// RFC5280, 5.2.5 +// id-ce-issuingDistributionPoint OBJECT IDENTIFIER ::= { id-ce 28 } + +// IssuingDistributionPoint ::= SEQUENCE { +// distributionPoint [0] DistributionPointName OPTIONAL, +// onlyContainsUserCerts [1] BOOLEAN DEFAULT FALSE, +// onlyContainsCACerts [2] BOOLEAN DEFAULT FALSE, +// onlySomeReasons [3] ReasonFlags OPTIONAL, +// indirectCRL [4] BOOLEAN DEFAULT FALSE, +// onlyContainsAttributeCerts [5] BOOLEAN DEFAULT FALSE } + +// -- at most one of onlyContainsUserCerts, onlyContainsCACerts, +// -- and onlyContainsAttributeCerts may be set to TRUE. 
+type issuingDistributionPoint struct { + DistributionPoint asn1.RawValue `asn1:"optional,tag:0"` + OnlyContainsUserCerts bool `asn1:"optional,tag:1"` + OnlyContainsCACerts bool `asn1:"optional,tag:2"` + OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"` + IndirectCRL bool `asn1:"optional,tag:4"` + OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"` +} + +// parseCRLExtensions parses the extensions for a CRL +// and checks that they're supported by the parser. +func parseCRLExtensions(c *pkix.CertificateList) (*certificateListExt, error) { + if c == nil { + return nil, errors.New("c is nil, expected any value") + } + certList := &certificateListExt{CertList: c} + + for _, ext := range c.TBSCertList.Extensions { + switch { + case oidDeltaCRLIndicator.Equal(ext.Id): + return nil, fmt.Errorf("delta CRLs unsupported") + + case oidAuthorityKeyIdentifier.Equal(ext.Id): + var a authKeyID + if rest, err := asn1.Unmarshal(ext.Value, &a); err != nil { + return nil, fmt.Errorf("asn1.Unmarshal failed. err = %v", err) + } else if len(rest) != 0 { + return nil, errors.New("trailing data after AKID extension") + } + certList.AuthorityKeyID = a.ID + + case oidIssuingDistributionPoint.Equal(ext.Id): + var dp issuingDistributionPoint + if rest, err := asn1.Unmarshal(ext.Value, &dp); err != nil { + return nil, fmt.Errorf("asn1.Unmarshal failed. 
err = %v", err) + } else if len(rest) != 0 { + return nil, errors.New("trailing data after IssuingDistributionPoint extension") + } + + if dp.OnlyContainsUserCerts || dp.OnlyContainsCACerts || dp.OnlyContainsAttributeCerts { + return nil, errors.New("CRL only contains some certificate types") + } + if dp.IndirectCRL { + return nil, errors.New("indirect CRLs unsupported") + } + if dp.OnlySomeReasons.BitLength != 0 { + return nil, errors.New("onlySomeReasons unsupported") + } + + case ext.Critical: + return nil, fmt.Errorf("unsupported critical extension: %v", ext.Id) + } + } + + if len(certList.AuthorityKeyID) == 0 { + return nil, errors.New("authority key identifier extension missing") + } + return certList, nil +} + +func fetchCRL(loc string, rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, error) { + var parsedCRL *certificateListExt + // 6.3.3 (a) (1) (ii) + // According to X509_LOOKUP_hash_dir the format is issuer_hash.rN where N is an increasing number. + // There are no gaps, so we break when we can't find a file. + for i := 0; ; i++ { + // Unmarshal to RDNSequence according to http://go/godoc/crypto/x509/pkix/#Name. + var r pkix.RDNSequence + rest, err := asn1.Unmarshal(rawIssuer, &r) + if len(rest) != 0 || err != nil { + return nil, fmt.Errorf("asn1.Unmarshal(Issuer) len(rest) = %v, err = %v", len(rest), err) + } + crlPath := fmt.Sprintf("%s.r%d", filepath.Join(cfg.RootDir, x509NameHash(r)), i) + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + // Break when we can't read a CRL file. + grpclogLogger.Infof("readFile: %v", err) + break + } + + crl, err := x509.ParseCRL(crlBytes) + if err != nil { + // Parsing errors for a CRL shouldn't happen so fail. 
+ return nil, fmt.Errorf("x509.ParseCrl(%v) failed err = %v", crlPath, err) + } + var certList *certificateListExt + if certList, err = parseCRLExtensions(crl); err != nil { + grpclogLogger.Infof("fetchCRL: unsupported crl %v, err = %v", crlPath, err) + // Continue to find a supported CRL + continue + } + + rawCRLIssuer, err := asn1.Marshal(certList.CertList.TBSCertList.Issuer) + if err != nil { + return nil, fmt.Errorf("asn1.Marshal(%v) failed err = %v", certList.CertList.TBSCertList.Issuer, err) + } + // RFC5280, 6.3.3 (b) Verify the issuer and scope of the complete CRL. + if bytes.Equal(rawIssuer, rawCRLIssuer) { + parsedCRL = certList + // Continue to find the highest number in the .rN suffix. + continue + } + } + + if parsedCRL == nil { + return nil, fmt.Errorf("fetchCrls no CRLs found for issuer") + } + return parsedCRL, nil +} + +func verifyCRL(crl *certificateListExt, rawIssuer []byte, chain []*x509.Certificate) error { + // RFC5280, 6.3.3 (f) Obtain and validate the certification path for the issuer of the complete CRL + // We intentionally limit our CRLs to be signed with the same certificate path as the certificate + // so we can use the chain from the connection. + rawCRLIssuer, err := asn1.Marshal(crl.CertList.TBSCertList.Issuer) + if err != nil { + return fmt.Errorf("asn1.Marshal(%v) failed err = %v", crl.CertList.TBSCertList.Issuer, err) + } + + for _, c := range chain { + // Use the key where the subject and KIDs match. + // This departs from RFC4158, 3.5.12 which states that KIDs + // cannot eliminate certificates, but RFC5280, 5.2.1 states that + // "Conforming CRL issuers MUST use the key identifier method, and MUST + // include this extension in all CRLs issued." + // So, this is much simpler than RFC4158 and should be compatible. + if bytes.Equal(c.SubjectKeyId, crl.AuthorityKeyID) && bytes.Equal(c.RawSubject, rawCRLIssuer) { + // RFC5280, 6.3.3 (g) Validate signature. 
+ return c.CheckCRLSignature(crl.CertList) + } + } + return fmt.Errorf("verifyCRL: No certificates mached CRL issuer (%v)", crl.CertList.TBSCertList.Issuer) +} diff --git a/security/advancedtls/crl_test.go b/security/advancedtls/crl_test.go new file mode 100644 index 000000000000..ec4483304c79 --- /dev/null +++ b/security/advancedtls/crl_test.go @@ -0,0 +1,718 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package advancedtls + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "os" + "path" + "strings" + "testing" + "time" + + lru "github.com/hashicorp/golang-lru" + "google.golang.org/grpc/security/advancedtls/testdata" +) + +func TestX509NameHash(t *testing.T) { + nameTests := []struct { + in pkix.Name + out string + }{ + { + in: pkix.Name{ + Country: []string{"US"}, + Organization: []string{"Example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{"us"}, + Organization: []string{"example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{" us"}, + Organization: []string{"example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"BoringSSL"}, + }, + out: 
"c24414d9", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"BoringSSL"}, + }, + out: "c24414d9", + }, + { + in: pkix.Name{ + SerialNumber: "87f4514475ba0a2b", + }, + out: "9dc713cd", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"Google LLC"}, + OrganizationalUnit: []string{"Production", "campus-sln"}, + CommonName: "Root CA (2021-02-02T07:30:36-08:00)", + }, + out: "0b35a562", + }, + { + in: pkix.Name{ + ExtraNames: []pkix.AttributeTypeAndValue{ + {Type: asn1.ObjectIdentifier{5, 5, 5, 5}, Value: "aaaa"}, + }, + }, + out: "eea339da", + }, + } + for _, tt := range nameTests { + t.Run(tt.in.String(), func(t *testing.T) { + h := x509NameHash(tt.in.ToRDNSequence()) + if h != tt.out { + t.Errorf("x509NameHash(%v): Got %v wanted %v", tt.in, h, tt.out) + } + }) + } +} + +func TestUnsupportedCRLs(t *testing.T) { + crlBytesSomeReasons := []byte(`-----BEGIN X509 CRL----- +MIIEeDCCA2ACAQEwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCVVMxHjAcBgNV +BAoTFUdvb2dsZSBUcnVzdCBTZXJ2aWNlczETMBEGA1UEAxMKR1RTIENBIDFPMRcN +MjEwNDI2MTI1OTQxWhcNMjEwNTA2MTE1OTQwWjCCAn0wIgIRAPOOG3L4VLC7CAAA +AABxQgEXDTIxMDQxOTEyMTgxOFowIQIQUK0UwBZkVdQIAAAAAHFCBRcNMjEwNDE5 +MTIxODE4WjAhAhBRIXBJaKoQkQgAAAAAcULHFw0yMTA0MjAxMjE4MTdaMCICEQCv +qQWUq5UxmQgAAAAAcULMFw0yMTA0MjAxMjE4MTdaMCICEQDdv5k1kKwKTQgAAAAA +cUOQFw0yMTA0MjExMjE4MTZaMCICEQDGIEfR8N9sEAgAAAAAcUOWFw0yMTA0MjEx +MjE4MThaMCECEBHgbLXlj5yUCAAAAABxQ/IXDTIxMDQyMTIzMDAyNlowIQIQE1wT +2GGYqKwIAAAAAHFD7xcNMjEwNDIxMjMwMDI5WjAiAhEAo/bSyDjpVtsIAAAAAHFE +txcNMjEwNDIyMjMwMDI3WjAhAhARdCrSrHE0dAgAAAAAcUS/Fw0yMTA0MjIyMzAw +MjhaMCECEHONohfWn3wwCAAAAABxRX8XDTIxMDQyMzIzMDAyOVowIgIRAOYkiUPA +os4vCAAAAABxRYgXDTIxMDQyMzIzMDAyOFowIQIQRNTow5Eg2gEIAAAAAHFGShcN +MjEwNDI0MjMwMDI2WjAhAhBX32dH4/WQ6AgAAAAAcUZNFw0yMTA0MjQyMzAwMjZa 
+MCICEQDHnUM1vsaP/wgAAAAAcUcQFw0yMTA0MjUyMzAwMjZaMCECEEm5rvmL8sj6 +CAAAAABxRxQXDTIxMDQyNTIzMDAyN1owIQIQW16OQs4YQYkIAAAAAHFIABcNMjEw +NDI2MTI1NDA4WjAhAhAhSohpYsJtDQgAAAAAcUgEFw0yMTA0MjYxMjU0MDlaoGkw +ZzAfBgNVHSMEGDAWgBSY0fhuEOvPm+xgnxiQG6DrfQn9KzALBgNVHRQEBAICBngw +NwYDVR0cAQH/BC0wK6AmoCSGImh0dHA6Ly9jcmwucGtpLmdvb2cvR1RTMU8xY29y +ZS5jcmyBAf8wDQYJKoZIhvcNAQELBQADggEBADPBXbxVxMJ1HC7btXExRUpJHUlU +YbeCZGx6zj5F8pkopbmpV7cpewwhm848Fx4VaFFppZQZd92O08daEC6aEqoug4qF +z6ZrOLzhuKfpW8E93JjgL91v0FYN7iOcT7+ERKCwVEwEkuxszxs7ggW6OJYJNvHh +priIdmcPoiQ3ZrIRH0vE3BfUcNXnKFGATWuDkiRI0I4A5P7NiOf+lAuGZet3/eom +0chgts6sdau10GfeUpHUd4f8e93cS/QeLeG16z7LC8vRLstU3m3vrknpZbdGqSia +97w66mqcnQh9V0swZiEnVLmLufaiuDZJ+6nUzSvLqBlb/ei3T/tKV0BoKJA= +-----END X509 CRL-----`) + + crlBytesIndirect := []byte(`-----BEGIN X509 CRL----- +MIIDGjCCAgICAQEwDQYJKoZIhvcNAQELBQAwdjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAoTC1Rlc3RpbmcgTHRkMSowKAYDVQQLEyFU +ZXN0aW5nIEx0ZCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxEDAOBgNVBAMTB1Rlc3Qg +Q0EXDTIxMDExNjAyMjAxNloXDTIxMDEyMDA2MjAxNlowgfIwbAIBAhcNMjEwMTE2 +MDIyMDE2WjBYMAoGA1UdFQQDCgEEMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQG +EwNVU0ExDTALBgNVBAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0 +MTAgAgEDFw0yMTAxMTYwMjIwMTZaMAwwCgYDVR0VBAMKAQEwYAIBBBcNMjEwMTE2 +MDIyMDE2WjBMMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQGEwNVU0ExDTALBgNV +BAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0MqBjMGEwHwYDVR0j +BBgwFoAURJSDWAOfhGCryBjl8dsQjBitl3swCgYDVR0UBAMCAQEwMgYDVR0cAQH/ +BCgwJqAhoB+GHWh0dHA6Ly9jcmxzLnBraS5nb29nL3Rlc3QuY3JshAH/MA0GCSqG +SIb3DQEBCwUAA4IBAQBVXX67mr2wFPmEWCe6mf/wFnPl3xL6zNOl96YJtsd7ulcS +TEbdJpaUnWFQ23+Tpzdj/lI2aQhTg5Lvii3o+D8C5r/Jc5NhSOtVJJDI/IQLh4pG +NgGdljdbJQIT5D2Z71dgbq1ocxn8DefZIJjO3jp8VnAm7AIMX2tLTySzD2MpMeMq +XmcN4lG1e4nx+xjzp7MySYO42NRY3LkphVzJhu3dRBYhBKViRJxw9hLttChitJpF +6Kh6a0QzrEY/QDJGhE1VrAD2c5g/SKnHPDVoCWo4ACIICi76KQQSIWfIdp4W/SY3 +qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 +-----END X509 CRL-----`) + + var tests = []struct { + desc string + in []byte + }{ + { + desc: 
"some reasons", + in: crlBytesSomeReasons, + }, + { + desc: "indirect", + in: crlBytesIndirect, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + crl, err := x509.ParseCRL(tt.in) + if err != nil { + t.Fatal(err) + } + if _, err := parseCRLExtensions(crl); err == nil { + t.Error("expected error got ok") + } + }) + } +} + +func TestCheckCertRevocation(t *testing.T) { + dummyCrlFile := []byte(`-----BEGIN X509 CRL----- +MIIDGjCCAgICAQEwDQYJKoZIhvcNAQELBQAwdjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAoTC1Rlc3RpbmcgTHRkMSowKAYDVQQLEyFU +ZXN0aW5nIEx0ZCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxEDAOBgNVBAMTB1Rlc3Qg +Q0EXDTIxMDExNjAyMjAxNloXDTIxMDEyMDA2MjAxNlowgfIwbAIBAhcNMjEwMTE2 +MDIyMDE2WjBYMAoGA1UdFQQDCgEEMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQG +EwNVU0ExDTALBgNVBAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0 +MTAgAgEDFw0yMTAxMTYwMjIwMTZaMAwwCgYDVR0VBAMKAQEwYAIBBBcNMjEwMTE2 +MDIyMDE2WjBMMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQGEwNVU0ExDTALBgNV +BAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0MqBjMGEwHwYDVR0j +BBgwFoAURJSDWAOfhGCryBjl8dsQjBitl3swCgYDVR0UBAMCAQEwMgYDVR0cAQH/ +BCgwJqAhoB+GHWh0dHA6Ly9jcmxzLnBraS5nb29nL3Rlc3QuY3JshAH/MA0GCSqG +SIb3DQEBCwUAA4IBAQBVXX67mr2wFPmEWCe6mf/wFnPl3xL6zNOl96YJtsd7ulcS +TEbdJpaUnWFQ23+Tpzdj/lI2aQhTg5Lvii3o+D8C5r/Jc5NhSOtVJJDI/IQLh4pG +NgGdljdbJQIT5D2Z71dgbq1ocxn8DefZIJjO3jp8VnAm7AIMX2tLTySzD2MpMeMq +XmcN4lG1e4nx+xjzp7MySYO42NRY3LkphVzJhu3dRBYhBKViRJxw9hLttChitJpF +6Kh6a0QzrEY/QDJGhE1VrAD2c5g/SKnHPDVoCWo4ACIICi76KQQSIWfIdp4W/SY3 +qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 +-----END X509 CRL-----`) + crl, err := x509.ParseCRL(dummyCrlFile) + if err != nil { + t.Fatalf("x509.ParseCRL(dummyCrlFile) failed: %v", err) + } + crlExt := &certificateListExt{CertList: crl} + var crlIssuer pkix.Name + crlIssuer.FillFromRDNSequence(&crl.TBSCertList.Issuer) + + var revocationTests = []struct { + desc string + in x509.Certificate + revoked RevocationStatus + }{ + { + desc: "Single revoked", + in: x509.Certificate{ + Issuer: 
pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test1", + }, + SerialNumber: big.NewInt(2), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Revoked no entry issuer", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test1", + }, + SerialNumber: big.NewInt(3), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Revoked new entry issuer", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test2", + }, + SerialNumber: big.NewInt(4), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Single unrevoked", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test2", + }, + SerialNumber: big.NewInt(1), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationUnrevoked, + }, + { + desc: "Single unrevoked Issuer", + in: x509.Certificate{ + Issuer: crlIssuer, + SerialNumber: big.NewInt(2), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationUnrevoked, + }, + } + + for _, tt := range revocationTests { + rawIssuer, err := asn1.Marshal(tt.in.Issuer.ToRDNSequence()) + if err != nil { + t.Fatalf("asn1.Marshal(%v) failed: %v", tt.in.Issuer.ToRDNSequence(), err) + } + tt.in.RawIssuer = rawIssuer + t.Run(tt.desc, func(t *testing.T) { + rev, err := checkCertRevocation(&tt.in, crlExt) + if err != nil { + t.Errorf("checkCertRevocation(%v) err = %v", tt.in.Issuer, err) + } else if rev != tt.revoked { + t.Errorf("checkCertRevocation(%v(%v)) returned %v wanted %v", + tt.in.Issuer, tt.in.SerialNumber, rev, tt.revoked) + } + }) + } +} + +func makeChain(t *testing.T, name 
string) []*x509.Certificate { + t.Helper() + + certChain := make([]*x509.Certificate, 0) + + rest, err := ioutil.ReadFile(name) + if err != nil { + t.Fatalf("ioutil.ReadFile(%v) failed %v", name, err) + } + for len(rest) > 0 { + var block *pem.Block + block, rest = pem.Decode(rest) + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatalf("ParseCertificate error %v", err) + } + t.Logf("Parsed Cert sub = %v iss = %v", c.Subject, c.Issuer) + certChain = append(certChain, c) + } + return certChain +} + +func loadCRL(t *testing.T, path string) *pkix.CertificateList { + b, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("readFile(%v) failed err = %v", path, err) + } + crl, err := x509.ParseCRL(b) + if err != nil { + t.Fatalf("ParseCrl(%v) failed err = %v", path, err) + } + return crl +} + +func TestCachedCRL(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + tests := []struct { + desc string + val interface{} + ok bool + }{ + { + desc: "Valid", + val: &certificateListExt{ + CertList: &pkix.CertificateList{ + TBSCertList: pkix.TBSCertificateList{ + NextUpdate: time.Now().Add(time.Hour), + }, + }}, + ok: true, + }, + { + desc: "Expired", + val: &certificateListExt{ + CertList: &pkix.CertificateList{ + TBSCertList: pkix.TBSCertificateList{ + NextUpdate: time.Now().Add(-time.Hour), + }, + }}, + ok: false, + }, + { + desc: "Wrong Type", + val: "string", + ok: false, + }, + { + desc: "Empty", + val: nil, + ok: false, + }, + } + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if tt.val != nil { + cache.Add(hex.EncodeToString([]byte(tt.desc)), tt.val) + } + _, ok := cachedCrl([]byte(tt.desc), cache) + if tt.ok != ok { + t.Errorf("Cache ok error expected %v vs %v", tt.ok, ok) + } + }) + } +} + +func TestGetIssuerCRLCache(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + tests := []struct { + desc string + rawIssuer []byte + 
certs []*x509.Certificate + }{ + { + desc: "Valid", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + }, + { + desc: "Unverified", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + }, + { + desc: "Not Found", + rawIssuer: []byte("not_found"), + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + cache.Purge() + _, err := fetchIssuerCRL("test", tt.rawIssuer, tt.certs, RevocationConfig{ + RootDir: testdata.Path("."), + Cache: cache, + }) + if err == nil && cache.Len() == 0 { + t.Error("Verified CRL not added to cache") + } + if err != nil && cache.Len() != 0 { + t.Error("Unverified CRL added to cache") + } + }) + } +} + +func TestVerifyCrl(t *testing.T) { + tampered := loadCRL(t, testdata.Path("crl/1.crl")) + // Change the signature so it won't verify + tampered.SignatureValue.Bytes[0]++ + + verifyTests := []struct { + desc string + crl *pkix.CertificateList + certs []*x509.Certificate + cert *x509.Certificate + errWant string + }{ + { + desc: "Pass intermediate", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "", + }, + { + desc: "Pass leaf", + crl: loadCRL(t, testdata.Path("crl/2.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[2], + errWant: "", + }, + { + desc: "Fail wrong cert chain", + crl: loadCRL(t, testdata.Path("crl/3.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/revokedInt.pem"))[1], + errWant: "No certificates mached", + }, + { + desc: "Fail no certs", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: []*x509.Certificate{}, + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "No certificates mached", + }, + { + desc: "Fail 
Tampered signature", + crl: tampered, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "verification failure", + }, + } + + for _, tt := range verifyTests { + t.Run(tt.desc, func(t *testing.T) { + crlExt, err := parseCRLExtensions(tt.crl) + if err != nil { + t.Fatalf("parseCRLExtensions(%v) failed, err = %v", tt.crl.TBSCertList.Issuer, err) + } + err = verifyCRL(crlExt, tt.cert.RawIssuer, tt.certs) + switch { + case tt.errWant == "" && err != nil: + t.Errorf("Valid CRL did not verify err = %v", err) + case tt.errWant != "" && err == nil: + t.Error("Invalid CRL verified") + case tt.errWant != "" && !strings.Contains(err.Error(), tt.errWant): + t.Errorf("fetchIssuerCRL(_, %v, %v, _) = %v; want Contains(%v)", tt.cert.RawIssuer, tt.certs, err, tt.errWant) + } + }) + } +} + +func TestRevokedCert(t *testing.T) { + revokedIntChain := makeChain(t, testdata.Path("crl/revokedInt.pem")) + revokedLeafChain := makeChain(t, testdata.Path("crl/revokedLeaf.pem")) + validChain := makeChain(t, testdata.Path("crl/unrevoked.pem")) + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + var revocationTests = []struct { + desc string + in tls.ConnectionState + revoked bool + allowUndetermined bool + }{ + { + desc: "Single unrevoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain}}, + revoked: false, + }, + { + desc: "Single revoked intermediate", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedIntChain}}, + revoked: true, + }, + { + desc: "Single revoked leaf", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedLeafChain}}, + revoked: true, + }, + { + desc: "Multi one revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, revokedLeafChain}}, + revoked: false, + }, + { + desc: "Multi revoked", + in: tls.ConnectionState{VerifiedChains: 
[][]*x509.Certificate{revokedLeafChain, revokedIntChain}}, + revoked: true, + }, + { + desc: "Multi unrevoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, validChain}}, + revoked: false, + }, + { + desc: "Undetermined revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: true, + }, + { + desc: "Undetermined allowed", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: false, + allowUndetermined: true, + }, + } + + for _, tt := range revocationTests { + t.Run(tt.desc, func(t *testing.T) { + err := CheckRevocation(tt.in, RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: tt.allowUndetermined, + Cache: cache, + }) + t.Logf("CheckRevocation err = %v", err) + if tt.revoked && err == nil { + t.Error("Revoked certificate chain was allowed") + } else if !tt.revoked && err != nil { + t.Error("Unrevoked certificate not allowed") + } + }) + } +} + +func setupTLSConn(t *testing.T) (net.Listener, *x509.Certificate, *ecdsa.PrivateKey) { + t.Helper() + templ := x509.Certificate{ + SerialNumber: big.NewInt(5), + BasicConstraintsValid: true, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + Subject: pkix.Name{CommonName: "test-cert"}, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + IPAddresses: []net.IP{net.ParseIP("::1")}, + CRLDistributionPoints: []string{"http://static.corp.google.com/crl/campus-sln/borg"}, + } + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey failed err = %v", err) + } + rawCert, err := x509.CreateCertificate(rand.Reader, &templ, &templ, key.Public(), key) + if err != nil { + t.Fatalf("x509.CreateCertificate failed err = %v", err) 
+ } + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + t.Fatalf("x509.ParseCertificate failed err = %v", err) + } + + srvCfg := tls.Config{ + Certificates: []tls.Certificate{ + { + Certificate: [][]byte{cert.Raw}, + PrivateKey: key, + }, + }, + } + l, err := tls.Listen("tcp6", "[::1]:0", &srvCfg) + if err != nil { + t.Fatalf("tls.Listen failed err = %v", err) + } + return l, cert, key +} + +// TestVerifyConnection will setup a client/server connection and check revocation in the real TLS dialer +func TestVerifyConnection(t *testing.T) { + lis, cert, key := setupTLSConn(t) + defer func() { + lis.Close() + }() + + var handshakeTests = []struct { + desc string + revoked []pkix.RevokedCertificate + success bool + }{ + { + desc: "Empty CRL", + revoked: []pkix.RevokedCertificate{}, + success: true, + }, + { + desc: "Revoked Cert", + revoked: []pkix.RevokedCertificate{ + { + SerialNumber: cert.SerialNumber, + RevocationTime: time.Now(), + }, + }, + success: false, + }, + } + for _, tt := range handshakeTests { + t.Run(tt.desc, func(t *testing.T) { + // Accept one connection. 
+ go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("tls.Accept failed err = %v", err) + } else { + conn.Write([]byte("Hello, World!")) + conn.Close() + } + }() + + dir, err := ioutil.TempDir("", "crl_dir") + if err != nil { + t.Fatalf("ioutil.TempDir failed err = %v", err) + } + defer os.RemoveAll(dir) + + crl, err := cert.CreateCRL(rand.Reader, key, tt.revoked, time.Now(), time.Now().Add(time.Hour)) + if err != nil { + t.Fatalf("templ.CreateCRL failed err = %v", err) + } + + err = ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.r0", x509NameHash(cert.Subject.ToRDNSequence()))), crl, 0777) + if err != nil { + t.Fatalf("ioutil.WriteFile failed err = %v", err) + } + + cp := x509.NewCertPool() + cp.AddCert(cert) + cliCfg := tls.Config{ + RootCAs: cp, + VerifyConnection: func(cs tls.ConnectionState) error { + return CheckRevocation(cs, RevocationConfig{RootDir: dir}) + }, + } + conn, err := tls.Dial(lis.Addr().Network(), lis.Addr().String(), &cliCfg) + t.Logf("tls.Dial err = %v", err) + if tt.success && err != nil { + t.Errorf("Expected success got err = %v", err) + } + if !tt.success && err == nil { + t.Error("Expected error, but got success") + } + if err == nil { + conn.Close() + } + }) + } +} diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index f141d4c05c81..20ed81e24d38 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -3,7 +3,7 @@ module google.golang.org/grpc/security/advancedtls/examples go 1.15 require ( - google.golang.org/grpc v1.36.0 + google.golang.org/grpc v1.38.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b google.golang.org/grpc/security/advancedtls v0.0.0-20201112215255-90f1b3ee835b ) diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index a08a6ac80cd7..004437a7ea66 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -29,6 +29,8 @@ 
github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 90dee4a46fd0..75527018ee78 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -4,7 +4,8 @@ go 1.14 require ( github.com/google/go-cmp v0.5.1 // indirect - google.golang.org/grpc v1.36.0 + github.com/hashicorp/golang-lru v0.5.4 + google.golang.org/grpc v1.38.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index a08a6ac80cd7..004437a7ea66 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -29,6 +29,8 @@ github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= diff --git a/security/advancedtls/testdata/crl/0b35a562.r0 b/security/advancedtls/testdata/crl/0b35a562.r0 new file mode 120000 index 000000000000..1a84eabdfc72 --- /dev/null +++ b/security/advancedtls/testdata/crl/0b35a562.r0 @@ -0,0 +1 @@ +5.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/0b35a562.r1 b/security/advancedtls/testdata/crl/0b35a562.r1 new file mode 120000 index 000000000000..6e6f10978918 --- /dev/null +++ b/security/advancedtls/testdata/crl/0b35a562.r1 @@ -0,0 +1 @@ +1.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/1.crl b/security/advancedtls/testdata/crl/1.crl new file mode 100644 index 000000000000..5b12ded4a66f --- /dev/null +++ b/security/advancedtls/testdata/crl/1.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBYDCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAyMS0wMi0wMlQwNzozMDozNi0wODowMCkX +DTIxMDIwMjE1MzAzNloXDTIxMDIwOTE1MzAzNlqgLzAtMB8GA1UdIwQYMBaAFPQN +tnCIBcG4ReQgoVi0kPgTROseMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0gAMEUC +IQDB9WEPBPHEo5xjCv8CT9okockJJnkLDOus6FypVLqj5QIgYw9/PYLwb41/Uc+4 +LLTAsfdDWh7xBJmqvVQglMoJOEc= +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/1ab871c8.r0 
b/security/advancedtls/testdata/crl/1ab871c8.r0 new file mode 120000 index 000000000000..f2cd877e7edb --- /dev/null +++ b/security/advancedtls/testdata/crl/1ab871c8.r0 @@ -0,0 +1 @@ +2.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/2.crl b/security/advancedtls/testdata/crl/2.crl new file mode 100644 index 000000000000..5ca9afd71419 --- /dev/null +++ b/security/advancedtls/testdata/crl/2.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBYDCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAyMS0wMi0wMlQwNzozMDozNi0wODowMCkX +DTIxMDIwMjE1MzAzNloXDTIxMDIwOTE1MzAzNlqgLzAtMB8GA1UdIwQYMBaAFBjo +V5Jnk/gp1k7fmWwkvTk/cF/IMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0gAMEUC +IQDgjA1Vj/pNFtNRL0vFEdapmFoArHM2+rn4IiP8jYLsCAIgAj2KEHbbtJ3zl5XP +WVW6ZyW7r3wIX+Bt3vLJWPrQtf8= +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/3.crl b/security/advancedtls/testdata/crl/3.crl new file mode 100644 index 000000000000..d37ad2247f59 --- /dev/null +++ b/security/advancedtls/testdata/crl/3.crl @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBiDCCAS8CAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAyMS0wMi0wMlQwNzozMTo1NC0wODowMCkX +DTIxMDIwMjE1MzE1NFoXDTIxMDIwOTE1MzE1NFowJzAlAhQAroEYW855BRqTrlov +5cBCGvkutxcNMjEwMjAyMTUzMTU0WqAvMC0wHwYDVR0jBBgwFoAUeq/TQ959KbWk +/um08jSTXogXpWUwCgYDVR0UBAMCAQEwCgYIKoZIzj0EAwIDRwAwRAIgaSOIhJDg +wOLYlbXkmxW0cqy/AfOUNYbz5D/8/FfvhosCICftg7Vzlu0Nh83jikyjy+wtkiJt +ZYNvGFQ3Sp2L3A9e +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/4.crl b/security/advancedtls/testdata/crl/4.crl new file mode 100644 index 000000000000..d4ee6f7cf186 --- /dev/null +++ 
b/security/advancedtls/testdata/crl/4.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBYDCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAyMS0wMi0wMlQwNzozMTo1NC0wODowMCkX +DTIxMDIwMjE1MzE1NFoXDTIxMDIwOTE1MzE1NFqgLzAtMB8GA1UdIwQYMBaAFIVn +8tIFgZpIdhomgYJ2c5ULLzpSMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0gAMEUC +ICupTvOqgAyRa1nn7+Pe/1vvlJPAQ8gUfTQsQ6XX3v6oAiEA08B2PsK6aTEwzjry +pXqhlUNZFzgaXrVVQuEJbyJ1qoU= +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/5.crl b/security/advancedtls/testdata/crl/5.crl new file mode 100644 index 000000000000..d1c24f0f25a3 --- /dev/null +++ b/security/advancedtls/testdata/crl/5.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBXzCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAyMS0wMi0wMlQwNzozMjo1Ny0wODowMCkX +DTIxMDIwMjE1MzI1N1oXDTIxMDIwOTE1MzI1N1qgLzAtMB8GA1UdIwQYMBaAFN+g +xTAtSTlb5Qqvrbp4rZtsaNzqMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0cAMEQC +IHrRKjieY7w7gxvpkJAdszPZBlaSSp/c9wILutBTy7SyAiAwhaHfgas89iRfaBs2 +EhGIeK39A+kSzqu6qEQBHpK36g== +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/6.crl b/security/advancedtls/testdata/crl/6.crl new file mode 100644 index 000000000000..87ef378f6aba --- /dev/null +++ b/security/advancedtls/testdata/crl/6.crl @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBiDCCAS8CAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAyMS0wMi0wMlQwNzozMjo1Ny0wODowMCkX +DTIxMDIwMjE1MzI1N1oXDTIxMDIwOTE1MzI1N1owJzAlAhQAxSe/pGmyvzN7mxm5 
+6ZJTYUXYuhcNMjEwMjAyMTUzMjU3WqAvMC0wHwYDVR0jBBgwFoAUpZ30UJXB4lI9 +j2SzodCtRFckrRcwCgYDVR0UBAMCAQEwCgYIKoZIzj0EAwIDRwAwRAIgRg3u7t3b +oyV5FhMuGGzWnfIwnKclpT8imnp8tEN253sCIFUY7DjiDohwu4Zup3bWs1OaZ3q3 +cm+j0H/oe8zzCAgp +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/71eac5a2.r0 b/security/advancedtls/testdata/crl/71eac5a2.r0 new file mode 120000 index 000000000000..9f37924cae0c --- /dev/null +++ b/security/advancedtls/testdata/crl/71eac5a2.r0 @@ -0,0 +1 @@ +4.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/7a1799af.r0 b/security/advancedtls/testdata/crl/7a1799af.r0 new file mode 120000 index 000000000000..f34df5b59c01 --- /dev/null +++ b/security/advancedtls/testdata/crl/7a1799af.r0 @@ -0,0 +1 @@ +3.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/8828a7e6.r0 b/security/advancedtls/testdata/crl/8828a7e6.r0 new file mode 120000 index 000000000000..70bead214cc3 --- /dev/null +++ b/security/advancedtls/testdata/crl/8828a7e6.r0 @@ -0,0 +1 @@ +6.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/README.md b/security/advancedtls/testdata/crl/README.md new file mode 100644 index 000000000000..00cb09c31928 --- /dev/null +++ b/security/advancedtls/testdata/crl/README.md @@ -0,0 +1,48 @@ +# CRL Test Data + +This directory contains cert chains and CRL files for revocation testing. 
+ +To print the chain, use a command like, + +```shell +openssl crl2pkcs7 -nocrl -certfile security/crl/x509/client/testdata/revokedLeaf.pem | openssl pkcs7 -print_certs -text -noout +``` + +The crl file symlinks are generated with `openssl rehash` + +## unrevoked.pem + +A certificate chain with CRL files and unrevoked certs + +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=Root CA (2021-02-02T07:30:36-08:00) + * 1.crl + +NOTE: 1.crl file is symlinked with 5.crl to simulate two issuers that hash to +the same value to test that loading multiple files works. + +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=node CA (2021-02-02T07:30:36-08:00) + * 2.crl + +## revokedInt.pem + +Certificate chain where the intermediate is revoked + +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=Root CA (2021-02-02T07:31:54-08:00) + * 3.crl +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=node CA (2021-02-02T07:31:54-08:00) + * 4.crl + +## revokedLeaf.pem + +Certificate chain where the leaf is revoked + +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=Root CA (2021-02-02T07:32:57-08:00) + * 5.crl +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=node CA (2021-02-02T07:32:57-08:00) + * 6.crl diff --git a/security/advancedtls/testdata/crl/deee447d.r0 b/security/advancedtls/testdata/crl/deee447d.r0 new file mode 120000 index 000000000000..1a84eabdfc72 --- /dev/null +++ b/security/advancedtls/testdata/crl/deee447d.r0 @@ -0,0 +1 @@ +5.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/revokedInt.pem b/security/advancedtls/testdata/crl/revokedInt.pem new file mode 100644 index 000000000000..8b7282ff8221 --- /dev/null +++ 
b/security/advancedtls/testdata/crl/revokedInt.pem @@ -0,0 +1,58 @@ +-----BEGIN CERTIFICATE----- +MIIDAzCCAqmgAwIBAgITAWjKwm2dNQvkO62Jgyr5rAvVQzAKBggqhkjOPQQDAjCB +pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNSb290IENBICgyMDIx +LTAyLTAyVDA3OjMxOjU0LTA4OjAwKTAgFw0yMTAyMDIxNTMxNTRaGA85OTk5MTIz +MTIzNTk1OVowgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYw +FAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYD +VQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9v +dCBDQSAoMjAyMS0wMi0wMlQwNzozMTo1NC0wODowMCkwWTATBgcqhkjOPQIBBggq +hkjOPQMBBwNCAAQhA0/puhTtSxbVVHseVhL2z7QhpPyJs5Q4beKi7tpaYRDmVn6p +Phh+jbRzg8Qj4gKI/Q1rrdm4rKer63LHpdWdo4GzMIGwMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUeq/TQ959KbWk/um08jSTXogXpWUwHwYDVR0jBBgwFoAUeq/T +Q959KbWk/um08jSTXogXpWUwLgYDVR0RBCcwJYYjc3BpZmZlOi8vY2FtcHVzLXNs +bi5wcm9kLmdvb2dsZS5jb20wCgYIKoZIzj0EAwIDSAAwRQIgOSQZvyDPQwVOWnpF +zWvI+DS2yXIj/2T2EOvJz2XgcK4CIQCL0mh/+DxLiO4zzbInKr0mxpGSxSeZCUk7 +1ZF7AeLlbw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDizCCAzKgAwIBAgIUAK6BGFvOeQUak65aL+XAQhr5LrcwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy +MS0wMi0wMlQwNzozMTo1NC0wODowMCkwIBcNMjEwMjAyMTUzMTU0WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI25v +ZGUgQ0EgKDIwMjEtMDItMDJUMDc6MzE6NTQtMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEye6UOlBos8Q3FFBiLahD9BaLTA18bO4MTPyv35T3lppvxD5X +U/AnEllOnx5OMtMjMBbIQjSkMbiQ9xNXoSqB6aOCATowggE2MA4GA1UdDwEB/wQE 
+AwIBBjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUhWfy0gWBmkh2GiaBgnZzlQsvOlIwHwYDVR0jBBgwFoAU +eq/TQ959KbWk/um08jSTXogXpWUwMwYDVR0RBCwwKoYoc3BpZmZlOi8vbm9kZS5j +YW1wdXMtc2xuLnByb2QuZ29vZ2xlLmNvbTA7BgNVHR4BAf8EMTAvoC0wK4YpY3Nj +cy10ZWFtLm5vZGUuY2FtcHVzLXNsbi5wcm9kLmdvb2dsZS5jb20wQgYDVR0fBDsw +OTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2NhbXB1 +cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNHADBEAiA79rPu6ZO1/0qB6RxL7jVz1200 +UTo8ioB4itbTzMnJqAIgJqp/Rc8OhpsfzQX8XnIIkl+SewT+tOxJT1MHVNMlVhc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC0DCCAnWgAwIBAgITXQ2c/C27OGqk4Pbu+MNJlOtpYTAKBggqhkjOPQQDAjCB +pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNub2RlIENBICgyMDIx +LTAyLTAyVDA3OjMxOjU0LTA4OjAwKTAgFw0yMTAyMDIxNTMxNTRaGA85OTk5MTIz +MTIzNTk1OVowADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABN2/1le5d3hS/piw +hrNMHjd7gPEjzXwtuXQTzdV+aaeOf3ldnC6OnEF/bggym9MldQSJZLXPYSaoj430 +Vu5PRNejggEkMIIBIDAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYBBQUH +AwIGCCsGAQUFBwMBMB0GA1UdDgQWBBTEewP3JgrJPekWWGGjChVqaMhaqTAfBgNV +HSMEGDAWgBSFZ/LSBYGaSHYaJoGCdnOVCy86UjBrBgNVHREBAf8EYTBfghZqemFi +MTIucHJvZC5nb29nbGUuY29thkVzcGlmZmU6Ly9jc2NzLXRlYW0ubm9kZS5jYW1w +dXMtc2xuLnByb2QuZ29vZ2xlLmNvbS9yb2xlL2JvcmctYWRtaW4tY28wQgYDVR0f +BDswOTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2Nh +bXB1cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNJADBGAiEA9w4qp3nHpXo+6d7mZc69 +QoALfP5ynfBCArt8bAlToo8CIQCgc/lTfl2BtBko+7h/w6pKxLeuoQkvCL5gHFyK +LXE6vA== +-----END CERTIFICATE----- diff --git a/security/advancedtls/testdata/crl/revokedLeaf.pem b/security/advancedtls/testdata/crl/revokedLeaf.pem new file mode 100644 index 000000000000..b7541abf6214 --- /dev/null +++ b/security/advancedtls/testdata/crl/revokedLeaf.pem @@ -0,0 +1,59 @@ +-----BEGIN CERTIFICATE----- +MIIDAzCCAqmgAwIBAgITTwodm6C4ZabFVUVa5yBw0TbzJTAKBggqhkjOPQQDAjCB 
+pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNSb290IENBICgyMDIx +LTAyLTAyVDA3OjMyOjU3LTA4OjAwKTAgFw0yMTAyMDIxNTMyNTdaGA85OTk5MTIz +MTIzNTk1OVowgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYw +FAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYD +VQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9v +dCBDQSAoMjAyMS0wMi0wMlQwNzozMjo1Ny0wODowMCkwWTATBgcqhkjOPQIBBggq +hkjOPQMBBwNCAARoZnzQWvAoyhvCLA2cFIK17khSaA9aA+flS5X9fLRt4RsfPCx3 +kim7wYKQSmBhQdc1UM4h3969r1c1Fvsh2H9qo4GzMIGwMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU36DFMC1JOVvlCq+tunitm2xo3OowHwYDVR0jBBgwFoAU36DF +MC1JOVvlCq+tunitm2xo3OowLgYDVR0RBCcwJYYjc3BpZmZlOi8vY2FtcHVzLXNs +bi5wcm9kLmdvb2dsZS5jb20wCgYIKoZIzj0EAwIDSAAwRQIgN7S9dQOQzNih92ag +7c5uQxuz+M6wnxWj/uwGQIIghRUCIQD2UDH6kkRSYQuyP0oN7XYO3XFjmZ2Yer6m +1ZS8fyWYYA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjTCCAzKgAwIBAgIUAOmArBu9gihLTlqP3W7Et0UoocEwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy +MS0wMi0wMlQwNzozMjo1Ny0wODowMCkwIBcNMjEwMjAyMTUzMjU3WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI25v +ZGUgQ0EgKDIwMjEtMDItMDJUMDc6MzI6NTctMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEfrgVEVQfSEFeCF1/FGeW7oq0yxecenT1BESfj4Z0zJ8p7P9W +bj1o6Rn6dUNlEhGrx7E3/4NFJ0cL1BSNGHkjiqOCATowggE2MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUpZ30UJXB4lI9j2SzodCtRFckrRcwHwYDVR0jBBgwFoAU +36DFMC1JOVvlCq+tunitm2xo3OowMwYDVR0RBCwwKoYoc3BpZmZlOi8vbm9kZS5j 
+YW1wdXMtc2xuLnByb2QuZ29vZ2xlLmNvbTA7BgNVHR4BAf8EMTAvoC0wK4YpY3Nj +cy10ZWFtLm5vZGUuY2FtcHVzLXNsbi5wcm9kLmdvb2dsZS5jb20wQgYDVR0fBDsw +OTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2NhbXB1 +cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNJADBGAiEAnuONgMqmbBlj4ibw5BgDtZUM +pboACSFJtEOJu4Yqjt0CIQDI5193J4wUcAY0BK0vO9rRfbNOIc+4ke9ieBDPSuhm +mA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICzzCCAnagAwIBAgIUAMUnv6Rpsr8ze5sZuemSU2FF2LowCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAy +MS0wMi0wMlQwNzozMjo1Ny0wODowMCkwIBcNMjEwMjAyMTUzMjU3WhgPOTk5OTEy +MzEyMzU5NTlaMAAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASCmYiIHUux5WFz +S0ksJzAPL7YTEh5o5MdXgLPB/WM6x9sVsQDSYU0PF5qc9vPNhkQzGBW79dkBnxhW +AGJkFr1Po4IBJDCCASAwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQWMBQGCCsGAQUF +BwMCBggrBgEFBQcDATAdBgNVHQ4EFgQUCR1CGEdlks0qcxCExO0rP1B/Z7UwHwYD +VR0jBBgwFoAUpZ30UJXB4lI9j2SzodCtRFckrRcwawYDVR0RAQH/BGEwX4IWanph +YjEyLnByb2QuZ29vZ2xlLmNvbYZFc3BpZmZlOi8vY3Njcy10ZWFtLm5vZGUuY2Ft +cHVzLXNsbi5wcm9kLmdvb2dsZS5jb20vcm9sZS9ib3JnLWFkbWluLWNvMEIGA1Ud +HwQ7MDkwN6A1oDOGMWh0dHA6Ly9zdGF0aWMuY29ycC5nb29nbGUuY29tL2NybC9j +YW1wdXMtc2xuL25vZGUwCgYIKoZIzj0EAwIDRwAwRAIgK9vQYNoL8HlEwWv89ioG +aQ1+8swq6Bo/5mJBrdVLvY8CIGxo6M9vJkPdObmetWNC+lmKuZDoqJWI0AAmBT2J +mR2r +-----END CERTIFICATE----- diff --git a/security/advancedtls/testdata/crl/unrevoked.pem b/security/advancedtls/testdata/crl/unrevoked.pem new file mode 100644 index 000000000000..5c5fc58a7a5e --- /dev/null +++ b/security/advancedtls/testdata/crl/unrevoked.pem @@ -0,0 +1,58 @@ +-----BEGIN CERTIFICATE----- +MIIDBDCCAqqgAwIBAgIUALy864QhnkTdceLH52k2XVOe8IQwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy 
+MS0wMi0wMlQwNzozMDozNi0wODowMCkwIBcNMjEwMjAyMTUzMDM2WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI1Jv +b3QgQ0EgKDIwMjEtMDItMDJUMDc6MzA6MzYtMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEYv/JS5hQ5kIgdKqYZWTKCO/6gloHAmIb1G8lmY0oXLXYNHQ4 +qHN7/pPtlcHQp0WK/hM8IGvgOUDoynA8mj0H9KOBszCBsDAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFPQNtnCIBcG4ReQgoVi0kPgTROseMB8GA1UdIwQYMBaAFPQN +tnCIBcG4ReQgoVi0kPgTROseMC4GA1UdEQQnMCWGI3NwaWZmZTovL2NhbXB1cy1z +bG4ucHJvZC5nb29nbGUuY29tMAoGCCqGSM49BAMCA0gAMEUCIQDwBn20DB4X/7Uk +Q5BR8JxQYUPxOfvuedjfeA8bPvQ2FwIgOEWa0cXJs1JxarILJeCXtdXvBgu6LEGQ +3Pk/bgz8Gek= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDizCCAzKgAwIBAgIUAM/6RKQ7Vke0i4xp5LaAqV73cmIwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy +MS0wMi0wMlQwNzozMDozNi0wODowMCkwIBcNMjEwMjAyMTUzMDM2WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI25v +ZGUgQ0EgKDIwMjEtMDItMDJUMDc6MzA6MzYtMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEllnhxmMYiUPUgRGmenbnm10gXpM94zHx3D1/HumPs6arjYuT +Zlhx81XL+g4bu4HII2qcGdP+Hqj/MMFNDI9z4aOCATowggE2MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUGOhXkmeT+CnWTt+ZbCS9OT9wX8gwHwYDVR0jBBgwFoAU +9A22cIgFwbhF5CChWLSQ+BNE6x4wMwYDVR0RBCwwKoYoc3BpZmZlOi8vbm9kZS5j +YW1wdXMtc2xuLnByb2QuZ29vZ2xlLmNvbTA7BgNVHR4BAf8EMTAvoC0wK4YpY3Nj +cy10ZWFtLm5vZGUuY2FtcHVzLXNsbi5wcm9kLmdvb2dsZS5jb20wQgYDVR0fBDsw +OTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2NhbXB1 
+cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNHADBEAiA86egqPw0qyapAeMGbHxrmYZYa +i5ARQsSKRmQixgYizQIgW+2iRWN6Kbqt4WcwpmGv/xDckdRXakF5Ign/WUDO5u4= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICzzCCAnWgAwIBAgITYjjKfYZUKQNUjNyF+hLDGpHJKTAKBggqhkjOPQQDAjCB +pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNub2RlIENBICgyMDIx +LTAyLTAyVDA3OjMwOjM2LTA4OjAwKTAgFw0yMTAyMDIxNTMwMzZaGA85OTk5MTIz +MTIzNTk1OVowADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD4r4+nCgZExYF8v +CLvGn0lY/cmam8mAkJDXRN2Ja2t+JwaTOptPmbbXft+1NTk5gCg5wB+FJCnaV3I/ +HaxEhBWjggEkMIIBIDAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYBBQUH +AwIGCCsGAQUFBwMBMB0GA1UdDgQWBBTTCjXX1Txjc00tBg/5cFzpeCSKuDAfBgNV +HSMEGDAWgBQY6FeSZ5P4KdZO35lsJL05P3BfyDBrBgNVHREBAf8EYTBfghZqemFi +MTIucHJvZC5nb29nbGUuY29thkVzcGlmZmU6Ly9jc2NzLXRlYW0ubm9kZS5jYW1w +dXMtc2xuLnByb2QuZ29vZ2xlLmNvbS9yb2xlL2JvcmctYWRtaW4tY28wQgYDVR0f +BDswOTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2Nh +bXB1cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNIADBFAiBq3URViNyMLpvzZHC1Y+4L ++35guyIJfjHu08P3S8/xswIhAJtWSQ1ZtozdOzGxg7GfUo4hR+5SP6rBTgIqXEfq +48fW +-----END CERTIFICATE----- From 65cabd74d8e18d7347fecd414fa8d83a00035f5f Mon Sep 17 00:00:00 2001 From: Jille Timmermans Date: Tue, 20 Jul 2021 19:58:14 +0200 Subject: [PATCH 163/998] internal/binarylog: Fix data race when calling Write() and Close() in parallel (#4604) They both touched bufferedSink.writeTicker --- internal/binarylog/sink.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index 936cfc007df9..c2fdd58b3198 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -133,12 +133,12 @@ func (fs *bufferedSink) startFlushGoroutine() { } func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } close(fs.done) - fs.mu.Lock() - defer 
fs.mu.Unlock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } From 0300770df1c0b742f4eef4cce47ca315379ad4d1 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 21 Jul 2021 10:22:02 -0700 Subject: [PATCH 164/998] xds: support cluster fallback in cluster_resolver (#4594) --- .../balancer/cdsbalancer/cdsbalancer.go | 132 ++++++----- .../cdsbalancer/cdsbalancer_security_test.go | 20 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 29 ++- .../balancer/cdsbalancer/cluster_handler.go | 8 +- .../cdsbalancer/cluster_handler_test.go | 10 +- .../clusterresolver/clusterresolver.go | 73 +++--- .../clusterresolver/clusterresolver_test.go | 223 ++---------------- .../balancer/clusterresolver/config.go | 128 +++------- .../balancer/clusterresolver/config_test.go | 165 +++++++++++++ .../balancer/clusterresolver/configbuilder.go | 49 ---- .../clusterresolver/configbuilder_test.go | 123 ---------- .../balancer/clusterresolver/eds_impl_test.go | 106 +++++++-- .../balancer/clusterresolver/eds_watcher.go | 87 ------- .../balancer/clusterresolver/priority_test.go | 106 +++++++++ .../clusterresolver/resource_resolver.go | 6 +- .../clusterresolver/resource_resolver_test.go | 42 ++-- xds/internal/balancer/priority/config.go | 2 +- 17 files changed, 587 insertions(+), 722 deletions(-) create mode 100644 xds/internal/balancer/clusterresolver/config_test.go delete mode 100644 xds/internal/balancer/clusterresolver/configbuilder.go delete mode 100644 xds/internal/balancer/clusterresolver/configbuilder_test.go delete mode 100644 xds/internal/balancer/clusterresolver/eds_watcher.go diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index c9014247a767..e52a34a7d29a 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -35,27 +35,27 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" 
"google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/xdsclient" ) const ( cdsName = "cds_experimental" - edsName = "eds_experimental" ) var ( errBalancerClosed = errors.New("cdsBalancer is closed") - // newEDSBalancer is a helper function to build a new edsBalancer and will be - // overridden in unittests. - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { - builder := balancer.Get(edsName) + // newChildBalancer is a helper function to build a new cluster_resolver + // balancer and will be overridden in unittests. + newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + builder := balancer.Get(clusterresolver.Name) if builder == nil { - return nil, fmt.Errorf("xds: no balancer builder with name %v", edsName) + return nil, fmt.Errorf("xds: no balancer builder with name %v", clusterresolver.Name) } - // We directly pass the parent clientConn to the - // underlying edsBalancer because the cdsBalancer does - // not deal with subConns. + // We directly pass the parent clientConn to the underlying + // cluster_resolver balancer because the cdsBalancer does not deal with + // subConns. return builder.Build(cc, opts), nil } buildProvider = buildProviderFunc @@ -126,31 +126,32 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err // ccUpdate wraps a clientConn update received from gRPC (pushed from the // xdsResolver). A valid clusterName causes the cdsBalancer to register a CDS // watcher with the xdsClient, while a non-nil error causes it to cancel the -// existing watch and propagate the error to the underlying edsBalancer. +// existing watch and propagate the error to the underlying cluster_resolver +// balancer. 
type ccUpdate struct { clusterName string err error } // scUpdate wraps a subConn update received from gRPC. This is directly passed -// on to the edsBalancer. +// on to the cluster_resolver balancer. type scUpdate struct { subConn balancer.SubConn state balancer.SubConnState } -// cdsBalancer implements a CDS based LB policy. It instantiates an EDS based -// LB policy to further resolve the serviceName received from CDS, into -// localities and endpoints. Implements the balancer.Balancer interface which -// is exposed to gRPC and implements the balancer.ClientConn interface which is -// exposed to the edsBalancer. +// cdsBalancer implements a CDS based LB policy. It instantiates a +// cluster_resolver balancer to further resolve the serviceName received from +// CDS, into localities and endpoints. Implements the balancer.Balancer +// interface which is exposed to gRPC and implements the balancer.ClientConn +// interface which is exposed to the cluster_resolver balancer. type cdsBalancer struct { ccw *ccWrapper // ClientConn interface passed to child LB. bOpts balancer.BuildOptions // BuildOptions passed to child LB. updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource. clusterHandler *clusterHandler // To watch the clusters. - edsLB balancer.Balancer // EDS child policy. + childLB balancer.Balancer logger *grpclog.PrefixLogger closed *grpcsync.Event done *grpcsync.Event @@ -166,7 +167,7 @@ type cdsBalancer struct { // handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good // updates lead to registration of a CDS watch. Updates with error lead to // cancellation of existing watch and propagation of the same error to the -// edsBalancer. +// cluster_resolver balancer. func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { // We first handle errors, if any, and then proceed with handling the // update, only if the status quo has changed. 
@@ -266,7 +267,7 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc } // handleWatchUpdate handles a watch update from the xDS Client. Good updates -// lead to clientConn updates being invoked on the underlying edsBalancer. +// lead to clientConn updates being invoked on the underlying cluster_resolver balancer. func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { if err := update.err; err != nil { b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) @@ -274,7 +275,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { return } - b.logger.Infof("Watch update from xds-client %p, content: %+v, security config: %v", b.xdsClient, pretty.ToJSON(update.chu), pretty.ToJSON(update.securityCfg)) + b.logger.Infof("Watch update from xds-client %p, content: %+v, security config: %v", b.xdsClient, pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) // Process the security config from the received update before building the // child policy or forwarding the update to it. We do this because the child @@ -291,47 +292,54 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { } // The first good update from the watch API leads to the instantiation of an - // edsBalancer. Further updates/errors are propagated to the existing - // edsBalancer. - if b.edsLB == nil { - edsLB, err := newEDSBalancer(b.ccw, b.bOpts) + // cluster_resolver balancer. Further updates/errors are propagated to the existing + // cluster_resolver balancer. 
+ if b.childLB == nil { + childLB, err := newChildBalancer(b.ccw, b.bOpts) if err != nil { - b.logger.Errorf("Failed to create child policy of type %s, %v", edsName, err) + b.logger.Errorf("Failed to create child policy of type %s, %v", clusterresolver.Name, err) return } - b.edsLB = edsLB - b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) - } + b.childLB = childLB + b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) + } + + dms := make([]balancerconfig.DiscoveryMechanism, len(update.updates)) + for i, cu := range update.updates { + switch cu.ClusterType { + case xdsclient.ClusterTypeEDS: + dms[i] = balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: cu.ClusterName, + EDSServiceName: cu.EDSServiceName, + MaxConcurrentRequests: cu.MaxRequests, + } + if cu.EnableLRS { + // An empty string here indicates that the cluster_resolver balancer should use the + // same xDS server for load reporting as it does for EDS + // requests/responses. + dms[i].LoadReportingServerName = new(string) - if len(update.chu) == 0 { - b.logger.Infof("got update with 0 cluster updates, should never happen. There should be at least one cluster") + } + case xdsclient.ClusterTypeLogicalDNS: + dms[i] = balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: cu.DNSHostName, + } + default: + b.logger.Infof("unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) + } } - // TODO: this function is currently only handling the cluster with higher - // priority. This should work in most cases (e.g. if the cluster is not a - // aggregated cluster, or if the higher priority cluster works fine so - // there's no need to fallback). This quick fix is to unblock the testing - // work before the full fallback support is complete. 
Once the EDS balancer - // is updated to cluster_resolver, which has the fallback functionality, we - // will fix this to handle all the clusters in list. - cds := update.chu[0] - lbCfg := &clusterresolver.EDSConfig{ - ClusterName: cds.ClusterName, - EDSServiceName: cds.EDSServiceName, - MaxConcurrentRequests: cds.MaxRequests, + lbCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: dms, } - if cds.EnableLRS { - // An empty string here indicates that the edsBalancer should use the - // same xDS server for load reporting as it does for EDS - // requests/responses. - lbCfg.LrsLoadReportingServerName = new(string) - } ccState := balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), BalancerConfig: lbCfg, } - if err := b.edsLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("xds: edsBalancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) + if err := b.childLB.UpdateClientConnState(ccState); err != nil { + b.logger.Errorf("xds: cluster_resolver balancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) } } @@ -348,20 +356,20 @@ func (b *cdsBalancer) run() { b.handleClientConnUpdate(update) case *scUpdate: // SubConn updates are passthrough and are simply handed over to - // the underlying edsBalancer. - if b.edsLB == nil { - b.logger.Errorf("xds: received scUpdate {%+v} with no edsBalancer", update) + // the underlying cluster_resolver balancer. 
+ if b.childLB == nil { + b.logger.Errorf("xds: received scUpdate {%+v} with no cluster_resolver balancer", update) break } - b.edsLB.UpdateSubConnState(update.subConn, update.state) + b.childLB.UpdateSubConnState(update.subConn, update.state) } case u := <-b.clusterHandler.updateChannel: b.handleWatchUpdate(u) case <-b.closed.Done(): b.clusterHandler.close() - if b.edsLB != nil { - b.edsLB.Close() - b.edsLB = nil + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil } if b.cachedRoot != nil { b.cachedRoot.Close() @@ -389,22 +397,22 @@ func (b *cdsBalancer) run() { // - If it's from xds client, it means CDS resource were removed. The CDS // watcher should keep watching. // -// In both cases, the error will be forwarded to EDS balancer. And if error is -// resource-not-found, the child EDS balancer will stop watching EDS. +// In both cases, the error will be forwarded to the child balancer. And if +// error is resource-not-found, the child balancer will stop watching EDS. func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { // This is not necessary today, because xds client never sends connection // errors. if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { b.clusterHandler.close() } - if b.edsLB != nil { + if b.childLB != nil { if xdsclient.ErrType(err) != xdsclient.ErrorTypeConnection { // Connection errors will be sent to the child balancers directly. // There's no need to forward them. - b.edsLB.ResolverError(err) + b.childLB.ResolverError(err) } } else { - // If eds balancer was never created, fail the RPCs with + // If child balancer was never created, fail the RPCs with // errors. 
b.ccw.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 067bc2b05369..7eb1d0889395 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -153,8 +153,8 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS // Override the creation of the EDS balancer to return a fake EDS balancer // implementation. edsB := newTestEDSBalancer() - oldEDSBalancerBuilder := newEDSBalancer - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + oldEDSBalancerBuilder := newChildBalancer + newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { edsB.parentCC = cc return edsB, nil } @@ -177,7 +177,7 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS } return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { - newEDSBalancer = oldEDSBalancerBuilder + newChildBalancer = oldEDSBalancerBuilder xdsC.Close() } } @@ -251,7 +251,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -306,7 +306,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. No security config is + // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) @@ -464,7 +464,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -498,7 +498,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -551,7 +551,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. 
wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -601,7 +601,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -672,7 +672,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. 
cdsUpdate := xdsclient.ClusterUpdate{ ClusterName: serviceName, SecurityCfg: &xdsclient.SecurityConfig{ diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 8b103143ff76..a4c6d40f7824 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" @@ -197,20 +198,26 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { // edsCCS is a helper function to construct a good update passed from the // cdsBalancer to the edsBalancer. func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { - lbCfg := &clusterresolver.EDSConfig{ - ClusterName: service, + discoveryMechanism := balancerconfig.DiscoveryMechanism{ + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: service, MaxConcurrentRequests: countMax, } if enableLRS { - lbCfg.LrsLoadReportingServerName = new(string) + discoveryMechanism.LoadReportingServerName = new(string) + + } + lbCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{discoveryMechanism}, } + return balancer.ClientConnState{ BalancerConfig: lbCfg, } } // setup creates a cdsBalancer and an edsBalancer (and overrides the -// newEDSBalancer function to return it), and also returns a cleanup function. +// newChildBalancer function to return it), and also returns a cleanup function. 
func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { t.Helper() xdsC := fakeclient.NewClient() @@ -222,14 +229,14 @@ func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *x cdsB := builder.Build(tcc, balancer.BuildOptions{}) edsB := newTestEDSBalancer() - oldEDSBalancerBuilder := newEDSBalancer - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + oldEDSBalancerBuilder := newChildBalancer + newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { edsB.parentCC = cc return edsB, nil } return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { - newEDSBalancer = oldEDSBalancerBuilder + newChildBalancer = oldEDSBalancerBuilder xdsC.Close() } } @@ -426,7 +433,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -511,7 +518,7 @@ func (s) TestResolverError(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. 
cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -560,7 +567,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -628,7 +635,7 @@ func (s) TestClose(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. + // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index b0760c7630ab..1f5acafe110b 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -32,9 +32,9 @@ var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a type clusterHandlerUpdate struct { // securityCfg is the Security Config from the top (root) cluster. securityCfg *xdsclient.SecurityConfig - // chu is a list of ClusterUpdates from all the leaf clusters. 
- chu []xdsclient.ClusterUpdate - err error + // updates is a list of ClusterUpdates from all the leaf clusters. + updates []xdsclient.ClusterUpdate + err error } // clusterHandler will be given a name representing a cluster. It will then @@ -101,7 +101,7 @@ func (ch *clusterHandler) constructClusterUpdate() { } ch.updateChannel <- clusterHandlerUpdate{ securityCfg: ch.root.clusterUpdate.SecurityCfg, - chu: clusterUpdate, + updates: clusterUpdate, } } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index 216592f9200e..dc69dd34e2af 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -95,7 +95,7 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{test.clusterUpdate}); diff != "" { + if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{test.clusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -189,7 +189,7 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { fakeClient.InvokeWatchClusterCallback(test.newClusterUpdate, nil) select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{test.newClusterUpdate}); diff != "" { + if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{test.newClusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -305,7 +305,7 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // ordered as per the cluster update. 
select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ + if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ ClusterType: xdsclient.ClusterTypeEDS, ClusterName: edsService, }, { @@ -412,7 +412,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ + if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ ClusterType: xdsclient.ClusterTypeEDS, ClusterName: edsService, }, { @@ -658,7 +658,7 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // Then an update should successfully be written to the update buffer. select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.chu, []xdsclient.ClusterUpdate{{ + if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ ClusterType: xdsclient.ClusterTypeEDS, ClusterName: edsService2, }}); diff != "" { diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index f61b56b9a2cf..cb8176d16448 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -34,12 +34,13 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/xdsclient" ) // Name is the name of the cluster_resolver balancer. 
-const Name = "eds_experimental" +const Name = "cluster_resolver_experimental" var ( errBalancerClosed = errors.New("cdsBalancer is closed") @@ -68,7 +69,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal } b := &clusterResolverBalancer{ - cc: cc, bOpts: opts, updateCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), @@ -79,9 +79,11 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal } b.logger = prefixLogger(b) b.logger.Infof("Created") - b.edsWatcher = &edsWatcher{ - parent: b, - updateChannel: make(chan *watchUpdate, 1), + + b.resourceWatcher = newResourceResolver(b) + b.cc = &ccWrapper{ + ClientConn: cc, + resourceWatcher: b.resourceWatcher, } go b.run() @@ -93,9 +95,9 @@ func (bb) Name() string { } func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg EDSConfig + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { - return nil, fmt.Errorf("unable to unmarshal balancer config %s into EDSConfig, error: %v", string(c), err) + return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(c), err) } return &cfg, nil } @@ -114,33 +116,35 @@ type scUpdate struct { state balancer.SubConnState } -// clusterResolverBalancer manages xdsClient and the actual EDS balancer -// implementation that does load balancing. +// clusterResolverBalancer manages xdsClient and the actual EDS balancer implementation that +// does load balancing. +// +// It currently has only an clusterResolverBalancer. Later, we may add fallback. type clusterResolverBalancer struct { - cc balancer.ClientConn - bOpts balancer.BuildOptions - updateCh *buffer.Unbounded // Channel for updates from gRPC. - edsWatcher *edsWatcher - logger *grpclog.PrefixLogger - closed *grpcsync.Event - done *grpcsync.Event + cc balancer.ClientConn + bOpts balancer.BuildOptions + updateCh *buffer.Unbounded // Channel for updates from gRPC. 
+ resourceWatcher *resourceResolver + logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event priorityBuilder balancer.Builder priorityConfigParser balancer.ConfigParser - config *EDSConfig + config *LBConfig configRaw *serviceconfig.ParseResult xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. child balancer.Balancer - edsResp xdsclient.EndpointsUpdate + priorities []balancerconfig.PriorityConfig watchUpdateReceived bool } // handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good -// updates lead to registration of an EDS watch. Updates with error lead to -// cancellation of existing watch and propagation of the same error to the +// updates lead to registration of EDS and DNS watches. Updates with error lead +// to cancellation of existing watch and propagation of the same error to the // child balancer. func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { // We first handle errors, if any, and then proceed with handling the @@ -151,7 +155,7 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { } b.logger.Infof("Receive update from resolver, balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) - cfg, _ := update.state.BalancerConfig.(*EDSConfig) + cfg, _ := update.state.BalancerConfig.(*LBConfig) if cfg == nil { b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", update.state.BalancerConfig) return @@ -159,7 +163,7 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { b.config = cfg b.configRaw = update.state.ResolverState.ServiceConfig - b.edsWatcher.updateConfig(cfg) + b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) if !b.watchUpdateReceived { // If update was not received, wait for it. 
@@ -175,16 +179,16 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying child balancer. -func (b *clusterResolverBalancer) handleWatchUpdate(update *watchUpdate) { +func (b *clusterResolverBalancer) handleWatchUpdate(update *resourceUpdate) { if err := update.err; err != nil { b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) b.handleErrorFromUpdate(err, false) return } - b.logger.Infof("resource update: %+v", pretty.ToJSON(update.eds)) + b.logger.Infof("resource update: %+v", pretty.ToJSON(update.priorities)) b.watchUpdateReceived = true - b.edsResp = update.eds + b.priorities = update.priorities // A new EDS update triggers new child configs (e.g. different priorities // for the priority balancer), and new addresses (the endpoints come from @@ -206,7 +210,7 @@ func (b *clusterResolverBalancer) updateChildConfig() error { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } - childCfgBytes, addrs, err := buildPriorityConfigJSON(b.edsResp, b.config) + childCfgBytes, addrs, err := balancerconfig.BuildPriorityConfigJSON(b.priorities, b.config.EndpointPickingPolicy) if err != nil { return fmt.Errorf("failed to build priority balancer config: %v", err) } @@ -243,7 +247,7 @@ func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bo // This is an error from the parent ClientConn (can be the parent CDS // balancer), and is a resource-not-found error. This means the resource // (can be either LDS or CDS) was removed. Stop the EDS watch. 
- b.edsWatcher.stopWatch() + b.resourceWatcher.stop() } if b.child != nil { b.child.ResolverError(err) @@ -277,13 +281,13 @@ func (b *clusterResolverBalancer) run() { } b.child.UpdateSubConnState(update.subConn, update.state) } - case u := <-b.edsWatcher.updateChannel: + case u := <-b.resourceWatcher.updateChannel: b.handleWatchUpdate(u) // Close results in cancellation of the EDS watch and closing of the // underlying child policy and is the only way to exit this goroutine. case <-b.closed.Done(): - b.edsWatcher.stopWatch() + b.resourceWatcher.stop() if b.child != nil { b.child.Close() @@ -344,3 +348,14 @@ func (b *clusterResolverBalancer) Close() { b.closed.Fire() <-b.done.Done() } + +// ccWrapper overrides ResolveNow(), so that re-resolution from the child +// policies will trigger the DNS resolver in cluster_resolver balancer. +type ccWrapper struct { + balancer.ClientConn + resourceWatcher *resourceResolver +} + +func (c *ccWrapper) ResolveNow(resolver.ResolveNowOptions) { + c.resourceWatcher.resolveNow() +} diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 8f3644d08bed..7e2df25e0535 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -21,24 +21,19 @@ package clusterresolver import ( - "bytes" "context" - "encoding/json" "fmt" "testing" "time" - "github.com/golang/protobuf/jsonpb" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpctest" - scpb "google.golang.org/grpc/internal/proto/grpc_service_config" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal" + 
"google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" @@ -48,8 +43,8 @@ import ( const ( defaultTestTimeout = 1 * time.Second defaultTestShortTimeout = 10 * time.Millisecond - testServiceName = "test/foo" - testClusterName = "test/cluster" + testEDSServcie = "test-eds-service-name" + testClusterName = "test-cluster-name" ) var ( @@ -105,7 +100,7 @@ func (t *noopTestClientConn) NewSubConn([]resolver.Address, balancer.NewSubConnO return nil, nil } -func (noopTestClientConn) Target() string { return testServiceName } +func (noopTestClientConn) Target() string { return testEDSServcie } type scStateChange struct { sc balancer.SubConn @@ -213,7 +208,7 @@ func (s) TestSubConnStateChange(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) if edsB == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -221,7 +216,7 @@ func (s) TestSubConnStateChange(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), }); err != nil { t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) } @@ -259,7 +254,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) if edsB == nil { 
t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -269,7 +264,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { defer cancel() if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), }); err != nil { t.Fatal(err) } @@ -323,7 +318,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { // An update with the same service name should not trigger a new watch. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), }); err != nil { t.Fatal(err) } @@ -347,7 +342,7 @@ func (s) TestErrorFromResolver(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) if edsB == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -357,7 +352,7 @@ func (s) TestErrorFromResolver(t *testing.T) { defer cancel() if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), }); err != nil { t.Fatal(err) } @@ -408,7 +403,7 @@ func (s) TestErrorFromResolver(t *testing.T) { // the previous watch was canceled. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, + BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), }); err != nil { t.Fatal(err) } @@ -449,7 +444,7 @@ func (s) TestClientWatchEDS(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) if edsB == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -460,7 +455,7 @@ func (s) TestClientWatchEDS(t *testing.T) { // If eds service name is not set, should watch for cluster name. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ClusterName: "cluster-1"}, + BalancerConfig: newLBConfigWithOneEDS("cluster-1"), }); err != nil { t.Fatal(err) } @@ -472,7 +467,7 @@ func (s) TestClientWatchEDS(t *testing.T) { // the same. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{EDSServiceName: "foobar-1"}, + BalancerConfig: newLBConfigWithOneEDS("foobar-1"), }); err != nil { t.Fatal(err) } @@ -486,7 +481,7 @@ func (s) TestClientWatchEDS(t *testing.T) { // with no resource names being sent to the server. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{EDSServiceName: "foobar-2"}, + BalancerConfig: newLBConfigWithOneEDS("foobar-2"), }); err != nil { t.Fatal(err) } @@ -495,184 +490,12 @@ func (s) TestClientWatchEDS(t *testing.T) { } } -const ( - fakeBalancerA = "fake_balancer_A" - fakeBalancerB = "fake_balancer_B" -) - -// Install two fake balancers for service config update tests. -// -// ParseConfig only accepts the json if the balancer specified is registered. -func init() { - balancer.Register(&fakeBalancerBuilder{name: fakeBalancerA}) - balancer.Register(&fakeBalancerBuilder{name: fakeBalancerB}) -} - -type fakeBalancerBuilder struct { - name string -} - -func (b *fakeBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return &fakeBalancer{cc: cc} -} - -func (b *fakeBalancerBuilder) Name() string { - return b.name -} - -type fakeBalancer struct { - cc balancer.ClientConn -} - -func (b *fakeBalancer) ResolverError(error) { - panic("implement me") -} - -func (b *fakeBalancer) UpdateClientConnState(balancer.ClientConnState) error { - panic("implement me") -} - -func (b *fakeBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { - panic("implement me") -} - -func (b *fakeBalancer) Close() {} - -func (s) TestBalancerConfigParsing(t *testing.T) { - const testEDSName = "eds.service" - var testLRSName = "lrs.server" - b := bytes.NewBuffer(nil) - if err := (&jsonpb.Marshaler{}).Marshal(b, &scpb.XdsConfig{ - ChildPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Xds{}}, - {Policy: &scpb.LoadBalancingConfig_RoundRobin{ - RoundRobin: &scpb.RoundRobinConfig{}, - }}, - }, - FallbackPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Xds{}}, - {Policy: &scpb.LoadBalancingConfig_PickFirst{ - PickFirst: &scpb.PickFirstConfig{}, - }}, - }, - EdsServiceName: testEDSName, - 
LrsLoadReportingServerName: &wrapperspb.StringValue{Value: testLRSName}, - }); err != nil { - t.Fatalf("%v", err) - } - - var testMaxConcurrentRequests uint32 = 123 - tests := []struct { - name string - js json.RawMessage - want serviceconfig.LoadBalancingConfig - wantErr bool - }{ - { - name: "bad json", - js: json.RawMessage(`i am not JSON`), - wantErr: true, - }, - { - name: "empty", - js: json.RawMessage(`{}`), - want: &EDSConfig{}, - }, - { - name: "jsonpb-generated", - js: b.Bytes(), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "round_robin", - Config: json.RawMessage("{}"), - }, - FallBackPolicy: &loadBalancingConfig{ - Name: "pick_first", - Config: json.RawMessage("{}"), - }, - EDSServiceName: testEDSName, - LrsLoadReportingServerName: &testLRSName, - }, - }, - { - // json with random balancers, and the first is not registered. - name: "manually-generated", - js: json.RawMessage(` -{ - "childPolicy": [ - {"fake_balancer_C": {}}, - {"fake_balancer_A": {}}, - {"fake_balancer_B": {}} - ], - "fallbackPolicy": [ - {"fake_balancer_C": {}}, - {"fake_balancer_B": {}}, - {"fake_balancer_A": {}} - ], - "edsServiceName": "eds.service", - "maxConcurrentRequests": 123, - "lrsLoadReportingServerName": "lrs.server" -}`), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "fake_balancer_A", - Config: json.RawMessage("{}"), - }, - FallBackPolicy: &loadBalancingConfig{ - Name: "fake_balancer_B", - Config: json.RawMessage("{}"), - }, - EDSServiceName: testEDSName, - MaxConcurrentRequests: &testMaxConcurrentRequests, - LrsLoadReportingServerName: &testLRSName, - }, - }, - { - // json with no lrs server name, LoadReportingServerName should - // be nil (not an empty string). 
- name: "no-lrs-server-name", - js: json.RawMessage(` -{ - "edsServiceName": "eds.service" -}`), - want: &EDSConfig{ - EDSServiceName: testEDSName, - LrsLoadReportingServerName: nil, - }, - }, - { - name: "good child policy", - js: json.RawMessage(`{"childPolicy":[{"pick_first":{}}]}`), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "pick_first", - Config: json.RawMessage(`{}`), - }, - }, - }, - { - name: "multiple good child policies", - js: json.RawMessage(`{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "round_robin", - Config: json.RawMessage(`{}`), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := bb{}.ParseConfig(tt.js) - if (err != nil) != tt.wantErr { - t.Fatalf("edsBalancerBuilder.ParseConfig() error = %v, wantErr %v", err, tt.wantErr) - } - if tt.wantErr { - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf(cmp.Diff(got, tt.want)) - } - }) +func newLBConfigWithOneEDS(edsServiceName string) *LBConfig { + return &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + EDSServiceName: edsServiceName, + }}, } } diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 0741d6586ae3..043c834399e6 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -1,6 +1,6 @@ /* * - * Copyright 2019 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,110 +19,44 @@ package clusterresolver import ( "encoding/json" - "fmt" - "google.golang.org/grpc/balancer" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" ) -// EDSConfig represents the loadBalancingConfig section of the service config -// for EDS balancers. -type EDSConfig struct { - serviceconfig.LoadBalancingConfig - // ChildPolicy represents the load balancing config for the child - // policy. - ChildPolicy *loadBalancingConfig - // FallBackPolicy represents the load balancing config for the - // fallback. - FallBackPolicy *loadBalancingConfig - // ClusterName is the cluster name. - ClusterName string - // EDSServiceName is the name to use in EDS query. If not set, use - // ClusterName. - EDSServiceName string - // MaxConcurrentRequests is the max number of concurrent request allowed for - // this service. If unset, default value 1024 is used. +// LBConfig is the config for cluster resolver balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // DiscoveryMechanisms is an ordered list of discovery mechanisms. // - // Note that this is not defined in the service config proto. And the reason - // is, we are dropping EDS and moving the features into cluster_impl. But in - // the mean time, to keep things working, we need to add this field. And it - // should be fine to add this extra field here, because EDS is only used in - // CDS today, so we have full control. - MaxConcurrentRequests *uint32 - // LRS server to send load reports to. If not present, load reporting - // will be disabled. If set to the empty string, load reporting will - // be sent to the same server that we obtained CDS data from. - LrsLoadReportingServerName *string -} - -// edsConfigJSON is the intermediate unmarshal result of EDSConfig. 
ChildPolicy -// and Fallbackspolicy are post-processed, and for each, the first installed -// policy is kept. -type edsConfigJSON struct { - ChildPolicy []*loadBalancingConfig - FallbackPolicy []*loadBalancingConfig - ClusterName string - EDSServiceName string - MaxConcurrentRequests *uint32 - LRSLoadReportingServerName *string -} - -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. -// When unmarshalling, we iterate through the childPolicy/fallbackPolicy lists -// and select the first LB policy which has been registered. -func (l *EDSConfig) UnmarshalJSON(data []byte) error { - var configJSON edsConfigJSON - if err := json.Unmarshal(data, &configJSON); err != nil { - return err - } + // Must have at least one element. Results from each discovery mechanism are + // concatenated together in successive priorities. + DiscoveryMechanisms []balancerconfig.DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` - l.ClusterName = configJSON.ClusterName - l.EDSServiceName = configJSON.EDSServiceName - l.MaxConcurrentRequests = configJSON.MaxConcurrentRequests - l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName - - for _, lbcfg := range configJSON.ChildPolicy { - if balancer.Get(lbcfg.Name) != nil { - l.ChildPolicy = lbcfg - break - } - } - - for _, lbcfg := range configJSON.FallbackPolicy { - if balancer.Get(lbcfg.Name) != nil { - l.FallBackPolicy = lbcfg - break - } - } - return nil -} - -// MarshalJSON returns a JSON encoding of l. -func (l *EDSConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("EDSConfig.MarshalJSON() is unimplemented") -} - -// loadBalancingConfig represents a single load balancing config, -// stored in JSON format. -type loadBalancingConfig struct { - Name string - Config json.RawMessage -} + // LocalityPickingPolicy is policy for locality picking. + // + // This policy's config is expected to be in the format used by the + // weighted_target policy. 
Note that the config should include an empty + // value for the "targets" field; that empty value will be replaced by one + // that is dynamically generated based on the EDS data. Optional; defaults + // to "weighted_target". + LocalityPickingPolicy *internalserviceconfig.BalancerConfig `json:"localityPickingPolicy,omitempty"` + + // EndpointPickingPolicy is policy for endpoint picking. + // + // This will be configured as the policy for each child in the + // locality-policy's config. Optional; defaults to "round_robin". + EndpointPickingPolicy *internalserviceconfig.BalancerConfig `json:"endpointPickingPolicy,omitempty"` -// MarshalJSON returns a JSON encoding of l. -func (l *loadBalancingConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("loadBalancingConfig.MarshalJSON() is unimplemented") + // TODO: read and warn if endpoint is not roundrobin or locality is not + // weightedtarget. } -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. -func (l *loadBalancingConfig) UnmarshalJSON(data []byte) error { - var cfg map[string]json.RawMessage - if err := json.Unmarshal(data, &cfg); err != nil { - return err - } - for name, config := range cfg { - l.Name = name - l.Config = config +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err } - return nil + return &cfg, nil } diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go new file mode 100644 index 000000000000..1333692b7fca --- /dev/null +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -0,0 +1,165 @@ +// +build go1.12 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" +) + +const ( + testJSONConfig1 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServerName": "test-lrs-server", + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name" + }] +}` + testJSONConfig2 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServerName": "test-lrs-server", + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name" + },{ + "type": "LOGICAL_DNS" + }] +}` + testJSONConfig3 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServerName": "test-lrs-server", + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name" + }], + "localityPickingPolicy":[{"pick_first":{}}], + "endpointPickingPolicy":[{"pick_first":{}}] +}` + + testLRSServer = "test-lrs-server" + testMaxRequests = 314 +) + +func TestParseConfig(t *testing.T) { + tests := []struct { + name string + js string + want *LBConfig + wantErr bool + }{ + { + name: "empty json", + js: "", + want: nil, + wantErr: true, + }, + { + name: "OK with one discovery mechanism", + js: testJSONConfig1, + want: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + { + Cluster: testClusterName, + 
LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: balancerconfig.DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, + }, + }, + LocalityPickingPolicy: nil, + EndpointPickingPolicy: nil, + }, + wantErr: false, + }, + { + name: "OK with multiple discovery mechanisms", + js: testJSONConfig2, + want: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: balancerconfig.DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, + }, + { + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + }, + }, + LocalityPickingPolicy: nil, + EndpointPickingPolicy: nil, + }, + wantErr: false, + }, + { + name: "OK with picking policy override", + js: testJSONConfig3, + want: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: balancerconfig.DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, + }, + }, + LocalityPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: "pick_first", + Config: nil, + }, + EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: "pick_first", + Config: nil, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseConfig([]byte(tt.js)) + if (err != nil) != tt.wantErr { + t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("parseConfig() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} + +func newString(s string) *string { + return &s +} + +func newUint32(i uint32) *uint32 { + return &i +} diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go 
b/xds/internal/balancer/clusterresolver/configbuilder.go deleted file mode 100644 index 3dd3b5309248..000000000000 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package clusterresolver - -import ( - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" - "google.golang.org/grpc/xds/internal/xdsclient" -) - -const million = 1000000 - -func buildPriorityConfigJSON(edsResp xdsclient.EndpointsUpdate, c *EDSConfig) ([]byte, []resolver.Address, error) { - var childConfig *internalserviceconfig.BalancerConfig - if c.ChildPolicy != nil { - childConfig = &internalserviceconfig.BalancerConfig{Name: c.ChildPolicy.Name} - } - return balancerconfig.BuildPriorityConfigJSON( - []balancerconfig.PriorityConfig{ - { - Mechanism: balancerconfig.DiscoveryMechanism{ - Cluster: c.ClusterName, - LoadReportingServerName: c.LrsLoadReportingServerName, - MaxConcurrentRequests: c.MaxConcurrentRequests, - Type: balancerconfig.DiscoveryMechanismTypeEDS, - EDSServiceName: c.EDSServiceName, - }, - EDSResp: edsResp, - }, - }, childConfig, - ) -} diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go deleted file mode 100644 index 
31f17fde7a74..000000000000 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package clusterresolver - -import ( - "fmt" - "testing" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/xdsclient" -) - -const ( - localityCount = 4 - addressPerLocality = 2 -) - -var ( - testLocalityIDs []internal.LocalityID - testEndpoints [][]xdsclient.Endpoint -) - -func init() { - for i := 0; i < localityCount; i++ { - testLocalityIDs = append(testLocalityIDs, internal.LocalityID{Zone: fmt.Sprintf("test-zone-%d", i)}) - var ends []xdsclient.Endpoint - for j := 0; j < addressPerLocality; j++ { - addr := fmt.Sprintf("addr-%d-%d", i, j) - ends = append(ends, xdsclient.Endpoint{ - Address: addr, - HealthStatus: xdsclient.EndpointHealthStatusHealthy, - }) - } - testEndpoints = append(testEndpoints, ends) - } -} - -// TestBuildPriorityConfigJSON is a sanity check that the generated config bytes -// are valid (can be parsed back to a config struct). -// -// The correctness is covered by the unmarshalled version -// TestBuildPriorityConfig. 
-func TestBuildPriorityConfigJSON(t *testing.T) { - const ( - testClusterName = "cluster-name-for-watch" - testEDSServiceName = "service-name-from-parent" - testLRSServer = "lrs-addr-from-config" - testMaxReq = 314 - testDropCategory = "test-drops" - testDropOverMillion = 1 - ) - for _, lrsServer := range []*string{newString(testLRSServer), newString(""), nil} { - got, _, err := buildPriorityConfigJSON(xdsclient.EndpointsUpdate{ - Drops: []xdsclient.OverloadDropConfig{{ - Category: testDropCategory, - Numerator: testDropOverMillion, - Denominator: million, - }}, - Localities: []xdsclient.Locality{{ - Endpoints: testEndpoints[3], - ID: testLocalityIDs[3], - Weight: 80, - Priority: 1, - }, { - Endpoints: testEndpoints[1], - ID: testLocalityIDs[1], - Weight: 80, - Priority: 0, - }, { - Endpoints: testEndpoints[2], - ID: testLocalityIDs[2], - Weight: 20, - Priority: 1, - }, { - Endpoints: testEndpoints[0], - ID: testLocalityIDs[0], - Weight: 20, - Priority: 0, - }}}, - &EDSConfig{ - ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}, - ClusterName: testClusterName, - EDSServiceName: testEDSServiceName, - MaxConcurrentRequests: newUint32(testMaxReq), - LrsLoadReportingServerName: lrsServer, - }, - ) - if err != nil { - t.Fatalf("buildPriorityConfigJSON(...) 
failed: %v", err) - } - priorityB := balancer.Get(priority.Name) - if _, err = priorityB.(balancer.ConfigParser).ParseConfig(got); err != nil { - t.Fatalf("ParseConfig(%+v) failed: %v", got, err) - } - } -} - -func newString(s string) *string { - return &s -} - -func newUint32(i uint32) *uint32 { - return &i -} diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index 9a41fa9e2b33..bf7e7f6c421c 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -31,9 +31,11 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/stub" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/testutils" @@ -59,11 +61,11 @@ func init() { balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond * 100 } -func setupTestEDS(t *testing.T, initChild *loadBalancingConfig) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { +func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) cc := testutils.NewTestClientConn(t) builder := balancer.Get(Name) - edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) + edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) if edsb == nil { 
t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -71,9 +73,12 @@ func setupTestEDS(t *testing.T, initChild *loadBalancingConfig) (balancer.Balanc defer cancel() if err := edsb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &EDSConfig{ - ClusterName: testClusterName, - ChildPolicy: initChild, + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + }}, + EndpointPickingPolicy: initChild, }, }); err != nil { edsb.Close() @@ -462,10 +467,17 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { stub.Register(balancerName, stub.BalancerFuncs{ UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { - if len(s.ResolverState.Addresses) == 0 { - return nil + m, _ := bd.Data.(map[string]bool) + if m == nil { + m = make(map[string]bool) + bd.Data = m + } + for _, addr := range s.ResolverState.Addresses { + if !m[addr.Addr] { + m[addr.Addr] = true + bd.ClientConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + } } - bd.ClientConn.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{}) return nil }, UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { @@ -477,9 +489,24 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { }) t.Logf("initialize with sub-balancer: stub-balancer") - edsb, cc, xdsC, cleanup := setupTestEDS(t, &loadBalancingConfig{Name: balancerName}) + edsb, cc, xdsC, cleanup := setupTestEDS(t, &internalserviceconfig.BalancerConfig{Name: balancerName}) defer cleanup() + t.Logf("update sub-balancer to stub-balancer") + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + Type: balancerconfig.DiscoveryMechanismTypeEDS, 
+ }}, + EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: balancerName, + }, + }, + }); err != nil { + t.Fatal(err) + } + // Two localities, each with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) @@ -497,10 +524,19 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { t.Logf("update sub-balancer to round-robin") if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + }}, + EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, }); err != nil { t.Fatal(err) } + for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } @@ -518,10 +554,19 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { t.Logf("update sub-balancer to stub-balancer") if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: &loadBalancingConfig{Name: balancerName}}, + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + }}, + EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: balancerName, + }, + }, }); err != nil { t.Fatal(err) } + for i := 0; i < 2; i++ { scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && @@ -542,10 +587,19 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { t.Logf("update sub-balancer to round-robin") if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ClusterName: testClusterName, ChildPolicy: 
&loadBalancingConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + }}, + EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, }); err != nil { t.Fatal(err) } + for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } @@ -568,14 +622,20 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { var maxRequests uint32 = 50 if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}, - ClusterName: testClusterName, - MaxConcurrentRequests: &maxRequests, + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + MaxConcurrentRequests: &maxRequests, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + }}, + EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, }, }); err != nil { t.Fatal(err) } + // One locality with one backend. clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) @@ -628,14 +688,20 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { // update afterwards). Make sure the new picker uses the new configs. 
var maxRequests2 uint32 = 10 if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{Name: roundrobin.Name}, - ClusterName: testClusterName, - MaxConcurrentRequests: &maxRequests2, + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + Cluster: testClusterName, + MaxConcurrentRequests: &maxRequests2, + Type: balancerconfig.DiscoveryMechanismTypeEDS, + }}, + EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, }, }); err != nil { t.Fatal(err) } + // Picks with drops. dones = []func(){} p2 := <-cc.NewPickerCh diff --git a/xds/internal/balancer/clusterresolver/eds_watcher.go b/xds/internal/balancer/clusterresolver/eds_watcher.go deleted file mode 100644 index 02186702c0ec..000000000000 --- a/xds/internal/balancer/clusterresolver/eds_watcher.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package clusterresolver - -import ( - "google.golang.org/grpc/xds/internal/xdsclient" -) - -// watchUpdate wraps the information received from a registered EDS watcher. A -// non-nil error is propagated to the underlying child balancer. A valid update -// results in creating a new child balancer (priority balancer, if one doesn't -// already exist) and pushing the updated balancer config to it. 
-type watchUpdate struct { - eds xdsclient.EndpointsUpdate - err error -} - -// edsWatcher takes an EDS balancer config, and use the xds_client to watch EDS -// updates. The EDS updates are passed back to the balancer via a channel. -type edsWatcher struct { - parent *clusterResolverBalancer - - updateChannel chan *watchUpdate - - edsToWatch string - edsCancel func() -} - -func (ew *edsWatcher) updateConfig(config *EDSConfig) { - // If EDSServiceName is set, use it to watch EDS. Otherwise, use the cluster - // name. - newEDSToWatch := config.EDSServiceName - if newEDSToWatch == "" { - newEDSToWatch = config.ClusterName - } - - if ew.edsToWatch == newEDSToWatch { - return - } - - // Restart EDS watch when the eds name to watch has changed. - ew.edsToWatch = newEDSToWatch - - if ew.edsCancel != nil { - ew.edsCancel() - } - cancelEDSWatch := ew.parent.xdsClient.WatchEndpoints(newEDSToWatch, func(update xdsclient.EndpointsUpdate, err error) { - select { - case <-ew.updateChannel: - default: - } - ew.updateChannel <- &watchUpdate{eds: update, err: err} - }) - ew.parent.logger.Infof("Watch started on resource name %v with xds-client %p", newEDSToWatch, ew.parent.xdsClient) - ew.edsCancel = func() { - cancelEDSWatch() - ew.parent.logger.Infof("Watch cancelled on resource name %v with xds-client %p", newEDSToWatch, ew.parent.xdsClient) - } - -} - -// stopWatch stops the EDS watch. -// -// Call to updateConfig will restart the watch with the new name. 
-func (ew *edsWatcher) stopWatch() { - if ew.edsCancel != nil { - ew.edsCancel() - ew.edsCancel = nil - } - ew.edsToWatch = "" -} diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index a4c6d5b1c658..b2935be0c362 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -28,6 +28,8 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/testutils" ) @@ -712,3 +714,107 @@ func (s) TestEDSPriority_FirstPriorityRemoved(t *testing.T) { t.Fatal(err) } } + +// Watch resources from EDS and DNS, with EDS as the higher priority. Lower +// priority is used when higher priority is not ready. 
+func (s) TestFallbackToDNS(t *testing.T) { + const testDNSEndpointAddr = "3.1.4.1:5" + // dnsTargetCh, dnsCloseCh, resolveNowCh, dnsR, cleanup := setupDNS() + dnsTargetCh, _, resolveNowCh, dnsR, cleanupDNS := setupDNS() + defer cleanupDNS() + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) + defer cleanup() + + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + { + Type: balancerconfig.DiscoveryMechanismTypeEDS, + Cluster: testClusterName, + }, + { + Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: testDNSTarget, + }, + }, + }, + }); err != nil { + t.Fatal(err) + } + + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + select { + case target := <-dnsTargetCh: + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for building DNS resolver") + } + + // One locality with one backend. + clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) + xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) + + // Also send a DNS update, because the balancer needs both updates from all + // resources to move on. + dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: testDNSEndpointAddr}}}) + + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + + // p0 is ready. 
+ edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // Test roundrobin with only p0 subconns. + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + t.Fatal(err) + } + + // Turn down 0, p1 (DNS) will be used. + edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + // The transient failure above should not trigger a re-resolve to the DNS + // resolver. Need to read to clear the channel, to avoid potential deadlock + // writing to the channel later. + shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shortCancel() + select { + case <-resolveNowCh: + t.Fatal("unexpected re-resolve trigger by transient failure from EDS endpoint") + case <-shortCtx.Done(): + } + + // The addresses used to create new SubConn should be the DNS endpoint. + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testDNSEndpointAddr; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc1 := <-cc.NewSubConnCh + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // Test pick with 1. + if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + t.Fatal(err) + } + + // Turn down the DNS endpoint, this should trigger an re-resolve in the DNS + // resolver. + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + // The transient failure above should trigger a re-resolve to the DNS + // resolver. Need to read to clear the channel, to avoid potential deadlock + // writing to the channel later. 
+ select { + case <-resolveNowCh: + case <-ctx.Done(): + t.Fatal("Timed out waiting for re-resolve") + } +} diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 29aed0e72f4a..e68d77d3efe9 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -28,8 +28,8 @@ import ( // resourceUpdate is a combined update from all the resources, in the order of // priority. For example, it can be {EDS, EDS, DNS}. type resourceUpdate struct { - p []balancerconfig.PriorityConfig - err error + priorities []balancerconfig.PriorityConfig + err error } type discoveryMechanism interface { @@ -197,7 +197,7 @@ func (rr *resourceResolver) generate() { case <-rr.updateChannel: default: } - rr.updateChannel <- &resourceUpdate{p: ret} + rr.updateChannel <- &resourceUpdate{priorities: ret} } type edsDiscoveryMechanism struct { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index 9a9438155098..621ca2a127c8 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -62,14 +62,14 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { }{ {name: "watch EDS", clusterName: testClusterName, - edsName: testServiceName, - wantName: testServiceName, + edsName: testEDSServcie, + wantName: testEDSServcie, edsUpdate: testEDSUpdates[0], want: []balancerconfig.PriorityConfig{{ Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, Cluster: testClusterName, - EDSServiceName: testServiceName, + EDSServiceName: testEDSServcie, }, EDSResp: testEDSUpdates[0], }}, @@ -110,7 +110,7 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { fakeClient.InvokeWatchEDSCallback("", test.edsUpdate, nil) select { case u := 
<-rr.updateChannel: - if diff := cmp.Diff(u.p, test.want); diff != "" { + if diff := cmp.Diff(u.priorities, test.want); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -123,7 +123,7 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) } if edsNameCanceled != test.wantName { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, testServiceName) + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, testEDSServcie) } }) } @@ -192,7 +192,7 @@ func (s) TestResourceResolverOneDNSResource(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: test.addrs}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, test.want); diff != "" { + if diff := cmp.Diff(u.priorities, test.want); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -224,7 +224,7 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ Type: balancerconfig.DiscoveryMechanismTypeEDS, Cluster: testClusterName, - EDSServiceName: testServiceName, + EDSServiceName: testEDSServcie, }}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -232,19 +232,19 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - if gotEDSName1 != testServiceName { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testServiceName) + if gotEDSName1 != testEDSServcie { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testEDSServcie) } // Invoke callback, should get an update. 
fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, Cluster: testClusterName, - EDSServiceName: testServiceName, + EDSServiceName: testEDSServcie, }, EDSResp: testEDSUpdates[0], }}); diff != "" { @@ -264,7 +264,7 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) } if edsNameCanceled1 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, testServiceName) + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, testEDSServcie) } gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) if err != nil { @@ -287,7 +287,7 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, Cluster: testClusterName, @@ -314,7 +314,7 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { } select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, Cluster: testClusterName, @@ -385,7 +385,7 @@ func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + if diff := 
cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ { Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, @@ -497,7 +497,7 @@ func (s) TestResourceResolverChangePriority(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ { Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, @@ -538,7 +538,7 @@ func (s) TestResourceResolverChangePriority(t *testing.T) { } select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ { Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, @@ -625,7 +625,7 @@ func (s) TestResourceResolverEDSAndDNS(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ { Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, @@ -687,7 +687,7 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeEDS, Cluster: testClusterName, @@ -724,7 +724,7 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) select { case 
u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, @@ -845,7 +845,7 @@ func (s) TestResourceResolverDNSResolveNow(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.p, []balancerconfig.PriorityConfig{{ + if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ Mechanism: balancerconfig.DiscoveryMechanism{ Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, diff --git a/xds/internal/balancer/priority/config.go b/xds/internal/balancer/priority/config.go index c9cb16e323f0..37f1c9a829a8 100644 --- a/xds/internal/balancer/priority/config.go +++ b/xds/internal/balancer/priority/config.go @@ -29,7 +29,7 @@ import ( // Child is a child of priority balancer. type Child struct { Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"` - IgnoreReresolutionRequests bool + IgnoreReresolutionRequests bool `json:"ignoreReresolutionRequests,omitempty"` } // LBConfig represents priority balancer's config. 
From 8332d5b997af9e1554418167860351696d35e628 Mon Sep 17 00:00:00 2001 From: lzhfromustc <43191155+lzhfromustc@users.noreply.github.com> Date: Wed, 21 Jul 2021 13:40:04 -0400 Subject: [PATCH 165/998] test: fix possible goroutine leaks in unit tests (#4570) --- internal/resolver/config_selector_test.go | 6 ++++-- xds/internal/httpfilter/fault/fault_test.go | 4 ++-- xds/internal/server/listener_wrapper_test.go | 6 +++++- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/internal/resolver/config_selector_test.go b/internal/resolver/config_selector_test.go index e5a50995df11..e1dae8bde27c 100644 --- a/internal/resolver/config_selector_test.go +++ b/internal/resolver/config_selector_test.go @@ -48,6 +48,8 @@ func (s) TestSafeConfigSelector(t *testing.T) { retChan1 := make(chan *RPCConfig) retChan2 := make(chan *RPCConfig) + defer close(retChan1) + defer close(retChan2) one := 1 two := 2 @@ -55,8 +57,8 @@ func (s) TestSafeConfigSelector(t *testing.T) { resp1 := &RPCConfig{MethodConfig: serviceconfig.MethodConfig{MaxReqSize: &one}} resp2 := &RPCConfig{MethodConfig: serviceconfig.MethodConfig{MaxReqSize: &two}} - cs1Called := make(chan struct{}) - cs2Called := make(chan struct{}) + cs1Called := make(chan struct{}, 1) + cs2Called := make(chan struct{}, 1) cs1 := &fakeConfigSelector{ selectConfig: func(r RPCInfo) (*RPCConfig, error) { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 46606449cb9d..a77ab58ad356 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -608,7 +608,7 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - streams := make(chan testpb.TestService_FullDuplexCallClient) + streams := make(chan testpb.TestService_FullDuplexCallClient, 5) // startStream() is called 5 times startStream := func() { str, err := 
client.FullDuplexCall(ctx) if err != nil { @@ -620,7 +620,7 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { str := <-streams str.CloseSend() if _, err := str.Recv(); err != io.EOF { - t.Fatal("stream error:", err) + t.Error("stream error:", err) } } releaseStream := func() { diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index bef2ad56e18f..837c851a733f 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -117,7 +117,10 @@ type fakeListener struct { } func (fl *fakeListener) Accept() (net.Conn, error) { - cne := <-fl.acceptCh + cne, ok := <-fl.acceptCh + if !ok { + return nil, errors.New("a non-temporary error") + } return cne.conn, cne.err } @@ -262,6 +265,7 @@ func (s) TestListenerWrapper_Accept(t *testing.T) { }}, nil) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() + defer close(lis.acceptCh) select { case <-ctx.Done(): t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good Listener update") From 0a8c63739a87bee6ff6097d272b63727659f4503 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Wed, 21 Jul 2021 10:50:37 -0700 Subject: [PATCH 166/998] grpclb: propagate the most recent connection error when grpclb enters transient failure (#4605) --- balancer/grpclb/grpclb.go | 6 +++- balancer/grpclb/grpclb_test.go | 59 ++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index a43d8964119f..49d11d0d2e21 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -25,6 +25,7 @@ package grpclb import ( "context" "errors" + "fmt" "sync" "time" @@ -221,6 +222,7 @@ type lbBalancer struct { // when resolved address updates are received, and read in the goroutine // handling fallback. 
resolvedBackendAddrs []resolver.Address + connErr error // the last connection error } // regeneratePicker takes a snapshot of the balancer, and generates a picker from @@ -230,7 +232,7 @@ type lbBalancer struct { // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: balancer.ErrTransientFailure} + lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} return } @@ -336,6 +338,8 @@ func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubCo // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) + case connectivity.TransientFailure: + lb.connErr = scs.ConnectionError } // Force regenerate picker if // - this sc became ready from not-ready diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 9cbb338c2415..d6275b657f90 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -1232,6 +1232,65 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { } } +func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { + r := manual.NewBuilderWithScheme("whatever") + + // Start up an LB which will tell the client to fall back + // right away. + tss, cleanup, err := newLoadBalancer(0, "", nil) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + + // Start a standalone backend, to be used during fallback. The creds + // are intentionally misconfigured in order to simulate failure of a + // security handshake. 
+ beLis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen %v", err) + } + defer beLis.Close() + standaloneBEs := startBackends("arbitrary.invalid.name", true, beLis) + defer stopBackends(standaloneBEs) + + creds := serverNameCheckCreds{} + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), + grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) + + r.UpdateState(resolver.State{Addresses: []resolver.Address{{ + Addr: tss.lbAddr, + Type: resolver.GRPCLB, + ServerName: lbServerName, + }, { + Addr: beLis.Addr().String(), + Type: resolver.Backend, + }}}) + + // If https://github.com/grpc/grpc-go/blob/65cabd74d8e18d7347fecd414fa8d83a00035f5f/balancer/grpclb/grpclb_test.go#L103 + // changes, then expectedErrMsg may need to be updated. 
+ const expectedErrMsg = "received unexpected server name" + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + var wg sync.WaitGroup + wg.Add(1) + go func() { + tss.ls.fallbackNow() + wg.Done() + }() + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err == nil || !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, rpc error containing substring: %q", testC, err, expectedErrMsg) + } + wg.Wait() +} + type failPreRPCCred struct{} func (failPreRPCCred) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { From a0bed723f1c00c8b07c6ceaf1f6ac2cb42ec0b35 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 21 Jul 2021 21:58:19 -0400 Subject: [PATCH 167/998] xds: add http filters to FilterChain matching (#4595) * Add HTTP Filters to FilterChain --- xds/internal/server/listener_wrapper_test.go | 9 + xds/internal/xdsclient/filter_chain.go | 21 +- xds/internal/xdsclient/filter_chain_test.go | 481 ++++++++++++++++++- xds/internal/xdsclient/lds_test.go | 30 -- xds/internal/xdsclient/xds.go | 105 ++-- xds/server_test.go | 9 + 6 files changed, 538 insertions(+), 117 deletions(-) diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 837c851a733f..8a79e2321dd9 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -30,6 +30,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" wrapperspb 
"github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc/internal/grpctest" @@ -82,6 +83,14 @@ var listenerWithFilterChains = &v3listenerpb.Listener{ }), }, }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, }, }, } diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 7303ebd3ce70..7089b97594a1 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -26,7 +26,6 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" - "google.golang.org/grpc/xds/internal/version" ) @@ -50,14 +49,11 @@ const ( // FilterChain captures information from within a FilterChain message in a // Listener resource. -// -// Currently, this simply contains the security configuration found in the -// 'transport_socket' field of the filter chain. The actual set of filters -// associated with this filter chain are not captured here, since we do not -// support these filters on the server-side yet. type FilterChain struct { // SecurityCfg contains transport socket security configuration. SecurityCfg *SecurityConfig + // HTTPFilters represent the HTTP Filters that comprise this FilterChain. + HTTPFilters []HTTPFilter } // SourceType specifies the connection source IP match type. @@ -395,16 +391,20 @@ func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePre } // filterChainFromProto extracts the relevant information from the FilterChain -// proto and stores it in our internal representation. Currently, we only -// process the security configuration stored in the transport_socket field. 
+// proto and stores it in our internal representation. func filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { + httpFilters, err := processNetworkFilters(fc.GetFilters()) + if err != nil { + return nil, err + } + filterChain := &FilterChain{HTTPFilters: httpFilters} // If the transport_socket field is not specified, it means that the control // plane has not sent us any security config. This is fine and the server // will use the fallback credentials configured as part of the // xdsCredentials. ts := fc.GetTransportSocket() if ts == nil { - return &FilterChain{}, nil + return filterChain, nil } if name := ts.GetName(); name != transportSocketName { return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) @@ -431,7 +431,8 @@ func filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { if sc.RequireClientCert && sc.RootInstanceName == "" { return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") } - return &FilterChain{SecurityCfg: sc}, nil + filterChain.SecurityCfg = sc + return filterChain, nil } // FilterChainLookupParams wraps parameters to be passed to Lookup. 
diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index 25390b7f9248..e330a73a145b 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -28,8 +28,11 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/wrapperspb" @@ -37,6 +40,25 @@ import ( "google.golang.org/grpc/xds/internal/version" ) +var ( + emptyValidNetworkFilters = []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + } + validServerSideHTTPFilter1 = &v3httppb.HttpFilter{ + Name: "serverOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, + } + validServerSideHTTPFilter2 = &v3httppb.HttpFilter{ + Name: "serverOnlyCustomFilter2", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, + } +) + // TestNewFilterChainImpl_Failure_BadMatchFields verifies cases where we have a // single filter chain with match criteria that contains unsupported fields. 
func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { @@ -150,11 +172,13 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16), cidrRangeFromAddressAndPrefixLen("10.0.0.0", 0)}, }, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.2.2", 16)}, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -165,15 +189,19 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_ANY}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_EXTERNAL}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_EXTERNAL}, + Filters: emptyValidNetworkFilters, }, }, }, @@ -186,11 +214,13 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { FilterChainMatch: &v3listenerpb.FilterChainMatch{ SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16), cidrRangeFromAddressAndPrefixLen("10.0.0.0", 0)}, }, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.2.2", 16)}, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -201,12 +231,15 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { 
FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3, 4, 5}}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{5, 6, 7}}, + Filters: emptyValidNetworkFilters, }, }, }, @@ -243,6 +276,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { TransportSocket: &v3corepb.TransportSocket{Name: "unsupported-transport-socket-name"}, + Filters: emptyValidNetworkFilters, }, }, }, @@ -259,6 +293,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{}), }, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -278,6 +313,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { }, }, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -294,6 +330,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{}), }, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -318,6 +355,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -342,6 +380,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -360,6 +399,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -377,6 +417,349 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { } } +// TestNewFilterChainImpl_Failure_BadHTTPFilters verifies cases where the HTTP +// Filters in the filter chain are invalid. 
+func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { + tests := []struct { + name string + lis *v3listenerpb.Listener + wantErr string + }{ + { + name: "client side HTTP filter", + lis: &v3listenerpb.Listener{ + Name: "grpc/server?xds.resource.listening_address=0.0.0.0:9999", + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + { + Name: "clientOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, + }, + }, + }), + }, + }, + }, + }, + }, + }, + wantErr: "invalid server side HTTP Filters", + }, + { + name: "one valid then one invalid HTTP filter", + lis: &v3listenerpb.Listener{ + Name: "grpc/server?xds.resource.listening_address=0.0.0.0:9999", + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + { + Name: "clientOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, + }, + }, + }), + }, + }, + }, + }, + }, + }, + wantErr: "invalid server side HTTP Filters", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := NewFilterChainManager(test.lis) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) + } + }) + } +} + +// TestNewFilterChainImpl_Success_HTTPFilters tests the construction of the +// filter chain with valid HTTP Filters present. 
+func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { + tests := []struct { + name string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + name: "singular valid http filter", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + }}, + }, + }, + { + name: "two valid http filters", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + }, + 
}), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + }, + }, + }, + }, + // In the case of two HTTP Connection Manager's being present, the + // second HTTP Connection Manager should be validated, but ignored. 
+ { + name: "two hcms", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + }, + }), + }, + }, + { + Name: "hcm2", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + }, + }), + }, + }, + { + Name: "hcm2", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + }}, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: 
serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpOpts) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + // TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the // security configuration in the filter chain contains valid data. func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { @@ -390,10 +773,13 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { lis: &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { - Name: "filter-chain-1", + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: emptyValidNetworkFilters, + }, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -432,6 +818,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, }, DefaultFilterChain: &v3listenerpb.FilterChain{ @@ -448,6 +835,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, }, wantFC: &FilterChainManager{ @@ -504,6 +892,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, }, DefaultFilterChain: &v3listenerpb.FilterChain{ @@ -528,6 +917,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }), }, }, + Filters: 
emptyValidNetworkFilters, }, }, wantFC: &FilterChainManager{ @@ -573,7 +963,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } - if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{})) { + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpopts.EquateEmpty()) { t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) } }) @@ -610,7 +1000,8 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { lis: &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { - Name: "good-chain", + Name: "good-chain", + Filters: emptyValidNetworkFilters, }, { Name: "unsupported-destination-port", @@ -618,9 +1009,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, DestinationPort: &wrapperspb.UInt32Value{Value: 666}, }, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -634,7 +1026,8 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { lis: &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { - Name: "good-chain", + Name: "good-chain", + Filters: emptyValidNetworkFilters, }, { Name: "unsupported-server-names", @@ -642,9 +1035,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, ServerNames: []string{"example-server"}, }, + Filters: emptyValidNetworkFilters, }, }, - 
DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -661,7 +1055,8 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { lis: &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { - Name: "good-chain", + Name: "good-chain", + Filters: emptyValidNetworkFilters, }, { Name: "unsupported-transport-protocol", @@ -669,9 +1064,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, TransportProtocol: "tls", }, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -688,7 +1084,8 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { lis: &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { - Name: "good-chain", + Name: "good-chain", + Filters: emptyValidNetworkFilters, }, { Name: "unsupported-application-protocol", @@ -696,9 +1093,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, ApplicationProtocols: []string{"h2"}, }, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -718,7 +1116,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } - if !cmp.Equal(gotFC, test.wantFC, 
cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{})) { + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpopts.EquateEmpty()) { t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) } }) @@ -740,6 +1138,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { { // Unspecified destination prefix. FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, }, { // v4 wildcard destination prefix. @@ -747,6 +1146,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, }, + Filters: emptyValidNetworkFilters, }, { // v6 wildcard destination prefix. @@ -754,15 +1154,18 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("::", 0)}, SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, }, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}}, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -849,15 +1252,17 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK}, 
+ Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, }, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -901,15 +1306,17 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, }, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -952,6 +1359,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ @@ -960,9 +1368,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, SourcePorts: []uint32{1, 2, 3}, }, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: &v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ 
dstPrefixMap: map[string]*destPrefixEntry{ @@ -1010,15 +1419,18 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}, TransportProtocol: "raw_buffer", }, + Filters: emptyValidNetworkFilters, }, { // This chain will be dropped in favor of the above @@ -1032,6 +1444,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 16)}, }, + Filters: emptyValidNetworkFilters, }, { // This chain will be dropped for unsupported server @@ -1040,6 +1453,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.1", 32)}, ServerNames: []string{"foo", "bar"}, }, + Filters: emptyValidNetworkFilters, }, { // This chain will be dropped for unsupported transport @@ -1048,6 +1462,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.2", 32)}, TransportProtocol: "not-raw-buffer", }, + Filters: emptyValidNetworkFilters, }, { // This chain will be dropped for unsupported @@ -1056,9 +1471,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.3", 32)}, ApplicationProtocols: []string{"h2"}, }, + Filters: emptyValidNetworkFilters, }, }, - DefaultFilterChain: 
&v3listenerpb.FilterChain{}, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, }, wantFC: &FilterChainManager{ dstPrefixMap: map[string]*destPrefixEntry{ @@ -1147,6 +1563,7 @@ func TestLookup_Failures(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + Filters: emptyValidNetworkFilters, }, }, }, @@ -1165,6 +1582,7 @@ func TestLookup_Failures(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -1184,6 +1602,7 @@ func TestLookup_Failures(t *testing.T) { SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -1200,12 +1619,14 @@ func TestLookup_Failures(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, SourcePorts: []uint32{1}, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -1224,6 +1645,7 @@ func TestLookup_Failures(t *testing.T) { FilterChains: []*v3listenerpb.FilterChain{ { FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + Filters: emptyValidNetworkFilters, }, }, }, @@ -1246,11 +1668,13 @@ func TestLookup_Failures(t *testing.T) { PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.1", 32)}, ServerNames: []string{"foo"}, }, + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ PrefixRanges: 
[]*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.0", 16)}, }, + Filters: emptyValidNetworkFilters, }, }, }, @@ -1293,6 +1717,7 @@ func TestLookup_Successes(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, }, // A default filter chain with an empty transport socket. @@ -1307,12 +1732,14 @@ func TestLookup_Successes(t *testing.T) { }), }, }, + Filters: emptyValidNetworkFilters, }, } lisWithoutDefaultChain := &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { TransportSocket: transportSocketWithInstanceName("unspecified-dest-and-source-prefix"), + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ @@ -1320,16 +1747,19 @@ func TestLookup_Successes(t *testing.T) { SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, }, TransportSocket: transportSocketWithInstanceName("wildcard-prefixes-v4"), + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("::", 0)}, }, TransportSocket: transportSocketWithInstanceName("wildcard-source-prefix-v6"), + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-unspecified-source-type"), + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ @@ -1337,6 +1767,7 @@ func TestLookup_Successes(t *testing.T) { SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, }, TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type"), + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ @@ -1345,6 +1776,7 @@ func TestLookup_Successes(t *testing.T) { SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, }, 
TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type-specific-source-prefix"), + Filters: emptyValidNetworkFilters, }, { FilterChainMatch: &v3listenerpb.FilterChainMatch{ @@ -1354,6 +1786,7 @@ func TestLookup_Successes(t *testing.T) { SourcePorts: []uint32{80}, }, TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"), + Filters: emptyValidNetworkFilters, }, }, } @@ -1462,7 +1895,7 @@ func TestLookup_Successes(t *testing.T) { if err != nil { t.Fatalf("FilterChainManager.Lookup(%v) failed: %v", test.params, err) } - if !cmp.Equal(gotFC, test.wantFC) { + if !cmp.Equal(gotFC, test.wantFC, cmpopts.EquateEmpty()) { t.Fatalf("FilterChainManager.Lookup(%v) = %v, want %v", test.params, gotFC, test.wantFC) } }) @@ -1480,9 +1913,10 @@ func (fci *FilterChainManager) Equal(other *FilterChainManager) bool { return true } switch { - case !cmp.Equal(fci.dstPrefixMap, other.dstPrefixMap): + case !cmp.Equal(fci.dstPrefixMap, other.dstPrefixMap, cmpopts.EquateEmpty()): return false - case !cmp.Equal(fci.def, other.def): + // TODO: Support comparing dstPrefixes slice? + case !cmp.Equal(fci.def, other.def, cmpopts.EquateEmpty(), protocmp.Transform()): return false } return true @@ -1499,7 +1933,7 @@ func (dpe *destPrefixEntry) Equal(other *destPrefixEntry) bool { return false } for i, st := range dpe.srcTypeArr { - if !cmp.Equal(st, other.srcTypeArr[i]) { + if !cmp.Equal(st, other.srcTypeArr[i], cmpopts.EquateEmpty()) { return false } } @@ -1513,7 +1947,8 @@ func (sp *sourcePrefixes) Equal(other *sourcePrefixes) bool { if sp == nil { return true } - return cmp.Equal(sp.srcPrefixMap, other.srcPrefixMap) + // TODO: Support comparing srcPrefixes slice? 
+ return cmp.Equal(sp.srcPrefixMap, other.srcPrefixMap, cmpopts.EquateEmpty()) } func (spe *sourcePrefixEntry) Equal(other *sourcePrefixEntry) bool { @@ -1526,7 +1961,7 @@ func (spe *sourcePrefixEntry) Equal(other *sourcePrefixEntry) bool { switch { case !cmp.Equal(spe.net, other.net): return false - case !cmp.Equal(spe.srcPortMap, other.srcPortMap): + case !cmp.Equal(spe.srcPortMap, other.srcPortMap, cmpopts.EquateEmpty(), protocmp.Transform()): return false } return true diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index ebebde84b6a1..012efd16d7b1 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -917,36 +917,6 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { wantMD: errMD, wantErr: "failed unmarshaling of network filter", }, - { - name: "client only http filter inside the network filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: localSocketAddress, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - Filters: []*v3listenerpb.Filter{ - { - Name: "hcm", - ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ - HttpFilters: []*v3httppb.HttpFilter{ - { - Name: "clientOnlyCustomFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, - }, - }, - }), - }, - }, - }, - }, - }, - })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: errMD, - wantErr: "not supported server-side", - }, { name: "unexpected transport socket name", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 8dfb573f335f..79c2efcd2cbc 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -268,13 +268,6 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, 
err Port: strconv.Itoa(int(sockAddr.GetPortValue())), }, } - chains := lis.GetFilterChains() - if def := lis.GetDefaultFilterChain(); def != nil { - chains = append(chains, def) - } - if err := validateNetworkFilterChains(chains); err != nil { - return nil, err - } fcMgr, err := NewFilterChainManager(lis) if err != nil { @@ -284,59 +277,63 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err return lu, nil } -func validateNetworkFilterChains(filterChains []*v3listenerpb.FilterChain) error { - for _, filterChain := range filterChains { - seenNames := make(map[string]bool, len(filterChain.GetFilters())) - seenHCM := false - for _, filter := range filterChain.GetFilters() { - name := filter.GetName() - if name == "" { - return fmt.Errorf("filter chain {%+v} is missing name field in filter: {%+v}", filterChain, filter) +func processNetworkFilters(filters []*v3listenerpb.Filter) ([]HTTPFilter, error) { + seenNames := make(map[string]bool, len(filters)) + seenHCM := false + var httpFilters []HTTPFilter + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, fmt.Errorf("network filters {%+v} is missing name field in filter: {%+v}", filters, filter) + } + if seenNames[name] { + return nil, fmt.Errorf("network filters {%+v} has duplicate filter name %q", filters, name) + } + seenNames[name] = true + + // Network filters have a oneof field named `config_type` where we + // only support `TypedConfig` variant. + switch typ := filter.GetConfigType().(type) { + case *v3listenerpb.Filter_TypedConfig: + // The typed_config field has an `anypb.Any` proto which could + // directly contain the serialized bytes of the actual filter + // configuration, or it could be encoded as a `TypedStruct`. + // TODO: Add support for `TypedStruct`. + tc := filter.GetTypedConfig() + + // The only network filter that we currently support is the v3 + // HttpConnectionManager. 
So, we can directly check the type_url + // and unmarshal the config. + // TODO: Implement a registry of supported network filters (like + // we have for HTTP filters), when we have to support network + // filters other than HttpConnectionManager. + if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { + return nil, fmt.Errorf("network filters {%+v} has unsupported network filter %q in filter {%+v}", filters, tc.GetTypeUrl(), filter) } - if seenNames[name] { - return fmt.Errorf("filter chain {%+v} has duplicate filter name %q", filterChain, name) + hcm := &v3httppb.HttpConnectionManager{} + if err := ptypes.UnmarshalAny(tc, hcm); err != nil { + return nil, fmt.Errorf("network filters {%+v} failed unmarshaling of network filter {%+v}: %v", filters, filter, err) } - seenNames[name] = true - - // Network filters have a oneof field named `config_type` where we - // only support `TypedConfig` variant. - switch typ := filter.GetConfigType().(type) { - case *v3listenerpb.Filter_TypedConfig: - // The typed_config field has an `anypb.Any` proto which could - // directly contain the serialized bytes of the actual filter - // configuration, or it could be encoded as a `TypedStruct`. - // TODO: Add support for `TypedStruct`. - tc := filter.GetTypedConfig() - - // The only network filter that we currently support is the v3 - // HttpConnectionManager. So, we can directly check the type_url - // and unmarshal the config. - // TODO: Implement a registry of supported network filters (like - // we have for HTTP filters), when we have to support network - // filters other than HttpConnectionManager. 
- if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { - return fmt.Errorf("filter chain {%+v} has unsupported network filter %q in filter {%+v}", filterChain, tc.GetTypeUrl(), filter) - } - hcm := &v3httppb.HttpConnectionManager{} - if err := ptypes.UnmarshalAny(tc, hcm); err != nil { - return fmt.Errorf("filter chain {%+v} failed unmarshaling of network filter {%+v}: %v", filterChain, filter, err) - } - // We currently don't support HTTP filters on the server-side. - // We will be adding support for it in the future. So, we want - // to make sure that the http_filters configuration is valid. - if _, err := processHTTPFilters(hcm.GetHttpFilters(), true); err != nil { - return err - } + // "Any filters after HttpConnectionManager should be ignored during + // connection processing but still be considered for validity. + // HTTPConnectionManager must have valid http_filters." - A36 + filters, err := processHTTPFilters(hcm.GetHttpFilters(), true) + if err != nil { + return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}", filters, hcm.GetHttpFilters()) + } + if !seenHCM { + // TODO: Implement terminal filter logic, as per A36. 
+ httpFilters = filters seenHCM = true - default: - return fmt.Errorf("filter chain {%+v} has unsupported config_type %T in filter %s", filterChain, typ, filter.GetName()) } - } - if !seenHCM { - return fmt.Errorf("filter chain {%+v} missing HttpConnectionManager filter", filterChain) + default: + return nil, fmt.Errorf("network filters {%+v} has unsupported config_type %T in filter %s", filters, typ, filter.GetName()) } } - return nil + if !seenHCM { + return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters) + } + return httpFilters, nil } // UnmarshalRouteConfig processes resources received in an RDS response, diff --git a/xds/server_test.go b/xds/server_test.go index 68e0d85c692d..00b8518fa9d9 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -32,6 +32,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -683,6 +684,14 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { }), }, }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, }, }, }) From c513103bee39e1ebc3793e7128941794667779de Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 21 Jul 2021 22:42:38 -0400 Subject: [PATCH 168/998] Add extra layer on top of RBAC Engine (#4576) * Add extra layer in RBAC --- internal/xds/rbac/matchers.go | 70 +- internal/xds/rbac/rbac_engine.go | 217 +++++- 
internal/xds/rbac/rbac_engine_test.go | 1000 ++++++++++++++++--------- 3 files changed, 885 insertions(+), 402 deletions(-) diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 47be35c1d0d7..25d1cc0e8b98 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -32,7 +32,7 @@ import ( // matcher is an interface that takes data about incoming RPC's and returns // whether it matches with whatever matcher implements this interface. type matcher interface { - match(data *RPCData) bool + match(data *rpcData) bool } // policyMatcher helps determine whether an incoming RPC call matches a policy. @@ -63,7 +63,7 @@ func newPolicyMatcher(policy *v3rbacpb.Policy) (*policyMatcher, error) { }, nil } -func (pm *policyMatcher) match(data *RPCData) bool { +func (pm *policyMatcher) match(data *rpcData) bool { // A policy matches if and only if at least one of its permissions match the // action taking place AND at least one if its principals match the // downstream peer. @@ -202,7 +202,7 @@ type orMatcher struct { matchers []matcher } -func (om *orMatcher) match(data *RPCData) bool { +func (om *orMatcher) match(data *rpcData) bool { // Range through child matchers and pass in data about incoming RPC, and // only one child matcher has to match to be logically successful. 
for _, m := range om.matchers { @@ -219,7 +219,7 @@ type andMatcher struct { matchers []matcher } -func (am *andMatcher) match(data *RPCData) bool { +func (am *andMatcher) match(data *rpcData) bool { for _, m := range am.matchers { if !m.match(data) { return false @@ -234,7 +234,7 @@ func (am *andMatcher) match(data *RPCData) bool { type alwaysMatcher struct { } -func (am *alwaysMatcher) match(data *RPCData) bool { +func (am *alwaysMatcher) match(data *rpcData) bool { return true } @@ -244,7 +244,7 @@ type notMatcher struct { matcherToNot matcher } -func (nm *notMatcher) match(data *RPCData) bool { +func (nm *notMatcher) match(data *rpcData) bool { return !nm.matcherToNot.match(data) } @@ -284,8 +284,8 @@ func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) ( return &headerMatcher{matcher: m}, nil } -func (hm *headerMatcher) match(data *RPCData) bool { - return hm.matcher.Match(data.MD) +func (hm *headerMatcher) match(data *rpcData) bool { + return hm.matcher.Match(data.md) } // urlPathMatcher matches on the URL Path of the incoming RPC. 
In gRPC, this @@ -303,8 +303,8 @@ func newURLPathMatcher(pathMatcher *v3matcherpb.PathMatcher) (*urlPathMatcher, e return &urlPathMatcher{stringMatcher: stringMatcher}, nil } -func (upm *urlPathMatcher) match(data *RPCData) bool { - return upm.stringMatcher.Match(data.FullMethod) +func (upm *urlPathMatcher) match(data *rpcData) bool { + return upm.stringMatcher.Match(data.fullMethod) } // sourceIPMatcher and destinationIPMatcher both are matchers that match against @@ -329,8 +329,8 @@ func newSourceIPMatcher(cidrRange *v3corepb.CidrRange) (*sourceIPMatcher, error) return &sourceIPMatcher{ipNet: ipNet}, nil } -func (sim *sourceIPMatcher) match(data *RPCData) bool { - return sim.ipNet.Contains(net.IP(net.ParseIP(data.PeerInfo.Addr.String()))) +func (sim *sourceIPMatcher) match(data *rpcData) bool { + return sim.ipNet.Contains(net.IP(net.ParseIP(data.peerInfo.Addr.String()))) } type destinationIPMatcher struct { @@ -346,8 +346,8 @@ func newDestinationIPMatcher(cidrRange *v3corepb.CidrRange) (*destinationIPMatch return &destinationIPMatcher{ipNet: ipNet}, nil } -func (dim *destinationIPMatcher) match(data *RPCData) bool { - return dim.ipNet.Contains(net.IP(net.ParseIP(data.DestinationAddr.String()))) +func (dim *destinationIPMatcher) match(data *rpcData) bool { + return dim.ipNet.Contains(net.IP(net.ParseIP(data.destinationAddr.String()))) } // portMatcher matches on whether the destination port of the RPC matches the @@ -361,8 +361,8 @@ func newPortMatcher(destinationPort uint32) *portMatcher { return &portMatcher{destinationPort: destinationPort} } -func (pm *portMatcher) match(data *RPCData) bool { - return data.DestinationPort == pm.destinationPort +func (pm *portMatcher) match(data *rpcData) bool { + return data.destinationPort == pm.destinationPort } // authenticatedMatcher matches on the name of the Principal. If set, the URI @@ -370,17 +370,47 @@ func (pm *portMatcher) match(data *RPCData) bool { // subject field is used. 
If unset, it applies to any user that is // authenticated. authenticatedMatcher implements the matcher interface. type authenticatedMatcher struct { - stringMatcher internalmatcher.StringMatcher + stringMatcher *internalmatcher.StringMatcher } func newAuthenticatedMatcher(authenticatedMatcherConfig *v3rbacpb.Principal_Authenticated) (*authenticatedMatcher, error) { + // Represents this line in the RBAC documentation = "If unset, it applies to + // any user that is authenticated" (see package-level comments). + if authenticatedMatcherConfig.PrincipalName == nil { + return &authenticatedMatcher{}, nil + } stringMatcher, err := internalmatcher.StringMatcherFromProto(authenticatedMatcherConfig.PrincipalName) if err != nil { return nil, err } - return &authenticatedMatcher{stringMatcher: stringMatcher}, nil + return &authenticatedMatcher{stringMatcher: &stringMatcher}, nil } -func (am *authenticatedMatcher) match(data *RPCData) bool { - return am.stringMatcher.Match(data.PrincipalName) +func (am *authenticatedMatcher) match(data *rpcData) bool { + // Represents this line in the RBAC documentation = "If unset, it applies to + // any user that is authenticated" (see package-level comments). An + // authenticated downstream in a stateful TLS connection will have to + // provide a certificate to prove their identity. Thus, you can simply check + // if there is a certificate present. + if am.stringMatcher == nil { + return len(data.certs) != 0 + } + // No certificate present, so will never successfully match. + if len(data.certs) == 0 { + return false + } + cert := data.certs[0] + // The order of matching as per the RBAC documentation (see package-level comments) + // is as follows: URI SANs, DNS SANs, and then subject name. 
+ for _, uriSAN := range cert.URIs { + if am.stringMatcher.Match(uriSAN.String()) { + return true + } + } + for _, dnsSAN := range cert.DNSNames { + if am.stringMatcher.Match(dnsSAN) { + return true + } + } + return am.stringMatcher.Match(cert.Subject.String()) } diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index 29c96d9fcf20..609d123c8039 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -15,68 +15,207 @@ */ // Package rbac provides service-level and method-level access control for a -// service. +// service. See +// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/rbac/v3/rbac.proto#role-based-access-control-rbac +// for documentation. package rbac import ( + "context" + "crypto/x509" + "errors" + "fmt" "net" + "strconv" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" ) -// Engine is used for matching incoming RPCs to policies. -type Engine struct { - policies map[string]*policyMatcher +// ChainEngine represents a chain of RBAC Engines, used to make authorization +// decisions on incoming RPCs. +type ChainEngine struct { + chainedEngines []*engine } -// NewEngine creates an RBAC Engine based on the contents of policy. If the -// config is invalid (and fails to build underlying tree of matchers), NewEngine -// will return an error. This created RBAC Engine will not persist the action -// present in the policy, and will leave up to caller to handle the action that -// is attached to the config. 
-func NewEngine(policy *v3rbacpb.RBAC) (*Engine, error) { - policies := make(map[string]*policyMatcher) - for name, config := range policy.Policies { - matcher, err := newPolicyMatcher(config) +// NewChainEngine returns a chain of RBAC engines, used to make authorization +// decisions on incoming RPCs. Returns a non-nil error for invalid policies. +func NewChainEngine(policies []*v3rbacpb.RBAC) (*ChainEngine, error) { + var engines []*engine + for _, policy := range policies { + engine, err := newEngine(policy) if err != nil { return nil, err } - policies[name] = matcher + engines = append(engines, engine) } - return &Engine{policies: policies}, nil + return &ChainEngine{chainedEngines: engines}, nil } -// RPCData wraps data pulled from an incoming RPC that the RBAC engine needs to -// find a matching policy. -type RPCData struct { - // MD is the HTTP Headers that are present in the incoming RPC. - MD metadata.MD - // PeerInfo is information about the downstream peer. - PeerInfo *peer.Peer - // FullMethod is the method name being called on the upstream service. - FullMethod string - // DestinationPort is the port that the RPC is being sent to on the - // server. - DestinationPort uint32 - // DestinationAddr is the address that the RPC is being sent to. - DestinationAddr net.Addr - // PrincipalName is the name of the downstream principal. If set, the URI - // SAN or DNS SAN in that order is used from the certificate, otherwise the - // subject field is used. If unset, it applies to any user that is - // authenticated. - PrincipalName string +// IsAuthorized determines if an incoming RPC is authorized based on the chain of RBAC +// engines and their associated actions. +// +// Errors returned by this function are compatible with the status package. +func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { + // This conversion step (i.e. pulling things out of ctx) can be done once, + // and then be used for the whole chain of RBAC Engines. 
+ rpcData, err := newRPCData(ctx) + if err != nil { + return status.Errorf(codes.InvalidArgument, "missing fields in ctx %+v: %v", ctx, err) + } + for _, engine := range cre.chainedEngines { + matchingPolicyName, ok := engine.findMatchingPolicy(rpcData) + + switch { + case engine.action == v3rbacpb.RBAC_ALLOW && !ok: + return status.Errorf(codes.PermissionDenied, "incoming RPC did not match an allow policy") + case engine.action == v3rbacpb.RBAC_DENY && ok: + return status.Errorf(codes.PermissionDenied, "incoming RPC matched a deny policy %q", matchingPolicyName) + } + // Every policy in the engine list must be queried. Thus, iterate to the + // next policy. + } + // If the incoming RPC gets through all of the engines successfully (i.e. + // doesn't not match an allow or match a deny engine), the RPC is authorized + // to proceed. + return status.Error(codes.OK, "") +} + +// engine is used for matching incoming RPCs to policies. +type engine struct { + policies map[string]*policyMatcher + // action must be ALLOW or DENY. + action v3rbacpb.RBAC_Action } -// FindMatchingPolicy determines if an incoming RPC matches a policy. On a -// successful match, it returns the name of the matching policy and a true -// boolean to specify that there was a matching policy found. -func (r *Engine) FindMatchingPolicy(data *RPCData) (string, bool) { +// newEngine creates an RBAC Engine based on the contents of policy. Returns a +// non-nil error if the policy is invalid. 
+func newEngine(config *v3rbacpb.RBAC) (*engine, error) { + a := *config.Action.Enum() + if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY { + return nil, fmt.Errorf("unsupported action %s", config.Action) + } + + policies := make(map[string]*policyMatcher, len(config.Policies)) + for name, policy := range config.Policies { + matcher, err := newPolicyMatcher(policy) + if err != nil { + return nil, err + } + policies[name] = matcher + } + return &engine{ + policies: policies, + action: a, + }, nil +} + +// findMatchingPolicy determines if an incoming RPC matches a policy. On a +// successful match, it returns the name of the matching policy and a true bool +// to specify that there was a matching policy found. It returns false in +// the case of not finding a matching policy. +func (r *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { for policy, matcher := range r.policies { - if matcher.match(data) { + if matcher.match(rpcData) { return policy, true } } return "", false } + +// newRPCData takes an incoming context (should be a context representing state +// needed for server RPC Call with metadata, peer info (used for source ip/port +// and TLS information) and connection (used for destination ip/port) piped into +// it) and the method name of the Service being called server side and populates +// an rpcData struct ready to be passed to the RBAC Engine to find a matching +// policy. +func newRPCData(ctx context.Context) (*rpcData, error) { + // The caller should populate all of these fields (i.e. for empty headers, + // pipe an empty md into context). 
+ md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errors.New("missing metadata in incoming context") + } + + pi, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("missing peer info in incoming context") + } + + // The methodName will be available in the passed in ctx from a unary or streaming + // interceptor, as grpc.Server pipes in a transport stream which contains the methodName + // into contexts available in both unary or streaming interceptors. + mn, ok := grpc.Method(ctx) + if !ok { + return nil, errors.New("missing method in incoming context") + } + + // The connection is needed in order to find the destination address and + // port of the incoming RPC Call. + conn := getConnection(ctx) + if conn == nil { + return nil, errors.New("missing connection in incoming context") + } + _, dPort, err := net.SplitHostPort(conn.LocalAddr().String()) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + dp, err := strconv.ParseUint(dPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + + var peerCertificates []*x509.Certificate + if pi.AuthInfo != nil { + tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo) + if ok { + peerCertificates = tlsInfo.State.PeerCertificates + } + } + + return &rpcData{ + md: md, + peerInfo: pi, + fullMethod: mn, + destinationPort: uint32(dp), + destinationAddr: conn.LocalAddr(), + certs: peerCertificates, + }, nil +} + +// rpcData wraps data pulled from an incoming RPC that the RBAC engine needs to +// find a matching policy. +type rpcData struct { + // md is the HTTP Headers that are present in the incoming RPC. + md metadata.MD + // peerInfo is information about the downstream peer. + peerInfo *peer.Peer + // fullMethod is the method name being called on the upstream service. + fullMethod string + // destinationPort is the port that the RPC is being sent to on the + // server. 
+ destinationPort uint32 + // destinationAddr is the address that the RPC is being sent to. + destinationAddr net.Addr + // certs are the certificates presented by the peer during a TLS + // handshake. + certs []*x509.Certificate +} + +type connectionKey struct{} + +func getConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. +func SetConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go index 47ed5a1342e0..2521ac4526aa 100644 --- a/internal/xds/rbac/rbac_engine_test.go +++ b/internal/xds/rbac/rbac_engine_test.go @@ -17,6 +17,12 @@ package rbac import ( + "context" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "net" + "net/url" "testing" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -25,8 +31,13 @@ import ( v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" ) type s struct { @@ -44,110 +55,196 @@ type addr struct { func (addr) Network() string { return "" } func (a *addr) String() string { return a.ipAddress } -// TestRBACEngineConstruction tests the construction of the RBAC Engine. Due to -// some types of RBAC configuration being logically wrong and returning an error +// TestNewChainEngine tests the construction of the ChainEngine. 
Due to some +// types of RBAC configuration being logically wrong and returning an error // rather than successfully constructing the RBAC Engine, this test tests both // RBAC Configurations deemed successful and also RBAC Configurations that will // raise errors. -func (s) TestRBACEngineConstruction(t *testing.T) { +func (s) TestNewChainEngine(t *testing.T) { tests := []struct { - name string - rbacConfig *v3rbacpb.RBAC - wantErr bool + name string + policies []*v3rbacpb.RBAC + wantErr bool }{ { - name: "TestSuccessCaseAnyMatch", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "anyone": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + name: "SuccessCaseAnyMatchSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, }, { - name: "TestSuccessCaseSimplePolicy", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "localhost-fan": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, - {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + name: "SuccessCaseAnyMatchMultiple", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, - Principals: 
[]*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, }, { - name: "TestSuccessCaseEnvoyExample", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "service-admin": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/admin"}}}}}, - {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/superuser"}}}}}, - }, - }, - "product-viewer": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ - Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, - {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, - {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ - Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 80}}, - {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 443}}, - }, - }}}, - }, + name: "SuccessCaseSimplePolicySingular", + policies: 
[]*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + // SuccessCaseSimplePolicyTwoPolicies tests the construction of the + // chained engines in the case where there are two policies in a list, + // one with an allow policy and one with a deny policy. A situation + // where two policies (allow and deny) is a very common use case for + // this API, and should successfully build. + { + name: "SuccessCaseSimplePolicyTwoPolicies", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, }, }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: 
&v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, }, { - name: "TestSourceIpMatcherSuccess", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "certain-source-ip": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, + name: "SuccessCaseEnvoyExampleSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/admin"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/superuser"}}}}}, + }, }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, + {Rule: &v3rbacpb.Permission_OrRules{OrRules: 
&v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 80}}, + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 443}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, }, { - name: "TestSourceIpMatcherFailure", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "certain-source-ip": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, + name: "SourceIpMatcherSuccessSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + { + name: "SourceIpMatcherFailureSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, }, }, }, @@ -155,30 +252,36 @@ func (s) TestRBACEngineConstruction(t *testing.T) { wantErr: true, }, { - name: "TestDestinationIpMatcherSuccess", - rbacConfig: &v3rbacpb.RBAC{ - Policies: 
map[string]*v3rbacpb.Policy{ - "certain-destination-ip": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + name: "DestinationIpMatcherSuccess", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, }, { - name: "TestDestinationIpMatcherFailure", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "certain-destination-ip": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + name: "DestinationIpMatcherFailure", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, @@ -186,82 +289,135 @@ func (s) TestRBACEngineConstruction(t *testing.T) { wantErr: true, }, { - name: "TestMatcherToNotPolicy", - rbacConfig: &v3rbacpb.RBAC{ - Policies: 
map[string]*v3rbacpb.Policy{ - "not-secret-content": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_NotRule{NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + name: "MatcherToNotPolicy", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "not-secret-content": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_NotRule{NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, }, { - name: "TestMatcherToNotPrincipal", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "not-from-certain-ip": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, + name: "MatcherToNotPrinicipal", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "not-from-certain-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_NotId{NotId: &v3rbacpb.Principal{Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}}}, + }, }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_NotId{NotId: &v3rbacpb.Principal{Identifier: 
&v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}}}, + }, + }, + }, + }, + // PrinicpalProductViewer tests the construction of a chained engine + // with a policy that allows any downstream to send a GET request on a + // certain path. + { + name: "PrincipalProductViewer", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + { + Identifier: &v3rbacpb.Principal_AndIds{AndIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/books"}}}}}}, + {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/cars"}}}}}}, + }, + }}}, + }}}, + }, + }, }, }, }, }, }, + // Certain Headers tests the construction of a chained engine with a + // policy that allows any downstream to send an HTTP request with + // certain headers. 
{ - name: "TestPrincipalProductViewer", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "product-viewer": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, - }, - Principals: []*v3rbacpb.Principal{ - { - Identifier: &v3rbacpb.Principal_AndIds{AndIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, - {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ - Ids: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/books"}}}}}}, - {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/cars"}}}}}}, - }, + name: "CertainHeaders", + policies: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-headers": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + { + Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "GET"}}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_RangeMatch{RangeMatch: 
&v3typepb.Int64Range{ + Start: 0, + End: 64, + }}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PresentMatch{PresentMatch: true}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ContainsMatch{ContainsMatch: "GET"}}}}, }}}, - }}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "LogAction", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_LOG, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, }, }, }, }, }, + wantErr: true, }, { - name: "TestCertainHeaders", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "certain-headers": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, - }, - Principals: []*v3rbacpb.Principal{ - { - Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "GET"}}}}}, - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: 
":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_RangeMatch{RangeMatch: &v3typepb.Int64Range{ - Start: 0, - End: 64, - }}}}}, - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PresentMatch{PresentMatch: true}}}}, - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "GET"}}}}, - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "GET"}}}}, - {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ContainsMatch{ContainsMatch: "GET"}}}}, - }}}, + name: "ActionNotSpecified", + policies: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, }, }, }, @@ -271,336 +427,494 @@ func (s) TestRBACEngineConstruction(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if _, err := NewEngine(test.rbacConfig); (err != nil) != test.wantErr { - t.Fatalf("NewEngine(%+v) returned err: %v, wantErr: %v", test.rbacConfig, err, test.wantErr) + if _, err := NewChainEngine(test.policies); (err != nil) != test.wantErr { + t.Fatalf("NewChainEngine(%+v) returned err: %v, wantErr: %v", test.policies, err, test.wantErr) } }) } } -// TestRBACEngine tests the RBAC Engine by configuring the engine in different -// scenarios. After configuring the engine in a certain way, this test pings the -// engine with different kinds of data representing incoming RPC's, and verifies -// that it works as expected. 
-func (s) TestRBACEngine(t *testing.T) { +// TestChainEngine tests the chain of RBAC Engines by configuring the chain of +// engines in a certain way in different scenarios. After configuring the chain +// of engines in a certain way, this test pings the chain of engines with +// different types of data representing incoming RPC's (piped into a context), +// and verifies that it works as expected. +func (s) TestChainEngine(t *testing.T) { tests := []struct { name string - rbacConfig *v3rbacpb.RBAC + rbacConfigs []*v3rbacpb.RBAC rbacQueries []struct { - rpcData *RPCData - wantMatchingPolicyName string - wantMatch bool + rpcData *rpcData + wantStatusCode codes.Code } }{ - // TestSuccessCaseAnyMatch tests an RBAC Engine instantiated with a - // config with a policy with any rules for both permissions and + // SuccessCaseAnyMatch tests a single RBAC Engine instantiated with + // a config with a policy with any rules for both permissions and // principals, meaning that any data about incoming RPC's that the RBAC // Engine is queried with should match that policy. 
{ - name: "TestSuccessCaseAnyMatch", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "anyone": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + name: "SuccessCaseAnyMatch", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, - rbacQueries: // Any incoming RPC should match with the anyone policy - []struct { - rpcData *RPCData - wantMatchingPolicyName string - wantMatch bool + rbacQueries: []struct { + rpcData *rpcData + wantStatusCode codes.Code }{ { - rpcData: &RPCData{ - FullMethod: "some method", - }, - wantMatchingPolicyName: "anyone", - wantMatch: true, - }, - { - rpcData: &RPCData{ - DestinationPort: 100, + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, }, - wantMatchingPolicyName: "anyone", - wantMatch: true, + wantStatusCode: codes.OK, }, }, }, - // TestSuccessCaseSimplePolicy is a test that tests a simple policy that - // only allows an rpc to proceed if the rpc is calling a certain path - // and port. + // SuccessCaseSimplePolicy is a test that tests a single policy + // that only allows an rpc to proceed if the rpc is calling with a certain + // path. 
{ - name: "TestSuccessCaseSimplePolicy", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "localhost-fan": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, - {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + name: "SuccessCaseSimplePolicy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, rbacQueries: []struct { - rpcData *RPCData - wantMatchingPolicyName string - wantMatch bool + rpcData *rpcData + wantStatusCode codes.Code }{ - // This RPC should match with the local host fan policy. + // This RPC should match with the local host fan policy. Thus, + // this RPC should be allowed to proceed. { - rpcData: &RPCData{ - MD: map[string][]string{ - ":path": {"localhost-fan-page"}, + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, }, - DestinationPort: 8080, }, - wantMatchingPolicyName: "localhost-fan", - wantMatch: true}, - // This RPC shouldn't match with the local host fan policy. + wantStatusCode: codes.OK, + }, + + // This RPC shouldn't match with the local host fan policy. Thus, + // this rpc shouldn't be allowed to proceed. 
{ - rpcData: &RPCData{ - DestinationPort: 100, + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, }, - wantMatchingPolicyName: ""}, + wantStatusCode: codes.PermissionDenied, + }, }, }, - // TestSuccessCaseEnvoyExample is a test based on the example provided + // SuccessCaseEnvoyExample is a test based on the example provided // in the EnvoyProxy docs. The RBAC Config contains two policies, // service admin and product viewer, that provides an example of a real // RBAC Config that might be configured for a given for a given backend // service. { - name: "TestSuccessCaseEnvoyExample", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "service-admin": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/admin"}}}}}, - {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/superuser"}}}}}, - }, - }, - "product-viewer": { - Permissions: []*v3rbacpb.Permission{ - { - Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ - Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, - {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, - {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ - Rules: []*v3rbacpb.Permission{ - 
{Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 80}}, - {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 443}}, - }, - }}}, + name: "SuccessCaseEnvoyExample", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "//cluster.local/ns/default/sa/admin"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "//cluster.local/ns/default/sa/superuser"}}}}}, + }, + }, + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + { + Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, + }, + }, }, - }, }, }, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, rbacQueries: []struct { - rpcData *RPCData - wantMatchingPolicyName string - wantMatch bool + rpcData *rpcData + wantStatusCode codes.Code }{ // This incoming RPC Call should match with the service admin // policy. 
{ - rpcData: &RPCData{ - FullMethod: "some method", - PrincipalName: "cluster.local/ns/default/sa/admin", - }, - wantMatchingPolicyName: "service-admin", - wantMatch: true}, - // This incoming RPC Call should match with the product - // viewer policy. - { - rpcData: &RPCData{ - DestinationPort: 80, - MD: map[string][]string{ - ":method": {"GET"}, + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + }, }, - FullMethod: "/products", }, - wantMatchingPolicyName: "product-viewer", - wantMatch: true}, - // These incoming RPC calls should not match any policy - - // represented by an empty matching policy name and false being - // returned. + wantStatusCode: codes.OK, + }, + // These incoming RPC calls should not match any policy. 
{ - rpcData: &RPCData{ - DestinationPort: 100, + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, }, - wantMatchingPolicyName: ""}, + wantStatusCode: codes.PermissionDenied, + }, { - rpcData: &RPCData{ - FullMethod: "get-product-list", - DestinationPort: 8080, + rpcData: &rpcData{ + fullMethod: "get-product-list", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, }, - wantMatchingPolicyName: ""}, + wantStatusCode: codes.PermissionDenied, + }, { - rpcData: &RPCData{ - PrincipalName: "localhost", - DestinationPort: 8080, + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + Subject: pkix.Name{ + CommonName: "localhost", + }, + }, + }, + }, + }, + }, }, - wantMatchingPolicyName: ""}, + wantStatusCode: codes.PermissionDenied, + }, }, }, { - name: "TestNotMatcher", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "not-secret-content": { - Permissions: []*v3rbacpb.Permission{ - { - Rule: &v3rbacpb.Permission_NotRule{ - NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}, + name: "NotMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "not-secret-content": { + Permissions: []*v3rbacpb.Permission{ + { + Rule: &v3rbacpb.Permission_NotRule{ + NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}, + }, }, }, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + Principals: []*v3rbacpb.Principal{ + 
{Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, }, }, }, rbacQueries: []struct { - rpcData *RPCData - wantMatchingPolicyName string - wantMatch bool + rpcData *rpcData + wantStatusCode codes.Code }{ // This incoming RPC Call should match with the not-secret-content policy. { - rpcData: &RPCData{ - FullMethod: "/regular-content", + rpcData: &rpcData{ + fullMethod: "/regular-content", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, }, - wantMatchingPolicyName: "not-secret-content", - wantMatch: true, + wantStatusCode: codes.OK, }, - // This incoming RPC Call shouldn't match with the not-secret-content policy. + // This incoming RPC Call shouldn't match with the not-secret-content-policy. { - rpcData: &RPCData{ - FullMethod: "/secret-content", + rpcData: &rpcData{ + fullMethod: "/secret-content", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, }, - wantMatchingPolicyName: "", - wantMatch: false, + wantStatusCode: codes.PermissionDenied, }, }, }, { - name: "TestSourceIpMatcher", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "certain-source-ip": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}, - }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + name: "SourceIpMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, }, }, }, }, rbacQueries: []struct { - rpcData *RPCData - wantMatchingPolicyName string - wantMatch bool + rpcData *rpcData + 
wantStatusCode codes.Code }{ // This incoming RPC Call should match with the certain-source-ip policy. { - rpcData: &RPCData{ - PeerInfo: &peer.Peer{ + rpcData: &rpcData{ + peerInfo: &peer.Peer{ Addr: &addr{ipAddress: "0.0.0.0"}, }, }, - wantMatchingPolicyName: "certain-source-ip", - wantMatch: true, + wantStatusCode: codes.OK, }, // This incoming RPC Call shouldn't match with the certain-source-ip policy. { - rpcData: &RPCData{ - PeerInfo: &peer.Peer{ + rpcData: &rpcData{ + peerInfo: &peer.Peer{ Addr: &addr{ipAddress: "10.0.0.0"}, }, }, - wantMatchingPolicyName: "", - wantMatch: false, + wantStatusCode: codes.PermissionDenied, }, }, }, { - name: "TestDestinationIpMatcher", - rbacConfig: &v3rbacpb.RBAC{ - Policies: map[string]*v3rbacpb.Policy{ - "certain-destination-ip": { - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + name: "DestinationIpMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, }, - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + rbacQueries: []struct { + rpcData *rpcData + wantStatusCode codes.Code + }{ + // This incoming RPC Call shouldn't match with the + // certain-destination-ip policy, as the test listens on local + // host. 
+ { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, }, }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // AllowAndDenyPolicy tests a policy with an allow (on path) and + // deny (on port) policy chained together. This represents how a user + // configured interceptor would use this, and also is a potential + // configuration for a dynamic xds interceptor. + { + name: "AllowAndDenyPolicy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, }, }, rbacQueries: []struct { - rpcData *RPCData - wantMatchingPolicyName string - wantMatch bool + rpcData *rpcData + wantStatusCode codes.Code }{ - // This incoming RPC Call should match with the certain-destination-ip policy. + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. { - rpcData: &RPCData{ - DestinationAddr: &addr{ipAddress: "0.0.0.10"}, + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, }, - wantMatchingPolicyName: "certain-destination-ip", - wantMatch: true, + wantStatusCode: codes.PermissionDenied, }, - // This incoming RPC Call shouldn't match with the certain-destination-ip policy. + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. { - rpcData: &RPCData{ - DestinationAddr: &addr{ipAddress: "10.0.0.0"}, + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, }, - wantMatchingPolicyName: "", + wantStatusCode: codes.PermissionDenied, }, }, }, } + for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Instantiate the rbacEngine with different configurations that + // Instantiate the chainedRBACEngine with different configurations that are // interesting to test and to query. - rbacEngine, err := NewEngine(test.rbacConfig) + cre, err := NewChainEngine(test.rbacConfigs) if err != nil { t.Fatalf("Error constructing RBAC Engine: %v", err) } - // Query that created RBAC Engine with different args to see if the - // RBAC Engine configured a certain way works as intended. - for _, queryToRBACEngine := range test.rbacQueries { - // The matchingPolicyName returned will be empty in the case of - // no matching policy. Thus, matchingPolicyName can also be used - // to test the "error" condition of no matching policies. 
- matchingPolicyName, matchingPolicyFound := rbacEngine.FindMatchingPolicy(queryToRBACEngine.rpcData) - if matchingPolicyFound != queryToRBACEngine.wantMatch || matchingPolicyName != queryToRBACEngine.wantMatchingPolicyName { - t.Errorf("FindMatchingPolicy(%+v) returned (%v, %v), want (%v, %v)", queryToRBACEngine.rpcData, matchingPolicyName, matchingPolicyFound, queryToRBACEngine.wantMatchingPolicyName, queryToRBACEngine.wantMatch) - } + // Query the created chain of RBAC Engines with different args to see + // if the chain of RBAC Engines configured as such works as intended. + for _, data := range test.rbacQueries { + func() { + // Construct the context with three data points that have enough + // information to represent incoming RPC's. This will be how a + // user uses this API. A user will have to put MD, PeerInfo, and + // the connection the RPC is sent on in the context. + ctx := metadata.NewIncomingContext(context.Background(), data.rpcData.md) + + // Make a TCP connection with a certain destination port. The + // address/port of this connection will be used to populate the + // destination ip/port in RPCData struct. This represents what + // the user of ChainEngine will have to place into + // context, as this is only way to get destination ip and port. 
+ lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error listening: %v", err) + } + defer lis.Close() + connCh := make(chan net.Conn, 1) + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("Error accepting connection: %v", err) + return + } + connCh <- conn + }() + _, err = net.Dial("tcp", lis.Addr().String()) + if err != nil { + t.Fatalf("Error dialing: %v", err) + } + conn := <-connCh + defer conn.Close() + ctx = SetConnection(ctx, conn) + ctx = peer.NewContext(ctx, data.rpcData.peerInfo) + stream := &ServerTransportStreamWithMethod{ + method: data.rpcData.fullMethod, + } + + ctx = grpc.NewContextWithServerTransportStream(ctx, stream) + err = cre.IsAuthorized(ctx) + if gotCode := status.Code(err); gotCode != data.wantStatusCode { + t.Fatalf("IsAuthorized(%+v, %+v) returned (%+v), want(%+v)", ctx, data.rpcData.fullMethod, gotCode, data.wantStatusCode) + } + }() } }) } } + +type ServerTransportStreamWithMethod struct { + method string +} + +func (sts *ServerTransportStreamWithMethod) Method() string { + return sts.method +} + +func (sts *ServerTransportStreamWithMethod) SetHeader(md metadata.MD) error { + return nil +} + +func (sts *ServerTransportStreamWithMethod) SendHeader(md metadata.MD) error { + return nil +} + +func (sts *ServerTransportStreamWithMethod) SetTrailer(md metadata.MD) error { + return nil +} From 582ef458c6d8174087877ee83bb514abc16650a5 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 22 Jul 2021 16:12:30 -0700 Subject: [PATCH 169/998] cluster_resolver: move balancer config types into cluster_resolver package and unexport (#4607) --- .../balancer/cdsbalancer/cdsbalancer.go | 11 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 7 +- .../clusterresolver/balancerconfig/type.go | 140 ----------- .../balancerconfig/type_test.go | 88 ------- .../clusterresolver/clusterresolver.go | 5 +- .../clusterresolver/clusterresolver_test.go | 5 +- .../balancer/clusterresolver/config.go | 120 +++++++++- 
.../balancer/clusterresolver/config_test.go | 81 ++++++- .../{balancerconfig => }/configbuilder.go | 42 ++-- .../configbuilder_test.go | 34 +-- .../balancer/clusterresolver/eds_impl_test.go | 29 ++- .../balancer/clusterresolver/priority_test.go | 7 +- .../clusterresolver/resource_resolver.go | 23 +- .../clusterresolver/resource_resolver_test.go | 225 +++++++++--------- .../balancer/weightedtarget/weightedtarget.go | 4 +- 15 files changed, 371 insertions(+), 450 deletions(-) delete mode 100644 xds/internal/balancer/clusterresolver/balancerconfig/type.go delete mode 100644 xds/internal/balancer/clusterresolver/balancerconfig/type_test.go rename xds/internal/balancer/clusterresolver/{balancerconfig => }/configbuilder.go (89%) rename xds/internal/balancer/clusterresolver/{balancerconfig => }/configbuilder_test.go (97%) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index e52a34a7d29a..3ea14add9ebf 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -304,12 +303,12 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) } - dms := make([]balancerconfig.DiscoveryMechanism, len(update.updates)) + dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates)) for i, cu := range update.updates { switch cu.ClusterType { case xdsclient.ClusterTypeEDS: - dms[i] = balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + dms[i] = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, 
Cluster: cu.ClusterName, EDSServiceName: cu.EDSServiceName, MaxConcurrentRequests: cu.MaxRequests, @@ -322,8 +321,8 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { } case xdsclient.ClusterTypeLogicalDNS: - dms[i] = balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + dms[i] = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, DNSHostname: cu.DNSHostName, } default: diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index a4c6d40f7824..864af36857bc 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -36,7 +36,6 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" @@ -198,8 +197,8 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { // edsCCS is a helper function to construct a good update passed from the // cdsBalancer to the edsBalancer. 
func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { - discoveryMechanism := balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + discoveryMechanism := clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: service, MaxConcurrentRequests: countMax, } @@ -208,7 +207,7 @@ func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientCon } lbCfg := &clusterresolver.LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{discoveryMechanism}, + DiscoveryMechanisms: []clusterresolver.DiscoveryMechanism{discoveryMechanism}, } return balancer.ClientConnState{ diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/type.go b/xds/internal/balancer/clusterresolver/balancerconfig/type.go deleted file mode 100644 index 3e47b8234e33..000000000000 --- a/xds/internal/balancer/clusterresolver/balancerconfig/type.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package balancerconfig - -import ( - "bytes" - "encoding/json" - "fmt" -) - -// DiscoveryMechanismType is the type of discovery mechanism. -type DiscoveryMechanismType int - -const ( - // DiscoveryMechanismTypeEDS is eds. - DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` - // DiscoveryMechanismTypeLogicalDNS is DNS. 
- DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` -) - -// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. -// -// This is necessary to handle enum (as strings) from JSON. -// -// Note that this needs to be defined on the type not pointer, otherwise the -// variables of this type will marshal to int not string. -func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { - buffer := bytes.NewBufferString(`"`) - switch t { - case DiscoveryMechanismTypeEDS: - buffer.WriteString("EDS") - case DiscoveryMechanismTypeLogicalDNS: - buffer.WriteString("LOGICAL_DNS") - } - buffer.WriteString(`"`) - return buffer.Bytes(), nil -} - -// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. -func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { - var s string - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - switch s { - case "EDS": - *t = DiscoveryMechanismTypeEDS - case "LOGICAL_DNS": - *t = DiscoveryMechanismTypeLogicalDNS - default: - return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) - } - return nil -} - -// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. -// -// For DNS, the ClientConn target will be used for name resolution. -// -// For EDS, if EDSServiceName is not empty, it will be used for watching. If -// EDSServiceName is empty, Cluster will be used. -type DiscoveryMechanism struct { - // Cluster is the cluster name. - Cluster string `json:"cluster,omitempty"` - // LoadReportingServerName is the LRS server to send load reports to. If - // not present, load reporting will be disabled. If set to the empty string, - // load reporting will be sent to the same server that we obtained CDS data - // from. - LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` - // MaxConcurrentRequests is the maximum number of outstanding requests can - // be made to the upstream cluster. Default is 1024. 
- MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` - // Type is the discovery mechanism type. - Type DiscoveryMechanismType `json:"type,omitempty"` - // EDSServiceName is the EDS service name, as returned in CDS. May be unset - // if not specified in CDS. For type EDS only. - // - // This is used for EDS watch if set. If unset, Cluster is used for EDS - // watch. - EDSServiceName string `json:"edsServiceName,omitempty"` - // DNSHostname is the DNS name to resolve in "host:port" form. For type - // LOGICAL_DNS only. - DNSHostname string `json:"dnsHostname,omitempty"` -} - -// Equal returns whether the DiscoveryMechanism is the same with the parameter. -func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { - switch { - case dm.Cluster != b.Cluster: - return false - case !equalStringP(dm.LoadReportingServerName, b.LoadReportingServerName): - return false - case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): - return false - case dm.Type != b.Type: - return false - case dm.EDSServiceName != b.EDSServiceName: - return false - case dm.DNSHostname != b.DNSHostname: - return false - } - return true -} - -func equalStringP(a, b *string) bool { - if a == nil && b == nil { - return true - } - if a == nil || b == nil { - return false - } - return *a == *b -} - -func equalUint32P(a, b *uint32) bool { - if a == nil && b == nil { - return true - } - if a == nil || b == nil { - return false - } - return *a == *b -} diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/type_test.go b/xds/internal/balancer/clusterresolver/balancerconfig/type_test.go deleted file mode 100644 index 1adbc7c5d230..000000000000 --- a/xds/internal/balancer/clusterresolver/balancerconfig/type_test.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package balancerconfig - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestDiscoveryMechanismTypeMarshalJSON(t *testing.T) { - tests := []struct { - name string - typ DiscoveryMechanismType - want string - }{ - { - name: "eds", - typ: DiscoveryMechanismTypeEDS, - want: `"EDS"`, - }, - { - name: "dns", - typ: DiscoveryMechanismTypeLogicalDNS, - want: `"LOGICAL_DNS"`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got, err := json.Marshal(tt.typ); err != nil || string(got) != tt.want { - t.Fatalf("DiscoveryMechanismTypeEDS.MarshalJSON() = (%v, %v), want (%s, nil)", string(got), err, tt.want) - } - }) - } -} -func TestDiscoveryMechanismTypeUnmarshalJSON(t *testing.T) { - tests := []struct { - name string - js string - want DiscoveryMechanismType - wantErr bool - }{ - { - name: "eds", - js: `"EDS"`, - want: DiscoveryMechanismTypeEDS, - }, - { - name: "dns", - js: `"LOGICAL_DNS"`, - want: DiscoveryMechanismTypeLogicalDNS, - }, - { - name: "error", - js: `"1234"`, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var got DiscoveryMechanismType - err := json.Unmarshal([]byte(tt.js), &got) - if (err != nil) != tt.wantErr { - t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr) - } - if diff := cmp.Diff(got, tt.want); diff != "" { - t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() got unexpected output, diff (-got +want): %v", diff) - } - }) - } -} diff --git 
a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index cb8176d16448..b48e3e97b716 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -34,7 +34,6 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -138,7 +137,7 @@ type clusterResolverBalancer struct { attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. child balancer.Balancer - priorities []balancerconfig.PriorityConfig + priorities []priorityConfig watchUpdateReceived bool } @@ -210,7 +209,7 @@ func (b *clusterResolverBalancer) updateChildConfig() error { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } - childCfgBytes, addrs, err := balancerconfig.BuildPriorityConfigJSON(b.priorities, b.config.EndpointPickingPolicy) + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, b.config.EndpointPickingPolicy) if err != nil { return fmt.Errorf("failed to build priority balancer config: %v", err) } diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 7e2df25e0535..e7d0cd347cb7 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/testutils/fakeclient" 
"google.golang.org/grpc/xds/internal/xdsclient" @@ -492,9 +491,9 @@ func (s) TestClientWatchEDS(t *testing.T) { func newLBConfigWithOneEDS(edsServiceName string) *LBConfig { return &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, EDSServiceName: edsServiceName, }}, } diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 043c834399e6..3bcb432b4091 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -18,13 +18,129 @@ package clusterresolver import ( + "bytes" "encoding/json" + "fmt" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" ) +// DiscoveryMechanismType is the type of discovery mechanism. +type DiscoveryMechanismType int + +const ( + // DiscoveryMechanismTypeEDS is eds. + DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` + // DiscoveryMechanismTypeLogicalDNS is DNS. + DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` +) + +// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. +// +// This is necessary to handle enum (as strings) from JSON. +// +// Note that this needs to be defined on the type not pointer, otherwise the +// variables of this type will marshal to int not string. +func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case DiscoveryMechanismTypeEDS: + buffer.WriteString("EDS") + case DiscoveryMechanismTypeLogicalDNS: + buffer.WriteString("LOGICAL_DNS") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. 
+func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + switch s { + case "EDS": + *t = DiscoveryMechanismTypeEDS + case "LOGICAL_DNS": + *t = DiscoveryMechanismTypeLogicalDNS + default: + return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) + } + return nil +} + +// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. +// +// For DNS, the ClientConn target will be used for name resolution. +// +// For EDS, if EDSServiceName is not empty, it will be used for watching. If +// EDSServiceName is empty, Cluster will be used. +type DiscoveryMechanism struct { + // Cluster is the cluster name. + Cluster string `json:"cluster,omitempty"` + // LoadReportingServerName is the LRS server to send load reports to. If + // not present, load reporting will be disabled. If set to the empty string, + // load reporting will be sent to the same server that we obtained CDS data + // from. + LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` + // MaxConcurrentRequests is the maximum number of outstanding requests can + // be made to the upstream cluster. Default is 1024. + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + // Type is the discovery mechanism type. + Type DiscoveryMechanismType `json:"type,omitempty"` + // EDSServiceName is the EDS service name, as returned in CDS. May be unset + // if not specified in CDS. For type EDS only. + // + // This is used for EDS watch if set. If unset, Cluster is used for EDS + // watch. + EDSServiceName string `json:"edsServiceName,omitempty"` + // DNSHostname is the DNS name to resolve in "host:port" form. For type + // LOGICAL_DNS only. + DNSHostname string `json:"dnsHostname,omitempty"` +} + +// Equal returns whether the DiscoveryMechanism is the same with the parameter. 
+func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + switch { + case dm.Cluster != b.Cluster: + return false + case !equalStringP(dm.LoadReportingServerName, b.LoadReportingServerName): + return false + case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): + return false + case dm.Type != b.Type: + return false + case dm.EDSServiceName != b.EDSServiceName: + return false + case dm.DNSHostname != b.DNSHostname: + return false + } + return true +} + +func equalStringP(a, b *string) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func equalUint32P(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + // LBConfig is the config for cluster resolver balancer. type LBConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -32,7 +148,7 @@ type LBConfig struct { // // Must have at least one element. Results from each discovery mechanism are // concatenated together in successive priorities. - DiscoveryMechanisms []balancerconfig.DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` + DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` // LocalityPickingPolicy is policy for locality picking. 
// diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index 1333692b7fca..77b14deb6abe 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -21,13 +21,75 @@ package clusterresolver import ( + "encoding/json" "testing" "github.com/google/go-cmp/cmp" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" ) +func TestDiscoveryMechanismTypeMarshalJSON(t *testing.T) { + tests := []struct { + name string + typ DiscoveryMechanismType + want string + }{ + { + name: "eds", + typ: DiscoveryMechanismTypeEDS, + want: `"EDS"`, + }, + { + name: "dns", + typ: DiscoveryMechanismTypeLogicalDNS, + want: `"LOGICAL_DNS"`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, err := json.Marshal(tt.typ); err != nil || string(got) != tt.want { + t.Fatalf("DiscoveryMechanismTypeEDS.MarshalJSON() = (%v, %v), want (%s, nil)", string(got), err, tt.want) + } + }) + } +} +func TestDiscoveryMechanismTypeUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + js string + want DiscoveryMechanismType + wantErr bool + }{ + { + name: "eds", + js: `"EDS"`, + want: DiscoveryMechanismTypeEDS, + }, + { + name: "dns", + js: `"LOGICAL_DNS"`, + want: DiscoveryMechanismTypeLogicalDNS, + }, + { + name: "error", + js: `"1234"`, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got DiscoveryMechanismType + err := json.Unmarshal([]byte(tt.js), &got) + if (err != nil) != tt.wantErr { + t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} + const ( 
testJSONConfig1 = `{ "discoveryMechanisms": [{ @@ -60,9 +122,6 @@ const ( "localityPickingPolicy":[{"pick_first":{}}], "endpointPickingPolicy":[{"pick_first":{}}] }` - - testLRSServer = "test-lrs-server" - testMaxRequests = 314 ) func TestParseConfig(t *testing.T) { @@ -82,12 +141,12 @@ func TestParseConfig(t *testing.T) { name: "OK with one discovery mechanism", js: testJSONConfig1, want: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + DiscoveryMechanisms: []DiscoveryMechanism{ { Cluster: testClusterName, LoadReportingServerName: newString(testLRSServer), MaxConcurrentRequests: newUint32(testMaxRequests), - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServcie, }, }, @@ -100,16 +159,16 @@ func TestParseConfig(t *testing.T) { name: "OK with multiple discovery mechanisms", js: testJSONConfig2, want: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + DiscoveryMechanisms: []DiscoveryMechanism{ { Cluster: testClusterName, LoadReportingServerName: newString(testLRSServer), MaxConcurrentRequests: newUint32(testMaxRequests), - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServcie, }, { - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + Type: DiscoveryMechanismTypeLogicalDNS, }, }, LocalityPickingPolicy: nil, @@ -121,12 +180,12 @@ func TestParseConfig(t *testing.T) { name: "OK with picking policy override", js: testJSONConfig3, want: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + DiscoveryMechanisms: []DiscoveryMechanism{ { Cluster: testClusterName, LoadReportingServerName: newString(testLRSServer), MaxConcurrentRequests: newUint32(testMaxRequests), - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServcie, }, }, diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go 
b/xds/internal/balancer/clusterresolver/configbuilder.go similarity index 89% rename from xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go rename to xds/internal/balancer/clusterresolver/configbuilder.go index 4f96cc61f1ef..dfbdc1e2d671 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -16,17 +16,7 @@ * */ -// Package balancerconfig contains utility functions to build balancer config. -// The built config will generate a tree of balancers with priority, -// cluster_impl, weighted_target, lrs, and roundrobin. -// -// This is in a subpackage of cluster_resolver so that it can be used by the EDS -// balancer. Eventually we will delete the EDS balancer, and replace it with -// cluster_resolver, then we can move the functions to package cluster_resolver, -// and unexport them. -// -// TODO: move and unexport. Read above. -package balancerconfig +package clusterresolver import ( "encoding/json" @@ -47,22 +37,22 @@ import ( const million = 1000000 -// PriorityConfig is config for one priority. For example, if there an EDS and a -// DNS, the priority list will be [priorityConfig{EDS}, PriorityConfig{DNS}]. +// priorityConfig is config for one priority. For example, if there an EDS and a +// DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}]. // -// Each PriorityConfig corresponds to one discovery mechanism from the LBConfig +// Each priorityConfig corresponds to one discovery mechanism from the LBConfig // generated by the CDS balancer. The CDS balancer resolves the cluster name to // an ordered list of discovery mechanisms (if the top cluster is an aggregated // cluster), one for each underlying cluster. -type PriorityConfig struct { - Mechanism DiscoveryMechanism - // EDSResp is set only if type is EDS. - EDSResp xdsclient.EndpointsUpdate - // Addresses is set only if type is DNS. 
- Addresses []string +type priorityConfig struct { + mechanism DiscoveryMechanism + // edsResp is set only if type is EDS. + edsResp xdsclient.EndpointsUpdate + // addresses is set only if type is DNS. + addresses []string } -// BuildPriorityConfigJSON builds balancer config for the passed in +// buildPriorityConfigJSON builds balancer config for the passed in // priorities. // // The built tree of balancers (see test for the output struct). @@ -94,7 +84,7 @@ type PriorityConfig struct { // // TODO: support setting locality picking policy, and add a parameter for // locality picking policy. -func BuildPriorityConfigJSON(priorities []PriorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { +func buildPriorityConfigJSON(priorities []priorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { pc, addrs := buildPriorityConfig(priorities, endpointPickingPolicy) ret, err := json.Marshal(pc) if err != nil { @@ -103,15 +93,15 @@ func BuildPriorityConfigJSON(priorities []PriorityConfig, endpointPickingPolicy return ret, addrs, nil } -func buildPriorityConfig(priorities []PriorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address) { +func buildPriorityConfig(priorities []priorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address) { var ( retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} retAddrs []resolver.Address ) for i, p := range priorities { - switch p.Mechanism.Type { + switch p.mechanism.Type { case DiscoveryMechanismTypeEDS: - names, configs, addrs := buildClusterImplConfigForEDS(i, p.EDSResp, p.Mechanism, endpointPickingPolicy) + names, configs, addrs := buildClusterImplConfigForEDS(i, p.edsResp, p.mechanism, endpointPickingPolicy) retConfig.Priorities = append(retConfig.Priorities, names...) 
for n, c := range configs { retConfig.Children[n] = &priority.Child{ @@ -122,7 +112,7 @@ func buildPriorityConfig(priorities []PriorityConfig, endpointPickingPolicy *int } retAddrs = append(retAddrs, addrs...) case DiscoveryMechanismTypeLogicalDNS: - name, config, addrs := buildClusterImplConfigForDNS(i, p.Addresses) + name, config, addrs := buildClusterImplConfigForDNS(i, p.addresses) retConfig.Priorities = append(retConfig.Priorities, name) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config}, diff --git a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go similarity index 97% rename from xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go rename to xds/internal/balancer/clusterresolver/configbuilder_test.go index 95ded6019c57..d8f17d053aae 100644 --- a/xds/internal/balancer/clusterresolver/balancerconfig/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -1,3 +1,5 @@ +// +build go1.12 + /* * * Copyright 2021 gRPC authors. @@ -16,7 +18,7 @@ * */ -package balancerconfig +package clusterresolver import ( "bytes" @@ -41,10 +43,8 @@ import ( ) const ( - testClusterName = "test-cluster-name" testLRSServer = "test-lrs-server" testMaxRequests = 314 - testEDSServcie = "test-eds-service-name" testEDSServiceName = "service-name-from-parent" testDropCategory = "test-drops" testDropOverMillion = 1 @@ -123,16 +123,16 @@ func init() { // TestBuildPriorityConfigJSON is a sanity check that the built balancer config // can be parsed. The behavior test is covered by TestBuildPriorityConfig. 
func TestBuildPriorityConfigJSON(t *testing.T) { - gotConfig, _, err := BuildPriorityConfigJSON([]PriorityConfig{ + gotConfig, _, err := buildPriorityConfigJSON([]priorityConfig{ { - Mechanism: DiscoveryMechanism{ + mechanism: DiscoveryMechanism{ Cluster: testClusterName, LoadReportingServerName: newString(testLRSServer), MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServiceName, }, - EDSResp: xdsclient.EndpointsUpdate{ + edsResp: xdsclient.EndpointsUpdate{ Drops: []xdsclient.OverloadDropConfig{ { Category: testDropCategory, @@ -149,10 +149,10 @@ func TestBuildPriorityConfigJSON(t *testing.T) { }, }, { - Mechanism: DiscoveryMechanism{ + mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeLogicalDNS, }, - Addresses: testAddressStrs[4], + addresses: testAddressStrs[4], }, }, nil) if err != nil { @@ -173,16 +173,16 @@ func TestBuildPriorityConfigJSON(t *testing.T) { } func TestBuildPriorityConfig(t *testing.T) { - gotConfig, gotAddrs := buildPriorityConfig([]PriorityConfig{ + gotConfig, gotAddrs := buildPriorityConfig([]priorityConfig{ { - Mechanism: DiscoveryMechanism{ + mechanism: DiscoveryMechanism{ Cluster: testClusterName, LoadReportingServerName: newString(testLRSServer), MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServiceName, }, - EDSResp: xdsclient.EndpointsUpdate{ + edsResp: xdsclient.EndpointsUpdate{ Drops: []xdsclient.OverloadDropConfig{ { Category: testDropCategory, @@ -199,10 +199,10 @@ func TestBuildPriorityConfig(t *testing.T) { }, }, { - Mechanism: DiscoveryMechanism{ + mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeLogicalDNS, }, - Addresses: testAddressStrs[4], + addresses: testAddressStrs[4], }, }, nil) @@ -723,14 +723,6 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { } } -func newString(s string) *string { - return &s -} - -func newUint32(i uint32) *uint32 { - return &i -} - func assertString(f 
func() (string, error)) string { s, err := f() if err != nil { diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index bf7e7f6c421c..f565c5249870 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/testutils" @@ -74,9 +73,9 @@ func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) if err := edsb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, }}, EndpointPickingPolicy: initChild, }, @@ -495,9 +494,9 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { t.Logf("update sub-balancer to stub-balancer") if err := edsb.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, }}, EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ Name: balancerName, @@ -525,9 +524,9 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { t.Logf("update sub-balancer to round-robin") if err := edsb.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: 
&LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, }}, EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, @@ -555,9 +554,9 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { t.Logf("update sub-balancer to stub-balancer") if err := edsb.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, }}, EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ Name: balancerName, @@ -588,9 +587,9 @@ func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { t.Logf("update sub-balancer to round-robin") if err := edsb.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, }}, EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, @@ -623,10 +622,10 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { var maxRequests uint32 = 50 if err := edsb.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, MaxConcurrentRequests: &maxRequests, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, }}, EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, @@ -689,10 +688,10 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { var maxRequests2 uint32 = 10 if err := 
edsb.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{{ + DiscoveryMechanisms: []DiscoveryMechanism{{ Cluster: testClusterName, MaxConcurrentRequests: &maxRequests2, - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, }}, EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index b2935be0c362..2e0e9fd5d2ff 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/testutils" ) @@ -727,13 +726,13 @@ func (s) TestFallbackToDNS(t *testing.T) { if err := edsb.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []balancerconfig.DiscoveryMechanism{ + DiscoveryMechanisms: []DiscoveryMechanism{ { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, { - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, }, diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index e68d77d3efe9..2125bd2326f2 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -21,14 +21,13 @@ package clusterresolver import ( "sync" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" 
"google.golang.org/grpc/xds/internal/xdsclient" ) // resourceUpdate is a combined update from all the resources, in the order of // priority. For example, it can be {EDS, EDS, DNS}. type resourceUpdate struct { - priorities []balancerconfig.PriorityConfig + priorities []priorityConfig err error } @@ -43,7 +42,7 @@ type discoveryMechanism interface { // mechanisms, both for the same EDS resource, but has different circuit // breaking config. type discoveryMechanismKey struct { - typ balancerconfig.DiscoveryMechanismType + typ DiscoveryMechanismType name string } @@ -52,7 +51,7 @@ type discoveryMechanismKey struct { // mechanism for fields like circuit breaking, LRS etc when generating the // balancer config. type resolverMechanismTuple struct { - dm balancerconfig.DiscoveryMechanism + dm DiscoveryMechanism dmKey discoveryMechanismKey r discoveryMechanism } @@ -63,7 +62,7 @@ type resourceResolver struct { // mu protects the slice and map, and content of the resolvers in the slice. mu sync.Mutex - mechanisms []balancerconfig.DiscoveryMechanism + mechanisms []DiscoveryMechanism children []resolverMechanismTuple childrenMap map[discoveryMechanismKey]discoveryMechanism } @@ -76,7 +75,7 @@ func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver { } } -func equalDiscoveryMechanisms(a, b []balancerconfig.DiscoveryMechanism) bool { +func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { if len(a) != len(b) { return false } @@ -89,7 +88,7 @@ func equalDiscoveryMechanisms(a, b []balancerconfig.DiscoveryMechanism) bool { return true } -func (rr *resourceResolver) updateMechanisms(mechanisms []balancerconfig.DiscoveryMechanism) { +func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { rr.mu.Lock() defer rr.mu.Unlock() if equalDiscoveryMechanisms(rr.mechanisms, mechanisms) { @@ -102,7 +101,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []balancerconfig.Discove // Start one watch for each new discover mechanism 
{type+resource_name}. for i, dm := range mechanisms { switch dm.Type { - case balancerconfig.DiscoveryMechanismTypeEDS: + case DiscoveryMechanismTypeEDS: // If EDSServiceName is not set, use the cluster name as EDS service // name to watch. nameToWatch := dm.EDSServiceName @@ -118,7 +117,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []balancerconfig.Discove rr.childrenMap[dmKey] = r } rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r} - case balancerconfig.DiscoveryMechanismTypeLogicalDNS: + case DiscoveryMechanismTypeLogicalDNS: // Name to resolve in DNS is the hostname, not the ClientConn // target. dmKey := discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} @@ -172,7 +171,7 @@ func (rr *resourceResolver) stop() { // // caller must hold rr.mu. func (rr *resourceResolver) generate() { - var ret []balancerconfig.PriorityConfig + var ret []priorityConfig for _, rDM := range rr.children { r, ok := rr.childrenMap[rDM.dmKey] if !ok { @@ -188,9 +187,9 @@ func (rr *resourceResolver) generate() { } switch uu := u.(type) { case xdsclient.EndpointsUpdate: - ret = append(ret, balancerconfig.PriorityConfig{Mechanism: rDM.dm, EDSResp: uu}) + ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu}) case []string: - ret = append(ret, balancerconfig.PriorityConfig{Mechanism: rDM.dm, Addresses: uu}) + ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu}) } } select { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index 621ca2a127c8..efdb4d58da69 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -28,7 +28,6 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/xds/internal/balancer/clusterresolver/balancerconfig" 
"google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" xdsclient "google.golang.org/grpc/xds/internal/xdsclient" @@ -58,20 +57,20 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { clusterName, edsName string wantName string edsUpdate xdsclient.EndpointsUpdate - want []balancerconfig.PriorityConfig + want []priorityConfig }{ {name: "watch EDS", clusterName: testClusterName, edsName: testEDSServcie, wantName: testEDSServcie, edsUpdate: testEDSUpdates[0], - want: []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + want: []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, EDSServiceName: testEDSServcie, }, - EDSResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], }}, }, { @@ -79,20 +78,20 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { clusterName: testClusterName, wantName: testClusterName, edsUpdate: testEDSUpdates[1], - want: []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + want: []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - EDSResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], }}, }, } { t.Run(test.name, func(t *testing.T) { fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeEDS, Cluster: test.clusterName, EDSServiceName: test.edsName, }}) @@ -110,7 +109,7 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { fakeClient.InvokeWatchEDSCallback("", test.edsUpdate, nil) select { case u := <-rr.updateChannel: - if diff := 
cmp.Diff(u.priorities, test.want); diff != "" { + if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -152,19 +151,19 @@ func (s) TestResourceResolverOneDNSResource(t *testing.T) { target string wantTarget resolver.Target addrs []resolver.Address - want []balancerconfig.PriorityConfig + want []priorityConfig }{ { name: "watch DNS", target: testDNSTarget, wantTarget: resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}, addrs: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}, - want: []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + want: []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - Addresses: []string{"1.1.1.1", "2.2.2.2"}, + addresses: []string{"1.1.1.1", "2.2.2.2"}, }}, }, } { @@ -173,8 +172,8 @@ func (s) TestResourceResolverOneDNSResource(t *testing.T) { defer cleanup() fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: test.target, }}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -192,7 +191,7 @@ func (s) TestResourceResolverOneDNSResource(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: test.addrs}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, test.want); diff != "" { + if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -221,8 +220,8 @@ func (s) 
TestResourceResolverOneDNSResource(t *testing.T) { func (s) TestResourceResolverChangeEDSName(t *testing.T) { fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, EDSServiceName: testEDSServcie, }}) @@ -240,14 +239,14 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + if diff := cmp.Diff(u.priorities, []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, EDSServiceName: testEDSServcie, }, - EDSResp: testEDSUpdates[0], - }}); diff != "" { + edsResp: testEDSUpdates[0], + }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -255,8 +254,8 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { } // Change name to watch. 
- rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }}) edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) @@ -287,13 +286,13 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + if diff := cmp.Diff(u.priorities, []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - EDSResp: testEDSUpdates[1], - }}); diff != "" { + edsResp: testEDSUpdates[1], + }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -302,8 +301,8 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { // Change circuit breaking count, should get an update with new circuit // breaking count, but shouldn't trigger new watch. 
- rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, MaxConcurrentRequests: newUint32(123), }}) @@ -314,14 +313,14 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { } select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + if diff := cmp.Diff(u.priorities, []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, MaxConcurrentRequests: newUint32(123), }, - EDSResp: testEDSUpdates[1], - }}); diff != "" { + edsResp: testEDSUpdates[1], + }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -344,13 +343,13 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + rr.updateMechanisms([]DiscoveryMechanism{ { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], MaxConcurrentRequests: newUint32(100), }, @@ -385,23 +384,23 @@ func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ + if diff := cmp.Diff(u.priorities, []priorityConfig{ { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: 
balancerconfig.DiscoveryMechanismTypeEDS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, - EDSResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], }, { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], MaxConcurrentRequests: newUint32(100), }, - EDSResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], }, - }); diff != "" { + }, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -409,13 +408,13 @@ func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { } // Send the same resources with the same priorities, shouldn't any change. - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + rr.updateMechanisms([]DiscoveryMechanism{ { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], MaxConcurrentRequests: newUint32(100), }, @@ -457,13 +456,13 @@ func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { func (s) TestResourceResolverChangePriority(t *testing.T) { fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + rr.updateMechanisms([]DiscoveryMechanism{ { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], }, }) @@ -497,22 +496,22 @@ func (s) TestResourceResolverChangePriority(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) select { case u := 
<-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ + if diff := cmp.Diff(u.priorities, []priorityConfig{ { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, - EDSResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], }, { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], }, - EDSResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], }, - }); diff != "" { + }, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -521,13 +520,13 @@ func (s) TestResourceResolverChangePriority(t *testing.T) { // Send the same resources with different priorities, shouldn't trigger // watch, but should trigger an update with the new priorities. 
- rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + rr.updateMechanisms([]DiscoveryMechanism{ { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], }, { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, }) @@ -538,22 +537,22 @@ func (s) TestResourceResolverChangePriority(t *testing.T) { } select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ + if diff := cmp.Diff(u.priorities, []priorityConfig{ { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], }, - EDSResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], }, { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, - EDSResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], }, - }); diff != "" { + }, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -584,13 +583,13 @@ func (s) TestResourceResolverEDSAndDNS(t *testing.T) { defer cleanup() fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + rr.updateMechanisms([]DiscoveryMechanism{ { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, { - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, }) @@ -625,22 +624,22 @@ func (s) TestResourceResolverEDSAndDNS(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: 
"1.1.1.1"}, {Addr: "2.2.2.2"}}}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{ + if diff := cmp.Diff(u.priorities, []priorityConfig{ { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - EDSResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], }, { - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - Addresses: []string{"1.1.1.1", "2.2.2.2"}, + addresses: []string{"1.1.1.1", "2.2.2.2"}, }, - }); diff != "" { + }, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -669,8 +668,8 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { defer cleanup() fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -687,13 +686,13 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeEDS, + if diff := cmp.Diff(u.priorities, []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - EDSResp: testEDSUpdates[0], - }}); 
diff != "" { + edsResp: testEDSUpdates[0], + }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -701,8 +700,8 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { } // Update to watch DNS instead. Should cancel EDS, and start DNS. - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }}) select { @@ -724,13 +723,13 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + if diff := cmp.Diff(u.priorities, []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - Addresses: []string{"1.1.1.1", "2.2.2.2"}, - }}); diff != "" { + addresses: []string{"1.1.1.1", "2.2.2.2"}, + }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -752,13 +751,13 @@ func (s) TestResourceResolverError(t *testing.T) { defer cleanup() fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{ + rr.updateMechanisms([]DiscoveryMechanism{ { - Type: balancerconfig.DiscoveryMechanismTypeEDS, + Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, { - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, }) @@ -826,8 +825,8 @@ func 
(s) TestResourceResolverDNSResolveNow(t *testing.T) { defer cleanup() fakeClient := fakeclient.NewClient() rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]balancerconfig.DiscoveryMechanism{{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + rr.updateMechanisms([]DiscoveryMechanism{{ + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -845,13 +844,13 @@ func (s) TestResourceResolverDNSResolveNow(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []balancerconfig.PriorityConfig{{ - Mechanism: balancerconfig.DiscoveryMechanism{ - Type: balancerconfig.DiscoveryMechanismTypeLogicalDNS, + if diff := cmp.Diff(u.priorities, []priorityConfig{{ + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - Addresses: []string{"1.1.1.1", "2.2.2.2"}, - }}); diff != "" { + addresses: []string{"1.1.1.1", "2.2.2.2"}, + }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index fd9da9c59d71..eb6516af56e9 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -81,7 +81,7 @@ type weightedTargetBalancer struct { } // UpdateClientConnState takes the new targets in balancer group, -// creates/deletes sub-balancers and sends them update. Addresses are split into +// creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. 
func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) @@ -137,7 +137,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat } // Forwards all the update: - // - Addresses are from the map after splitting with hierarchy path, + // - addresses are from the map after splitting with hierarchy path, // - Top level service config and attributes are the same, // - Balancer config comes from the targets map. // From 1ddab338690a578975747239ad4ecd2ae63b1965 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 23 Jul 2021 10:37:18 -0700 Subject: [PATCH 170/998] client: fix detection of whether IO was performed in NewStream (#4611) For transparent retry. Also allow non-WFR RPCs to retry indefinitely on errors that resulted in no I/O; the spec used to forbid it at one point during development, but it no longer does. --- internal/transport/http2_client.go | 35 +++++++++++++------- internal/transport/transport_test.go | 2 +- rpc_util.go | 28 ++++++++-------- stream.go | 49 +++++++++++++++++----------- 4 files changed, 70 insertions(+), 44 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 8b6254b5bdc7..fa41fec903ee 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -616,26 +616,39 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. +type NewStreamError struct { Err error + + DoNotRetry bool + PerformedIO bool } -// Error implements error. 
-func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. +// streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + defer func() { + if err != nil { + nse, ok := err.(*NewStreamError) + if !ok { + nse = &NewStreamError{Err: err} + } + if len(t.perRPCCreds) > 0 || callHdr.Creds != nil { + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + nse.PerformedIO = true + } + err = nse + } + }() ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - return nil, PerformedIOError{err} + return nil, err } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -741,7 +754,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} } firstTry = false select { diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 92990eaf7b27..28cace0ba5d0 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -780,7 +780,7 @@ func (s) TestGracefulClose(t *testing.T) { go func() { defer wg.Done() str, err := ct.NewStream(ctx, &CallHdr{}) - if err == ErrConnClosing { + if err != nil && err.(*NewStreamError).Err == ErrConnClosing { return } else if err != nil { t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing) diff --git a/rpc_util.go b/rpc_util.go index 1831a73e73d3..87987a2e652f 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -829,26 +829,28 @@ func Errorf(c 
codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err } + return status.Error(codes.Unknown, err.Error()) } diff --git a/stream.go b/stream.go index ed6af683d209..e224af12d218 100644 --- a/stream.go +++ b/stream.go @@ -421,12 +421,9 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. - return err - } - return toRPCErr(err) + // Return without converting to an RPC error so retry code can + // inspect. + return err } cs.attempt.s = s cs.attempt.p = &parser{r: s} @@ -525,19 +522,28 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. 
func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true + // Error from NewClientStream. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected, but assume no I/O was performed and the RPC is not + // fatal, so retry indefinitely. + return nil } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. + + // Unwrap and convert error. + err = toRPCErr(nse.Err) + + // Never retry DoNotRetry errors, which indicate the RPC should not be + // retried due to max header list size violation, etc. + if nse.DoNotRetry { + return err + } + + // In the event of a non-IO operation error from NewStream, we never + // attempted to write anything to the wire, so we can retry + // indefinitely. + if !nse.PerformedIO { return nil } } @@ -546,6 +552,7 @@ func (cs *clientStream) shouldRetry(err error) error { return err } // Wait for the trailers. + unprocessed := false if cs.attempt.s != nil { <-cs.attempt.s.Done() unprocessed = cs.attempt.s.Unprocessed() @@ -634,7 +641,7 @@ func (cs *clientStream) shouldRetry(err error) error { // Returns nil if a retry was performed and succeeded; error otherwise. 
func (cs *clientStream) retryLocked(lastErr error) error { for { - cs.attempt.finish(lastErr) + cs.attempt.finish(toRPCErr(lastErr)) if err := cs.shouldRetry(lastErr); err != nil { cs.commitAttemptLocked() return err @@ -661,7 +668,11 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) } a := cs.attempt cs.mu.Unlock() From 00edd8c13a7a27bc25c8de2a68cf6de35f88bd7e Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Mon, 26 Jul 2021 13:02:56 -0700 Subject: [PATCH 171/998] Add xDS k8s url-map test Kokoro job (#4614) --- test/kokoro/xds_k8s.sh | 2 +- test/kokoro/xds_url_map.cfg | 13 ++++ test/kokoro/xds_url_map.sh | 134 ++++++++++++++++++++++++++++++++++++ 3 files changed, 148 insertions(+), 1 deletion(-) create mode 100644 test/kokoro/xds_url_map.cfg create mode 100755 test/kokoro/xds_url_map.sh diff --git a/test/kokoro/xds_k8s.sh b/test/kokoro/xds_k8s.sh index 4d1fda18a9f9..dbe200962c18 100755 --- a/test/kokoro/xds_k8s.sh +++ b/test/kokoro/xds_k8s.sh @@ -129,7 +129,7 @@ run_test() { main() { local script_dir script_dir="$(dirname "$0")" - # shellcheck source=tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh + # shellcheck source=test/kokoro/xds_k8s_install_test_driver.sh source "${script_dir}/xds_k8s_install_test_driver.sh" set -x if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then diff --git a/test/kokoro/xds_url_map.cfg b/test/kokoro/xds_url_map.cfg new file mode 100644 index 000000000000..f6fd84a419af --- /dev/null +++ b/test/kokoro/xds_url_map.cfg @@ -0,0 +1,13 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. 
+build_file: "grpc-go/test/kokoro/xds_url_map.sh" +timeout_mins: 60 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*sponge_log.log" + strip_prefix: "artifacts" + } +} diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh new file mode 100755 index 000000000000..f30fd1a15b57 --- /dev/null +++ b/test/kokoro/xds_url_map.sh @@ -0,0 +1,134 @@ +#!/usr/bin/env bash +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eo pipefail + +# Constants +readonly GITHUB_REPOSITORY_NAME="grpc-go" +# GKE Cluster +readonly GKE_CLUSTER_NAME="interop-test-psm-sec-v2-us-central1-a" +readonly GKE_CLUSTER_ZONE="us-central1-a" +## xDS test client Docker images +readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" +readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" + +####################################### +# Builds test app Docker images and pushes them to GCR +# Globals: +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud builds submit` to stdout, stderr +####################################### +build_test_app_docker_images() { + echo "Building Go xDS interop test app Docker images" + docker build -f "${SRC_DIR}/interop/xds/client/Dockerfile" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + gcloud -q auth configure-docker + docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" +} + +####################################### +# Builds test app and its docker images unless they already exist +# Globals: +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# FORCE_IMAGE_BUILD +# Arguments: +# None +# Outputs: +# Writes the output to stdout, stderr +####################################### +build_docker_images_if_needed() { + # Check if images already exist + client_tags="$(gcloud_gcr_list_image_tags "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Client image: %s:%s\n" "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${client_tags:-Client image not found}" + + # Build if any of the images are missing, or FORCE_IMAGE_BUILD=1 + if [[ "${FORCE_IMAGE_BUILD}" == "1" || -z "${client_tags}" ]]; then + build_test_app_docker_images + else + echo "Skipping Go test app build" + fi +} + +####################################### +# Executes the test case +# Globals: +# TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile 
+# KUBE_CONTEXT: The name of kubectl context with GKE cluster access +# TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# Arguments: +# Test case name +# Outputs: +# Writes the output of test execution to stdout, stderr +# Test xUnit report to ${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml +####################################### +run_test() { + # Test driver usage: + # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage + local test_name="${1:?Usage: run_test test_name}" + set -x + python -m "tests.${test_name}" \ + --flagfile="${TEST_DRIVER_FLAGFILE}" \ + --kube_context="${KUBE_CONTEXT}" \ + --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ + --flagfile="config/url-map.cfg" + set +x +} + +####################################### +# Main function: provision software necessary to execute tests, and run them +# Globals: +# KOKORO_ARTIFACTS_DIR +# GITHUB_REPOSITORY_NAME +# SRC_DIR: Populated with absolute path to the source repo +# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing +# the test driver +# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code +# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile +# TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report +# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build +# GIT_COMMIT: Populated with the SHA-1 of git commit being built +# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built +# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access +# Arguments: +# None +# Outputs: +# Writes the output of test execution to stdout, stderr +####################################### +main() { + local script_dir + 
script_dir="$(dirname "$0")" + # shellcheck source=test/kokoro/xds_k8s_install_test_driver.sh + source "${script_dir}/xds_k8s_install_test_driver.sh" + set -x + if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then + kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}" + else + local_setup_test_driver "${script_dir}" + fi + build_docker_images_if_needed + # Run tests + cd "${TEST_DRIVER_FULL_DIR}" + run_test url_map +} + +main "$@" From 245ad25715e019716d10f5b24d761f85ff158c15 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 27 Jul 2021 15:13:18 -0400 Subject: [PATCH 172/998] Change version to 1.41.0-dev (#4625) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 54e4ea43bd44..9717a560b9ba 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.40.0-dev" +const Version = "1.41.0-dev" From 61c704607b40236f021f3120e5a4b1c237ed8ade Mon Sep 17 00:00:00 2001 From: raymonder jin Date: Thu, 29 Jul 2021 02:02:38 +0800 Subject: [PATCH 173/998] fix typo (#4616) --- xds/internal/balancer/balancergroup/balancergroup_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go index cf9a228ec1fc..355810a52004 100644 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ b/xds/internal/balancer/balancergroup/balancergroup_test.go @@ -845,7 +845,7 @@ func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing. defer replaceDefaultSubBalancerCloseTimeout(time.Second)() _, _, cc, addrToSC := initBalancerGroupForCachingTest(t) - // The sub-balancer is not re-added withtin timeout. The subconns should be + // The sub-balancer is not re-added within timeout. The subconns should be // removed. 
removeTimeout := time.After(DefaultSubBalancerCloseTimeout) scToRemove := map[balancer.SubConn]int{ From ad0a2a847cdfb3204c30d1423436fdeec8ff17bf Mon Sep 17 00:00:00 2001 From: April Kyle Nassi Date: Wed, 28 Jul 2021 14:46:46 -0700 Subject: [PATCH 174/998] Update MAINTAINERS.md (#4628) moved 2 to emeritus list --- MAINTAINERS.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 093c82b3afe8..c6672c0a3efe 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. ## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC + - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC From ea9b7a0a7651baaf43c5403cb83349fffb5162df Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 29 Jul 2021 17:23:32 -0700 Subject: [PATCH 175/998] xds: fix a typo (#4631) --- internal/xds/env/env.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/internal/xds/env/env.go b/internal/xds/env/env.go index e90c7ffc465c..448fd63c21eb 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -50,13 +50,13 @@ const ( var ( // BootstrapFileName holds the name of the file which contains xDS bootstrap // configuration. Users can specify the location of the bootstrap file by - // setting the environment variable "GRPC_XDS_BOOSTRAP". + // setting the environment variable "GRPC_XDS_BOOTSTRAP". // // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileName = os.Getenv(BootstrapFileNameEnv) // BootstrapFileContent holds the content of the xDS bootstrap // configuration. Users can specify the bootstrap config by - // setting the environment variable "GRPC_XDS_BOOSTRAP_CONFIG". + // setting the environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". // // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) From 8ed8dd26555f396d81f497415086ec73103e5825 Mon Sep 17 00:00:00 2001 From: ZhenLian Date: Mon, 2 Aug 2021 13:03:54 -0700 Subject: [PATCH 176/998] advancedtls: fix a typo in crl.go (#4634) --- security/advancedtls/crl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go index 5b3f90127ee7..3931c1ec6298 100644 --- a/security/advancedtls/crl.go +++ b/security/advancedtls/crl.go @@ -40,7 +40,7 @@ import ( var grpclogLogger = grpclog.Component("advancedtls") // Cache is an interface to cache CRL files. -// The cache implemetation must be concurrency safe. +// The cache implementation must be concurrency safe. // A fixed size lru cache from golang-lru is recommended. type Cache interface { // Add adds a value to the cache. 
From c052940bcd91bba85050ac193aeeca6e1c588e8a Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 2 Aug 2021 13:05:02 -0700 Subject: [PATCH 177/998] server: fix leaked net.Conn (#4633) This happens when NewServerTransport() returns nil, nil. The rawConn is closed when the transport is closed, which will never happen in this case (since the returned transport is nil). --- server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/server.go b/server.go index de1708306417..0251f48daf1d 100644 --- a/server.go +++ b/server.go @@ -863,6 +863,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { // Finish handshaking (HTTP2) st := s.newHTTP2Transport(conn, authInfo) if st == nil { + conn.Close() return } From edb9b3bc226676eba6fe1cddec44d082b5a30e4f Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 2 Aug 2021 15:56:58 -0700 Subject: [PATCH 178/998] github: update stale bot to v4 (#4636) --- .github/workflows/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 807d97c813b9..5e01a1e70c45 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -13,7 +13,7 @@ jobs: pull-requests: write steps: - - uses: actions/stale@v3 + - uses: actions/stale@v4 with: repo-token: ${{ secrets.GITHUB_TOKEN }} days-before-stale: 6 @@ -21,7 +21,7 @@ jobs: only-labels: 'Status: Requires Reporter Clarification' stale-issue-label: 'stale' stale-pr-label: 'stale' - operations-per-run: 60 + operations-per-run: 999 stale-issue-message: > This issue is labeled as requiring an update from the reporter, and no update has been received after 6 days. If no update is provided in the next 7 days, this issue will be automatically closed. 
From 0d6854ab5ecc205b0f7437919b7988f67144eba9 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 Aug 2021 14:17:02 -0700 Subject: [PATCH 179/998] transport: fix race accessing s.recvCompress (#4641) --- internal/transport/http2_client.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index fa41fec903ee..0cd6da1e73f7 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1301,6 +1301,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string statusGen *status.Status + recvCompress string httpStatusCode *int httpStatusErr string rawStatusCode = codes.Unknown @@ -1323,7 +1324,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata[hf.Name] = append(mdata[hf.Name], hf.Value) isGRPC = true case "grpc-encoding": - s.recvCompress = hf.Value + recvCompress = hf.Value case "grpc-status": code, err := strconv.ParseInt(hf.Value, 10, 32) if err != nil { @@ -1436,6 +1437,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. 
+ s.recvCompress = recvCompress if len(mdata) > 0 { s.header = mdata } From 6ba56c814be74c95e35a000582e074a380e545b0 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 Aug 2021 15:12:56 -0700 Subject: [PATCH 180/998] transport: fix race accessing s.recvCompress (#4645) This is a backport of #4641 From 74370577fa163f6022fb88a5926192a7c26a3933 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 5 Aug 2021 17:28:06 -0400 Subject: [PATCH 181/998] xds: Add route to filterchain (#4610) * Added RDS Information from LDS in filter chain --- xds/internal/server/listener_wrapper_test.go | 16 +- xds/internal/testutils/e2e/clientresources.go | 30 +- xds/internal/xdsclient/client.go | 21 + xds/internal/xdsclient/filter_chain.go | 126 ++++- xds/internal/xdsclient/filter_chain_test.go | 495 ++++++++++++++++-- xds/internal/xdsclient/lds_test.go | 41 +- xds/internal/xdsclient/rds_test.go | 79 ++- xds/internal/xdsclient/v2/rds_test.go | 18 +- xds/internal/xdsclient/xds.go | 170 +++--- xds/server_test.go | 16 +- 10 files changed, 818 insertions(+), 194 deletions(-) diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 8a79e2321dd9..088fb01d78a1 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -30,6 +30,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" @@ -87,7 +88,20 @@ var 
listenerWithFilterChains = &v3listenerpb.Listener{ { Name: "filter-1", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + }), }, }, }, diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go index 7c8311a51cc3..8089534614ca 100644 --- a/xds/internal/testutils/e2e/clientresources.go +++ b/xds/internal/testutils/e2e/clientresources.go @@ -199,7 +199,20 @@ func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3 { Name: "filter-1", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + }), }, }, }, @@ -230,7 +243,20 @@ func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3 { Name: "filter-1", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: 
&v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + }), }, }, }, diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index a7de226cd292..85dbf0ee8619 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -294,6 +294,25 @@ type HashPolicy struct { RegexSubstitution string } +// RouteAction is the action of the route from a received RDS response. +type RouteAction int + +const ( + // RouteActionUnsupported are routing types currently unsupported by grpc. + // According to A36, "A Route with an inappropriate action causes RPCs + // matching that route to fail." + RouteActionUnsupported RouteAction = iota + // RouteActionRoute is the expected route type on the client side. Route + // represents routing a request to some upstream cluster. On the client + // side, if an RPC matches to a route that is not RouteActionRoute, the RPC + // will fail according to A36. + RouteActionRoute + // RouteActionNonForwardingAction is the expected route type on the server + // side. NonForwardingAction represents when a route will generate a + // response directly, without forwarding to an upstream host. + RouteActionNonForwardingAction +) + // Route is both a specification of how to match a request as well as an // indication of the action to take upon match. type Route struct { @@ -321,6 +340,8 @@ type Route struct { // unused if the matching WeightedCluster contains an override for that // filter. HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + + RouteAction RouteAction } // WeightedCluster contains settings for an xds RouteAction.WeightedCluster. 
diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 7089b97594a1..49ebe887c36a 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -24,8 +24,10 @@ import ( "net" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/xds/internal/version" ) @@ -54,6 +56,15 @@ type FilterChain struct { SecurityCfg *SecurityConfig // HTTPFilters represent the HTTP Filters that comprise this FilterChain. HTTPFilters []HTTPFilter + // RouteConfigName is the route configuration name for this FilterChain. + // + // Only one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned for this filter chain. + // + // Only one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate } // SourceType specifies the connection source IP match type. @@ -109,6 +120,11 @@ type FilterChainManager struct { dstPrefixes []*destPrefixEntry def *FilterChain // Default filter chain, if specified. + + // RouteConfigNames are the route configuration names which need to be + // dynamically queried for RDS Configuration for any FilterChains which + // specify to load RDS Configuration dynamically. + RouteConfigNames map[string]bool } // destPrefixEntry is the value type of the map indexed on destination prefixes. @@ -158,7 +174,10 @@ type sourcePrefixEntry struct { // create a FilterChainManager. 
func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { // Parse all the filter chains and build the internal data structures. - fci := &FilterChainManager{dstPrefixMap: make(map[string]*destPrefixEntry)} + fci := &FilterChainManager{ + dstPrefixMap: make(map[string]*destPrefixEntry), + RouteConfigNames: make(map[string]bool), + } if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { return nil, err } @@ -187,7 +206,7 @@ func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, err var def *FilterChain if dfc := lis.GetDefaultFilterChain(); dfc != nil { var err error - if def, err = filterChainFromProto(dfc); err != nil { + if def, err = fci.filterChainFromProto(dfc); err != nil { return nil, err } } @@ -368,7 +387,7 @@ func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePre srcPorts = append(srcPorts, int(port)) } - fc, err := filterChainFromProto(fcProto) + fc, err := fci.filterChainFromProto(fcProto) if err != nil { return err } @@ -391,13 +410,19 @@ func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePre } // filterChainFromProto extracts the relevant information from the FilterChain -// proto and stores it in our internal representation. -func filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { - httpFilters, err := processNetworkFilters(fc.GetFilters()) +// proto and stores it in our internal representation. It also persists any +// RouteNames which need to be queried dynamically via RDS. +func (fci *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { + filterChain, err := processNetworkFilters(fc.GetFilters()) if err != nil { return nil, err } - filterChain := &FilterChain{HTTPFilters: httpFilters} + // These route names will be dynamically queried via RDS in the wrapped + // listener, which receives the LDS response, if specified for the filter + // chain. 
+ if filterChain.RouteConfigName != "" { + fci.RouteConfigNames[filterChain.RouteConfigName] = true + } // If the transport_socket field is not specified, it means that the control // plane has not sent us any security config. This is fine and the server // will use the fallback credentials configured as part of the @@ -435,6 +460,93 @@ func filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { return filterChain, nil } +func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) { + filterChain := &FilterChain{} + seenNames := make(map[string]bool, len(filters)) + seenHCM := false + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, fmt.Errorf("network filters {%+v} is missing name field in filter: {%+v}", filters, filter) + } + if seenNames[name] { + return nil, fmt.Errorf("network filters {%+v} has duplicate filter name %q", filters, name) + } + seenNames[name] = true + + // Network filters have a oneof field named `config_type` where we + // only support `TypedConfig` variant. + switch typ := filter.GetConfigType().(type) { + case *v3listenerpb.Filter_TypedConfig: + // The typed_config field has an `anypb.Any` proto which could + // directly contain the serialized bytes of the actual filter + // configuration, or it could be encoded as a `TypedStruct`. + // TODO: Add support for `TypedStruct`. + tc := filter.GetTypedConfig() + + // The only network filter that we currently support is the v3 + // HttpConnectionManager. So, we can directly check the type_url + // and unmarshal the config. + // TODO: Implement a registry of supported network filters (like + // we have for HTTP filters), when we have to support network + // filters other than HttpConnectionManager. 
+ if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { + return nil, fmt.Errorf("network filters {%+v} has unsupported network filter %q in filter {%+v}", filters, tc.GetTypeUrl(), filter) + } + hcm := &v3httppb.HttpConnectionManager{} + if err := ptypes.UnmarshalAny(tc, hcm); err != nil { + return nil, fmt.Errorf("network filters {%+v} failed unmarshaling of network filter {%+v}: %v", filters, filter, err) + } + // "Any filters after HttpConnectionManager should be ignored during + // connection processing but still be considered for validity. + // HTTPConnectionManager must have valid http_filters." - A36 + filters, err := processHTTPFilters(hcm.GetHttpFilters(), true) + if err != nil { + return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}", filters, hcm.GetHttpFilters()) + } + if !seenHCM { + // TODO: Implement terminal filter logic, as per A36. + filterChain.HTTPFilters = filters + seenHCM = true + switch hcm.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if hcm.GetRds().GetConfigSource().GetAds() == nil { + return nil, fmt.Errorf("ConfigSource is not ADS: %+v", hcm) + } + name := hcm.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", hcm) + } + filterChain.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + // "RouteConfiguration validation logic inherits all + // previous validations made for client-side usage as RDS + // does not distinguish between client-side and + // server-side." - A36 + // Can specify v3 here, as will never get to this function + // if v2. + routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig(), nil, false) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + filterChain.InlineRouteConfig = &routeU + case nil: + // No-op, as no route specifier is a valid configuration on + // the server side. 
+ default: + return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", hcm.RouteSpecifier) + } + } + default: + return nil, fmt.Errorf("network filters {%+v} has unsupported config_type %T in filter %s", filters, typ, filter.GetName()) + } + } + if !seenHCM { + return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters) + } + return filterChain, nil +} + // FilterChainLookupParams wraps parameters to be passed to Lookup. type FilterChainLookupParams struct { // IsUnspecified indicates whether the server is listening on a wildcard diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index e330a73a145b..525c865c7ffd 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -28,6 +28,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/google/go-cmp/cmp" @@ -41,11 +42,30 @@ import ( ) var ( + routeConfig = &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}} + inlineRouteConfig = &RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*Route{{Prefix: newStringP("/"), RouteAction: RouteActionNonForwardingAction}}, + }}} 
emptyValidNetworkFilters = []*v3listenerpb.Filter{ { Name: "filter-1", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), }, }, } @@ -249,8 +269,7 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { const wantErr = "multiple filter chains with overlapping matching rules are defined" for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis) - if err == nil || !strings.Contains(err.Error(), wantErr) { + if _, err := NewFilterChainManager(test.lis); err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, wantErr) } }) @@ -417,6 +436,323 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { } } +// TestNewFilterChainImpl_Success_RouteUpdate tests the construction of the +// filter chain with valid HTTP Filters present. 
+func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { + tests := []struct { + name string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + name: "rds", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-1", + }, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-1", + }, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + RouteConfigName: "route-1", + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + RouteConfigName: "route-1", + }, + RouteConfigNames: map[string]bool{"route-1": true}, + }, + }, + { + name: "inline route config", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: 
testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + // two rds tests whether the Filter Chain Manager successfully persists + // the two RDS names that need to be dynamically queried. 
+ { + name: "two rds", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-1", + }, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-2", + }, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + RouteConfigName: "route-1", + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + RouteConfigName: "route-2", + }, + RouteConfigNames: map[string]bool{ + "route-1": true, + "route-2": true, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpOpts) { + t.Fatalf("NewFilterChainManager() returned %+v, want: 
%+v", gotFC, test.wantFC) + } + }) + } +} + +// TestNewFilterChainImpl_Failure_BadRouteUpdate verifies cases where the Route +// Update in the filter chain are invalid. +func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { + tests := []struct { + name string + lis *v3listenerpb.Listener + wantErr string + }{ + { + name: "not-ads", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + RouteConfigName: "route-1", + }, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + RouteConfigName: "route-1", + }, + }, + }), + }, + }, + }, + }, + }, + wantErr: "ConfigSource is not ADS", + }, + { + name: "unsupported-route-specifier", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), + }, + }, + }, + }, + }, + wantErr: "unsupported type", + }, + } + + for _, test := range tests { + 
t.Run(test.name, func(t *testing.T) { + _, err := NewFilterChainManager(test.lis) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) + } + }) + } +} + // TestNewFilterChainImpl_Failure_BadHTTPFilters verifies cases where the HTTP // Filters in the filter chain are invalid. func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { @@ -513,6 +849,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -528,6 +867,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -548,7 +890,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, - }}, + }, + InlineRouteConfig: inlineRouteConfig, + }, }, }, }, @@ -556,13 +900,14 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { }, }, }, - def: &FilterChain{HTTPFilters: []HTTPFilter{ - { + def: &FilterChain{ + HTTPFilters: []HTTPFilter{{ Name: "serverOnlyCustomFilter", Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, - }, - }}, + }}, + InlineRouteConfig: inlineRouteConfig, + }, }, }, { @@ -580,6 +925,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { validServerSideHTTPFilter1, validServerSideHTTPFilter2, }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -596,6 +944,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { validServerSideHTTPFilter1, validServerSideHTTPFilter2, }, + RouteSpecifier: 
&v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -621,7 +972,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, - }}, + }, + InlineRouteConfig: inlineRouteConfig, + }, }, }, }, @@ -641,6 +994,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -661,6 +1015,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { validServerSideHTTPFilter1, validServerSideHTTPFilter2, }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -671,6 +1028,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -687,6 +1047,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { validServerSideHTTPFilter1, validServerSideHTTPFilter2, }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -697,6 +1060,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, }), }, }, @@ -722,7 +1088,9 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, - }}, + }, + InlineRouteConfig: inlineRouteConfig, + }, }, }, }, @@ -742,6 +1110,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -789,7 +1158,7 
@@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -797,7 +1166,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }, }, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -851,6 +1220,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { IdentityInstanceName: "identityPluginInstance", IdentityCertName: "identityCertName", }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -864,6 +1234,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { IdentityInstanceName: "defaultIdentityPluginInstance", IdentityCertName: "defaultIdentityCertName", }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -936,6 +1307,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { IdentityCertName: "identityCertName", RequireClientCert: true, }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -952,6 +1324,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { IdentityCertName: "defaultIdentityCertName", RequireClientCert: true, }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -982,7 +1355,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1018,7 +1391,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { dstPrefixMap: map[string]*destPrefixEntry{ unspecifiedPrefixMapKey: unspecifiedEntry, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -1047,7 +1420,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { net: ipNetFromCIDR("192.168.2.2/16"), }, }, - def: 
&FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -1076,7 +1449,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { net: ipNetFromCIDR("192.168.2.2/16"), }, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -1105,7 +1478,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { net: ipNetFromCIDR("192.168.2.2/16"), }, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, } @@ -1175,7 +1548,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1191,7 +1564,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1207,7 +1580,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1221,7 +1594,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1235,7 +1608,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1243,7 +1616,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, + def: 
&FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -1273,7 +1646,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1289,7 +1662,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1297,7 +1670,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -1327,7 +1700,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { "10.0.0.0/8": { net: ipNetFromCIDR("10.0.0.0/8"), srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1342,7 +1715,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { "192.168.0.0/16": { net: ipNetFromCIDR("192.168.0.0/16"), srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1350,7 +1723,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -1381,9 +1754,9 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 1: {}, - 2: {}, - 3: {}, + 1: {InlineRouteConfig: inlineRouteConfig}, + 2: {InlineRouteConfig: inlineRouteConfig}, + 3: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1400,9 +1773,9 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { "192.168.0.0/16": { net: ipNetFromCIDR("192.168.0.0/16"), srcPortMap: map[int]*FilterChain{ - 1: {}, - 
2: {}, - 3: {}, + 1: {InlineRouteConfig: inlineRouteConfig}, + 2: {InlineRouteConfig: inlineRouteConfig}, + 3: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1410,7 +1783,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, { @@ -1484,7 +1857,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1498,7 +1871,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1512,7 +1885,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1532,7 +1905,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcTypeArr: [3]*sourcePrefixes{}, }, }, - def: &FilterChain{}, + def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, }, }, } @@ -1804,7 +2177,10 @@ func TestLookup_Successes(t *testing.T) { IsUnspecifiedListener: true, DestAddr: net.IPv4(10, 1, 1, 1), }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "default"}}, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "default"}, + InlineRouteConfig: inlineRouteConfig, + }, }, { desc: "unspecified destination match", @@ -1815,7 +2191,10 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.IPv4(10, 1, 1, 1), SourcePort: 1, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "unspecified-dest-and-source-prefix"}}, + wantFC: &FilterChain{ + 
SecurityCfg: &SecurityConfig{IdentityInstanceName: "unspecified-dest-and-source-prefix"}, + InlineRouteConfig: inlineRouteConfig, + }, }, { desc: "wildcard destination match v4", @@ -1826,7 +2205,10 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.IPv4(10, 1, 1, 1), SourcePort: 1, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-prefixes-v4"}}, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-prefixes-v4"}, + InlineRouteConfig: inlineRouteConfig, + }, }, { desc: "wildcard source match v6", @@ -1837,7 +2219,10 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.ParseIP("2001:68::2"), SourcePort: 1, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-source-prefix-v6"}}, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-source-prefix-v6"}, + InlineRouteConfig: inlineRouteConfig, + }, }, { desc: "specific destination and wildcard source type match", @@ -1848,7 +2233,10 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.IPv4(192, 168, 100, 1), SourcePort: 80, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-unspecified-source-type"}}, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-unspecified-source-type"}, + InlineRouteConfig: inlineRouteConfig, + }, }, { desc: "specific destination and source type match", @@ -1859,7 +2247,10 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.IPv4(10, 1, 1, 1), SourcePort: 80, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type"}}, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type"}, + InlineRouteConfig: inlineRouteConfig, + }, }, { desc: "specific destination source type and source 
prefix", @@ -1870,7 +2261,10 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.IPv4(192, 168, 92, 100), SourcePort: 70, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix"}}, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix"}, + InlineRouteConfig: inlineRouteConfig, + }, }, { desc: "specific destination source type source prefix and source port", @@ -1881,7 +2275,10 @@ func TestLookup_Successes(t *testing.T) { SourceAddr: net.IPv4(192, 168, 92, 100), SourcePort: 80, }, - wantFC: &FilterChain{SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"}}, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"}, + InlineRouteConfig: inlineRouteConfig, + }, }, } @@ -1918,6 +2315,8 @@ func (fci *FilterChainManager) Equal(other *FilterChainManager) bool { // TODO: Support comparing dstPrefixes slice? 
case !cmp.Equal(fci.def, other.def, cmpopts.EquateEmpty(), protocmp.Transform()): return false + case !cmp.Equal(fci.RouteConfigNames, other.RouteConfigNames, cmpopts.EquateEmpty()): + return false } return true } diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index 012efd16d7b1..c04f92393fdf 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -493,7 +493,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { InlineRouteConfig: &RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{v3LDSTarget}, - Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, RouteAction: RouteActionRoute}}, }}}, MaxStreamDuration: time.Second, Raw: v3LisWithInlineRoute, @@ -563,11 +563,30 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { ) var ( + routeConfig = &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}} + inlineRouteConfig = &RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*Route{{Prefix: newStringP("/"), RouteAction: RouteActionNonForwardingAction}}, + }}} emptyValidNetworkFilters = []*v3listenerpb.Filter{ { Name: "filter-1", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), }, }, } @@ -806,13 +825,21 @@ func (s) 
TestUnmarshalListener_ServerSide(t *testing.T) { { Name: "name", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), }, }, { Name: "name", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), }, }, }, @@ -1051,7 +1078,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {}, + 0: {InlineRouteConfig: inlineRouteConfig}, }, }, }, @@ -1144,6 +1171,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { IdentityInstanceName: "identityPluginInstance", IdentityCertName: "identityCertName", }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -1157,6 +1185,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { IdentityInstanceName: "defaultIdentityPluginInstance", IdentityCertName: "defaultIdentityCertName", }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -1192,6 +1221,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { IdentityCertName: "identityCertName", RequireClientCert: true, }, + InlineRouteConfig: inlineRouteConfig, }, }, }, @@ -1208,6 +1238,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { IdentityCertName: "defaultIdentityCertName", RequireClientCert: true, }, + InlineRouteConfig: inlineRouteConfig, }, }, }, diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index 15a2afee1f7f..11edabce5b7c 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -76,6 +76,7 @@ func 
(s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Routes: []*Route{{ Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + RouteAction: RouteActionRoute, }}, HTTPFilterConfigOverride: cfgs, }}, @@ -178,7 +179,10 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP("/"), CaseInsensitive: true, WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP("/"), + CaseInsensitive: true, + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, }, @@ -220,11 +224,15 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, }, @@ -254,7 +262,9 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, }, @@ -331,6 +341,7 @@ func (s) 
TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { "b": {Weight: 3}, "c": {Weight: 5}, }, + RouteAction: RouteActionRoute, }}, }, }, @@ -365,6 +376,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, MaxStreamDuration: newDurationP(time.Second), + RouteAction: RouteActionRoute, }}, }, }, @@ -399,6 +411,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, MaxStreamDuration: newDurationP(time.Second), + RouteAction: RouteActionRoute, }}, }, }, @@ -433,6 +446,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, MaxStreamDuration: newDurationP(0), + RouteAction: RouteActionRoute, }}, }, }, @@ -621,11 +635,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, Raw: v2RouteConfig, @@ -644,11 +662,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: 
map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, Raw: v3RouteConfig, @@ -667,11 +689,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, Raw: v3RouteConfig, @@ -680,11 +706,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + 
WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, Raw: v2RouteConfig, @@ -714,11 +744,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, Raw: v3RouteConfig, @@ -727,11 +761,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}}}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, + RouteAction: RouteActionRoute}}, }, }, Raw: v2RouteConfig, @@ -794,6 +832,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { CaseInsensitive: true, WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60, HTTPFilterConfigOverride: cfgs}}, HTTPFilterConfigOverride: cfgs, + RouteAction: RouteActionRoute, }} } ) @@ -833,6 +872,7 @@ func (s) 
TestRoutesProtoToSlice(t *testing.T) { Prefix: newStringP("/"), CaseInsensitive: true, WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + RouteAction: RouteActionRoute, }}, }, { @@ -880,6 +920,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, Fraction: newUInt32P(10000), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + RouteAction: RouteActionRoute, }}, wantErr: false, }, @@ -925,6 +966,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, Fraction: newUInt32P(10000), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + RouteAction: RouteActionRoute, }}, wantErr: false, }, @@ -959,6 +1001,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { wantRoutes: []*Route{{ Prefix: newStringP("/a/"), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + RouteAction: RouteActionRoute, }}, wantErr: false, }, @@ -1126,6 +1169,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { wantRoutes: []*Route{{ Prefix: newStringP("/a/"), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + RouteAction: RouteActionRoute, }}, wantErr: false, }, @@ -1151,6 +1195,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { wantRoutes: []*Route{{ Prefix: newStringP("/a/"), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 20}, "B": {Weight: 30}}, + RouteAction: RouteActionRoute, }}, wantErr: false, }, @@ -1206,6 +1251,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { HashPolicies: []*HashPolicy{ {HashPolicyType: HashPolicyTypeChannelID}, }, + RouteAction: RouteActionRoute, }}, wantErr: false, }, @@ -1264,6 +1310,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {HashPolicyType: HashPolicyTypeHeader, HeaderName: ":path"}, }, + RouteAction: RouteActionRoute, }}, wantErr: false, }, diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go index 745308c4e5b6..57058aa36e94 100644 --- 
a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -111,11 +111,16 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: xdsclient.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName2: {Weight: 1}}}}, + Routes: []*xdsclient.Route{{ + Prefix: newStringP(""), + WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName2: {Weight: 1}}, + RouteAction: xdsclient.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig2, @@ -136,11 +141,16 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, + Routes: []*xdsclient.Route{{ + Prefix: newStringP(""), + WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: xdsclient.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName1: {Weight: 1}}}}, + Routes: []*xdsclient.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName1: {Weight: 1}}, + RouteAction: xdsclient.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig1, diff --git a/xds/internal/xdsclient/xds.go 
b/xds/internal/xdsclient/xds.go index 79c2efcd2cbc..c3b090fe42ab 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -277,65 +277,6 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err return lu, nil } -func processNetworkFilters(filters []*v3listenerpb.Filter) ([]HTTPFilter, error) { - seenNames := make(map[string]bool, len(filters)) - seenHCM := false - var httpFilters []HTTPFilter - for _, filter := range filters { - name := filter.GetName() - if name == "" { - return nil, fmt.Errorf("network filters {%+v} is missing name field in filter: {%+v}", filters, filter) - } - if seenNames[name] { - return nil, fmt.Errorf("network filters {%+v} has duplicate filter name %q", filters, name) - } - seenNames[name] = true - - // Network filters have a oneof field named `config_type` where we - // only support `TypedConfig` variant. - switch typ := filter.GetConfigType().(type) { - case *v3listenerpb.Filter_TypedConfig: - // The typed_config field has an `anypb.Any` proto which could - // directly contain the serialized bytes of the actual filter - // configuration, or it could be encoded as a `TypedStruct`. - // TODO: Add support for `TypedStruct`. - tc := filter.GetTypedConfig() - - // The only network filter that we currently support is the v3 - // HttpConnectionManager. So, we can directly check the type_url - // and unmarshal the config. - // TODO: Implement a registry of supported network filters (like - // we have for HTTP filters), when we have to support network - // filters other than HttpConnectionManager. 
- if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { - return nil, fmt.Errorf("network filters {%+v} has unsupported network filter %q in filter {%+v}", filters, tc.GetTypeUrl(), filter) - } - hcm := &v3httppb.HttpConnectionManager{} - if err := ptypes.UnmarshalAny(tc, hcm); err != nil { - return nil, fmt.Errorf("network filters {%+v} failed unmarshaling of network filter {%+v}: %v", filters, filter, err) - } - // "Any filters after HttpConnectionManager should be ignored during - // connection processing but still be considered for validity. - // HTTPConnectionManager must have valid http_filters." - A36 - filters, err := processHTTPFilters(hcm.GetHttpFilters(), true) - if err != nil { - return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}", filters, hcm.GetHttpFilters()) - } - if !seenHCM { - // TODO: Implement terminal filter logic, as per A36. - httpFilters = filters - seenHCM = true - } - default: - return nil, fmt.Errorf("network filters {%+v} has unsupported config_type %T in filter %s", filters, typ, filter.GetName()) - } - } - if !seenHCM { - return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters) - } - return httpFilters, nil -} - // UnmarshalRouteConfig processes resources received in an RDS response, // validates them, and transforms them into a native struct which contains only // fields we are interested in. The provided hostname determines the route @@ -491,65 +432,74 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, route.Fraction = &n } - route.WeightedClusters = make(map[string]WeightedCluster) - action := r.GetRoute() + switch r.GetAction().(type) { + case *v3routepb.Route_Route: + route.WeightedClusters = make(map[string]WeightedCluster) + action := r.GetRoute() - // Hash Policies are only applicable for a Ring Hash LB. 
- if env.RingHashSupport { - hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) - if err != nil { - return nil, err + // Hash Policies are only applicable for a Ring Hash LB. + if env.RingHashSupport { + hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) + if err != nil { + return nil, err + } + route.HashPolicies = hp } - route.HashPolicies = hp - } - switch a := action.GetClusterSpecifier().(type) { - case *v3routepb.RouteAction_Cluster: - route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} - case *v3routepb.RouteAction_WeightedClusters: - wcs := a.WeightedClusters - var totalWeight uint32 - for _, c := range wcs.Clusters { - w := c.GetWeight().GetValue() - if w == 0 { - continue - } - wc := WeightedCluster{Weight: w} - if !v2 { - cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) - if err != nil { - return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + switch a := action.GetClusterSpecifier().(type) { + case *v3routepb.RouteAction_Cluster: + route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} + case *v3routepb.RouteAction_WeightedClusters: + wcs := a.WeightedClusters + var totalWeight uint32 + for _, c := range wcs.Clusters { + w := c.GetWeight().GetValue() + if w == 0 { + continue + } + wc := WeightedCluster{Weight: w} + if !v2 { + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + } + wc.HTTPFilterConfigOverride = cfgs } - wc.HTTPFilterConfigOverride = cfgs + route.WeightedClusters[c.GetName()] = wc + totalWeight += w } - route.WeightedClusters[c.GetName()] = wc - totalWeight += w - } - // envoy xds doc - // default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight - wantTotalWeight := uint32(100) - if tw := wcs.GetTotalWeight(); tw != nil { - wantTotalWeight = 
tw.GetValue() + // envoy xds doc + // default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight + wantTotalWeight := uint32(100) + if tw := wcs.GetTotalWeight(); tw != nil { + wantTotalWeight = tw.GetValue() + } + if totalWeight != wantTotalWeight { + return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) + } + if totalWeight == 0 { + return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + } + case *v3routepb.RouteAction_ClusterHeader: + continue } - if totalWeight != wantTotalWeight { - return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) + + msd := action.GetMaxStreamDuration() + // Prefer grpc_timeout_header_max, if set. + dur := msd.GetGrpcTimeoutHeaderMax() + if dur == nil { + dur = msd.GetMaxStreamDuration() } - if totalWeight == 0 { - return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + if dur != nil { + d := dur.AsDuration() + route.MaxStreamDuration = &d } - case *v3routepb.RouteAction_ClusterHeader: - continue - } - - msd := action.GetMaxStreamDuration() - // Prefer grpc_timeout_header_max, if set. - dur := msd.GetGrpcTimeoutHeaderMax() - if dur == nil { - dur = msd.GetMaxStreamDuration() - } - if dur != nil { - d := dur.AsDuration() - route.MaxStreamDuration = &d + route.RouteAction = RouteActionRoute + case *v3routepb.Route_NonForwardingAction: + // Expected to be used on server side. 
+ route.RouteAction = RouteActionNonForwardingAction + default: + route.RouteAction = RouteActionUnsupported } if !v2 { diff --git a/xds/server_test.go b/xds/server_test.go index 00b8518fa9d9..df002dbabdcf 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -32,6 +32,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "google.golang.org/grpc" @@ -688,7 +689,20 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { { Name: "filter-1", ConfigType: &v3listenerpb.Filter_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + }), }, }, }, From fc30d5b571f5981b71e8391a04e23c5f98eab4c3 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 5 Aug 2021 14:30:04 -0700 Subject: [PATCH 182/998] xds/cluster_resolver: support RING_HASH as a child of cluster_resolver balancer (#4621) 1. merge endpoint picking and localility picking policy to one field in cluster_resolver's balancer config - This field only supports ROUND_ROBIN or RING_HASH. 
- This is to support RING_HASH policy, which is responsible both endpoint picking and locality picking. - If policy is RING_HASH, endpoints in localities will be flattened to a list of endpoints, and passed to the policy. 1. support building policy config with RING_HASH as a child - The config tree has one less layer comparing with ROUND_ROBIN - This also need to define RING_HASH's balancer config config 1. Deleted test `TestEDS_UpdateSubBalancerName` because now the balancer doesn't support updating child to a custom policy. --- internal/serviceconfig/serviceconfig.go | 4 +- .../clusterresolver/clusterresolver.go | 2 +- .../balancer/clusterresolver/config.go | 35 +-- .../balancer/clusterresolver/config_test.go | 72 ++++- .../balancer/clusterresolver/configbuilder.go | 145 ++++++++-- .../clusterresolver/configbuilder_test.go | 249 +++++++++++++++++- .../balancer/clusterresolver/eds_impl_test.go | 166 ------------ xds/internal/balancer/ringhash/config.go | 59 +++++ xds/internal/balancer/ringhash/config_test.go | 68 +++++ 9 files changed, 573 insertions(+), 227 deletions(-) create mode 100644 xds/internal/balancer/ringhash/config.go create mode 100644 xds/internal/balancer/ringhash/config_test.go diff --git a/internal/serviceconfig/serviceconfig.go b/internal/serviceconfig/serviceconfig.go index c0634d152c2e..badbdbf597f3 100644 --- a/internal/serviceconfig/serviceconfig.go +++ b/internal/serviceconfig/serviceconfig.go @@ -78,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) @@ -92,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { for name, jsonCfg = range lbcfg { } + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. 
@@ -120,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. - return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) } // MethodConfig defines the configuration recommended by the service providers for a diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index b48e3e97b716..b9568173badc 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -209,7 +209,7 @@ func (b *clusterResolverBalancer) updateChildConfig() error { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } - childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, b.config.EndpointPickingPolicy) + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, b.config.XDSLBPolicy) if err != nil { return fmt.Errorf("failed to build priority balancer config: %v", err) } diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 3bcb432b4091..33191fc7bd03 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -21,6 +21,7 @@ import ( "bytes" "encoding/json" "fmt" + "strings" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" @@ -150,29 +151,33 @@ type LBConfig struct { // concatenated together in successive priorities. DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` - // LocalityPickingPolicy is policy for locality picking. + // XDSLBPolicy specifies the policy for locality picking and endpoint picking. 
// - // This policy's config is expected to be in the format used by the - // weighted_target policy. Note that the config should include an empty - // value for the "targets" field; that empty value will be replaced by one - // that is dynamically generated based on the EDS data. Optional; defaults - // to "weighted_target". - LocalityPickingPolicy *internalserviceconfig.BalancerConfig `json:"localityPickingPolicy,omitempty"` - - // EndpointPickingPolicy is policy for endpoint picking. + // Note that it's not normal balancing policy, and it can only be either + // ROUND_ROBIN or RING_HASH. // - // This will be configured as the policy for each child in the - // locality-policy's config. Optional; defaults to "round_robin". - EndpointPickingPolicy *internalserviceconfig.BalancerConfig `json:"endpointPickingPolicy,omitempty"` - - // TODO: read and warn if endpoint is not roundrobin or locality is not - // weightedtarget. + // For ROUND_ROBIN, the policy name will be "ROUND_ROBIN", and the config + // will be empty. This sets the locality-picking policy to weighted_target + // and the endpoint-picking policy to round_robin. + // + // For RING_HASH, the policy name will be "RING_HASH", and the config will + // be lb config for the ring_hash_experimental LB Policy. ring_hash policy + // is responsible for both locality picking and endpoint picking. 
+ XDSLBPolicy *internalserviceconfig.BalancerConfig `json:"xdsLbPolicy,omitempty"` } +const ( + rrName = "ROUND_ROBIN" + rhName = "RING_HASH" +) + func parseConfig(c json.RawMessage) (*LBConfig, error) { var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } + if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, rrName) && !strings.EqualFold(lbp.Name, rhName) { + return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, rrName, rhName) + } return &cfg, nil } diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index 77b14deb6abe..f3ba4430eba0 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -25,6 +25,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/balancer/stub" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" ) @@ -90,6 +91,14 @@ func TestDiscoveryMechanismTypeUnmarshalJSON(t *testing.T) { } } +func init() { + // This is needed now for the config parsing tests to pass. Otherwise they + // will fail with "RING_HASH unsupported". + // + // TODO: delete this once ring-hash policy is implemented and imported. 
+ stub.Register(rhName, stub.BalancerFuncs{}) +} + const ( testJSONConfig1 = `{ "discoveryMechanisms": [{ @@ -119,8 +128,27 @@ const ( "type": "EDS", "edsServiceName": "test-eds-service-name" }], - "localityPickingPolicy":[{"pick_first":{}}], - "endpointPickingPolicy":[{"pick_first":{}}] + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] +}` + testJSONConfig4 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServerName": "test-lrs-server", + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name" + }], + "xdsLbPolicy":[{"RING_HASH":{}}] +}` + testJSONConfig5 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServerName": "test-lrs-server", + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name" + }], + "xdsLbPolicy":[{"pick_first":{}}] }` ) @@ -150,8 +178,7 @@ func TestParseConfig(t *testing.T) { EDSServiceName: testEDSServcie, }, }, - LocalityPickingPolicy: nil, - EndpointPickingPolicy: nil, + XDSLBPolicy: nil, }, wantErr: false, }, @@ -171,13 +198,12 @@ func TestParseConfig(t *testing.T) { Type: DiscoveryMechanismTypeLogicalDNS, }, }, - LocalityPickingPolicy: nil, - EndpointPickingPolicy: nil, + XDSLBPolicy: nil, }, wantErr: false, }, { - name: "OK with picking policy override", + name: "OK with picking policy round_robin", js: testJSONConfig3, want: &LBConfig{ DiscoveryMechanisms: []DiscoveryMechanism{ @@ -189,24 +215,44 @@ func TestParseConfig(t *testing.T) { EDSServiceName: testEDSServcie, }, }, - LocalityPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: "pick_first", + XDSLBPolicy: &internalserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", Config: nil, }, - EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: "pick_first", + }, + wantErr: false, + }, + { + name: "OK with picking policy ring_hash", + js: testJSONConfig4, + want: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{ + { + Cluster: 
testClusterName, + LoadReportingServerName: newString(testLRSServer), + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, + }, + }, + XDSLBPolicy: &internalserviceconfig.BalancerConfig{ + Name: "RING_HASH", Config: nil, }, }, wantErr: false, }, + { + name: "unsupported picking policy", + js: testJSONConfig5, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := parseConfig([]byte(tt.js)) if (err != nil) != tt.wantErr { - t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) - return + t.Fatalf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) } if diff := cmp.Diff(got, tt.want); diff != "" { t.Errorf("parseConfig() got unexpected output, diff (-got +want): %v", diff) diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index dfbdc1e2d671..e1242f3bd0a4 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -57,6 +58,9 @@ type priorityConfig struct { // // The built tree of balancers (see test for the output struct). // +// If xds lb policy is ROUND_ROBIN, the children will be weighted_target for +// locality picking, and round_robin for endpoint picking. 
+// // ┌────────┐ // │priority│ // └┬──────┬┘ @@ -77,15 +81,31 @@ type priorityConfig struct { // │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ // └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘ // +// If xds lb policy is RING_HASH, the children will be just a ring_hash policy. +// The endpoints from all localities will be flattened to one addresses list, +// and the ring_hash policy will pick endpoints from it. +// +// ┌────────┐ +// │priority│ +// └┬──────┬┘ +// │ │ +// ┌──────────▼─┐ ┌─▼──────────┐ +// │cluster_impl│ │cluster_impl│ +// └──────┬─────┘ └─────┬──────┘ +// │ │ +// ┌──────▼─────┐ ┌─────▼──────┐ +// │ ring_hash │ │ ring_hash │ +// └────────────┘ └────────────┘ +// // If endpointPickingPolicy is nil, roundrobin will be used. // // Custom locality picking policy isn't support, and weighted_target is always // used. -// -// TODO: support setting locality picking policy, and add a parameter for -// locality picking policy. 
-func buildPriorityConfigJSON(priorities []priorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { - pc, addrs := buildPriorityConfig(priorities, endpointPickingPolicy) +func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { + pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) + if err != nil { + return nil, nil, fmt.Errorf("failed to build priority config: %v", err) + } ret, err := json.Marshal(pc) if err != nil { return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err) @@ -93,7 +113,7 @@ func buildPriorityConfigJSON(priorities []priorityConfig, endpointPickingPolicy return ret, addrs, nil } -func buildPriorityConfig(priorities []priorityConfig, endpointPickingPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address) { +func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address, error) { var ( retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} retAddrs []resolver.Address @@ -101,7 +121,10 @@ func buildPriorityConfig(priorities []priorityConfig, endpointPickingPolicy *int for i, p := range priorities { switch p.mechanism.Type { case DiscoveryMechanismTypeEDS: - names, configs, addrs := buildClusterImplConfigForEDS(i, p.edsResp, p.mechanism, endpointPickingPolicy) + names, configs, addrs, err := buildClusterImplConfigForEDS(i, p.edsResp, p.mechanism, xdsLBPolicy) + if err != nil { + return nil, nil, err + } retConfig.Priorities = append(retConfig.Priorities, names...) for n, c := range configs { retConfig.Children[n] = &priority.Child{ @@ -123,7 +146,7 @@ func buildPriorityConfig(priorities []priorityConfig, endpointPickingPolicy *int retAddrs = append(retAddrs, addrs...) 
} } - return retConfig, retAddrs + return retConfig, retAddrs, nil } func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string, *clusterimpl.LBConfig, []resolver.Address) { @@ -146,17 +169,13 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, endpointPickingPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address) { +func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { var ( retNames []string retAddrs []resolver.Address retConfigs = make(map[string]*clusterimpl.LBConfig) ) - if endpointPickingPolicy == nil { - endpointPickingPolicy = &internalserviceconfig.BalancerConfig{Name: roundrobin.Name} - } - drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -171,19 +190,14 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.Endpoint // Prepend parent priority to the priority names, to avoid duplicates. 
pName := fmt.Sprintf("priority-%v-%v", parentPriority, priorityName) retNames = append(retNames, pName) - wtConfig, addrs := localitiesToWeightedTarget(priorityLocalities, pName, endpointPickingPolicy, mechanism.Cluster, mechanism.EDSServiceName) - retConfigs[pName] = &clusterimpl.LBConfig{ - Cluster: mechanism.Cluster, - EDSServiceName: mechanism.EDSServiceName, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig}, - LoadReportingServerName: mechanism.LoadReportingServerName, - MaxConcurrentRequests: mechanism.MaxConcurrentRequests, - DropCategories: drops, + cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) + if err != nil { + return nil, nil, nil, err } + retConfigs[pName] = cfg retAddrs = append(retAddrs, addrs...) } - - return retNames, retConfigs, retAddrs + return retNames, retConfigs, retAddrs, nil } // groupLocalitiesByPriority returns the localities grouped by priority. @@ -234,12 +248,95 @@ func dedupSortedIntSlice(a []int) []int { return a[:i+1] } +// rrBalancerConfig is a const roundrobin config, used as child of +// weighted-roundrobin. To avoid allocating memory everytime. +var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Name} + +// priorityLocalitiesToClusterImpl takes a list of localities (with the same +// priority), and generates a cluster impl policy config, and a list of +// addresses. 
+func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { + clusterImplCfg := &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + LoadReportingServerName: mechanism.LoadReportingServerName, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + DropCategories: drops, + // ChildPolicy is not set. Will be set based on xdsLBPolicy + } + + if xdsLBPolicy == nil || xdsLBPolicy.Name == rrName { + // If lb policy is ROUND_ROBIN: + // - locality-picking policy is weighted_target + // - endpoint-picking policy is round_robin + logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", rrName) + // Child of weighted_target is hardcoded to round_robin. + wtConfig, addrs := localitiesToWeightedTarget(localities, priorityName, rrBalancerConfig) + clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig} + return clusterImplCfg, addrs, nil + } + + if xdsLBPolicy.Name == rhName { + // If lb policy is RIHG_HASH, will build one ring_hash policy as child. + // The endpoints from all localities will be flattened to one addresses + // list, and the ring_hash policy will pick endpoints from it. + logger.Infof("xds lb policy is %q, building config with ring_hash", rhName) + addrs := localitiesToRingHash(localities, priorityName) + // Set child to ring_hash, note that the ring_hash config is from + // xdsLBPolicy. 
+ clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: ringhash.Name, Config: xdsLBPolicy.Config} + return clusterImplCfg, addrs, nil + } + + return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, rrName, rhName) +} + +// localitiesToRingHash takes a list of localities (with the same priority), and +// generates a list of addresses. +// +// The addresses have path hierarchy set to [priority-name], so priority knows +// which child policy they are for. +func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) []resolver.Address { + var addrs []resolver.Address + for _, locality := range localities { + var lw uint32 = 1 + if locality.Weight != 0 { + lw = locality.Weight + } + localityStr, err := locality.ID.ToString() + if err != nil { + localityStr = fmt.Sprintf("%+v", locality.ID) + } + for _, endpoint := range locality.Endpoints { + // Filter out all "unhealthy" endpoints (unknown and healthy are + // both considered to be healthy: + // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). + if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { + continue + } + + var ew uint32 = 1 + if endpoint.Weight != 0 { + ew = endpoint.Weight + } + + // The weight of each endpoint is locality_weight * endpoint_weight. + ai := weightedroundrobin.AddrInfo{Weight: lw * ew} + addr := weightedroundrobin.SetAddrInfo(resolver.Address{Addr: endpoint.Address}, ai) + addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addr = internal.SetLocalityID(addr, locality.ID) + addrs = append(addrs, addr) + } + } + return addrs +} + // localitiesToWeightedTarget takes a list of localities (with the same // priority), and generates a weighted target config, and list of addresses. 
// // The addresses have path hierarchy set to [priority-name, locality-name], so // priority and weighted target know which child policy they are for. -func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig, cluster, edsService string) (*weightedtarget.LBConfig, []resolver.Address) { +func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) { weightedTargets := make(map[string]weightedtarget.Target) var addrs []resolver.Address for _, locality := range localities { diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index d8f17d053aae..174bd376e5c5 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -173,7 +174,7 @@ func TestBuildPriorityConfigJSON(t *testing.T) { } func TestBuildPriorityConfig(t *testing.T) { - gotConfig, gotAddrs := buildPriorityConfig([]priorityConfig{ + gotConfig, gotAddrs, _ := buildPriorityConfig([]priorityConfig{ { mechanism: DiscoveryMechanism{ Cluster: testClusterName, @@ -332,7 +333,7 @@ func TestBuildClusterImplConfigForDNS(t *testing.T) { } func TestBuildClusterImplConfigForEDS(t *testing.T) { - gotNames, gotConfigs, gotAddrs := buildClusterImplConfigForEDS( + gotNames, gotConfigs, gotAddrs, _ := buildClusterImplConfigForEDS( 2, xdsclient.EndpointsUpdate{ Drops: []xdsclient.OverloadDropConfig{ @@ -561,6 +562,141 @@ 
func TestDedupSortedIntSlice(t *testing.T) { } } +func TestPriorityLocalitiesToClusterImpl(t *testing.T) { + tests := []struct { + name string + localities []xdsclient.Locality + priorityName string + mechanism DiscoveryMechanism + childPolicy *internalserviceconfig.BalancerConfig + wantConfig *clusterimpl.LBConfig + wantAddrs []resolver.Address + wantErr bool + }{{ + name: "round robin as child, no LRS", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + childPolicy: &internalserviceconfig.BalancerConfig{Name: rrName}, + mechanism: DiscoveryMechanism{ + Cluster: testClusterName, + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, + }, + // lrsServer is nil, so LRS policy will not be used. 
+ wantConfig: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSServcie, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }, + }, + }, + }, + wantAddrs: []resolver.Address{ + testAddrWithAttrs("addr-1-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + }, + }, + { + name: "ring_hash as child", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + childPolicy: &internalserviceconfig.BalancerConfig{Name: rhName, Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}}, + // lrsServer is nil, so LRS policy will not be used. 
+ wantConfig: &clusterimpl.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}, + }, + }, + wantAddrs: []resolver.Address{ + testAddrWithAttrs("addr-1-1", newUint32(1800), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", newUint32(200), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", newUint32(7200), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", newUint32(800), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + }, + }, + { + name: "unsupported child", + localities: []xdsclient.Locality{{ + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }}, + priorityName: "test-priority", + childPolicy: &internalserviceconfig.BalancerConfig{Name: "some-child"}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := priorityLocalitiesToClusterImpl(tt.localities, tt.priorityName, tt.mechanism, nil, tt.childPolicy) + if (err != nil) != tt.wantErr { + t.Fatalf("priorityLocalitiesToClusterImpl() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.wantConfig); diff != "" { + t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(got1, tt.wantAddrs, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { + t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) + } + }) + } +} + func TestLocalitiesToWeightedTarget(t *testing.T) { tests := []struct { name string @@ -568,8 +704,6 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { priorityName string childPolicy 
*internalserviceconfig.BalancerConfig lrsServer *string - cluster string - edsService string wantConfig *weightedtarget.LBConfig wantAddrs []resolver.Address }{ @@ -596,8 +730,6 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { priorityName: "test-priority", childPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, lrsServer: newString("test-lrs-server"), - cluster: "test-cluster", - edsService: "test-eds-service", wantConfig: &weightedtarget.LBConfig{ Targets: map[string]weightedtarget.Target{ assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { @@ -712,7 +844,7 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, got1 := localitiesToWeightedTarget(tt.localities, tt.priorityName, tt.childPolicy, tt.cluster, tt.edsService) + got, got1 := localitiesToWeightedTarget(tt.localities, tt.priorityName, tt.childPolicy) if diff := cmp.Diff(got, tt.wantConfig); diff != "" { t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) } @@ -723,6 +855,109 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { } } +func TestLocalitiesToRingHash(t *testing.T) { + tests := []struct { + name string + localities []xdsclient.Locality + priorityName string + wantAddrs []resolver.Address + }{ + { + // Check that address weights are locality_weight * endpoint_weight. 
+ name: "with locality and endpoint weight", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + wantAddrs: []resolver.Address{ + testAddrWithAttrs("addr-1-1", newUint32(1800), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", newUint32(200), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", newUint32(7200), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", newUint32(800), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + }, + }, + { + // Check that endpoint_weight is 0, weight is the locality weight. 
+ name: "locality weight only", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + wantAddrs: []resolver.Address{ + testAddrWithAttrs("addr-1-1", newUint32(20), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", newUint32(20), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", newUint32(80), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", newUint32(80), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + }, + }, + { + // Check that locality_weight is 0, weight is the endpoint weight. 
+ name: "endpoint weight only", + localities: []xdsclient.Locality{ + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + }, + { + Endpoints: []xdsclient.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + }, + }, + priorityName: "test-priority", + wantAddrs: []resolver.Address{ + testAddrWithAttrs("addr-1-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := localitiesToRingHash(tt.localities, tt.priorityName) + if diff := cmp.Diff(got, tt.wantAddrs, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { + t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) + } + }) + } +} + func assertString(f func() (string, error)) string { s, err := f() if err != nil { diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index f565c5249870..0fd1e590d463 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -28,9 +28,7 @@ import ( corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/google/go-cmp/cmp" 
"google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancer/stub" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/balancergroup" @@ -77,7 +75,6 @@ func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) Cluster: testClusterName, Type: DiscoveryMechanismTypeEDS, }}, - EndpointPickingPolicy: initChild, }, }); err != nil { edsb.Close() @@ -458,163 +455,6 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { } } -// Create XDS balancer, and update sub-balancer before handling eds responses. -// Then switch between round-robin and a test stub-balancer after handling first -// eds response. -func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { - const balancerName = "stubBalancer-TestEDS_UpdateSubBalancerName" - - stub.Register(balancerName, stub.BalancerFuncs{ - UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { - m, _ := bd.Data.(map[string]bool) - if m == nil { - m = make(map[string]bool) - bd.Data = m - } - for _, addr := range s.ResolverState.Addresses { - if !m[addr.Addr] { - m[addr.Addr] = true - bd.ClientConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) - } - } - return nil - }, - UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { - bd.ClientConn.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &testutils.TestConstPicker{Err: testutils.ErrTestConstPicker}, - }) - }, - }) - - t.Logf("initialize with sub-balancer: stub-balancer") - edsb, cc, xdsC, cleanup := setupTestEDS(t, &internalserviceconfig.BalancerConfig{Name: balancerName}) - defer cleanup() - - t.Logf("update sub-balancer to stub-balancer") - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &LBConfig{ - 
DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - Type: DiscoveryMechanismTypeEDS, - }}, - EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: balancerName, - }, - }, - }); err != nil { - t.Fatal(err) - } - - // Two localities, each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - for i := 0; i < 2; i++ { - sc := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - if err := testErrPickerFromCh(cc.NewPickerCh, testutils.ErrTestConstPicker); err != nil { - t.Fatal(err) - } - - t.Logf("update sub-balancer to round-robin") - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - Type: DiscoveryMechanismTypeEDS, - }}, - EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - }); err != nil { - t.Fatal(err) - } - - for i := 0; i < 2; i++ { - <-cc.RemoveSubConnCh - } - - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1, sc2}); err != nil { - t.Fatal(err) - } - - t.Logf("update sub-balancer to stub-balancer") - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - 
BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - Type: DiscoveryMechanismTypeEDS, - }}, - EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: balancerName, - }, - }, - }); err != nil { - t.Fatal(err) - } - - for i := 0; i < 2; i++ { - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && - !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want (%v or %v), got %v", sc1, sc2, scToRemove) - } - edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - } - - for i := 0; i < 2; i++ { - sc := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - if err := testErrPickerFromCh(cc.NewPickerCh, testutils.ErrTestConstPicker); err != nil { - t.Fatal(err) - } - - t.Logf("update sub-balancer to round-robin") - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - Type: DiscoveryMechanismTypeEDS, - }}, - EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - }); err != nil { - t.Fatal(err) - } - - for i := 0; i < 2; i++ { - <-cc.RemoveSubConnCh - } - - sc3 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - sc4 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3, sc4}); err != nil { - t.Fatal(err) - } -} - func (s) 
TestEDS_CircuitBreaking(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() @@ -627,9 +467,6 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { MaxConcurrentRequests: &maxRequests, Type: DiscoveryMechanismTypeEDS, }}, - EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, }, }); err != nil { t.Fatal(err) @@ -693,9 +530,6 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { MaxConcurrentRequests: &maxRequests2, Type: DiscoveryMechanismTypeEDS, }}, - EndpointPickingPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, }, }); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/ringhash/config.go b/xds/internal/balancer/ringhash/config.go new file mode 100644 index 000000000000..4d94ebb71824 --- /dev/null +++ b/xds/internal/balancer/ringhash/config.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the ring_hash balancer. +const Name = "ring_hash_experimental" + +// LBConfig is the balancer config for ring_hash balancer. 
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + MinRingSize uint64 `json:"minRingSize,omitempty"` + MaxRingSize uint64 `json:"maxRingSize,omitempty"` +} + +const ( + defaultMinSize = 1024 + defaultMaxSize = 8 * 1024 * 1024 // 8M +) + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + if cfg.MinRingSize == 0 { + cfg.MinRingSize = defaultMinSize + } + if cfg.MaxRingSize == 0 { + cfg.MaxRingSize = defaultMaxSize + } + if cfg.MinRingSize > cfg.MaxRingSize { + return nil, fmt.Errorf("min %v is greater than max %v", cfg.MinRingSize, cfg.MaxRingSize) + } + return &cfg, nil +} diff --git a/xds/internal/balancer/ringhash/config_test.go b/xds/internal/balancer/ringhash/config_test.go new file mode 100644 index 000000000000..a2a966dc3181 --- /dev/null +++ b/xds/internal/balancer/ringhash/config_test.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestParseConfig(t *testing.T) { + tests := []struct { + name string + js string + want *LBConfig + wantErr bool + }{ + { + name: "OK", + js: `{"minRingSize": 1, "maxRingSize": 2}`, + want: &LBConfig{MinRingSize: 1, MaxRingSize: 2}, + }, + { + name: "OK with default min", + js: `{"maxRingSize": 2000}`, + want: &LBConfig{MinRingSize: defaultMinSize, MaxRingSize: 2000}, + }, + { + name: "OK with default max", + js: `{"minRingSize": 2000}`, + want: &LBConfig{MinRingSize: 2000, MaxRingSize: defaultMaxSize}, + }, + { + name: "min greater than max", + js: `{"minRingSize": 10, "maxRingSize": 2}`, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseConfig([]byte(tt.js)) + if (err != nil) != tt.wantErr { + t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("parseConfig() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} From 574137db7de3c10e010d5023626169f13540cef1 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 6 Aug 2021 10:56:44 -0700 Subject: [PATCH 183/998] xds: fix flaky test (TestPickerUpdateAfterClose) (#4658) --- .../balancer/clusterimpl/balancer_test.go | 68 +++++++++++++++---- 1 file changed, 53 insertions(+), 15 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index e583b647473d..dc47941ef2c5 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -22,6 +22,7 @@ package clusterimpl import ( "context" + "errors" "fmt" "strings" "testing" @@ -30,9 +31,12 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" + 
"google.golang.org/grpc/balancer/base" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" xdsinternal "google.golang.org/grpc/xds/internal" @@ -62,6 +66,14 @@ var ( } ) +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { return func() balancer.SubConn { scst, _ := p.Pick(balancer.PickInfo{}) @@ -75,7 +87,7 @@ func init() { // TestDropByCategory verifies that the balancer correctly drops the picks, and // that the drops are reported. -func TestDropByCategory(t *testing.T) { +func (s) TestDropByCategory(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -233,7 +245,7 @@ func TestDropByCategory(t *testing.T) { // TestDropCircuitBreaking verifies that the balancer correctly drops the picks // due to circuit breaking, and that the drops are reported. -func TestDropCircuitBreaking(t *testing.T) { +func (s) TestDropCircuitBreaking(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -341,10 +353,11 @@ func TestDropCircuitBreaking(t *testing.T) { } } -// TestPickerUpdateAfterClose covers the case that cluster_impl wants to update -// picker after it's closed. Because picker updates are sent in the run() -// goroutine. -func TestPickerUpdateAfterClose(t *testing.T) { +// TestPickerUpdateAfterClose covers the case where a child policy sends a +// picker update after the cluster_impl policy is closed. 
Because picker updates +// are handled in the run() goroutine, which exits before Close() returns, we +// expect the above picker update to be dropped. +func (s) TestPickerUpdateAfterClose(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -353,6 +366,30 @@ func TestPickerUpdateAfterClose(t *testing.T) { cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) + // Create a stub balancer which waits for the cluster_impl policy to be + // closed before sending a picker update (upon receipt of a subConn state + // change). + closeCh := make(chan struct{}) + const childPolicyName = "stubBalancer-TestPickerUpdateAfterClose" + stub.Register(childPolicyName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + // Create a subConn which will be used later on to test the race + // between UpdateSubConnState() and Close(). + bd.ClientConn.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, _ balancer.SubConn, _ balancer.SubConnState) { + go func() { + // Wait for Close() to be called on the parent policy before + // sending the picker update. 
+ <-closeCh + bd.ClientConn.UpdateState(balancer.State{ + Picker: base.NewErrPicker(errors.New("dummy error picker")), + }) + }() + }, + }) + var maxRequest uint32 = 50 if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), @@ -361,7 +398,7 @@ func TestPickerUpdateAfterClose(t *testing.T) { EDSServiceName: testServiceName, MaxConcurrentRequests: &maxRequest, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, + Name: childPolicyName, }, }, }); err != nil { @@ -369,23 +406,24 @@ func TestPickerUpdateAfterClose(t *testing.T) { t.Fatalf("unexpected error from UpdateClientConnState: %v", err) } - // Send SubConn state changes to trigger picker updates. Balancer will - // closed in a defer. + // Send a subConn state change to trigger a picker update. The stub balancer + // that we use as the child policy will not send a picker update until the + // parent policy is closed. sc1 := <-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - // This close will race with the SubConn state update. b.Close() + close(closeCh) select { case <-cc.NewPickerCh: t.Fatalf("unexpected picker update after balancer is closed") - case <-time.After(time.Millisecond * 10): + case <-time.After(defaultShortTestTimeout): } } // TestClusterNameInAddressAttributes covers the case that cluster name is // attached to the subconn address attributes. -func TestClusterNameInAddressAttributes(t *testing.T) { +func (s) TestClusterNameInAddressAttributes(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -470,7 +508,7 @@ func TestClusterNameInAddressAttributes(t *testing.T) { // TestReResolution verifies that when a SubConn turns transient failure, // re-resolution is triggered. 
-func TestReResolution(t *testing.T) { +func (s) TestReResolution(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -547,7 +585,7 @@ func TestReResolution(t *testing.T) { } } -func TestLoadReporting(t *testing.T) { +func (s) TestLoadReporting(t *testing.T) { var testLocality = xdsinternal.LocalityID{ Region: "test-region", Zone: "test-zone", @@ -662,7 +700,7 @@ func TestLoadReporting(t *testing.T) { // - the init config specifies "" as the LRS server // - config modifies LRS server to a different string // - config sets LRS server to nil to stop load reporting -func TestUpdateLRSServer(t *testing.T) { +func (s) TestUpdateLRSServer(t *testing.T) { var testLocality = xdsinternal.LocalityID{ Region: "test-region", Zone: "test-zone", From 01bababd83492b6eb1c7046ab4c3a4b1bcc5e9d6 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 9 Aug 2021 23:15:57 -0400 Subject: [PATCH 184/998] Added connection to transport context (#4649) * Added connection to transport context --- internal/transport/http2_server.go | 17 ++++++++- internal/xds/rbac/rbac_engine.go | 16 ++------ internal/xds/rbac/rbac_engine_test.go | 7 +++- test/end2end_test.go | 54 +++++++++++++++++++++++++++ 4 files changed, 79 insertions(+), 15 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index e3799d50aa71..88c37723d983 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -217,7 +217,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) t := &http2Server{ - ctx: context.Background(), + ctx: setConnection(context.Background(), conn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), @@ -1345,3 +1345,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + 
+type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index 609d123c8039..f08228f77911 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -32,11 +32,14 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" ) +var getConnection = transport.GetConnection + // ChainEngine represents a chain of RBAC Engines, used to make authorization // decisions on incoming RPCs. type ChainEngine struct { @@ -206,16 +209,3 @@ type rpcData struct { // handshake. certs []*x509.Certificate } - -type connectionKey struct{} - -func getConnection(ctx context.Context) net.Conn { - conn, _ := ctx.Value(connectionKey{}).(net.Conn) - return conn -} - -// SetConnection adds the connection to the context to be able to get -// information about the destination ip and port for an incoming RPC. 
-func SetConnection(ctx context.Context, conn net.Conn) context.Context { - return context.WithValue(ctx, connectionKey{}, conn) -} diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go index 2521ac4526aa..807df9b87a81 100644 --- a/internal/xds/rbac/rbac_engine_test.go +++ b/internal/xds/rbac/rbac_engine_test.go @@ -440,6 +440,9 @@ func (s) TestNewChainEngine(t *testing.T) { // different types of data representing incoming RPC's (piped into a context), // and verifies that it works as expected. func (s) TestChainEngine(t *testing.T) { + defer func(gc func(ctx context.Context) net.Conn) { + getConnection = gc + }(getConnection) tests := []struct { name string rbacConfigs []*v3rbacpb.RBAC @@ -882,7 +885,9 @@ func (s) TestChainEngine(t *testing.T) { } conn := <-connCh defer conn.Close() - ctx = SetConnection(ctx, conn) + getConnection = func(context.Context) net.Conn { + return conn + } ctx = peer.NewContext(ctx, data.rpcData.peerInfo) stream := &ServerTransportStreamWithMethod{ method: data.rpcData.fullMethod, diff --git a/test/end2end_test.go b/test/end2end_test.go index 3d941b187bf9..92790143d2ea 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7766,3 +7766,57 @@ func (cvd *credentialsVerifyDeadline) Clone() credentials.TransportCredentials { func (cvd *credentialsVerifyDeadline) OverrideServerName(s string) error { return nil } + +func unaryInterceptorVerifyConn(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + conn := transport.GetConnection(ctx) + if conn == nil { + return nil, status.Error(codes.NotFound, "connection was not in context") + } + return nil, status.Error(codes.OK, "") +} + +// TestUnaryServerInterceptorGetsConnection tests whether the accepted conn on +// the server gets to any unary interceptors on the server side. 
+func (s) TestUnaryServerInterceptorGetsConnection(t *testing.T) { + ss := &stubserver.StubServer{} + if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyConn)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK) + } +} + +func streamingInterceptorVerifyConn(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + conn := transport.GetConnection(ss.Context()) + if conn == nil { + return status.Error(codes.NotFound, "connection was not in context") + } + return status.Error(codes.OK, "") +} + +// TestStreamingServerInterceptorGetsConnection tests whether the accepted conn on +// the server gets to any streaming interceptors on the server side. 
+func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) { + ss := &stubserver.StubServer{} + if err := ss.Start([]grpc.ServerOption{grpc.StreamInterceptor(streamingInterceptorVerifyConn)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{}) + if err != nil { + t.Fatalf("ss.Client.StreamingOutputCall(_) = _, %v, want _, ", err) + } + if _, err := s.Recv(); err != io.EOF { + t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF) + } +} From 997ce619eb555b6a481e741afa6390ad3cd80d5c Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 10 Aug 2021 13:22:34 -0700 Subject: [PATCH 185/998] clientconn: do not automatically reconnect addrConns; go idle instead (#4613) --- balancer/balancer.go | 17 +- balancer_conn_wrappers.go | 14 +- clientconn.go | 285 ++++++++++++++-------------- clientconn_state_transition_test.go | 9 +- clientconn_test.go | 28 ++- pickfirst.go | 15 +- test/channelz_test.go | 18 +- test/creds_test.go | 1 + test/end2end_test.go | 15 +- 9 files changed, 239 insertions(+), 163 deletions(-) diff --git a/balancer/balancer.go b/balancer/balancer.go index ab531f4c0b80..6a1b779edc26 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -353,8 +353,9 @@ var ErrBadResolverState = errors.New("bad resolver state") // // It's not thread safe. type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. 
} // RecordTransition records state change happening in subConn and based on that @@ -362,9 +363,10 @@ type ConnectivityStateEvaluator struct { // // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. +// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; +// - Else the aggregated state is Idle // -// Idle and Shutdown are not considered. +// Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { // Update counters. for idx, state := range []connectivity.State{oldState, newState} { @@ -374,6 +376,8 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal } } @@ -384,5 +388,8 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne if cse.numConnecting > 0 { return connectivity.Connecting } - return connectivity.TransientFailure + if cse.numTransientFailure > 0 { + return connectivity.TransientFailure + } + return connectivity.Idle } diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index dd8397963974..0ddb24f375f4 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -239,17 +239,17 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { return } - ac, err := cc.newAddrConn(addrs, opts) + newAC, err := cc.newAddrConn(addrs, opts) if err != nil { channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() if 
acState != connectivity.Idle { - ac.connect() + go newAC.connect() } } } @@ -257,7 +257,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() - acbw.ac.connect() + go acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { diff --git a/clientconn.go b/clientconn.go index b2bccfed136e..b9e9eed4681b 100644 --- a/clientconn.go +++ b/clientconn.go @@ -322,6 +322,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { + cc.Connect() s := cc.GetState() if s == connectivity.Ready { break @@ -539,12 +540,31 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // // Experimental // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + if cc.GetState() == connectivity.Idle { + cc.mu.Lock() + for ac := range cc.conns { + // TODO: should this be a signal to the LB policy instead? + go ac.connect() + } + cc.mu.Unlock() + } +} + func (cc *ClientConn) scWatcher() { for { select { @@ -845,8 +865,7 @@ func (ac *addrConn) connect() error { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. 
- go ac.resetTransport() + ac.resetTransport() return nil } @@ -883,6 +902,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { + // a.ServerName takes precedent over ClientConn authority, if present. + if a.ServerName == "" { + a.ServerName = ac.cc.authority + } if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -1135,112 +1158,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. 
ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } + ac.updateConnectivityState(connectivity.TransientFailure, err) - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) - - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. 
- b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) - return + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. 
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + return errConnClosing } ac.cc.mu.RLock() @@ -1255,9 +1252,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1266,57 +1263,49 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() +// createTransport creates a connection to addr. 
It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + // TODO: Delete prefaceReceived and move the logic to wait for it into the + // transport. + prefaceReceived := grpcsync.NewEvent() + connClosed := grpcsync.NewEvent() // addr.ServerName takes precedent over ClientConn authority, if present. if addr.ServerName == "" { addr.ServerName = ac.cc.authority } - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() - } + hctx, hcancel := context.WithCancel(ac.ctx) + hcStarted := false // protected by ac.mu onClose := func() { ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() + defer ac.mu.Unlock() + defer connClosed.Fire() + if !hcStarted { + // We didn't start the health check or set the state to READY, so + // no need to do anything else here. 
+ return + } + hcancel() + ac.transport = nil + // Refresh the name resolver + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } } - onPrefaceReceipt := func() { - close(prefaceReceived) + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + onClose() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) @@ -1325,27 +1314,47 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne copts.ChannelzParentID = ac.channelzID } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + return err } select { case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. - newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: + err := fmt.Errorf("failed to receive server preface within timeout") + newTr.Close(err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + return err + case <-prefaceReceived.Done(): // We got the preface - huzzah! things are good. 
- case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + defer prefaceReceived.Fire() + if connClosed.HasFired() { + // onClose called first; go idle but do nothing else. + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } + return nil + } + ac.curAddr = addr + ac.transport = newTr + hcStarted = true + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil + case <-connClosed.Done(): + // The transport has already closed. If we received the preface, too, + // this is not an error. + select { + case <-prefaceReceived.Done(): + return nil + default: + return errors.New("connection closed before server preface received") + } } - return newTr, reconnect, nil } // startHealthCheck starts the health checking stream (RPC) to watch the health diff --git a/clientconn_state_transition_test.go b/clientconn_state_transition_test.go index cd1213fb4fd1..2090c8de689b 100644 --- a/clientconn_state_transition_test.go +++ b/clientconn_state_transition_test.go @@ -75,7 +75,7 @@ func (s) TestStateTransitions_SingleAddress(t *testing.T) { }, }, { - desc: "When the connection is closed, the client enters TRANSIENT FAILURE.", + desc: "When the connection is closed before the preface is sent, the client enters TRANSIENT FAILURE.", want: []connectivity.State{ connectivity.Connecting, connectivity.TransientFailure, @@ -167,6 +167,7 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s t.Fatal(err) } defer client.Close() + go stayConnected(client) stateNotifications := testBalancerBuilder.nextStateNotifier() @@ -193,11 +194,12 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s } } -// When a READY connection is closed, the client enters CONNECTING. 
+// When a READY connection is closed, the client enters IDLE then CONNECTING. func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { want := []connectivity.State{ connectivity.Connecting, connectivity.Ready, + connectivity.Idle, connectivity.Connecting, } @@ -240,6 +242,7 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { t.Fatal(err) } defer client.Close() + go stayConnected(client) stateNotifications := testBalancerBuilder.nextStateNotifier() @@ -359,6 +362,7 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { want := []connectivity.State{ connectivity.Connecting, connectivity.Ready, + connectivity.Idle, connectivity.Connecting, } @@ -415,6 +419,7 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { t.Fatal(err) } defer client.Close() + go stayConnected(client) stateNotifications := testBalancerBuilder.nextStateNotifier() diff --git a/clientconn_test.go b/clientconn_test.go index a50db9419c2a..da765615be12 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -217,7 +217,7 @@ func (s) TestDialWaitsForServerSettingsAndFails(t *testing.T) { client.Close() t.Fatalf("Unexpected success (err=nil) while dialing") } - expectedMsg := "server handshake" + expectedMsg := "server preface" if !strings.Contains(err.Error(), context.DeadlineExceeded.Error()) || !strings.Contains(err.Error(), expectedMsg) { t.Fatalf("DialContext(_) = %v; want a message that includes both %q and %q", err, context.DeadlineExceeded.Error(), expectedMsg) } @@ -289,6 +289,9 @@ func (s) TestCloseConnectionWhenServerPrefaceNotReceived(t *testing.T) { if err != nil { t.Fatalf("Error while dialing. Err: %v", err) } + + go stayConnected(client) + // wait for connection to be accepted on the server. timer := time.NewTimer(time.Second * 10) select { @@ -311,9 +314,7 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { defer lis.Close() done := make(chan struct{}) go func() { // Launch the server. 
- defer func() { - close(done) - }() + defer close(done) conn, err := lis.Accept() // Accept the connection only to close it immediately. if err != nil { t.Errorf("Error while accepting. Err: %v", err) @@ -340,13 +341,13 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { prevAt = meow } }() - client, err := Dial(lis.Addr().String(), WithInsecure()) + cc, err := Dial(lis.Addr().String(), WithInsecure()) if err != nil { t.Fatalf("Error while dialing. Err: %v", err) } - defer client.Close() + defer cc.Close() + go stayConnected(cc) <-done - } func (s) TestWithTimeout(t *testing.T) { @@ -831,6 +832,7 @@ func (s) TestResetConnectBackoff(t *testing.T) { t.Fatalf("Dial() = _, %v; want _, nil", err) } defer cc.Close() + go stayConnected(cc) select { case <-dials: case <-time.NewTimer(10 * time.Second).C: @@ -985,6 +987,7 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { t.Fatal(err) } defer client.Close() + go stayConnected(client) timeout := time.After(5 * time.Second) @@ -1112,3 +1115,14 @@ func testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t *testing.T t.Fatal("default service config failed to be applied after 1s") } } + +// stayConnected makes cc stay connected by repeatedly calling cc.Connect() +// until the state becomes Shutdown or until 10 seconds elapses. 
+func stayConnected(cc *ClientConn) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + for state := cc.GetState(); state != connectivity.Shutdown && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + cc.Connect() + } +} diff --git a/pickfirst.go b/pickfirst.go index b858c2a5e63b..d32161c748d6 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -107,10 +107,12 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S } switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: + case connectivity.Ready: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ ConnectivityState: s.ConnectivityState, @@ -131,6 +133,17 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + sc balancer.SubConn +} + +func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + i.sc.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/test/channelz_test.go b/test/channelz_test.go index 47e7eb927169..6cb09dd8d89e 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -1689,8 +1689,22 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { } te.srvs[0].Stop() te.srvs[1].Stop() - // Here, we just wait for all sockets to be up. 
In the future, if we implement - // IDLE, we may need to make several rpc calls to create the sockets. + // Here, we just wait for all sockets to be up. Make several rpc calls to + // create the sockets since we do not automatically reconnect. + done := make(chan struct{}) + defer close(done) + go func() { + for { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + tc.EmptyCall(ctx, &testpb.Empty{}) + cancel() + select { + case <-time.After(10 * time.Millisecond): + case <-done: + return + } + } + }() if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { diff --git a/test/creds_test.go b/test/creds_test.go index 6b3fc2a46076..0c6018641819 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -165,6 +165,7 @@ func (c *clientTimeoutCreds) Clone() credentials.TransportCredentials { func (s) TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "empty"}) te.userAgent = testAppUA + te.nonBlockingDial = true te.startServer(&testServer{security: te.e.security}) defer te.tearDown() diff --git a/test/end2end_test.go b/test/end2end_test.go index 92790143d2ea..689a86841944 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7123,7 +7123,20 @@ func (s) TestGoAwayThenClose(t *testing.T) { // Send GO_AWAY to connection 1. go s1.GracefulStop() - // Wait for connection 2 to be established. + // Wait for the ClientConn to enter IDLE state. + state := cc.GetState() + for ; state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if state != connectivity.Idle { + t.Fatalf("timed out waiting for IDLE channel state; last state = %v", state) + } + + // Initiate another RPC to create another connection. 
+ if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) + } + + // Assert that connection 2 has been established. <-conn2Established.Done() // Close connection 1. From c7c1e9e0ec7aed0a530cde1e7d2fc7382a6816a2 Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Tue, 10 Aug 2021 20:31:26 -0700 Subject: [PATCH 186/998] Update xDS client/server image per-branch tag after build (#4661) --- test/kokoro/xds_k8s.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/kokoro/xds_k8s.sh b/test/kokoro/xds_k8s.sh index dbe200962c18..67f35ae850a5 100755 --- a/test/kokoro/xds_k8s.sh +++ b/test/kokoro/xds_k8s.sh @@ -43,6 +43,11 @@ build_test_app_docker_images() { gcloud -q auth configure-docker docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" docker push "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" + if [[ -n $KOKORO_JOB_NAME ]]; then + branch_name=$(echo "$KOKORO_JOB_NAME" | sed -E 's|^grpc/go/([^/]+)/.*|\1|') + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" + tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" + fi } ####################################### From 9c668aeab86903a70e291eb47a04f48d84e67006 Mon Sep 17 00:00:00 2001 From: Aliaksandr Mianzhynski Date: Wed, 11 Aug 2021 19:17:59 +0300 Subject: [PATCH 187/998] all: preallocate slices where possible (#4609) --- authz/rbac_translator.go | 8 ++++---- balancer/rls/internal/keys/builder.go | 2 +- balancer/roundrobin/roundrobin.go | 2 +- channelz/service/service.go | 2 +- cmd/protoc-gen-go-grpc/grpc.go | 2 +- internal/channelz/funcs.go | 2 +- internal/resolver/dns/dns_resolver.go | 2 +- internal/resolver/dns/dns_resolver_test.go | 6 ++++-- internal/xds/rbac/rbac_engine.go | 2 +- interop/xds/client/client.go | 5 +++-- xds/csds/csds.go | 8 ++++---- .../balancer/clusterresolver/configbuilder.go | 11 ++++------- xds/internal/resolver/matcher.go | 2 +- xds/internal/testutils/protos.go | 4 ++-- 
xds/internal/xdsclient/filter_chain.go | 13 ++++++++----- xds/internal/xdsclient/transport_helper.go | 5 +++-- xds/internal/xdsclient/v2/loadreport.go | 10 ++++------ xds/internal/xdsclient/v3/loadreport.go | 10 ++++------ xds/internal/xdsclient/xds.go | 3 +-- 19 files changed, 49 insertions(+), 50 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index 8dc764896053..fe305a1adce0 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -140,7 +140,7 @@ func getHeaderMatcher(key, value string) *v3routepb.HeaderMatcher { } func parsePrincipalNames(principalNames []string) []*v3rbacpb.Principal { - var ps []*v3rbacpb.Principal + ps := make([]*v3rbacpb.Principal, 0, len(principalNames)) for _, principalName := range principalNames { newPrincipalName := &v3rbacpb.Principal{ Identifier: &v3rbacpb.Principal_Authenticated_{ @@ -165,7 +165,7 @@ func parsePeer(source peer) (*v3rbacpb.Principal, error) { } func parsePaths(paths []string) []*v3rbacpb.Permission { - var ps []*v3rbacpb.Permission + ps := make([]*v3rbacpb.Permission, 0, len(paths)) for _, path := range paths { newPath := &v3rbacpb.Permission{ Rule: &v3rbacpb.Permission_UrlPath{ @@ -177,7 +177,7 @@ func parsePaths(paths []string) []*v3rbacpb.Permission { } func parseHeaderValues(key string, values []string) []*v3rbacpb.Permission { - var vs []*v3rbacpb.Permission + vs := make([]*v3rbacpb.Permission, 0, len(values)) for _, value := range values { newHeader := &v3rbacpb.Permission{ Rule: &v3rbacpb.Permission_Header{ @@ -204,7 +204,7 @@ func unsupportedHeader(key string) bool { } func parseHeaders(headers []header) ([]*v3rbacpb.Permission, error) { - var hs []*v3rbacpb.Permission + hs := make([]*v3rbacpb.Permission, 0, len(headers)) for i, header := range headers { if header.Key == "" { return nil, fmt.Errorf(`"headers" %d: "key" is not present`, i) diff --git a/balancer/rls/internal/keys/builder.go b/balancer/rls/internal/keys/builder.go index 5ce5a9da508a..24767b405f06 
100644 --- a/balancer/rls/internal/keys/builder.go +++ b/balancer/rls/internal/keys/builder.go @@ -218,7 +218,7 @@ func (b builder) keys(md metadata.MD) KeyMap { } func mapToString(kv map[string]string) string { - var keys []string + keys := make([]string, 0, len(kv)) for k := range kv { keys = append(keys, k) } diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index 43c2a15373a1..3c163f3b5a05 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -51,7 +51,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } diff --git a/channelz/service/service.go b/channelz/service/service.go index c60ab604e81b..c1639de8b267 100644 --- a/channelz/service/service.go +++ b/channelz/service/service.go @@ -78,7 +78,7 @@ func channelTraceToProto(ct *channelz.ChannelTrace) *channelzpb.ChannelTrace { if ts, err := ptypes.TimestampProto(ct.CreationTime); err == nil { pbt.CreationTimestamp = ts } - var events []*channelzpb.ChannelTraceEvent + events := make([]*channelzpb.ChannelTraceEvent, 0, len(ct.Events)) for _, e := range ct.Events { cte := &channelzpb.ChannelTraceEvent{ Description: e.Desc, diff --git a/cmd/protoc-gen-go-grpc/grpc.go b/cmd/protoc-gen-go-grpc/grpc.go index 24ad747cc91f..f45e0403fd4f 100644 --- a/cmd/protoc-gen-go-grpc/grpc.go +++ b/cmd/protoc-gen-go-grpc/grpc.go @@ -207,7 +207,7 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P() // Server handler implementations. 
- var handlerNames []string + handlerNames := make([]string, 0, len(service.Methods)) for _, method := range service.Methods { hname := genServerMethod(gen, file, g, method) handlerNames = append(handlerNames, hname) diff --git a/internal/channelz/funcs.go b/internal/channelz/funcs.go index f7314139303e..6d5760d95146 100644 --- a/internal/channelz/funcs.go +++ b/internal/channelz/funcs.go @@ -630,7 +630,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 03825bbe7b56..fa1f21aa9abb 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -323,12 +323,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 73749eca44b0..69307a981cf2 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -30,6 +30,8 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/internal/envconfig" @@ -748,7 +750,7 @@ func testDNSResolver(t *testing.T) { if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } - if !reflect.DeepEqual(a.addrWant, 
state.Addresses) { + if !cmp.Equal(a.addrWant, state.Addresses, cmpopts.EquateEmpty()) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant) } sc := scFromState(state) @@ -976,7 +978,7 @@ func testDNSResolverWithSRV(t *testing.T) { if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } - if !reflect.DeepEqual(a.addrWant, state.Addresses) { + if !cmp.Equal(a.addrWant, state.Addresses, cmpopts.EquateEmpty()) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant) } gs := grpclbstate.Get(state) diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index f08228f77911..269edabeaa65 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -49,7 +49,7 @@ type ChainEngine struct { // NewChainEngine returns a chain of RBAC engines, used to make authorization // decisions on incoming RPCs. Returns a non-nil error for invalid policies. 
func NewChainEngine(policies []*v3rbacpb.RBAC) (*ChainEngine, error) { - var engines []*engine + engines := make([]*engine, 0, len(policies)) for _, policy := range policies { engine, err := newEngine(policy) if err != nil { diff --git a/interop/xds/client/client.go b/interop/xds/client/client.go index 92999b2193ed..190c8a7d7866 100644 --- a/interop/xds/client/client.go +++ b/interop/xds/client/client.go @@ -314,12 +314,13 @@ const ( emptyCall string = "EmptyCall" ) -func parseRPCTypes(rpcStr string) (ret []string) { +func parseRPCTypes(rpcStr string) []string { if len(rpcStr) == 0 { return []string{unaryCall} } rpcs := strings.Split(rpcStr, ",") + ret := make([]string, 0, len(rpcStr)) for _, r := range rpcs { switch r { case unaryCall, emptyCall: @@ -329,7 +330,7 @@ func parseRPCTypes(rpcStr string) (ret []string) { log.Fatalf("unsupported RPC type: %v", r) } } - return + return ret } type rpcConfig struct { diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 1b54a3a4c6e3..c4477a55d1a8 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -164,7 +164,7 @@ func nodeProtoToV3(n proto.Message) *v3corepb.Node { func (s *ClientStatusDiscoveryServer) buildLDSPerXDSConfig() *v3statuspb.PerXdsConfig { version, dump := s.xdsClient.DumpLDS() - var resources []*v3adminpb.ListenersConfigDump_DynamicListener + resources := make([]*v3adminpb.ListenersConfigDump_DynamicListener, 0, len(dump)) for name, d := range dump { configDump := &v3adminpb.ListenersConfigDump_DynamicListener{ Name: name, @@ -198,7 +198,7 @@ func (s *ClientStatusDiscoveryServer) buildLDSPerXDSConfig() *v3statuspb.PerXdsC func (s *ClientStatusDiscoveryServer) buildRDSPerXDSConfig() *v3statuspb.PerXdsConfig { _, dump := s.xdsClient.DumpRDS() - var resources []*v3adminpb.RoutesConfigDump_DynamicRouteConfig + resources := make([]*v3adminpb.RoutesConfigDump_DynamicRouteConfig, 0, len(dump)) for _, d := range dump { configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ VersionInfo: d.MD.Version, @@ 
-228,7 +228,7 @@ func (s *ClientStatusDiscoveryServer) buildRDSPerXDSConfig() *v3statuspb.PerXdsC func (s *ClientStatusDiscoveryServer) buildCDSPerXDSConfig() *v3statuspb.PerXdsConfig { version, dump := s.xdsClient.DumpCDS() - var resources []*v3adminpb.ClustersConfigDump_DynamicCluster + resources := make([]*v3adminpb.ClustersConfigDump_DynamicCluster, 0, len(dump)) for _, d := range dump { configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{ VersionInfo: d.MD.Version, @@ -259,7 +259,7 @@ func (s *ClientStatusDiscoveryServer) buildCDSPerXDSConfig() *v3statuspb.PerXdsC func (s *ClientStatusDiscoveryServer) buildEDSPerXDSConfig() *v3statuspb.PerXdsConfig { _, dump := s.xdsClient.DumpEDS() - var resources []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig + resources := make([]*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig, 0, len(dump)) for _, d := range dump { configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ VersionInfo: d.MD.Version, diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index e1242f3bd0a4..475497d48950 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -152,7 +152,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string, *clusterimpl.LBConfig, []resolver.Address) { // Endpoint picking policy for DNS is hardcoded to pick_first. 
const childPolicy = "pick_first" - var retAddrs []resolver.Address + retAddrs := make([]resolver.Address, 0, len(addrStrs)) pName := fmt.Sprintf("priority-%v", parentPriority) for _, addrStr := range addrStrs { retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) @@ -170,12 +170,6 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { - var ( - retNames []string - retAddrs []resolver.Address - retConfigs = make(map[string]*clusterimpl.LBConfig) - ) - drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -185,6 +179,9 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.Endpoint } priorityChildNames, priorities := groupLocalitiesByPriority(edsResp.Localities) + retNames := make([]string, 0, len(priorityChildNames)) + retAddrs := make([]resolver.Address, 0, len(priorityChildNames)) + retConfigs := make(map[string]*clusterimpl.LBConfig, len(priorityChildNames)) for _, priorityName := range priorityChildNames { priorityLocalities := priorities[priorityName] // Prepend parent priority to the priority names, to avoid duplicates. 
diff --git a/xds/internal/resolver/matcher.go b/xds/internal/resolver/matcher.go index 6e09d93afa78..4a0a0ca149ae 100644 --- a/xds/internal/resolver/matcher.go +++ b/xds/internal/resolver/matcher.go @@ -43,7 +43,7 @@ func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { return nil, fmt.Errorf("illegal route: missing path_matcher") } - var headerMatchers []matcher.HeaderMatcher + headerMatchers := make([]matcher.HeaderMatcher, 0, len(r.Headers)) for _, h := range r.Headers { var matcherT matcher.HeaderMatcher switch { diff --git a/xds/internal/testutils/protos.go b/xds/internal/testutils/protos.go index e0dba0e2b301..fc3cdf307fcd 100644 --- a/xds/internal/testutils/protos.go +++ b/xds/internal/testutils/protos.go @@ -59,7 +59,7 @@ type ClusterLoadAssignmentBuilder struct { // NewClusterLoadAssignmentBuilder creates a ClusterLoadAssignmentBuilder. func NewClusterLoadAssignmentBuilder(clusterName string, dropPercents map[string]uint32) *ClusterLoadAssignmentBuilder { - var drops []*v2xdspb.ClusterLoadAssignment_Policy_DropOverload + drops := make([]*v2xdspb.ClusterLoadAssignment_Policy_DropOverload, 0, len(dropPercents)) for n, d := range dropPercents { drops = append(drops, &v2xdspb.ClusterLoadAssignment_Policy_DropOverload{ Category: n, @@ -88,7 +88,7 @@ type AddLocalityOptions struct { // AddLocality adds a locality to the builder. 
func (clab *ClusterLoadAssignmentBuilder) AddLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *AddLocalityOptions) { - var lbEndPoints []*v2endpointpb.LbEndpoint + lbEndPoints := make([]*v2endpointpb.LbEndpoint, 0, len(addrsWithPort)) for i, a := range addrsWithPort { host, portStr, err := net.SplitHostPort(a) if err != nil { diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 49ebe887c36a..c8230d693b93 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -242,8 +242,9 @@ func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) } func (fci *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.FilterChain) error { - var dstPrefixes []*net.IPNet - for _, pr := range fc.GetFilterChainMatch().GetPrefixRanges() { + ranges := fc.GetFilterChainMatch().GetPrefixRanges() + dstPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range ranges { cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) _, ipnet, err := net.ParseCIDR(cidr) if err != nil { @@ -342,7 +343,8 @@ func (fci *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefix // structures and delegates control to addFilterChainsForSourcePorts to continue // building the internal data structure. 
func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, fc *v3listenerpb.FilterChain) error { - var srcPrefixes []*net.IPNet + ranges := fc.GetFilterChainMatch().GetSourcePrefixRanges() + srcPrefixes := make([]*net.IPNet, 0, len(ranges)) for _, pr := range fc.GetFilterChainMatch().GetSourcePrefixRanges() { cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) _, ipnet, err := net.ParseCIDR(cidr) @@ -382,8 +384,9 @@ func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map // It is here that we determine if there are multiple filter chains with // overlapping matching rules. func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, fcProto *v3listenerpb.FilterChain) error { - var srcPorts []int - for _, port := range fcProto.GetFilterChainMatch().GetSourcePorts() { + ports := fcProto.GetFilterChainMatch().GetSourcePorts() + srcPorts := make([]int, 0, len(ports)) + for _, port := range ports { srcPorts = append(srcPorts, int(port)) } diff --git a/xds/internal/xdsclient/transport_helper.go b/xds/internal/xdsclient/transport_helper.go index 1e3caa606d1d..4c56daaf011b 100644 --- a/xds/internal/xdsclient/transport_helper.go +++ b/xds/internal/xdsclient/transport_helper.go @@ -342,11 +342,12 @@ func (t *TransportHelper) recv(stream grpc.ClientStream) bool { } } -func mapToSlice(m map[string]bool) (ret []string) { +func mapToSlice(m map[string]bool) []string { + ret := make([]string, 0, len(m)) for i := range m { ret = append(ret, i) } - return + return ret } type watchAction struct { diff --git a/xds/internal/xdsclient/v2/loadreport.go b/xds/internal/xdsclient/v2/loadreport.go index 77db5eb9d8d6..f0034e21c353 100644 --- a/xds/internal/xdsclient/v2/loadreport.go +++ b/xds/internal/xdsclient/v2/loadreport.go @@ -99,24 +99,22 @@ func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) return fmt.Errorf("lrs: Attempt to 
send request on unsupported stream type: %T", s) } - var clusterStats []*v2endpointpb.ClusterStats + clusterStats := make([]*v2endpointpb.ClusterStats, 0, len(loads)) for _, sd := range loads { - var ( - droppedReqs []*v2endpointpb.ClusterStats_DroppedRequests - localityStats []*v2endpointpb.UpstreamLocalityStats - ) + droppedReqs := make([]*v2endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) for category, count := range sd.Drops { droppedReqs = append(droppedReqs, &v2endpointpb.ClusterStats_DroppedRequests{ Category: category, DroppedCount: count, }) } + localityStats := make([]*v2endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) for l, localityData := range sd.LocalityStats { lid, err := internal.LocalityIDFromString(l) if err != nil { return err } - var loadMetricStats []*v2endpointpb.EndpointLoadMetricStats + loadMetricStats := make([]*v2endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) for name, loadData := range localityData.LoadStats { loadMetricStats = append(loadMetricStats, &v2endpointpb.EndpointLoadMetricStats{ MetricName: name, diff --git a/xds/internal/xdsclient/v3/loadreport.go b/xds/internal/xdsclient/v3/loadreport.go index 147751baab03..8cdb5476fbbd 100644 --- a/xds/internal/xdsclient/v3/loadreport.go +++ b/xds/internal/xdsclient/v3/loadreport.go @@ -99,24 +99,22 @@ func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) } - var clusterStats []*v3endpointpb.ClusterStats + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) for _, sd := range loads { - var ( - droppedReqs []*v3endpointpb.ClusterStats_DroppedRequests - localityStats []*v3endpointpb.UpstreamLocalityStats - ) + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) for category, count := range sd.Drops { droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ Category: category, 
DroppedCount: count, }) } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) for l, localityData := range sd.LocalityStats { lid, err := internal.LocalityIDFromString(l) if err != nil { return err } - var loadMetricStats []*v3endpointpb.EndpointLoadMetricStats + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) for name, loadData := range localityData.LoadStats { loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ MetricName: name, diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index c3b090fe42ab..656ad854e670 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -324,7 +324,7 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s // message, the cluster field will contain the clusterName or weighted clusters // we are looking for. func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { - var vhs []*VirtualHost + vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) for _, vh := range rc.GetVirtualHosts() { routes, err := routesProtoToSlice(vh.Routes, logger, v2) if err != nil { @@ -348,7 +348,6 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { var routesRet []*Route - for _, r := range routes { match := r.GetMatch() if match == nil { From 88dc96b463fb9a695e6181750e78524df1903601 Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Wed, 11 Aug 2021 14:33:44 -0700 Subject: [PATCH 188/998] Copy the tag_and_push_docker_image method to grpc-go (#4667) --- test/kokoro/xds_k8s_install_test_driver.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/kokoro/xds_k8s_install_test_driver.sh 
b/test/kokoro/xds_k8s_install_test_driver.sh index a342bd995f92..aacaaf1ef56a 100755 --- a/test/kokoro/xds_k8s_install_test_driver.sh +++ b/test/kokoro/xds_k8s_install_test_driver.sh @@ -360,3 +360,21 @@ local_setup_test_driver() { readonly TEST_XML_OUTPUT_DIR="${TEST_DRIVER_FULL_DIR}/out" mkdir -p "${TEST_XML_OUTPUT_DIR}" } + +####################################### +# Tag and push the given Docker image +# Arguments: +# The Docker image name +# The Docker image original tag name +# The Docker image new tag name +# Outputs: +# Writes the output to stdout, stderr, files +####################################### +tag_and_push_docker_image() { + local image_name="$1" + local from_tag="$2" + local to_tag="$3" + + docker tag "${image_name}:${from_tag}" "${image_name}:${to_tag}" + docker push "${image_name}:${to_tag}" +} From ad87ad009856d3423e067fc49b990d05e16d706c Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 11 Aug 2021 18:48:24 -0400 Subject: [PATCH 189/998] xds: Add support for Dynamic RDS in listener wrapper (#4655) * Add support for Dynamic RDS in listener wrapper --- xds/internal/resolver/watch_service_test.go | 26 +- xds/internal/resolver/xds_resolver_test.go | 38 +- xds/internal/server/listener_wrapper.go | 124 +++++- xds/internal/server/listener_wrapper_test.go | 134 +++++- xds/internal/server/rds_handler.go | 133 ++++++ xds/internal/server/rds_handler_test.go | 403 +++++++++++++++++++ xds/internal/testutils/fakeclient/client.go | 34 +- xds/internal/xdsclient/client.go | 1 - xds/server.go | 5 + xds/server_test.go | 79 +++- 10 files changed, 914 insertions(+), 63 deletions(-) create mode 100644 xds/internal/server/rds_handler.go create mode 100644 xds/internal/server/rds_handler_test.go diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 31c45bf3977f..270a8e8ac040 100644 --- a/xds/internal/resolver/watch_service_test.go +++ 
b/xds/internal/resolver/watch_service_test.go @@ -169,7 +169,7 @@ func (s) TestServiceWatch(t *testing.T) { waitForWatchRouteConfig(ctx, t, xdsC, routeStr) wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -187,7 +187,7 @@ func (s) TestServiceWatch(t *testing.T) { WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}, }}, }} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -223,7 +223,7 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { waitForWatchRouteConfig(ctx, t, xdsC, routeStr) wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -237,14 +237,14 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { // Another LDS update with a different RDS_name. 
xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr + "2"}, nil) - if err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } waitForWatchRouteConfig(ctx, t, xdsC, routeStr+"2") // RDS update for the new name. wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback(routeStr+"2", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -279,7 +279,7 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}, ldsConfig: ldsConfig{maxStreamDuration: time.Second}, } - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -303,7 +303,7 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -337,7 +337,7 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", 
xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -354,7 +354,7 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelRouteConfigWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelRouteConfigWatch(sCtx); err != context.DeadlineExceeded { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } } @@ -378,7 +378,7 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -402,7 +402,7 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, }}, nil) // This inline RDS resource should cause the RDS watch to be canceled. - if err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { @@ -412,7 +412,7 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { // Switch LDS update back to LDS with RDS name to watch. 
xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -429,7 +429,7 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, }}, nil) // This inline RDS resource should cause the RDS watch to be canceled. - if err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 9bce8ffe8bf6..8bd2cec67446 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -270,7 +270,7 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { // Call the watchAPI callback after closing the resolver, and make sure no // update is triggerred on the ClientConn. xdsR.Close() - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -317,7 +317,7 @@ func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) @@ -406,7 +406,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { } { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -480,7 +480,7 @@ func (s) TestXDSResolverRequestHash(t *testing.T) { waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke watchAPI callback with a good service update (with hash policies // specified) and wait for UpdateState method to be called on ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -541,7 +541,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -572,7 +572,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { // Delete the resource suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) if _, err = tcc.stateCh.Receive(ctx); err != nil { t.Fatalf("Error waiting for UpdateState to be called: %v", err) @@ -601,7 +601,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -651,7 +651,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { // Delete the resource. The channel should receive a service config with the // original cluster but with an erroring config selector. suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) if gotState, err = tcc.stateCh.Receive(ctx); err != nil { t.Fatalf("Error waiting for UpdateState to be called: %v", err) @@ -712,7 +712,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -772,7 +772,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -862,7 +862,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -912,7 +912,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // Perform TWO updates to ensure the old config selector does not hold a // reference to test-cluster-1. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -922,7 +922,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { }, nil) tcc.stateCh.Receive(ctx) // Ignore the first update. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -961,7 +961,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // test-cluster-1. 
res.OnCommitted() - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -1012,7 +1012,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) @@ -1020,7 +1020,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, @@ -1040,7 +1040,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
suErr2 := errors.New("bad serviceupdate 2") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr2) + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr2) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr2 { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr2) } @@ -1066,7 +1066,7 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != context.DeadlineExceeded { t.Fatalf("ClientConn.ReportError() received %v, %v, want channel recv timeout", gotErrVal, gotErr) @@ -1295,7 +1295,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ + xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{targetStr}, diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 727e95b94f13..821424a11a36 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -84,6 +84,13 @@ func (s ServingMode) String() string { // non-nil error if the server has transitioned into not-serving mode. type ServingModeCallback func(addr net.Addr, mode ServingMode, err error) +// DrainCallback is the callback that an xDS-enabled server registers to get +// notified about updates to the Listener configuration. 
The server is expected +// to gracefully shutdown existing connections, thereby forcing clients to +// reconnect and have the new configuration applied to the newly created +// connections. +type DrainCallback func(addr net.Addr) + func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", p)) } @@ -92,6 +99,7 @@ func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { // the listenerWrapper. type XDSClient interface { WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func() BootstrapConfig() *bootstrap.Config } @@ -108,6 +116,9 @@ type ListenerWrapperParams struct { XDSClient XDSClient // ModeCallback is the callback to invoke when the serving mode changes. ModeCallback ServingModeCallback + // DrainCallback is the callback to invoke when the Listener gets a LDS + // update. + DrainCallback DrainCallback } // NewListenerWrapper creates a new listenerWrapper with params. 
It returns a @@ -122,10 +133,13 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru xdsCredsInUse: params.XDSCredsInUse, xdsC: params.XDSClient, modeCallback: params.ModeCallback, + drainCallback: params.DrainCallback, isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), - closed: grpcsync.NewEvent(), - goodUpdate: grpcsync.NewEvent(), + closed: grpcsync.NewEvent(), + goodUpdate: grpcsync.NewEvent(), + ldsUpdateCh: make(chan ldsUpdateWithError, 1), + rdsUpdateCh: make(chan rdsHandlerUpdate, 1), } lw.logger = prefixLogger(lw) @@ -134,15 +148,23 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru lisAddr := lw.Listener.Addr().String() lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) + lw.rdsHandler = newRDSHandler(lw.xdsC, lw.rdsUpdateCh) + cancelWatch := lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) lw.logger.Infof("Watch started on resource name %v", lw.name) lw.cancelWatch = func() { cancelWatch() lw.logger.Infof("Watch cancelled on resource name %v", lw.name) } + go lw.run() return lw, lw.goodUpdate.Done() } +type ldsUpdateWithError struct { + update xdsclient.ListenerUpdate + err error +} + // listenerWrapper wraps the net.Listener associated with the listening address // passed to Serve(). It also contains all other state associated with this // particular invocation of Serve(). @@ -155,6 +177,7 @@ type listenerWrapper struct { xdsC XDSClient cancelWatch func() modeCallback ServingModeCallback + drainCallback DrainCallback // Set to true if the listener is bound to the IP_ANY address (which is // "0.0.0.0" for IPv4 and "::" for IPv6). @@ -185,6 +208,16 @@ type listenerWrapper struct { mode ServingMode // Filter chains received as part of the last good update. filterChains *xdsclient.FilterChainManager + // rdsHandler is used for any dynamic RDS resources specified in a LDS + // update. 
+ rdsHandler *rdsHandler + // rdsUpdates are the RDS resources received from the management + // server, keyed on the RouteName of the RDS resource. + rdsUpdates map[string]xdsclient.RouteConfigUpdate // TODO: if this will be read in accept, this will need a read lock as well. + // ldsUpdateCh is a channel for XDSClient LDS updates. + ldsUpdateCh chan ldsUpdateWithError + // rdsUpdateCh is a channel for XDSClient RDS updates. + rdsUpdateCh chan rdsHandlerUpdate } // Accept blocks on an Accept() on the underlying listener, and wraps the @@ -264,6 +297,10 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } + // TODO: once matched an accepted connection to a filter chain, + // instantiate the HTTP filters in the filter chain + the filter + // overrides, pipe filters and route into connection, which will + // eventually be passed to xdsUnary/Stream interceptors. return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil } } @@ -277,25 +314,76 @@ func (l *listenerWrapper) Close() error { if l.cancelWatch != nil { l.cancelWatch() } + l.rdsHandler.close() return nil } +// run is a long running goroutine which handles all xds updates. LDS and RDS +// push updates onto a channel which is read and acted upon from this goroutine. +func (l *listenerWrapper) run() { + for { + select { + case <-l.closed.Done(): + return + case u := <-l.ldsUpdateCh: + l.handleLDSUpdate(u) + case u := <-l.rdsUpdateCh: + l.handleRDSUpdate(u) + } + } +} + +// handleLDSUpdate is the callback which handles LDS Updates. It writes the +// received update to the update channel, which is picked up by the run +// goroutine. 
func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, err error) { if l.closed.HasFired() { l.logger.Warningf("Resource %q received update: %v with error: %v, after listener was closed", l.name, update, err) return } + // Remove any existing entry in ldsUpdateCh and replace with the new one, as the only update + // listener cares about is most recent update. + select { + case <-l.ldsUpdateCh: + default: + } + l.ldsUpdateCh <- ldsUpdateWithError{update: update, err: err} +} - if err != nil { - l.logger.Warningf("Received error for resource %q: %+v", l.name, err) - if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - l.switchMode(nil, ServingModeNotServing, err) +// handleRDSUpdate handles a full rds update from rds handler. On a successful +// update, the server will switch to ServingModeServing as the full +// configuration (both LDS and RDS) has been received. +func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { + if l.closed.HasFired() { + l.logger.Warningf("RDS received update: %v with error: %v, after listener was closed", update.updates, update.err) + return + } + if update.err != nil { + l.logger.Warningf("Received error for rds names specified in resource %q: %+v", l.name, update.err) + if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + l.switchMode(nil, ServingModeNotServing, update.err) } // For errors which are anything other than "resource-not-found", we // continue to use the old configuration. 
return } - l.logger.Infof("Received update for resource %q: %+v", l.name, update) + l.rdsUpdates = update.updates + + l.switchMode(l.filterChains, ServingModeServing, nil) + l.goodUpdate.Fire() +} + +func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { + if update.err != nil { + l.logger.Warningf("Received error for resource %q: %+v", l.name, update.err) + if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + l.switchMode(nil, ServingModeNotServing, update.err) + } + // For errors which are anything other than "resource-not-found", we + // continue to use the old configuration. + return + } + l.logger.Infof("Received update for resource %q: %+v", l.name, update.update) // Make sure that the socket address on the received Listener resource // matches the address of the net.Listener passed to us by the user. This @@ -309,14 +397,30 @@ func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, // What this means is that the XDSClient has ACKed a resource which can push // the server into a "not serving" mode. This is not ideal, but this is // what we have decided to do. See gRPC A36 for more details. - ilc := update.InboundListenerCfg + ilc := update.update.InboundListenerCfg if ilc.Address != l.addr || ilc.Port != l.port { l.switchMode(nil, ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) return } - l.switchMode(ilc.FilterChains, ServingModeServing, nil) - l.goodUpdate.Fire() + // "Updates to a Listener cause all older connections on that Listener to be + // gracefully shut down with a grace period of 10 minutes for long-lived + // RPC's, such that clients will reconnect and have the updated + // configuration apply." - A36 Note that this is not the same as moving the + // Server's state to ServingModeNotServing. 
That prevents new connections + // from being accepted, whereas here we simply want the clients to reconnect + // to get the updated configuration. + if l.drainCallback != nil { + l.drainCallback(l.Listener.Addr()) + } + l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames) + // If there are no dynamic RDS Configurations still needed to be received + // from the management server, this listener has all the configuration + // needed, and is ready to serve. + if len(ilc.FilterChains.RouteConfigNames) == 0 { + l.switchMode(ilc.FilterChains, ServingModeServing, nil) + l.goodUpdate.Fire() + } } func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode ServingMode, err error) { diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 088fb01d78a1..41cba859223b 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -48,6 +48,50 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond ) +var listenerWithRouteConfiguration = &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourcePorts: []uint32{80}, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + 
}, + RouteConfigName: "route-1", + }, + }, + }), + }, + }, + }, + }, + }, +} + var listenerWithFilterChains = &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { @@ -221,7 +265,7 @@ func (s) TestNewListenerWrapper(t *testing.T) { t.Fatalf("error when waiting for a watch on a Listener resource: %v", err) } if name != testListenerResourceName { - t.Fatalf("listenerWrapper registered a watch on %s, want %s", name, testListenerResourceName) + t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", name, testListenerResourceName) } // Push an error to the listener update handler. @@ -234,12 +278,18 @@ func (s) TestNewListenerWrapper(t *testing.T) { t.Fatalf("ready channel written to after receipt of a bad Listener update") } + fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } + // Push an update whose address does not match the address to which our // listener is bound, and verify that the ready channel is not written to. xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: "10.0.0.1", - Port: "50051", + Address: "10.0.0.1", + Port: "50051", + FilterChains: fcm, }}, nil) timer = time.NewTimer(defaultTestShortTimeout) select { @@ -250,11 +300,16 @@ func (s) TestNewListenerWrapper(t *testing.T) { } // Push a good update, and verify that the ready channel is written to. + // Since there are no dynamic RDS updates needed to be received, the + // ListenerWrapper does not have to wait for anything else before telling + // that it is ready. 
xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: fakeListenerHost, - Port: strconv.Itoa(fakeListenerPort), + Address: fakeListenerHost, + Port: strconv.Itoa(fakeListenerPort), + FilterChains: fcm, }}, nil) + select { case <-ctx.Done(): t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good Listener update") @@ -262,6 +317,75 @@ func (s) TestNewListenerWrapper(t *testing.T) { } } +// TestNewListenerWrapperWithRouteUpdate tests the scenario where the listener +// gets built, starts a watch, that watch returns a list of Route Names to +// return, than receives an update from the rds handler. Only after receiving +// the update from the rds handler should it move the server to +// ServingModeServing. +func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { + _, readyCh, xdsC, _, cleanup := newListenerWrapper(t) + defer cleanup() + + // Verify that the listener wrapper registers a listener watch for the + // expected Listener resource name. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + name, err := xdsC.WaitForWatchListener(ctx) + if err != nil { + t.Fatalf("error when waiting for a watch on a Listener resource: %v", err) + } + if name != testListenerResourceName { + t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", name, testListenerResourceName) + } + fcm, err := xdsclient.NewFilterChainManager(listenerWithRouteConfiguration) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } + + // Push a good update which contains a Filter Chain that specifies dynamic + // RDS Resources that need to be received. This should ping rds handler + // about which rds names to start, which will eventually start a watch on + // xds client for rds name "route-1". 
+ xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + InboundListenerCfg: &xdsclient.InboundListenerConfig{ + Address: fakeListenerHost, + Port: strconv.Itoa(fakeListenerPort), + FilterChains: fcm, + }}, nil) + + // This should start a watch on xds client for rds name "route-1". + routeName, err := xdsC.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("error when waiting for a watch on a Route resource: %v", err) + } + if routeName != "route-1" { + t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", routeName, "route-1") + } + + // This shouldn't invoke good update channel, as has not received rds updates yet. + timer := time.NewTimer(defaultTestShortTimeout) + select { + case <-timer.C: + timer.Stop() + case <-readyCh: + t.Fatalf("ready channel written to without rds configuration specified") + } + + // Invoke rds callback for the started rds watch. This valid rds callback + // should trigger the listener wrapper to fire GoodUpdate, as it has + // received both it's LDS Configuration and also RDS Configuration, + // specified in LDS Configuration. + xdsC.InvokeWatchRouteConfigCallback("route-1", xdsclient.RouteConfigUpdate{}, nil) + + // All of the xDS updates have completed, so can expect to send a ping on + // good update channel. + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good rds update") + case <-readyCh: + } +} + func (s) TestListenerWrapper_Accept(t *testing.T) { boCh := testutils.NewChannel() origBackoffFunc := backoffFunc diff --git a/xds/internal/server/rds_handler.go b/xds/internal/server/rds_handler.go new file mode 100644 index 000000000000..cc676c4ca05f --- /dev/null +++ b/xds/internal/server/rds_handler.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient" +) + +// rdsHandlerUpdate wraps the full RouteConfigUpdate that are dynamically +// queried for a given server side listener. +type rdsHandlerUpdate struct { + updates map[string]xdsclient.RouteConfigUpdate + err error +} + +// rdsHandler handles any RDS queries that need to be started for a given server +// side listeners Filter Chains (i.e. not inline). +type rdsHandler struct { + xdsC XDSClient + + mu sync.Mutex + updates map[string]xdsclient.RouteConfigUpdate + cancels map[string]func() + + // For a rdsHandler update, the only update wrapped listener cares about is + // most recent one, so this channel will be opportunistically drained before + // sending any new updates. + updateChannel chan rdsHandlerUpdate +} + +// newRDSHandler creates a new rdsHandler to watch for RDS resources. +// listenerWrapper updates the list of route names to watch by calling +// updateRouteNamesToWatch() upon receipt of new Listener configuration. +func newRDSHandler(xdsC XDSClient, ch chan rdsHandlerUpdate) *rdsHandler { + return &rdsHandler{ + xdsC: xdsC, + updateChannel: ch, + updates: make(map[string]xdsclient.RouteConfigUpdate), + cancels: make(map[string]func()), + } +} + +// updateRouteNamesToWatch handles a list of route names to watch for a given +// server side listener (if a filter chain specifies dynamic RDS configuration). 
+// This function handles all the logic with respect to any routes that may have +// been added or deleted as compared to what was previously present. +func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) { + rh.mu.Lock() + defer rh.mu.Unlock() + // Add and start watches for any routes for any new routes in + // routeNamesToWatch. + for routeName := range routeNamesToWatch { + if _, ok := rh.cancels[routeName]; !ok { + func(routeName string) { + rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsclient.RouteConfigUpdate, err error) { + rh.handleRouteUpdate(routeName, update, err) + }) + }(routeName) + } + } + + // Delete and cancel watches for any routes from persisted routeNamesToWatch + // that are no longer present. + for routeName := range rh.cancels { + if _, ok := routeNamesToWatch[routeName]; !ok { + rh.cancels[routeName]() + delete(rh.cancels, routeName) + delete(rh.updates, routeName) + } + } + + // If the full list (determined by length) of updates are now successfully + // updated, the listener is ready to be updated. + if len(rh.updates) == len(rh.cancels) && len(routeNamesToWatch) != 0 { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{updates: rh.updates}) + } +} + +// handleRouteUpdate persists the route config for a given route name, and also +// sends an update to the Listener Wrapper on an error received or if the rds +// handler has a full collection of updates. +func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsclient.RouteConfigUpdate, err error) { + if err != nil { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{err: err}) + return + } + rh.mu.Lock() + defer rh.mu.Unlock() + rh.updates[routeName] = update + + // If the full list (determined by length) of updates have successfully + // updated, the listener is ready to be updated. 
+ if len(rh.updates) == len(rh.cancels) { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{updates: rh.updates}) + } +} + +func drainAndPush(ch chan rdsHandlerUpdate, update rdsHandlerUpdate) { + select { + case <-ch: + default: + } + ch <- update +} + +// close() is meant to be called by wrapped listener when the wrapped listener +// is closed, and it cleans up resources by canceling all the active RDS +// watches. +func (rh *rdsHandler) close() { + rh.mu.Lock() + defer rh.mu.Unlock() + for _, cancel := range rh.cancels { + cancel() + } +} diff --git a/xds/internal/server/rds_handler_test.go b/xds/internal/server/rds_handler_test.go new file mode 100644 index 000000000000..b20d47c3001d --- /dev/null +++ b/xds/internal/server/rds_handler_test.go @@ -0,0 +1,403 @@ +// +build go1.12 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" +) + +const ( + route1 = "route1" + route2 = "route2" + route3 = "route3" +) + +// setupTests creates a rds handler with a fake xds client for control over the +// xds client. 
+func setupTests() (*rdsHandler, *fakeclient.Client, chan rdsHandlerUpdate) { + xdsC := fakeclient.NewClient() + ch := make(chan rdsHandlerUpdate, 1) + rh := newRDSHandler(xdsC, ch) + return rh, xdsC, ch +} + +// waitForFuncWithNames makes sure that a blocking function returns the correct +// set of names, where order doesn't matter. This takes away nondeterminism from +// ranging through a map. +func waitForFuncWithNames(ctx context.Context, f func(context.Context) (string, error), names ...string) error { + wantNames := make(map[string]bool, len(names)) + for _, name := range names { + wantNames[name] = true + } + gotNames := make(map[string]bool, len(names)) + for range wantNames { + name, err := f(ctx) + if err != nil { + return err + } + gotNames[name] = true + } + if !cmp.Equal(gotNames, wantNames) { + return fmt.Errorf("got routeNames %v, want %v", gotNames, wantNames) + } + return nil +} + +// TestSuccessCaseOneRDSWatch tests the simplest scenario: the rds handler +// receives a single route name, starts a watch for that route name, gets a +// successful update, and then writes an update to the update channel for +// listener to pick up. +func (s) TestSuccessCaseOneRDSWatch(t *testing.T) { + rh, fakeClient, ch := setupTests() + // When you first update the rds handler with a list of a single Route names + // that needs dynamic RDS Configuration, this Route name has not been seen + // before, so the RDS Handler should start a watch on that RouteName. + rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + // The RDS Handler should start a watch for that routeName. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route1 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) + } + rdsUpdate := xdsclient.RouteConfigUpdate{} + // Invoke callback with the xds client with a certain route update. Due to + // this route update updating every route name that rds handler handles, + // this should write to the update channel to send to the listener. + fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) + rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate} + select { + case rhu := <-ch: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + // Close the rds handler. This is meant to be called when the lis wrapper is + // closed, and the call should cancel all the watches present (for this + // test, a single watch). + rh.close() + routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error: %v", err) + } + if routeNameDeleted != route1 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) + } +} + +// TestSuccessCaseTwoUpdates tests the case where the rds handler receives an +// update with a single Route, then receives a second update with two routes. +// The handler should start a watch for the added route, and if received a RDS +// update for that route it should send an update with both RDS updates present. 
+func (s) TestSuccessCaseTwoUpdates(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route1 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) + } + + // Update the RDSHandler with route names which adds a route name to watch. + // This should trigger the RDSHandler to start a watch for the added route + // name to watch. + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true}) + gotRoute, err = fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route2 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route2) + } + + // Invoke the callback with an update for route 1. This shouldn't cause the + // handler to write an update, as it has not received RouteConfigurations + // for every RouteName. + rdsUpdate1 := xdsclient.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate1, nil) + + // The RDS Handler should not send an update. + sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCtxCancel() + select { + case <-ch: + t.Fatal("RDS Handler wrote an update to updateChannel when it shouldn't have, as each route name has not received an update yet") + case <-sCtx.Done(): + } + + // Invoke the callback with an update for route 2. This should cause the + // handler to write an update, as it has received RouteConfigurations for + // every RouteName. 
+ rdsUpdate2 := xdsclient.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) + // The RDS Handler should then update the listener wrapper with an update + // with two route configurations, as both route names the RDS Handler handles + // have received an update. + rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate1, route2: rdsUpdate2} + select { + case rhu := <-ch: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the rds handler update to be written to the update buffer.") + } + + // Close the rds handler. This is meant to be called when the lis wrapper is + // closed, and the call should cancel all the watches present (for this + // test, two watches on route1 and route2). + rh.close() + if err = waitForFuncWithNames(ctx, fakeClient.WaitForCancelRouteConfigWatch, route1, route2); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } +} + +// TestSuccessCaseDeletedRoute tests the case where the rds handler receives an +// update with two routes, then receives an update with only one route. The RDS +// Handler is expected to cancel the watch for the route no longer present. +func (s) TestSuccessCaseDeletedRoute(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Will start two watches. + if err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } + + // Update the RDSHandler with route names which deletes a route name to + // watch. This should trigger the RDSHandler to cancel the watch for the + // deleted route name to watch. 
+ rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + // This should delete the watch for route2. + routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error %v", err) + } + if routeNameDeleted != route2 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route2) + } + + rdsUpdate := xdsclient.RouteConfigUpdate{} + // Invoke callback with the xds client with a certain route update. Due to + // this route update updating every route name that rds handler handles, + // this should write to the update channel to send to the listener. + fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) + rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate} + select { + case rhu := <-ch: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + rh.close() + routeNameDeleted, err = fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error: %v", err) + } + if routeNameDeleted != route1 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) + } +} + +// TestSuccessCaseTwoUpdatesAddAndDeleteRoute tests the case where the rds +// handler receives an update with two routes, and then receives an update with +// two routes, one previously there and one added (i.e. 12 -> 23). This should +// cause the route that is no longer there to be deleted and cancelled, and the +// route that was added should have a watch started for it. 
+func (s) TestSuccessCaseTwoUpdatesAddAndDeleteRoute(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } + + // Update the rds handler with two routes, one which was already there and a new route. + // This should cause the rds handler to delete/cancel watch for route 1 and start a watch + // for route 3. + rh.updateRouteNamesToWatch(map[string]bool{route2: true, route3: true}) + + // Start watch comes first, which should be for route3 as was just added. + gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route3 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route3) + } + + // Then route 1 should be deleted/cancelled watch for, as it is no longer present + // in the new RouteName to watch map. + routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error: %v", err) + } + if routeNameDeleted != route1 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) + } + + // Invoke the callback with an update for route 2. This shouldn't cause the + // handler to write an update, as it has not received RouteConfigurations + // for every RouteName. + rdsUpdate2 := xdsclient.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) + + // The RDS Handler should not send an update. 
+ sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCtxCancel() + select { + case <-ch: + t.Fatalf("RDS Handler wrote an update to updateChannel when it shouldn't have, as each route name has not received an update yet") + case <-sCtx.Done(): + } + + // Invoke the callback with an update for route 3. This should cause the + // handler to write an update, as it has received RouteConfigurations for + // every RouteName. + rdsUpdate3 := xdsclient.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route3, rdsUpdate3, nil) + // The RDS Handler should then update the listener wrapper with an update + // with two route configurations, as both route names the RDS Handler handles + // have received an update. + rhuWant := map[string]xdsclient.RouteConfigUpdate{route2: rdsUpdate2, route3: rdsUpdate3} + select { + case rhu := <-rh.updateChannel: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the rds handler update to be written to the update buffer.") + } + // Close the rds handler. This is meant to be called when the lis wrapper is + // closed, and the call should cancel all the watches present (for this + // test, two watches on route2 and route3). + rh.close() + if err = waitForFuncWithNames(ctx, fakeClient.WaitForCancelRouteConfigWatch, route2, route3); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } +} + +// TestSuccessCaseSecondUpdateMakesRouteFull tests the scenario where the rds handler gets +// told to watch three rds configurations, gets two successful updates, then gets told to watch +// only those two. The rds handler should then write an update to update buffer. 
+func (s) TestSuccessCaseSecondUpdateMakesRouteFull(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true, route3: true}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2, route3); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } + + // Invoke the callbacks for two of the three watches. Since RDS is not full, + // this shouldn't trigger rds handler to write an update to update buffer. + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsclient.RouteConfigUpdate{}, nil) + fakeClient.InvokeWatchRouteConfigCallback(route2, xdsclient.RouteConfigUpdate{}, nil) + + // The RDS Handler should not send an update. + sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCtxCancel() + select { + case <-rh.updateChannel: + t.Fatalf("RDS Handler wrote an update to updateChannel when it shouldn't have, as each route name has not received an update yet") + case <-sCtx.Done(): + } + + // Tell the rds handler to now only watch Route 1 and Route 2. This should + // trigger the rds handler to write an update to the update buffer as it now + // has full rds configuration. + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true}) + // Route 3 should be deleted/cancelled watch for, as it is no longer present + // in the new RouteName to watch map. 
+ routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error: %v", err) + } + if routeNameDeleted != route3 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) + } + rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: {}, route2: {}} + select { + case rhu := <-ch: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the rds handler update to be written to the update buffer.") + } +} + +// TestErrorReceived tests the case where the rds handler receives a route name +// to watch, then receives an update with an error. This error should be then +// written to the update channel. +func (s) TestErrorReceived(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error %v", err) + } + if gotRoute != route1 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) + } + + rdsErr := errors.New("some error") + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsclient.RouteConfigUpdate{}, rdsErr) + select { + case rhu := <-ch: + if rhu.err.Error() != "some error" { + t.Fatalf("Did not receive the expected error, instead received: %v", rhu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel") + } +} diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 255454080360..b582fd9bee91 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -52,7 +52,7 @@ type Client struct { 
bootstrapCfg *bootstrap.Config ldsCb func(xdsclient.ListenerUpdate, error) - rdsCb func(xdsclient.RouteConfigUpdate, error) + rdsCbs map[string]func(xdsclient.RouteConfigUpdate, error) cdsCbs map[string]func(xdsclient.ClusterUpdate, error) edsCbs map[string]func(xdsclient.EndpointsUpdate, error) @@ -95,10 +95,10 @@ func (xdsC *Client) WaitForCancelListenerWatch(ctx context.Context) error { // WatchRouteConfig registers a RDS watch. func (xdsC *Client) WatchRouteConfig(routeName string, callback func(xdsclient.RouteConfigUpdate, error)) func() { - xdsC.rdsCb = callback + xdsC.rdsCbs[routeName] = callback xdsC.rdsWatchCh.Send(routeName) return func() { - xdsC.rdsCancelCh.Send(nil) + xdsC.rdsCancelCh.Send(routeName) } } @@ -116,15 +116,28 @@ func (xdsC *Client) WaitForWatchRouteConfig(ctx context.Context) (string, error) // // Not thread safe with WatchRouteConfig. Only call this after // WaitForWatchRouteConfig. -func (xdsC *Client) InvokeWatchRouteConfigCallback(update xdsclient.RouteConfigUpdate, err error) { - xdsC.rdsCb(update, err) +func (xdsC *Client) InvokeWatchRouteConfigCallback(name string, update xdsclient.RouteConfigUpdate, err error) { + if len(xdsC.rdsCbs) != 1 { + xdsC.rdsCbs[name](update, err) + return + } + // Keeps functionality with previous usage of this on client side, if single + // callback call that callback. + var routeName string + for route := range xdsC.rdsCbs { + routeName = route + } + xdsC.rdsCbs[routeName](update, err) } // WaitForCancelRouteConfigWatch waits for a RDS watch to be cancelled and returns // context.DeadlineExceeded otherwise. -func (xdsC *Client) WaitForCancelRouteConfigWatch(ctx context.Context) error { - _, err := xdsC.rdsCancelCh.Receive(ctx) - return err +func (xdsC *Client) WaitForCancelRouteConfigWatch(ctx context.Context) (string, error) { + val, err := xdsC.rdsCancelCh.Receive(ctx) + if err != nil { + return "", err + } + return val.(string), err } // WatchCluster registers a CDS watch. 
@@ -293,16 +306,17 @@ func NewClientWithName(name string) *Client { return &Client{ name: name, ldsWatchCh: testutils.NewChannel(), - rdsWatchCh: testutils.NewChannel(), + rdsWatchCh: testutils.NewChannelWithSize(10), cdsWatchCh: testutils.NewChannelWithSize(10), edsWatchCh: testutils.NewChannelWithSize(10), ldsCancelCh: testutils.NewChannel(), - rdsCancelCh: testutils.NewChannel(), + rdsCancelCh: testutils.NewChannelWithSize(10), cdsCancelCh: testutils.NewChannelWithSize(10), edsCancelCh: testutils.NewChannelWithSize(10), loadReportCh: testutils.NewChannel(), lrsCancelCh: testutils.NewChannel(), loadStore: load.NewStore(), + rdsCbs: make(map[string]func(xdsclient.RouteConfigUpdate, error)), cdsCbs: make(map[string]func(xdsclient.ClusterUpdate, error)), edsCbs: make(map[string]func(xdsclient.EndpointsUpdate, error)), Closed: grpcsync.NewEvent(), diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 85dbf0ee8619..53c450b3b60c 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -251,7 +251,6 @@ type InboundListenerConfig struct { // of interest to the registered RDS watcher. type RouteConfigUpdate struct { VirtualHosts []*VirtualHost - // Raw is the resource from the xds response. Raw *anypb.Any } diff --git a/xds/server.go b/xds/server.go index aad05d81d116..c096b1662b28 100644 --- a/xds/server.go +++ b/xds/server.go @@ -233,6 +233,11 @@ func (s *GRPCServer) Serve(lis net.Listener) error { err: err, }) }, + DrainCallback: func(addr net.Addr) { + if gs, ok := s.gs.(*grpc.Server); ok { + drainServerTransports(gs, addr.String()) + } + }, }) // Block until a good LDS response is received or the server is stopped. 
diff --git a/xds/server_test.go b/xds/server_test.go index df002dbabdcf..143e19d08c7e 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -35,6 +35,7 @@ import ( v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" @@ -53,6 +54,67 @@ const ( testServerListenerResourceNameTemplate = "/path/to/resource/%s/%s" ) +var listenerWithFilterChains = &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourcePorts: []uint32{80}, + }, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: 
&v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + }), + }, + }, + }, + }, + }, +} + type s struct { grpctest.Tester } @@ -73,9 +135,10 @@ func (f *fakeGRPCServer) RegisterService(*grpc.ServiceDesc, interface{}) { f.registerServiceCh.Send(nil) } -func (f *fakeGRPCServer) Serve(net.Listener) error { +func (f *fakeGRPCServer) Serve(lis net.Listener) error { f.serveCh.Send(nil) <-f.done + lis.Close() return nil } @@ -377,12 +440,17 @@ func (s) TestServeSuccess(t *testing.T) { // Push a good LDS response, and wait for Serve() to be invoked on the // underlying grpc.Server. + fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } addr, port := splitHostPort(lis.Addr().String()) client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ RouteConfigName: "routeconfig", InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: addr, - Port: port, + Address: addr, + Port: port, + FilterChains: fcm, }, }, nil) if _, err := fs.serveCh.Receive(ctx); err != nil { @@ -404,8 +472,9 @@ func (s) TestServeSuccess(t *testing.T) { client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ RouteConfigName: "routeconfig", InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: "10.20.30.40", - Port: "666", + Address: "10.20.30.40", + Port: "666", + FilterChains: fcm, }, }, nil) sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) From a42567fe92f005c47e60146bdbb0d5f7fc232219 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 12 Aug 2021 11:12:02 -0700 Subject: [PATCH 190/998] xds: support 
picking ringhash in xds client and cds policy (#4657) --- .../balancer/cdsbalancer/cdsbalancer.go | 15 +++ .../cdsbalancer/cdsbalancer_security_test.go | 14 +-- .../balancer/cdsbalancer/cdsbalancer_test.go | 34 ++++-- .../balancer/cdsbalancer/cluster_handler.go | 9 ++ .../cdsbalancer/cluster_handler_test.go | 23 +++- xds/internal/xdsclient/cds_test.go | 111 +++++++++++++++++- xds/internal/xdsclient/client.go | 17 +++ xds/internal/xdsclient/xds.go | 37 +++++- 8 files changed, 238 insertions(+), 22 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 3ea14add9ebf..9fb3e9bd48ae 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -32,9 +32,11 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -333,6 +335,19 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { DiscoveryMechanisms: dms, } + // lbPolicy is set only when the policy is ringhash. The default (when it's + // not set) is roundrobin. And similarly, we only need to set XDSLBPolicy + // for ringhash (it also defaults to roundrobin). 
+ if lbp := update.lbPolicy; lbp != nil { + lbCfg.XDSLBPolicy = &internalserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{ + MinRingSize: lbp.MinimumRingSize, + MaxRingSize: lbp.MaximumRingSize, + }, + } + } + ccState := balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), BalancerConfig: lbCfg, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 7eb1d0889395..ddab01d8a51b 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -253,7 +253,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -309,7 +309,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -465,7 +465,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -499,7 +499,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -552,7 +552,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -602,7 +602,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -680,7 +680,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { SubjectAltNameMatchers: testSANMatchers, }, } - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 864af36857bc..fe754a529fa7 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -32,10 +32,12 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpctest" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/ringhash" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" @@ -131,8 +133,8 @@ func (tb *testEDSBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS if xdsclient.FromResolverState(gotCCS.ResolverState) == nil { return fmt.Errorf("want resolver state with XDSClient attached, got one without") } - if !cmp.Equal(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes")) { - return 
fmt.Errorf("received ClientConnState: %+v, want %+v", gotCCS, wantCCS) + if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes")); diff != "" { + return fmt.Errorf("received unexpected ClientConnState, diff (-got +want): %v", diff) } return nil } @@ -196,7 +198,7 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { // edsCCS is a helper function to construct a good update passed from the // cdsBalancer to the edsBalancer. -func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { +func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *internalserviceconfig.BalancerConfig) balancer.ClientConnState { discoveryMechanism := clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: service, @@ -208,6 +210,7 @@ func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientCon } lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: []clusterresolver.DiscoveryMechanism{discoveryMechanism}, + XDSLBPolicy: xdslbpolicy, } return balancer.ClientConnState{ @@ -361,12 +364,23 @@ func (s) TestHandleClusterUpdate(t *testing.T) { { name: "happy-case-with-lrs", cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, - wantCCS: edsCCS(serviceName, nil, true), + wantCCS: edsCCS(serviceName, nil, true, nil), }, { name: "happy-case-without-lrs", cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName}, - wantCCS: edsCCS(serviceName, nil, false), + wantCCS: edsCCS(serviceName, nil, false, nil), + }, + { + name: "happy-case-with-ring-hash-lb-policy", + cdsUpdate: xdsclient.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + }, + wantCCS: edsCCS(serviceName, nil, false, &internalserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{MinRingSize: 10, MaxRingSize: 100}, + }), }, } @@ 
-434,7 +448,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -519,7 +533,7 @@ func (s) TestResolverError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -568,7 +582,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -603,7 +617,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // the service's counter with the new max requests. 
var maxRequests uint32 = 1 cdsUpdate := xdsclient.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} - wantCCS := edsCCS(clusterName, &maxRequests, false) + wantCCS := edsCCS(clusterName, &maxRequests, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -636,7 +650,7 @@ func (s) TestClose(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index 1f5acafe110b..163a8c0a2e18 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -32,6 +32,14 @@ var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a type clusterHandlerUpdate struct { // securityCfg is the Security Config from the top (root) cluster. securityCfg *xdsclient.SecurityConfig + // lbPolicy is the lb policy from the top (root) cluster. + // + // Currently, we only support roundrobin or ringhash, and since roundrobin + // does need configs, this is only set to the ringhash config, if the policy + // is ringhash. In the future, if we support more policies, we can make this + // an interface, and set it to config of the other policies. + lbPolicy *xdsclient.ClusterLBPolicyRingHash + // updates is a list of ClusterUpdates from all the leaf clusters. 
updates []xdsclient.ClusterUpdate err error @@ -101,6 +109,7 @@ func (ch *clusterHandler) constructClusterUpdate() { } ch.updateChannel <- clusterHandlerUpdate{ securityCfg: ch.root.clusterUpdate.SecurityCfg, + lbPolicy: ch.root.clusterUpdate.LBPolicy, updates: clusterUpdate, } } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index dc69dd34e2af..cdc84f44d9a3 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -53,20 +53,34 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { name string clusterName string clusterUpdate xdsclient.ClusterUpdate + lbPolicy *xdsclient.ClusterLBPolicyRingHash }{ - {name: "test-update-root-cluster-EDS-success", + { + name: "test-update-root-cluster-EDS-success", clusterName: edsService, clusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeEDS, ClusterName: edsService, - }}, + }, + }, + { + name: "test-update-root-cluster-EDS-with-ring-hash", + clusterName: logicalDNSService, + clusterUpdate: xdsclient.ClusterUpdate{ + ClusterType: xdsclient.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService, + LBPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + }, + lbPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + }, { name: "test-update-root-cluster-Logical-DNS-success", clusterName: logicalDNSService, clusterUpdate: xdsclient.ClusterUpdate{ ClusterType: xdsclient.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, - }}, + }, + }, } for _, test := range tests { @@ -98,6 +112,9 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{test.clusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } + if diff := cmp.Diff(chu.lbPolicy, test.lbPolicy); diff != "" { + t.Fatalf("got 
unexpected lb policy in cluster update, diff (-got, +want): %v", diff) + } case <-ctx.Done(): t.Fatal("Timed out waiting for update from update channel.") } diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index 88bfe21a7bdb..e2c7705d5a45 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -108,7 +108,7 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantErr: true, }, { - name: "non-round-robin-lb-policy", + name: "non-round-robin-or-ring-hash-lb-policy", cluster: &v3clusterpb.Cluster{ ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ @@ -140,6 +140,59 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantUpdate: emptyUpdate, wantErr: true, }, + { + name: "ring-hash-hash-function-not-xx-hash", + cluster: &v3clusterpb.Cluster{ + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + HashFunction: v3clusterpb.Cluster_RingHashLbConfig_MURMUR_HASH_2, + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "ring-hash-min-bound-greater-than-max", + cluster: &v3clusterpb.Cluster{ + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(100), + MaximumRingSize: wrapperspb.UInt64(10), + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "ring-hash-min-bound-greater-than-upper-bound", + cluster: &v3clusterpb.Cluster{ + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: 
"ring-hash-max-bound-greater-than-upper-bound", + cluster: &v3clusterpb.Cluster{ + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MaximumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, } oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv @@ -301,6 +354,62 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, }, + { + name: "happiest-case-with-ring-hash-lb-policy-with-default-config", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, + LBPolicy: &ClusterLBPolicyRingHash{MinimumRingSize: defaultRingHashMinSize, MaximumRingSize: defaultRingHashMaxSize}, + }, + }, + { + name: "happiest-case-with-ring-hash-lb-policy-with-none-default-config", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: 
v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }, + }, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, + LBPolicy: &ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + }, + }, } oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 53c450b3b60c..754a025678a9 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -421,6 +421,13 @@ const ( ClusterTypeAggregate ) +// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its +// config. +type ClusterLBPolicyRingHash struct { + MinimumRingSize uint64 + MaximumRingSize uint64 +} + // ClusterUpdate contains information from a received CDS response, which is of // interest to the registered CDS watcher. type ClusterUpdate struct { @@ -443,6 +450,16 @@ type ClusterUpdate struct { // a prioritized list of cluster names. PrioritizedClusterNames []string + // LBPolicy is the lb policy for this cluster. + // + // This only support round_robin and ring_hash. + // - if it's nil, the lb policy is round_robin + // - if it's not nil, the lb policy is ring_hash, the this field has the config. + // + // When we add more support policies, this can be made an interface, and + // will be set to different types based on the policy type. + LBPolicy *ClusterLBPolicyRingHash + // Raw is the resource from the xds response. 
Raw *anypb.Any } diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 656ad854e670..569913936d56 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -574,8 +574,42 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin return cluster.GetName(), cu, nil } +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M +) + func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - if cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN { + var lbPolicy *ClusterLBPolicyRingHash + switch cluster.GetLbPolicy() { + case v3clusterpb.Cluster_ROUND_ROBIN: + lbPolicy = nil // The default is round_robin, and there's no config to set. + case v3clusterpb.Cluster_RING_HASH: + rhc := cluster.GetRingHashLbConfig() + if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH { + return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster) + } + // Minimum defaults to 1024 entries, and limited to 8M entries Maximum + // defaults to 8M entries, and limited to 8M entries + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhc.GetMinimumRingSize(); min != nil { + if min.GetValue() > ringHashSizeUpperBound { + return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash mininum ring size %v in response: %+v", min.GetValue(), cluster) + } + minSize = min.GetValue() + } + if max := rhc.GetMaximumRingSize(); max != nil { + if max.GetValue() > ringHashSizeUpperBound { + return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maxinum ring size %v in response: %+v", max.GetValue(), cluster) + } + maxSize = max.GetValue() + } + if minSize > maxSize { + return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize) + } + lbPolicy 
= &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize} + default: return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } @@ -594,6 +628,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu EnableLRS: cluster.GetLrsServer().GetSelf() != nil, SecurityCfg: sc, MaxRequests: circuitBreakersFromCluster(cluster), + LBPolicy: lbPolicy, } // Validate and set cluster type from the response. From 52cea2453436fbb4b962d3cb2da34da7ef6f10c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=90=B4=E4=BA=B2=E5=BA=93=E9=87=8C?= <36129334+wuqinqiang@users.noreply.github.com> Date: Thu, 19 Aug 2021 04:31:22 +0800 Subject: [PATCH 191/998] server: fix net.conn closed twice (#4663) --- server.go | 1 - 1 file changed, 1 deletion(-) diff --git a/server.go b/server.go index 0251f48daf1d..596384fdc41c 100644 --- a/server.go +++ b/server.go @@ -909,7 +909,6 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } From 8ab16ef276a33df4cdb106446eeff40ff56a6928 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 18 Aug 2021 15:04:35 -0700 Subject: [PATCH 192/998] balancer: add ExitIdle optional interface (#4673) --- balancer/balancer.go | 46 +++++++++++++------ balancer/base/balancer.go | 5 ++ balancer/grpclb/grpclb.go | 2 + balancer/rls/internal/balancer.go | 4 ++ balancer_conn_wrappers.go | 39 +++++++++++++--- balancer_switching_test.go | 2 + clientconn.go | 14 +++--- internal/balancer/stub/stub.go | 7 +++ pickfirst.go | 6 +++ test/balancer_test.go | 7 ++- .../balancer/balancergroup/balancergroup.go | 27 +++++++++++ .../balancer/cdsbalancer/cdsbalancer.go | 18 ++++++++ .../balancer/cdsbalancer/cdsbalancer_test.go | 38 
++++++++++++++- .../balancer/clusterimpl/clusterimpl.go | 15 ++++++ .../balancer/clustermanager/clustermanager.go | 4 ++ .../clusterresolver/clusterresolver.go | 18 ++++++++ .../clusterresolver/clusterresolver_test.go | 3 ++ xds/internal/balancer/priority/balancer.go | 4 ++ .../balancer/weightedtarget/weightedtarget.go | 4 ++ 19 files changed, 231 insertions(+), 32 deletions(-) diff --git a/balancer/balancer.go b/balancer/balancer.go index 6a1b779edc26..34c435d90720 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -75,24 +75,26 @@ func Get(name string) Builder { return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. 
+// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -326,6 +328,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. 
diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index c883efa0bbf5..b1286533e73c 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -251,6 +251,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index 49d11d0d2e21..adf596111604 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -488,3 +488,5 @@ func (lb *lbBalancer) Close() { } lb.cc.close() } + +func (lb *lbBalancer) ExitIdle() {} diff --git a/balancer/rls/internal/balancer.go b/balancer/rls/internal/balancer.go index 7af97b76faf1..b23783bf9da4 100644 --- a/balancer/rls/internal/balancer.go +++ b/balancer/rls/internal/balancer.go @@ -129,6 +129,10 @@ func (lb *rlsBalancer) Close() { } } +func (lb *rlsBalancer) ExitIdle() { + // TODO: are we 100% sure this should be a nop? +} + // updateControlChannel updates the RLS client if required. // Caller must hold lb.mu. func (lb *rlsBalancer) updateControlChannel(newCfg *lbConfig) { diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 0ddb24f375f4..f4ea61746823 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -37,15 +37,20 @@ type scStateUpdate struct { err error } +// exitIdle contains no data and is just a signal sent on the updateCh in +// ccBalancerWrapper to instruct the balancer to exit idle. +type exitIdle struct{} + // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. 
type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + hasExitIdle bool + updateCh *buffer.Unbounded + closed *grpcsync.Event + done *grpcsync.Event mu sync.Mutex subConns map[*acBalancerWrapper]struct{} @@ -61,6 +66,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui } go ccb.watcher() ccb.balancer = b.Build(ccb, bopts) + _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) return ccb } @@ -86,6 +92,17 @@ func (ccb *ccBalancerWrapper) watcher() { ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) } ccb.mu.Unlock() + case exitIdle: + if ccb.cc.GetState() == connectivity.Idle { + if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { + // We already checked that the balancer implements + // ExitIdle before pushing the event to updateCh, but + // check conditionally again as defensive programming. 
+ ccb.balancerMu.Lock() + ei.ExitIdle() + ccb.balancerMu.Unlock() + } + } default: logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) } @@ -118,6 +135,14 @@ func (ccb *ccBalancerWrapper) close() { <-ccb.done.Done() } +func (ccb *ccBalancerWrapper) exitIdle() bool { + if !ccb.hasExitIdle { + return false + } + ccb.updateCh.Put(exitIdle{}) + return true +} + func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be @@ -144,8 +169,8 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat func (ccb *ccBalancerWrapper) resolverError(err error) { ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { diff --git a/balancer_switching_test.go b/balancer_switching_test.go index e9fee87d8f81..5d9a1f9fffc1 100644 --- a/balancer_switching_test.go +++ b/balancer_switching_test.go @@ -58,6 +58,8 @@ func (b *magicalLB) UpdateClientConnState(balancer.ClientConnState) error { func (b *magicalLB) Close() {} +func (b *magicalLB) ExitIdle() {} + func init() { balancer.Register(&magicalLB{}) } diff --git a/clientconn.go b/clientconn.go index b9e9eed4681b..62dc3bdaf52f 100644 --- a/clientconn.go +++ b/clientconn.go @@ -555,13 +555,13 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - if cc.GetState() == connectivity.Idle { - cc.mu.Lock() - for ac := range cc.conns { - // TODO: should this be a signal to the LB policy instead? 
- go ac.connect() - } - cc.mu.Unlock() + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { + return + } + for ac := range cc.conns { + go ac.connect() } } diff --git a/internal/balancer/stub/stub.go b/internal/balancer/stub/stub.go index e3757c1a50bc..950eaaa0278a 100644 --- a/internal/balancer/stub/stub.go +++ b/internal/balancer/stub/stub.go @@ -33,6 +33,7 @@ type BalancerFuncs struct { ResolverError func(*BalancerData, error) UpdateSubConnState func(*BalancerData, balancer.SubConn, balancer.SubConnState) Close func(*BalancerData) + ExitIdle func(*BalancerData) } // BalancerData contains data relevant to a stub balancer. @@ -75,6 +76,12 @@ func (b *bal) Close() { } } +func (b *bal) ExitIdle() { + if b.bf.ExitIdle != nil { + b.bf.ExitIdle(b.bd) + } +} + type bb struct { name string bf BalancerFuncs diff --git a/pickfirst.go b/pickfirst.go index d32161c748d6..f194d14a0816 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -124,6 +124,12 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.state == connectivity.Idle { + b.sc.Connect() + } +} + type picker struct { result balancer.PickResult err error diff --git a/test/balancer_test.go b/test/balancer_test.go index a6a8f726afa8..bb87ac834d6b 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -116,6 +116,8 @@ func (b *testBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubCon func (b *testBalancer) Close() {} +func (b *testBalancer) ExitIdle() {} + type picker struct { err error sc balancer.SubConn @@ -373,8 +375,9 @@ func (testBalancerKeepAddresses) UpdateSubConnState(sc balancer.SubConn, s balan panic("not used") } -func (testBalancerKeepAddresses) Close() { -} +func (testBalancerKeepAddresses) Close() {} + +func (testBalancerKeepAddresses) ExitIdle() {} // Make sure that non-grpclb balancers don't get grpclb addresses 
even if name // resolver sends them diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/xds/internal/balancer/balancergroup/balancergroup.go index 6d54728dc919..5798b03ac506 100644 --- a/xds/internal/balancer/balancergroup/balancergroup.go +++ b/xds/internal/balancer/balancergroup/balancergroup.go @@ -104,6 +104,22 @@ func (sbc *subBalancerWrapper) startBalancer() { } } +func (sbc *subBalancerWrapper) exitIdle() { + b := sbc.balancer + if b == nil { + return + } + if ei, ok := b.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + for sc, b := range sbc.group.scToSubBalancer { + if b == sbc { + sc.Connect() + } + } +} + func (sbc *subBalancerWrapper) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { b := sbc.balancer if b == nil { @@ -493,6 +509,17 @@ func (bg *BalancerGroup) Close() { bg.outgoingMu.Unlock() } +// ExitIdle should be invoked when the parent LB policy's ExitIdle is invoked. +// It will trigger this on all sub-balancers, or reconnect their subconns if +// not supported. +func (bg *BalancerGroup) ExitIdle() { + bg.outgoingMu.Lock() + for _, config := range bg.idToBalancerConfig { + config.exitIdle() + } + bg.outgoingMu.Unlock() +} + const ( serverLoadCPUName = "cpu_utilization" serverLoadMemoryName = "mem_utilization" diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 9fb3e9bd48ae..82d2a96958e2 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -141,6 +141,8 @@ type scUpdate struct { state balancer.SubConnState } +type exitIdle struct{} + // cdsBalancer implements a CDS based LB policy. It instantiates a // cluster_resolver balancer to further resolve the serviceName received from // CDS, into localities and endpoints. 
Implements the balancer.Balancer @@ -376,6 +378,18 @@ func (b *cdsBalancer) run() { break } b.childLB.UpdateSubConnState(update.subConn, update.state) + case exitIdle: + if b.childLB == nil { + b.logger.Errorf("xds: received ExitIdle with no child balancer") + break + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + } } case u := <-b.clusterHandler.updateChannel: b.handleWatchUpdate(u) @@ -494,6 +508,10 @@ func (b *cdsBalancer) Close() { <-b.done.Done() } +func (b *cdsBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) +} + // ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at // creation and intercepts the NewSubConn() and UpdateAddresses() call from the // child policy to add security configuration required by xDS credentials. diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index fe754a529fa7..c59810958024 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -1,3 +1,4 @@ +//go:build go1.12 // +build go1.12 /* @@ -85,7 +86,8 @@ type testEDSBalancer struct { // resolverErrCh is a channel used to signal a resolver error. resolverErrCh *testutils.Channel // closeCh is a channel used to signal the closing of this balancer. - closeCh *testutils.Channel + closeCh *testutils.Channel + exitIdleCh *testutils.Channel // parentCC is the balancer.ClientConn passed to this test balancer as part // of the Build() call. 
parentCC balancer.ClientConn @@ -102,6 +104,7 @@ func newTestEDSBalancer() *testEDSBalancer { scStateCh: testutils.NewChannel(), resolverErrCh: testutils.NewChannel(), closeCh: testutils.NewChannel(), + exitIdleCh: testutils.NewChannel(), } } @@ -122,6 +125,10 @@ func (tb *testEDSBalancer) Close() { tb.closeCh.Send(struct{}{}) } +func (tb *testEDSBalancer) ExitIdle() { + tb.exitIdleCh.Send(struct{}{}) +} + // waitForClientConnUpdate verifies if the testEDSBalancer receives the // provided ClientConnState within a reasonable amount of time. func (tb *testEDSBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS balancer.ClientConnState) error { @@ -705,6 +712,35 @@ func (s) TestClose(t *testing.T) { } } +func (s) TestExitIdle(t *testing.T) { + // This creates a CDS balancer, pushes a ClientConnState update with a fake + // xdsClient, and makes sure that the CDS balancer registers a watch on the + // provided xdsClient. + xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) + defer func() { + cancel() + cdsB.Close() + }() + + // Here we invoke the watch callback registered on the fake xdsClient. This + // will trigger the watch handler on the CDS balancer, which will attempt to + // create a new EDS balancer. The fake EDS balancer created above will be + // returned to the CDS balancer, because we have overridden the + // newChildBalancer function as part of test setup. + cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + wantCCS := edsCCS(serviceName, nil, false, nil) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { + t.Fatal(err) + } + + // Call ExitIdle on the CDS balancer. + cdsB.ExitIdle() + + edsB.exitIdleCh.Receive(ctx) +} + // TestParseConfig verifies the ParseConfig() method in the CDS balancer. 
func (s) TestParseConfig(t *testing.T) { bb := balancer.Get(cdsName) diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 1b49dccbc633..03d357b1f4e9 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -335,6 +335,21 @@ func (b *clusterImplBalancer) Close() { b.logger.Infof("Shutdown") } +func (b *clusterImplBalancer) ExitIdle() { + if b.childLB == nil { + return + } + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + // Fallback for children that don't support ExitIdle -- connect to all + // SubConns. + for _, sc := range b.scWrappers { + sc.Connect() + } +} + // Override methods to accept updates from the child LB. func (b *clusterImplBalancer) UpdateState(state balancer.State) { diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index 211133d384e8..318545d79b01 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -136,6 +136,10 @@ func (b *bal) Close() { b.logger.Infof("Shutdown") } +func (b *bal) ExitIdle() { + b.bg.ExitIdle() +} + const prefix = "[xds-cluster-manager-lb %p] " var logger = grpclog.Component("xds") diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index b9568173badc..66a5aab305eb 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -115,6 +115,8 @@ type scUpdate struct { state balancer.SubConnState } +type exitIdle struct{} + // clusterResolverBalancer manages xdsClient and the actual EDS balancer implementation that // does load balancing. 
// @@ -279,6 +281,18 @@ func (b *clusterResolverBalancer) run() { break } b.child.UpdateSubConnState(update.subConn, update.state) + case exitIdle: + if b.child == nil { + b.logger.Errorf("xds: received ExitIdle with no child balancer") + break + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. + if ei, ok := b.child.(balancer.ExitIdler); ok { + ei.ExitIdle() + } } case u := <-b.resourceWatcher.updateChannel: b.handleWatchUpdate(u) @@ -348,6 +362,10 @@ func (b *clusterResolverBalancer) Close() { <-b.done.Done() } +func (b *clusterResolverBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) +} + // ccWrapper overrides ResolveNow(), so that re-resolution from the child // policies will trigger the DNS resolver in cluster_resolver balancer. type ccWrapper struct { diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index e7d0cd347cb7..fc98fec4550d 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -1,3 +1,4 @@ +//go:build go1.12 // +build go1.12 /* @@ -128,6 +129,8 @@ func (f *fakeChildBalancer) UpdateSubConnState(sc balancer.SubConn, state balanc func (f *fakeChildBalancer) Close() {} +func (f *fakeChildBalancer) ExitIdle() {} + func (f *fakeChildBalancer) waitForClientConnStateChange(ctx context.Context) error { _, err := f.clientConnState.Receive(ctx) if err != nil { diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 7475145c612b..23e8aa775030 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -201,6 +201,10 @@ func (b *priorityBalancer) Close() { b.stopPriorityInitTimer() } +func (b *priorityBalancer) ExitIdle() 
{ + b.bg.ExitIdle() +} + // stopPriorityInitTimer stops the priorityInitTimer if it's not nil, and set it // to nil. // diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index eb6516af56e9..f05e0aca19f3 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -172,3 +172,7 @@ func (b *weightedTargetBalancer) Close() { b.stateAggregator.Stop() b.bg.Close() } + +func (b *weightedTargetBalancer) ExitIdle() { + b.bg.ExitIdle() +} From c361e9ea1646283baf7b23a5d060c45fce9a1dea Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 23 Aug 2021 19:39:14 -0400 Subject: [PATCH 193/998] Move Server Credentials Handshake to transport (#4692) * Move Server Credentials Handshake to transport --- internal/transport/http2_server.go | 19 ++++++++++++-- internal/transport/transport.go | 4 ++- server.go | 42 +++++++----------------------- 3 files changed, 30 insertions(+), 35 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 88c37723d983..cd0ebed98845 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -133,6 +133,20 @@ type http2Server struct { // underlying conn gets closed before the client preface could be read, it // returns a nil transport and a nil error. func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. 
+ if err == credentials.ErrConnDispatched { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -215,14 +229,15 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) t := &http2Server{ - ctx: setConnection(context.Background(), conn), + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 14198126457b..d3bf65b2bdff 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -30,6 +30,7 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -518,7 +519,8 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters diff --git a/server.go b/server.go index 596384fdc41c..d6155c0e8543 100644 --- a/server.go +++ b/server.go @@ -710,13 +710,6 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. 
var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener channelzID int64 @@ -839,35 +832,14 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - // In deployments where a gRPC server runs behind a cloud load - // balancer which performs regular TCP level health checks, the - // connection is closed immediately by the latter. Skipping the - // error here will help reduce log clutter. - if err != io.EOF { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - } - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { - conn.Close() return } - rawConn.SetDeadline(time.Time{}) if !s.addConn(lisAddr, st) { return } @@ -888,10 +860,11 @@ func (s *Server) drainServerTransports(addr string) { // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, StatsHandler: s.opts.statsHandler, KeepaliveParams: s.opts.keepaliveParams, @@ -909,6 +882,11 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + c.Close() + } channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } From 45a623cbefb83b4708e549616fde9c6d613710ad Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 Aug 2021 10:02:55 -0700 Subject: [PATCH 194/998] test: use non blocking dials in end2end_test (#4687) --- test/balancer_test.go | 39 ++++++++++++++++----------- test/creds_test.go | 1 - test/end2end_test.go | 22 +++------------ test/insecure_creds_test.go | 53 +++++++++++++++---------------------- 4 files changed, 48 insertions(+), 67 deletions(-) diff --git a/test/balancer_test.go b/test/balancer_test.go index bb87ac834d6b..e2fa4cf31d0c 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -28,6 +28,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" @@ -87,7 +88,7 @@ func (b *testBalancer) UpdateClientConnState(state balancer.ClientConnState) err logger.Errorf("testBalancer: failed to NewSubConn: %v", err) return nil } - b.cc.UpdateState(balancer.State{ConnectivityState: 
connectivity.Connecting, Picker: &picker{sc: b.sc, bal: b}}) + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable, bal: b}}) b.sc.Connect() } return nil @@ -105,8 +106,10 @@ func (b *testBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubCon } switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: + case connectivity.Ready: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{sc: sc, bal: b}}) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{sc: sc, bal: b, idle: true}}) case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable, bal: b}}) case connectivity.TransientFailure: @@ -119,15 +122,20 @@ func (b *testBalancer) Close() {} func (b *testBalancer) ExitIdle() {} type picker struct { - err error - sc balancer.SubConn - bal *testBalancer + err error + sc balancer.SubConn + bal *testBalancer + idle bool } func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { if p.err != nil { return balancer.PickResult{}, p.err } + if p.idle { + p.sc.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } extraMD, _ := grpcutil.ExtraMetadata(info.Ctx) info.Ctx = nil // Do not validate context. p.bal.pickInfos = append(p.bal.pickInfos, info) @@ -196,14 +204,14 @@ func testPickExtraMetadata(t *testing.T, e env) { cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) - // The RPCs will fail, but we don't care. We just need the pick to happen. 
- ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second) - defer cancel1() - tc.EmptyCall(ctx1, &testpb.Empty{}) - - ctx2, cancel2 := context.WithTimeout(context.Background(), time.Second) - defer cancel2() - tc.EmptyCall(ctx2, &testpb.Empty{}, grpc.CallContentSubtype(testSubContentType)) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, nil) + } + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.CallContentSubtype(testSubContentType)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, nil) + } want := []metadata.MD{ // First RPC doesn't have sub-content-type. @@ -211,9 +219,8 @@ func testPickExtraMetadata(t *testing.T, e env) { // Second RPC has sub-content-type "proto". {"content-type": []string{"application/grpc+proto"}}, } - - if !cmp.Equal(b.pickExtraMDs, want) { - t.Fatalf("%s", cmp.Diff(b.pickExtraMDs, want)) + if diff := cmp.Diff(want, b.pickExtraMDs); diff != "" { + t.Fatalf("unexpected diff in metadata (-want, +got): %s", diff) } } diff --git a/test/creds_test.go b/test/creds_test.go index 0c6018641819..6b3fc2a46076 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -165,7 +165,6 @@ func (c *clientTimeoutCreds) Clone() credentials.TransportCredentials { func (s) TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "empty"}) te.userAgent = testAppUA - te.nonBlockingDial = true te.startServer(&testServer{security: te.e.security}) defer te.tearDown() diff --git a/test/end2end_test.go b/test/end2end_test.go index 689a86841944..5702f18bdb2a 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -509,10 +509,6 @@ type test struct { customDialOptions []grpc.DialOption resolverScheme string - // All test dialing is blocking by 
default. Set this to true if dial - // should be non-blocking. - nonBlockingDial bool - // These are are set once startServer is called. The common case is to have // only one testServer. srv stopper @@ -829,10 +825,6 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) if te.customCodec != nil { opts = append(opts, grpc.WithDefaultCallOptions(grpc.ForceCodec(te.customCodec))) } - if !te.nonBlockingDial && te.srvAddr != "" { - // Only do a blocking dial if server is up. - opts = append(opts, grpc.WithBlock()) - } if te.srvAddr == "" { te.srvAddr = "client.side.only.test" } @@ -1872,7 +1864,6 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { defer te1.tearDown() te1.resolverScheme = r.Scheme() - te1.nonBlockingDial = true te1.startServer(&testServer{security: e.security}) cc1 := te1.clientConn(grpc.WithResolvers(r)) @@ -1960,7 +1951,6 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). te2 := testServiceConfigSetup(t, e) te2.resolverScheme = r.Scheme() - te2.nonBlockingDial = true te2.maxClientReceiveMsgSize = newInt(1024) te2.maxClientSendMsgSize = newInt(1024) @@ -2020,7 +2010,6 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
te3 := testServiceConfigSetup(t, e) te3.resolverScheme = r.Scheme() - te3.nonBlockingDial = true te3.maxClientReceiveMsgSize = newInt(4096) te3.maxClientSendMsgSize = newInt(4096) @@ -2113,7 +2102,6 @@ func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") te.resolverScheme = r.Scheme() - te.nonBlockingDial = true cc := te.clientConn(grpc.WithResolvers(r)) tc := testpb.NewTestServiceClient(cc) @@ -5141,8 +5129,7 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { go s.Serve(lis) - ctx := context.Background() - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } @@ -5151,7 +5138,7 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { failures := 0 for i := 0; i < requestCount; i++ { - ctx, cancel := context.WithTimeout(ctx, requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{}) if err != nil { t.Fatalf("StreamingOutputCall; err = %q", err) @@ -6572,7 +6559,7 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) { close(done) }() - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatalf("Failed to dial server: %v", err) } @@ -6791,7 +6778,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) { defer s.Stop() dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second) defer dcancel() - cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0)) + cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0)) if err != nil { 
t.Fatalf("Failed to dial server") } @@ -7180,7 +7167,6 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") te.resolverScheme = r.Scheme() - te.nonBlockingDial = true cc := te.clientConn(grpc.WithResolvers(r)) tc := testpb.NewTestServiceClient(cc) diff --git a/test/insecure_creds_test.go b/test/insecure_creds_test.go index 19f8bb8b791b..791cf650887e 100644 --- a/test/insecure_creds_test.go +++ b/test/insecure_creds_test.go @@ -124,21 +124,19 @@ func (s) TestInsecureCreds(t *testing.T) { go s.Serve(lis) addr := lis.Addr().String() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cOpts := []grpc.DialOption{grpc.WithBlock()} + opts := []grpc.DialOption{grpc.WithInsecure()} if test.clientInsecureCreds { - cOpts = append(cOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } else { - cOpts = append(cOpts, grpc.WithInsecure()) + opts = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} } - cc, err := grpc.DialContext(ctx, addr, cOpts...) + cc, err := grpc.Dial(addr, opts...) 
if err != nil { t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) } defer cc.Close() c := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if _, err = c.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall(_, _) = _, %v; want _, ", err) } @@ -151,19 +149,16 @@ func (s) TestInsecureCredsWithPerRPCCredentials(t *testing.T) { desc string perRPCCredsViaDialOptions bool perRPCCredsViaCallOptions bool - wantErr string }{ { desc: "send PerRPCCredentials via DialOptions", perRPCCredsViaDialOptions: true, perRPCCredsViaCallOptions: false, - wantErr: "context deadline exceeded", }, { desc: "send PerRPCCredentials via CallOptions", perRPCCredsViaDialOptions: false, perRPCCredsViaCallOptions: true, - wantErr: "transport: cannot send secure credentials on an insecure connection", }, } for _, test := range tests { @@ -174,44 +169,38 @@ func (s) TestInsecureCredsWithPerRPCCredentials(t *testing.T) { }, } - sOpts := []grpc.ServerOption{} - sOpts = append(sOpts, grpc.Creds(insecure.NewCredentials())) - s := grpc.NewServer(sOpts...) 
+ s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("net.Listen(tcp, localhost:0) failed: %v", err) } - go s.Serve(lis) addr := lis.Addr().String() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cOpts := []grpc.DialOption{grpc.WithBlock()} - cOpts = append(cOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) + dopts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} if test.perRPCCredsViaDialOptions { - cOpts = append(cOpts, grpc.WithPerRPCCredentials(testLegacyPerRPCCredentials{})) - if _, err := grpc.DialContext(ctx, addr, cOpts...); !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("InsecureCredsWithPerRPCCredentials/send_PerRPCCredentials_via_DialOptions = %v; want %s", err, test.wantErr) - } + dopts = append(dopts, grpc.WithPerRPCCredentials(testLegacyPerRPCCredentials{})) } - + copts := []grpc.CallOption{} if test.perRPCCredsViaCallOptions { - cc, err := grpc.DialContext(ctx, addr, cOpts...) - if err != nil { - t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) - } - defer cc.Close() - - c := testpb.NewTestServiceClient(cc) - if _, err = c.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testLegacyPerRPCCredentials{})); !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("InsecureCredsWithPerRPCCredentials/send_PerRPCCredentials_via_CallOptions = %v; want %s", err, test.wantErr) - } + copts = append(copts, grpc.PerRPCCredentials(testLegacyPerRPCCredentials{})) + } + cc, err := grpc.Dial(addr, dopts...) 
+ if err != nil { + t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) + } + defer cc.Close() + + const wantErr = "transport: cannot send secure credentials on an insecure connection" + c := testpb.NewTestServiceClient(cc) + if _, err = c.EmptyCall(ctx, &testpb.Empty{}, copts...); err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("InsecureCredsWithPerRPCCredentials/send_PerRPCCredentials_via_CallOptions = %v; want %s", err, wantErr) } }) } From dc3afb202f85e5540ece8743b114c7287a5f37a4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 Aug 2021 11:04:25 -0700 Subject: [PATCH 195/998] xds: deflake Test/ServerSideXDS_ServingModeChanges (#4689) --- .../test/xds_server_serving_mode_test.go | 83 +++++++++---------- 1 file changed, 38 insertions(+), 45 deletions(-) diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 8fb346298abf..fe6d6d145de5 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -28,11 +28,10 @@ import ( "net" "sync" "testing" + "time" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/testutils" @@ -159,28 +158,20 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Create a ClientConn to the first listener and make a successful RPCs. 
- cc1, err := grpc.DialContext(ctx, lis1.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + cc1, err := grpc.Dial(lis1.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } defer cc1.Close() - - client1 := testpb.NewTestServiceClient(cc1) - if _, err := client1.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } + waitForSuccessfulRPC(ctx, t, cc1) // Create a ClientConn to the second listener and make a successful RPCs. - cc2, err := grpc.DialContext(ctx, lis2.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + cc2, err := grpc.Dial(lis2.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } defer cc2.Close() - - client2 := testpb.NewTestServiceClient(cc2) - if _, err := client2.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } + waitForSuccessfulRPC(ctx, t, cc2) // Update the management server to remove the second listener resource. This // should push only the second listener into "not-serving" mode. @@ -194,21 +185,9 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatal(err) } - // Make sure cc1 is still in READY state, while cc2 has moved out of READY. - if s := cc1.GetState(); s != connectivity.Ready { - t.Fatalf("clientConn1 state is %s, want %s", s, connectivity.Ready) - } - if !cc2.WaitForStateChange(ctx, connectivity.Ready) { - t.Fatal("clientConn2 failed to move out of READY") - } - // Make sure RPCs succeed on cc1 and fail on cc2. 
- if _, err := client1.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } - if _, err := client2.EmptyCall(ctx, &testpb.Empty{}); err == nil { - t.Fatal("rpc EmptyCall() succeeded when expected to fail") - } + waitForSuccessfulRPC(ctx, t, cc1) + waitForFailedRPC(ctx, t, cc2) // Update the management server to remove the first listener resource as // well. This should push the first listener into "not-serving" mode. Second @@ -223,18 +202,9 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatal(err) } - // Make sure cc1 has moved out of READY. - if !cc1.WaitForStateChange(ctx, connectivity.Ready) { - t.Fatal("clientConn1 failed to move out of READY") - } - // Make sure RPCs fail on both. - if _, err := client1.EmptyCall(ctx, &testpb.Empty{}); err == nil { - t.Fatal("rpc EmptyCall() succeeded when expected to fail") - } - if _, err := client2.EmptyCall(ctx, &testpb.Empty{}); err == nil { - t.Fatal("rpc EmptyCall() succeeded when expected to fail") - } + waitForFailedRPC(ctx, t, cc1) + waitForFailedRPC(ctx, t, cc2) // Make sure new connection attempts to "not-serving" servers fail. We use a // short timeout since we expect this to fail. @@ -261,12 +231,8 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // The clientConns created earlier should be able to make RPCs now. 
- if _, err := client1.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } - if _, err := client2.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } + waitForSuccessfulRPC(ctx, t, cc1) + waitForSuccessfulRPC(ctx, t, cc2) } func waitForModeChange(ctx context.Context, modeTracker *modeTracker, addr net.Addr, wantMode xds.ServingMode) error { @@ -279,3 +245,30 @@ func waitForModeChange(ctx context.Context, modeTracker *modeTracker, addr net.A } } } + +func waitForSuccessfulRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { + t.Helper() + + c := testpb.NewTestServiceClient(cc) + if _, err := c.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} + +func waitForFailedRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { + t.Helper() + + c := testpb.NewTestServiceClient(cc) + ticker := time.NewTimer(10 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + t.Fatalf("failure when waiting for RPCs to fail: %v", ctx.Err()) + case <-ticker.C: + if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { + return + } + } + } +} From bfd964bba69658b989ff619c40383e59d13770f1 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 Aug 2021 11:19:04 -0700 Subject: [PATCH 196/998] xds: use the defaultTestTimeout instead of the short one (#4684) --- xds/internal/xdsclient/v2/client_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index 138c9a161695..4b7ed0cb08c9 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -528,7 +528,7 @@ func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC 
error")} t.Log("Bad LDS response pushed to fakeServer...") - timer := time.NewTimer(defaultTestShortTimeout) + timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Timeout when expecting LDS update") From 46ab723bb20867a29022047224194fefd311cb37 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 Aug 2021 12:30:13 -0700 Subject: [PATCH 197/998] multiple: remove appengine specific build constraints and code (#4685) --- Makefile | 2 - channelz/service/func_nonlinux.go | 2 +- install_gae.sh | 6 --- internal/channelz/types_linux.go | 2 - internal/channelz/types_nonlinux.go | 4 +- internal/channelz/util_linux.go | 2 - internal/channelz/util_nonlinux.go | 2 +- internal/channelz/util_test.go | 2 +- internal/credentials/spiffe.go | 2 - internal/credentials/spiffe_appengine.go | 31 ----------- internal/credentials/syscallconn.go | 2 - internal/credentials/syscallconn_appengine.go | 30 ----------- internal/credentials/syscallconn_test.go | 2 - internal/profiling/buffer/buffer.go | 2 - internal/profiling/buffer/buffer_appengine.go | 43 --------------- internal/profiling/buffer/buffer_test.go | 2 - internal/syscall/syscall_linux.go | 2 - internal/syscall/syscall_nonlinux.go | 20 +++---- .../{client.go => client_linux.go} | 2 - security/advancedtls/sni.go | 2 +- security/advancedtls/sni_appengine.go | 30 ----------- security/advancedtls/sni_beforego114.go | 4 +- test/go_vet/vet.go | 53 ------------------- vet.sh | 4 -- 24 files changed, 18 insertions(+), 235 deletions(-) delete mode 100755 install_gae.sh delete mode 100644 internal/credentials/spiffe_appengine.go delete mode 100644 internal/credentials/syscallconn_appengine.go delete mode 100644 internal/profiling/buffer/buffer_appengine.go rename interop/grpclb_fallback/{client.go => client_linux.go} (99%) delete mode 100644 security/advancedtls/sni_appengine.go delete mode 100644 test/go_vet/vet.go diff --git a/Makefile b/Makefile index 1f0722f16243..1f8960922b3b 100644 --- a/Makefile +++ 
b/Makefile @@ -41,8 +41,6 @@ vetdeps: clean \ proto \ test \ - testappengine \ - testappenginedeps \ testrace \ vet \ vetdeps diff --git a/channelz/service/func_nonlinux.go b/channelz/service/func_nonlinux.go index eb53334ed0d1..64ecea947de0 100644 --- a/channelz/service/func_nonlinux.go +++ b/channelz/service/func_nonlinux.go @@ -1,4 +1,4 @@ -// +build !linux appengine +// +build !linux /* * diff --git a/install_gae.sh b/install_gae.sh deleted file mode 100755 index 15ff9facdd78..000000000000 --- a/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/internal/channelz/types_linux.go b/internal/channelz/types_linux.go index 692dd6181778..1b1c4cce34a9 100644 --- a/internal/channelz/types_linux.go +++ b/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/internal/channelz/types_nonlinux.go b/internal/channelz/types_nonlinux.go index 19c2fc521dcf..909edba25a44 100644 --- a/internal/channelz/types_nonlinux.go +++ b/internal/channelz/types_nonlinux.go @@ -1,4 +1,4 @@ -// +build !linux appengine +// +build !linux /* * @@ -37,6 +37,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/internal/channelz/util_linux.go b/internal/channelz/util_linux.go index fdf409d55de3..8d194e44e1dc 100644 --- a/internal/channelz/util_linux.go +++ b/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/internal/channelz/util_nonlinux.go b/internal/channelz/util_nonlinux.go index 8864a0811164..d600417fb8c2 100644 --- a/internal/channelz/util_nonlinux.go +++ b/internal/channelz/util_nonlinux.go @@ -1,4 +1,4 @@ -// +build !linux appengine +// +build !linux /* * diff --git a/internal/channelz/util_test.go b/internal/channelz/util_test.go index 3d1a1183fa41..2621e7410d61 100644 --- a/internal/channelz/util_test.go +++ b/internal/channelz/util_test.go @@ -1,4 +1,4 @@ -// +build linux,!appengine +// +build linux /* * diff --git a/internal/credentials/spiffe.go b/internal/credentials/spiffe.go index be70b6cdfc31..25ade623058e 100644 --- a/internal/credentials/spiffe.go +++ b/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2020 gRPC authors. diff --git a/internal/credentials/spiffe_appengine.go b/internal/credentials/spiffe_appengine.go deleted file mode 100644 index af6f57719768..000000000000 --- a/internal/credentials/spiffe_appengine.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "crypto/tls" - "net/url" -) - -// SPIFFEIDFromState is a no-op for appengine builds. 
-func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - return nil -} diff --git a/internal/credentials/syscallconn.go b/internal/credentials/syscallconn.go index f499a614c20e..2919632d657e 100644 --- a/internal/credentials/syscallconn.go +++ b/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/internal/credentials/syscallconn_appengine.go b/internal/credentials/syscallconn_appengine.go deleted file mode 100644 index a6144cd661c2..000000000000 --- a/internal/credentials/syscallconn_appengine.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} diff --git a/internal/credentials/syscallconn_test.go b/internal/credentials/syscallconn_test.go index ee17a0ca67bc..b229a47d116e 100644 --- a/internal/credentials/syscallconn_test.go +++ b/internal/credentials/syscallconn_test.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/internal/profiling/buffer/buffer.go b/internal/profiling/buffer/buffer.go index 45745cd09197..f4cd4201de11 100644 --- a/internal/profiling/buffer/buffer.go +++ b/internal/profiling/buffer/buffer.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2019 gRPC authors. diff --git a/internal/profiling/buffer/buffer_appengine.go b/internal/profiling/buffer/buffer_appengine.go deleted file mode 100644 index c92599e5b9c0..000000000000 --- a/internal/profiling/buffer/buffer_appengine.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package buffer - -// CircularBuffer is a no-op implementation for appengine builds. -// -// Appengine does not support stats because of lack of the support for unsafe -// pointers, which are necessary to efficiently store and retrieve things into -// and from a circular buffer. As a result, Push does not do anything and Drain -// returns an empty slice. -type CircularBuffer struct{} - -// NewCircularBuffer returns a no-op for appengine builds. -func NewCircularBuffer(size uint32) (*CircularBuffer, error) { - return nil, nil -} - -// Push returns a no-op for appengine builds. -func (cb *CircularBuffer) Push(x interface{}) { -} - -// Drain returns a no-op for appengine builds. 
-func (cb *CircularBuffer) Drain() []interface{} { - return nil -} diff --git a/internal/profiling/buffer/buffer_test.go b/internal/profiling/buffer/buffer_test.go index 86bd77d4a2e6..a7f3b61e4afa 100644 --- a/internal/profiling/buffer/buffer_test.go +++ b/internal/profiling/buffer/buffer_test.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2019 gRPC authors. diff --git a/internal/syscall/syscall_linux.go b/internal/syscall/syscall_linux.go index 4b2964f2a1e3..b3a72276dee4 100644 --- a/internal/syscall/syscall_linux.go +++ b/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/internal/syscall/syscall_nonlinux.go b/internal/syscall/syscall_nonlinux.go index 7913ef1dbfb5..121f29468417 100644 --- a/internal/syscall/syscall_nonlinux.go +++ b/internal/syscall/syscall_nonlinux.go @@ -1,4 +1,4 @@ -// +build !linux appengine +// +build !linux /* * @@ -35,41 +35,41 @@ var logger = grpclog.Component("core") func log() { once.Do(func() { - logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environments.") }) } -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environments. type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. +// GetRusage is a no-op function under non-linux environments. func GetRusage() *Rusage { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. 
It a no-op function for non-linux or appengine environment. +// between two Rusage structs. It a no-op function for non-linux environments. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +// SetTCPUserTimeout is a no-op function under non-linux environments. func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() return -1, nil diff --git a/interop/grpclb_fallback/client.go b/interop/grpclb_fallback/client_linux.go similarity index 99% rename from interop/grpclb_fallback/client.go rename to interop/grpclb_fallback/client_linux.go index 61b2fae6968e..c9b25a894b30 100644 --- a/interop/grpclb_fallback/client.go +++ b/interop/grpclb_fallback/client_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2019 gRPC authors. diff --git a/security/advancedtls/sni.go b/security/advancedtls/sni.go index 120acf2b376d..c503aa7ee90d 100644 --- a/security/advancedtls/sni.go +++ b/security/advancedtls/sni.go @@ -1,4 +1,4 @@ -// +build !appengine,go1.14 +// +build go1.14 /* * diff --git a/security/advancedtls/sni_appengine.go b/security/advancedtls/sni_appengine.go deleted file mode 100644 index fffbb0107ddd..000000000000 --- a/security/advancedtls/sni_appengine.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package advancedtls - -import ( - "crypto/tls" -) - -// buildGetCertificates is a no-op for appengine builds. -func buildGetCertificates(clientHello *tls.ClientHelloInfo, o *ServerOptions) (*tls.Certificate, error) { - return nil, nil -} diff --git a/security/advancedtls/sni_beforego114.go b/security/advancedtls/sni_beforego114.go index 26a09b988491..d89b696d944e 100644 --- a/security/advancedtls/sni_beforego114.go +++ b/security/advancedtls/sni_beforego114.go @@ -1,4 +1,4 @@ -// +build !appengine,!go1.14 +// +build !go1.14 /* * @@ -26,7 +26,7 @@ import ( ) // buildGetCertificates returns the first cert contained in ServerOptions for -// non-appengine builds before version 1.4. +// builds before version 1.14. func buildGetCertificates(clientHello *tls.ClientHelloInfo, o *ServerOptions) (*tls.Certificate, error) { if o.IdentityOptions.GetIdentityCertificatesForServer == nil { return nil, fmt.Errorf("function GetCertificates must be specified") diff --git a/test/go_vet/vet.go b/test/go_vet/vet.go deleted file mode 100644 index 475e8d683fc3..000000000000 --- a/test/go_vet/vet.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// vet checks whether files that are supposed to be built on appengine running -// Go 1.10 or earlier import an unsupported package (e.g. "unsafe", "syscall"). -package main - -import ( - "fmt" - "go/build" - "os" -) - -func main() { - fail := false - b := build.Default - b.BuildTags = []string{"appengine", "appenginevm"} - argsWithoutProg := os.Args[1:] - for _, dir := range argsWithoutProg { - p, err := b.Import(".", dir, 0) - if _, ok := err.(*build.NoGoError); ok { - continue - } else if err != nil { - fmt.Printf("build.Import failed due to %v\n", err) - fail = true - continue - } - for _, pkg := range p.Imports { - if pkg == "syscall" || pkg == "unsafe" { - fmt.Printf("Package %s/%s importing %s package without appengine build tag is NOT ALLOWED!\n", p.Dir, p.Name, pkg) - fail = true - } - } - } - if fail { - os.Exit(1) - } -} diff --git a/vet.sh b/vet.sh index 5eaa8b05d6d3..d923187a7b3a 100755 --- a/vet.sh +++ b/vet.sh @@ -89,10 +89,6 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - misspell -error . # - Check that generated proto files are up to date. 
From 5f4bc66745e1af8406741bb329a7bb7119631e02 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 24 Aug 2021 13:52:18 -0700 Subject: [PATCH 198/998] grpc: fix stayConnected function to connect upon entry (#4699) If stayConnected was called while the ClientConn was in IDLE already, it would never call Connect, and stay stuck in that state. This change ensures cc.Connect is always called at least once. --- clientconn_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clientconn_test.go b/clientconn_test.go index da765615be12..e7381b8e6770 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -1122,7 +1122,10 @@ func stayConnected(cc *ClientConn) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - for state := cc.GetState(); state != connectivity.Shutdown && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + for { cc.Connect() + if state := cc.GetState(); state == connectivity.Shutdown || !cc.WaitForStateChange(ctx, state) { + return + } } } From 6bd8e8cf30e25b6cde3ec16389ff470680c107b1 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 Aug 2021 14:24:34 -0700 Subject: [PATCH 199/998] multiple: remove support for Go 1.11 (#4700) --- .github/workflows/testing.yml | 3 -- examples/go.mod | 2 +- go.mod | 2 +- internal/resolver/dns/go113.go | 33 --------------- security/advancedtls/sni_beforego114.go | 42 ------------------- security/authorization/go.mod | 2 +- ...x_go110_test.go => channelz_linux_test.go} | 2 - test/tools/go.mod | 2 +- 8 files changed, 4 insertions(+), 84 deletions(-) delete mode 100644 internal/resolver/dns/go113.go delete mode 100644 security/advancedtls/sni_beforego114.go rename test/{channelz_linux_go110_test.go => channelz_linux_test.go} (99%) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index c94c812f97b7..e85c59e44bcd 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -68,9 +68,6 @@ jobs: - 
type: tests goversion: 1.14 - - type: tests # Keep until interop tests no longer require Go1.11 - goversion: 1.11 - steps: # Setup the environment. - name: Setup GOARCH diff --git a/examples/go.mod b/examples/go.mod index 143a8fc03f57..4f19b852edd8 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/examples -go 1.11 +go 1.14 require ( github.com/golang/protobuf v1.4.3 diff --git a/go.mod b/go.mod index 2f2cf1eb7669..8cb729ba0d35 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc -go 1.11 +go 1.14 require ( github.com/cespare/xxhash v1.1.0 diff --git a/internal/resolver/dns/go113.go b/internal/resolver/dns/go113.go deleted file mode 100644 index 8783a8cf8214..000000000000 --- a/internal/resolver/dns/go113.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package dns - -import "net" - -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. 
- return nil - } - return err - } -} diff --git a/security/advancedtls/sni_beforego114.go b/security/advancedtls/sni_beforego114.go deleted file mode 100644 index d89b696d944e..000000000000 --- a/security/advancedtls/sni_beforego114.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !go1.14 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package advancedtls - -import ( - "crypto/tls" - "fmt" -) - -// buildGetCertificates returns the first cert contained in ServerOptions for -// builds before version 1.14. 
-func buildGetCertificates(clientHello *tls.ClientHelloInfo, o *ServerOptions) (*tls.Certificate, error) { - if o.IdentityOptions.GetIdentityCertificatesForServer == nil { - return nil, fmt.Errorf("function GetCertificates must be specified") - } - certificates, err := o.IdentityOptions.GetIdentityCertificatesForServer(clientHello) - if err != nil { - return nil, err - } - if len(certificates) == 0 { - return nil, fmt.Errorf("no certificates configured") - } - return certificates[0], nil -} diff --git a/security/authorization/go.mod b/security/authorization/go.mod index 0581b3401f32..ce34742af2c9 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/security/authorization -go 1.12 +go 1.14 require ( github.com/envoyproxy/go-control-plane v0.9.5 diff --git a/test/channelz_linux_go110_test.go b/test/channelz_linux_test.go similarity index 99% rename from test/channelz_linux_go110_test.go rename to test/channelz_linux_test.go index dea374bfc08b..aa6febe537a0 100644 --- a/test/channelz_linux_go110_test.go +++ b/test/channelz_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* * * Copyright 2018 gRPC authors. 
diff --git a/test/tools/go.mod b/test/tools/go.mod index 874268d34fce..9c964971413e 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/test/tools -go 1.11 +go 1.14 require ( github.com/client9/misspell v0.3.4 From 498743c19e864d45b6761fd0b8c6cf7ad72eb271 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Wed, 25 Aug 2021 14:03:53 -0700 Subject: [PATCH 200/998] xds/c2p: update default XDS server name in C2P resolver (#4705) --- xds/googledirectpath/googlec2p.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 0c2f984fbcb1..b9f1c712014e 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -48,7 +48,7 @@ import ( const ( c2pScheme = "google-c2p" - tdURL = "directpath-trafficdirector.googleapis.com" + tdURL = "directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" From 712e8d4f57fd4a4fbb83406148f9c71eb3e7714e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 25 Aug 2021 14:51:41 -0700 Subject: [PATCH 201/998] Remove support for Go 1.13 and older (cont) (#4706) --- credentials/go12.go | 30 ------------------ credentials/sts/sts.go | 2 -- credentials/sts/sts_test.go | 2 -- credentials/tls.go | 3 ++ .../tls/certprovider/distributor_test.go | 2 -- .../tls/certprovider/pemfile/builder_test.go | 2 -- .../tls/certprovider/pemfile/watcher_test.go | 18 ++++------- credentials/tls/certprovider/store_test.go | 31 ++++++++++++------- credentials/xds/xds_client_test.go | 2 -- credentials/xds/xds_server_test.go | 2 -- internal/leakcheck/leakcheck.go | 1 - internal/resolver/dns/dns_resolver.go | 7 +---- internal/xds/matcher/matcher_header_test.go | 2 -- .../advancedtls_integration_test.go | 2 -- 
security/advancedtls/advancedtls_test.go | 2 -- security/advancedtls/sni.go | 2 -- security/authorization/engine/engine_test.go | 2 -- security/authorization/engine/util_test.go | 2 -- xds/csds/csds_test.go | 2 -- xds/googledirectpath/googlec2p_test.go | 2 -- .../balancergroup/balancergroup_test.go | 2 -- .../balancer/balancergroup/testutils_test.go | 2 -- .../cdsbalancer/cdsbalancer_security_test.go | 2 -- .../balancer/cdsbalancer/cdsbalancer_test.go | 3 -- .../cdsbalancer/cluster_handler_test.go | 2 -- .../balancer/clusterimpl/balancer_test.go | 2 -- .../balancer/clusterimpl/config_test.go | 2 -- .../clustermanager/clustermanager_test.go | 2 -- .../balancer/clustermanager/config_test.go | 2 -- .../clusterresolver/clusterresolver_test.go | 3 -- .../balancer/clusterresolver/config_test.go | 2 -- .../clusterresolver/configbuilder_test.go | 2 -- .../balancer/clusterresolver/eds_impl_test.go | 2 -- .../balancer/clusterresolver/priority_test.go | 2 -- .../clusterresolver/resource_resolver_test.go | 2 -- .../balancer/clusterresolver/testutil_test.go | 2 -- xds/internal/balancer/orca/orca_test.go | 2 -- .../balancer/priority/balancer_test.go | 2 -- xds/internal/balancer/priority/config_test.go | 2 -- .../priority/ignore_resolve_now_test.go | 2 -- xds/internal/balancer/priority/utils_test.go | 2 -- .../weightedtarget_config_test.go | 2 -- .../weightedtarget/weightedtarget_test.go | 2 -- xds/internal/httpfilter/fault/fault_test.go | 1 - xds/internal/internal_test.go | 2 -- xds/internal/resolver/matcher_path_test.go | 2 -- xds/internal/resolver/matcher_test.go | 2 -- xds/internal/resolver/serviceconfig_test.go | 2 -- xds/internal/resolver/watch_service_test.go | 2 -- xds/internal/resolver/xds_resolver_test.go | 2 -- xds/internal/server/listener_wrapper_test.go | 2 -- xds/internal/server/rds_handler_test.go | 2 -- .../test/xds_client_integration_test.go | 1 - xds/internal/test/xds_integration_test.go | 1 - .../test/xds_server_integration_test.go | 1 - 
.../test/xds_server_serving_mode_test.go | 1 - xds/internal/testutils/balancer_test.go | 2 -- .../xdsclient/bootstrap/bootstrap_test.go | 2 -- xds/internal/xdsclient/cds_test.go | 2 -- xds/internal/xdsclient/client_test.go | 2 -- xds/internal/xdsclient/dump_test.go | 2 -- xds/internal/xdsclient/eds_test.go | 2 -- xds/internal/xdsclient/filter_chain_test.go | 2 -- xds/internal/xdsclient/lds_test.go | 2 -- xds/internal/xdsclient/load/store_test.go | 2 -- xds/internal/xdsclient/loadreport_test.go | 2 -- xds/internal/xdsclient/rds_test.go | 2 -- .../xdsclient/requests_counter_test.go | 2 -- xds/internal/xdsclient/v2/ack_test.go | 2 -- xds/internal/xdsclient/v2/cds_test.go | 2 -- xds/internal/xdsclient/v2/client_test.go | 2 -- xds/internal/xdsclient/v2/eds_test.go | 2 -- xds/internal/xdsclient/v2/lds_test.go | 2 -- xds/internal/xdsclient/v2/rds_test.go | 2 -- .../xdsclient/watchers_cluster_test.go | 2 -- .../xdsclient/watchers_endpoints_test.go | 2 -- .../xdsclient/watchers_listener_test.go | 2 -- xds/internal/xdsclient/watchers_route_test.go | 2 -- xds/internal/xdsclient/xdsclient_test.go | 2 -- xds/server_test.go | 2 -- 80 files changed, 29 insertions(+), 206 deletions(-) delete mode 100644 credentials/go12.go diff --git a/credentials/go12.go b/credentials/go12.go deleted file mode 100644 index ccbf35b33125..000000000000 --- a/credentials/go12.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import "crypto/tls" - -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" -} diff --git a/credentials/sts/sts.go b/credentials/sts/sts.go index 9285192a8eba..da5fa1ad16f5 100644 --- a/credentials/sts/sts.go +++ b/credentials/sts/sts.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. diff --git a/credentials/sts/sts_test.go b/credentials/sts/sts_test.go index 8738c424d087..dd634361d7cf 100644 --- a/credentials/sts/sts_test.go +++ b/credentials/sts/sts_test.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. diff --git a/credentials/tls.go b/credentials/tls.go index 8ee7124f2265..784822d0560a 100644 --- a/credentials/tls.go +++ b/credentials/tls.go @@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/credentials/tls/certprovider/distributor_test.go b/credentials/tls/certprovider/distributor_test.go index bec00e919bcf..48d51375616f 100644 --- a/credentials/tls/certprovider/distributor_test.go +++ b/credentials/tls/certprovider/distributor_test.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. 
diff --git a/credentials/tls/certprovider/pemfile/builder_test.go b/credentials/tls/certprovider/pemfile/builder_test.go index 2e49289ff899..bef00e10c19d 100644 --- a/credentials/tls/certprovider/pemfile/builder_test.go +++ b/credentials/tls/certprovider/pemfile/builder_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/credentials/tls/certprovider/pemfile/watcher_test.go b/credentials/tls/certprovider/pemfile/watcher_test.go index 8b772245525e..6cc65bd50001 100644 --- a/credentials/tls/certprovider/pemfile/watcher_test.go +++ b/credentials/tls/certprovider/pemfile/watcher_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. @@ -24,7 +22,6 @@ import ( "context" "fmt" "io/ioutil" - "math/big" "os" "path" "testing" @@ -32,7 +29,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" @@ -59,17 +55,15 @@ func Test(t *testing.T) { } func compareKeyMaterial(got, want *certprovider.KeyMaterial) error { - // x509.Certificate type defines an Equal() method, but does not check for - // nil. This has been fixed in - // https://github.com/golang/go/commit/89865f8ba64ccb27f439cce6daaa37c9aa38f351, - // but this is only available starting go1.14. - // TODO(easwars): Remove this check once we remove support for go1.13. 
- if (got.Certs == nil && want.Certs != nil) || (want.Certs == nil && got.Certs != nil) { + if len(got.Certs) != len(want.Certs) { return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) } - if !cmp.Equal(got.Certs, want.Certs, cmp.AllowUnexported(big.Int{})) { - return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + for i := 0; i < len(got.Certs); i++ { + if !got.Certs[i].Leaf.Equal(want.Certs[i].Leaf) { + return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + } } + // x509.CertPool contains only unexported fields some of which contain other // unexported fields. So usage of cmp.AllowUnexported() or // cmpopts.IgnoreUnexported() does not help us much here. Also, the standard diff --git a/credentials/tls/certprovider/store_test.go b/credentials/tls/certprovider/store_test.go index 00d33a2be872..ee1f4a358ba5 100644 --- a/credentials/tls/certprovider/store_test.go +++ b/credentials/tls/certprovider/store_test.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. @@ -27,10 +25,11 @@ import ( "errors" "fmt" "io/ioutil" - "reflect" "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/testdata" @@ -154,15 +153,23 @@ func readAndVerifyKeyMaterial(ctx context.Context, kmr kmReader, wantKM *KeyMate } func compareKeyMaterial(got, want *KeyMaterial) error { - // TODO(easwars): Remove all references to reflect.DeepEqual and use - // cmp.Equal instead. Currently, the later panics because x509.Certificate - // type defines an Equal method, but does not check for nil. This has been - // fixed in - // https://github.com/golang/go/commit/89865f8ba64ccb27f439cce6daaa37c9aa38f351, - // but this is only available starting go1.14. So, once we remove support - // for go1.13, we can make the switch. 
- if !reflect.DeepEqual(got, want) { - return fmt.Errorf("provider.KeyMaterial() = %+v, want %+v", got, want) + if len(got.Certs) != len(want.Certs) { + return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + } + for i := 0; i < len(got.Certs); i++ { + if !got.Certs[i].Leaf.Equal(want.Certs[i].Leaf) { + return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + } + } + + // x509.CertPool contains only unexported fields some of which contain other + // unexported fields. So usage of cmp.AllowUnexported() or + // cmpopts.IgnoreUnexported() does not help us much here. Also, the standard + // library does not provide a way to compare CertPool values. Comparing the + // subjects field of the certs in the CertPool seems like a reasonable + // approach. + if gotR, wantR := got.Roots.Subjects(), want.Roots.Subjects(); !cmp.Equal(gotR, wantR, cmpopts.EquateEmpty()) { + return fmt.Errorf("keyMaterial roots = %v, want %v", gotR, wantR) } return nil } diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index 07a65b6e3cf2..f4b86df060b2 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/credentials/xds/xds_server_test.go b/credentials/xds/xds_server_test.go index 65f7e8ffa3b9..5c29ba38c286 100644 --- a/credentials/xds/xds_server_test.go +++ b/credentials/xds/xds_server_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/internal/leakcheck/leakcheck.go b/internal/leakcheck/leakcheck.go index 1d4fcef994ba..946c575f140f 100644 --- a/internal/leakcheck/leakcheck.go +++ b/internal/leakcheck/leakcheck.go @@ -42,7 +42,6 @@ var goroutinesToIgnore = []string{ "runtime_mcall", "(*loggingT).flushDaemon", "goroutine in C code", - "httputil.DumpRequestOut", // TODO: Remove this once Go1.13 support is removed. https://github.com/golang/go/issues/37669. 
} // RegisterIgnoreGoroutine appends s into the ignore goroutine list. The diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index fa1f21aa9abb..75301c514913 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -277,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { +func handleDNSError(err error, lookupType string) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) diff --git a/internal/xds/matcher/matcher_header_test.go b/internal/xds/matcher/matcher_header_test.go index 902b8112e889..9a0d51300d0a 100644 --- a/internal/xds/matcher/matcher_header_test.go +++ b/internal/xds/matcher/matcher_header_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index 2fabe8f17a8d..4bb9e645b0a1 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ b/security/advancedtls/advancedtls_integration_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/security/advancedtls/advancedtls_test.go b/security/advancedtls/advancedtls_test.go index 827cf031ef6f..64da81a1700c 100644 --- a/security/advancedtls/advancedtls_test.go +++ b/security/advancedtls/advancedtls_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. 
diff --git a/security/advancedtls/sni.go b/security/advancedtls/sni.go index c503aa7ee90d..3e7befb1f904 100644 --- a/security/advancedtls/sni.go +++ b/security/advancedtls/sni.go @@ -1,5 +1,3 @@ -// +build go1.14 - /* * * Copyright 2020 gRPC authors. diff --git a/security/authorization/engine/engine_test.go b/security/authorization/engine/engine_test.go index e56f218e5e2b..c159c4bd5c21 100644 --- a/security/authorization/engine/engine_test.go +++ b/security/authorization/engine/engine_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * Copyright 2020 gRPC authors. * diff --git a/security/authorization/engine/util_test.go b/security/authorization/engine/util_test.go index 43514296d83a..e766fbf3ffe0 100644 --- a/security/authorization/engine/util_test.go +++ b/security/authorization/engine/util_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 98dc93e86713..6b594550d8d7 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 8f98d3159d3a..a208fad66c58 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go index 355810a52004..9cc7bd072ecf 100644 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ b/xds/internal/balancer/balancergroup/balancergroup_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * Copyright 2019 gRPC authors. 
* diff --git a/xds/internal/balancer/balancergroup/testutils_test.go b/xds/internal/balancer/balancergroup/testutils_test.go index 8c0543083ab3..1429fa87b3f2 100644 --- a/xds/internal/balancer/balancergroup/testutils_test.go +++ b/xds/internal/balancer/balancergroup/testutils_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index ddab01d8a51b..9483818e306e 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * Copyright 2020 gRPC authors. * diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index c59810958024..30b612fc7d01 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -1,6 +1,3 @@ -//go:build go1.12 -// +build go1.12 - /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index cdc84f44d9a3..cb9b4e14da3c 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * Copyright 2021 gRPC authors. * diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index dc47941ef2c5..65ec17348f46 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/balancer/clusterimpl/config_test.go b/xds/internal/balancer/clusterimpl/config_test.go index f83155b5339a..ccb0c5e74d90 100644 --- a/xds/internal/balancer/clusterimpl/config_test.go +++ b/xds/internal/balancer/clusterimpl/config_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index 42c53648553e..a40d954ad64f 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/clustermanager/config_test.go b/xds/internal/balancer/clustermanager/config_test.go index f591f5ad32d8..3328ba1d300f 100644 --- a/xds/internal/balancer/clustermanager/config_test.go +++ b/xds/internal/balancer/clustermanager/config_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index fc98fec4550d..25ccbe3ca23f 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -1,6 +1,3 @@ -//go:build go1.12 -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index f3ba4430eba0..17d5f409674f 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. 
diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 174bd376e5c5..3e2ad8a2e64e 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index 0fd1e590d463..00814a6212b2 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 2e0e9fd5d2ff..8438a373d9d9 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index efdb4d58da69..2a365850cd78 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/balancer/clusterresolver/testutil_test.go b/xds/internal/balancer/clusterresolver/testutil_test.go index 0025846c07c9..48759603827a 100644 --- a/xds/internal/balancer/clusterresolver/testutil_test.go +++ b/xds/internal/balancer/clusterresolver/testutil_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * Copyright 2020 gRPC authors. 
* diff --git a/xds/internal/balancer/orca/orca_test.go b/xds/internal/balancer/orca/orca_test.go index ff02b3c16087..d7a44134e22b 100644 --- a/xds/internal/balancer/orca/orca_test.go +++ b/xds/internal/balancer/orca/orca_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * Copyright 2019 gRPC authors. * diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index bc43a74557e6..13699ce0c970 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/balancer/priority/config_test.go b/xds/internal/balancer/priority/config_test.go index 42498bfa2b74..8316224c91be 100644 --- a/xds/internal/balancer/priority/config_test.go +++ b/xds/internal/balancer/priority/config_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/priority/ignore_resolve_now_test.go b/xds/internal/balancer/priority/ignore_resolve_now_test.go index 452753de6c7b..b7cecd6c1ff2 100644 --- a/xds/internal/balancer/priority/ignore_resolve_now_test.go +++ b/xds/internal/balancer/priority/ignore_resolve_now_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/balancer/priority/utils_test.go b/xds/internal/balancer/priority/utils_test.go index a4b1a5285102..c80a89b080f9 100644 --- a/xds/internal/balancer/priority/utils_test.go +++ b/xds/internal/balancer/priority/utils_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. 
diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go index 3cd6f74df723..c239a3ae5a4e 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_test.go index b792c28c6ab0..eeebab733d61 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index a77ab58ad356..596fce468ee7 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -1,4 +1,3 @@ -// +build go1.12 // +build !386 /* diff --git a/xds/internal/internal_test.go b/xds/internal/internal_test.go index 9240d0a89d58..903b9db23c48 100644 --- a/xds/internal/internal_test.go +++ b/xds/internal/internal_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/resolver/matcher_path_test.go b/xds/internal/resolver/matcher_path_test.go index 7b0d296fc324..263a049108e4 100644 --- a/xds/internal/resolver/matcher_path_test.go +++ b/xds/internal/resolver/matcher_path_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/resolver/matcher_test.go b/xds/internal/resolver/matcher_test.go index f7d5486cc136..6143dd9f46e1 100644 --- a/xds/internal/resolver/matcher_test.go +++ b/xds/internal/resolver/matcher_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index b0bc86c882b3..0e7d5daaaf5d 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 270a8e8ac040..b622118e4b3e 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 8bd2cec67446..d0ebd3f5035b 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 41cba859223b..4b8e5f857ccb 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/server/rds_handler_test.go b/xds/internal/server/rds_handler_test.go index b20d47c3001d..d1daffd940c0 100644 --- a/xds/internal/server/rds_handler_test.go +++ b/xds/internal/server/rds_handler_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. 
diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index e94b70c9fb64..4465d2146ff5 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -1,4 +1,3 @@ -// +build go1.12 // +build !386 /* diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index b66cdd59cafe..74d84831520c 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -1,4 +1,3 @@ -// +build go1.12 // +build !386 /* diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 4bf13e9305be..9983c6e4aeb3 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -1,4 +1,3 @@ -// +build go1.12 // +build !386 /* diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index fe6d6d145de5..f6c3d39a3254 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -1,4 +1,3 @@ -// +build go1.13 // +build !386 /* diff --git a/xds/internal/testutils/balancer_test.go b/xds/internal/testutils/balancer_test.go index 83393dcd1e98..4891eb9cdadf 100644 --- a/xds/internal/testutils/balancer_test.go +++ b/xds/internal/testutils/balancer_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index f62ed2b54245..501d62102d21 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. 
diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index e2c7705d5a45..bb563a6f5191 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index abf51c45b83a..9d8086aeb4e1 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index 5c31b183a6bc..83479978d765 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/xdsclient/eds_test.go b/xds/internal/xdsclient/eds_test.go index ebd26a40f957..2fe35989f7d3 100644 --- a/xds/internal/xdsclient/eds_test.go +++ b/xds/internal/xdsclient/eds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index 525c865c7ffd..9e9d603c3837 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2021 gRPC authors. diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index c04f92393fdf..9960349816e8 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/xdsclient/load/store_test.go b/xds/internal/xdsclient/load/store_test.go index e7db4e26b176..46568591f9e4 100644 --- a/xds/internal/xdsclient/load/store_test.go +++ b/xds/internal/xdsclient/load/store_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 0f29e96fc1eb..88a08eb43fda 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index 11edabce5b7c..3787ae0ff328 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/requests_counter_test.go b/xds/internal/xdsclient/requests_counter_test.go index cd95aeaf82e0..e2eeea774e24 100644 --- a/xds/internal/xdsclient/requests_counter_test.go +++ b/xds/internal/xdsclient/requests_counter_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/v2/ack_test.go b/xds/internal/xdsclient/v2/ack_test.go index 1c41cd9d1ad8..d2f0605f6d08 100644 --- a/xds/internal/xdsclient/v2/ack_test.go +++ b/xds/internal/xdsclient/v2/ack_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/xdsclient/v2/cds_test.go b/xds/internal/xdsclient/v2/cds_test.go index da4738460930..e15e13074681 100644 --- a/xds/internal/xdsclient/v2/cds_test.go +++ b/xds/internal/xdsclient/v2/cds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. 
diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index 4b7ed0cb08c9..2a45a52ca1d9 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/xdsclient/v2/eds_test.go b/xds/internal/xdsclient/v2/eds_test.go index 7338ec80a5e5..5062dff9c07c 100644 --- a/xds/internal/xdsclient/v2/eds_test.go +++ b/xds/internal/xdsclient/v2/eds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/xdsclient/v2/lds_test.go b/xds/internal/xdsclient/v2/lds_test.go index e6f1d8f1cf02..db26681fb3d2 100644 --- a/xds/internal/xdsclient/v2/lds_test.go +++ b/xds/internal/xdsclient/v2/lds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2019 gRPC authors. diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go index 57058aa36e94..efc010224778 100644 --- a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index 0ded36445994..939b7921b0be 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index 9b5000b8bb9f..0e46886cc4d3 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. 
diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index f7a1169bee39..da0255e37a8e 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index 5e9b51da9d22..e569192b510d 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/internal/xdsclient/xdsclient_test.go b/xds/internal/xdsclient/xdsclient_test.go index 7b33c145f3bd..f348df481615 100644 --- a/xds/internal/xdsclient/xdsclient_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. diff --git a/xds/server_test.go b/xds/server_test.go index 143e19d08c7e..84622de0dadb 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * Copyright 2020 gRPC authors. 
From 0b372df5f45ee5e81aaae18ae9e5ad60eab60586 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 26 Aug 2021 10:21:09 -0700 Subject: [PATCH 202/998] xds/client: NACK ringhash lb policy if env var is not set (#4707) --- xds/internal/xdsclient/cds_test.go | 6 ++++++ xds/internal/xdsclient/xds.go | 3 +++ 2 files changed, 9 insertions(+) diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index bb563a6f5191..0ccd62b607b0 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -196,6 +196,9 @@ func (s) TestValidateCluster_Failure(t *testing.T) { oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv env.AggregateAndDNSSupportEnv = true defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }() + oldRingHashSupport := env.RingHashSupport + env.RingHashSupport = true + defer func() { env.RingHashSupport = oldRingHashSupport }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { if update, err := validateClusterAndConstructClusterUpdate(test.cluster); err == nil { @@ -413,6 +416,9 @@ func (s) TestValidateCluster_Success(t *testing.T) { oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv env.AggregateAndDNSSupportEnv = true defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }() + oldRingHashSupport := env.RingHashSupport + env.RingHashSupport = true + defer func() { env.RingHashSupport = oldRingHashSupport }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { update, err := validateClusterAndConstructClusterUpdate(test.cluster) diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 569913936d56..d6c44bad48ab 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -586,6 +586,9 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu case v3clusterpb.Cluster_ROUND_ROBIN: lbPolicy = nil // The default is round_robin, and there's no 
config to set. case v3clusterpb.Cluster_RING_HASH: + if !env.RingHashSupport { + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } rhc := cluster.GetRingHashLbConfig() if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH { return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster) From d074cae66bc68d4ec5ccf427de2fce700223f4c7 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 26 Aug 2021 11:21:36 -0700 Subject: [PATCH 203/998] github: fold security tests into 'tests'; update testing to 1.17-1.15 (#4708) --- .github/workflows/testing.yml | 21 ++++++++++--------- channelz/service/func_nonlinux.go | 1 + channelz/service/service_sktopt_test.go | 1 + channelz/service/util_sktopt_386_test.go | 1 + channelz/service/util_sktopt_amd64_test.go | 1 + credentials/alts/alts_test.go | 1 + credentials/alts/utils_test.go | 1 + internal/channelz/types_nonlinux.go | 1 + internal/channelz/util_nonlinux.go | 1 + internal/channelz/util_test.go | 1 + internal/profiling/goid_modified.go | 1 + internal/profiling/goid_regular.go | 1 + internal/syscall/syscall_nonlinux.go | 1 + internal/transport/proxy_test.go | 1 + test/authority_test.go | 1 + test/race.go | 1 + test/tools/tools.go | 1 + xds/internal/httpfilter/fault/fault_test.go | 1 + .../test/xds_client_integration_test.go | 1 + xds/internal/test/xds_integration_test.go | 1 + .../test/xds_server_integration_test.go | 1 + .../test/xds_server_serving_mode_test.go | 1 + 22 files changed, 32 insertions(+), 10 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index e85c59e44bcd..7bf815463606 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -41,32 +41,32 @@ jobs: matrix: include: - type: vet+tests - goversion: 1.16 + goversion: 1.17 - type: tests - goversion: 1.16 + goversion: 1.17 testflags: -race - type: tests - 
goversion: 1.16 + goversion: 1.17 grpcenv: GRPC_GO_RETRY=on - type: extras - goversion: 1.16 + goversion: 1.17 - type: tests - goversion: 1.16 + goversion: 1.17 goarch: 386 - type: tests - goversion: 1.16 + goversion: 1.17 goarch: arm64 - type: tests - goversion: 1.15 + goversion: 1.16 - type: tests - goversion: 1.14 + goversion: 1.15 steps: # Setup the environment. @@ -104,6 +104,9 @@ jobs: run: | go version go test ${{ matrix.testflags }} -cpu 1,4 -timeout 7m google.golang.org/grpc/... + cd ${GITHUB_WORKSPACE}/security/advancedtls && go test ${{ matrix.testflags }} -timeout 2m google.golang.org/grpc/security/advancedtls/... + cd ${GITHUB_WORKSPACE}/security/authorization && go test ${{ matrix.testflags }} -timeout 2m google.golang.org/grpc/security/authorization/... + # Non-core gRPC tests (examples, interop, etc) - name: Run extras tests @@ -113,5 +116,3 @@ jobs: examples/examples_test.sh security/advancedtls/examples/examples_test.sh interop/interop_test.sh - cd ${GITHUB_WORKSPACE}/security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... - cd ${GITHUB_WORKSPACE}/security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... 
diff --git a/channelz/service/func_nonlinux.go b/channelz/service/func_nonlinux.go index 64ecea947de0..473495d6655e 100644 --- a/channelz/service/func_nonlinux.go +++ b/channelz/service/func_nonlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/channelz/service/service_sktopt_test.go b/channelz/service/service_sktopt_test.go index ecd4a2ad05f9..4ea6b20cd6a6 100644 --- a/channelz/service/service_sktopt_test.go +++ b/channelz/service/service_sktopt_test.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/channelz/service/util_sktopt_386_test.go b/channelz/service/util_sktopt_386_test.go index d9c981271361..3ba3dc96e7c6 100644 --- a/channelz/service/util_sktopt_386_test.go +++ b/channelz/service/util_sktopt_386_test.go @@ -1,3 +1,4 @@ +//go:build 386 && linux // +build 386,linux /* diff --git a/channelz/service/util_sktopt_amd64_test.go b/channelz/service/util_sktopt_amd64_test.go index 0ff06d128330..124d7b758199 100644 --- a/channelz/service/util_sktopt_amd64_test.go +++ b/channelz/service/util_sktopt_amd64_test.go @@ -1,3 +1,4 @@ +//go:build amd64 && linux // +build amd64,linux /* diff --git a/credentials/alts/alts_test.go b/credentials/alts/alts_test.go index cbb1656d20c3..22ad5a48b09e 100644 --- a/credentials/alts/alts_test.go +++ b/credentials/alts/alts_test.go @@ -1,3 +1,4 @@ +//go:build linux || windows // +build linux windows /* diff --git a/credentials/alts/utils_test.go b/credentials/alts/utils_test.go index 2231de3dccc4..531cdfce6e3a 100644 --- a/credentials/alts/utils_test.go +++ b/credentials/alts/utils_test.go @@ -1,3 +1,4 @@ +//go:build linux || windows // +build linux windows /* diff --git a/internal/channelz/types_nonlinux.go b/internal/channelz/types_nonlinux.go index 909edba25a44..8b06eed1ab8b 100644 --- a/internal/channelz/types_nonlinux.go +++ b/internal/channelz/types_nonlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git 
a/internal/channelz/util_nonlinux.go b/internal/channelz/util_nonlinux.go index d600417fb8c2..837ddc402400 100644 --- a/internal/channelz/util_nonlinux.go +++ b/internal/channelz/util_nonlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/internal/channelz/util_test.go b/internal/channelz/util_test.go index 2621e7410d61..9de6679043d7 100644 --- a/internal/channelz/util_test.go +++ b/internal/channelz/util_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/internal/profiling/goid_modified.go b/internal/profiling/goid_modified.go index b186499cd0d1..ff1a5f5933b7 100644 --- a/internal/profiling/goid_modified.go +++ b/internal/profiling/goid_modified.go @@ -1,3 +1,4 @@ +//go:build grpcgoid // +build grpcgoid /* diff --git a/internal/profiling/goid_regular.go b/internal/profiling/goid_regular.go index 891c2e98f9db..042933227d81 100644 --- a/internal/profiling/goid_regular.go +++ b/internal/profiling/goid_regular.go @@ -1,3 +1,4 @@ +//go:build !grpcgoid // +build !grpcgoid /* diff --git a/internal/syscall/syscall_nonlinux.go b/internal/syscall/syscall_nonlinux.go index 121f29468417..999f52cd75bd 100644 --- a/internal/syscall/syscall_nonlinux.go +++ b/internal/syscall/syscall_nonlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/internal/transport/proxy_test.go b/internal/transport/proxy_test.go index 5434e9ed5c01..404354a19dbb 100644 --- a/internal/transport/proxy_test.go +++ b/internal/transport/proxy_test.go @@ -1,3 +1,4 @@ +//go:build !race // +build !race /* diff --git a/test/authority_test.go b/test/authority_test.go index 17ae178b73c9..15afa759c907 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/test/race.go b/test/race.go index acfa0dfae37c..d99f0a410ac6 100644 --- a/test/race.go +++ b/test/race.go @@ -1,3 +1,4 @@ +//go:build race // +build race /* diff --git a/test/tools/tools.go b/test/tools/tools.go index 
3a614f765f50..646a144ccca1 100644 --- a/test/tools/tools.go +++ b/test/tools/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools /* diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 596fce468ee7..695961c129a1 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -1,3 +1,4 @@ +//go:build !386 // +build !386 /* diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 4465d2146ff5..9760e75363e2 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -1,3 +1,4 @@ +//go:build !386 // +build !386 /* diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index 74d84831520c..1973ad4a1196 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -1,3 +1,4 @@ +//go:build !386 // +build !386 /* diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 9983c6e4aeb3..11ac8fc70c15 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -1,3 +1,4 @@ +//go:build !386 // +build !386 /* diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index f6c3d39a3254..995a6794fa83 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -1,3 +1,4 @@ +//go:build !386 // +build !386 /* From 43b19ef0e473c675b0ec7666a9856bf5edd7439e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 26 Aug 2021 13:29:59 -0700 Subject: [PATCH 204/998] grpctest: extend use of mutex to guard more things (#4710) --- internal/grpctest/tlogger.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git 
a/internal/grpctest/tlogger.go b/internal/grpctest/tlogger.go index 95c3598d1d5d..f1712245ec4a 100644 --- a/internal/grpctest/tlogger.go +++ b/internal/grpctest/tlogger.go @@ -49,11 +49,11 @@ const ( type tLogger struct { v int - t *testing.T - start time.Time initialized bool - m sync.Mutex // protects errors + mu sync.Mutex // guards t, start, and errors + t *testing.T + start time.Time errors map[*regexp.Regexp]int } @@ -76,6 +76,8 @@ func getCallingPrefix(depth int) (string, error) { // log logs the message with the specified parameters to the tLogger. func (g *tLogger) log(ltype logType, depth int, format string, args ...interface{}) { + g.mu.Lock() + defer g.mu.Unlock() prefix, err := getCallingPrefix(callingFrame + depth) if err != nil { g.t.Error(err) @@ -119,14 +121,14 @@ func (g *tLogger) log(ltype logType, depth int, format string, args ...interface // Update updates the testing.T that the testing logger logs to. Should be done // before every test. It also initializes the tLogger if it has not already. func (g *tLogger) Update(t *testing.T) { + g.mu.Lock() + defer g.mu.Unlock() if !g.initialized { grpclog.SetLoggerV2(TLogger) g.initialized = true } g.t = t g.start = time.Now() - g.m.Lock() - defer g.m.Unlock() g.errors = map[*regexp.Regexp]int{} } @@ -141,20 +143,20 @@ func (g *tLogger) ExpectError(expr string) { // ExpectErrorN declares an error to be expected n times. func (g *tLogger) ExpectErrorN(expr string, n int) { + g.mu.Lock() + defer g.mu.Unlock() re, err := regexp.Compile(expr) if err != nil { g.t.Error(err) return } - g.m.Lock() - defer g.m.Unlock() g.errors[re] += n } // EndTest checks if expected errors were not encountered. 
func (g *tLogger) EndTest(t *testing.T) { - g.m.Lock() - defer g.m.Unlock() + g.mu.Lock() + defer g.mu.Unlock() for re, count := range g.errors { if count > 0 { t.Errorf("Expected error '%v' not encountered", re.String()) @@ -165,8 +167,6 @@ func (g *tLogger) EndTest(t *testing.T) { // expected determines if the error string is protected or not. func (g *tLogger) expected(s string) bool { - g.m.Lock() - defer g.m.Unlock() for re, count := range g.errors { if re.FindStringIndex(s) != nil { g.errors[re]-- From 85b9a1a0fa3fc7ce6677ac19267b380ef0cf59a7 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 27 Aug 2021 08:18:29 -0700 Subject: [PATCH 205/998] xds: pass empty balancer.BuildOptions in clusterresolver_test (#4711) --- .../balancer/clusterresolver/clusterresolver_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 25ccbe3ca23f..6af81f89f1f3 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -207,7 +207,7 @@ func (s) TestSubConnStateChange(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) if edsB == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -253,7 +253,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) if edsB == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -341,7 +341,7 @@ func (s) 
TestErrorFromResolver(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) if edsB == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -443,7 +443,7 @@ func (s) TestClientWatchEDS(t *testing.T) { defer cleanup() builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) if edsB == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } From ef66d13abb84ad6c6d99c8cbf3697607b7891f32 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 30 Aug 2021 16:49:46 -0400 Subject: [PATCH 206/998] xds: Required Router Filter for both Client and Server side (#4676) * Added isTerminal() to FilterAPI and required router filter on Client and Server side --- xds/internal/httpfilter/fault/fault.go | 4 + xds/internal/httpfilter/httpfilter.go | 3 + xds/internal/httpfilter/router/router.go | 4 + xds/internal/server/listener_wrapper_test.go | 4 + xds/internal/testutils/e2e/clientresources.go | 5 + xds/internal/xdsclient/filter_chain.go | 2 +- xds/internal/xdsclient/filter_chain_test.go | 215 +++++++++++++++--- xds/internal/xdsclient/lds_test.go | 210 ++++++++++++++--- xds/internal/xdsclient/xds.go | 14 ++ xds/server_test.go | 4 + 10 files changed, 403 insertions(+), 62 deletions(-) diff --git a/xds/internal/httpfilter/fault/fault.go b/xds/internal/httpfilter/fault/fault.go index 42f8e70af93b..725b50a76a83 100644 --- a/xds/internal/httpfilter/fault/fault.go +++ b/xds/internal/httpfilter/fault/fault.go @@ -101,6 +101,10 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil return parseConfig(override) } 
+func (builder) IsTerminal() bool { + return false +} + var _ httpfilter.ClientInterceptorBuilder = builder{} func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { diff --git a/xds/internal/httpfilter/httpfilter.go b/xds/internal/httpfilter/httpfilter.go index 1f5f005e9bd2..3e10e4f34867 100644 --- a/xds/internal/httpfilter/httpfilter.go +++ b/xds/internal/httpfilter/httpfilter.go @@ -50,6 +50,9 @@ type Filter interface { // not accept a custom type. The resulting FilterConfig will later be // passed to Build. ParseFilterConfigOverride(proto.Message) (FilterConfig, error) + // IsTerminal returns whether this Filter is terminal or not (i.e. it must + // be last filter in the filter chain). + IsTerminal() bool } // ClientInterceptorBuilder constructs a Client Interceptor. If this type is diff --git a/xds/internal/httpfilter/router/router.go b/xds/internal/httpfilter/router/router.go index b0f9d9d9a1e9..1ac6518170fc 100644 --- a/xds/internal/httpfilter/router/router.go +++ b/xds/internal/httpfilter/router/router.go @@ -73,6 +73,10 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil return config{}, nil } +func (builder) IsTerminal() bool { + return true +} + var ( _ httpfilter.ClientInterceptorBuilder = builder{} _ httpfilter.ServerInterceptorBuilder = builder{} diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 4b8e5f857ccb..010b2044d405 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -34,6 +34,8 @@ import ( wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + _ "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/testutils/fakeclient" 
"google.golang.org/grpc/xds/internal/xdsclient" ) @@ -82,6 +84,7 @@ var listenerWithRouteConfiguration = &v3listenerpb.Listener{ RouteConfigName: "route-1", }, }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, }), }, }, @@ -143,6 +146,7 @@ var listenerWithFilterChains = &v3listenerpb.Listener{ Action: &v3routepb.Route_NonForwardingAction{}, }}}}}, }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, }), }, }, diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go index 8089534614ca..d1487374e35a 100644 --- a/xds/internal/testutils/e2e/clientresources.go +++ b/xds/internal/testutils/e2e/clientresources.go @@ -97,6 +97,9 @@ func DefaultClientResources(params ResourceParams) UpdateOptions { } } +// RouterHTTPFilter is the HTTP Filter configuration for the Router filter. +var RouterHTTPFilter = HTTPFilter("router", &v3routerpb.Router{}) + // DefaultClientListener returns a basic xds Listener resource to be used on // the client side. func DefaultClientListener(target, routeName string) *v3listenerpb.Listener { @@ -212,6 +215,7 @@ func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3 Action: &v3routepb.Route_NonForwardingAction{}, }}}}}, }, + HttpFilters: []*v3httppb.HttpFilter{RouterHTTPFilter}, }), }, }, @@ -256,6 +260,7 @@ func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3 Action: &v3routepb.Route_NonForwardingAction{}, }}}}}, }, + HttpFilters: []*v3httppb.HttpFilter{RouterHTTPFilter}, }), }, }, diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index c8230d693b93..715295b70b88 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -505,7 +505,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) // HTTPConnectionManager must have valid http_filters." 
- A36 filters, err := processHTTPFilters(hcm.GetHttpFilters(), true) if err != nil { - return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}", filters, hcm.GetHttpFilters()) + return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}: %v", filters, hcm.GetHttpFilters(), err) } if !seenHCM { // TODO: Implement terminal filter logic, as per A36. diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index 9e9d603c3837..55cbaed717c3 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -27,6 +27,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/google/go-cmp/cmp" @@ -36,6 +37,9 @@ import ( "google.golang.org/protobuf/types/known/wrapperspb" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/version" ) @@ -63,6 +67,7 @@ var ( RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -75,6 +80,11 @@ var ( Name: "serverOnlyCustomFilter2", ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, } + 
emptyRouterFilter = e2e.RouterHTTPFilter + routerBuilder = httpfilter.Get(router.TypeURL) + routerConfig, _ = routerBuilder.ParseFilterConfig(testutils.MarshalAny(&v3routerpb.Router{})) + routerFilter = HTTPFilter{Name: "router", Filter: routerBuilder, Config: routerConfig} + routerFilterList = []HTTPFilter{routerFilter} ) // TestNewFilterChainImpl_Failure_BadMatchFields verifies cases where we have a @@ -461,6 +471,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { RouteConfigName: "route-1", }, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -481,6 +492,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { RouteConfigName: "route-1", }, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -497,6 +509,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { srcPortMap: map[int]*FilterChain{ 0: { RouteConfigName: "route-1", + HTTPFilters: routerFilterList, }, }, }, @@ -507,6 +520,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { }, def: &FilterChain{ RouteConfigName: "route-1", + HTTPFilters: routerFilterList, }, RouteConfigNames: map[string]bool{"route-1": true}, }, @@ -525,6 +539,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -540,6 +555,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -556,6 +572,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { srcPortMap: map[int]*FilterChain{ 0: { InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -566,6 +583,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { }, def: &FilterChain{ 
InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -590,6 +608,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { RouteConfigName: "route-1", }, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -610,6 +629,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { RouteConfigName: "route-2", }, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -626,6 +646,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { srcPortMap: map[int]*FilterChain{ 0: { RouteConfigName: "route-1", + HTTPFilters: routerFilterList, }, }, }, @@ -636,6 +657,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { }, def: &FilterChain{ RouteConfigName: "route-2", + HTTPFilters: routerFilterList, }, RouteConfigNames: map[string]bool{ "route-1": true, @@ -675,12 +697,14 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { { Name: "hcm", ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ Rds: &v3httppb.Rds{ RouteConfigName: "route-1", }, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -698,6 +722,7 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { RouteConfigName: "route-1", }, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -718,6 +743,7 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { ConfigType: &v3listenerpb.Filter_TypedConfig{ TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -731,6 +757,7 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { ConfigType: &v3listenerpb.Filter_TypedConfig{ TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ RouteSpecifier: 
&v3httppb.HttpConnectionManager_ScopedRoutes{}, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -846,6 +873,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, + emptyRouterFilter, }, RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -864,6 +892,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, + emptyRouterFilter, }, RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -888,6 +917,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, + routerFilter, }, InlineRouteConfig: inlineRouteConfig, }, @@ -899,11 +929,14 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { }, }, def: &FilterChain{ - HTTPFilters: []HTTPFilter{{ - Name: "serverOnlyCustomFilter", - Filter: serverOnlyHTTPFilter{}, - Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, - }}, + HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + routerFilter, + }, InlineRouteConfig: inlineRouteConfig, }, }, @@ -922,6 +955,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, validServerSideHTTPFilter2, + emptyRouterFilter, }, RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -941,6 +975,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, validServerSideHTTPFilter2, + emptyRouterFilter, }, 
RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -970,6 +1005,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, + routerFilter, }, InlineRouteConfig: inlineRouteConfig, }, @@ -991,6 +1027,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, + routerFilter, }, InlineRouteConfig: inlineRouteConfig, }, @@ -1012,6 +1049,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, validServerSideHTTPFilter2, + emptyRouterFilter, }, RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -1025,6 +1063,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, + emptyRouterFilter, }, RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -1044,6 +1083,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, validServerSideHTTPFilter2, + emptyRouterFilter, }, RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -1057,6 +1097,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ HttpFilters: []*v3httppb.HttpFilter{ validServerSideHTTPFilter1, + emptyRouterFilter, }, RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, @@ -1086,6 +1127,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, + routerFilter, }, 
InlineRouteConfig: inlineRouteConfig, }, @@ -1107,6 +1149,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { Filter: serverOnlyHTTPFilter{}, Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, }, + routerFilter, }, InlineRouteConfig: inlineRouteConfig, }, @@ -1156,7 +1199,10 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1164,7 +1210,10 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { }, }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1219,6 +1268,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { IdentityCertName: "identityCertName", }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1233,6 +1283,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { IdentityCertName: "defaultIdentityCertName", }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1306,6 +1357,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { RequireClientCert: true, }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1323,6 +1375,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { RequireClientCert: true, }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1353,7 +1406,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + 
HTTPFilters: routerFilterList, + }, }, }, }, @@ -1389,7 +1445,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { dstPrefixMap: map[string]*destPrefixEntry{ unspecifiedPrefixMapKey: unspecifiedEntry, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1418,7 +1477,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { net: ipNetFromCIDR("192.168.2.2/16"), }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1447,7 +1509,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { net: ipNetFromCIDR("192.168.2.2/16"), }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1476,7 +1541,10 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { net: ipNetFromCIDR("192.168.2.2/16"), }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, } @@ -1546,7 +1614,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1562,7 +1633,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ 
-1578,7 +1652,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1592,7 +1669,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1606,7 +1686,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1614,7 +1697,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1644,7 +1730,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1660,7 +1749,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1668,7 +1760,10 @@ func 
TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1698,7 +1793,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { "10.0.0.0/8": { net: ipNetFromCIDR("10.0.0.0/8"), srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1713,7 +1811,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { "192.168.0.0/16": { net: ipNetFromCIDR("192.168.0.0/16"), srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1721,7 +1822,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1752,9 +1856,18 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 1: {InlineRouteConfig: inlineRouteConfig}, - 2: {InlineRouteConfig: inlineRouteConfig}, - 3: {InlineRouteConfig: inlineRouteConfig}, + 1: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 2: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 3: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1771,9 +1884,18 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { "192.168.0.0/16": { net: ipNetFromCIDR("192.168.0.0/16"), srcPortMap: map[int]*FilterChain{ - 1: {InlineRouteConfig: inlineRouteConfig}, - 2: {InlineRouteConfig: 
inlineRouteConfig}, - 3: {InlineRouteConfig: inlineRouteConfig}, + 1: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 2: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 3: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1781,7 +1903,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { }, }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, { @@ -1855,7 +1980,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1869,7 +1997,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1883,7 +2014,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1903,7 +2037,10 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { srcTypeArr: [3]*sourcePrefixes{}, }, }, - def: &FilterChain{InlineRouteConfig: inlineRouteConfig}, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, } @@ -2178,6 +2315,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: 
&SecurityConfig{IdentityInstanceName: "default"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, { @@ -2192,6 +2330,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: &SecurityConfig{IdentityInstanceName: "unspecified-dest-and-source-prefix"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, { @@ -2206,6 +2345,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-prefixes-v4"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, { @@ -2220,6 +2360,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-source-prefix-v6"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, { @@ -2234,6 +2375,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-unspecified-source-type"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, { @@ -2248,6 +2390,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, { @@ -2262,6 +2405,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, { @@ -2276,6 +2420,7 @@ func TestLookup_Successes(t *testing.T) { wantFC: &FilterChain{ SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"}, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: 
routerFilterList, }, }, } diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index 9960349816e8..8b9dc7135004 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -33,6 +33,8 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/httpfilter" + _ "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/version" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" @@ -139,6 +141,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, }}}}}}}, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ MaxStreamDuration: durationpb.New(time.Second), }, @@ -146,6 +149,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, }) v3LisWithFilters = func(fs ...*v3httppb.HttpFilter) *anypb.Any { + fs = append(fs, emptyRouterFilter) return testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ @@ -290,24 +294,52 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { name: "v3 with no filters", resources: []*anypb.Any{v3LisWithFilters()}, wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, + v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, Version: testVersion, }, }, + { + name: "v3 no terminal filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + 
RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + }), + }, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: true, + }, { name: "v3 with custom filter", resources: []*anypb.Any{v3LisWithFilters(customFilter)}, wantUpdate: map[string]ListenerUpdate{ v3LDSTarget: { RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }}, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, + routerFilter, + }, Raw: v3LisWithFilters(customFilter), }, }, @@ -322,11 +354,14 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { wantUpdate: map[string]ListenerUpdate{ v3LDSTarget: { RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterTypedStructConfig}, - }}, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterTypedStructConfig}, + }, + routerFilter, + }, Raw: v3LisWithFilters(typedStructFilter), }, }, @@ -341,11 +376,14 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { wantUpdate: map[string]ListenerUpdate{ v3LDSTarget: { RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }}, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: 
httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, + routerFilter, + }, Raw: v3LisWithFilters(customOptionalFilter), }, }, @@ -375,7 +413,9 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Name: "customFilter2", Filter: httpFilter{}, Config: filterConfig{Cfg: customFilterConfig}, - }}, + }, + routerFilter, + }, Raw: v3LisWithFilters(customFilter, customFilter2), }, }, @@ -399,6 +439,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(serverOnlyOptionalCustomFilter), + HTTPFilters: routerFilterList, }, }, wantMD: UpdateMetadata{ @@ -412,11 +453,13 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { wantUpdate: map[string]ListenerUpdate{ v3LDSTarget: { RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "clientOnlyCustomFilter", - Filter: clientOnlyHTTPFilter{}, - Config: filterConfig{Cfg: clientOnlyCustomFilterConfig}, - }}, + HTTPFilters: []HTTPFilter{ + { + Name: "clientOnlyCustomFilter", + Filter: clientOnlyHTTPFilter{}, + Config: filterConfig{Cfg: clientOnlyCustomFilterConfig}, + }, + routerFilter}, Raw: v3LisWithFilters(clientOnlyCustomFilter), }, }, @@ -453,6 +496,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { v3LDSTarget: { RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, Raw: v3LisWithFilters(unknownOptionalFilter), }, }, @@ -476,7 +520,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { name: "v3 listener resource", resources: []*anypb.Any{v3LisWithFilters()}, wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, + v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}, }, wantMD: UpdateMetadata{ Status: 
ServiceStatusACKed, @@ -495,6 +539,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }}}, MaxStreamDuration: time.Second, Raw: v3LisWithInlineRoute, + HTTPFilters: routerFilterList, }, }, wantMD: UpdateMetadata{ @@ -507,7 +552,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { resources: []*anypb.Any{v2Lis, v3LisWithFilters()}, wantUpdate: map[string]ListenerUpdate{ v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, + v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -530,7 +575,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, wantUpdate: map[string]ListenerUpdate{ v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, + v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}, "bad": {}, }, wantMD: errMD, @@ -561,6 +606,10 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { ) var ( + serverOnlyCustomFilter = &v3httppb.HttpFilter{ + Name: "serverOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, + } routeConfig = &v3routepb.RouteConfiguration{ Name: "routeName", VirtualHosts: []*v3routepb.VirtualHost{{ @@ -584,6 +633,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, }), }, }, @@ -827,6 +877,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, 
}, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -837,6 +888,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ RouteConfig: routeConfig, }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, }), }, }, @@ -848,6 +900,89 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { wantMD: errMD, wantErr: "duplicate filter name", }, + { + name: "no terminal filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "http filters list is empty", + }, + { + name: "terminal filter not last", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter, serverOnlyCustomFilter}, + }), + }, + }, + }, + }, + }, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "is a terminal filter but it is not last in the filter chain", + }, + { + name: "last not terminal filter", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: 
v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{serverOnlyCustomFilter}, + }), + }, + }, + }, + }, + }, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "is not a terminal filter", + }, { name: "unsupported oneof in typed config of http filter", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ @@ -1076,7 +1211,10 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { srcPrefixMap: map[string]*sourcePrefixEntry{ unspecifiedPrefixMapKey: { srcPortMap: map[int]*FilterChain{ - 0: {InlineRouteConfig: inlineRouteConfig}, + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, }, }, }, @@ -1170,6 +1308,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { IdentityCertName: "identityCertName", }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1184,6 +1323,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { IdentityCertName: "defaultIdentityCertName", }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1220,6 +1360,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { RequireClientCert: true, }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1237,6 +1378,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { RequireClientCert: true, }, InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1291,6 +1433,10 @@ func (httpFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter. 
return filterConfig{Override: override}, nil } +func (httpFilter) IsTerminal() bool { + return false +} + // errHTTPFilter returns errors no matter what is passed to ParseFilterConfig. type errHTTPFilter struct { httpfilter.ClientInterceptorBuilder @@ -1306,6 +1452,10 @@ func (errHTTPFilter) ParseFilterConfigOverride(override proto.Message) (httpfilt return nil, fmt.Errorf("error from ParseFilterConfigOverride") } +func (errHTTPFilter) IsTerminal() bool { + return false +} + func init() { httpfilter.Register(httpFilter{}) httpfilter.Register(errHTTPFilter{}) @@ -1328,6 +1478,10 @@ func (serverOnlyHTTPFilter) ParseFilterConfigOverride(override proto.Message) (h return filterConfig{Override: override}, nil } +func (serverOnlyHTTPFilter) IsTerminal() bool { + return false +} + // clientOnlyHTTPFilter does not implement ServerInterceptorBuilder type clientOnlyHTTPFilter struct { httpfilter.ClientInterceptorBuilder @@ -1343,6 +1497,10 @@ func (clientOnlyHTTPFilter) ParseFilterConfigOverride(override proto.Message) (h return filterConfig{Override: override}, nil } +func (clientOnlyHTTPFilter) IsTerminal() bool { + return false +} + var customFilterConfig = &anypb.Any{ TypeUrl: "custom.filter", Value: []byte{1, 2, 3}, diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index d6c44bad48ab..27367996bfc4 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -244,6 +244,20 @@ func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilt // Save name/config ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) } + // "Validation will fail if a terminal filter is not the last filter in the + // chain or if a non-terminal filter is the last filter in the chain." 
- A39 + if len(ret) == 0 { + return nil, fmt.Errorf("http filters list is empty") + } + var i int + for ; i < len(ret)-1; i++ { + if ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) + } + } + if !ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) + } return ret, nil } diff --git a/xds/server_test.go b/xds/server_test.go index 84622de0dadb..680b65620507 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -40,7 +40,9 @@ import ( "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + _ "google.golang.org/grpc/xds/internal/httpfilter/router" xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -105,6 +107,7 @@ var listenerWithFilterChains = &v3listenerpb.Listener{ Action: &v3routepb.Route_NonForwardingAction{}, }}}}}, }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, }), }, }, @@ -769,6 +772,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { Action: &v3routepb.Route_NonForwardingAction{}, }}}}}, }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, }), }, }, From 198d951db5082bddddd36e53efa8e9cbc924a228 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 31 Aug 2021 09:27:06 -0400 Subject: [PATCH 207/998] xds: Instantiated HTTP Filters on Server Side (#4669) * Instantiated HTTP Filters on Server Side --- internal/resolver/config_selector.go | 7 +- xds/internal/resolver/serviceconfig.go | 8 +- xds/internal/resolver/xds_resolver_test.go | 4 +- xds/internal/server/conn_wrapper.go | 4 + 
xds/internal/server/listener_wrapper.go | 42 +++- xds/internal/xdsclient/filter_chain.go | 77 +++++++ xds/internal/xdsclient/filter_chain_test.go | 205 ++++++++++++++++++ .../{resolver => xdsclient}/matcher.go | 25 ++- .../{resolver => xdsclient}/matcher_path.go | 2 +- .../matcher_path_test.go | 2 +- .../{resolver => xdsclient}/matcher_test.go | 12 +- 11 files changed, 354 insertions(+), 34 deletions(-) rename xds/internal/{resolver => xdsclient}/matcher.go (85%) rename xds/internal/{resolver => xdsclient}/matcher_path.go (99%) rename xds/internal/{resolver => xdsclient}/matcher_path_test.go (99%) rename xds/internal/{resolver => xdsclient}/matcher_test.go (94%) diff --git a/internal/resolver/config_selector.go b/internal/resolver/config_selector.go index 5e7f36703d4b..be7e13d58597 100644 --- a/internal/resolver/config_selector.go +++ b/internal/resolver/config_selector.go @@ -117,9 +117,12 @@ type ClientInterceptor interface { NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) } -// ServerInterceptor is unimplementable; do not use. +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. type ServerInterceptor interface { - notDefined() + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. 
} type csKeyType string diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 9eaff52dbcce..8bfa92cfa883 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -117,8 +117,8 @@ type routeCluster struct { } type route struct { - m *compositeMatcher // converted from route matchers - clusters wrr.WRR // holds *routeCluster entries + m *xdsclient.CompositeMatcher // converted from route matchers + clusters wrr.WRR // holds *routeCluster entries maxStreamDuration time.Duration // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig @@ -146,7 +146,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP var rt *route // Loop through routes in order and select first match. for _, r := range cs.routes { - if r.m.match(rpcInfo) { + if r.m.Match(rpcInfo) { rt = &r break } @@ -350,7 +350,7 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro cs.routes[i].clusters = clusters var err error - cs.routes[i].m, err = routeToMatcher(rt) + cs.routes[i].m, err = xdsclient.RouteToMatcher(rt) if err != nil { return nil, err } diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index d0ebd3f5035b..b79514f8bba0 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -1391,13 +1391,13 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { func replaceRandNumGenerator(start int64) func() { nextInt := start - grpcrandInt63n = func(int64) (ret int64) { + xdsclient.RandInt63n = func(int64) (ret int64) { ret = nextInt nextInt++ return } return func() { - grpcrandInt63n = grpcrand.Int63n + xdsclient.RandInt63n = grpcrand.Int63n } } diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index 43be4673655a..e4ca1c8c055e 100644 --- a/xds/internal/server/conn_wrapper.go +++ 
b/xds/internal/server/conn_wrapper.go @@ -58,6 +58,10 @@ type connWrapper struct { // completing the HTTP2 handshake. deadlineMu sync.Mutex deadline time.Time + + // The virtual hosts with matchable routes and instantiated HTTP Filters per + // route. + virtualHosts []xdsclient.VirtualHostWithInterceptors } // SetDeadline makes a copy of the passed in deadline and forwards the call to diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 821424a11a36..f730ca3c97dc 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -21,10 +21,13 @@ package server import ( + "errors" "fmt" "net" "sync" + "sync/atomic" "time" + "unsafe" "google.golang.org/grpc/backoff" "google.golang.org/grpc/grpclog" @@ -208,12 +211,13 @@ type listenerWrapper struct { mode ServingMode // Filter chains received as part of the last good update. filterChains *xdsclient.FilterChainManager + // rdsHandler is used for any dynamic RDS resources specified in a LDS // update. rdsHandler *rdsHandler // rdsUpdates are the RDS resources received from the management // server, keyed on the RouteName of the RDS resource. - rdsUpdates map[string]xdsclient.RouteConfigUpdate // TODO: if this will be read in accept, this will need a read lock as well. + rdsUpdates unsafe.Pointer // map[string]xdsclient.RouteConfigUpdate // ldsUpdateCh is a channel for XDSClient LDS updates. ldsUpdateCh chan ldsUpdateWithError // rdsUpdateCh is a channel for XDSClient RDS updates. @@ -297,11 +301,35 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } - // TODO: once matched an accepted connection to a filter chain, - // instantiate the HTTP filters in the filter chain + the filter - // overrides, pipe filters and route into connection, which will - // eventually be passed to xdsUnary/Stream interceptors. 
- return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil + var rc xdsclient.RouteConfigUpdate + if fc.InlineRouteConfig != nil { + rc = *fc.InlineRouteConfig + } else { + rcPtr := atomic.LoadPointer(&l.rdsUpdates) + rcuPtr := (*map[string]xdsclient.RouteConfigUpdate)(rcPtr) + // This shouldn't happen, but this error protects against a panic. + if rcuPtr == nil { + return nil, errors.New("route configuration pointer is nil") + } + rcu := *rcuPtr + rc = rcu[fc.RouteConfigName] + } + // The filter chain will construct a usuable route table on each + // connection accept. This is done because preinstantiating every route + // table before it is needed for a connection would potentially lead to + // a lot of cpu time and memory allocated for route tables that will + // never be used. There was also a thought to cache this configuration, + // and reuse it for the next accepted connection. However, this would + // lead to a lot of code complexity (RDS Updates for a given route name + // can come it at any time), and connections aren't accepted too often, + // so this reinstantation of the Route Configuration is an acceptable + // tradeoff for simplicity. + if err := fc.ConstructUsableRouteConfiguration(rc); err != nil { + l.logger.Warningf("route configuration construction: %v", err) + conn.Close() + continue + } + return &connWrapper{Conn: conn, filterChain: fc, parent: l, virtualHosts: fc.VirtualHosts}, nil } } @@ -367,7 +395,7 @@ func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { // continue to use the old configuration. 
return } - l.rdsUpdates = update.updates + atomic.StorePointer(&l.rdsUpdates, unsafe.Pointer(&update.updates)) l.switchMode(l.filterChains, ServingModeServing, nil) l.goodUpdate.Fire() diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 715295b70b88..ffaaf710f302 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -28,6 +28,8 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" ) @@ -65,6 +67,81 @@ type FilterChain struct { // // Only one of RouteConfigName and InlineRouteConfig is set. InlineRouteConfig *RouteConfigUpdate + + // VirtualHosts are the virtual hosts ready to be used in the xds interceptors. + // It contains a way to match routes using a matcher and also instantiates + // HTTPFilter overrides to simply run incoming RPC's through if they are selected. + VirtualHosts []VirtualHostWithInterceptors +} + +// VirtualHostWithInterceptors captures information present in a VirtualHost +// update, and also contains routes with instantiated HTTP Filters. +type VirtualHostWithInterceptors struct { + // Domains are the domain names which map to this Virtual Host. On the + // server side, this will be dictated by the :authority header of the + // incoming RPC. + Domains []string + // Routes are the Routes for this Virtual Host. + Routes []RouteWithInterceptors +} + +// RouteWithInterceptors captures information in a Route, and contains +// a usable matcher and also instantiated HTTP Filters. +type RouteWithInterceptors struct { + // M is the matcher used to match to this route. + M *CompositeMatcher + // Interceptors are interceptors instantiated for this route. 
These will be + // constructed from a combination of the top level configuration and any + // HTTP Filter overrides present in Virtual Host or Route. + Interceptors []resolver.ServerInterceptor +} + +// ConstructUsableRouteConfiguration takes Route Configuration and converts it +// into matchable route configuration, with instantiated HTTP Filters per route. +func (f *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) error { + vhs := make([]VirtualHostWithInterceptors, len(config.VirtualHosts)) + for _, vh := range config.VirtualHosts { + vhwi, err := f.convertVirtualHost(vh) + if err != nil { + return fmt.Errorf("virtual host construction: %v", err) + } + vhs = append(vhs, vhwi) + } + f.VirtualHosts = vhs + return nil +} + +func (f *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostWithInterceptors, error) { + rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) + for i, r := range virtualHost.Routes { + var err error + rs[i].M, err = RouteToMatcher(r) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("matcher construction: %v", err) + } + for _, filter := range f.HTTPFilters { + // Route is highest priority on server side, as there is no concept + // of an upstream cluster on server side. + override := r.HTTPFilterConfigOverride[filter.Name] + if override == nil { + // Virtual Host is second priority. + override = virtualHost.HTTPFilterConfigOverride[filter.Name] + } + sb, ok := filter.Filter.(httpfilter.ServerInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation. 
+ return VirtualHostWithInterceptors{}, fmt.Errorf("filter does not support use in server") + } + si, err := sb.BuildServerInterceptor(filter.Config, override) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("filter construction: %v", err) + } + if si != nil { + rs[i].Interceptors = append(rs[i].Interceptors, si) + } + } + } + return VirtualHostWithInterceptors{Domains: virtualHost.Domains, Routes: rs}, nil } // SourceType specifies the connection source IP match type. diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index 55cbaed717c3..a3ef42f87608 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -19,6 +19,8 @@ package xdsclient import ( + "context" + "errors" "fmt" "net" "strings" @@ -36,6 +38,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/wrapperspb" + iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" @@ -43,6 +46,12 @@ import ( "google.golang.org/grpc/xds/internal/version" ) +const ( + topLevel = "top level" + vhLevel = "virtual host level" + rLevel = "route level" +) + var ( routeConfig = &v3routepb.RouteConfiguration{ Name: "routeName", @@ -2442,6 +2451,202 @@ func TestLookup_Successes(t *testing.T) { } } +type filterCfg struct { + httpfilter.FilterConfig + // Level is what differentiates top level filters ("top level") vs. second + // level ("virtual host level"), and third level ("route level"). 
+ level string +} + +type filterBuilder struct { + httpfilter.Filter +} + +var _ httpfilter.ServerInterceptorBuilder = &filterBuilder{} + +func (fb *filterBuilder) BuildServerInterceptor(config httpfilter.FilterConfig, override httpfilter.FilterConfig) (iresolver.ServerInterceptor, error) { + var level string + level = config.(filterCfg).level + + if override != nil { + level = override.(filterCfg).level + } + return &serverInterceptor{level: level}, nil +} + +type serverInterceptor struct { + level string +} + +func (si *serverInterceptor) AllowRPC(context.Context) error { + return errors.New(si.level) +} + +func TestHTTPFilterInstantiation(t *testing.T) { + tests := []struct { + name string + filters []HTTPFilter + routeConfig RouteConfigUpdate + // A list of strings which will be built from iterating through the + // filters ["top level", "vh level", "route level", "route level"...] + // wantErrs is the list of error strings that will be constructed from + // the deterministic iteration through the vh list and route list. The + // error string will be determined by the level of config that the + // filter builder receives (i.e. top level, vs. virtual host level vs. + // route level). 
+ wantErrs []string + }{ + { + name: "one http filter no overrides", + filters: []HTTPFilter{ + {Name: "server-interceptor", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + }, + }, + }, + }}, + wantErrs: []string{topLevel}, + }, + { + name: "one http filter vh override", + filters: []HTTPFilter{ + {Name: "server-interceptor", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor": filterCfg{level: vhLevel}, + }, + }, + }}, + wantErrs: []string{vhLevel}, + }, + { + name: "one http filter route override", + filters: []HTTPFilter{ + {Name: "server-interceptor", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor": filterCfg{level: rLevel}, + }, + }, + }, + }, + }}, + wantErrs: []string{rLevel}, + }, + // This tests the scenario where there are three http filters, and one + // gets overridden by route and one by virtual host. 
+ { + name: "three http filters vh override route override", + filters: []HTTPFilter{ + {Name: "server-interceptor1", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor2", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor3", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor3": filterCfg{level: rLevel}, + }, + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor2": filterCfg{level: vhLevel}, + }, + }, + }}, + wantErrs: []string{topLevel, vhLevel, rLevel}, + }, + // This tests the scenario where there are three http filters, and two + // virtual hosts with different vh + route overrides for each virtual + // host. + { + name: "three http filters two vh", + filters: []HTTPFilter{ + {Name: "server-interceptor1", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor2", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor3", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor3": filterCfg{level: rLevel}, + }, + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor2": filterCfg{level: vhLevel}, + }, + }, + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor1": filterCfg{level: rLevel}, + "server-interceptor2": filterCfg{level: 
rLevel}, + }, + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor2": filterCfg{level: vhLevel}, + "server-interceptor3": filterCfg{level: vhLevel}, + }, + }, + }}, + wantErrs: []string{topLevel, vhLevel, rLevel, rLevel, rLevel, vhLevel}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fc := FilterChain{ + HTTPFilters: test.filters, + } + fc.ConstructUsableRouteConfiguration(test.routeConfig) + // Build out list of errors by iterating through the virtual hosts and routes, + // and running the filters in route configurations. + var errs []string + for _, vh := range fc.VirtualHosts { + for _, r := range vh.Routes { + for _, int := range r.Interceptors { + errs = append(errs, int.AllowRPC(context.Background()).Error()) + } + } + } + if !cmp.Equal(errs, test.wantErrs) { + t.Fatalf("List of errors %v, want %v", errs, test.wantErrs) + } + }) + } +} + // The Equal() methods defined below help with using cmp.Equal() on these types // which contain all unexported fields. diff --git a/xds/internal/resolver/matcher.go b/xds/internal/xdsclient/matcher.go similarity index 85% rename from xds/internal/resolver/matcher.go rename to xds/internal/xdsclient/matcher.go index 4a0a0ca149ae..324e1f4dcab2 100644 --- a/xds/internal/resolver/matcher.go +++ b/xds/internal/xdsclient/matcher.go @@ -16,7 +16,7 @@ * */ -package resolver +package xdsclient import ( "fmt" @@ -27,10 +27,10 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/xds/internal/xdsclient" ) -func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { +// RouteToMatcher converts a route to a Matcher to match incoming RPC's against. 
+func RouteToMatcher(r *Route) (*CompositeMatcher, error) { var pm pathMatcher switch { case r.Regex != nil: @@ -75,18 +75,20 @@ func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { return newCompositeMatcher(pm, headerMatchers, fractionMatcher), nil } -// compositeMatcher.match returns true if all matchers return true. -type compositeMatcher struct { +// CompositeMatcher is a matcher that holds onto many matchers and aggregates +// the matching results. +type CompositeMatcher struct { pm pathMatcher hms []matcher.HeaderMatcher fm *fractionMatcher } -func newCompositeMatcher(pm pathMatcher, hms []matcher.HeaderMatcher, fm *fractionMatcher) *compositeMatcher { - return &compositeMatcher{pm: pm, hms: hms, fm: fm} +func newCompositeMatcher(pm pathMatcher, hms []matcher.HeaderMatcher, fm *fractionMatcher) *CompositeMatcher { + return &CompositeMatcher{pm: pm, hms: hms, fm: fm} } -func (a *compositeMatcher) match(info iresolver.RPCInfo) bool { +// Match returns true if all matchers return true. +func (a *CompositeMatcher) Match(info iresolver.RPCInfo) bool { if a.pm != nil && !a.pm.match(info.Method) { return false } @@ -119,7 +121,7 @@ func (a *compositeMatcher) match(info iresolver.RPCInfo) bool { return true } -func (a *compositeMatcher) String() string { +func (a *CompositeMatcher) String() string { var ret string if a.pm != nil { ret += a.pm.String() @@ -141,10 +143,11 @@ func newFractionMatcher(fraction uint32) *fractionMatcher { return &fractionMatcher{fraction: int64(fraction)} } -var grpcrandInt63n = grpcrand.Int63n +// RandInt63n overwrites grpcrand for control in tests. 
+var RandInt63n = grpcrand.Int63n func (fm *fractionMatcher) match() bool { - t := grpcrandInt63n(1000000) + t := RandInt63n(1000000) return t <= fm.fraction } diff --git a/xds/internal/resolver/matcher_path.go b/xds/internal/xdsclient/matcher_path.go similarity index 99% rename from xds/internal/resolver/matcher_path.go rename to xds/internal/xdsclient/matcher_path.go index 88a04f6d7bef..a00c6954ef53 100644 --- a/xds/internal/resolver/matcher_path.go +++ b/xds/internal/xdsclient/matcher_path.go @@ -16,7 +16,7 @@ * */ -package resolver +package xdsclient import ( "regexp" diff --git a/xds/internal/resolver/matcher_path_test.go b/xds/internal/xdsclient/matcher_path_test.go similarity index 99% rename from xds/internal/resolver/matcher_path_test.go rename to xds/internal/xdsclient/matcher_path_test.go index 263a049108e4..a211034a60dd 100644 --- a/xds/internal/resolver/matcher_path_test.go +++ b/xds/internal/xdsclient/matcher_path_test.go @@ -16,7 +16,7 @@ * */ -package resolver +package xdsclient import ( "regexp" diff --git a/xds/internal/resolver/matcher_test.go b/xds/internal/xdsclient/matcher_test.go similarity index 94% rename from xds/internal/resolver/matcher_test.go rename to xds/internal/xdsclient/matcher_test.go index 6143dd9f46e1..000adf49e289 100644 --- a/xds/internal/resolver/matcher_test.go +++ b/xds/internal/xdsclient/matcher_test.go @@ -16,7 +16,7 @@ * */ -package resolver +package xdsclient import ( "context" @@ -107,7 +107,7 @@ func TestAndMatcherMatch(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { a := newCompositeMatcher(tt.pm, []matcher.HeaderMatcher{tt.hm}, nil) - if got := a.match(tt.info); got != tt.want { + if got := a.Match(tt.info); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -118,11 +118,11 @@ func TestFractionMatcherMatch(t *testing.T) { const fraction = 500000 fm := newFractionMatcher(fraction) defer func() { - grpcrandInt63n = grpcrand.Int63n + RandInt63n = grpcrand.Int63n }() 
// rand > fraction, should return false. - grpcrandInt63n = func(n int64) int64 { + RandInt63n = func(n int64) int64 { return fraction + 1 } if matched := fm.match(); matched { @@ -130,7 +130,7 @@ func TestFractionMatcherMatch(t *testing.T) { } // rand == fraction, should return true. - grpcrandInt63n = func(n int64) int64 { + RandInt63n = func(n int64) int64 { return fraction } if matched := fm.match(); !matched { @@ -138,7 +138,7 @@ func TestFractionMatcherMatch(t *testing.T) { } // rand < fraction, should return true. - grpcrandInt63n = func(n int64) int64 { + RandInt63n = func(n int64) int64 { return fraction - 1 } if matched := fm.match(); !matched { From f7d66b5846f00b6ab0b41a675aef9764176830fa Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Tue, 31 Aug 2021 13:42:43 -0700 Subject: [PATCH 208/998] Change to a non-workload-identity GKE cluster (#4723) --- test/kokoro/xds_url_map.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh index f30fd1a15b57..865119a57def 100755 --- a/test/kokoro/xds_url_map.sh +++ b/test/kokoro/xds_url_map.sh @@ -18,8 +18,8 @@ set -eo pipefail # Constants readonly GITHUB_REPOSITORY_NAME="grpc-go" # GKE Cluster -readonly GKE_CLUSTER_NAME="interop-test-psm-sec-v2-us-central1-a" -readonly GKE_CLUSTER_ZONE="us-central1-a" +readonly GKE_CLUSTER_NAME="interop-test-psm-basic" +readonly GKE_CLUSTER_ZONE="us-central1-c" ## xDS test client Docker images readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" From ed501aa1fd1d368d77e17de619046e2e1ebb82a9 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 1 Sep 2021 20:08:00 +0200 Subject: [PATCH 209/998] xds/internal/resolver: update github.com/cespare/xxhash to v2 (#4671) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit github.com/cespare/xxhash/v2 supports Go ≥ 1.11 and this 
package states 1.11 in its go.mod file. The only symbol used from the xxhash package is the Sum64String func which still exists and works the same in v2. This gets rid of two indirect dependencies. --- examples/go.sum | 8 ++------ go.mod | 2 +- go.sum | 8 ++------ security/advancedtls/examples/go.sum | 4 +--- security/advancedtls/go.sum | 4 +--- xds/internal/resolver/serviceconfig.go | 2 +- xds/internal/resolver/serviceconfig_test.go | 2 +- xds/internal/resolver/xds_resolver_test.go | 2 +- 8 files changed, 10 insertions(+), 22 deletions(-) diff --git a/examples/go.sum b/examples/go.sum index 4b9be0bebf85..16d17f9cdb0a 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1,12 +1,10 @@ cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go 
v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= @@ -42,8 +40,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= diff --git a/go.mod b/go.mod index 8cb729ba0d35..4387b31e387a 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module google.golang.org/grpc go 1.14 require ( - github.com/cespare/xxhash v1.1.0 + github.com/cespare/xxhash/v2 v2.1.1 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b diff --git a/go.sum b/go.sum index 372b4ea3d201..4ce2fc810e3f 100644 --- a/go.sum +++ b/go.sum @@ -2,13 +2,11 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0 
h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= @@ -54,8 +52,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 004437a7ea66..9a616f8b80da 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,8 +1,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -34,7 +33,6 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 004437a7ea66..9a616f8b80da 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,8 +1,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -34,7 +33,6 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod 
h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 8bfa92cfa883..27f6aab7ad09 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -27,7 +27,7 @@ import ( "sync/atomic" "time" - "github.com/cespare/xxhash" + xxhash "github.com/cespare/xxhash/v2" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index 0e7d5daaaf5d..568873ebbc71 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -24,7 +24,7 @@ import ( "regexp" "testing" - "github.com/cespare/xxhash" + xxhash "github.com/cespare/xxhash/v2" "github.com/google/go-cmp/cmp" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/metadata" diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index b79514f8bba0..3b147e4ff207 100644 --- 
a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -26,7 +26,7 @@ import ( "testing" "time" - "github.com/cespare/xxhash" + xxhash "github.com/cespare/xxhash/v2" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" From 51003aa81e09b20c1a74ec88c961a68902349143 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 1 Sep 2021 13:49:44 -0700 Subject: [PATCH 210/998] xds: start a management server per test (#4720) --- .../test/xds_client_integration_test.go | 11 ++- xds/internal/test/xds_integration_test.go | 81 +++++++------------ .../test/xds_server_integration_test.go | 37 +++++---- .../test/xds_server_serving_mode_test.go | 11 ++- 4 files changed, 68 insertions(+), 72 deletions(-) diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 9760e75363e2..40e6644fc1fa 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -65,13 +65,16 @@ func clientSetup(t *testing.T) (uint32, func()) { } func (s) TestClientSideXDS(t *testing.T) { - port, cleanup := clientSetup(t) - defer cleanup() + managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + port, cleanup2 := clientSetup(t) + defer cleanup2() const serviceName = "my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, - NodeID: xdsClientNodeID, + NodeID: nodeID, Host: "localhost", Port: port, SecLevel: e2e.SecurityLevelNone, @@ -81,7 +84,7 @@ func (s) TestClientSideXDS(t *testing.T) { } // Create a ClientConn and make a successful RPC. 
- cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsResolverBuilder)) + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index 1973ad4a1196..ef93df2efe62 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -29,16 +29,15 @@ import ( "encoding/json" "fmt" "io/ioutil" - "log" "os" "path" "testing" "time" "github.com/google/uuid" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" @@ -70,34 +69,6 @@ func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, er return &testpb.Empty{}, nil } -var ( - // Globals corresponding to the single instance of the xDS management server - // which is spawned for all the tests in this package. - managementServer *e2e.ManagementServer - xdsClientNodeID string - bootstrapContents []byte - xdsResolverBuilder resolver.Builder -) - -// TestMain sets up an xDS management server, runs all tests, and stops the -// management server. -func TestMain(m *testing.M) { - // The management server is started and stopped from here, but the leakcheck - // runs after every individual test. So, we need to skip the goroutine which - // spawns the management server and is blocked on the call to `Serve()`. 
- leakcheck.RegisterIgnoreGoroutine("e2e.StartManagementServer") - - cancel, err := setupManagementServer() - if err != nil { - log.Printf("setupManagementServer() failed: %v", err) - os.Exit(1) - } - - code := m.Run() - cancel() - os.Exit(code) -} - func createTmpFile(src, dst string) error { data, err := ioutil.ReadFile(src) if err != nil { @@ -159,37 +130,45 @@ func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { // setupManagement server performs the following: // - spin up an xDS management server on a local port // - set up certificates for consumption by the file_watcher plugin -// - sets up the global variables which refer to this management server and the -// nodeID to be used when talking to this management server. +// - creates a bootstrap file in a temporary location +// - creates an xDS resolver using the above bootstrap contents // -// Returns a function to be invoked by the caller to stop the management -// server. -func setupManagementServer() (cleanup func(), err error) { +// Returns the following: +// - management server +// - nodeID to be used by the client when connecting to the management server +// - bootstrap contents to be used by the client +// - xDS resolver builder to be used by the client +// - a cleanup function to be invoked at the end of the test +func setupManagementServer(t *testing.T) (*e2e.ManagementServer, string, []byte, resolver.Builder, func()) { + t.Helper() + // Turn on the env var protection for client-side security. origClientSideSecurityEnvVar := env.ClientSideSecuritySupport env.ClientSideSecuritySupport = true // Spin up an xDS management server on a local port. 
- managementServer, err = e2e.StartManagementServer() + server, err := e2e.StartManagementServer() if err != nil { - return nil, err + t.Fatalf("Failed to spin up the xDS management server: %v", err) } defer func() { if err != nil { - managementServer.Stop() + server.Stop() } }() // Create a directory to hold certs and key files used on the server side. serverDir, err := createTmpDirWithFiles("testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") if err != nil { - return nil, err + server.Stop() + t.Fatal(err) } // Create a directory to hold certs and key files used on the client side. clientDir, err := createTmpDirWithFiles("testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") if err != nil { - return nil, err + server.Stop() + t.Fatal(err) } // Create certificate providers section of the bootstrap config with entries @@ -200,24 +179,26 @@ func setupManagementServer() (cleanup func(), err error) { } // Create a bootstrap file in a temporary directory. 
- xdsClientNodeID = uuid.New().String() - bootstrapContents, err = xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ + nodeID := uuid.New().String() + bootstrapContents, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ Version: xdsinternal.TransportV3, - NodeID: xdsClientNodeID, - ServerURI: managementServer.Address, + NodeID: nodeID, + ServerURI: server.Address, CertificateProviders: cpc, ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, }) if err != nil { - return nil, err + server.Stop() + t.Fatalf("Failed to create bootstrap file: %v", err) } - xdsResolverBuilder, err = xds.NewXDSResolverWithConfigForTesting(bootstrapContents) + resolver, err := xds.NewXDSResolverWithConfigForTesting(bootstrapContents) if err != nil { - return nil, err + server.Stop() + t.Fatalf("Failed to create xDS resolver for testing: %v", err) } - return func() { - managementServer.Stop() + return server, nodeID, bootstrapContents, resolver, func() { + server.Stop() env.ClientSideSecuritySupport = origClientSideSecurityEnvVar - }, nil + } } diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 11ac8fc70c15..9aa309981fe0 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -56,7 +56,7 @@ const ( // Returns the following: // - local listener on which the xDS-enabled gRPC server is serving on // - cleanup function to be invoked by the tests when done -func setupGRPCServer(t *testing.T) (net.Listener, func()) { +func setupGRPCServer(t *testing.T, bootstrapContents []byte) (net.Listener, func()) { t.Helper() // Configure xDS credentials to be used on the server-side. @@ -111,8 +111,11 @@ func hostPortFromListener(lis net.Listener) (string, uint32, error) { // the client and the server. This results in both of them using the // configured fallback credentials (which is insecure creds in this case). 
func (s) TestServerSideXDS_Fallback(t *testing.T) { - lis, cleanup := setupGRPCServer(t) - defer cleanup() + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() // Grab the host and port of the server and create client side xDS resources // corresponding to it. This contains default resources with no security @@ -124,7 +127,7 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { const serviceName = "my-service-fallback" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, - NodeID: xdsClientNodeID, + NodeID: nodeID, Host: host, Port: port, SecLevel: e2e.SecurityLevelNone, @@ -152,7 +155,7 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { // Create a ClientConn with the xds scheme and make a successful RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(xdsResolverBuilder)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -190,8 +193,11 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - lis, cleanup := setupGRPCServer(t) - defer cleanup() + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() // Grab the host and port of the server and create client side xDS // resources corresponding to it. 
@@ -206,7 +212,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { serviceName := "my-service-file-watcher-certs-" + test.name resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, - NodeID: xdsClientNodeID, + NodeID: nodeID, Host: host, Port: port, SecLevel: test.secLevel, @@ -234,7 +240,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // Create a ClientConn with the xds scheme and make an RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(xdsResolverBuilder)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -257,8 +263,11 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // configuration pointing to the use of the file_watcher plugin and we verify // that the same client is now able to successfully make an RPC. func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { - lis, cleanup := setupGRPCServer(t) - defer cleanup() + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() // Grab the host and port of the server and create client side xDS resources // corresponding to it. 
This contains default resources with no security @@ -271,7 +280,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { const serviceName = "my-service-security-config-change" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, - NodeID: xdsClientNodeID, + NodeID: nodeID, Host: host, Port: port, SecLevel: e2e.SecurityLevelNone, @@ -299,7 +308,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { // Create a ClientConn with the xds scheme and make a successful RPC. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds), grpc.WithResolvers(xdsResolverBuilder)) + xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -329,7 +338,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { // security configuration for mTLS with a file watcher certificate provider. resources = e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, - NodeID: xdsClientNodeID, + NodeID: nodeID, Host: host, Port: port, SecLevel: e2e.SecurityLevelMTLS, diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 995a6794fa83..2178cf359bde 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -86,6 +86,9 @@ func (mt *modeTracker) waitForUpdate(ctx context.Context) error { // xDS enabled gRPC servers. It verifies that appropriate mode changes happen in // the server, and also verifies behavior of clientConns under these modes. 
func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { + managementServer, nodeID, bootstrapContents, _, cleanup := setupManagementServer(t) + defer cleanup() + // Configure xDS credentials to be used on the server-side. creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ FallbackCreds: insecure.NewCredentials(), @@ -131,7 +134,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } listener2 := e2e.DefaultServerListener(host2, port2, e2e.SecurityLevelNone) resources := e2e.UpdateOptions{ - NodeID: xdsClientNodeID, + NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, } if err := managementServer.Update(resources); err != nil { @@ -176,7 +179,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // Update the management server to remove the second listener resource. This // should push only the second listener into "not-serving" mode. if err := managementServer.Update(e2e.UpdateOptions{ - NodeID: xdsClientNodeID, + NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1}, }); err != nil { t.Error(err) @@ -193,7 +196,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // well. This should push the first listener into "not-serving" mode. Second // listener is already in "not-serving" mode. if err := managementServer.Update(e2e.UpdateOptions{ - NodeID: xdsClientNodeID, + NodeID: nodeID, Listeners: []*v3listenerpb.Listener{}, }); err != nil { t.Error(err) @@ -216,7 +219,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // Update the management server with both listener resources. 
if err := managementServer.Update(e2e.UpdateOptions{ - NodeID: xdsClientNodeID, + NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, }); err != nil { t.Error(err) From d6a5f5f4f3621542ec98cfed52c0620beab9fbd5 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 2 Sep 2021 10:49:35 -0700 Subject: [PATCH 211/998] ringhash: the ring (#4701) --- xds/internal/balancer/ringhash/ring.go | 170 ++++++++++++++++++++ xds/internal/balancer/ringhash/ring_test.go | 113 +++++++++++++ 2 files changed, 283 insertions(+) create mode 100644 xds/internal/balancer/ringhash/ring.go create mode 100644 xds/internal/balancer/ringhash/ring_test.go diff --git a/xds/internal/balancer/ringhash/ring.go b/xds/internal/balancer/ringhash/ring.go new file mode 100644 index 000000000000..1fe1744e4ac7 --- /dev/null +++ b/xds/internal/balancer/ringhash/ring.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "fmt" + "math" + "sort" + "strconv" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/resolver" +) + +// subConn is a placeholder struct needed by the ring. It will be moved to the +// balancer file, and will add more fields (balancer.SubConn, connectivity state +// and so on). 
+type subConn struct { + addr string +} + +type ring struct { + items []*ringEntry +} + +type subConnWithWeight struct { + sc *subConn + weight float64 +} + +type ringEntry struct { + idx int + hash uint64 + sc *subConn +} + +// newRing creates a ring from the subConns. The ring size is limited by the +// passed in max/min. +// +// ring entries will be created for each subConn, and subConn with high weight +// (specified by the address) may have multiple entries. +// +// For example, for subConns with weights {a:3, b:3, c:4}, a generated ring of +// size 10 could be: +// - {idx:0 hash:3689675255460411075 b} +// - {idx:1 hash:4262906501694543955 c} +// - {idx:2 hash:5712155492001633497 c} +// - {idx:3 hash:8050519350657643659 b} +// - {idx:4 hash:8723022065838381142 b} +// - {idx:5 hash:11532782514799973195 a} +// - {idx:6 hash:13157034721563383607 c} +// - {idx:7 hash:14468677667651225770 c} +// - {idx:8 hash:17336016884672388720 a} +// - {idx:9 hash:18151002094784932496 a} +// +// To pick from a ring, a binary search will be done for the given target hash, +// and first item with hash >= given hash will be returned. +func newRing(subConns map[resolver.Address]*subConn, minRingSize, maxRingSize uint64) (*ring, error) { + // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 + normalizedWeights, minWeight, err := normalizeWeights(subConns) + if err != nil { + return nil, err + } + // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. + + // Scale up the size of the ring such that the least-weighted host gets a + // whole number of hashes on the ring. + // + // Note that size is limited by the input max/min. + scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) + ringSize := math.Ceil(scale) + items := make([]*ringEntry, 0, int(ringSize)) + + // For each entry, scale*weight nodes are generated in the ring. 
+ // + // Not all of these are whole numbers. E.g. for weights {a:3,b:3,c:4}, if + // ring size is 7, scale is 6.66. The numbers of nodes will be + // {a,a,b,b,c,c,c}. + // + // A hash is generated for each item, and later the results will be sorted + // based on the hash. + var ( + idx int + targetIdx float64 + ) + for _, scw := range normalizedWeights { + targetIdx += scale * scw.weight + for float64(idx) < targetIdx { + h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(len(items))) + items = append(items, &ringEntry{idx: idx, hash: h, sc: scw.sc}) + idx++ + } + } + + // Sort items based on hash, to prepare for binary search. + sort.Slice(items, func(i, j int) bool { return items[i].hash < items[j].hash }) + for i, ii := range items { + ii.idx = i + } + return &ring{items: items}, nil +} + +// normalizeWeights divides all the weights by the sum, so that the total weight +// is 1. +func normalizeWeights(subConns map[resolver.Address]*subConn) (_ []subConnWithWeight, min float64, _ error) { + if len(subConns) == 0 { + return nil, 0, fmt.Errorf("number of subconns is 0") + } + var weightSum uint32 + for a := range subConns { + // The address weight was moved from attributes to the Metadata field. + // This is necessary (all the attributes need to be stripped) for the + // balancer to detect identical {address+weight} combination. + weightSum += a.Metadata.(uint32) + } + if weightSum == 0 { + return nil, 0, fmt.Errorf("total weight of all subconns is 0") + } + weightSumF := float64(weightSum) + ret := make([]subConnWithWeight, 0, len(subConns)) + min = math.MaxFloat64 + for a, sc := range subConns { + nw := float64(a.Metadata.(uint32)) / weightSumF + ret = append(ret, subConnWithWeight{sc: sc, weight: nw}) + if nw < min { + min = nw + } + } + // Sort the addresses to return consistent results. 
+ // + // Note: this might not be necessary, but this makes sure the ring is + // consistent as long as the addresses are the same, for example, in cases + // where an address is added and then removed, the RPCs will still pick the + // same old SubConn. + sort.Slice(ret, func(i, j int) bool { return ret[i].sc.addr < ret[j].sc.addr }) + return ret, min, nil +} + +// pick does a binary search. It returns the item with smallest index i that +// r.items[i].hash >= h. +func (r *ring) pick(h uint64) *ringEntry { + i := sort.Search(len(r.items), func(i int) bool { return r.items[i].hash >= h }) + if i == len(r.items) { + // If not found, and h is greater than the largest hash, return the + // first item. + i = 0 + } + return r.items[i] +} + +// next returns the next entry. +func (r *ring) next(e *ringEntry) *ringEntry { + return r.items[(e.idx+1)%len(r.items)] +} diff --git a/xds/internal/balancer/ringhash/ring_test.go b/xds/internal/balancer/ringhash/ring_test.go new file mode 100644 index 000000000000..2d664e05bb24 --- /dev/null +++ b/xds/internal/balancer/ringhash/ring_test.go @@ -0,0 +1,113 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "fmt" + "math" + "testing" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/resolver" +) + +func testAddr(addr string, weight uint32) resolver.Address { + return resolver.Address{Addr: addr, Metadata: weight} +} + +func TestRingNew(t *testing.T) { + testAddrs := []resolver.Address{ + testAddr("a", 3), + testAddr("b", 3), + testAddr("c", 4), + } + var totalWeight float64 = 10 + testSubConnMap := map[resolver.Address]*subConn{ + testAddr("a", 3): {addr: "a"}, + testAddr("b", 3): {addr: "b"}, + testAddr("c", 4): {addr: "c"}, + } + for _, min := range []uint64{3, 4, 6, 8} { + for _, max := range []uint64{20, 8} { + t.Run(fmt.Sprintf("size-min-%v-max-%v", min, max), func(t *testing.T) { + r, _ := newRing(testSubConnMap, min, max) + totalCount := len(r.items) + if totalCount < int(min) || totalCount > int(max) { + t.Fatalf("unexpect size %v, want min %v, max %v", totalCount, min, max) + } + for _, a := range testAddrs { + var count int + for _, ii := range r.items { + if ii.sc.addr == a.Addr { + count++ + } + } + got := float64(count) / float64(totalCount) + want := float64(a.Metadata.(uint32)) / totalWeight + if !equalApproximately(got, want) { + t.Fatalf("unexpected item weight in ring: %v != %v", got, want) + } + } + }) + } + } +} + +func equalApproximately(x, y float64) bool { + delta := math.Abs(x - y) + mean := math.Abs(x+y) / 2.0 + return delta/mean < 0.25 +} + +func TestRingPick(t *testing.T) { + r, _ := newRing(map[resolver.Address]*subConn{ + {Addr: "a", Metadata: uint32(3)}: {addr: "a"}, + {Addr: "b", Metadata: uint32(3)}: {addr: "b"}, + {Addr: "c", Metadata: uint32(4)}: {addr: "c"}, + }, 10, 20) + for _, h := range []uint64{xxhash.Sum64String("1"), xxhash.Sum64String("2"), xxhash.Sum64String("3"), xxhash.Sum64String("4")} { + t.Run(fmt.Sprintf("picking-hash-%v", h), func(t *testing.T) { + e := r.pick(h) + var low uint64 + if e.idx > 0 { + low = r.items[e.idx-1].hash + } + high := 
e.hash + // h should be in [low, high). + if h < low || h >= high { + t.Fatalf("unexpected item picked, hash: %v, low: %v, high: %v", h, low, high) + } + }) + } +} + +func TestRingNext(t *testing.T) { + r, _ := newRing(map[resolver.Address]*subConn{ + {Addr: "a", Metadata: uint32(3)}: {addr: "a"}, + {Addr: "b", Metadata: uint32(3)}: {addr: "b"}, + {Addr: "c", Metadata: uint32(4)}: {addr: "c"}, + }, 10, 20) + + for _, e := range r.items { + ne := r.next(e) + if ne.idx != (e.idx+1)%len(r.items) { + t.Fatalf("next(%+v) returned unexpected %+v", e, ne) + } + } +} From b189f5e1bc9a495447332355df8a9648e65a2e44 Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Thu, 2 Sep 2021 11:22:07 -0700 Subject: [PATCH 212/998] authz: create interceptors for gRPC security policy API (#4664) * Static Authorization Interceptor --- authz/rbac_translator.go | 43 ++-- authz/rbac_translator_test.go | 209 +++++++++++------- authz/sdk_end2end_test.go | 307 ++++++++++++++++++++++++++ authz/sdk_server_interceptors.go | 75 +++++++ authz/sdk_server_interceptors_test.go | 56 +++++ internal/xds/rbac/rbac_engine.go | 8 +- 6 files changed, 595 insertions(+), 103 deletions(-) create mode 100644 authz/sdk_end2end_test.go create mode 100644 authz/sdk_server_interceptors.go create mode 100644 authz/sdk_server_interceptors_test.go diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index fe305a1adce0..039d76bc99d9 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -23,6 +23,7 @@ package authz import ( + "bytes" "encoding/json" "fmt" "strings" @@ -93,7 +94,7 @@ func getStringMatcher(value string) *v3matcherpb.StringMatcher { switch { case value == "*": return &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Prefix{}, + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{}, } case strings.HasSuffix(value, "*"): prefix := strings.TrimSuffix(value, "*") @@ -117,7 +118,7 @@ func 
getHeaderMatcher(key, value string) *v3routepb.HeaderMatcher { case value == "*": return &v3routepb.HeaderMatcher{ Name: key, - HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{}, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{}, } case strings.HasSuffix(value, "*"): prefix := strings.TrimSuffix(value, "*") @@ -268,34 +269,38 @@ func parseRules(rules []rule, prefixName string) (map[string]*v3rbacpb.Policy, e } // translatePolicy translates SDK authorization policy in JSON format to two -// Envoy RBAC polices (deny and allow policy). If the policy cannot be parsed -// or is invalid, an error will be returned. -func translatePolicy(policyStr string) (*v3rbacpb.RBAC, *v3rbacpb.RBAC, error) { - var policy authorizationPolicy - if err := json.Unmarshal([]byte(policyStr), &policy); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshal policy: %v", err) +// Envoy RBAC polices (deny followed by allow policy) or only one Envoy RBAC +// allow policy. If the input policy cannot be parsed or is invalid, an error +// will be returned. 
+func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, error) { + policy := &authorizationPolicy{} + d := json.NewDecoder(bytes.NewReader([]byte(policyStr))) + d.DisallowUnknownFields() + if err := d.Decode(policy); err != nil { + return nil, fmt.Errorf("failed to unmarshal policy: %v", err) } if policy.Name == "" { - return nil, nil, fmt.Errorf(`"name" is not present`) + return nil, fmt.Errorf(`"name" is not present`) } if len(policy.AllowRules) == 0 { - return nil, nil, fmt.Errorf(`"allow_rules" is not present`) + return nil, fmt.Errorf(`"allow_rules" is not present`) } - allowPolicies, err := parseRules(policy.AllowRules, policy.Name) - if err != nil { - return nil, nil, fmt.Errorf(`"allow_rules" %v`, err) - } - allowRBAC := &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: allowPolicies} - var denyRBAC *v3rbacpb.RBAC + rbacs := make([]*v3rbacpb.RBAC, 0, 2) if len(policy.DenyRules) > 0 { denyPolicies, err := parseRules(policy.DenyRules, policy.Name) if err != nil { - return nil, nil, fmt.Errorf(`"deny_rules" %v`, err) + return nil, fmt.Errorf(`"deny_rules" %v`, err) } - denyRBAC = &v3rbacpb.RBAC{ + denyRBAC := &v3rbacpb.RBAC{ Action: v3rbacpb.RBAC_DENY, Policies: denyPolicies, } + rbacs = append(rbacs, denyRBAC) } - return denyRBAC, allowRBAC, nil + allowPolicies, err := parseRules(policy.AllowRules, policy.Name) + if err != nil { + return nil, fmt.Errorf(`"allow_rules" %v`, err) + } + allowRBAC := &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: allowPolicies} + return append(rbacs, allowRBAC), nil } diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index 425cae85b037..9a883e9d78d5 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -32,10 +32,9 @@ import ( func TestTranslatePolicy(t *testing.T) { tests := map[string]struct { - authzPolicy string - wantErr string - wantDenyPolicy *v3rbacpb.RBAC - wantAllowPolicy *v3rbacpb.RBAC + authzPolicy string + wantErr string + wantPolicies 
[]*v3rbacpb.RBAC }{ "valid policy": { authzPolicy: `{ @@ -82,81 +81,133 @@ func TestTranslatePolicy(t *testing.T) { } }] }`, - wantDenyPolicy: &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_DENY, Policies: map[string]*v3rbacpb.Policy{ - "authz_deny_policy_1": { - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ - Ids: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Authenticated_{ - Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}}}}}, - {Identifier: &v3rbacpb.Principal_Authenticated_{ - Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "spiffe://bar"}}}}}, - {Identifier: &v3rbacpb.Principal_Authenticated_{ - Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}}}}}, - {Identifier: &v3rbacpb.Principal_Authenticated_{ - Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://abc.*.com"}}}}}, - }}}}}, - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Any{Any: true}}}, - }, - }}, - wantAllowPolicy: &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: map[string]*v3rbacpb.Policy{ - "authz_allow_policy_1": { - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ - Ids: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Authenticated_{ - Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: ""}}}}}, - }}}}}, - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ - 
Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ - Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_UrlPath{ - UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "path-foo"}}}}}}, - }}}}}}}}}, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "spiffe://bar"}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://abc.*.com"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, }, - "authz_allow_policy_2": { - Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Any{Any: true}}}, - Permissions: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ - Rules: []*v3rbacpb.Permission{ - {Rule: 
&v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ - Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_UrlPath{ - UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "path-bar"}}}}}}, - {Rule: &v3rbacpb.Permission_UrlPath{ - UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}}}}}}, - }}}}, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ Rules: []*v3rbacpb.Permission{ {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Header{ - Header: &v3routepb.HeaderMatcher{ - Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "foo"}}}}, - {Rule: &v3rbacpb.Permission_Header{ - Header: &v3routepb.HeaderMatcher{ - Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "bar"}}}}, - }}}}, + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "path-foo"}, + }}}, + }}, + }, + }}}, + }, + }}}, + }, + }, + "authz_allow_policy_2": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + 
Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ Rules: []*v3rbacpb.Permission{ - {Rule: &v3rbacpb.Permission_Header{ - Header: &v3routepb.HeaderMatcher{ - Name: "key-2", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "baz"}}}}, - }}}}}}}}}}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "path-bar"}, + }}}, + }}, + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}, + }}}, + }}, + }, + }}}, + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "foo"}, + }, + }}, + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "bar"}, + }, + }}, + }, + }}}, + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-2", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "baz"}, + }, + }}, + }, + }}}, + }, + }}}, + }, + }}}, + }, + }, + }, }, - }}, + }, + }, + "unknown field": { + authzPolicy: `{"random": 123}`, + wantErr: "failed to unmarshal policy", }, "missing name field": { authzPolicy: `{}`, @@ -167,10 
+218,8 @@ func TestTranslatePolicy(t *testing.T) { wantErr: "failed to unmarshal policy", }, "missing allow rules field": { - authzPolicy: `{"name": "authz-foo"}`, - wantErr: `"allow_rules" is not present`, - wantDenyPolicy: nil, - wantAllowPolicy: nil, + authzPolicy: `{"name": "authz-foo"}`, + wantErr: `"allow_rules" is not present`, }, "missing rule name field": { authzPolicy: `{ @@ -210,18 +259,14 @@ func TestTranslatePolicy(t *testing.T) { wantErr: `"allow_rules" 0: "headers" 0: unsupported "key" :method`, }, } - for name, test := range tests { t.Run(name, func(t *testing.T) { - gotDenyPolicy, gotAllowPolicy, gotErr := translatePolicy(test.authzPolicy) + gotPolicies, gotErr := translatePolicy(test.authzPolicy) if gotErr != nil && !strings.HasPrefix(gotErr.Error(), test.wantErr) { t.Fatalf("unexpected error\nwant:%v\ngot:%v", test.wantErr, gotErr) } - if diff := cmp.Diff(gotDenyPolicy, test.wantDenyPolicy, protocmp.Transform()); diff != "" { - t.Fatalf("unexpected deny policy\ndiff (-want +got):\n%s", diff) - } - if diff := cmp.Diff(gotAllowPolicy, test.wantAllowPolicy, protocmp.Transform()); diff != "" { - t.Fatalf("unexpected allow policy\ndiff (-want +got):\n%s", diff) + if diff := cmp.Diff(gotPolicies, test.wantPolicies, protocmp.Transform()); diff != "" { + t.Fatalf("unexpected policy\ndiff (-want +got):\n%s", diff) } }) } diff --git a/authz/sdk_end2end_test.go b/authz/sdk_end2end_test.go new file mode 100644 index 000000000000..92a5e4f4b210 --- /dev/null +++ b/authz/sdk_end2end_test.go @@ -0,0 +1,307 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package authz_test + +import ( + "context" + "io" + "net" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/authz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + pb "google.golang.org/grpc/test/grpc_testing" +) + +type testServer struct { + pb.UnimplementedTestServiceServer +} + +func (s *testServer) UnaryCall(ctx context.Context, req *pb.SimpleRequest) (*pb.SimpleResponse, error) { + return &pb.SimpleResponse{}, nil +} + +func (s *testServer) StreamingInputCall(stream pb.TestService_StreamingInputCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&pb.StreamingInputCallResponse{}) + } + if err != nil { + return err + } + } +} + +func TestSDKEnd2End(t *testing.T) { + tests := map[string]struct { + authzPolicy string + md metadata.MD + wantStatusCode codes.Code + wantErr string + }{ + "DeniesRpcRequestMatchInDenyNoMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_StreamingOutputCall", + "request": { + "paths": + [ + "/grpc.testing.TestService/StreamingOutputCall" + ] + } + } + ], + "deny_rules": + [ + { + "name": "deny_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/UnaryCall", + "/grpc.testing.TestService/StreamingInputCall" + ], + "headers": + [ + { + "key": "key-abc", + "values": + [ + "val-abc", + "val-def" + ] + } + ] + } + } + ] + }`, + md: metadata.Pairs("key-abc", "val-abc"), + wantStatusCode: codes.PermissionDenied, + 
wantErr: "unauthorized RPC request rejected", + }, + "DeniesRpcRequestMatchInDenyAndAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/*" + ] + } + } + ], + "deny_rules": + [ + { + "name": "deny_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/*" + ] + } + } + ] + }`, + wantStatusCode: codes.PermissionDenied, + wantErr: "unauthorized RPC request rejected", + }, + "AllowsRpcRequestNoMatchInDenyMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_all" + } + ], + "deny_rules": + [ + { + "name": "deny_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/UnaryCall", + "/grpc.testing.TestService/StreamingInputCall" + ], + "headers": + [ + { + "key": "key-abc", + "values": + [ + "val-abc", + "val-def" + ] + } + ] + } + } + ] + }`, + md: metadata.Pairs("key-xyz", "val-xyz"), + wantStatusCode: codes.OK, + }, + "AllowsRpcRequestNoMatchInDenyAndAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_some_user", + "source": { + "principals": + [ + "some_user" + ] + } + } + ], + "deny_rules": + [ + { + "name": "deny_StreamingOutputCall", + "request": { + "paths": + [ + "/grpc.testing.TestService/StreamingOutputCall" + ] + } + } + ] + }`, + wantStatusCode: codes.PermissionDenied, + wantErr: "unauthorized RPC request rejected", + }, + "AllowsRpcRequestEmptyDenyMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_UnaryCall", + "request": + { + "paths": + [ + "/grpc.testing.TestService/UnaryCall" + ] + } + }, + { + "name": "allow_StreamingInputCall", + "request": + { + "paths": + [ + "/grpc.testing.TestService/StreamingInputCall" + ] + } + } + ] + }`, + wantStatusCode: codes.OK, + }, + "DeniesRpcRequestEmptyDenyNoMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + 
"name": "allow_StreamingOutputCall", + "request": + { + "paths": + [ + "/grpc.testing.TestService/StreamingOutputCall" + ] + } + } + ] + }`, + wantStatusCode: codes.PermissionDenied, + wantErr: "unauthorized RPC request rejected", + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + // Start a gRPC server with SDK unary and stream server interceptors. + i, _ := authz.NewStatic(test.authzPolicy) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor), + grpc.ChainStreamInterceptor(i.StreamInterceptor)) + pb.RegisterTestServiceServer(s, &testServer{}) + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := pb.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + ctx = metadata.NewOutgoingContext(ctx, test.md) + + // Verifying authorization decision for Unary RPC. + _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != test.wantStatusCode || got.Message() != test.wantErr { + t.Fatalf("[UnaryCall] error want:{%v %v} got:{%v %v}", test.wantStatusCode, test.wantErr, got.Code(), got.Message()) + } + + // Verifying authorization decision for Streaming RPC. 
+ stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("failed StreamingInputCall err: %v", err) + } + req := &pb.StreamingInputCallRequest{ + Payload: &pb.Payload{ + Body: []byte("hi"), + }, + } + if err := stream.Send(req); err != nil && err != io.EOF { + t.Fatalf("failed stream.Send err: %v", err) + } + _, err = stream.CloseAndRecv() + if got := status.Convert(err); got.Code() != test.wantStatusCode || got.Message() != test.wantErr { + t.Fatalf("[StreamingCall] error want:{%v %v} got:{%v %v}", test.wantStatusCode, test.wantErr, got.Code(), got.Message()) + } + }) + } +} diff --git a/authz/sdk_server_interceptors.go b/authz/sdk_server_interceptors.go new file mode 100644 index 000000000000..a2f992b5f263 --- /dev/null +++ b/authz/sdk_server_interceptors.go @@ -0,0 +1,75 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package authz + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/rbac" + "google.golang.org/grpc/status" +) + +// StaticInterceptor contains engines used to make authorization decisions. It +// either contains two engines deny engine followed by an allow engine or only +// one allow engine. +type StaticInterceptor struct { + engines rbac.ChainEngine +} + +// NewStatic returns a new StaticInterceptor from a static authorization policy +// JSON string. 
+func NewStatic(authzPolicy string) (*StaticInterceptor, error) { + rbacs, err := translatePolicy(authzPolicy) + if err != nil { + return nil, err + } + chainEngine, err := rbac.NewChainEngine(rbacs) + if err != nil { + return nil, err + } + return &StaticInterceptor{*chainEngine}, nil +} + +// UnaryInterceptor intercepts incoming Unary RPC requests. +// Only authorized requests are allowed to pass. Otherwise, an unauthorized +// error is returned to the client. +func (i *StaticInterceptor) UnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + err := i.engines.IsAuthorized(ctx) + if err != nil { + if status.Code(err) == codes.PermissionDenied { + return nil, status.Errorf(codes.PermissionDenied, "unauthorized RPC request rejected") + } + return nil, err + } + return handler(ctx, req) +} + +// StreamInterceptor intercepts incoming Stream RPC requests. +// Only authorized requests are allowed to pass. Otherwise, an unauthorized +// error is returned to the client. +func (i *StaticInterceptor) StreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + err := i.engines.IsAuthorized(ss.Context()) + if err != nil { + if status.Code(err) == codes.PermissionDenied { + return status.Errorf(codes.PermissionDenied, "unauthorized RPC request rejected") + } + return err + } + return handler(srv, ss) +} diff --git a/authz/sdk_server_interceptors_test.go b/authz/sdk_server_interceptors_test.go new file mode 100644 index 000000000000..e2c1072e7d86 --- /dev/null +++ b/authz/sdk_server_interceptors_test.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package authz_test + +import ( + "testing" + + "google.golang.org/grpc/authz" +) + +func TestNewStatic(t *testing.T) { + tests := map[string]struct { + authzPolicy string + wantErr bool + }{ + "InvalidPolicyFailsToCreateInterceptor": { + authzPolicy: `{}`, + wantErr: true, + }, + "ValidPolicyCreatesInterceptor": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_all" + } + ] + }`, + wantErr: false, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if _, err := authz.NewStatic(test.authzPolicy); (err != nil) != test.wantErr { + t.Fatalf("NewStatic(%v) returned err: %v, want err: %v", test.authzPolicy, err, test.wantErr) + } + }) + } +} diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index 269edabeaa65..1642d26b1ae8 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -32,12 +32,15 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" ) +var logger = grpclog.Component("rbac") + var getConnection = transport.GetConnection // ChainEngine represents a chain of RBAC Engines, used to make authorization @@ -69,7 +72,8 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { // and then be used for the whole chain of RBAC Engines. 
rpcData, err := newRPCData(ctx) if err != nil { - return status.Errorf(codes.InvalidArgument, "missing fields in ctx %+v: %v", ctx, err) + logger.Errorf("newRPCData: %v", err) + return status.Errorf(codes.Internal, "gRPC RBAC: %v", err) } for _, engine := range cre.chainedEngines { matchingPolicyName, ok := engine.findMatchingPolicy(rpcData) @@ -86,7 +90,7 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { // If the incoming RPC gets through all of the engines successfully (i.e. // doesn't not match an allow or match a deny engine), the RPC is authorized // to proceed. - return status.Error(codes.OK, "") + return nil } // engine is used for matching incoming RPCs to policies. From c93e472777b9d2963eff865ff4ee9f0895876b43 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 2 Sep 2021 14:43:26 -0400 Subject: [PATCH 213/998] Fixed race in Filter Chain (#4728) --- xds/internal/server/listener_wrapper.go | 5 +++-- xds/internal/xdsclient/filter_chain.go | 12 +++--------- xds/internal/xdsclient/filter_chain_test.go | 7 +++++-- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index f730ca3c97dc..2d30ca1231d0 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -324,12 +324,13 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { // can come it at any time), and connections aren't accepted too often, // so this reinstantation of the Route Configuration is an acceptable // tradeoff for simplicity. 
- if err := fc.ConstructUsableRouteConfiguration(rc); err != nil { + vhswi, err := fc.ConstructUsableRouteConfiguration(rc) + if err != nil { l.logger.Warningf("route configuration construction: %v", err) conn.Close() continue } - return &connWrapper{Conn: conn, filterChain: fc, parent: l, virtualHosts: fc.VirtualHosts}, nil + return &connWrapper{Conn: conn, filterChain: fc, parent: l, virtualHosts: vhswi}, nil } } diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index ffaaf710f302..9784083de104 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -67,11 +67,6 @@ type FilterChain struct { // // Only one of RouteConfigName and InlineRouteConfig is set. InlineRouteConfig *RouteConfigUpdate - - // VirtualHosts are the virtual hosts ready to be used in the xds interceptors. - // It contains a way to match routes using a matcher and also instantiates - // HTTPFilter overrides to simply run incoming RPC's through if they are selected. - VirtualHosts []VirtualHostWithInterceptors } // VirtualHostWithInterceptors captures information present in a VirtualHost @@ -98,17 +93,16 @@ type RouteWithInterceptors struct { // ConstructUsableRouteConfiguration takes Route Configuration and converts it // into matchable route configuration, with instantiated HTTP Filters per route. 
-func (f *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) error { +func (f *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) ([]VirtualHostWithInterceptors, error) { vhs := make([]VirtualHostWithInterceptors, len(config.VirtualHosts)) for _, vh := range config.VirtualHosts { vhwi, err := f.convertVirtualHost(vh) if err != nil { - return fmt.Errorf("virtual host construction: %v", err) + return nil, fmt.Errorf("virtual host construction: %v", err) } vhs = append(vhs, vhwi) } - f.VirtualHosts = vhs - return nil + return vhs, nil } func (f *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostWithInterceptors, error) { diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index a3ef42f87608..fd6b3c224276 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -2629,11 +2629,14 @@ func TestHTTPFilterInstantiation(t *testing.T) { fc := FilterChain{ HTTPFilters: test.filters, } - fc.ConstructUsableRouteConfiguration(test.routeConfig) + vhswi, err := fc.ConstructUsableRouteConfiguration(test.routeConfig) + if err != nil { + t.Fatalf("Error constructing usable route configuration: %v", err) + } // Build out list of errors by iterating through the virtual hosts and routes, // and running the filters in route configurations. 
var errs []string - for _, vh := range fc.VirtualHosts { + for _, vh := range vhswi { for _, r := range vh.Routes { for _, int := range r.Interceptors { errs = append(errs, int.AllowRPC(context.Background()).Error()) From b2ba77a36ff809ab344b98368d9ecc3e12f943d6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 3 Sep 2021 10:59:33 -0700 Subject: [PATCH 214/998] xds: use separate update channels for listeners in test (#4712) --- .../test/xds_server_serving_mode_test.go | 169 +++++++++--------- 1 file changed, 88 insertions(+), 81 deletions(-) diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 2178cf359bde..ee353ca74c0c 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -24,9 +24,7 @@ package xds_test import ( "context" - "fmt" "net" - "sync" "testing" "time" @@ -34,54 +32,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" - "google.golang.org/grpc/internal/testutils" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/xds" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" ) -// A convenience typed used to keep track of mode changes on multiple listeners. -type modeTracker struct { - mu sync.Mutex - modes map[string]xds.ServingMode - updateCh *testutils.Channel -} - -func newModeTracker() *modeTracker { - return &modeTracker{ - modes: make(map[string]xds.ServingMode), - updateCh: testutils.NewChannel(), - } -} - -func (mt *modeTracker) updateMode(ctx context.Context, addr net.Addr, mode xds.ServingMode) { - mt.mu.Lock() - defer mt.mu.Unlock() - - mt.modes[addr.String()] = mode - // Sometimes we could get state updates which are not expected by the test. 
- // Using `Send()` here would block in that case and cause the whole test to - // hang and will eventually only timeout when the `-timeout` passed to `go - // test` elapses. Using `SendContext()` here instead fails the test within a - // reasonable timeout. - mt.updateCh.SendContext(ctx, nil) -} - -func (mt *modeTracker) getMode(addr net.Addr) xds.ServingMode { - mt.mu.Lock() - defer mt.mu.Unlock() - return mt.modes[addr.String()] -} - -func (mt *modeTracker) waitForUpdate(ctx context.Context) error { - _, err := mt.updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("error when waiting for a mode change update: %v", err) - } - return nil -} - // TestServerSideXDS_ServingModeChanges tests the serving mode functionality in // xDS enabled gRPC servers. It verifies that appropriate mode changes happen in // the server, and also verifies behavior of clientConns under these modes. @@ -97,13 +53,34 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatal(err) } - // Create a server option to get notified about serving mode changes. + // Create two local listeners and pass it to Serve(). + lis1, err := xdstestutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis2, err := xdstestutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + // Create a couple of channels on which mode updates will be pushed. + updateCh1 := make(chan xds.ServingMode, 1) + updateCh2 := make(chan xds.ServingMode, 1) + + // Create a server option to get notified about serving mode changes, and + // push the updated mode on the channels created above. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - modeTracker := newModeTracker() modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { t.Logf("serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) - modeTracker.updateMode(ctx, addr, args.Mode) + switch addr.String() { + case lis1.Addr().String(): + updateCh1 <- args.Mode + case lis2.Addr().String(): + updateCh2 <- args.Mode + default: + t.Logf("serving mode callback invoked for unknown listener address: %q", addr.String()) + } }) // Initialize an xDS-enabled gRPC server and register the stubServer on it. @@ -111,16 +88,6 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { defer server.Stop() testpb.RegisterTestServiceServer(server, &testService{}) - // Create two local listeners and pass it to Serve(). - lis1, err := xdstestutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - lis2, err := xdstestutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - // Setup the management server to respond with server-side Listener // resources for both listeners. host1, port1, err := hostPortFromListener(lis1) @@ -153,11 +120,21 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { }() // Wait for both listeners to move to "serving" mode. 
- if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeServing); err != nil { - t.Fatal(err) + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh1: + if mode != xds.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + } } - if err := waitForModeChange(ctx, modeTracker, lis2.Addr(), xds.ServingModeServing); err != nil { - t.Fatal(err) + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh2: + if mode != xds.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + } } // Create a ClientConn to the first listener and make a successful RPCs. @@ -184,8 +161,25 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { }); err != nil { t.Error(err) } - if err := waitForModeChange(ctx, modeTracker, lis2.Addr(), xds.ServingModeNotServing); err != nil { - t.Fatal(err) + + // Wait for lis2 to move to "not-serving" mode. lis1 also receives an update + // here even though it stays in "serving" mode. + // See https://github.com/grpc/grpc-go/issues/4695. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh1: + if mode != xds.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + } + } + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh2: + if mode != xds.ServingModeNotServing { + t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeNotServing) + } } // Make sure RPCs succeed on cc1 and fail on cc2. 
@@ -201,8 +195,17 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { }); err != nil { t.Error(err) } - if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeNotServing); err != nil { - t.Fatal(err) + + // Wait for lis1 to move to "not-serving" mode. lis2 was already removed + // from the xdsclient's resource cache. So, lis2's callback will not be + // invoked this time around. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh1: + if mode != xds.ServingModeNotServing { + t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeNotServing) + } } // Make sure RPCs fail on both. @@ -226,11 +229,21 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Wait for both listeners to move to "serving" mode. - if err := waitForModeChange(ctx, modeTracker, lis1.Addr(), xds.ServingModeServing); err != nil { - t.Fatal(err) + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh1: + if mode != xds.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + } } - if err := waitForModeChange(ctx, modeTracker, lis2.Addr(), xds.ServingModeServing); err != nil { - t.Fatal(err) + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh2: + if mode != xds.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + } } // The clientConns created earlier should be able to make RPCs now. 
@@ -238,17 +251,6 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { waitForSuccessfulRPC(ctx, t, cc2) } -func waitForModeChange(ctx context.Context, modeTracker *modeTracker, addr net.Addr, wantMode xds.ServingMode) error { - for { - if gotMode := modeTracker.getMode(addr); gotMode == wantMode { - return nil - } - if err := modeTracker.waitForUpdate(ctx); err != nil { - return err - } - } -} - func waitForSuccessfulRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { t.Helper() @@ -261,8 +263,13 @@ func waitForSuccessfulRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn func waitForFailedRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { t.Helper() + // Attempt one RPC before waiting for the ticker to expire. c := testpb.NewTestServiceClient(cc) - ticker := time.NewTimer(10 * time.Millisecond) + if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { + return + } + + ticker := time.NewTimer(1 * time.Second) defer ticker.Stop() for { select { From 0ca7dca97726252050774a4bff20d92ca5772331 Mon Sep 17 00:00:00 2001 From: yihuaz Date: Tue, 7 Sep 2021 09:12:01 -0700 Subject: [PATCH 215/998] oauth: Allow access to Google API regional endpoints via Google Default Credentials (#4713) --- credentials/oauth/oauth.go | 19 ++++++++++- credentials/oauth/oauth_test.go | 60 +++++++++++++++++++++++++++++++++ internal/credentials/util.go | 4 ++- 3 files changed, 81 insertions(+), 2 deletions(-) create mode 100644 credentials/oauth/oauth_test.go diff --git a/credentials/oauth/oauth.go b/credentials/oauth/oauth.go index 852ae375cfc7..c748fd21ce2b 100644 --- a/credentials/oauth/oauth.go +++ b/credentials/oauth/oauth.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "io/ioutil" + "net/url" "sync" "golang.org/x/oauth2" @@ -56,6 +57,16 @@ func (ts TokenSource) RequireTransportSecurity() bool { return true } +// removeServiceNameFromJWTURI removes RPC service name from URI. 
+func removeServiceNameFromJWTURI(uri string) (string, error) { + parsed, err := url.Parse(uri) + if err != nil { + return "", err + } + parsed.Path = "/" + return parsed.String(), nil +} + type jwtAccess struct { jsonKey []byte } @@ -75,9 +86,15 @@ func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) } func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // Remove RPC service name from URI that will be used as audience + // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. + aud, err := removeServiceNameFromJWTURI(uri[0]) + if err != nil { + return nil, err + } // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with // uri as the key, to avoid recreating for every RPC. - ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) if err != nil { return nil, err } diff --git a/credentials/oauth/oauth_test.go b/credentials/oauth/oauth_test.go new file mode 100644 index 000000000000..7e62ecb36c12 --- /dev/null +++ b/credentials/oauth/oauth_test.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package oauth + +import ( + "strings" + "testing" +) + +func checkErrorMsg(err error, msg string) bool { + if err == nil && msg == "" { + return true + } else if err != nil { + return strings.Contains(err.Error(), msg) + } + return false +} + +func TestRemoveServiceNameFromJWTURI(t *testing.T) { + tests := []struct { + name string + uri string + wantedURI string + wantedErrMsg string + }{ + { + name: "invalid URI", + uri: "ht tp://foo.com", + wantedErrMsg: "first path segment in URL cannot contain colon", + }, + { + name: "valid URI", + uri: "https://foo.com/go/", + wantedURI: "https://foo.com/", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, err := removeServiceNameFromJWTURI(tt.uri); got != tt.wantedURI || !checkErrorMsg(err, tt.wantedErrMsg) { + t.Errorf("RemoveServiceNameFromJWTURI() = %s, %v, want %s, %v", got, err, tt.wantedURI, tt.wantedErrMsg) + } + }) + } +} diff --git a/internal/credentials/util.go b/internal/credentials/util.go index 55664fa46b81..f792fd22cafc 100644 --- a/internal/credentials/util.go +++ b/internal/credentials/util.go @@ -18,7 +18,9 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) const alpnProtoStrH2 = "h2" From c99a9c19b08500bd4259e95e3529ff483a0ae405 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 7 Sep 2021 10:10:36 -0700 Subject: [PATCH 216/998] priority: forward the first IDLE state and picker (#4731) --- .../balancer/priority/balancer_priority.go | 24 +++++ .../balancer/priority/balancer_test.go | 96 +++++++++++++++++++ 2 files changed, 120 insertions(+) diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 3ba0b9e929d7..d9b54fa2baae 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -231,6 +231,8 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S 
b.handlePriorityWithNewStateTransientFailure(child, priority) case connectivity.Connecting: b.handlePriorityWithNewStateConnecting(child, priority, oldState) + case connectivity.Idle: + b.handlePriorityWithNewStateIdle(child, priority) default: // New state is Idle, should never happen. Don't forward. } @@ -356,3 +358,25 @@ func (b *priorityBalancer) handlePriorityWithNewStateConnecting(child *childBala // Old state is Connecting, TransientFailure or Shutdown. Don't forward. } } + +// handlePriorityWithNewStateIdle handles state Idle from a higher or equal +// priority. +// +// An update with state Idle: +// - If it's from higher priority: +// - Do nothing +// - It actually shouldn't happen, no balancer switches back to Idle. +// - If it's from priorityInUse: +// - Forward only +// +// Caller must make sure priorityInUse is not higher than priority. +// +// Caller must hold mu. +func (b *priorityBalancer) handlePriorityWithNewStateIdle(child *childBalancer, priority int) { + // priorityInUse is lower than this priority, do nothing. + if b.priorityInUse > priority { + return + } + // Forward the update. 
+ b.cc.UpdateState(child.state) +} diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 13699ce0c970..b884035442e4 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -1778,3 +1778,99 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { t.Fatalf("timeout waiting for ResolveNow()") } } + +const initIdleBalancerName = "test-init-Idle-balancer" + +var errsTestInitIdle = []error{ + fmt.Errorf("init Idle balancer error 0"), + fmt.Errorf("init Idle balancer error 1"), +} + +func init() { + for i := 0; i < 2; i++ { + ii := i + stub.Register(fmt.Sprintf("%s-%d", initIdleBalancerName, ii), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { + bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + err := fmt.Errorf("wrong picker error") + if state.ConnectivityState == connectivity.Idle { + err = errsTestInitIdle[ii] + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &testutils.TestConstPicker{Err: err}, + }) + }, + }) + } +} + +// If the high priorities send initial pickers with Idle state, their pickers +// should get picks, because policies like ringhash starts in Idle, and doesn't +// connect. +// +// Init 0, 1; 0 is Idle, use 0; 0 is down, start 1; 1 is Idle, use 1. +func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // Two children, with priorities [0, 1], each with one backend. 
+ if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 1)}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + + // Send an Idle state update to trigger an Idle picker update. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + p0 := <-cc.NewPickerCh + if pr, err := p0.Pick(balancer.PickInfo{}); err != errsTestInitIdle[0] { + t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[0]) + } + + // Turn p0 down, to start p1. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs + // will retry. + p1 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + if _, err := p1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) + } + } + + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc1 := <-cc.NewSubConnCh + // Idle picker from p1 should also be forwarded. 
+ pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + p2 := <-cc.NewPickerCh + if pr, err := p2.Pick(balancer.PickInfo{}); err != errsTestInitIdle[1] { + t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[1]) + } +} From 00a7dc8901e6f74713b131601d76cfc8fb62f8b0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 7 Sep 2021 10:28:56 -0700 Subject: [PATCH 217/998] xds: remove env var protection for security on client (#4735) --- internal/xds/env/env.go | 12 +---- xds/internal/test/xds_integration_test.go | 10 +---- xds/internal/xdsclient/cds_test.go | 53 ----------------------- xds/internal/xdsclient/xds.go | 12 ++--- 4 files changed, 7 insertions(+), 80 deletions(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 448fd63c21eb..c2e4e5d97185 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -39,9 +39,8 @@ const ( // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -64,13 +63,6 @@ var ( // be enabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "true". RingHashSupport = strings.EqualFold(os.Getenv(ringHashSupportEnv), "true") - // ClientSideSecuritySupport is used to control processing of security - // configuration on the client-side. 
- // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - ClientSideSecuritySupport = strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "true") // AggregateAndDNSSupportEnv indicates whether processing of aggregated // cluster and DNS cluster is enabled, which can be enabled by setting the // environment variable diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index ef93df2efe62..c643cd28f527 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -38,7 +38,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" "google.golang.org/grpc/xds" @@ -142,10 +141,6 @@ func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { func setupManagementServer(t *testing.T) (*e2e.ManagementServer, string, []byte, resolver.Builder, func()) { t.Helper() - // Turn on the env var protection for client-side security. - origClientSideSecurityEnvVar := env.ClientSideSecuritySupport - env.ClientSideSecuritySupport = true - // Spin up an xDS management server on a local port. 
server, err := e2e.StartManagementServer() if err != nil { @@ -197,8 +192,5 @@ func setupManagementServer(t *testing.T) (*e2e.ManagementServer, string, []byte, t.Fatalf("Failed to create xDS resolver for testing: %v", err) } - return server, nodeID, bootstrapContents, resolver, func() { - server.Stop() - env.ClientSideSecuritySupport = origClientSideSecurityEnvVar - } + return server, nodeID, bootstrapContents, resolver, func() { server.Stop() } } diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index 0ccd62b607b0..88eda33780d2 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -432,60 +432,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { } } -func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { - // Turn off the env var protection for client-side security. - origClientSideSecurityEnvVar := env.ClientSideSecuritySupport - env.ClientSideSecuritySupport = false - defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() - - cluster := &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "rootInstance", - CertificateName: "rootCert", - }, - 
}, - }, - }), - }, - }, - } - wantUpdate := ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, - } - gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster) - if err != nil { - t.Errorf("validateClusterAndConstructClusterUpdate() failed: %v", err) - } - if diff := cmp.Diff(wantUpdate, gotUpdate); diff != "" { - t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, got):\n%s", diff) - } -} - func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { - // Turn on the env var protection for client-side security. - origClientSideSecurityEnvVar := env.ClientSideSecuritySupport - env.ClientSideSecuritySupport = true - defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() - const ( identityPluginInstance = "identityPluginInstance" identityCertName = "identityCert" diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 27367996bfc4..bfb2e3261a02 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -630,14 +630,10 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } - // Process security configuration received from the control plane iff the - // corresponding environment variable is set. - var sc *SecurityConfig - if env.ClientSideSecuritySupport { - var err error - if sc, err = securityConfigFromCluster(cluster); err != nil { - return ClusterUpdate{}, err - } + // Process security configuration received from the control plane . 
+ sc, err := securityConfigFromCluster(cluster) + if err != nil { + return ClusterUpdate{}, err } ret := ClusterUpdate{ From 973e7cb9a17d398b9ddff102e19701f9e7a7a096 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 7 Sep 2021 10:41:26 -0700 Subject: [PATCH 218/998] ringhash: the picker (#4730) --- xds/internal/balancer/ringhash/config.go | 3 - xds/internal/balancer/ringhash/picker.go | 149 +++++++++ xds/internal/balancer/ringhash/picker_test.go | 285 ++++++++++++++++++ xds/internal/balancer/ringhash/ring.go | 7 - xds/internal/balancer/ringhash/ringhash.go | 123 ++++++++ .../balancer/ringhash/ringhash_test.go | 38 +++ xds/internal/balancer/ringhash/util.go | 1 - xds/internal/testutils/balancer.go | 13 +- 8 files changed, 605 insertions(+), 14 deletions(-) create mode 100644 xds/internal/balancer/ringhash/picker.go create mode 100644 xds/internal/balancer/ringhash/picker_test.go create mode 100644 xds/internal/balancer/ringhash/ringhash.go create mode 100644 xds/internal/balancer/ringhash/ringhash_test.go diff --git a/xds/internal/balancer/ringhash/config.go b/xds/internal/balancer/ringhash/config.go index 4d94ebb71824..5cb4aab3d9c9 100644 --- a/xds/internal/balancer/ringhash/config.go +++ b/xds/internal/balancer/ringhash/config.go @@ -25,9 +25,6 @@ import ( "google.golang.org/grpc/serviceconfig" ) -// Name is the name of the ring_hash balancer. -const Name = "ring_hash_experimental" - // LBConfig is the balancer config for ring_hash balancer. type LBConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` diff --git a/xds/internal/balancer/ringhash/picker.go b/xds/internal/balancer/ringhash/picker.go new file mode 100644 index 000000000000..6d035b0c1911 --- /dev/null +++ b/xds/internal/balancer/ringhash/picker.go @@ -0,0 +1,149 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/status" +) + +type picker struct { + ring *ring +} + +// handleRICSResult is the return type of handleRICS. It's needed to wrap the +// returned error from Pick() in a struct. With this, if the return values are +// `balancer.PickResult, error, bool`, linter complains because error is not the +// last return value. +type handleRICSResult struct { + pr balancer.PickResult + err error +} + +// handleRICS generates pick result if the entry is in Ready, Idle, Connecting +// or Shutdown. TransientFailure will be handled specifically after this +// function returns. +// +// The first return value indicates if the state is in Ready, Idle, Connecting +// or Shutdown. If it's true, the PickResult and error should be returned from +// Pick() as is. +func handleRICS(e *ringEntry) (handleRICSResult, bool) { + switch state := e.sc.effectiveState(); state { + case connectivity.Ready: + return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true + case connectivity.Idle: + // Trigger Connect() and queue the pick. + e.sc.queueConnect() + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.Connecting: + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.TransientFailure: + // Return ok==false, so TransientFailure will be handled afterwards. 
+ return handleRICSResult{}, false + case connectivity.Shutdown: + // Shutdown can happen in a race where the old picker is called. A new + // picker should already be sent. + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + default: + // Should never reach this. All the connectivity states are already + // handled in the cases. + // + // FIXME: add an error log. + return handleRICSResult{err: status.Errorf(codes.Unavailable, "SubConn has undefined connectivity state: %v", state)}, true + } +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + e := p.ring.pick(getRequestHash(info.Ctx)) + if hr, ok := handleRICS(e); ok { + return hr.pr, hr.err + } + // ok was false, the entry is in transient failure. + return p.handleTransientFailure(e) +} + +func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, error) { + // Queue a connect on the first picked SubConn. + e.sc.queueConnect() + + // Find next entry in the ring, skipping duplicate SubConns. + e2 := nextSkippingDuplicates(p.ring, e) + if e2 == nil { + // There's no next entry available, fail the pick. + return balancer.PickResult{}, fmt.Errorf("the only SubConn is in Transient Failure") + } + + // For the second SubConn, also check Ready/Idle/Connecting as if it's the + // first entry. + if hr, ok := handleRICS(e2); ok { + return hr.pr, hr.err + } + + // The second SubConn is also in TransientFailure. Queue a connect on it. + e2.sc.queueConnect() + + // If it gets here, this is after the second SubConn, and the second SubConn + // was in TransientFailure. + // + // Loop over all other SubConns: + // - If all SubConns so far are all TransientFailure, trigger Connect() on + // the TransientFailure SubConns, and keep going. + // - If there's one SubConn that's not in TransientFailure, keep checking + // the remaining SubConns (in case there's a Ready, which will be returned), + // but don't not trigger Connect() on the other SubConns. 
+ var firstNonFailedFound bool + for ee := nextSkippingDuplicates(p.ring, e2); ee != e; ee = nextSkippingDuplicates(p.ring, ee) { + scState := ee.sc.effectiveState() + if scState == connectivity.Ready { + return balancer.PickResult{SubConn: ee.sc.sc}, nil + } + if firstNonFailedFound { + continue + } + if scState == connectivity.TransientFailure { + // This will queue a connect. + ee.sc.queueConnect() + continue + } + // This is a SubConn in a non-failure state. We continue to check the + // other SubConns, but remember that there was a non-failed SubConn + // seen. After this, Pick() will never trigger any SubConn to Connect(). + firstNonFailedFound = true + if scState == connectivity.Idle { + // This is the first non-failed SubConn, and it is in a real Idle + // state. Trigger it to Connect(). + ee.sc.queueConnect() + } + } + return balancer.PickResult{}, fmt.Errorf("no connection is Ready") +} + +func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { + for next := ring.next(entry); next != entry; next = ring.next(next) { + if next.sc != entry.sc { + return next + } + } + // There's no qualifying next entry. + return nil +} diff --git a/xds/internal/balancer/ringhash/picker_test.go b/xds/internal/balancer/ringhash/picker_test.go new file mode 100644 index 000000000000..c88698ebbdfe --- /dev/null +++ b/xds/internal/balancer/ringhash/picker_test.go @@ -0,0 +1,285 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/xds/internal/testutils" +) + +func newTestRing(cStats []connectivity.State) *ring { + var items []*ringEntry + for i, st := range cStats { + testSC := testutils.TestSubConns[i] + items = append(items, &ringEntry{ + idx: i, + hash: uint64((i + 1) * 10), + sc: &subConn{ + addr: testSC.String(), + sc: testSC, + state: st, + }, + }) + } + return &ring{items: items} +} + +func TestPickerPickFirstTwo(t *testing.T) { + tests := []struct { + name string + ring *ring + hash uint64 + wantSC balancer.SubConn + wantErr error + wantSCToConnect balancer.SubConn + }{ + { + name: "picked is Ready", + ring: newTestRing([]connectivity.State{connectivity.Ready, connectivity.Idle}), + hash: 5, + wantSC: testutils.TestSubConns[0], + }, + { + name: "picked is connecting, queue", + ring: newTestRing([]connectivity.State{connectivity.Connecting, connectivity.Idle}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + }, + { + name: "picked is Idle, connect and queue", + ring: newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + wantSCToConnect: testutils.TestSubConns[0], + }, + { + name: "picked is TransientFailure, next is ready, return", + ring: newTestRing([]connectivity.State{connectivity.TransientFailure, connectivity.Ready}), + hash: 5, + wantSC: testutils.TestSubConns[1], + }, + { + name: "picked is TransientFailure, next is connecting, queue", + ring: newTestRing([]connectivity.State{connectivity.TransientFailure, connectivity.Connecting}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + }, + { + name: "picked is TransientFailure, next is Idle, connect and queue", + 
ring: newTestRing([]connectivity.State{connectivity.TransientFailure, connectivity.Idle}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + wantSCToConnect: testutils.TestSubConns[1], + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &picker{ring: tt.ring} + got, err := p.Pick(balancer.PickInfo{ + Ctx: SetRequestHash(context.Background(), tt.hash), + }) + if err != tt.wantErr { + t.Errorf("Pick() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !cmp.Equal(got, balancer.PickResult{SubConn: tt.wantSC}, cmpOpts) { + t.Errorf("Pick() got = %v, want picked SubConn: %v", got, tt.wantSC) + } + if sc := tt.wantSCToConnect; sc != nil { + select { + case <-sc.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc) + } + } + }) + } +} + +// TestPickerPickTriggerTFConnect covers that if the picked SubConn is +// TransientFailures, all SubConns until a non-TransientFailure are queued for +// Connect(). +func TestPickerPickTriggerTFConnect(t *testing.T) { + ring := newTestRing([]connectivity.State{ + connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, + connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, + }) + p := &picker{ring: ring} + _, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) + if err == nil { + t.Fatalf("Pick() error = %v, want non-nil", err) + } + // The first 4 SubConns, all in TransientFailure, should be queued to + // connect. + for i := 0; i < 4; i++ { + it := ring.items[i] + if !it.sc.connectQueued { + t.Errorf("the %d-th SubConn is not queued for connect", i) + } + } + // The other SubConns, after the first Idle, should not be queued to + // connect. 
+ for i := 5; i < len(ring.items); i++ { + it := ring.items[i] + if it.sc.connectQueued { + t.Errorf("the %d-th SubConn is unexpected queued for connect", i) + } + } +} + +// TestPickerPickTriggerTFReturnReady covers that if the picked SubConn is +// TransientFailure, SubConn 2 and 3 are TransientFailure, 4 is Ready. SubConn 2 +// and 3 will Connect(), and 4 will be returned. +func TestPickerPickTriggerTFReturnReady(t *testing.T) { + ring := newTestRing([]connectivity.State{ + connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Ready, + }) + p := &picker{ring: ring} + pr, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) + if err != nil { + t.Fatalf("Pick() error = %v, want nil", err) + } + if wantSC := testutils.TestSubConns[3]; pr.SubConn != wantSC { + t.Fatalf("Pick() = %v, want %v", pr.SubConn, wantSC) + } + // The first 3 SubConns, all in TransientFailure, should be queued to + // connect. + for i := 0; i < 3; i++ { + it := ring.items[i] + if !it.sc.connectQueued { + t.Errorf("the %d-th SubConn is not queued for connect", i) + } + } +} + +// TestPickerPickTriggerTFWithIdle covers that if the picked SubConn is +// TransientFailure, SubConn 2 is TransientFailure, 3 is Idle (init Idle). Pick +// will be queue, SubConn 3 will Connect(), SubConn 4 and 5 (in TransientFailre) +// will not queue a Connect. +func TestPickerPickTriggerTFWithIdle(t *testing.T) { + ring := newTestRing([]connectivity.State{ + connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, + }) + p := &picker{ring: ring} + _, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) + if err == balancer.ErrNoSubConnAvailable { + t.Fatalf("Pick() error = %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + // The first 2 SubConns, all in TransientFailure, should be queued to + // connect. 
+ for i := 0; i < 2; i++ { + it := ring.items[i] + if !it.sc.connectQueued { + t.Errorf("the %d-th SubConn is not queued for connect", i) + } + } + // SubConn 3 was in Idle, so should Connect() + select { + case <-testutils.TestSubConns[2].ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", testutils.TestSubConns[2]) + } + // The other SubConns, after the first Idle, should not be queued to + // connect. + for i := 3; i < len(ring.items); i++ { + it := ring.items[i] + if it.sc.connectQueued { + t.Errorf("the %d-th SubConn is unexpected queued for connect", i) + } + } +} + +func TestNextSkippingDuplicatesNoDup(t *testing.T) { + testRing := newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}) + tests := []struct { + name string + ring *ring + cur *ringEntry + want *ringEntry + }{ + { + name: "no dup", + ring: testRing, + cur: testRing.items[0], + want: testRing.items[1], + }, + { + name: "only one entry", + ring: &ring{items: []*ringEntry{testRing.items[0]}}, + cur: testRing.items[0], + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := nextSkippingDuplicates(tt.ring, tt.cur); !cmp.Equal(got, tt.want, cmpOpts) { + t.Errorf("nextSkippingDuplicates() = %v, want %v", got, tt.want) + } + }) + } +} + +// addDups adds duplicates of items[0] to the ring. +func addDups(r *ring, count int) *ring { + var ( + items []*ringEntry + idx int + ) + for i, it := range r.items { + itt := *it + itt.idx = idx + items = append(items, &itt) + idx++ + if i == 0 { + // Add duplicate of items[0] to the ring + for j := 0; j < count; j++ { + itt2 := *it + itt2.idx = idx + items = append(items, &itt2) + idx++ + } + } + } + return &ring{items: items} +} + +func TestNextSkippingDuplicatesMoreDup(t *testing.T) { + testRing := newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}) + // Make a new ring with duplicate SubConns. 
+ dupTestRing := addDups(testRing, 3) + if got := nextSkippingDuplicates(dupTestRing, dupTestRing.items[0]); !cmp.Equal(got, dupTestRing.items[len(dupTestRing.items)-1], cmpOpts) { + t.Errorf("nextSkippingDuplicates() = %v, want %v", got, dupTestRing.items[len(dupTestRing.items)-1]) + } +} + +func TestNextSkippingDuplicatesOnlyDup(t *testing.T) { + testRing := newTestRing([]connectivity.State{connectivity.Idle}) + // Make a new ring with only duplicate SubConns. + dupTestRing := addDups(testRing, 3) + // This ring only has duplicates of items[0], should return nil. + if got := nextSkippingDuplicates(dupTestRing, dupTestRing.items[0]); got != nil { + t.Errorf("nextSkippingDuplicates() = %v, want nil", got) + } +} diff --git a/xds/internal/balancer/ringhash/ring.go b/xds/internal/balancer/ringhash/ring.go index 1fe1744e4ac7..68e844cfb487 100644 --- a/xds/internal/balancer/ringhash/ring.go +++ b/xds/internal/balancer/ringhash/ring.go @@ -28,13 +28,6 @@ import ( "google.golang.org/grpc/resolver" ) -// subConn is a placeholder struct needed by the ring. It will be moved to the -// balancer file, and will add more fields (balancer.SubConn, connectivity state -// and so on). -type subConn struct { - addr string -} - type ring struct { items []*ringEntry } diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go new file mode 100644 index 000000000000..b87cce64801b --- /dev/null +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package ringhash implements the ringhash balancer. +package ringhash + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" +) + +// Name is the name of the ring_hash balancer. +const Name = "ring_hash_experimental" + +type subConn struct { + addr string + sc balancer.SubConn + + mu sync.RWMutex + // This is the actual state of this SubConn (as updated by the ClientConn). + // The effective state can be different, see comment of attemptedToConnect. + state connectivity.State + // attemptedToConnect is whether this SubConn has attempted to connect ever. + // So that only the initial Idle is Idle, after any attempt to connect, + // following Idles are all TransientFailure. + // + // This affects the effective connectivity state of this SubConn, e.g. if + // the actual state is Idle, but this SubConn has attempted to connect, the + // effective state is TransientFailure. + // + // This is used in pick(). E.g. if a subConn is Idle, but has + // attemptedToConnect as true, pick() will + // - consider this SubConn as TransientFailure, and check the state of the + // next SubConn. + // - trigger Connect() (note that normally a SubConn in real + // TransientFailure cannot Connect()) + // + // Note this should only be set when updating the state (from Idle to + // anything else), not when Connect() is called, because there's a small + // window after the first Connect(), before the state switches to something + // else. 
+ attemptedToConnect bool + // connectQueued is true if a Connect() was queued for this SubConn while + // it's not in Idle (most likely was in TransientFailure). A Connect() will + // be triggered on this SubConn when it turns Idle. + // + // When connectivity state is updated to Idle for this SubConn, if + // connectQueued is true, Connect() will be called on the SubConn. + connectQueued bool +} + +// SetState updates the state of this SubConn. +// +// It also handles the queued Connect(). If the new state is Idle, and a +// Connect() was queued, this SubConn will be triggered to Connect(). +// +// FIXME: unexport this. It's exported so that staticcheck doesn't complain +// about unused functions. +func (sc *subConn) SetState(s connectivity.State) { + sc.mu.Lock() + defer sc.mu.Unlock() + // Any state change to non-Idle means there was an attempt to connect. + if s != connectivity.Idle { + sc.attemptedToConnect = true + } + switch s { + case connectivity.Idle: + // Trigger Connect() if new state is Idle, and there is a queued connect. + if sc.connectQueued { + sc.connectQueued = false + sc.sc.Connect() + } + case connectivity.Connecting, connectivity.Ready: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + } + sc.state = s +} + +// effectiveState returns the effective state of this SubConn. It can be +// different from the actual state, e.g. Idle after any attempt to connect (any +// Idle other than the initial Idle) is considered TransientFailure. +func (sc *subConn) effectiveState() connectivity.State { + sc.mu.RLock() + defer sc.mu.RUnlock() + if sc.state == connectivity.Idle && sc.attemptedToConnect { + return connectivity.TransientFailure + } + return sc.state +} + +// queueConnect sets a boolean so that when the SubConn state changes to Idle, +// it's Connect() will be triggered. 
If the SubConn state is already Idle, it +// will just call Connect(). +func (sc *subConn) queueConnect() { + sc.mu.Lock() + defer sc.mu.Unlock() + if sc.state == connectivity.Idle { + sc.sc.Connect() + return + } + // Queue this connect, and when this SubConn switches back to Idle (happens + // after backoff in TransientFailure), it will Connect(). + sc.connectQueued = true +} diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go new file mode 100644 index 000000000000..bf5da95bf8b3 --- /dev/null +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/testutils" +) + +var ( + cmpOpts = cmp.Options{ + cmp.AllowUnexported(testutils.TestSubConn{}, ringEntry{}, subConn{}), + cmpopts.IgnoreFields(subConn{}, "mu"), + } +) + +const ( + defaultTestShortTimeout = 10 * time.Millisecond +) diff --git a/xds/internal/balancer/ringhash/util.go b/xds/internal/balancer/ringhash/util.go index 848b20844d89..92bb3ae5b791 100644 --- a/xds/internal/balancer/ringhash/util.go +++ b/xds/internal/balancer/ringhash/util.go @@ -16,7 +16,6 @@ * */ -// Package ringhash contains the functionality to support Ring Hash in grpc. 
package ringhash import "context" diff --git a/xds/internal/testutils/balancer.go b/xds/internal/testutils/balancer.go index 820d5bf3a1ac..ff74da71cc95 100644 --- a/xds/internal/testutils/balancer.go +++ b/xds/internal/testutils/balancer.go @@ -46,21 +46,28 @@ var TestSubConns []*TestSubConn func init() { for i := 0; i < TestSubConnsCount; i++ { TestSubConns = append(TestSubConns, &TestSubConn{ - id: fmt.Sprintf("sc%d", i), + id: fmt.Sprintf("sc%d", i), + ConnectCh: make(chan struct{}, 1), }) } } // TestSubConn implements the SubConn interface, to be used in tests. type TestSubConn struct { - id string + id string + ConnectCh chan struct{} } // UpdateAddresses is a no-op. func (tsc *TestSubConn) UpdateAddresses([]resolver.Address) {} // Connect is a no-op. -func (tsc *TestSubConn) Connect() {} +func (tsc *TestSubConn) Connect() { + select { + case tsc.ConnectCh <- struct{}{}: + default: + } +} // String implements stringer to print human friendly error message. func (tsc *TestSubConn) String() string { From 2f3355d2244eb436564a93dfbe2b0ba907adeb98 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 7 Sep 2021 11:11:16 -0700 Subject: [PATCH 219/998] xds: update go-control-plane to latest (#4737) --- examples/go.sum | 16 +++++++-------- go.mod | 2 +- go.sum | 14 +++++++------ security/advancedtls/examples/go.sum | 8 ++++---- security/advancedtls/go.sum | 8 ++++---- xds/csds/csds_test.go | 12 +++++------ xds/internal/httpfilter/fault/fault_test.go | 12 +++++++---- .../test/xds_client_integration_test.go | 6 +++--- .../test/xds_server_integration_test.go | 20 +++++++++---------- .../test/xds_server_serving_mode_test.go | 8 ++++---- xds/internal/testutils/e2e/server.go | 17 +++++++++++++--- 11 files changed, 70 insertions(+), 53 deletions(-) diff --git a/examples/go.sum b/examples/go.sum index 16d17f9cdb0a..a359cfc183f8 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -7,12 +7,12 @@ github.com/cespare/xxhash/v2 v2.1.1 
h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158 h1:CevA8fI91PAnP8vpnXuB8ZYAZ5wqY86nAbxfgK8tWO4= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ 
-41,8 +41,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -88,7 +88,7 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/go.mod b/go.mod index 4387b31e387a..022cc9828fed 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.14 require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 - github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 + github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.4.3 github.com/google/go-cmp v0.5.0 diff --git a/go.sum b/go.sum index 4ce2fc810e3f..6e7ae0db2b37 100644 --- a/go.sum +++ b/go.sum @@ -11,16 +11,16 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158 h1:CevA8fI91PAnP8vpnXuB8ZYAZ5wqY86nAbxfgK8tWO4= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -53,8 +53,9 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 
h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -118,7 +119,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 9a616f8b80da..272f1afa4079 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -3,9 +3,9 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -34,7 +34,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -77,6 +77,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 9a616f8b80da..272f1afa4079 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -3,9 +3,9 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -34,7 +34,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -77,6 +77,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 6b594550d8d7..ffaae2d739ab 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -161,7 +161,9 @@ func init() { func TestCSDS(t *testing.T) { const retryCount = 10 - xdsC, mgmServer, nodeID, stream, cleanup := commonSetup(t) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + xdsC, mgmServer, nodeID, stream, cleanup := commonSetup(ctx, t) defer cleanup() for _, target := range ldsTargets { @@ -188,7 +190,7 @@ func TestCSDS(t *testing.T) { time.Sleep(time.Millisecond * 100) } - if err := mgmServer.Update(e2e.UpdateOptions{ + if err := mgmServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, Listeners: listeners, Routes: routes, @@ -209,7 +211,7 @@ func TestCSDS(t *testing.T) { } const nackResourceIdx = 0 - if err := mgmServer.Update(e2e.UpdateOptions{ + if err := mgmServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, Listeners: []*v3listenerpb.Listener{ {Name: ldsTargets[nackResourceIdx], ApiListener: &v3listenerpb.ApiListener{}}, // 0 will be nacked. 1 will stay the same. 
@@ -241,7 +243,7 @@ func TestCSDS(t *testing.T) { } } -func commonSetup(t *testing.T) (xdsclient.XDSClient, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { +func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { t.Helper() // Spin up a xDS management server on a local port. @@ -292,7 +294,6 @@ func commonSetup(t *testing.T) (xdsclient.XDSClient, *e2e.ManagementServer, stri t.Fatalf("cannot connect to server: %v", err) } c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) if err != nil { t.Fatalf("cannot get ServerReflectionInfo: %v", err) @@ -300,7 +301,6 @@ func commonSetup(t *testing.T) (xdsclient.XDSClient, *e2e.ManagementServer, stri return xdsC, fs, nodeID, stream, func() { fs.Stop() - cancel() conn.Close() server.Stop() csdss.Close() diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 695961c129a1..c2959054da9a 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -58,6 +58,8 @@ import ( _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register the v3 xDS API client. 
) +const defaultTestTimeout = 10 * time.Second + type s struct { grpctest.Tester } @@ -520,7 +522,9 @@ func (s) TestFaultInjection_Unary(t *testing.T) { resources.Listeners[0].ApiListener.ApiListener = hcmAny resources.Listeners[0].FilterChains[0].Filters[0].ConfigType = &v3listenerpb.Filter_TypedConfig{TypedConfig: hcmAny} - if err := fs.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := fs.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -593,7 +597,9 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { resources.Listeners[0].ApiListener.ApiListener = hcmAny resources.Listeners[0].FilterChains[0].Filters[0].ConfigType = &v3listenerpb.Filter_TypedConfig{TypedConfig: hcmAny} - if err := fs.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := fs.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -605,8 +611,6 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { defer cc.Close() client := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() streams := make(chan testpb.TestService_FullDuplexCallClient, 5) // startStream() is called 5 times startStream := func() { diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 40e6644fc1fa..f4b60e676945 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -79,7 +79,9 @@ func (s) TestClientSideXDS(t *testing.T) { Port: port, SecLevel: e2e.SecurityLevelNone, }) - if err := managementServer.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -91,8 +93,6 @@ func (s) 
TestClientSideXDS(t *testing.T) { defer cc.Close() client := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("rpc EmptyCall() failed: %v", err) } diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 9aa309981fe0..3749e1dcf2b3 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -140,7 +140,9 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { resources.Listeners = append(resources.Listeners, inboundLis) // Setup the management server with client and server-side resources. - if err := managementServer.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -153,8 +155,6 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { } // Create a ClientConn with the xds scheme and make a successful RPC. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) @@ -225,7 +225,9 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { resources.Listeners = append(resources.Listeners, inboundLis) // Setup the management server with client and server resources. 
- if err := managementServer.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -238,8 +240,6 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { } // Create a ClientConn with the xds scheme and make an RPC. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) @@ -293,7 +293,9 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { resources.Listeners = append(resources.Listeners, inboundLis) // Setup the management server with client and server-side resources. - if err := managementServer.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -306,8 +308,6 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { } // Create a ClientConn with the xds scheme and make a successful RPC. 
- ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) @@ -345,7 +345,7 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { }) inboundLis = e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) resources.Listeners = append(resources.Listeners, inboundLis) - if err := managementServer.Update(resources); err != nil { + if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index ee353ca74c0c..90a6aa103884 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -104,7 +104,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, } - if err := managementServer.Update(resources); err != nil { + if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -155,7 +155,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // Update the management server to remove the second listener resource. This // should push only the second listener into "not-serving" mode. - if err := managementServer.Update(e2e.UpdateOptions{ + if err := managementServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1}, }); err != nil { @@ -189,7 +189,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // Update the management server to remove the first listener resource as // well. This should push the first listener into "not-serving" mode. Second // listener is already in "not-serving" mode. 
- if err := managementServer.Update(e2e.UpdateOptions{ + if err := managementServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, Listeners: []*v3listenerpb.Listener{}, }); err != nil { @@ -221,7 +221,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Update the management server with both listener resources. - if err := managementServer.Update(e2e.UpdateOptions{ + if err := managementServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, }); err != nil { diff --git a/xds/internal/testutils/e2e/server.go b/xds/internal/testutils/e2e/server.go index bc795b1b0cd0..e47dcc5213c2 100644 --- a/xds/internal/testutils/e2e/server.go +++ b/xds/internal/testutils/e2e/server.go @@ -33,6 +33,7 @@ import ( v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/envoyproxy/go-control-plane/pkg/cache/types" v3cache "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + v3resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" v3server "github.com/envoyproxy/go-control-plane/pkg/server/v3" "google.golang.org/grpc" @@ -133,11 +134,21 @@ type UpdateOptions struct { // Update changes the resource snapshot held by the management server, which // updates connected clients as required. -func (s *ManagementServer) Update(opts UpdateOptions) error { +func (s *ManagementServer) Update(ctx context.Context, opts UpdateOptions) error { s.version++ // Create a snapshot with the passed in resources. 
- snapshot := v3cache.NewSnapshot(strconv.Itoa(s.version), resourceSlice(opts.Endpoints), resourceSlice(opts.Clusters), resourceSlice(opts.Routes), resourceSlice(opts.Listeners), nil /*runtimes*/, nil /*secrets*/) + resources := map[v3resource.Type][]types.Resource{ + v3resource.ListenerType: resourceSlice(opts.Listeners), + v3resource.RouteType: resourceSlice(opts.Routes), + v3resource.ClusterType: resourceSlice(opts.Clusters), + v3resource.EndpointType: resourceSlice(opts.Endpoints), + } + snapshot, err := v3cache.NewSnapshot(strconv.Itoa(s.version), resources) + if err != nil { + return fmt.Errorf("failed to create new snapshot cache: %v", err) + + } if !opts.SkipValidation { if err := snapshot.Consistent(); err != nil { return fmt.Errorf("failed to create new resource snapshot: %v", err) @@ -146,7 +157,7 @@ func (s *ManagementServer) Update(opts UpdateOptions) error { logger.Infof("Created new resource snapshot...") // Update the cache with the new resource snapshot. - if err := s.cache.SetSnapshot(opts.NodeID, snapshot); err != nil { + if err := s.cache.SetSnapshot(ctx, opts.NodeID, snapshot); err != nil { return fmt.Errorf("failed to update resource snapshot in management server: %v", err) } logger.Infof("Updated snapshot cache with resource snapshot...") From a6a63177ae6094f9baa83b046bb4f20426ba5b82 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 8 Sep 2021 10:00:44 -0700 Subject: [PATCH 220/998] xds: add retry support (#4738) --- .github/workflows/testing.yml | 2 +- internal/envconfig/envconfig.go | 6 +- internal/xds/env/env.go | 4 + test/retry_test.go | 3 +- xds/internal/resolver/serviceconfig.go | 27 ++- .../test/xds_client_integration_test.go | 161 +++++++++++++++++- xds/internal/xdsclient/client.go | 20 +++ xds/internal/xdsclient/rds_test.go | 87 +++++++++- xds/internal/xdsclient/xds.go | 72 +++++++- 9 files changed, 370 insertions(+), 12 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 
7bf815463606..9a45ba040853 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -49,7 +49,7 @@ jobs: - type: tests goversion: 1.17 - grpcenv: GRPC_GO_RETRY=on + grpcenv: GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY=true - type: extras goversion: 1.17 diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index 73931a94bcad..e766ac04af21 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -22,6 +22,8 @@ package envconfig import ( "os" "strings" + + xdsenv "google.golang.org/grpc/internal/xds/env" ) const ( @@ -31,8 +33,8 @@ const ( ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on" or if XDS retry support is enabled. + Retry = strings.EqualFold(os.Getenv(retryStr), "on") || xdsenv.RetrySupport // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index c2e4e5d97185..9d5d47ff2a8f 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -41,6 +41,7 @@ const ( ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -70,6 +71,9 @@ var ( // "true". AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + // RetrySupport indicates whether xDS retry is enabled. 
+ RetrySupport = strings.EqualFold(os.Getenv(retrySupportEnv), "true") + // C2PResolverSupport indicates whether support for C2P resolver is enabled. // This can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". diff --git a/test/retry_test.go b/test/retry_test.go index f93c9ac053f7..ad1268faa96e 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -113,7 +113,8 @@ func (s) TestRetryUnary(t *testing.T) { } func (s) TestRetryDisabledByDefault(t *testing.T) { - if strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on") { + if strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on") || + strings.EqualFold(os.Getenv("GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY"), "true") { return } i := -1 diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 27f6aab7ad09..dceea49b3b60 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" @@ -107,6 +108,8 @@ func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { type virtualHost struct { // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig + // retry policy present in virtual host + retryConfig *xdsclient.RetryConfig } // routeCluster holds information about a cluster as referenced by a route. 
@@ -122,6 +125,7 @@ type route struct { maxStreamDuration time.Duration // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig + retryConfig *xdsclient.RetryConfig hashPolicies []*xdsclient.HashPolicy } @@ -195,10 +199,25 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP if rt.maxStreamDuration != 0 { config.MethodConfig.Timeout = &rt.maxStreamDuration } + if rt.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(rt.retryConfig) + } else if cs.virtualHost.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(cs.virtualHost.retryConfig) + } return config, nil } +func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPolicy { + return &serviceconfig.RetryPolicy{ + MaxAttempts: int(config.NumRetries) + 1, + InitialBackoff: config.RetryBackoff.BaseInterval, + MaxBackoff: config.RetryBackoff.MaxInterval, + BackoffMultiplier: 2, + RetryableStatusCodes: config.RetryOn, + } +} + func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsclient.HashPolicy) uint64 { var hash uint64 var generatedHash bool @@ -322,8 +341,11 @@ var newWRR = wrr.NewRandom // r.activeClusters for previously-unseen clusters. 
func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, error) { cs := &configSelector{ - r: r, - virtualHost: virtualHost{httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride}, + r: r, + virtualHost: virtualHost{ + httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride, + retryConfig: su.virtualHost.RetryConfig, + }, routes: make([]route, len(su.virtualHost.Routes)), clusters: make(map[string]*clusterInfo), httpFilterConfig: su.ldsConfig.httpFilterConfig, @@ -361,6 +383,7 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro } cs.routes[i].httpFilterConfigOverride = rt.HTTPFilterConfigOverride + cs.routes[i].retryConfig = rt.RetryConfig cs.routes[i].hashPolicies = rt.HashPolicies } diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index f4b60e676945..23ea1546935e 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -28,10 +28,16 @@ import ( "testing" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -42,10 +48,10 @@ import ( // Returns the following: // - the port the server is listening on // - cleanup function to be invoked by the tests when done -func clientSetup(t *testing.T) (uint32, func()) { +func clientSetup(t *testing.T, tss testpb.TestServiceServer) (uint32, func()) { // Initialize a gRPC server and register the stubServer on it. 
server := grpc.NewServer() - testpb.RegisterTestServiceServer(server, &testService{}) + testpb.RegisterTestServiceServer(server, tss) // Create a local listener and pass it to Serve(). lis, err := testutils.LocalTCPListener() @@ -68,7 +74,7 @@ func (s) TestClientSideXDS(t *testing.T) { managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) defer cleanup1() - port, cleanup2 := clientSetup(t) + port, cleanup2 := clientSetup(t, &testService{}) defer cleanup2() const serviceName = "my-service-client-side-xds" @@ -97,3 +103,152 @@ func (s) TestClientSideXDS(t *testing.T) { t.Fatalf("rpc EmptyCall() failed: %v", err) } } + +func (s) TestClientSideRetry(t *testing.T) { + if !env.RetrySupport { + // Skip this test if retry is not enabled. + return + } + + ctr := 0 + errs := []codes.Code{codes.ResourceExhausted} + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + defer func() { ctr++ }() + if ctr < len(errs) { + return nil, status.Errorf(errs[ctr], "this should be retried") + } + return &testpb.Empty{}, nil + }, + } + + managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + port, cleanup2 := clientSetup(t, ss) + defer cleanup2() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. 
+ cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + defer cancel() + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.ResourceExhausted { + t.Fatalf("rpc EmptyCall() = _, %v; want _, ResourceExhausted", err) + } + + testCases := []struct { + name string + vhPolicy *v3routepb.RetryPolicy + routePolicy *v3routepb.RetryPolicy + errs []codes.Code // the errors returned by the server for each RPC + tryAgainErr codes.Code // the error that would be returned if we are still using the old retry policies. + errWant codes.Code + }{{ + name: "virtualHost only, fail", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted,unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 1}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + routePolicy: nil, + tryAgainErr: codes.ResourceExhausted, + errWant: codes.Unavailable, + }, { + name: "virtualHost only", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted, unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + routePolicy: nil, + tryAgainErr: codes.Unavailable, + errWant: codes.OK, + }, { + name: "virtualHost+route, fail", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted,unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + tryAgainErr: codes.OK, + errWant: codes.Unavailable, + }, { + name: "virtualHost+route", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted", + 
NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.Unavailable}, + tryAgainErr: codes.Unavailable, + errWant: codes.OK, + }, { + name: "virtualHost+route, not enough attempts", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 1}, + }, + errs: []codes.Code{codes.Unavailable, codes.Unavailable}, + tryAgainErr: codes.OK, + errWant: codes.Unavailable, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + errs = tc.errs + + // Confirm tryAgainErr is correct before updating resources. + ctr = 0 + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code != tc.tryAgainErr { + t.Fatalf("with old retry policy: EmptyCall() = _, %v; want _, %v", err, tc.tryAgainErr) + } + + resources.Routes[0].VirtualHosts[0].RetryPolicy = tc.vhPolicy + resources.Routes[0].VirtualHosts[0].Routes[0].GetRoute().RetryPolicy = tc.routePolicy + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + for { + ctr = 0 + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code == tc.tryAgainErr { + continue + } else if code != tc.errWant { + t.Fatalf("rpc EmptyCall() = _, %v; want _, %v", err, tc.errWant) + } + break + } + }) + } +} diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 754a025678a9..b6310bb70081 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -33,6 +33,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/httpfilter" 
"google.golang.org/grpc/xds/internal/xdsclient/load" @@ -269,6 +270,24 @@ type VirtualHost struct { // may be unused if the matching Route contains an override for that // filter. HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig +} + +// RetryConfig contains all retry-related configuration in either a VirtualHost +// or Route. +type RetryConfig struct { + // RetryOn is a set of status codes on which to retry. Only Canceled, + // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are + // supported; any other values will be omitted. + RetryOn map[codes.Code]bool + NumRetries uint32 // maximum number of retry attempts + RetryBackoff RetryBackoff // retry backoff policy +} + +// RetryBackoff describes the backoff policy for retries. +type RetryBackoff struct { + BaseInterval time.Duration // initial backoff duration between attempts + MaxInterval time.Duration // maximum backoff duration } // HashPolicyType specifies the type of HashPolicy from a received RDS Response. @@ -339,6 +358,7 @@ type Route struct { // unused if the matching WeightedCluster contains an override for that // filter. 
HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig RouteAction RouteAction } diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index 3787ae0ff328..138e3a0bd2b6 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" @@ -80,6 +81,49 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { }}, } } + goodRouteConfigWithRetryPolicy = func(vhrp *v3routepb.RetryPolicy, rrp *v3routepb.RetryPolicy) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + RetryPolicy: rrp, + }, + }, + }}, + RetryPolicy: vhrp, + }}, + } + } + goodUpdateWithRetryPolicy = func(vhrc *RetryConfig, rrc *RetryConfig) RouteConfigUpdate { + if !env.RetrySupport { + vhrc = nil + rrc = nil + } + return RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + RouteAction: RouteActionRoute, + RetryConfig: rrc, + }}, + RetryConfig: vhrc, + }}, + } + } + defaultRetryBackoff = RetryBackoff{BaseInterval: 25 * time.Millisecond, MaxInterval: 250 * time.Millisecond} + goodUpdateIfRetryDisabled = func() RouteConfigUpdate { + if env.RetrySupport { + return RouteConfigUpdate{} + } + return 
goodUpdateWithRetryPolicy(nil, nil) + } ) tests := []struct { @@ -485,8 +529,49 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("unknown.custom.filter")}), wantUpdate: goodUpdateWithFilterConfigs(nil), }, + { + name: "good-route-config-with-retry-policy", + rc: goodRouteConfigWithRetryPolicy( + &v3routepb.RetryPolicy{RetryOn: "cancelled"}, + &v3routepb.RetryPolicy{RetryOn: "deadline-exceeded,unsupported", NumRetries: &wrapperspb.UInt32Value{Value: 2}}), + wantUpdate: goodUpdateWithRetryPolicy( + &RetryConfig{RetryOn: map[codes.Code]bool{codes.Canceled: true}, NumRetries: 1, RetryBackoff: defaultRetryBackoff}, + &RetryConfig{RetryOn: map[codes.Code]bool{codes.DeadlineExceeded: true}, NumRetries: 2, RetryBackoff: defaultRetryBackoff}), + }, + { + name: "good-route-config-with-retry-backoff", + rc: goodRouteConfigWithRetryPolicy( + &v3routepb.RetryPolicy{RetryOn: "internal", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{BaseInterval: durationpb.New(10 * time.Millisecond), MaxInterval: durationpb.New(10 * time.Millisecond)}}, + &v3routepb.RetryPolicy{RetryOn: "resource-exhausted", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{BaseInterval: durationpb.New(10 * time.Millisecond)}}), + wantUpdate: goodUpdateWithRetryPolicy( + &RetryConfig{RetryOn: map[codes.Code]bool{codes.Internal: true}, NumRetries: 1, RetryBackoff: RetryBackoff{BaseInterval: 10 * time.Millisecond, MaxInterval: 10 * time.Millisecond}}, + &RetryConfig{RetryOn: map[codes.Code]bool{codes.ResourceExhausted: true}, NumRetries: 1, RetryBackoff: RetryBackoff{BaseInterval: 10 * time.Millisecond, MaxInterval: 100 * time.Millisecond}}), + }, + { + name: "bad-retry-policy-0-retries", + rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", NumRetries: &wrapperspb.UInt32Value{Value: 0}}, nil), + wantUpdate: goodUpdateIfRetryDisabled(), + wantError: env.RetrySupport, + 
}, + { + name: "bad-retry-policy-0-base-interval", + rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{BaseInterval: durationpb.New(0)}}, nil), + wantUpdate: goodUpdateIfRetryDisabled(), + wantError: env.RetrySupport, + }, + { + name: "bad-retry-policy-negative-max-interval", + rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{MaxInterval: durationpb.New(-time.Second)}}, nil), + wantUpdate: goodUpdateIfRetryDisabled(), + wantError: env.RetrySupport, + }, + { + name: "bad-retry-policy-negative-max-interval-no-known-retry-on", + rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "something", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{MaxInterval: durationpb.New(-time.Second)}}, nil), + wantUpdate: goodUpdateIfRetryDisabled(), + wantError: env.RetrySupport, + }, } - for _, test := range tests { t.Run(test.name, func(t *testing.T) { gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, nil, false) diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index bfb2e3261a02..3d9148eebcd4 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -39,6 +39,7 @@ import ( v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/protobuf/types/known/anypb" @@ -344,9 +345,14 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l if err != nil { return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) } + rc, err := generateRetryConfig(vh.GetRetryPolicy()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is 
invalid: %v", err) + } vhOut := &VirtualHost{ - Domains: vh.GetDomains(), - Routes: routes, + Domains: vh.GetDomains(), + Routes: routes, + RetryConfig: rc, } if !v2 { cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) @@ -360,6 +366,60 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l return RouteConfigUpdate{VirtualHosts: vhs}, nil } +func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { + if !env.RetrySupport || rp == nil { + return nil, nil + } + + cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} + for _, s := range strings.Split(rp.GetRetryOn(), ",") { + switch strings.TrimSpace(strings.ToLower(s)) { + case "cancelled": + cfg.RetryOn[codes.Canceled] = true + case "deadline-exceeded": + cfg.RetryOn[codes.DeadlineExceeded] = true + case "internal": + cfg.RetryOn[codes.Internal] = true + case "resource-exhausted": + cfg.RetryOn[codes.ResourceExhausted] = true + case "unavailable": + cfg.RetryOn[codes.Unavailable] = true + } + } + + if rp.NumRetries == nil { + cfg.NumRetries = 1 + } else { + cfg.NumRetries = rp.GetNumRetries().Value + if cfg.NumRetries < 1 { + return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) + } + } + + backoff := rp.GetRetryBackOff() + if backoff == nil { + cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond + } else { + cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() + if cfg.RetryBackoff.BaseInterval <= 0 { + return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) + } + } + if max := backoff.GetMaxInterval(); max == nil { + cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval + } else { + cfg.RetryBackoff.MaxInterval = max.AsDuration() + if cfg.RetryBackoff.MaxInterval <= 0 { + return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) + } + } + + if len(cfg.RetryOn) == 0 { + return &RetryConfig{}, nil + 
} + return cfg, nil +} + func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { var routesRet []*Route for _, r := range routes { @@ -507,7 +567,15 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, d := dur.AsDuration() route.MaxStreamDuration = &d } + + var err error + route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) + if err != nil { + return nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) + } + route.RouteAction = RouteActionRoute + case *v3routepb.Route_NonForwardingAction: // Expected to be used on server side. route.RouteAction = RouteActionNonForwardingAction From 1fe5adbbf82f15781a0ce3f704012dc44e6b8e63 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Wed, 8 Sep 2021 17:31:51 -0700 Subject: [PATCH 221/998] interop-testing: add soak test cases to interop client (#4677) --- interop/client/client.go | 39 +++++++++++------- interop/test_utils.go | 88 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 14 deletions(-) diff --git a/interop/client/client.go b/interop/client/client.go index 7b9339d7b614..456b74e101db 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -26,6 +26,7 @@ import ( "io/ioutil" "net" "strconv" + "time" "google.golang.org/grpc" _ "google.golang.org/grpc/balancer/grpclb" @@ -48,20 +49,24 @@ const ( ) var ( - caFile = flag.String("ca_file", "", "The file containning the CA root cert file") - useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true") - useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)") - customCredentialsType = flag.String("custom_credentials_type", "", "Custom creds to use, excluding TLS or ALTS") - altsHSAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") - testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as 
the CA root") - serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") - oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") - defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") - serverHost = flag.String("server_host", "localhost", "The server host name") - serverPort = flag.Int("server_port", 10000, "The server port number") - serviceConfigJSON = flag.String("service_config_json", "", "Disables service config lookups and sets the provided string as the default service config.") - tlsServerName = flag.String("server_host_override", "", "The server name used to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") - testCase = flag.String("test_case", "large_unary", + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true") + useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)") + customCredentialsType = flag.String("custom_credentials_type", "", "Custom creds to use, excluding TLS or ALTS") + altsHSAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") + testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") + serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") + oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") + defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") + serverHost = flag.String("server_host", "localhost", "The server host name") + serverPort = flag.Int("server_port", 10000, "The server port number") + serviceConfigJSON = flag.String("service_config_json", "", "Disables service config lookups and 
sets the provided string as the default service config.") + soakIterations = flag.Int("soak_iterations", 10, "The number of iterations to use for the two soak tests: rpc_soak and channel_soak") + soakMaxFailures = flag.Int("soak_max_failures", 0, "The number of iterations in soak tests that are allowed to fail (either due to non-OK status code or exceeding the per-iteration max acceptable latency).") + soakPerIterationMaxAcceptableLatencyMs = flag.Int("soak_per_iteration_max_acceptable_latency_ms", 1000, "The number of milliseconds a single iteration in the two soak tests (rpc_soak and channel_soak) should take.") + soakOverallTimeoutSeconds = flag.Int("soak_overall_timeout_seconds", 10, "The overall number of seconds after which a soak test should stop and fail, if the desired number of iterations have not yet completed.") + tlsServerName = flag.String("server_host_override", "", "The server name used to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") + testCase = flag.String("test_case", "large_unary", `Configure different test cases. 
Valid options are: empty_unary : empty (zero bytes) request and response; large_unary : single request and (large) response; @@ -292,6 +297,12 @@ func main() { case "pick_first_unary": interop.DoPickFirstUnary(tc) logger.Infoln("PickFirstUnary done") + case "rpc_soak": + interop.DoSoakTest(tc, serverAddr, opts, false /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + logger.Infoln("RpcSoak done") + case "channel_soak": + interop.DoSoakTest(tc, serverAddr, opts, true /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + logger.Infoln("ChannelSoak done") default: logger.Fatal("Unsupported test case: ", *testCase) } diff --git a/interop/test_utils.go b/interop/test_utils.go index cbcbcc4da173..19a5c1f7cd33 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -20,10 +20,12 @@ package interop import ( + "bytes" "context" "fmt" "io" "io/ioutil" + "os" "strings" "time" @@ -31,6 +33,7 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/grpc" + "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" @@ -673,6 +676,91 @@ func DoPickFirstUnary(tc testgrpc.TestServiceClient) { } } +func doOneSoakIteration(ctx context.Context, tc testgrpc.TestServiceClient, resetChannel bool, serverAddr string, dopts []grpc.DialOption) (latency time.Duration, err error) { + start := time.Now() + client := tc + if resetChannel { + var conn *grpc.ClientConn + conn, err = grpc.Dial(serverAddr, dopts...) 
+ if err != nil { + return + } + defer conn.Close() + client = testgrpc.NewTestServiceClient(conn) + } + // per test spec, don't include channel shutdown in latency measurement + defer func() { latency = time.Since(start) }() + // do a large-unary RPC + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: int32(largeRespSize), + Payload: pl, + } + var reply *testpb.SimpleResponse + reply, err = client.UnaryCall(ctx, req) + if err != nil { + err = fmt.Errorf("/TestService/UnaryCall RPC failed: %s", err) + return + } + t := reply.GetPayload().GetType() + s := len(reply.GetPayload().GetBody()) + if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { + err = fmt.Errorf("got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) + return + } + return +} + +// DoSoakTest runs large unary RPCs in a loop for a configurable number of times, with configurable failure thresholds. +// If resetChannel is false, then each RPC will be performed on tc. Otherwise, each RPC will be performed on a new +// stub that is created with the provided server address and dial options. 
+func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.DialOption, resetChannel bool, soakIterations int, maxFailures int, perIterationMaxAcceptableLatency time.Duration, overallDeadline time.Time) { + start := time.Now() + ctx, cancel := context.WithDeadline(context.Background(), overallDeadline) + defer cancel() + iterationsDone := 0 + totalFailures := 0 + hopts := stats.HistogramOptions{ + NumBuckets: 20, + GrowthFactor: 1, + BaseBucketSize: 1, + MinValue: 0, + } + h := stats.NewHistogram(hopts) + for i := 0; i < soakIterations; i++ { + if time.Now().After(overallDeadline) { + break + } + iterationsDone++ + latency, err := doOneSoakIteration(ctx, tc, resetChannel, serverAddr, dopts) + latencyMs := int64(latency / time.Millisecond) + h.Add(latencyMs) + if err != nil { + totalFailures++ + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d failed: %s\n", i, latencyMs, err) + continue + } + if latency > perIterationMaxAcceptableLatency { + totalFailures++ + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d exceeds max acceptable latency: %d\n", i, latencyMs, perIterationMaxAcceptableLatency.Milliseconds()) + continue + } + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d succeeded\n", i, latencyMs) + } + var b bytes.Buffer + h.Print(&b) + fmt.Fprintln(os.Stderr, "Histogram of per-iteration latencies in milliseconds:") + fmt.Fprintln(os.Stderr, b.String()) + fmt.Fprintf(os.Stderr, "soak test ran: %d / %d iterations. total failures: %d. max failures threshold: %d. 
See breakdown above for which iterations succeeded, failed, and why for more info.\n", iterationsDone, soakIterations, totalFailures, maxFailures) + if iterationsDone < soakIterations { + logger.Fatalf("soak test consumed all %f seconds of time and quit early, only having ran %d out of desired %d iterations.", overallDeadline.Sub(start).Seconds(), iterationsDone, soakIterations) + } + if totalFailures > maxFailures { + logger.Fatalf("soak test total failures: %d exceeds max failures threshold: %d.", totalFailures, maxFailures) + } +} + type testServer struct { testgrpc.UnimplementedTestServiceServer } From 2608e38e6386be7400720fecf2ece176c4cbc1b2 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 9 Sep 2021 13:35:41 -0400 Subject: [PATCH 222/998] xds: Added server side routing (#4726) * Added server side routing --- xds/internal/resolver/watch_service.go | 97 +------ xds/internal/resolver/watch_service_test.go | 48 +--- xds/internal/server/conn_wrapper.go | 6 + .../test/xds_server_integration_test.go | 240 ++++++++++++++++++ xds/internal/testutils/e2e/clientresources.go | 10 +- xds/internal/xdsclient/filter_chain.go | 3 + xds/internal/xdsclient/matcher.go | 122 +++++++++ xds/internal/xdsclient/matcher_test.go | 46 ++++ xds/internal/xdsclient/v2/rds_test.go | 2 +- xds/internal/xdsclient/xds.go | 2 +- xds/server.go | 70 ++++- 11 files changed, 495 insertions(+), 151 deletions(-) diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index bea5bbcda140..da0bf95f3b9f 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -20,7 +20,6 @@ package resolver import ( "fmt" - "strings" "sync" "time" @@ -152,7 +151,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er } func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteConfigUpdate) { - matchVh := 
findBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) + matchVh := xdsclient.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) if matchVh == nil { // No matching virtual host found. w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) @@ -192,97 +191,3 @@ func (w *serviceUpdateWatcher) close() { w.rdsCancel = nil } } - -type domainMatchType int - -const ( - domainMatchTypeInvalid domainMatchType = iota - domainMatchTypeUniversal - domainMatchTypePrefix - domainMatchTypeSuffix - domainMatchTypeExact -) - -// Exact > Suffix > Prefix > Universal > Invalid. -func (t domainMatchType) betterThan(b domainMatchType) bool { - return t > b -} - -func matchTypeForDomain(d string) domainMatchType { - if d == "" { - return domainMatchTypeInvalid - } - if d == "*" { - return domainMatchTypeUniversal - } - if strings.HasPrefix(d, "*") { - return domainMatchTypeSuffix - } - if strings.HasSuffix(d, "*") { - return domainMatchTypePrefix - } - if strings.Contains(d, "*") { - return domainMatchTypeInvalid - } - return domainMatchTypeExact -} - -func match(domain, host string) (domainMatchType, bool) { - switch typ := matchTypeForDomain(domain); typ { - case domainMatchTypeInvalid: - return typ, false - case domainMatchTypeUniversal: - return typ, true - case domainMatchTypePrefix: - // abc.* - return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) - case domainMatchTypeSuffix: - // *.123 - return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) - case domainMatchTypeExact: - return typ, domain == host - default: - return domainMatchTypeInvalid, false - } -} - -// findBestMatchingVirtualHost returns the virtual host whose domains field best -// matches host -// -// The domains field support 4 different matching pattern types: -// - Exact match -// - Suffix match (e.g. “*ABC”) -// - Prefix match (e.g. “ABC*) -// - Universal match (e.g. 
“*”) -// -// The best match is defined as: -// - A match is better if it’s matching pattern type is better -// - Exact match > suffix match > prefix match > universal match -// - If two matches are of the same pattern type, the longer match is better -// - This is to compare the length of the matching pattern, e.g. “*ABCDE” > -// “*ABC” -func findBestMatchingVirtualHost(host string, vHosts []*xdsclient.VirtualHost) *xdsclient.VirtualHost { - var ( - matchVh *xdsclient.VirtualHost - matchType = domainMatchTypeInvalid - matchLen int - ) - for _, vh := range vHosts { - for _, domain := range vh.Domains { - typ, matched := match(domain, host) - if typ == domainMatchTypeInvalid { - // The rds response is invalid. - return nil - } - if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { - // The previous match has better type, or the previous match has - // better length, or this domain isn't a match. - continue - } - matchVh = vh - matchType = typ - matchLen = len(domain) - } - } - return matchVh -} diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index b622118e4b3e..1bf65c4d4506 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -32,52 +32,6 @@ import ( "google.golang.org/protobuf/proto" ) -func (s) TestMatchTypeForDomain(t *testing.T) { - tests := []struct { - d string - want domainMatchType - }{ - {d: "", want: domainMatchTypeInvalid}, - {d: "*", want: domainMatchTypeUniversal}, - {d: "bar.*", want: domainMatchTypePrefix}, - {d: "*.abc.com", want: domainMatchTypeSuffix}, - {d: "foo.bar.com", want: domainMatchTypeExact}, - {d: "foo.*.com", want: domainMatchTypeInvalid}, - } - for _, tt := range tests { - if got := matchTypeForDomain(tt.d); got != tt.want { - t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want) - } - } -} - -func (s) TestMatch(t *testing.T) { - tests := []struct { - name string - domain 
string - host string - wantTyp domainMatchType - wantMatched bool - }{ - {name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, - {name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, - {name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true}, - {name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true}, - {name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false}, - {name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true}, - {name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false}, - {name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: true}, - {name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotTyp, gotMatched := match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched { - t.Errorf("match() = %v, %v, want %v, %v", gotTyp, gotMatched, tt.wantTyp, tt.wantMatched) - } - }) - } -} - func (s) TestFindBestMatchingVirtualHost(t *testing.T) { var ( oneExactMatch = &xdsclient.VirtualHost{ @@ -121,7 +75,7 @@ func (s) TestFindBestMatchingVirtualHost(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := findBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { + if got := xdsclient.FindBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { t.Errorf("findBestMatchingxdsclient.VirtualHost() = %v, want %v", got, tt.want) } }) diff --git a/xds/internal/server/conn_wrapper.go 
b/xds/internal/server/conn_wrapper.go index e4ca1c8c055e..dd0374dc88e4 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -64,6 +64,11 @@ type connWrapper struct { virtualHosts []xdsclient.VirtualHostWithInterceptors } +// VirtualHosts returns the virtual hosts to be used for server side routing. +func (c *connWrapper) VirtualHosts() []xdsclient.VirtualHostWithInterceptors { + return c.virtualHosts +} + // SetDeadline makes a copy of the passed in deadline and forwards the call to // the underlying rawConn. func (c *connWrapper) SetDeadline(t time.Time) error { @@ -128,6 +133,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { return xdsHI, nil } +// Close closes the providers and the underlying connection. func (c *connWrapper) Close() error { if c.identityProvider != nil { c.identityProvider.Close() diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 3749e1dcf2b3..9136953f7978 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -27,15 +27,23 @@ import ( "fmt" "net" "strconv" + "strings" "testing" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" "google.golang.org/grpc/xds" "google.golang.org/grpc/xds/internal/testutils/e2e" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + 
wrapperspb "github.com/golang/protobuf/ptypes/wrappers" xdscreds "google.golang.org/grpc/credentials/xds" testpb "google.golang.org/grpc/test/grpc_testing" xdstestutils "google.golang.org/grpc/xds/internal/testutils" @@ -354,3 +362,235 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { t.Fatalf("rpc EmptyCall() failed: %v", err) } } + +// TestServerSideXDS_RouteConfiguration is an e2e test which verifies routing +// functionality. The xDS enabled server will be set up with route configuration +// where the route configuration has routes with the correct routing actions +// (NonForwardingAction), and the RPC's matching those routes should proceed as +// normal. +func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + + // Create an inbound xDS listener resource with route configuration which + // selectively will allow RPC's through or not. This will test routing in + // xds(Unary|Stream)Interceptors. + vhs := []*v3routepb.VirtualHost{ + // Virtual host that will never be matched to test Virtual Host selection. + { + Domains: []string{"this will not match*"}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }, + }, + }, + // This Virtual Host will actually get matched to. 
+ { + Domains: []string{"*"}, + Routes: []*v3routepb.Route{ + // A routing rule that can be selectively triggered based on properties about incoming RPC. + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/EmptyCall"}, + // "Fully-qualified RPC method name with leading slash. Same as :path header". + }, + // Correct Action, so RPC's that match this route should proceed to interceptor processing. + Action: &v3routepb.Route_NonForwardingAction{}, + }, + // This routing rule is matched the same way as the one above, + // except has an incorrect action for the server side. However, + // since routing chooses the first route which matches an + // incoming RPC, this should never get invoked (iteration + // through this route slice is deterministic). + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/EmptyCall"}, + // "Fully-qualified RPC method name with leading slash. Same as :path header". + }, + // Incorrect Action, so RPC's that match this route should get denied. + Action: &v3routepb.Route_Route{}, + }, + // Another routing rule that can be selectively triggered based on incoming RPC. + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/UnaryCall"}, + }, + // Wrong action (!Non_Forwarding_Action) so RPC's that match this route should get denied. + Action: &v3routepb.Route_Route{}, + }, + // Another routing rule that can be selectively triggered based on incoming RPC. + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/StreamingInputCall"}, + }, + // Wrong action (!Non_Forwarding_Action) so RPC's that match this route should get denied. + Action: &v3routepb.Route_Route{}, + }, + // Not matching route, this is be able to get invoked logically (i.e. doesn't have to match the Route configurations above). 
+ }}, + } + inboundLis := &v3listenerpb.Listener{ + Name: fmt.Sprintf(e2e.ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: vhs, + }, + }, + }), + }, + }, + }, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", 
&v3routerpb.Router{})}, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: vhs, + }, + }, + }), + }, + }, + }, + }, + }, + } + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + + // This Empty Call should match to a route with a correct action + // (NonForwardingAction). Thus, this RPC should proceed as normal. There is + // a routing rule that this RPC would match to that has an incorrect action, + // but the server should only use the first route matched to with the + // correct action. + if _, err = client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // This Unary Call should match to a route with an incorrect action. Thus, + // this RPC should not go through as per A36, and this call should receive + // an error with codes.Unavailable. + if _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { + t.Fatalf("client.UnaryCall() = _, %v, want _, error code %s", err, codes.Unavailable) + } + + // This Streaming Call should match to a route with an incorrect action. + // Thus, this RPC should not go through as per A36, and this call should + // receive an error with codes.Unavailable. 
+ stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("StreamingInputCall(_) = _, %v, want ", err) + } + if _, err = stream.CloseAndRecv(); status.Code(err) != codes.Unavailable || !strings.Contains(err.Error(), "the incoming RPC matched to a route that was not of action type non forwarding") { + t.Fatalf("streaming RPC should have been denied") + } + + // This Full Duplex should not match to a route, and thus should return an + // error and not proceed. + dStream, err := client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("FullDuplexCall(_) = _, %v, want ", err) + } + if _, err = dStream.Recv(); status.Code(err) != codes.Unavailable || !strings.Contains(err.Error(), "the incoming RPC did not match a configured Route") { + t.Fatalf("streaming RPC should have been denied") + } +} diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go index d1487374e35a..daae1aea8004 100644 --- a/xds/internal/testutils/e2e/clientresources.go +++ b/xds/internal/testutils/e2e/clientresources.go @@ -207,7 +207,10 @@ func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3 RouteConfig: &v3routepb.RouteConfiguration{ Name: "routeName", VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{"lds.target.good:3333"}, + // This "*" string matches on any incoming authority. This is to ensure any + // incoming RPC matches to Route_NonForwardingAction and will proceed as + // normal. + Domains: []string{"*"}, Routes: []*v3routepb.Route{{ Match: &v3routepb.RouteMatch{ PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, @@ -252,7 +255,10 @@ func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3 RouteConfig: &v3routepb.RouteConfiguration{ Name: "routeName", VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{"lds.target.good:3333"}, + // This "*" string matches on any incoming authority. 
This is to ensure any + // incoming RPC matches to Route_NonForwardingAction and will proceed as + // normal. + Domains: []string{"*"}, Routes: []*v3routepb.Route{{ Match: &v3routepb.RouteMatch{ PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 9784083de104..9f49e6d1885e 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -85,6 +85,8 @@ type VirtualHostWithInterceptors struct { type RouteWithInterceptors struct { // M is the matcher used to match to this route. M *CompositeMatcher + // RouteAction is the type of routing action to initiate once matched to. + RouteAction RouteAction // Interceptors are interceptors instantiated for this route. These will be // constructed from a combination of the top level configuration and any // HTTP Filter overrides present in Virtual Host or Route. @@ -109,6 +111,7 @@ func (f *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostW rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) for i, r := range virtualHost.Routes { var err error + rs[i].RouteAction = r.RouteAction rs[i].M, err = RouteToMatcher(r) if err != nil { return VirtualHostWithInterceptors{}, fmt.Errorf("matcher construction: %v", err) diff --git a/xds/internal/xdsclient/matcher.go b/xds/internal/xdsclient/matcher.go index 324e1f4dcab2..e663e02769f8 100644 --- a/xds/internal/xdsclient/matcher.go +++ b/xds/internal/xdsclient/matcher.go @@ -154,3 +154,125 @@ func (fm *fractionMatcher) match() bool { func (fm *fractionMatcher) String() string { return fmt.Sprintf("fraction:%v", fm.fraction) } + +type domainMatchType int + +const ( + domainMatchTypeInvalid domainMatchType = iota + domainMatchTypeUniversal + domainMatchTypePrefix + domainMatchTypeSuffix + domainMatchTypeExact +) + +// Exact > Suffix > Prefix > Universal > Invalid. 
+func (t domainMatchType) betterThan(b domainMatchType) bool { + return t > b +} + +func matchTypeForDomain(d string) domainMatchType { + if d == "" { + return domainMatchTypeInvalid + } + if d == "*" { + return domainMatchTypeUniversal + } + if strings.HasPrefix(d, "*") { + return domainMatchTypeSuffix + } + if strings.HasSuffix(d, "*") { + return domainMatchTypePrefix + } + if strings.Contains(d, "*") { + return domainMatchTypeInvalid + } + return domainMatchTypeExact +} + +func match(domain, host string) (domainMatchType, bool) { + switch typ := matchTypeForDomain(domain); typ { + case domainMatchTypeInvalid: + return typ, false + case domainMatchTypeUniversal: + return typ, true + case domainMatchTypePrefix: + // abc.* + return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) + case domainMatchTypeSuffix: + // *.123 + return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) + case domainMatchTypeExact: + return typ, domain == host + default: + return domainMatchTypeInvalid, false + } +} + +// FindBestMatchingVirtualHost returns the virtual host whose domains field best +// matches host +// +// The domains field support 4 different matching pattern types: +// - Exact match +// - Suffix match (e.g. “*ABC”) +// - Prefix match (e.g. “ABC*) +// - Universal match (e.g. “*”) +// +// The best match is defined as: +// - A match is better if it’s matching pattern type is better +// - Exact match > suffix match > prefix match > universal match +// - If two matches are of the same pattern type, the longer match is better +// - This is to compare the length of the matching pattern, e.g. 
“*ABCDE” > +// “*ABC” +func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHost { // Maybe move this crap to client + var ( + matchVh *VirtualHost + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, host) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. + continue + } + matchVh = vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} + +// FindBestMatchingVirtualHostServer returns the virtual host whose domains field best +// matches authority. +func FindBestMatchingVirtualHostServer(authority string, vHosts []VirtualHostWithInterceptors) *VirtualHostWithInterceptors { + var ( + matchVh *VirtualHostWithInterceptors + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, authority) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. 
+ continue + } + matchVh = &vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} diff --git a/xds/internal/xdsclient/matcher_test.go b/xds/internal/xdsclient/matcher_test.go index 000adf49e289..f750d07d6e4f 100644 --- a/xds/internal/xdsclient/matcher_test.go +++ b/xds/internal/xdsclient/matcher_test.go @@ -145,3 +145,49 @@ func TestFractionMatcherMatch(t *testing.T) { t.Errorf("match() = %v, want match", matched) } } + +func (s) TestMatchTypeForDomain(t *testing.T) { + tests := []struct { + d string + want domainMatchType + }{ + {d: "", want: domainMatchTypeInvalid}, + {d: "*", want: domainMatchTypeUniversal}, + {d: "bar.*", want: domainMatchTypePrefix}, + {d: "*.abc.com", want: domainMatchTypeSuffix}, + {d: "foo.bar.com", want: domainMatchTypeExact}, + {d: "foo.*.com", want: domainMatchTypeInvalid}, + } + for _, tt := range tests { + if got := matchTypeForDomain(tt.d); got != tt.want { + t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want) + } + } +} + +func (s) TestMatch(t *testing.T) { + tests := []struct { + name string + domain string + host string + wantTyp domainMatchType + wantMatched bool + }{ + {name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, + {name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, + {name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true}, + {name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true}, + {name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false}, + {name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true}, + {name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false}, + {name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: 
domainMatchTypeExact, wantMatched: true}, + {name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotTyp, gotMatched := match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched { + t.Errorf("match() = %v, %v, want %v, %v", gotTyp, gotMatched, tt.wantTyp, tt.wantMatched) + } + }) + } +} diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go index efc010224778..00ac2791ad6e 100644 --- a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -81,7 +81,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { }, wantUpdateErr: false, }, - // No VirtualHosts in the response. Just one test case here for a bad + // No virtualHosts in the response. Just one test case here for a bad // RouteConfiguration, since the others are covered in // TestGetClusterFromRouteConfiguration. { diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 3d9148eebcd4..022d644a955c 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -330,7 +330,7 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s // VirtualHost whose domain field matches the server name from the URI passed // to the gRPC channel, and it contains a clusterName or a weighted cluster. // -// The RouteConfiguration includes a list of VirtualHosts, which may have zero +// The RouteConfiguration includes a list of virtualHosts, which may have zero // or more elements. We are interested in the element whose domains field // matches the server name specified in the "xds:" URI. The only field in the // VirtualHost proto that the we are interested in is the list of routes. 
We diff --git a/xds/server.go b/xds/server.go index c096b1662b28..2014fcf5ec95 100644 --- a/xds/server.go +++ b/xds/server.go @@ -27,12 +27,17 @@ import ( "sync" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/buffer" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/server" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -317,18 +322,75 @@ func (s *GRPCServer) GracefulStop() { } } +// routeAndProcess routes the incoming RPC to a configured route in the route +// table and also processes the RPC by running the incoming RPC through any HTTP +// Filters configured. +func routeAndProcess(ctx context.Context) error { + conn := transport.GetConnection(ctx) + cw, ok := conn.(interface { + VirtualHosts() []xdsclient.VirtualHostWithInterceptors + }) + if !ok { + return errors.New("missing virtual hosts in incoming context") + } + mn, ok := grpc.Method(ctx) + if !ok { + return errors.New("missing method name in incoming context") + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errors.New("missing metadata in incoming context") + } + // A41 added logic to the core grpc implementation to guarantee that once + // the RPC gets to this point, there will be a single, unambiguous authority + // present in the header map. 
+ authority := md.Get(":authority") + vh := xdsclient.FindBestMatchingVirtualHostServer(authority[0], cw.VirtualHosts()) + if vh == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") + } + + var rwi *xdsclient.RouteWithInterceptors + rpcInfo := iresolver.RPCInfo{ + Context: ctx, + Method: mn, + } + for _, r := range vh.Routes { + if r.M.Match(rpcInfo) { + // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes + // RPCs matching that route to fail with UNAVAILABLE." - A36 + if r.RouteAction != xdsclient.RouteActionNonForwardingAction { + return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") + } + rwi = &r + break + } + } + if rwi == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Route") + } + for _, interceptor := range rwi.Interceptors { + if err := interceptor.AllowRPC(ctx); err != nil { + return status.Errorf(codes.PermissionDenied, "Incoming RPC is not allowed: %v", err) + } + } + return nil +} + // xdsUnaryInterceptor is the unary interceptor added to the gRPC server to // perform any xDS specific functionality on unary RPCs. -// -// This is a no-op at this point. func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if err := routeAndProcess(ctx); err != nil { + return nil, err + } return handler(ctx, req) } // xdsStreamInterceptor is the stream interceptor added to the gRPC server to // perform any xDS specific functionality on streaming RPCs. -// -// This is a no-op at this point. 
func xdsStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if err := routeAndProcess(ss.Context()); err != nil { + return err + } return handler(srv, ss) } From 0a99ae2d035feeb87506e767bd88d3b7364d1059 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 10 Sep 2021 09:04:59 -0700 Subject: [PATCH 223/998] xds: support new fields to fetch security configuration (#4747) --- xds/internal/xdsclient/cds_test.go | 200 +++++++++++++++++++- xds/internal/xdsclient/client.go | 32 ++++ xds/internal/xdsclient/filter_chain.go | 8 +- xds/internal/xdsclient/lds_test.go | 252 ++++++++++++++++++++++++- xds/internal/xdsclient/xds.go | 107 ++++++++++- 5 files changed, 579 insertions(+), 20 deletions(-) diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index 88eda33780d2..a72322c1ba0d 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -727,7 +727,44 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { wantErr: true, }, { - name: "happy-case-with-no-identity-certs", + name: "invalid-regex-in-matching-SAN-with-new-fields", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: 
&v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexBad}}}, + }, + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "happy-case-with-no-identity-certs-using-deprecated-fields", cluster: &v3clusterpb.Cluster{ Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, @@ -767,7 +804,49 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, { - name: "happy-case-with-validation-context-provider-instance", + name: "happy-case-with-no-identity-certs-using-new-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, + SecurityCfg: &SecurityConfig{ + RootInstanceName: 
rootPluginInstance, + RootCertName: rootCertName, + }, + }, + }, + { + name: "happy-case-with-validation-context-provider-instance-using-deprecated-fields", cluster: &v3clusterpb.Cluster{ Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, @@ -813,7 +892,55 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, { - name: "happy-case-with-combined-validation-context", + name: "happy-case-with-validation-context-provider-instance-using-new-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + IdentityInstanceName: identityPluginInstance, + IdentityCertName: identityCertName, + }, + }, + }, + { + name: 
"happy-case-with-combined-validation-context-using-deprecated-fields", cluster: &v3clusterpb.Cluster{ Name: clusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, @@ -879,6 +1006,73 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, }, + { + name: "happy-case-with-combined-validation-context-using-new-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + { + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: sanExact}, + IgnoreCase: true, + }, + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: sanPrefix}}, + {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: sanSuffix}}, + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexGood}}}, + {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: sanContains}}, + }, + CaCertificateProviderInstance: 
&v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + IdentityInstanceName: identityPluginInstance, + IdentityCertName: identityCertName, + SubjectAltNameMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP(sanExact), nil, nil, nil, nil, true), + matcher.StringMatcherForTesting(nil, newStringP(sanPrefix), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP(sanSuffix), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, sanRE, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP(sanContains), nil, false), + }, + }, + }, + }, } for _, test := range tests { diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index b6310bb70081..0968ba59c494 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -425,6 +425,38 @@ type SecurityConfig struct { RequireClientCert bool } +// Equal returns true if sc is equal to other. 
+func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + } + switch { + case sc.RootInstanceName != other.RootInstanceName: + return false + case sc.RootCertName != other.RootCertName: + return false + case sc.IdentityInstanceName != other.IdentityInstanceName: + return false + case sc.IdentityCertName != other.IdentityCertName: + return false + case sc.RequireClientCert != other.RequireClientCert: + return false + default: + if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { + return false + } + for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { + if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { + return false + } + } + } + return true +} + // ClusterType is the type of cluster from a received CDS response. type ClusterType int diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 9f49e6d1885e..5b2fd79973ad 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -522,12 +522,14 @@ func (fci *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain if downstreamCtx.GetCommonTlsContext() == nil { return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") } - sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext()) + sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext(), true) if err != nil { return nil, err } - if sc.IdentityInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + if sc == nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. 
+ return filterChain, nil } sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() if sc.RequireClientCert && sc.RootInstanceName == "" { diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index 8b9dc7135004..af1cba181752 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -658,7 +658,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }) - listenerNoValidationContext = testutils.MarshalAny(&v3listenerpb.Listener{ + listenerNoValidationContextDeprecatedFields = testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -698,7 +698,47 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }) - listenerWithValidationContext = testutils.MarshalAny(&v3listenerpb.Listener{ + listenerNoValidationContextNewFields = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: 
&v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + }, + }), + }, + }, + }, + }) + listenerWithValidationContextDeprecatedFields = testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -752,6 +792,66 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }) + listenerWithValidationContextNewFields = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + }, + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: 
&v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "defaultRootPluginInstance", + CertificateName: "defaultRootCertName", + }, + }, + }, + }, + }, + }), + }, + }, + }, + }) errMD = UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, @@ -1233,7 +1333,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, { - name: "no identity and root certificate providers", + name: "no identity and root certificate providers using deprecated fields", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, @@ -1262,6 +1362,36 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { wantMD: errMD, wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", }, + { + name: "no identity and root certificate providers using new fields", + resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: 
"identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + })}, + wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantMD: errMD, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + }, { name: "no identity certificate provider with require_client_cert", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ @@ -1287,8 +1417,57 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", }, { - name: "happy case with no validation context", - resources: []*anypb.Any{listenerNoValidationContext}, + name: "happy case with no validation context using deprecated fields", + resources: []*anypb.Any{listenerNoValidationContextDeprecatedFields}, + wantUpdate: map[string]ListenerUpdate{ + v3LDSTarget: { + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + Raw: listenerNoValidationContextDeprecatedFields, + }, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, + { + name: "happy 
case with no validation context using new fields", + resources: []*anypb.Any{listenerNoValidationContextNewFields}, wantUpdate: map[string]ListenerUpdate{ v3LDSTarget: { InboundListenerCfg: &InboundListenerConfig{ @@ -1327,7 +1506,62 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - Raw: listenerNoValidationContext, + Raw: listenerNoValidationContextNewFields, + }, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, + { + name: "happy case with validation context provider instance with deprecated fields", + resources: []*anypb.Any{listenerWithValidationContextDeprecatedFields}, + wantUpdate: map[string]ListenerUpdate{ + v3LDSTarget: { + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + Raw: listenerWithValidationContextDeprecatedFields, }, }, wantMD: UpdateMetadata{ @@ -1336,8 +1570,8 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, { - name: "happy case with validation context provider 
instance", - resources: []*anypb.Any{listenerWithValidationContext}, + name: "happy case with validation context provider instance with new fields", + resources: []*anypb.Any{listenerWithValidationContextNewFields}, wantUpdate: map[string]ListenerUpdate{ v3LDSTarget: { InboundListenerCfg: &InboundListenerConfig{ @@ -1382,7 +1616,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - Raw: listenerWithValidationContext, + Raw: listenerWithValidationContextNewFields, }, }, wantMD: UpdateMetadata{ diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 022d644a955c..bd223ff7aa8d 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -812,18 +812,41 @@ func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, e return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") } - sc, err := securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext()) + return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false) +} + +// common is expected to be not nil. +func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + sc, err := securityConfigFromCommonTLSContextUsingNewFields(common) if err != nil { return nil, err } - if sc.RootInstanceName == "" { - return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + if sc == nil || sc.Equal(&SecurityConfig{}) { + // If we can't get a valid security config from the new fields, we + // fallback to the old deprecated fields. + // TODO(easwars): Remove this once TD starts populating the new fields. + sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common) + if err != nil { + return nil, err + } + } + if sc != nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. 
xDS creds will use fallback creds. + if server { + if sc.IdentityInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + } + } else { + if sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + } + } } return sc, nil } -// common is expected to be not nil. -func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*SecurityConfig, error) { +func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext) (*SecurityConfig, error) { // The `CommonTlsContext` contains a // `tls_certificate_certificate_provider_instance` field of type // `CertificateProviderInstance`, which contains the provider instance name @@ -873,6 +896,80 @@ func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*Secu return sc, nil } +// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md +// specifies the new way to fetch security configuration and says the following: +// +// Although there are various ways to obtain certificates as per this proto +// (which are supported by Envoy), gRPC supports only one of them and that is +// the `CertificateProviderPluginInstance` proto. +// +// This helper function attempts to fetch security configuration from the +// `CertificateProviderPluginInstance` message, given a CommonTlsContext. +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext) (*SecurityConfig, error) { + // The `tls_certificate_provider_instance` field of type + // `CertificateProviderPluginInstance` is used to fetch the identity + // certificate provider. 
+ sc := &SecurityConfig{} + if identity := common.GetTlsCertificateProviderInstance(); identity != nil { + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + } + + // The `CommonTlsContext` contains a oneof field `validation_context_type`, + // which contains the `CertificateValidationContext` message in one of the + // following ways: + // - `validation_context` field + // - this is directly of type `CertificateValidationContext` + // - `combined_validation_context` field + // - this is of type `CombinedCertificateValidationContext` and contains + // a `default validation context` field of type + // `CertificateValidationContext` + // + // The `CertificateValidationContext` message has the following fields that + // we are interested in: + // - `ca_certificate_provider_instance` + // - this is of type `CertificateProviderPluginInstance` + // - `match_subject_alt_names` + // - this is a list of string matchers + // + // The `CertificateProviderPluginInstance` message contains two fields + // - instance_name + // - this is the certificate provider instance name to be looked up in + // the bootstrap configuration + // - certificate_name + // - this is an opaque name passed to the certificate provider + var validationCtx *v3tlspb.CertificateValidationContext + switch common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_ValidationContext: + validationCtx = common.GetValidationContext() + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() + case nil: + // It is valid for the validation context to be nil on the server side. + return sc, nil + } + if validationCtx == nil || validationCtx.GetCaCertificateProviderInstance() == nil { + // Bail out if the `CertificateProviderPluginInstance` message is not + // found through one of the way detailed above. 
+ return nil, nil + } + + if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { + sc.RootInstanceName = rootProvider.GetInstanceName() + sc.RootCertName = rootProvider.GetCertificateName() + } + var matchers []matcher.StringMatcher + for _, m := range validationCtx.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + sc.SubjectAltNameMatchers = matchers + return sc, nil +} + // circuitBreakersFromCluster extracts the circuit breakers configuration from // the received cluster resource. Returns nil if no CircuitBreakers or no // Thresholds in CircuitBreakers. From 43e8fd4f69b65fd51d72578df4afa5c0519ca2b5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 10 Sep 2021 10:59:25 -0700 Subject: [PATCH 224/998] xds: don't remove env var protection for security on the client yet (#4752) Set the value to true by default, and remove it one release later. --- internal/xds/env/env.go | 26 ++++++++++++++-- xds/internal/xdsclient/cds_test.go | 48 ++++++++++++++++++++++++++++++ xds/internal/xdsclient/xds.go | 12 +++++--- 3 files changed, 79 insertions(+), 7 deletions(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 9d5d47ff2a8f..7f879b44da00 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -39,9 +39,10 @@ const ( // When both bootstrap FileName and FileContent are set, FileName is used. 
BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" + clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" + aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -64,6 +65,13 @@ var ( // be enabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "true". RingHashSupport = strings.EqualFold(os.Getenv(ringHashSupportEnv), "true") + // ClientSideSecuritySupport is used to control processing of security + // configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + ClientSideSecuritySupport = strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "true") // AggregateAndDNSSupportEnv indicates whether processing of aggregated // cluster and DNS cluster is enabled, which can be enabled by setting the // environment variable @@ -81,3 +89,15 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) + +func init() { + // Set the env var used to control processing of security configuration on + // the client-side to true by default. + // TODO(easwars): Remove this env var completely in 1.42.x release. + // + // If the env var is set explicitly, honor it. 
+ ClientSideSecuritySupport = true + if val, ok := os.LookupEnv(clientSideSecuritySupportEnv); ok { + ClientSideSecuritySupport = strings.EqualFold(val, "true") + } +} diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index a72322c1ba0d..f03cbe7efb4f 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -432,6 +432,54 @@ func (s) TestValidateCluster_Success(t *testing.T) { } } +func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { + // Turn off the env var protection for client-side security. + origClientSideSecurityEnvVar := env.ClientSideSecuritySupport + env.ClientSideSecuritySupport = false + defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() + + cluster := &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "rootInstance", + CertificateName: "rootCert", + }, + }, + }, + }), + }, + }, + } + wantUpdate := ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + EnableLRS: false, + } + gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster) + if err != nil { + 
t.Errorf("validateClusterAndConstructClusterUpdate() failed: %v", err) + } + if diff := cmp.Diff(wantUpdate, gotUpdate); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, got):\n%s", diff) + } +} + func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { const ( identityPluginInstance = "identityPluginInstance" diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index bd223ff7aa8d..236d11f3731e 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -698,10 +698,14 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } - // Process security configuration received from the control plane . - sc, err := securityConfigFromCluster(cluster) - if err != nil { - return ClusterUpdate{}, err + // Process security configuration received from the control plane iff the + // corresponding environment variable is set. 
+ var sc *SecurityConfig + if env.ClientSideSecuritySupport { + var err error + if sc, err = securityConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } } ret := ClusterUpdate{ From 16cf65612e633d1cc0be8c65ee7a49fbe2b27825 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 10 Sep 2021 11:24:25 -0700 Subject: [PATCH 225/998] xds: update xdsclient NACK to keep valid resources (#4743) --- xds/csds/csds_test.go | 49 +++-- xds/internal/xdsclient/callback.go | 165 +++++++++------- xds/internal/xdsclient/cds_test.go | 48 ++--- xds/internal/xdsclient/client.go | 8 +- xds/internal/xdsclient/client_test.go | 56 +++--- xds/internal/xdsclient/dump_test.go | 68 ++++--- xds/internal/xdsclient/eds_test.go | 27 +-- xds/internal/xdsclient/lds_test.go | 181 +++++++++--------- xds/internal/xdsclient/rds_test.go | 42 ++-- xds/internal/xdsclient/v2/cds_test.go | 15 +- xds/internal/xdsclient/v2/client_test.go | 32 ++-- xds/internal/xdsclient/v2/eds_test.go | 19 +- xds/internal/xdsclient/v2/lds_test.go | 35 ++-- xds/internal/xdsclient/v2/rds_test.go | 25 +-- .../xdsclient/watchers_cluster_test.go | 137 +++++++++---- .../xdsclient/watchers_endpoints_test.go | 107 ++++++++--- .../xdsclient/watchers_listener_test.go | 117 ++++++++--- xds/internal/xdsclient/watchers_route_test.go | 107 ++++++++--- xds/internal/xdsclient/xds.go | 76 ++++++-- 19 files changed, 808 insertions(+), 506 deletions(-) diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index ffaae2d739ab..da7144f7eecc 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -211,22 +211,28 @@ func TestCSDS(t *testing.T) { } const nackResourceIdx = 0 + var ( + nackListeners = append([]*v3listenerpb.Listener{}, listeners...) + nackRoutes = append([]*v3routepb.RouteConfiguration{}, routes...) + nackClusters = append([]*v3clusterpb.Cluster{}, clusters...) + nackEndpoints = append([]*v3endpointpb.ClusterLoadAssignment{}, endpoints...) 
+ ) + nackListeners[0] = &v3listenerpb.Listener{Name: ldsTargets[nackResourceIdx], ApiListener: &v3listenerpb.ApiListener{}} // 0 will be nacked. 1 will stay the same. + nackRoutes[0] = &v3routepb.RouteConfiguration{ + Name: rdsTargets[nackResourceIdx], VirtualHosts: []*v3routepb.VirtualHost{{Routes: []*v3routepb.Route{{}}}}, + } + nackClusters[0] = &v3clusterpb.Cluster{ + Name: cdsTargets[nackResourceIdx], ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, + } + nackEndpoints[0] = &v3endpointpb.ClusterLoadAssignment{ + ClusterName: edsTargets[nackResourceIdx], Endpoints: []*v3endpointpb.LocalityLbEndpoints{{}}, + } if err := mgmServer.Update(ctx, e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{ - {Name: ldsTargets[nackResourceIdx], ApiListener: &v3listenerpb.ApiListener{}}, // 0 will be nacked. 1 will stay the same. - }, - Routes: []*v3routepb.RouteConfiguration{ - {Name: rdsTargets[nackResourceIdx], VirtualHosts: []*v3routepb.VirtualHost{{ - Routes: []*v3routepb.Route{{}}, - }}}, - }, - Clusters: []*v3clusterpb.Cluster{ - {Name: cdsTargets[nackResourceIdx], ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}}, - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{ - {ClusterName: edsTargets[nackResourceIdx], Endpoints: []*v3endpointpb.LocalityLbEndpoints{{}}}, - }, + NodeID: nodeID, + Listeners: nackListeners, + Routes: nackRoutes, + Clusters: nackClusters, + Endpoints: nackEndpoints, SkipValidation: true, }); err != nil { t.Fatal(err) @@ -490,7 +496,6 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco ackVersion = "1" nackVersion = "2" ) - if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { return fmt.Errorf("failed to send: %v", err) } @@ -514,13 +519,14 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco configDump := &v3adminpb.ListenersConfigDump_DynamicListener{ Name: 
ldsTargets[i], ActiveState: &v3adminpb.ListenersConfigDump_DynamicListenerState{ - VersionInfo: ackVersion, + VersionInfo: nackVersion, Listener: listenerAnys[i], LastUpdated: nil, }, ClientStatus: v3adminpb.ClientResourceStatus_ACKED, } if i == nackResourceIdx { + configDump.ActiveState.VersionInfo = ackVersion configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED configDump.ErrorState = &v3adminpb.UpdateFailureState{ Details: "blahblah", @@ -540,12 +546,13 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig for i := range rdsTargets { configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - VersionInfo: ackVersion, + VersionInfo: nackVersion, RouteConfig: routeAnys[i], LastUpdated: nil, ClientStatus: v3adminpb.ClientResourceStatus_ACKED, } if i == nackResourceIdx { + configDump.VersionInfo = ackVersion configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED configDump.ErrorState = &v3adminpb.UpdateFailureState{ Details: "blahblah", @@ -564,12 +571,13 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster for i := range cdsTargets { configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{ - VersionInfo: ackVersion, + VersionInfo: nackVersion, Cluster: clusterAnys[i], LastUpdated: nil, ClientStatus: v3adminpb.ClientResourceStatus_ACKED, } if i == nackResourceIdx { + configDump.VersionInfo = ackVersion configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED configDump.ErrorState = &v3adminpb.UpdateFailureState{ Details: "blahblah", @@ -589,12 +597,13 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig for i := range cdsTargets { configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - VersionInfo: ackVersion, + VersionInfo: 
nackVersion, EndpointConfig: endpointAnys[i], LastUpdated: nil, ClientStatus: v3adminpb.ClientResourceStatus_ACKED, } if i == nackResourceIdx { + configDump.VersionInfo = ackVersion configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED configDump.ErrorState = &v3adminpb.UpdateFailureState{ Details: "blahblah", diff --git a/xds/internal/xdsclient/callback.go b/xds/internal/xdsclient/callback.go index b8ad0ec76362..0374389fbca1 100644 --- a/xds/internal/xdsclient/callback.go +++ b/xds/internal/xdsclient/callback.go @@ -76,15 +76,17 @@ func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + c.ldsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. c.ldsVersion = metadata.ErrState.Version - for name := range updates { - if s, ok := c.ldsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.ldsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.ldsMD[name] @@ -92,25 +94,27 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata Up mdCopy.Status = metadata.Status c.ldsMD[name] = mdCopy for wi := range s { - wi.newError(metadata.ErrState.Err) + wi.newError(uErr.Err) } + continue } - } - return - } - - // If no error received, the status is ACK. - c.ldsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.ldsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. 
for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. - c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) - c.ldsCache[name] = update - c.ldsMD[name] = metadata + c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.ldsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.ldsMD[name] = mdCopy } } // Resources not in the new update were removed by the server, so delete @@ -137,15 +141,18 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata Up // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + // If no error received, the status is ACK. + c.rdsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. c.rdsVersion = metadata.ErrState.Version - for name := range updates { - if s, ok := c.rdsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.rdsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. 
mdCopy := c.rdsMD[name] @@ -153,25 +160,27 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metad mdCopy.Status = metadata.Status c.rdsMD[name] = mdCopy for wi := range s { - wi.newError(metadata.ErrState.Err) + wi.newError(uErr.Err) } + continue } - } - return - } - - // If no error received, the status is ACK. - c.rdsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.rdsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. - c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) - c.rdsCache[name] = update - c.rdsMD[name] = metadata + c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.rdsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.rdsMD[name] = mdCopy } } } @@ -181,15 +190,17 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metad // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + c.cdsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. 
c.cdsVersion = metadata.ErrState.Version - for name := range updates { - if s, ok := c.cdsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.cdsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.cdsMD[name] @@ -197,25 +208,29 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata Upda mdCopy.Status = metadata.Status c.cdsMD[name] = mdCopy for wi := range s { - wi.newError(metadata.ErrState.Err) + // Send the watcher the individual error, instead of the + // overall combined error from the metadata.ErrState. + wi.newError(uErr.Err) } + continue } - } - return - } - - // If no error received, the status is ACK. - c.cdsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.cdsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. - c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) - c.cdsCache[name] = update - c.cdsMD[name] = metadata + c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.cdsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.cdsMD[name] = mdCopy } } // Resources not in the new update were removed by the server, so delete @@ -242,15 +257,17 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata Upda // // A response can contain multiple resources. 
They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + c.edsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. c.edsVersion = metadata.ErrState.Version - for name := range updates { - if s, ok := c.edsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.edsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.edsMD[name] @@ -258,25 +275,29 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata U mdCopy.Status = metadata.Status c.edsMD[name] = mdCopy for wi := range s { - wi.newError(metadata.ErrState.Err) + // Send the watcher the individual error, instead of the + // overall combined error from the metadata.ErrState. + wi.newError(uErr.Err) } + continue } - } - return - } - - // If no error received, the status is ACK. - c.edsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.edsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. - c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(update)) - c.edsCache[name] = update - c.edsMD[name] = metadata + c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.edsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. 
+ mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.edsMD[name] = mdCopy } } } diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index f03cbe7efb4f..dc29cffac3b5 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -1187,7 +1187,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { tests := []struct { name string resources []*anypb.Any - wantUpdate map[string]ClusterUpdate + wantUpdate map[string]ClusterUpdateErrTuple wantMD UpdateMetadata wantErr bool }{ @@ -1199,7 +1199,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -1217,7 +1217,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -1230,13 +1230,15 @@ func (s) TestUnmarshalCluster(t *testing.T) { ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, }), }, - wantUpdate: map[string]ClusterUpdate{"test": {}}, + wantUpdate: map[string]ClusterUpdateErrTuple{ + "test": {Err: cmpopts.AnyError}, + }, wantMD: UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -1244,12 +1246,12 @@ func (s) TestUnmarshalCluster(t *testing.T) { { name: "v2 cluster", resources: []*anypb.Any{v2ClusterAny}, - wantUpdate: map[string]ClusterUpdate{ - v2ClusterName: { + wantUpdate: map[string]ClusterUpdateErrTuple{ + v2ClusterName: {Update: ClusterUpdate{ ClusterName: v2ClusterName, EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, - }, + }}, }, wantMD: UpdateMetadata{ Status: 
ServiceStatusACKed, @@ -1259,12 +1261,12 @@ func (s) TestUnmarshalCluster(t *testing.T) { { name: "v3 cluster", resources: []*anypb.Any{v3ClusterAny}, - wantUpdate: map[string]ClusterUpdate{ - v3ClusterName: { + wantUpdate: map[string]ClusterUpdateErrTuple{ + v3ClusterName: {Update: ClusterUpdate{ ClusterName: v3ClusterName, EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -1274,17 +1276,17 @@ func (s) TestUnmarshalCluster(t *testing.T) { { name: "multiple clusters", resources: []*anypb.Any{v2ClusterAny, v3ClusterAny}, - wantUpdate: map[string]ClusterUpdate{ - v2ClusterName: { + wantUpdate: map[string]ClusterUpdateErrTuple{ + v2ClusterName: {Update: ClusterUpdate{ ClusterName: v2ClusterName, EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, - }, - v3ClusterName: { + }}, + v3ClusterName: {Update: ClusterUpdate{ ClusterName: v3ClusterName, EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -1303,25 +1305,25 @@ func (s) TestUnmarshalCluster(t *testing.T) { }), v3ClusterAny, }, - wantUpdate: map[string]ClusterUpdate{ - v2ClusterName: { + wantUpdate: map[string]ClusterUpdateErrTuple{ + v2ClusterName: {Update: ClusterUpdate{ ClusterName: v2ClusterName, EDSServiceName: v2Service, EnableLRS: true, Raw: v2ClusterAny, - }, - v3ClusterName: { + }}, + v3ClusterName: {Update: ClusterUpdate{ ClusterName: v3ClusterName, EDSServiceName: v3Service, EnableLRS: true, Raw: v3ClusterAny, - }, - "bad": {}, + }}, + "bad": {Err: cmpopts.AnyError}, }, wantMD: UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 0968ba59c494..e549d558954f 100644 --- a/xds/internal/xdsclient/client.go +++ 
b/xds/internal/xdsclient/client.go @@ -134,14 +134,14 @@ type loadReportingOptions struct { // resource updates from an APIClient for a specific version. type UpdateHandler interface { // NewListeners handles updates to xDS listener resources. - NewListeners(map[string]ListenerUpdate, UpdateMetadata) + NewListeners(map[string]ListenerUpdateErrTuple, UpdateMetadata) // NewRouteConfigs handles updates to xDS RouteConfiguration resources. - NewRouteConfigs(map[string]RouteConfigUpdate, UpdateMetadata) + NewRouteConfigs(map[string]RouteConfigUpdateErrTuple, UpdateMetadata) // NewClusters handles updates to xDS Cluster resources. - NewClusters(map[string]ClusterUpdate, UpdateMetadata) + NewClusters(map[string]ClusterUpdateErrTuple, UpdateMetadata) // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely // referred to as Endpoints) resources. - NewEndpoints(map[string]EndpointsUpdate, UpdateMetadata) + NewEndpoints(map[string]EndpointsUpdateErrTuple, UpdateMetadata) // NewConnectionError handles connection errors from the xDS stream. The // error will be reported to all the resource watchers. NewConnectionError(err error) diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 9d8086aeb4e1..d8e942196311 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -62,19 +62,11 @@ const ( var ( cmpOpts = cmp.Options{ cmpopts.EquateEmpty(), + cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - if x == nil || y == nil { - return x == nil && y == nil - } - return x.Error() == y.Error() - }), protocmp.Transform(), } - // When comparing NACK UpdateMetadata, we only care if error is nil, but not - // the details in error. 
- errPlaceHolder = fmt.Errorf("error whose details don't matter") cmpOptsIgnoreDetails = cmp.Options{ cmp.Comparer(func(a, b time.Time) bool { return true }), cmp.Comparer(func(x, y error) bool { @@ -170,7 +162,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { clusterUpdateCh := testutils.NewChannel() firstTime := true client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) // Calls another watch inline, to ensure there's deadlock. client.WatchCluster("another-random-name", func(ClusterUpdate, error) {}) @@ -184,13 +176,13 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { } wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil { t.Fatal(err) } @@ -201,15 +193,15 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want if err != nil { return fmt.Errorf("timeout when waiting for listener update: %v", err) } - gotUpdate := u.(ldsUpdateErr) + gotUpdate := u.(ListenerUpdateErrTuple) if wantErr != nil { - if gotUpdate.err != wantErr { - return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + if gotUpdate.Err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil } - if gotUpdate.err != nil || 
!cmp.Equal(gotUpdate.u, wantUpdate) { - return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) + if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate) { + return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil } @@ -219,15 +211,15 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, w if err != nil { return fmt.Errorf("timeout when waiting for route configuration update: %v", err) } - gotUpdate := u.(rdsUpdateErr) + gotUpdate := u.(RouteConfigUpdateErrTuple) if wantErr != nil { - if gotUpdate.err != wantErr { - return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + if gotUpdate.Err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil } - if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) { - return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) + if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate) { + return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil } @@ -237,15 +229,15 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU if err != nil { return fmt.Errorf("timeout when waiting for cluster update: %v", err) } - gotUpdate := u.(clusterUpdateErr) + gotUpdate := u.(ClusterUpdateErrTuple) if wantErr != nil { - if gotUpdate.err != wantErr { - return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + if gotUpdate.Err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil } - if !cmp.Equal(gotUpdate.u, wantUpdate) { - return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) + if 
!cmp.Equal(gotUpdate.Update, wantUpdate) { + return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil } @@ -255,15 +247,15 @@ func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wan if err != nil { return fmt.Errorf("timeout when waiting for endpoints update: %v", err) } - gotUpdate := u.(endpointsUpdateErr) + gotUpdate := u.(EndpointsUpdateErrTuple) if wantErr != nil { - if gotUpdate.err != wantErr { - return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.err, wantErr) + if gotUpdate.Err != wantErr { + return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil } - if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate, cmpopts.EquateEmpty()) { - return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) + if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, cmpopts.EquateEmpty()) { + return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil } diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index 83479978d765..d03479ca4ade 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -101,16 +101,16 @@ func (s) TestLDSConfigDump(t *testing.T) { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.ListenerUpdate) + update0 := make(map[string]xdsclient.ListenerUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range listenerRaws { - update0[n] = xdsclient.ListenerUpdate{Raw: r} + update0[n] = xdsclient.ListenerUpdateErrTuple{Update: xdsclient.ListenerUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - 
updateHandler.NewListeners(update0, xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewListeners(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpLDS, testVersion, want0); err != nil { @@ -120,10 +120,12 @@ func (s) TestLDSConfigDump(t *testing.T) { const nackVersion = "lds-version-nack" var nackErr = fmt.Errorf("lds nack error") updateHandler.NewListeners( - map[string]xdsclient.ListenerUpdate{ - ldsTargets[0]: {}, + map[string]xdsclient.ListenerUpdateErrTuple{ + ldsTargets[0]: {Err: nackErr}, + ldsTargets[1]: {Update: xdsclient.ListenerUpdate{Raw: listenerRaws[ldsTargets[1]]}}, }, xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, @@ -137,6 +139,7 @@ func (s) TestLDSConfigDump(t *testing.T) { // message, as well as the NACK error. wantDump[ldsTargets[0]] = xdsclient.UpdateWithMD{ MD: xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, Version: testVersion, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, @@ -147,7 +150,7 @@ func (s) TestLDSConfigDump(t *testing.T) { } wantDump[ldsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, Raw: listenerRaws[ldsTargets[1]], } if err := compareDump(client.DumpLDS, nackVersion, wantDump); err != nil { @@ -212,16 +215,16 @@ func (s) TestRDSConfigDump(t *testing.T) { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.RouteConfigUpdate) + update0 := make(map[string]xdsclient.RouteConfigUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range routeRaws { - update0[n] = xdsclient.RouteConfigUpdate{Raw: r} + update0[n] = xdsclient.RouteConfigUpdateErrTuple{Update: xdsclient.RouteConfigUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ 
- MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpRDS, testVersion, want0); err != nil { @@ -231,10 +234,12 @@ func (s) TestRDSConfigDump(t *testing.T) { const nackVersion = "rds-version-nack" var nackErr = fmt.Errorf("rds nack error") updateHandler.NewRouteConfigs( - map[string]xdsclient.RouteConfigUpdate{ - rdsTargets[0]: {}, + map[string]xdsclient.RouteConfigUpdateErrTuple{ + rdsTargets[0]: {Err: nackErr}, + rdsTargets[1]: {Update: xdsclient.RouteConfigUpdate{Raw: routeRaws[rdsTargets[1]]}}, }, xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, @@ -248,6 +253,7 @@ func (s) TestRDSConfigDump(t *testing.T) { // message, as well as the NACK error. 
wantDump[rdsTargets[0]] = xdsclient.UpdateWithMD{ MD: xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, Version: testVersion, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, @@ -257,7 +263,7 @@ func (s) TestRDSConfigDump(t *testing.T) { Raw: routeRaws[rdsTargets[0]], } wantDump[rdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, Raw: routeRaws[rdsTargets[1]], } if err := compareDump(client.DumpRDS, nackVersion, wantDump); err != nil { @@ -323,16 +329,16 @@ func (s) TestCDSConfigDump(t *testing.T) { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.ClusterUpdate) + update0 := make(map[string]xdsclient.ClusterUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range clusterRaws { - update0[n] = xdsclient.ClusterUpdate{Raw: r} + update0[n] = xdsclient.ClusterUpdateErrTuple{Update: xdsclient.ClusterUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewClusters(update0, xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewClusters(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) // Expect ACK. 
if err := compareDump(client.DumpCDS, testVersion, want0); err != nil { @@ -342,10 +348,12 @@ func (s) TestCDSConfigDump(t *testing.T) { const nackVersion = "cds-version-nack" var nackErr = fmt.Errorf("cds nack error") updateHandler.NewClusters( - map[string]xdsclient.ClusterUpdate{ - cdsTargets[0]: {}, + map[string]xdsclient.ClusterUpdateErrTuple{ + cdsTargets[0]: {Err: nackErr}, + cdsTargets[1]: {Update: xdsclient.ClusterUpdate{Raw: clusterRaws[cdsTargets[1]]}}, }, xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, @@ -359,6 +367,7 @@ func (s) TestCDSConfigDump(t *testing.T) { // message, as well as the NACK error. wantDump[cdsTargets[0]] = xdsclient.UpdateWithMD{ MD: xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, Version: testVersion, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, @@ -368,7 +377,7 @@ func (s) TestCDSConfigDump(t *testing.T) { Raw: clusterRaws[cdsTargets[0]], } wantDump[cdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, Raw: clusterRaws[cdsTargets[1]], } if err := compareDump(client.DumpCDS, nackVersion, wantDump); err != nil { @@ -420,16 +429,16 @@ func (s) TestEDSConfigDump(t *testing.T) { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.EndpointsUpdate) + update0 := make(map[string]xdsclient.EndpointsUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range endpointRaws { - update0[n] = xdsclient.EndpointsUpdate{Raw: r} + update0[n] = xdsclient.EndpointsUpdateErrTuple{Update: xdsclient.EndpointsUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewEndpoints(update0, 
xdsclient.UpdateMetadata{Version: testVersion}) + updateHandler.NewEndpoints(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpEDS, testVersion, want0); err != nil { @@ -439,10 +448,12 @@ func (s) TestEDSConfigDump(t *testing.T) { const nackVersion = "eds-version-nack" var nackErr = fmt.Errorf("eds nack error") updateHandler.NewEndpoints( - map[string]xdsclient.EndpointsUpdate{ - edsTargets[0]: {}, + map[string]xdsclient.EndpointsUpdateErrTuple{ + edsTargets[0]: {Err: nackErr}, + edsTargets[1]: {Update: xdsclient.EndpointsUpdate{Raw: endpointRaws[edsTargets[1]]}}, }, xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, @@ -456,6 +467,7 @@ func (s) TestEDSConfigDump(t *testing.T) { // message, as well as the NACK error. wantDump[edsTargets[0]] = xdsclient.UpdateWithMD{ MD: xdsclient.UpdateMetadata{ + Status: xdsclient.ServiceStatusNACKed, Version: testVersion, ErrState: &xdsclient.UpdateErrorMetadata{ Version: nackVersion, @@ -465,7 +477,7 @@ func (s) TestEDSConfigDump(t *testing.T) { Raw: endpointRaws[edsTargets[0]], } wantDump[edsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, + MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, Raw: endpointRaws[edsTargets[1]], } if err := compareDump(client.DumpEDS, nackVersion, wantDump); err != nil { diff --git a/xds/internal/xdsclient/eds_test.go b/xds/internal/xdsclient/eds_test.go index 2fe35989f7d3..d09134f58206 100644 --- a/xds/internal/xdsclient/eds_test.go +++ b/xds/internal/xdsclient/eds_test.go @@ -30,6 +30,7 @@ import ( anypb "github.com/golang/protobuf/ptypes/any" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" 
"google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/version" @@ -137,7 +138,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { tests := []struct { name string resources []*anypb.Any - wantUpdate map[string]EndpointsUpdate + wantUpdate map[string]EndpointsUpdateErrTuple wantMD UpdateMetadata wantErr bool }{ @@ -149,7 +150,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -167,7 +168,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -180,13 +181,13 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) return clab0.Build() }())}, - wantUpdate: map[string]EndpointsUpdate{"test": {}}, + wantUpdate: map[string]EndpointsUpdateErrTuple{"test": {Err: cmpopts.AnyError}}, wantMD: UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -194,8 +195,8 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { { name: "v3 endpoints", resources: []*anypb.Any{v3EndpointsAny}, - wantUpdate: map[string]EndpointsUpdate{ - "test": { + wantUpdate: map[string]EndpointsUpdateErrTuple{ + "test": {Update: EndpointsUpdate{ Drops: nil, Localities: []Locality{ { @@ -220,7 +221,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { }, }, Raw: v3EndpointsAny, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -239,8 +240,8 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { return clab0.Build() }()), }, - wantUpdate: map[string]EndpointsUpdate{ - "test": { + wantUpdate: map[string]EndpointsUpdateErrTuple{ + "test": {Update: 
EndpointsUpdate{ Drops: nil, Localities: []Locality{ { @@ -265,15 +266,15 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { }, }, Raw: v3EndpointsAny, - }, - "bad": {}, + }}, + "bad": {Err: cmpopts.AnyError}, }, wantMD: UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index af1cba181752..d43cbaa80870 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -29,6 +29,7 @@ import ( "github.com/golang/protobuf/proto" spb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/grpc/internal/testutils" @@ -176,7 +177,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, } ) @@ -184,7 +185,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { tests := []struct { name string resources []*anypb.Any - wantUpdate map[string]ListenerUpdate + wantUpdate map[string]ListenerUpdateErrTuple wantMD UpdateMetadata wantErr bool }{ @@ -214,7 +215,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }(), }, }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, @@ -226,7 +227,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { ApiListener: testutils.MarshalAny(&v2xdspb.Listener{}), }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: 
true, }, @@ -242,7 +243,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }), }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, @@ -256,7 +257,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }), }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, @@ -279,7 +280,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }), }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, @@ -293,8 +294,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 with no filters", resources: []*anypb.Any{v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -322,15 +323,15 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }), }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, { name: "v3 with custom filter", resources: []*anypb.Any{v3LisWithFilters(customFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, 
HTTPFilters: []HTTPFilter{ { @@ -341,7 +342,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { routerFilter, }, Raw: v3LisWithFilters(customFilter), - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -351,8 +352,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 with custom filter in typed struct", resources: []*anypb.Any{v3LisWithFilters(typedStructFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: []HTTPFilter{ { @@ -363,7 +364,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { routerFilter, }, Raw: v3LisWithFilters(typedStructFilter), - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -373,8 +374,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 with optional custom filter", resources: []*anypb.Any{v3LisWithFilters(customOptionalFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: []HTTPFilter{ { @@ -385,7 +386,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { routerFilter, }, Raw: v3LisWithFilters(customOptionalFilter), - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -395,15 +396,15 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 with two filters with same name", resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, { name: "v3 with two filters - same type different name", resources: []*anypb.Any{v3LisWithFilters(customFilter, 
customFilter2)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: []HTTPFilter{{ Name: "customFilter", @@ -417,7 +418,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { routerFilter, }, Raw: v3LisWithFilters(customFilter, customFilter2), - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -427,20 +428,20 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 with server-only filter", resources: []*anypb.Any{v3LisWithFilters(serverOnlyCustomFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, { name: "v3 with optional server-only filter", resources: []*anypb.Any{v3LisWithFilters(serverOnlyOptionalCustomFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(serverOnlyOptionalCustomFilter), HTTPFilters: routerFilterList, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -450,8 +451,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 with client-only filter", resources: []*anypb.Any{v3LisWithFilters(clientOnlyCustomFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: []HTTPFilter{ { @@ -461,7 +462,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, routerFilter}, Raw: v3LisWithFilters(clientOnlyCustomFilter), - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -471,34 
+472,34 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 with err filter", resources: []*anypb.Any{v3LisWithFilters(errFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, { name: "v3 with optional err filter", resources: []*anypb.Any{v3LisWithFilters(errOptionalFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, { name: "v3 with unknown filter", resources: []*anypb.Any{v3LisWithFilters(unknownFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: true, }, { name: "v3 with unknown filter (optional)", resources: []*anypb.Any{v3LisWithFilters(unknownOptionalFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters(unknownOptionalFilter), - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -508,8 +509,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v2 listener resource", resources: []*anypb.Any{v2Lis}, - wantUpdate: map[string]ListenerUpdate{ - v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -519,8 +520,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 listener resource", resources: []*anypb.Any{v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdate{ - 
v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -530,8 +531,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "v3 listener with inline route configuration", resources: []*anypb.Any{v3LisWithInlineRoute}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ InlineRouteConfig: &RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{v3LDSTarget}, @@ -540,7 +541,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { MaxStreamDuration: time.Second, Raw: v3LisWithInlineRoute, HTTPFilters: routerFilterList, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -550,9 +551,9 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { name: "multiple listener resources", resources: []*anypb.Any{v2Lis, v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdate{ - v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, + v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -573,10 +574,10 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }}), v3LisWithFilters(), }, - wantUpdate: map[string]ListenerUpdate{ - 
v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}, - "bad": {}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, + v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}}, + "bad": {Err: cmpopts.AnyError}, }, wantMD: errMD, wantErr: true, @@ -857,7 +858,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, } ) @@ -865,7 +866,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { tests := []struct { name string resources []*anypb.Any - wantUpdate map[string]ListenerUpdate + wantUpdate map[string]ListenerUpdateErrTuple wantMD UpdateMetadata wantErr string }{ @@ -877,7 +878,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { {Name: "listener-filter-1"}, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "unsupported field 'listener_filters'", }, @@ -887,14 +888,14 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Name: v3LDSTarget, UseOriginalDst: &wrapperspb.BoolValue{Value: true}, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "unsupported field 'use_original_dst'", }, { name: "no address field", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{Name: v3LDSTarget})}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: 
cmpopts.AnyError}}, wantMD: errMD, wantErr: "no address field in LDS response", }, @@ -904,7 +905,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Name: v3LDSTarget, Address: &v3corepb.Address{}, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "no socket_address field in LDS response", }, @@ -920,7 +921,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "no supported filter chains and no default filter chain", }, @@ -935,7 +936,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "missing HttpConnectionManager filter", }, @@ -957,7 +958,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "missing name field in filter", }, @@ -996,7 +997,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "duplicate filter name", }, @@ -1023,7 +1024,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "http filters list is empty", }, @@ -1051,7 +1052,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) 
{ }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "is a terminal filter but it is not last in the filter chain", }, @@ -1079,7 +1080,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "is not a terminal filter", }, @@ -1100,7 +1101,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "unsupported config_type", }, @@ -1124,7 +1125,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "multiple filter chains with overlapping matching rules are defined", }, @@ -1147,7 +1148,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "unsupported network filter", }, @@ -1173,7 +1174,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "failed unmarshaling of network filter", }, @@ -1192,7 +1193,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, 
wantMD: errMD, wantErr: "transport_socket field has unexpected name", }, @@ -1214,7 +1215,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "transport_socket field has unexpected typeURL", }, @@ -1239,7 +1240,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", }, @@ -1261,7 +1262,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", }, @@ -1291,15 +1292,15 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "validation context contains unexpected type", }, { name: "empty transport socket", resources: []*anypb.Any{listenerEmptyTransportSocket}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ InboundListenerCfg: &InboundListenerConfig{ Address: "0.0.0.0", Port: "9999", @@ -1325,7 +1326,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, Raw: listenerEmptyTransportSocket, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -1358,7 +1359,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: 
map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", }, @@ -1388,7 +1389,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", }, @@ -1412,15 +1413,15 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, })}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, wantMD: errMD, wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", }, { name: "happy case with no validation context using deprecated fields", resources: []*anypb.Any{listenerNoValidationContextDeprecatedFields}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ InboundListenerCfg: &InboundListenerConfig{ Address: "0.0.0.0", Port: "9999", @@ -1458,7 +1459,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, Raw: listenerNoValidationContextDeprecatedFields, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -1468,8 +1469,8 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { { name: "happy case with no validation context using new fields", resources: []*anypb.Any{listenerNoValidationContextNewFields}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ 
InboundListenerCfg: &InboundListenerConfig{ Address: "0.0.0.0", Port: "9999", @@ -1507,7 +1508,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, Raw: listenerNoValidationContextNewFields, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -1517,8 +1518,8 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { { name: "happy case with validation context provider instance with deprecated fields", resources: []*anypb.Any{listenerWithValidationContextDeprecatedFields}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ InboundListenerCfg: &InboundListenerConfig{ Address: "0.0.0.0", Port: "9999", @@ -1562,7 +1563,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, Raw: listenerWithValidationContextDeprecatedFields, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -1572,8 +1573,8 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { { name: "happy case with validation context provider instance with new fields", resources: []*anypb.Any{listenerWithValidationContextNewFields}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ InboundListenerCfg: &InboundListenerConfig{ Address: "0.0.0.0", Port: "9999", @@ -1617,7 +1618,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, Raw: listenerWithValidationContextNewFields, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index 138e3a0bd2b6..6d1f8588f2df 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -668,7 +668,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { tests := []struct { name string resources []*anypb.Any - wantUpdate map[string]RouteConfigUpdate + wantUpdate 
map[string]RouteConfigUpdateErrTuple wantMD UpdateMetadata wantErr bool }{ @@ -680,7 +680,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -698,7 +698,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, @@ -713,8 +713,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { { name: "v2 routeConfig resource", resources: []*anypb.Any{v2RouteConfig}, - wantUpdate: map[string]RouteConfigUpdate{ - v2RouteConfigName: { + wantUpdate: map[string]RouteConfigUpdateErrTuple{ + v2RouteConfigName: {Update: RouteConfigUpdate{ VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -730,7 +730,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, Raw: v2RouteConfig, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -740,8 +740,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { { name: "v3 routeConfig resource", resources: []*anypb.Any{v3RouteConfig}, - wantUpdate: map[string]RouteConfigUpdate{ - v3RouteConfigName: { + wantUpdate: map[string]RouteConfigUpdateErrTuple{ + v3RouteConfigName: {Update: RouteConfigUpdate{ VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -757,7 +757,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, Raw: v3RouteConfig, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -767,8 +767,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { { name: "multiple routeConfig resources", resources: []*anypb.Any{v2RouteConfig, v3RouteConfig}, - wantUpdate: map[string]RouteConfigUpdate{ - v3RouteConfigName: { + wantUpdate: map[string]RouteConfigUpdateErrTuple{ + v3RouteConfigName: {Update: RouteConfigUpdate{ VirtualHosts: []*VirtualHost{ { Domains: 
[]string{uninterestingDomain}, @@ -784,8 +784,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, Raw: v3RouteConfig, - }, - v2RouteConfigName: { + }}, + v2RouteConfigName: {Update: RouteConfigUpdate{ VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -801,7 +801,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, Raw: v2RouteConfig, - }, + }}, }, wantMD: UpdateMetadata{ Status: ServiceStatusACKed, @@ -822,8 +822,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }}}}}), v3RouteConfig, }, - wantUpdate: map[string]RouteConfigUpdate{ - v3RouteConfigName: { + wantUpdate: map[string]RouteConfigUpdateErrTuple{ + v3RouteConfigName: {Update: RouteConfigUpdate{ VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -839,8 +839,8 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, Raw: v3RouteConfig, - }, - v2RouteConfigName: { + }}, + v2RouteConfigName: {Update: RouteConfigUpdate{ VirtualHosts: []*VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -856,15 +856,15 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, }, Raw: v2RouteConfig, - }, - "bad": {}, + }}, + "bad": {Err: cmpopts.AnyError}, }, wantMD: UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, ErrState: &UpdateErrorMetadata{ Version: testVersion, - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantErr: true, diff --git a/xds/internal/xdsclient/v2/cds_test.go b/xds/internal/xdsclient/v2/cds_test.go index e15e13074681..cef7563017c4 100644 --- a/xds/internal/xdsclient/v2/cds_test.go +++ b/xds/internal/xdsclient/v2/cds_test.go @@ -25,6 +25,7 @@ import ( xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" anypb "github.com/golang/protobuf/ptypes/any" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/version" 
"google.golang.org/grpc/xds/internal/xdsclient" @@ -100,7 +101,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { name string cdsResponse *xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.ClusterUpdate + wantUpdate map[string]xdsclient.ClusterUpdateErrTuple wantUpdateMD xdsclient.UpdateMetadata wantUpdateErr bool }{ @@ -113,7 +114,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -127,7 +128,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -148,8 +149,8 @@ func (s) TestCDSHandleResponse(t *testing.T) { name: "one-uninteresting-cluster", cdsResponse: goodCDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName2: {ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}, + wantUpdate: map[string]xdsclient.ClusterUpdateErrTuple{ + goodClusterName2: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -161,8 +162,8 @@ func (s) TestCDSHandleResponse(t *testing.T) { name: "one-good-cluster", cdsResponse: goodCDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName1: {ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, + wantUpdate: map[string]xdsclient.ClusterUpdateErrTuple{ + goodClusterName1: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}}, }, wantUpdateMD: 
xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index 2a45a52ca1d9..ed4322b0dc51 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -21,7 +21,6 @@ package v2 import ( "context" "errors" - "fmt" "testing" "time" @@ -285,9 +284,6 @@ var ( }, TypeUrl: version.V2RouteConfigURL, } - // An place holder error. When comparing UpdateErrorMetadata, we only check - // if error is nil, and don't compare error content. - errPlaceHolder = fmt.Errorf("err place holder") ) type watchHandleTestcase struct { @@ -305,7 +301,7 @@ type testUpdateReceiver struct { f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) } -func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdate, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdateErrTuple, metadata xdsclient.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -313,7 +309,7 @@ func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdate, t.newUpdate(xdsclient.ListenerResource, dd, metadata) } -func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdate, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdateErrTuple, metadata xdsclient.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -321,7 +317,7 @@ func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigU t.newUpdate(xdsclient.RouteConfigResource, dd, metadata) } -func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdate, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdateErrTuple, metadata 
xdsclient.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -329,7 +325,7 @@ func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdate, m t.newUpdate(xdsclient.ClusterResource, dd, metadata) } -func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdate, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdateErrTuple, metadata xdsclient.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -367,27 +363,27 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { if rType == test.rType { switch test.rType { case xdsclient.ListenerResource: - dd := make(map[string]xdsclient.ListenerUpdate) + dd := make(map[string]xdsclient.ListenerUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.ListenerUpdate) + dd[n] = u.(xdsclient.ListenerUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.RouteConfigResource: - dd := make(map[string]xdsclient.RouteConfigUpdate) + dd := make(map[string]xdsclient.RouteConfigUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.RouteConfigUpdate) + dd[n] = u.(xdsclient.RouteConfigUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.ClusterResource: - dd := make(map[string]xdsclient.ClusterUpdate) + dd := make(map[string]xdsclient.ClusterUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.ClusterUpdate) + dd[n] = u.(xdsclient.ClusterUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.EndpointsResource: - dd := make(map[string]xdsclient.EndpointsUpdate) + dd := make(map[string]xdsclient.EndpointsUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.EndpointsUpdate) + dd[n] = u.(xdsclient.EndpointsUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) } @@ -437,7 +433,7 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { cmpopts.EquateEmpty(), 
protocmp.Transform(), cmpopts.IgnoreFields(xdsclient.UpdateMetadata{}, "Timestamp"), cmpopts.IgnoreFields(xdsclient.UpdateErrorMetadata{}, "Timestamp"), - cmp.Comparer(func(x, y error) bool { return (x == nil) == (y == nil) }), + cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), } uErr, err := gotUpdateCh.Receive(ctx) if err == context.DeadlineExceeded { @@ -668,7 +664,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { if v, err := callbackCh.Receive(ctx); err != nil { t.Fatal("Timeout when expecting LDS update") - } else if _, ok := v.(xdsclient.ListenerUpdate); !ok { + } else if _, ok := v.(xdsclient.ListenerUpdateErrTuple); !ok { t.Fatalf("Expect an LDS update from watcher, got %v", v) } } diff --git a/xds/internal/xdsclient/v2/eds_test.go b/xds/internal/xdsclient/v2/eds_test.go index 5062dff9c07c..8176b6dfb93a 100644 --- a/xds/internal/xdsclient/v2/eds_test.go +++ b/xds/internal/xdsclient/v2/eds_test.go @@ -24,6 +24,7 @@ import ( v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" anypb "github.com/golang/protobuf/ptypes/any" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" xtestutils "google.golang.org/grpc/xds/internal/testutils" @@ -75,7 +76,7 @@ func (s) TestEDSHandleResponse(t *testing.T) { name string edsResponse *v2xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.EndpointsUpdate + wantUpdate map[string]xdsclient.EndpointsUpdateErrTuple wantUpdateMD xdsclient.UpdateMetadata wantUpdateErr bool }{ @@ -88,7 +89,7 @@ func (s) TestEDSHandleResponse(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -102,7 +103,7 @@ func (s) TestEDSHandleResponse(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: 
xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -112,8 +113,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { name: "one-uninterestring-assignment", edsResponse: goodEDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.EndpointsUpdate{ - "not-goodEDSName": { + wantUpdate: map[string]xdsclient.EndpointsUpdateErrTuple{ + "not-goodEDSName": {Update: xdsclient.EndpointsUpdate{ Localities: []xdsclient.Locality{ { Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, @@ -123,7 +124,7 @@ func (s) TestEDSHandleResponse(t *testing.T) { }, }, Raw: marshaledGoodCLA2, - }, + }}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -135,8 +136,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { name: "one-good-assignment", edsResponse: goodEDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.EndpointsUpdate{ - goodEDSName: { + wantUpdate: map[string]xdsclient.EndpointsUpdateErrTuple{ + goodEDSName: {Update: xdsclient.EndpointsUpdate{ Localities: []xdsclient.Locality{ { Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, @@ -152,7 +153,7 @@ func (s) TestEDSHandleResponse(t *testing.T) { }, }, Raw: marshaledGoodCLA1, - }, + }}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, diff --git a/xds/internal/xdsclient/v2/lds_test.go b/xds/internal/xdsclient/v2/lds_test.go index db26681fb3d2..a0600550095b 100644 --- a/xds/internal/xdsclient/v2/lds_test.go +++ b/xds/internal/xdsclient/v2/lds_test.go @@ -23,6 +23,7 @@ import ( "time" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/xds/internal/xdsclient" ) @@ -35,7 +36,7 @@ func (s) TestLDSHandleResponse(t *testing.T) { name string ldsResponse *v2xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.ListenerUpdate + 
wantUpdate map[string]xdsclient.ListenerUpdateErrTuple wantUpdateMD xdsclient.UpdateMetadata wantUpdateErr bool }{ @@ -48,7 +49,7 @@ func (s) TestLDSHandleResponse(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -62,7 +63,7 @@ func (s) TestLDSHandleResponse(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -74,13 +75,13 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "no-apiListener-in-response", ldsResponse: noAPIListenerLDSResponse, wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {}, + wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ + goodLDSTarget1: {Err: cmpopts.AnyError}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -90,8 +91,8 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "one-good-listener", ldsResponse: goodLDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1}, + wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -104,9 +105,9 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "multiple-good-listener", ldsResponse: ldsResponseWithMultipleResources, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1}, - goodLDSTarget2: 
{RouteConfigName: goodRouteName1, Raw: marshaledListener2}, + wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -120,14 +121,14 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "good-bad-ugly-listeners", ldsResponse: goodBadUglyLDSResponse, wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1}, - goodLDSTarget2: {}, + wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + goodLDSTarget2: {Err: cmpopts.AnyError}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -137,8 +138,8 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "one-uninteresting-listener", ldsResponse: goodLDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget2: {RouteConfigName: goodRouteName1, Raw: marshaledListener2}, + wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ + goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go index 00ac2791ad6e..3389f0539469 100644 --- a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -24,6 +24,7 @@ import ( "time" xdspb 
"github.com/envoyproxy/go-control-plane/envoy/api/v2" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient" @@ -49,7 +50,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name string rdsResponse *xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.RouteConfigUpdate + wantUpdate map[string]xdsclient.RouteConfigUpdateErrTuple wantUpdateMD xdsclient.UpdateMetadata wantUpdateErr bool }{ @@ -62,7 +63,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -76,7 +77,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusNACKed, ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, + Err: cmpopts.AnyError, }, }, wantUpdateErr: false, @@ -88,11 +89,11 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "no-virtual-hosts-in-response", rdsResponse: noVirtualHostsInRDSResponse, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdate{ - goodRouteName1: { + wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ + goodRouteName1: {Update: xdsclient.RouteConfigUpdate{ VirtualHosts: nil, Raw: marshaledNoVirtualHostsRouteConfig, - }, + }}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -104,8 +105,8 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "one-uninteresting-route-config", rdsResponse: goodRDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdate{ - goodRouteName2: { + wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ + goodRouteName2: {Update: xdsclient.RouteConfigUpdate{ VirtualHosts: 
[]*xdsclient.VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -122,7 +123,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { }, }, Raw: marshaledGoodRouteConfig2, - }, + }}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, @@ -134,8 +135,8 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "one-good-route-config", rdsResponse: goodRDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdate{ - goodRouteName1: { + wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ + goodRouteName1: {Update: xdsclient.RouteConfigUpdate{ VirtualHosts: []*xdsclient.VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -152,7 +153,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { }, }, Raw: marshaledGoodRouteConfig1, - }, + }}, }, wantUpdateMD: xdsclient.UpdateMetadata{ Status: xdsclient.ServiceStatusACKed, diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index 939b7921b0be..bbfc1e96dcf7 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -28,11 +28,6 @@ import ( "google.golang.org/grpc/internal/testutils" ) -type clusterUpdateErr struct { - u ClusterUpdate - err error -} - // TestClusterWatch covers the cases: // - an update is received after a watch() // - an update for another resource name @@ -57,21 +52,21 @@ func (s) TestClusterWatch(t *testing.T) { clusterUpdateCh := testutils.NewChannel() cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := ClusterUpdate{ClusterName: testEDSName} - 
client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another update, with an extra resource for a different resource name. - client.NewClusters(map[string]ClusterUpdate{ - testCDSName: wantUpdate, + client.NewClusters(map[string]ClusterUpdateErrTuple{ + testCDSName: {Update: wantUpdate}, "randomName": {}, }, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { @@ -80,7 +75,7 @@ func (s) TestClusterWatch(t *testing.T) { // Cancel watch, and send update again. cancelWatch() - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -115,7 +110,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { clusterUpdateCh := testutils.NewChannel() clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) cancelLastWatch = client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -128,7 +123,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { } wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != 
nil { t.Fatal(err) @@ -137,7 +132,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { // Cancel the last watch, and send update again. cancelLastWatch() - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count-1; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -178,7 +173,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { clusterUpdateCh := testutils.NewChannel() clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -193,7 +188,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. 
clusterUpdateCh2 := testutils.NewChannel() client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -201,9 +196,9 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName + "1": wantUpdate1, - testCDSName + "2": wantUpdate2, + client.NewClusters(map[string]ClusterUpdateErrTuple{ + testCDSName + "1": {Update: wantUpdate1}, + testCDSName + "2": {Update: wantUpdate2}, }, UpdateMetadata{}) for i := 0; i < count; i++ { @@ -238,15 +233,15 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { clusterUpdateCh := testutils.NewChannel() client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName: wantUpdate, + client.NewClusters(map[string]ClusterUpdateErrTuple{ + testCDSName: {Update: wantUpdate}, }, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) @@ -255,7 +250,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { // Another watch for the resource in cache. 
clusterUpdateCh2 := testutils.NewChannel() client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -299,7 +294,7 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { clusterUpdateCh := testutils.NewChannel() client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -309,9 +304,9 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for cluster update: %v", err) } - gotUpdate := u.(clusterUpdateErr) - if gotUpdate.err == nil || !cmp.Equal(gotUpdate.u, ClusterUpdate{}) { - t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.u, gotUpdate.err) + gotUpdate := u.(ClusterUpdateErrTuple) + if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, ClusterUpdate{}) { + t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) } } @@ -338,15 +333,15 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { clusterUpdateCh := testutils.NewChannel() client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName: wantUpdate, + 
client.NewClusters(map[string]ClusterUpdateErrTuple{ + testCDSName: {Update: wantUpdate}, }, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) @@ -386,7 +381,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { clusterUpdateCh1 := testutils.NewChannel() client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh1.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh1.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -395,7 +390,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { // Another watch for a different name. clusterUpdateCh2 := testutils.NewChannel() client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -403,9 +398,9 @@ func (s) TestClusterResourceRemoved(t *testing.T) { wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName + "1": wantUpdate1, - testCDSName + "2": wantUpdate2, + client.NewClusters(map[string]ClusterUpdateErrTuple{ + testCDSName + "1": {Update: wantUpdate1}, + testCDSName + "2": {Update: wantUpdate2}, }, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) @@ -415,10 +410,10 @@ func (s) TestClusterResourceRemoved(t *testing.T) { } // Send another update to remove resource 1. 
- client.NewClusters(map[string]ClusterUpdate{testCDSName + "2": wantUpdate2}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) // Watcher 1 should get an error. - if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(clusterUpdateErr).err) != ErrorTypeResourceNotFound { + if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ClusterUpdateErrTuple).Err) != ErrorTypeResourceNotFound { t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -428,7 +423,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { } // Send one more update without resource 1. - client.NewClusters(map[string]ClusterUpdate{testCDSName + "2": wantUpdate2}, UpdateMetadata{}) + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) // Watcher 1 should not see an update. sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -465,7 +460,7 @@ func (s) TestClusterWatchNACKError(t *testing.T) { clusterUpdateCh := testutils.NewChannel() cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { @@ -473,8 +468,70 @@ func (s) TestClusterWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewClusters(map[string]ClusterUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, ClusterUpdate{}, nil); err != nil { + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: { + Err: wantError, + }}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + if err 
:= verifyClusterUpdate(ctx, clusterUpdateCh, ClusterUpdate{}, wantError); err != nil { + t.Fatal(err) + } +} + +// TestClusterWatchPartialValid covers the case that a response contains both +// valid and invalid resources. This response will be NACK'ed by the xdsclient. +// But the watchers with valid resources should receive the update, those with +// invalid resources should receive an error. +func (s) TestClusterWatchPartialValid(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + const badResourceName = "bad-resource" + updateChs := make(map[string]*testutils.Channel) + + for _, name := range []string{testCDSName, badResourceName} { + clusterUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchCluster(name, func(update ClusterUpdate, err error) { + clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + }) + defer func() { + cancelWatch() + if _, err := apiClient.removeWatches[ClusterResource].Receive(ctx); err != nil { + t.Fatalf("want watch to be canceled, got err: %v", err) + } + }() + if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + updateChs[name] = clusterUpdateCh + } + + wantError := fmt.Errorf("testing error") + wantError2 := fmt.Errorf("individual error") + client.NewClusters(map[string]ClusterUpdateErrTuple{ + testCDSName: {Update: ClusterUpdate{ClusterName: testEDSName}}, + badResourceName: {Err: wantError2}, + }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + + //
The valid resource should be sent to the watcher. + if err := verifyClusterUpdate(ctx, updateChs[testCDSName], ClusterUpdate{ClusterName: testEDSName}, nil); err != nil { + t.Fatal(err) + } + + // The failed watcher should receive an error. + if err := verifyClusterUpdate(ctx, updateChs[badResourceName], ClusterUpdate{}, wantError2); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index 0e46886cc4d3..e9b726c003e5 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -46,11 +46,6 @@ var ( } ) -type endpointsUpdateErr struct { - u EndpointsUpdate - err error -} - // TestEndpointsWatch covers the cases: // - an update is received after a watch() // - an update for another resource name (which doesn't trigger callback) @@ -75,20 +70,20 @@ func (s) TestEndpointsWatch(t *testing.T) { endpointsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another update for a different resource name. 
- client.NewEndpoints(map[string]EndpointsUpdate{"randomName": {}}, UpdateMetadata{}) + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{"randomName": {}}, UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -97,7 +92,7 @@ func (s) TestEndpointsWatch(t *testing.T) { // Cancel watch, and send update again. cancelWatch() - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -134,7 +129,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { endpointsUpdateCh := testutils.NewChannel() endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) cancelLastWatch = client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -147,7 +142,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { } wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -156,7 +151,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { // Cancel the last watch, and send update again. 
cancelLastWatch() - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count-1; i++ { if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -197,7 +192,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { endpointsUpdateCh := testutils.NewChannel() endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) client.WatchEndpoints(testCDSName+"1", func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -212,7 +207,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. endpointsUpdateCh2 := testutils.NewChannel() client.WatchEndpoints(testCDSName+"2", func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -220,9 +215,9 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { wantUpdate1 := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} wantUpdate2 := EndpointsUpdate{Localities: []Locality{testLocalities[1]}} - client.NewEndpoints(map[string]EndpointsUpdate{ - testCDSName + "1": wantUpdate1, - testCDSName + "2": wantUpdate2, + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ + testCDSName + "1": {Update: wantUpdate1}, + testCDSName + "2": {Update: wantUpdate2}, }, UpdateMetadata{}) for i := 0; i < count; i++ { @@ -257,14 +252,14 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { endpointsUpdateCh := 
testutils.NewChannel() client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -272,7 +267,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { // Another watch for the resource in cache. endpointsUpdateCh2 := testutils.NewChannel() client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -316,7 +311,7 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { endpointsUpdateCh := testutils.NewChannel() client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -326,9 +321,9 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for endpoints update: %v", err) } - gotUpdate := u.(endpointsUpdateErr) - if gotUpdate.err == nil || !cmp.Equal(gotUpdate.u, EndpointsUpdate{}) { - t.Fatalf("unexpected 
endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.u, gotUpdate.err) + gotUpdate := u.(EndpointsUpdateErrTuple) + if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, EndpointsUpdate{}) { + t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) + } } @@ -354,7 +349,7 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { endpointsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) + endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { @@ -362,8 +357,68 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, EndpointsUpdate{}, wantError); err != nil { t.Fatal(err) } } + +// TestEndpointsWatchPartialValid covers the case that a response contains both +// valid and invalid resources. This response will be NACK'ed by the xdsclient. +// But the watchers with valid resources should receive the update, those with +// invalid resources should receive an error. 
+func (s) TestEndpointsWatchPartialValid(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + const badResourceName = "bad-resource" + updateChs := make(map[string]*testutils.Channel) + + for _, name := range []string{testCDSName, badResourceName} { + endpointsUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchEndpoints(name, func(update EndpointsUpdate, err error) { + endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + }) + defer func() { + cancelWatch() + if _, err := apiClient.removeWatches[EndpointsResource].Receive(ctx); err != nil { + t.Fatalf("want watch to be canceled, got err: %v", err) + } + }() + if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + updateChs[name] = endpointsUpdateCh + } + + wantError := fmt.Errorf("testing error") + wantError2 := fmt.Errorf("individual error") + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ + testCDSName: {Update: EndpointsUpdate{Localities: []Locality{testLocalities[0]}}}, + badResourceName: {Err: wantError2}, + }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + + // The valid resource should be sent to the watcher. + if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], EndpointsUpdate{Localities: []Locality{testLocalities[0]}}, nil); err != nil { + t.Fatal(err) + } + + // The failed watcher should receive an error. 
+ if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], EndpointsUpdate{}, wantError2); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index da0255e37a8e..62acf24b80d9 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -26,11 +26,6 @@ import ( "google.golang.org/grpc/internal/testutils" ) -type ldsUpdateErr struct { - u ListenerUpdate - err error -} - // TestLDSWatch covers the cases: // - an update is received after a watch() // - an update for another resource name @@ -55,21 +50,21 @@ func (s) TestLDSWatch(t *testing.T) { ldsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) + ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another update, with an extra resource for a different resource name. - client.NewListeners(map[string]ListenerUpdate{ - testLDSName: wantUpdate, + client.NewListeners(map[string]ListenerUpdateErrTuple{ + testLDSName: {Update: wantUpdate}, "randomName": {}, }, UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { @@ -78,7 +73,7 @@ func (s) TestLDSWatch(t *testing.T) { // Cancel watch, and send update again. 
cancelWatch() - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -116,7 +111,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { ldsUpdateCh := testutils.NewChannel() ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) cancelLastWatch = client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) + ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -129,7 +124,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { } wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -138,7 +133,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { // Cancel the last watch, and send update again. 
cancelLastWatch() - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count-1; i++ { if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -180,7 +175,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { ldsUpdateCh := testutils.NewChannel() ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) + ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -195,7 +190,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. ldsUpdateCh2 := testutils.NewChannel() client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err}) + ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -203,9 +198,9 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { wantUpdate1 := ListenerUpdate{RouteConfigName: testRDSName + "1"} wantUpdate2 := ListenerUpdate{RouteConfigName: testRDSName + "2"} - client.NewListeners(map[string]ListenerUpdate{ - testLDSName + "1": wantUpdate1, - testLDSName + "2": wantUpdate2, + client.NewListeners(map[string]ListenerUpdateErrTuple{ + testLDSName + "1": {Update: wantUpdate1}, + testLDSName + "2": {Update: wantUpdate2}, }, UpdateMetadata{}) for i := 0; i < count; i++ { @@ -240,14 +235,14 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { ldsUpdateCh := testutils.NewChannel() client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: 
err}) + ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -255,7 +250,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { // Another watch for the resource in cache. ldsUpdateCh2 := testutils.NewChannel() client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err}) + ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -302,7 +297,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { ldsUpdateCh1 := testutils.NewChannel() client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh1.Send(ldsUpdateErr{u: update, err: err}) + ldsUpdateCh1.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -310,7 +305,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { // Another watch for a different name. 
ldsUpdateCh2 := testutils.NewChannel() client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err}) + ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -318,9 +313,9 @@ func (s) TestLDSResourceRemoved(t *testing.T) { wantUpdate1 := ListenerUpdate{RouteConfigName: testEDSName + "1"} wantUpdate2 := ListenerUpdate{RouteConfigName: testEDSName + "2"} - client.NewListeners(map[string]ListenerUpdate{ - testLDSName + "1": wantUpdate1, - testLDSName + "2": wantUpdate2, + client.NewListeners(map[string]ListenerUpdateErrTuple{ + testLDSName + "1": {Update: wantUpdate1}, + testLDSName + "2": {Update: wantUpdate2}, }, UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) @@ -330,10 +325,10 @@ func (s) TestLDSResourceRemoved(t *testing.T) { } // Send another update to remove resource 1. - client.NewListeners(map[string]ListenerUpdate{testLDSName + "2": wantUpdate2}, UpdateMetadata{}) + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) // Watcher 1 should get an error. - if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ldsUpdateErr).err) != ErrorTypeResourceNotFound { + if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ListenerUpdateErrTuple).Err) != ErrorTypeResourceNotFound { t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -343,7 +338,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { } // Send one more update without resource 1. 
-	client.NewListeners(map[string]ListenerUpdate{testLDSName + "2": wantUpdate2}, UpdateMetadata{})
+	client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{})
 
 	// Watcher 1 should not see an update.
 	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
 	defer sCancel()
@@ -380,7 +375,7 @@ func (s) TestListenerWatchNACKError(t *testing.T) {
 
 	ldsUpdateCh := testutils.NewChannel()
 	cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) {
-		ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err})
+		ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err})
 	})
 	defer cancelWatch()
 	if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil {
@@ -388,8 +383,68 @@ func (s) TestListenerWatchNACKError(t *testing.T) {
 	}
 
 	wantError := fmt.Errorf("testing error")
-	client.NewListeners(map[string]ListenerUpdate{testLDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
+	client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
 	if err := verifyListenerUpdate(ctx, ldsUpdateCh, ListenerUpdate{}, wantError); err != nil {
 		t.Fatal(err)
 	}
 }
+
+// TestListenerWatchPartialValid covers the case that a response contains both
+// valid and invalid resources. This response will be NACK'ed by the xdsclient.
+// But the watchers with valid resources should receive the update, those with
+// invalid resources should receive an error.
+func (s) TestListenerWatchPartialValid(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + const badResourceName = "bad-resource" + updateChs := make(map[string]*testutils.Channel) + + for _, name := range []string{testLDSName, badResourceName} { + ldsUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchListener(name, func(update ListenerUpdate, err error) { + ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + }) + defer func() { + cancelWatch() + if _, err := apiClient.removeWatches[ListenerResource].Receive(ctx); err != nil { + t.Fatalf("want watch to be canceled, got err: %v", err) + } + }() + if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + updateChs[name] = ldsUpdateCh + } + + wantError := fmt.Errorf("testing error") + wantError2 := fmt.Errorf("individual error") + client.NewListeners(map[string]ListenerUpdateErrTuple{ + testLDSName: {Update: ListenerUpdate{RouteConfigName: testEDSName}}, + badResourceName: {Err: wantError2}, + }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + + // The valid resource should be sent to the watcher. + if err := verifyListenerUpdate(ctx, updateChs[testLDSName], ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil { + t.Fatal(err) + } + + // The failed watcher should receive an error. 
+ if err := verifyListenerUpdate(ctx, updateChs[badResourceName], ListenerUpdate{}, wantError2); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index e569192b510d..cfb5449befd8 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -28,11 +28,6 @@ import ( "google.golang.org/grpc/internal/testutils" ) -type rdsUpdateErr struct { - u RouteConfigUpdate - err error -} - // TestRDSWatch covers the cases: // - an update is received after a watch() // - an update for another resource name (which doesn't trigger callback) @@ -57,7 +52,7 @@ func (s) TestRDSWatch(t *testing.T) { rdsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) + rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -71,13 +66,13 @@ func (s) TestRDSWatch(t *testing.T) { }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another update for a different resource name. 
- client.NewRouteConfigs(map[string]RouteConfigUpdate{"randomName": {}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{"randomName": {}}, UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -86,7 +81,7 @@ func (s) TestRDSWatch(t *testing.T) { // Cancel watch, and send update again. cancelWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -123,7 +118,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { rdsUpdateCh := testutils.NewChannel() rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) + rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -143,7 +138,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -152,7 +147,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { // Cancel the last watch, and send update again. 
cancelLastWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) for i := 0; i < count-1; i++ { if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -193,7 +188,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { rdsUpdateCh := testutils.NewChannel() rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) client.WatchRouteConfig(testRDSName+"1", func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) + rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -208,7 +203,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. rdsUpdateCh2 := testutils.NewChannel() client.WatchRouteConfig(testRDSName+"2", func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err}) + rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -230,9 +225,9 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdate{ - testRDSName + "1": wantUpdate1, - testRDSName + "2": wantUpdate2, + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ + testRDSName + "1": {Update: wantUpdate1}, + testRDSName + "2": {Update: wantUpdate2}, }, UpdateMetadata{}) for i := 0; i < count; i++ { @@ -267,7 +262,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { rdsUpdateCh := testutils.NewChannel() client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) + rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: 
err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -281,7 +276,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -289,7 +284,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { // Another watch for the resource in cache. rdsUpdateCh2 := testutils.NewChannel() client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err}) + rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -298,7 +293,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { } // New watch should receives the update. 
-	if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) {
+	if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, RouteConfigUpdateErrTuple{wantUpdate, nil}, cmp.AllowUnexported(RouteConfigUpdateErrTuple{})) {
 		t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err)
 	}
 
@@ -332,7 +327,7 @@ func (s) TestRouteWatchNACKError(t *testing.T) {
 
 	rdsUpdateCh := testutils.NewChannel()
 	cancelWatch := client.WatchRouteConfig(testCDSName, func(update RouteConfigUpdate, err error) {
-		rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err})
+		rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err})
 	})
 	defer cancelWatch()
 	if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil {
@@ -340,8 +335,74 @@ func (s) TestRouteWatchNACKError(t *testing.T) {
 	}
 
 	wantError := fmt.Errorf("testing error")
-	client.NewRouteConfigs(map[string]RouteConfigUpdate{testCDSName: {}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
+	client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}})
 	if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, RouteConfigUpdate{}, wantError); err != nil {
 		t.Fatal(err)
 	}
 }
+
+// TestRouteWatchPartialValid covers the case that a response contains both
+// valid and invalid resources. This response will be NACK'ed by the xdsclient.
+// But the watchers with valid resources should receive the update, those with
+// invalid resources should receive an error.
+func (s) TestRouteWatchPartialValid(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + const badResourceName = "bad-resource" + updateChs := make(map[string]*testutils.Channel) + + for _, name := range []string{testRDSName, badResourceName} { + rdsUpdateCh := testutils.NewChannel() + cancelWatch := client.WatchRouteConfig(name, func(update RouteConfigUpdate, err error) { + rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + }) + defer func() { + cancelWatch() + if _, err := apiClient.removeWatches[RouteConfigResource].Receive(ctx); err != nil { + t.Fatalf("want watch to be canceled, got err: %v", err) + } + }() + if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + updateChs[name] = rdsUpdateCh + } + + wantError := fmt.Errorf("testing error") + wantError2 := fmt.Errorf("individual error") + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ + testRDSName: {Update: RouteConfigUpdate{VirtualHosts: []*VirtualHost{{ + Domains: []string{testLDSName}, + Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + }}}}, + badResourceName: {Err: wantError2}, + }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + + // The valid resource should be sent to the watcher. 
+ if err := verifyRouteConfigUpdate(ctx, updateChs[testRDSName], RouteConfigUpdate{VirtualHosts: []*VirtualHost{{ + Domains: []string{testLDSName}, + Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + }}}, nil); err != nil { + t.Fatal(err) + } + + // The failed watcher should receive an error. + if err := verifyRouteConfigUpdate(ctx, updateChs[badResourceName], RouteConfigUpdate{}, wantError2); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 236d11f3731e..c2d627c4f25f 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -58,8 +58,8 @@ const transportSocketName = "envoy.transport_sockets.tls" // UnmarshalListener processes resources received in an LDS response, validates // them, and transforms them into a native struct which contains only fields we // are interested in. -func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdate, UpdateMetadata, error) { - update := make(map[string]ListenerUpdate) +func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]ListenerUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -296,8 +296,8 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err // validates them, and transforms them into a native struct which contains only // fields we are interested in. The provided hostname determines the route // configuration resources of interest. 
-func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdate, UpdateMetadata, error) { - update := make(map[string]RouteConfigUpdate) +func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]RouteConfigUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -631,8 +631,8 @@ func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logg // UnmarshalCluster processes resources received in an CDS response, validates // them, and transforms them into a native struct which contains only fields we // are interested in. -func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdate, UpdateMetadata, error) { - update := make(map[string]ClusterUpdate) +func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]ClusterUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -995,8 +995,8 @@ func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { // UnmarshalEndpoints processes resources received in an EDS response, // validates them, and transforms them into a native struct which contains only // fields we are interested in. 
-func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdate, UpdateMetadata, error) { - update := make(map[string]EndpointsUpdate) +func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]EndpointsUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -1090,9 +1090,45 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, return ret, nil } +// ListenerUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type ListenerUpdateErrTuple struct { + Update ListenerUpdate + Err error +} + +// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains +// the results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type RouteConfigUpdateErrTuple struct { + Update RouteConfigUpdate + Err error +} + +// ClusterUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type ClusterUpdateErrTuple struct { + Update ClusterUpdate + Err error +} + +// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. 
+type EndpointsUpdateErrTuple struct { + Update EndpointsUpdate + Err error +} + // processAllResources unmarshals and validates the resources, populates the // provided ret (a map), and returns metadata and error. // +// After this function, the ret map will be populated with both valid and +// invalid updates. Invalid resources will have an entry with the key as the +// resource name, value as an empty update. +// // The type of the resource is determined by the type of ret. E.g. // map[string]ListenerUpdate means this is for LDS. func processAllResources(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger, ret interface{}) (UpdateMetadata, error) { @@ -1106,10 +1142,10 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog for _, r := range resources { switch ret2 := ret.(type) { - case map[string]ListenerUpdate: + case map[string]ListenerUpdateErrTuple: name, update, err := unmarshalListenerResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = ListenerUpdateErrTuple{Update: update} continue } if name == "" { @@ -1119,11 +1155,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. - ret2[name] = ListenerUpdate{} - case map[string]RouteConfigUpdate: + ret2[name] = ListenerUpdateErrTuple{Err: err} + case map[string]RouteConfigUpdateErrTuple: name, update, err := unmarshalRouteConfigResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = RouteConfigUpdateErrTuple{Update: update} continue } if name == "" { @@ -1133,11 +1169,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. 
- ret2[name] = RouteConfigUpdate{} - case map[string]ClusterUpdate: + ret2[name] = RouteConfigUpdateErrTuple{Err: err} + case map[string]ClusterUpdateErrTuple: name, update, err := unmarshalClusterResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = ClusterUpdateErrTuple{Update: update} continue } if name == "" { @@ -1147,11 +1183,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. - ret2[name] = ClusterUpdate{} - case map[string]EndpointsUpdate: + ret2[name] = ClusterUpdateErrTuple{Err: err} + case map[string]EndpointsUpdateErrTuple: name, update, err := unmarshalEndpointsResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = EndpointsUpdateErrTuple{Update: update} continue } if name == "" { @@ -1161,7 +1197,7 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. 
- ret2[name] = EndpointsUpdate{} + ret2[name] = EndpointsUpdateErrTuple{Err: err} } } From 4e07a14b4e66e90ebf54ccc361012cb2b10724fd Mon Sep 17 00:00:00 2001 From: Cesar Ghali Date: Fri, 10 Sep 2021 13:58:12 -0700 Subject: [PATCH 226/998] credentials/ALTS: Ensure ALTS record protocol names are consistent (#4754) --- credentials/alts/internal/conn/record_test.go | 82 ++++++++++--------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/credentials/alts/internal/conn/record_test.go b/credentials/alts/internal/conn/record_test.go index 59d4f41e9e1c..c18f902b4012 100644 --- a/credentials/alts/internal/conn/record_test.go +++ b/credentials/alts/internal/conn/record_test.go @@ -40,11 +40,15 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } +const ( + rekeyRecordProtocol = "ALTSRP_GCM_AES128_REKEY" +) + var ( - nextProtocols = []string{"ALTSRP_GCM_AES128"} + recordProtocols = []string{rekeyRecordProtocol} altsRecordFuncs = map[string]ALTSRecordFunc{ // ALTS handshaker protocols. - "ALTSRP_GCM_AES128": func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) { + rekeyRecordProtocol: func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) { return NewAES128GCM(s, keyData) }, } @@ -77,7 +81,7 @@ func (c *testConn) Close() error { return nil } -func newTestALTSRecordConn(in, out *bytes.Buffer, side core.Side, np string, protected []byte) *conn { +func newTestALTSRecordConn(in, out *bytes.Buffer, side core.Side, rp string, protected []byte) *conn { key := []byte{ // 16 arbitrary bytes. 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x4f, 0x49} @@ -85,23 +89,23 @@ func newTestALTSRecordConn(in, out *bytes.Buffer, side core.Side, np string, pro in: in, out: out, } - c, err := NewConn(&tc, side, np, key, protected) + c, err := NewConn(&tc, side, rp, key, protected) if err != nil { panic(fmt.Sprintf("Unexpected error creating test ALTS record connection: %v", err)) } return c.(*conn) } -func newConnPair(np string, clientProtected []byte, serverProtected []byte) (client, server *conn) { +func newConnPair(rp string, clientProtected []byte, serverProtected []byte) (client, server *conn) { clientBuf := new(bytes.Buffer) serverBuf := new(bytes.Buffer) - clientConn := newTestALTSRecordConn(clientBuf, serverBuf, core.ClientSide, np, clientProtected) - serverConn := newTestALTSRecordConn(serverBuf, clientBuf, core.ServerSide, np, serverProtected) + clientConn := newTestALTSRecordConn(clientBuf, serverBuf, core.ClientSide, rp, clientProtected) + serverConn := newTestALTSRecordConn(serverBuf, clientBuf, core.ServerSide, rp, serverProtected) return clientConn, serverConn } -func testPingPong(t *testing.T, np string) { - clientConn, serverConn := newConnPair(np, nil, nil) +func testPingPong(t *testing.T, rp string) { + clientConn, serverConn := newConnPair(rp, nil, nil) clientMsg := []byte("Client Message") if n, err := clientConn.Write(clientMsg); n != len(clientMsg) || err != nil { t.Fatalf("Client Write() = %v, %v; want %v, ", n, err, len(clientMsg)) @@ -128,13 +132,13 @@ func testPingPong(t *testing.T, np string) { } func (s) TestPingPong(t *testing.T) { - for _, np := range nextProtocols { - testPingPong(t, np) + for _, rp := range recordProtocols { + testPingPong(t, rp) } } -func testSmallReadBuffer(t *testing.T, np string) { - clientConn, serverConn := newConnPair(np, nil, nil) +func testSmallReadBuffer(t *testing.T, rp string) { + clientConn, serverConn := newConnPair(rp, nil, nil) msg := []byte("Very Important 
Message") if n, err := clientConn.Write(msg); err != nil { t.Fatalf("Write() = %v, %v; want %v, ", n, err, len(msg)) @@ -155,13 +159,13 @@ func testSmallReadBuffer(t *testing.T, np string) { } func (s) TestSmallReadBuffer(t *testing.T) { - for _, np := range nextProtocols { - testSmallReadBuffer(t, np) + for _, rp := range recordProtocols { + testSmallReadBuffer(t, rp) } } -func testLargeMsg(t *testing.T, np string) { - clientConn, serverConn := newConnPair(np, nil, nil) +func testLargeMsg(t *testing.T, rp string) { + clientConn, serverConn := newConnPair(rp, nil, nil) // msgLen is such that the length in the framing is larger than the // default size of one frame. msgLen := altsRecordDefaultLength - msgTypeFieldSize - clientConn.crypto.EncryptionOverhead() + 1 @@ -179,12 +183,12 @@ func testLargeMsg(t *testing.T, np string) { } func (s) TestLargeMsg(t *testing.T) { - for _, np := range nextProtocols { - testLargeMsg(t, np) + for _, rp := range recordProtocols { + testLargeMsg(t, rp) } } -func testIncorrectMsgType(t *testing.T, np string) { +func testIncorrectMsgType(t *testing.T, rp string) { // framedMsg is an empty ciphertext with correct framing but wrong // message type. 
framedMsg := make([]byte, MsgLenFieldSize+msgTypeFieldSize) @@ -193,7 +197,7 @@ func testIncorrectMsgType(t *testing.T, np string) { binary.LittleEndian.PutUint32(framedMsg[MsgLenFieldSize:], wrongMsgType) in := bytes.NewBuffer(framedMsg) - c := newTestALTSRecordConn(in, nil, core.ClientSide, np, nil) + c := newTestALTSRecordConn(in, nil, core.ClientSide, rp, nil) b := make([]byte, 1) if n, err := c.Read(b); n != 0 || err == nil { t.Fatalf("Read() = , want %v", fmt.Errorf("received frame with incorrect message type %v", wrongMsgType)) @@ -201,15 +205,15 @@ func testIncorrectMsgType(t *testing.T, np string) { } func (s) TestIncorrectMsgType(t *testing.T) { - for _, np := range nextProtocols { - testIncorrectMsgType(t, np) + for _, rp := range recordProtocols { + testIncorrectMsgType(t, rp) } } -func testFrameTooLarge(t *testing.T, np string) { +func testFrameTooLarge(t *testing.T, rp string) { buf := new(bytes.Buffer) - clientConn := newTestALTSRecordConn(nil, buf, core.ClientSide, np, nil) - serverConn := newTestALTSRecordConn(buf, nil, core.ServerSide, np, nil) + clientConn := newTestALTSRecordConn(nil, buf, core.ClientSide, rp, nil) + serverConn := newTestALTSRecordConn(buf, nil, core.ServerSide, rp, nil) // payloadLen is such that the length in the framing is larger than // allowed in one frame. payloadLen := altsRecordLengthLimit - msgTypeFieldSize - clientConn.crypto.EncryptionOverhead() + 1 @@ -234,15 +238,15 @@ func testFrameTooLarge(t *testing.T, np string) { } func (s) TestFrameTooLarge(t *testing.T) { - for _, np := range nextProtocols { - testFrameTooLarge(t, np) + for _, rp := range recordProtocols { + testFrameTooLarge(t, rp) } } -func testWriteLargeData(t *testing.T, np string) { +func testWriteLargeData(t *testing.T, rp string) { // Test sending and receiving messages larger than the maximum write // buffer size. 
- clientConn, serverConn := newConnPair(np, nil, nil) + clientConn, serverConn := newConnPair(rp, nil, nil) // Message size is intentionally chosen to not be multiple of // payloadLengthLimtit. msgSize := altsWriteBufferMaxSize + (100 * 1024) @@ -277,25 +281,25 @@ func testWriteLargeData(t *testing.T, np string) { } func (s) TestWriteLargeData(t *testing.T) { - for _, np := range nextProtocols { - testWriteLargeData(t, np) + for _, rp := range recordProtocols { + testWriteLargeData(t, rp) } } -func testProtectedBuffer(t *testing.T, np string) { +func testProtectedBuffer(t *testing.T, rp string) { key := []byte{ // 16 arbitrary bytes. 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x4f, 0x49} // Encrypt a message to be passed to NewConn as a client-side protected // buffer. - newCrypto := protocols[np] + newCrypto := protocols[rp] if newCrypto == nil { - t.Fatalf("Unknown next protocol %q", np) + t.Fatalf("Unknown record protocol %q", rp) } crypto, err := newCrypto(core.ClientSide, key) if err != nil { - t.Fatalf("Failed to create a crypter for protocol %q: %v", np, err) + t.Fatalf("Failed to create a crypter for protocol %q: %v", rp, err) } msg := []byte("Client Protected Message") encryptedMsg, err := crypto.Encrypt(nil, msg) @@ -307,7 +311,7 @@ func testProtectedBuffer(t *testing.T, np string) { binary.LittleEndian.PutUint32(protectedMsg[4:], altsRecordMsgType) protectedMsg = append(protectedMsg, encryptedMsg...) 
- _, serverConn := newConnPair(np, nil, protectedMsg) + _, serverConn := newConnPair(rp, nil, protectedMsg) rcvClientMsg := make([]byte, len(msg)) if n, err := serverConn.Read(rcvClientMsg); n != len(rcvClientMsg) || err != nil { t.Fatalf("Server Read() = %v, %v; want %v, ", n, err, len(rcvClientMsg)) @@ -318,7 +322,7 @@ func testProtectedBuffer(t *testing.T, np string) { } func (s) TestProtectedBuffer(t *testing.T) { - for _, np := range nextProtocols { - testProtectedBuffer(t, np) + for _, rp := range recordProtocols { + testProtectedBuffer(t, rp) } } From 7f560ef4c5224efb8a86f2877315c381c30fa126 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 10 Sep 2021 14:08:26 -0700 Subject: [PATCH 227/998] grpc: close underlying transport when subConn is closed when in connecting state (#4751) --- clientconn.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/clientconn.go b/clientconn.go index 62dc3bdaf52f..8012b3af4501 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1322,17 +1322,19 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne } select { - case <-time.After(time.Until(connectDeadline)): + case <-connectCtx.Done(): // We didn't get the preface in time. - err := fmt.Errorf("failed to receive server preface within timeout") - newTr.Close(err) - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) - return err + newTr.Close(transport.ErrConnClosing) + if connectCtx.Err() == context.DeadlineExceeded { + err := errors.New("failed to receive server preface within timeout") + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + return err + } + return nil case <-prefaceReceived.Done(): // We got the preface - huzzah! things are good. 
ac.mu.Lock() defer ac.mu.Unlock() - defer prefaceReceived.Fire() if connClosed.HasFired() { // onClose called first; go idle but do nothing else. if ac.state != connectivity.Shutdown { @@ -1340,6 +1342,16 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne } return nil } + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // been set at that point. + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + go newTr.Close(transport.ErrConnClosing) + return nil + } ac.curAddr = addr ac.transport = newTr hcStarted = true From d25e31e741ddfb45f4126cd20e357185751e42c2 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 10 Sep 2021 14:12:13 -0700 Subject: [PATCH 228/998] client: fix case where GOAWAY would leak connections and memory (#4755) --- clientconn.go | 7 ++++++- test/end2end_test.go | 10 +++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/clientconn.go b/clientconn.go index 8012b3af4501..e933eae36d0c 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1287,9 +1287,14 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() - if !hcStarted { + if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. + // + // OR, we have already cancelled the health check context, meaning + // we have already called onClose once for this transport. In this + // case it would be dangerous to clear the transport and update the + // state, since there may be a new transport in this addrConn. 
return } hcancel() diff --git a/test/end2end_test.go b/test/end2end_test.go index 5702f18bdb2a..18e4ffc16e57 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7074,11 +7074,11 @@ func (s) TestGoAwayThenClose(t *testing.T) { s2 := grpc.NewServer() defer s2.Stop() testpb.RegisterTestServiceServer(s2, ts) - go s2.Serve(lis2) r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{ {Addr: lis1.Addr().String()}, + {Addr: lis2.Addr().String()}, }}) cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithInsecure()) if err != nil { @@ -7102,10 +7102,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { t.Fatalf("unexpected error from first recv: %v", err) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{ - {Addr: lis1.Addr().String()}, - {Addr: lis2.Addr().String()}, - }}) + go s2.Serve(lis2) // Send GO_AWAY to connection 1. go s1.GracefulStop() @@ -7126,6 +7123,9 @@ func (s) TestGoAwayThenClose(t *testing.T) { // Assert that connection 2 has been established. <-conn2Established.Done() + // Close the listener for server2 to prevent it from allowing new connections. + lis2.Close() + // Close connection 1. s1.Stop() From 03268c8ed29e801944a2265a82f240f7c0e1b1c3 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 10 Sep 2021 16:25:09 -0700 Subject: [PATCH 229/998] balancer: fix aggregated state to not report idle with zero subconns (#4756) --- balancer/balancer.go | 11 +++++++++-- balancer/base/balancer.go | 10 +++++++--- balancer/roundrobin/roundrobin.go | 2 +- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/balancer/balancer.go b/balancer/balancer.go index 34c435d90720..178de0898aa4 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -372,6 +372,7 @@ type ConnectivityStateEvaluator struct { numReady uint64 // Number of addrConns in ready state. numConnecting uint64 // Number of addrConns in connecting state. 
numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. } // RecordTransition records state change happening in subConn and based on that @@ -380,7 +381,8 @@ type ConnectivityStateEvaluator struct { // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; // - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; -// - Else the aggregated state is Idle +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else there are no subconns and the aggregated state is Transient Failure // // Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { @@ -394,6 +396,8 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numConnecting += updateVal case connectivity.TransientFailure: cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal } } @@ -407,5 +411,8 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne if cse.numTransientFailure > 0 { return connectivity.TransientFailure } - return connectivity.Idle + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure } diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index b1286533e73c..8dd504299fee 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -133,6 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() } else { // Always update the subconn's address in case the attributes @@ -213,10 +214,14 @@ func (b 
*baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -242,7 +247,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index 3c163f3b5a05..274eb2f85802 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -47,7 +47,7 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } From 77ffb2ef318a2b8442b9fb10f80724013b2e65eb Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 13 Sep 2021 14:09:57 -0400 Subject: [PATCH 230/998] xds: RBAC HTTP Filter (#4748) * xds: RBAC HTTP Filter --- xds/internal/httpfilter/rbac/rbac.go | 171 +++++++++++++ xds/internal/test/xds_integration_test.go | 4 + .../test/xds_server_integration_test.go | 233 ++++++++++++++++++ 3 files changed, 408 insertions(+) create mode 100644 xds/internal/httpfilter/rbac/rbac.go diff --git 
a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go new file mode 100644 index 000000000000..969111b619a4 --- /dev/null +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -0,0 +1,171 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rbac implements the Envoy RBAC HTTP filter. +package rbac + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/rbac" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" +) + +func init() { + httpfilter.Register(builder{}) +} + +type builder struct { +} + +type config struct { + httpfilter.FilterConfig + config *rpb.RBAC +} + +func (builder) TypeURLs() []string { + return []string{ + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute", + } +} + +// Parsing is the same for the base config and the override config. +func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { + // All the validation logic described in A41. 
+ for _, policy := range rbacCfg.GetRules().GetPolicies() { + // "Policy.condition and Policy.checked_condition must cause a + // validation failure if present." - A41 + if policy.Condition != nil { + return nil, errors.New("rbac: Policy.condition is present") + } + if policy.CheckedCondition != nil { + return nil, errors.New("rbac: policy.CheckedCondition is present") + } + + // "It is also a validation failure if Permission or Principal has a + // header matcher for a grpc- prefixed header name or :scheme." - A41 + for _, principal := range policy.Principals { + if principal.GetHeader() != nil { + name := principal.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name) + } + } + } + for _, permission := range policy.Permissions { + if permission.GetHeader() != nil { + name := permission.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name) + } + } + } + } + + return config{config: rbacCfg}, nil +} + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + any, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(rpb.RBAC) + if err := ptypes.UnmarshalAny(any, msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing config %v: %v", cfg, err) + } + return parseConfig(msg) +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + if override == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + any, ok := override.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing override config %v: unknown type 
%T", override, override) + } + msg := new(rpb.RBACPerRoute) + if err := ptypes.UnmarshalAny(any, msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing override config %v: %v", override, err) + } + return parseConfig(msg.Rbac) +} + +func (builder) IsTerminal() bool { + return false +} + +var _ httpfilter.ServerInterceptorBuilder = builder{} + +// BuildServerInterceptor is an optional interface builder implements in order +// to signify it works server side. +func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override httpfilter.FilterConfig) (resolver.ServerInterceptor, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil config provided") + } + + c, ok := cfg.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect config type provided (%T): %v", cfg, cfg) + } + + if override != nil { + // override completely replaces the listener configuration; but we + // still validate the listener config type. + c, ok = override.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect override config type provided (%T): %v", override, override) + } + } + + icfg := c.config + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. 
+ if icfg.Rules == nil { + return nil, nil + } + + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{icfg.Rules}) + if err != nil { + return nil, fmt.Errorf("error constructing matching engine: %v", err) + } + return &interceptor{chainEngine: ce}, nil +} + +type interceptor struct { + chainEngine *rbac.ChainEngine +} + +func (i *interceptor) AllowRPC(ctx context.Context) error { + return i.chainEngine.IsAuthorized(ctx) +} diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go index c643cd28f527..4b7cca3b828f 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/test/xds_integration_test.go @@ -68,6 +68,10 @@ func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, er return &testpb.Empty{}, nil } +func (*testService) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil +} + func createTmpFile(src, dst string) error { data, err := ioutil.ReadFile(src) if err != nil { diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 9136953f7978..89122e0647c3 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -36,13 +36,18 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" "google.golang.org/grpc/xds" + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" "google.golang.org/grpc/xds/internal/testutils/e2e" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" 
v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + anypb "github.com/golang/protobuf/ptypes/any" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" xdscreds "google.golang.org/grpc/credentials/xds" testpb "google.golang.org/grpc/test/grpc_testing" @@ -594,3 +599,231 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { t.Fatalf("streaming RPC should have been denied") } } + +// serverListenerWithRBACHTTPFilters returns an xds Listener resource with HTTP Filters defined in the HCM, and a route +// configuration that always matches to a route and a VH. +func serverListenerWithRBACHTTPFilters(host string, port uint32, rbacCfg *rpb.RBAC) *v3listenerpb.Listener { + // Rather than declare typed config inline, take a HCM proto and append the + // RBAC Filters to it. + hcm := &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}, + // This tests override parsing + building when RBAC Filter + // passed both normal and override config. 
+ TypedPerFilterConfig: map[string]*anypb.Any{ + "rbac": testutils.MarshalAny(&rpb.RBACPerRoute{Rbac: rbacCfg}), + }, + }}}, + }, + } + hcm.HttpFilters = nil + hcm.HttpFilters = append(hcm.HttpFilters, e2e.HTTPFilter("rbac", rbacCfg)) + hcm.HttpFilters = append(hcm.HttpFilters, e2e.RouterHTTPFilter) + + return &v3listenerpb.Listener{ + Name: fmt.Sprintf(e2e.ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(hcm), + }, + }, + }, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(hcm), + }, + }, + }, + }, + }, + } +} + +// TestRBACHTTPFilter tests the xds configured RBAC 
HTTP Filter. It sets up the +// full end to end flow, and makes sure certain RPC's are successful and proceed +// as normal and certain RPC's are denied by the RBAC HTTP Filter which gets +// called by hooked xds interceptors. +func (s) TestRBACHTTPFilter(t *testing.T) { + tests := []struct { + name string + rbacCfg *rpb.RBAC + wantStatusEmptyCall codes.Code + wantStatusUnaryCall codes.Code + }{ + // This test tests an RBAC HTTP Filter which is configured to allow any RPC. + // Any RPC passing through this RBAC HTTP Filter should proceed as normal. + { + name: "allow-anything", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + // This test tests an RBAC HTTP Filter which is configured to allow only + // RPC's with certain paths ("UnaryCall"). Only unary calls passing + // through this RBAC HTTP Filter should proceed as normal, and any + // others should be denied. + { + name: "allow-certain-path", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-path": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "/grpc.testing.TestService/UnaryCall"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.PermissionDenied, + wantStatusUnaryCall: codes.OK, + }, + // This test that a RBAC Config with nil rules means that every RPC is + // allowed. 
This maps to the line "If absent, no enforcing RBAC policy + // will be applied" from the RBAC Proto documentation for the Rules + // field. + { + name: "absent-rules", + rbacCfg: &rpb.RBAC{ + Rules: nil, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + func() { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + inboundLis := serverListenerWithRBACHTTPFilters(host, port, test.rbacCfg) + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. 
+ if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != test.wantStatusEmptyCall { + t.Fatalf("EmptyCall() returned err with status: %v, wantStatusEmptyCall: %v", status.Code(err), test.wantStatusEmptyCall) + } + + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != test.wantStatusUnaryCall { + t.Fatalf("UnaryCall() returned err with status: %v, wantStatusUnaryCall: %v", err, test.wantStatusUnaryCall) + } + }() + }) + } +} From 5bfc05fb0cf08fd2a8257d2bca8dba552263ba7e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 13 Sep 2021 11:50:52 -0700 Subject: [PATCH 231/998] grpc: clarify the use of transport.ErrConnClosing from createTransport() (#4757) --- clientconn.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clientconn.go b/clientconn.go index e933eae36d0c..34cc4c948db0 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1329,6 +1329,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne select { case <-connectCtx.Done(): // We didn't get the preface in time. + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") @@ -1352,8 +1355,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state. 
tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not // been set at that point. + // // We run this in a goroutine because newTr.Close() calls onClose() // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. go newTr.Close(transport.ErrConnClosing) return nil } From 5d8e5aad40bedb696205b96b786f1d0e1326b3f8 Mon Sep 17 00:00:00 2001 From: Kobi Date: Tue, 14 Sep 2021 17:15:02 +0300 Subject: [PATCH 232/998] Create NOTICE.txt (#4739) --- NOTICE.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 NOTICE.txt diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 000000000000..530197749e9d --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
From d41f21ca050b1721093702ede81c21b7e3bdaa63 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 14 Sep 2021 15:11:42 -0700 Subject: [PATCH 233/998] stats: support stats for all retry attempts; support transparent retry (#4749) --- stats/stats.go | 7 +- stream.go | 129 ++++++++++++++++--------------- test/end2end_test.go | 46 +++++++++--- test/retry_test.go | 175 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 284 insertions(+), 73 deletions(-) diff --git a/stats/stats.go b/stats/stats.go index a5ebeeb6932d..0285dcc6a268 100644 --- a/stats/stats.go +++ b/stats/stats.go @@ -36,12 +36,12 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool @@ -49,6 +49,9 @@ type Begin struct { IsClientStream bool // IsServerStream indicates whether the RPC is a server streaming RPC. IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. 
diff --git a/stream.go b/stream.go index e224af12d218..78b7e9e80153 100644 --- a/stream.go +++ b/stream.go @@ -274,35 +274,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - IsClientStream: desc.ClientStreams, - IsServerStream: desc.ServerStreams, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -316,7 +287,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, onCommit: onCommit, } @@ -325,9 +295,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { + if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { cs.finish(err) return nil, err } @@ -375,8 +343,43 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client // newAttemptLocked creates a new attempt with a transport. 
// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { +func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + sh := cs.cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + newAttempt := &csAttempt{ + ctx: ctx, + beginTime: beginTime, cs: cs, dc: cs.cc.dopts.dc, statsHandler: sh, @@ -391,15 +394,14 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r } }() - if err := cs.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return toRPCErr(err) } - ctx := cs.ctx if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
- ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } @@ -419,7 +421,7 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { // Return without converting to an RPC error so retry code can // inspect. @@ -444,8 +446,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -485,6 +486,7 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { + ctx context.Context cs *clientStream t transport.ClientTransport s *transport.Stream @@ -503,6 +505,7 @@ type csAttempt struct { trInfo *traceInfo statsHandler stats.Handler + beginTime time.Time } func (cs *clientStream) commitAttemptLocked() { @@ -520,15 +523,16 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (cs *clientStream) shouldRetry(err error) (bool, error) { if cs.attempt.s == nil { // Error from NewClientStream. nse, ok := err.(*transport.NewStreamError) if !ok { // Unexpected, but assume no I/O was performed and the RPC is not // fatal, so retry indefinitely. - return nil + return true, nil } // Unwrap and convert error. 
@@ -537,19 +541,19 @@ func (cs *clientStream) shouldRetry(err error) error { // Never retry DoNotRetry errors, which indicate the RPC should not be // retried due to max header list size violation, etc. if nse.DoNotRetry { - return err + return false, err } // In the event of a non-IO operation error from NewStream, we never // attempted to write anything to the wire, so we can retry // indefinitely. if !nse.PerformedIO { - return nil + return true, nil } } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. - return err + return false, err } // Wait for the trailers. unprocessed := false @@ -559,17 +563,17 @@ func (cs *clientStream) shouldRetry(err error) error { } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false if cs.attempt.s != nil { if !cs.attempt.s.TrailersOnly() { - return err + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback @@ -580,13 +584,13 @@ func (cs *clientStream) shouldRetry(err error) error { if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. 
- return err + return false, err } } @@ -599,16 +603,16 @@ func (cs *clientStream) shouldRetry(err error) error { rp := cs.methodConfig.RetryPolicy if rp == nil || !rp.RetryableStatusCodes[code] { - return err + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. if cs.retryThrottler.throttle() { - return err + return false, err } if cs.numRetries+1 >= rp.MaxAttempts { - return err + return false, err } var dur time.Duration @@ -631,10 +635,10 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } @@ -642,12 +646,13 @@ func (cs *clientStream) shouldRetry(err error) error { func (cs *clientStream) retryLocked(lastErr error) error { for { cs.attempt.finish(toRPCErr(lastErr)) - if err := cs.shouldRetry(lastErr); err != nil { + isTransparent, err := cs.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + if err := cs.newAttemptLocked(isTransparent); err != nil { return err } if lastErr = cs.replayBufferLocked(); lastErr == nil { @@ -937,7 +942,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return io.EOF } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -985,7 +990,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { a.mu.Unlock() } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1047,12 
+1052,12 @@ func (a *csAttempt) finish(err error) { if a.statsHandler != nil { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + a.statsHandler.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { diff --git a/test/end2end_test.go b/test/end2end_test.go index 18e4ffc16e57..bce752701dab 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -3724,10 +3724,12 @@ func (s) TestTransparentRetry(t *testing.T) { } defer lis.Close() server := &httpServer{ - headerFields: [][]string{{ - ":status", "200", - "content-type", "application/grpc", - "grpc-status", "0", + responses: []httpServerResponse{{ + trailers: [][]string{{ + ":status", "200", + "content-type", "application/grpc", + "grpc-status", "0", + }}, }}, refuseStream: func(i uint32) bool { switch i { @@ -7343,9 +7345,15 @@ func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { doHTTPHeaderTest(t, codes.Internal, header, header, header) } +type httpServerResponse struct { + headers [][]string + payload []byte + trailers [][]string +} + type httpServer struct { - headerFields [][]string refuseStream func(uint32) bool + responses []httpServerResponse } func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error { @@ -7369,6 +7377,10 @@ func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields }) } +func (s *httpServer) writePayload(framer *http2.Framer, sid uint32, payload []byte) error { + return framer.WriteData(sid, false, payload) +} + func (s *httpServer) start(t *testing.T, lis net.Listener) { // Launch an HTTP server to send back header. 
go func() { @@ -7394,7 +7406,7 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { var sid uint32 // Loop until conn is closed and framer returns io.EOF - for { + for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) { // Read frames until a header is received. for { frame, err := framer.ReadFrame() @@ -7413,13 +7425,29 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { writer.Flush() } } - for i, headers := range s.headerFields { - if err = s.writeHeader(framer, sid, headers, i == len(s.headerFields)-1); err != nil { + + response := s.responses[requestNum] + for _, header := range response.headers { + if err = s.writeHeader(framer, sid, header, false); err != nil { t.Errorf("Error at server-side while writing headers. Err: %v", err) return } writer.Flush() } + if response.payload != nil { + if err = s.writePayload(framer, sid, response.payload); err != nil { + t.Errorf("Error at server-side while writing payload. Err: %v", err) + return + } + writer.Flush() + } + for i, trailer := range response.trailers { + if err = s.writeHeader(framer, sid, trailer, i == len(response.trailers)-1); err != nil { + t.Errorf("Error at server-side while writing trailers. 
Err: %v", err) + return + } + writer.Flush() + } } }() } @@ -7432,7 +7460,7 @@ func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string } defer lis.Close() server := &httpServer{ - headerFields: headerFields, + responses: []httpServerResponse{{trailers: headerFields}}, } server.start(t, lis) cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) diff --git a/test/retry_test.go b/test/retry_test.go index ad1268faa96e..156f92fa5454 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -22,9 +22,12 @@ import ( "context" "fmt" "io" + "net" "os" + "reflect" "strconv" "strings" + "sync" "testing" "time" @@ -34,6 +37,7 @@ import ( "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -550,3 +554,174 @@ func (s) TestRetryStreaming(t *testing.T) { }() } } + +type retryStatsHandler struct { + mu sync.Mutex + s []stats.RPCStats +} + +func (*retryStatsHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { + return ctx +} +func (h *retryStatsHandler) HandleRPC(_ context.Context, s stats.RPCStats) { + h.mu.Lock() + h.s = append(h.s, s) + h.mu.Unlock() +} +func (*retryStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} +func (*retryStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +func (s) TestRetryStats(t *testing.T) { + defer enableRetry()() + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen. 
Err: %v", err) + } + defer lis.Close() + server := &httpServer{ + responses: []httpServerResponse{{ + trailers: [][]string{{ + ":status", "200", + "content-type", "application/grpc", + "grpc-status", "14", // UNAVAILABLE + "grpc-message", "unavailable retry", + "grpc-retry-pushback-ms", "10", + }}, + }, { + headers: [][]string{{ + ":status", "200", + "content-type", "application/grpc", + }}, + payload: []byte{0, 0, 0, 0, 0}, // header for 0-byte response message. + trailers: [][]string{{ + "grpc-status", "0", // OK + }}, + }}, + refuseStream: func(i uint32) bool { + return i == 1 + }, + } + server.start(t, lis) + handler := &retryStatsHandler{} + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithStatsHandler(handler), + grpc.WithDefaultServiceConfig((`{ + "methodConfig": [{ + "name": [{"service": "grpc.testing.TestService"}], + "retryPolicy": { + "MaxAttempts": 4, + "InitialBackoff": ".01s", + "MaxBackoff": ".01s", + "BackoffMultiplier": 1.0, + "RetryableStatusCodes": [ "UNAVAILABLE" ] + } + }]}`))) + if err != nil { + t.Fatalf("failed to dial due to err: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + client := testpb.NewTestServiceClient(cc) + + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("unexpected EmptyCall error: %v", err) + } + handler.mu.Lock() + want := []stats.RPCStats{ + &stats.Begin{}, + &stats.OutHeader{FullMethod: "/grpc.testing.TestService/EmptyCall"}, + &stats.OutPayload{WireLength: 5}, + &stats.End{}, + + &stats.Begin{IsTransparentRetryAttempt: true}, + &stats.OutHeader{FullMethod: "/grpc.testing.TestService/EmptyCall"}, + &stats.OutPayload{WireLength: 5}, + &stats.InTrailer{Trailer: metadata.Pairs("content-type", "application/grpc", "grpc-retry-pushback-ms", "10")}, + &stats.End{}, + + &stats.Begin{}, + &stats.OutHeader{FullMethod: "/grpc.testing.TestService/EmptyCall"}, + &stats.OutPayload{WireLength: 5}, + 
&stats.InHeader{}, + &stats.InPayload{WireLength: 5}, + &stats.InTrailer{}, + &stats.End{}, + } + + // There is a race between noticing the RST_STREAM during the first RPC + // attempt and writing the payload. If we detect that the client did not + // send the OutPayload, we remove it from want. + if _, ok := handler.s[2].(*stats.End); ok { + want = append(want[:2], want[3:]...) + } + + toString := func(ss []stats.RPCStats) (ret []string) { + for _, s := range ss { + ret = append(ret, fmt.Sprintf("%T - %v", s, s)) + } + return ret + } + t.Logf("Handler received frames:\n%v\n---\nwant:\n%v\n", + strings.Join(toString(handler.s), "\n"), + strings.Join(toString(want), "\n")) + + if len(handler.s) != len(want) { + t.Fatalf("received unexpected number of RPCStats: got %v; want %v", len(handler.s), len(want)) + } + + // There is a race between receiving the payload (triggered by the + // application / gRPC library) and receiving the trailer (triggered at the + // transport layer). Adjust the received stats accordingly if necessary. + // Note: we measure from the end of the RPCStats due to the race above. 
+ tIdx, pIdx := len(handler.s)-3, len(handler.s)-2 + _, okT := handler.s[tIdx].(*stats.InTrailer) + _, okP := handler.s[pIdx].(*stats.InPayload) + if okT && okP { + handler.s[pIdx], handler.s[tIdx] = handler.s[tIdx], handler.s[pIdx] + } + + for i := range handler.s { + w, s := want[i], handler.s[i] + + // Validate the event type + if reflect.TypeOf(w) != reflect.TypeOf(s) { + t.Fatalf("at position %v: got %T; want %T", i, s, w) + } + wv, sv := reflect.ValueOf(w).Elem(), reflect.ValueOf(s).Elem() + + // Validate that Client is always true + if sv.FieldByName("Client").Interface().(bool) != true { + t.Fatalf("at position %v: got Client=false; want true", i) + } + + // Validate any set fields in want + for i := 0; i < wv.NumField(); i++ { + if !wv.Field(i).IsZero() { + if got, want := sv.Field(i).Interface(), wv.Field(i).Interface(); !reflect.DeepEqual(got, want) { + name := reflect.TypeOf(w).Elem().Field(i).Name + t.Fatalf("at position %v, field %v: got %v; want %v", i, name, got, want) + } + } + } + + // Since the above only tests non-zero-value fields, test + // IsTransparentRetryAttempt=false explicitly when needed. + if wb, ok := w.(*stats.Begin); ok && !wb.IsTransparentRetryAttempt { + if s.(*stats.Begin).IsTransparentRetryAttempt { + t.Fatalf("at position %v: got IsTransparentRetryAttempt=true; want false", i) + } + } + } + + // Validate timings between last Begin and preceding End. 
+ end := handler.s[len(handler.s)-8].(*stats.End) + begin := handler.s[len(handler.s)-7].(*stats.Begin) + diff := begin.BeginTime.Sub(end.EndTime) + if diff < 10*time.Millisecond || diff > 50*time.Millisecond { + t.Fatalf("pushback time before final attempt = %v; want ~10ms", diff) + } +} From 2d4e44a0cd75808908c9fb98aac764af6558ff6e Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 14 Sep 2021 16:11:03 -0700 Subject: [PATCH 234/998] xds/affinity: fix bugs in clusterresolver and xds-resolver (#4744) --- xds/internal/balancer/clusterresolver/config.go | 6 ++++-- xds/internal/balancer/clusterresolver/config_test.go | 5 +++-- xds/internal/resolver/serviceconfig.go | 2 +- xds/internal/resolver/serviceconfig_test.go | 4 ++-- xds/internal/resolver/xds_resolver_test.go | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 33191fc7bd03..a6a3cbab8040 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -23,8 +23,10 @@ import ( "fmt" "strings" + "google.golang.org/grpc/balancer/roundrobin" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" ) // DiscoveryMechanismType is the type of discovery mechanism. 
@@ -167,8 +169,8 @@ type LBConfig struct { } const ( - rrName = "ROUND_ROBIN" - rhName = "RING_HASH" + rrName = roundrobin.Name + rhName = ringhash.Name ) func parseConfig(c json.RawMessage) (*LBConfig, error) { diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index 17d5f409674f..796f8a493722 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -25,6 +25,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/balancer/stub" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" ) func TestDiscoveryMechanismTypeMarshalJSON(t *testing.T) { @@ -136,7 +137,7 @@ const ( "type": "EDS", "edsServiceName": "test-eds-service-name" }], - "xdsLbPolicy":[{"RING_HASH":{}}] + "xdsLbPolicy":[{"ring_hash_experimental":{}}] }` testJSONConfig5 = `{ "discoveryMechanisms": [{ @@ -234,7 +235,7 @@ func TestParseConfig(t *testing.T) { }, }, XDSLBPolicy: &internalserviceconfig.BalancerConfig{ - Name: "RING_HASH", + Name: ringhash.Name, Config: nil, }, }, diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index dceea49b3b60..ddf699f938b3 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -226,7 +226,7 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ var generatedPolicyHash bool switch policy.HashPolicyType { case xdsclient.HashPolicyTypeHeader: - md, ok := metadata.FromIncomingContext(rpcInfo.Context) + md, ok := metadata.FromOutgoingContext(rpcInfo.Context) if !ok { continue } diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index 568873ebbc71..a1a48944dc46 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go 
@@ -73,7 +73,7 @@ func (s) TestGenerateRequestHash(t *testing.T) { }}, requestHashWant: xxhash.Sum64String("/new-products"), rpcInfo: iresolver.RPCInfo{ - Context: metadata.NewIncomingContext(context.Background(), metadata.Pairs(":path", "/products")), + Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs(":path", "/products")), Method: "/some-method", }, }, @@ -101,7 +101,7 @@ func (s) TestGenerateRequestHash(t *testing.T) { }}, requestHashWant: xxhash.Sum64String("eaebece"), rpcInfo: iresolver.RPCInfo{ - Context: metadata.NewIncomingContext(context.Background(), metadata.Pairs(":path", "abc")), + Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs(":path", "abc")), Method: "/some-method", }, }, diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 3b147e4ff207..90e6c1d4db05 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -510,7 +510,7 @@ func (s) TestXDSResolverRequestHash(t *testing.T) { } // Selecting a config when there was a hash policy specified in the route // that will be selected should put a request hash in the config's context. 
- res, err := cs.SelectConfig(iresolver.RPCInfo{Context: metadata.NewIncomingContext(context.Background(), metadata.Pairs(":path", "/products"))}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs(":path", "/products"))}) if err != nil { t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) } From 98ccf472da9a7e01d53bd27e5ad537d46c1b5ca9 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 15 Sep 2021 13:35:51 -0700 Subject: [PATCH 235/998] priority: handle Idle children the same way as Ready (#4769) --- .../balancer/priority/balancer_priority.go | 31 +++---------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index d9b54fa2baae..bd2c6724ea5c 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -225,16 +225,17 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S child.state = s switch s.ConnectivityState { - case connectivity.Ready: + case connectivity.Ready, connectivity.Idle: + // Note that idle is also handled as if it's Ready. It will close the + // lower priorities (which will be kept in a cache, not deleted), and + // new picks will use the Idle picker. b.handlePriorityWithNewStateReady(child, priority) case connectivity.TransientFailure: b.handlePriorityWithNewStateTransientFailure(child, priority) case connectivity.Connecting: b.handlePriorityWithNewStateConnecting(child, priority, oldState) - case connectivity.Idle: - b.handlePriorityWithNewStateIdle(child, priority) default: - // New state is Idle, should never happen. Don't forward. + // New state is Shutdown, should never happen. Don't forward. 
} } @@ -358,25 +359,3 @@ func (b *priorityBalancer) handlePriorityWithNewStateConnecting(child *childBala // Old state is Connecting, TransientFailure or Shutdown. Don't forward. } } - -// handlePriorityWithNewStateIdle handles state Idle from a higher or equal -// priority. -// -// An update with state Idle: -// - If it's from higher priority: -// - Do nothing -// - It actually shouldn't happen, no balancer switches back to Idle. -// - If it's from priorityInUse: -// - Forward only -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateIdle(child *childBalancer, priority int) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - // Forward the update. - b.cc.UpdateState(child.state) -} From c84a5de06496bf8416cebf9d0058f481e37c165e Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 15 Sep 2021 17:02:08 -0400 Subject: [PATCH 236/998] transport/server: add :method POST to incoming metadata (#4770) * transport/server: add :method POST to incoming metadata --- binarylog/binarylog_end2end_test.go | 16 +++++++++++++ internal/transport/http2_server.go | 1 + test/end2end_test.go | 37 +++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+) diff --git a/binarylog/binarylog_end2end_test.go b/binarylog/binarylog_end2end_test.go index 61eeb68edae8..7da91ad1a8da 100644 --- a/binarylog/binarylog_end2end_test.go +++ b/binarylog/binarylog_end2end_test.go @@ -850,11 +850,27 @@ func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) { tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...) h.Metadata.Entry = tmp sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) + // Delete headers that have POST values here since we cannot control + // this. 
+ for i, entry := range h.Metadata.Entry { + if entry.Key == ":method" { + h.Metadata.Entry = append(h.Metadata.Entry[:i], h.Metadata.Entry[i+1:]...) + break + } + } } if h := e.GetServerHeader(); h != nil { tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...) h.Metadata.Entry = tmp sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) + // Delete headers that have POST values here since we cannot control + // this. + for i, entry := range h.Metadata.Entry { + if entry.Key == ":method" { + h.Metadata.Entry = append(h.Metadata.Entry[:i], h.Metadata.Entry[i+1:]...) + break + } + } } if h := e.GetTrailer(); h != nil { sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index cd0ebed98845..0ecfe09ceecc 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -380,6 +380,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.recvCompress = hf.Value case ":method": httpMethod = hf.Value + mdata[":method"] = append(mdata[":method"], hf.Value) case ":path": s.method = hf.Value case "grpc-timeout": diff --git a/test/end2end_test.go b/test/end2end_test.go index bce752701dab..4c7e2f1fc75c 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7847,3 +7847,40 @@ func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) { t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF) } } + +func unaryInterceptorVerifyPost(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.NotFound, "metadata was not in context") + } + method := md.Get(":method") + if len(method) != 1 { + return nil, status.Error(codes.InvalidArgument, 
":method value had more than one value") + } + if method[0] != "POST" { + return nil, status.Error(codes.InvalidArgument, ":method value was not post") + } + return handler(ctx, req) +} + +// TestUnaryInterceptorGetsPost verifies that the server transport adds a +// :method POST header to metadata, and that that added Header is visibile at +// the grpc layer. +func (s) TestUnaryInterceptorGetsPost(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyPost)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK) + } +} From 4c5f7fb0eecd984708e0c1eeea7d426f275b22d3 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 15 Sep 2021 14:05:59 -0700 Subject: [PATCH 237/998] xds: de-experimentalize xDS apis required for psm security (#4753) --- connectivity/connectivity.go | 35 ++++++++++++- credentials/xds/xds.go | 5 -- xds/internal/server/listener_wrapper.go | 49 ++++--------------- .../test/xds_server_serving_mode_test.go | 33 +++++++------ xds/server.go | 7 +-- xds/server_options.go | 23 +++------ xds/server_test.go | 13 ++--- xds/xds.go | 10 ++-- 8 files changed, 83 insertions(+), 92 deletions(-) diff --git a/connectivity/connectivity.go b/connectivity/connectivity.go index 010156261505..4a89926422bc 100644 --- a/connectivity/connectivity.go +++ b/connectivity/connectivity.go @@ -18,7 +18,6 @@ // Package connectivity defines connectivity semantics. 
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. package connectivity import ( @@ -45,7 +44,7 @@ func (s State) String() string { return "SHUTDOWN" default: logger.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + return "INVALID_STATE" } } @@ -61,3 +60,35 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/credentials/xds/xds.go b/credentials/xds/xds.go index 0243009df644..680ea9cfa109 100644 --- a/credentials/xds/xds.go +++ b/credentials/xds/xds.go @@ -18,11 +18,6 @@ // Package xds provides a transport credentials implementation where the // security configuration is pushed by a management server using xDS APIs. -// -// Experimental -// -// Notice: All APIs in this package are EXPERIMENTAL and may be removed in a -// later release. 
package xds import ( diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 2d30ca1231d0..0d1173324bb6 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -30,6 +30,7 @@ import ( "unsafe" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" internalbackoff "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -51,41 +52,11 @@ var ( backoffFunc = bs.Backoff ) -// ServingMode indicates the current mode of operation of the server. -// -// This API exactly mirrors the one in the public xds package. We have to -// redefine it here to avoid a cyclic dependency. -type ServingMode int - -const ( - // ServingModeStarting indicates that the serving is starting up. - ServingModeStarting ServingMode = iota - // ServingModeServing indicates the the server contains all required xDS - // configuration is serving RPCs. - ServingModeServing - // ServingModeNotServing indicates that the server is not accepting new - // connections. Existing connections will be closed gracefully, allowing - // in-progress RPCs to complete. A server enters this mode when it does not - // contain the required xDS configuration to serve RPCs. - ServingModeNotServing -) - -func (s ServingMode) String() string { - switch s { - case ServingModeNotServing: - return "not-serving" - case ServingModeServing: - return "serving" - default: - return "starting" - } -} - // ServingModeCallback is the callback that users can register to get notified // about the server's serving mode changes. The callback is invoked with the // address of the listener and its new mode. The err parameter is set to a // non-nil error if the server has transitioned into not-serving mode. 
-type ServingModeCallback func(addr net.Addr, mode ServingMode, err error) +type ServingModeCallback func(addr net.Addr, mode connectivity.ServingMode, err error) // DrainCallback is the callback that an xDS-enabled server registers to get // notified about updates to the Listener configuration. The server is expected @@ -208,7 +179,7 @@ type listenerWrapper struct { // get a Listener resource update). mu sync.RWMutex // Current serving mode. - mode ServingMode + mode connectivity.ServingMode // Filter chains received as part of the last good update. filterChains *xdsclient.FilterChainManager @@ -267,7 +238,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { } l.mu.RLock() - if l.mode == ServingModeNotServing { + if l.mode == connectivity.ServingModeNotServing { // Close connections as soon as we accept them when we are in // "not-serving" mode. Since we accept a net.Listener from the user // in Serve(), we cannot close the listener when we move to @@ -390,7 +361,7 @@ func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { if update.err != nil { l.logger.Warningf("Received error for rds names specified in resource %q: %+v", l.name, update.err) if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { - l.switchMode(nil, ServingModeNotServing, update.err) + l.switchMode(nil, connectivity.ServingModeNotServing, update.err) } // For errors which are anything other than "resource-not-found", we // continue to use the old configuration. 
@@ -398,7 +369,7 @@ func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { } atomic.StorePointer(&l.rdsUpdates, unsafe.Pointer(&update.updates)) - l.switchMode(l.filterChains, ServingModeServing, nil) + l.switchMode(l.filterChains, connectivity.ServingModeServing, nil) l.goodUpdate.Fire() } @@ -406,7 +377,7 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { if update.err != nil { l.logger.Warningf("Received error for resource %q: %+v", l.name, update.err) if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { - l.switchMode(nil, ServingModeNotServing, update.err) + l.switchMode(nil, connectivity.ServingModeNotServing, update.err) } // For errors which are anything other than "resource-not-found", we // continue to use the old configuration. @@ -428,7 +399,7 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { // what we have decided to do. See gRPC A36 for more details. ilc := update.update.InboundListenerCfg if ilc.Address != l.addr || ilc.Port != l.port { - l.switchMode(nil, ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) + l.switchMode(nil, connectivity.ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) return } @@ -447,12 +418,12 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { // from the management server, this listener has all the configuration // needed, and is ready to serve. 
if len(ilc.FilterChains.RouteConfigNames) == 0 { - l.switchMode(ilc.FilterChains, ServingModeServing, nil) + l.switchMode(ilc.FilterChains, connectivity.ServingModeServing, nil) l.goodUpdate.Fire() } } -func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode ServingMode, err error) { +func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode connectivity.ServingMode, err error) { l.mu.Lock() defer l.mu.Unlock() diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 90a6aa103884..7282aa94dd26 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -30,6 +30,7 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" testpb "google.golang.org/grpc/test/grpc_testing" @@ -64,8 +65,8 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Create a couple of channels on which mode updates will be pushed. - updateCh1 := make(chan xds.ServingMode, 1) - updateCh2 := make(chan xds.ServingMode, 1) + updateCh1 := make(chan connectivity.ServingMode, 1) + updateCh2 := make(chan connectivity.ServingMode, 1) // Create a server option to get notified about serving mode changes, and // push the updated mode on the channels created above. 
@@ -124,16 +125,16 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh1: - if mode != xds.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + if mode != connectivity.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } select { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh2: - if mode != xds.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + if mode != connectivity.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } @@ -169,16 +170,16 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh1: - if mode != xds.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + if mode != connectivity.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } select { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh2: - if mode != xds.ServingModeNotServing { - t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeNotServing) + if mode != connectivity.ServingModeNotServing { + t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) } } @@ -203,8 +204,8 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh1: - if mode != xds.ServingModeNotServing { - t.Errorf("listener received new mode %v, want %v", mode, 
xds.ServingModeNotServing) + if mode != connectivity.ServingModeNotServing { + t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) } } @@ -233,16 +234,16 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh1: - if mode != xds.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + if mode != connectivity.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } select { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh2: - if mode != xds.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, xds.ServingModeServing) + if mode != connectivity.ServingModeServing { + t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } diff --git a/xds/server.go b/xds/server.go index 2014fcf5ec95..33a490957995 100644 --- a/xds/server.go +++ b/xds/server.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" @@ -231,7 +232,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { ListenerResourceName: name, XDSCredsInUse: s.xdsCredsInUse, XDSClient: s.xdsC, - ModeCallback: func(addr net.Addr, mode server.ServingMode, err error) { + ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) { modeUpdateCh.Put(&modeChangeArgs{ addr: addr, mode: mode, @@ -261,7 +262,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // modeChangeArgs wraps argument required for invoking mode change callback. 
type modeChangeArgs struct { addr net.Addr - mode server.ServingMode + mode connectivity.ServingMode err error } @@ -278,7 +279,7 @@ func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { case u := <-updateCh.Get(): updateCh.Load() args := u.(*modeChangeArgs) - if args.mode == ServingModeNotServing { + if args.mode == connectivity.ServingModeNotServing { // We type assert our underlying gRPC server to the real // grpc.Server here before trying to initiate the drain // operation. This approach avoids performing the same type diff --git a/xds/server_options.go b/xds/server_options.go index 0918c097a3e5..1d46c3adb7b2 100644 --- a/xds/server_options.go +++ b/xds/server_options.go @@ -22,7 +22,7 @@ import ( "net" "google.golang.org/grpc" - iserver "google.golang.org/grpc/xds/internal/server" + "google.golang.org/grpc/connectivity" ) type serverOptions struct { @@ -41,20 +41,6 @@ func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { return &serverOption{apply: func(o *serverOptions) { o.modeCallback = cb }} } -// ServingMode indicates the current mode of operation of the server. -type ServingMode = iserver.ServingMode - -const ( - // ServingModeServing indicates the the server contains all required xDS - // configuration is serving RPCs. - ServingModeServing = iserver.ServingModeServing - // ServingModeNotServing indicates that the server is not accepting new - // connections. Existing connections will be closed gracefully, allowing - // in-progress RPCs to complete. A server enters this mode when it does not - // contain the required xDS configuration to serve RPCs. - ServingModeNotServing = iserver.ServingModeNotServing -) - // ServingModeCallbackFunc is the callback that users can register to get // notified about the server's serving mode changes. The callback is invoked // with the address of the listener and its new mode. 
@@ -66,7 +52,7 @@ type ServingModeCallbackFunc func(addr net.Addr, args ServingModeChangeArgs) // function. type ServingModeChangeArgs struct { // Mode is the new serving mode of the server listener. - Mode ServingMode + Mode connectivity.ServingMode // Err is set to a non-nil error if the server has transitioned into // not-serving mode. Err error @@ -80,6 +66,11 @@ type ServingModeChangeArgs struct { // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func BootstrapContentsForTesting(contents []byte) grpc.ServerOption { return &serverOption{apply: func(o *serverOptions) { o.bootstrapContents = contents }} } diff --git a/xds/server_test.go b/xds/server_test.go index 680b65620507..0866e0414ae2 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -35,6 +35,7 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/credentials/xds" @@ -435,8 +436,8 @@ func (s) TestServeSuccess(t *testing.T) { if err != nil { t.Fatalf("error when waiting for serving mode to change: %v", err) } - if mode := v.(ServingMode); mode != ServingModeNotServing { - t.Fatalf("server mode is %q, want %q", mode, ServingModeNotServing) + if mode := v.(connectivity.ServingMode); mode != connectivity.ServingModeNotServing { + t.Fatalf("server mode is %q, want %q", mode, connectivity.ServingModeNotServing) } // Push a good LDS response, and wait for Serve() to be invoked on the @@ -463,8 +464,8 @@ func (s) TestServeSuccess(t *testing.T) { if err != nil { t.Fatalf("error when waiting for serving 
mode to change: %v", err) } - if mode := v.(ServingMode); mode != ServingModeServing { - t.Fatalf("server mode is %q, want %q", mode, ServingModeServing) + if mode := v.(connectivity.ServingMode); mode != connectivity.ServingModeServing { + t.Fatalf("server mode is %q, want %q", mode, connectivity.ServingModeServing) } // Push an update to the registered listener watch callback with a Listener @@ -489,8 +490,8 @@ func (s) TestServeSuccess(t *testing.T) { if err != nil { t.Fatalf("error when waiting for serving mode to change: %v", err) } - if mode := v.(ServingMode); mode != ServingModeNotServing { - t.Fatalf("server mode is %q, want %q", mode, ServingModeNotServing) + if mode := v.(connectivity.ServingMode); mode != connectivity.ServingModeNotServing { + t.Fatalf("server mode is %q, want %q", mode, connectivity.ServingModeNotServing) } } diff --git a/xds/xds.go b/xds/xds.go index 864d2e6a2e3a..ec16c9f520bc 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -25,11 +25,6 @@ // // See https://github.com/grpc/grpc-go/tree/master/examples/features/xds for // example. -// -// Experimental -// -// Notice: All APIs in this package are experimental and may be removed in a -// later release. package xds import ( @@ -87,6 +82,11 @@ func init() { // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { return xdsresolver.NewBuilder(bootstrapConfig) } From 4f093b9a5afa5f3c8f29774dbdce8c02ce516d70 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 15 Sep 2021 14:47:18 -0700 Subject: [PATCH 238/998] ringhash: the balancer (#4741) --- xds/internal/balancer/ringhash/logging.go | 34 ++ xds/internal/balancer/ringhash/picker.go | 17 +- xds/internal/balancer/ringhash/ringhash.go | 363 +++++++++++++-- .../balancer/ringhash/ringhash_test.go | 420 ++++++++++++++++++ xds/internal/test/xds_client_affinity_test.go | 136 ++++++ 5 files changed, 938 insertions(+), 32 deletions(-) create mode 100644 xds/internal/balancer/ringhash/logging.go create mode 100644 xds/internal/test/xds_client_affinity_test.go diff --git a/xds/internal/balancer/ringhash/logging.go b/xds/internal/balancer/ringhash/logging.go new file mode 100644 index 000000000000..64a1d467f554 --- /dev/null +++ b/xds/internal/balancer/ringhash/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[ring-hash-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *ringhashBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/xds/internal/balancer/ringhash/picker.go b/xds/internal/balancer/ringhash/picker.go index 6d035b0c1911..dcea6d46e517 100644 --- a/xds/internal/balancer/ringhash/picker.go +++ b/xds/internal/balancer/ringhash/picker.go @@ -24,11 +24,17 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/status" ) type picker struct { - ring *ring + ring *ring + logger *grpclog.PrefixLogger +} + +func newPicker(ring *ring, logger *grpclog.PrefixLogger) *picker { + return &picker{ring: ring, logger: logger} } // handleRICSResult is the return type of handleRICS. It's needed to wrap the @@ -47,7 +53,7 @@ type handleRICSResult struct { // The first return value indicates if the state is in Ready, Idle, Connecting // or Shutdown. If it's true, the PickResult and error should be returned from // Pick() as is. -func handleRICS(e *ringEntry) (handleRICSResult, bool) { +func (p *picker) handleRICS(e *ringEntry) (handleRICSResult, bool) { switch state := e.sc.effectiveState(); state { case connectivity.Ready: return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true @@ -67,15 +73,14 @@ func handleRICS(e *ringEntry) (handleRICSResult, bool) { default: // Should never reach this. All the connectivity states are already // handled in the cases. - // - // FIXME: add an error log. 
+ p.logger.Errorf("SubConn has undefined connectivity state: %v", state) return handleRICSResult{err: status.Errorf(codes.Unavailable, "SubConn has undefined connectivity state: %v", state)}, true } } func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { e := p.ring.pick(getRequestHash(info.Ctx)) - if hr, ok := handleRICS(e); ok { + if hr, ok := p.handleRICS(e); ok { return hr.pr, hr.err } // ok was false, the entry is in transient failure. @@ -95,7 +100,7 @@ func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, erro // For the second SubConn, also check Ready/Idle/Connecting as if it's the // first entry. - if hr, ok := handleRICS(e2); ok { + if hr, ok := p.handleRICS(e2); ok { return hr.pr, hr.err } diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index b87cce64801b..f8a47f165bdf 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -20,15 +20,50 @@ package ringhash import ( + "encoding/json" + "errors" + "fmt" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) // Name is the name of the ring_hash balancer. 
const Name = "ring_hash_experimental" +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &ringhashBalancer{ + cc: cc, + subConns: make(map[resolver.Address]*subConn), + scStates: make(map[balancer.SubConn]*subConn), + csEvltr: &connectivityStateEvaluator{}, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + type subConn struct { addr string sc balancer.SubConn @@ -37,26 +72,25 @@ type subConn struct { // This is the actual state of this SubConn (as updated by the ClientConn). // The effective state can be different, see comment of attemptedToConnect. state connectivity.State - // attemptedToConnect is whether this SubConn has attempted to connect ever. - // So that only the initial Idle is Idle, after any attempt to connect, - // following Idles are all TransientFailure. + // failing is whether this SubConn is in a failing state. A subConn is + // considered to be in a failing state if it was previously in + // TransientFailure. // - // This affects the effective connectivity state of this SubConn, e.g. if - // the actual state is Idle, but this SubConn has attempted to connect, the - // effective state is TransientFailure. + // This affects the effective connectivity state of this SubConn, e.g. + // - if the actual state is Idle or Connecting, but this SubConn is failing, + // the effective state is TransientFailure. // - // This is used in pick(). E.g. if a subConn is Idle, but has - // attemptedToConnect as true, pick() will + // This is used in pick(). E.g. if a subConn is Idle, but has failing as + // true, pick() will // - consider this SubConn as TransientFailure, and check the state of the // next SubConn. 
// - trigger Connect() (note that normally a SubConn in real // TransientFailure cannot Connect()) // - // Note this should only be set when updating the state (from Idle to - // anything else), not when Connect() is called, because there's a small - // window after the first Connect(), before the state switches to something - // else. - attemptedToConnect bool + // A subConn starts in non-failing (failing is false). A transition to + // TransientFailure sets failing to true (and it stays true). A transition + // to Ready sets failing to false. + failing bool // connectQueued is true if a Connect() was queued for this SubConn while // it's not in Idle (most likely was in TransientFailure). A Connect() will // be triggered on this SubConn when it turns Idle. @@ -66,20 +100,13 @@ type subConn struct { connectQueued bool } -// SetState updates the state of this SubConn. +// setState updates the state of this SubConn. // // It also handles the queued Connect(). If the new state is Idle, and a // Connect() was queued, this SubConn will be triggered to Connect(). -// -// FIXME: unexport this. It's exported so that staticcheck doesn't complain -// about unused functions. -func (sc *subConn) SetState(s connectivity.State) { +func (sc *subConn) setState(s connectivity.State) { sc.mu.Lock() defer sc.mu.Unlock() - // Any state change to non-Idle means there was an attempt to connect. - if s != connectivity.Idle { - sc.attemptedToConnect = true - } switch s { case connectivity.Idle: // Trigger Connect() if new state is Idle, and there is a queued connect. @@ -87,21 +114,30 @@ func (sc *subConn) SetState(s connectivity.State) { sc.connectQueued = false sc.sc.Connect() } - case connectivity.Connecting, connectivity.Ready: + case connectivity.Connecting: // Clear connectQueued if the SubConn isn't failing. This state // transition is unlikely to happen, but handle this just in case. 
sc.connectQueued = false + case connectivity.Ready: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + // Set to a non-failing state. + sc.failing = false + case connectivity.TransientFailure: + // Set to a failing state. + sc.failing = true } sc.state = s } // effectiveState returns the effective state of this SubConn. It can be -// different from the actual state, e.g. Idle after any attempt to connect (any -// Idle other than the initial Idle) is considered TransientFailure. +// different from the actual state, e.g. Idle while the subConn is failing is +// considered TransientFailure. Read comment of field failing for other cases. func (sc *subConn) effectiveState() connectivity.State { sc.mu.RLock() defer sc.mu.RUnlock() - if sc.state == connectivity.Idle && sc.attemptedToConnect { + if sc.failing && (sc.state == connectivity.Idle || sc.state == connectivity.Connecting) { return connectivity.TransientFailure } return sc.state @@ -121,3 +157,278 @@ func (sc *subConn) queueConnect() { // after backoff in TransientFailure), it will Connect(). sc.connectQueued = true } + +type ringhashBalancer struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + config *LBConfig + + subConns map[resolver.Address]*subConn // `attributes` is stripped from the keys of this map (the addresses) + scStates map[balancer.SubConn]*subConn + + // ring is always in sync with subConns. When subConns change, a new ring is + // generated. Note that address weights updates (they are keys in the + // subConns map) also regenerates the ring. 
+ ring *ring + picker balancer.Picker + csEvltr *connectivityStateEvaluator + state connectivity.State + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +// updateAddresses creates new SubConns and removes SubConns, based on the +// address update. +// +// The return value is whether the new address list is different from the +// previous. True if +// - an address was added +// - an address was removed +// - an address's weight was updated +// +// Note that this function doesn't trigger SubConn connecting, so all the new +// SubConn states are Idle. +func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { + var addrsUpdated bool + // addrsSet is the set converted from addrs, it's used for quick lookup of + // an address. + // + // Addresses in this map all have attributes stripped, but metadata set to + // the weight. So that weight change can be detected. + // + // TODO: this won't be necessary if there are ways to compare address + // attributes. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + aNoAttrs := a + // Strip attributes but set Metadata to the weight. + aNoAttrs.Attributes = nil + w := weightedroundrobin.GetAddrInfo(a).Weight + if w == 0 { + // If weight is not set, use 1. + w = 1 + } + aNoAttrs.Metadata = w + addrsSet[aNoAttrs] = struct{}{} + if scInfo, ok := b.subConns[aNoAttrs]; !ok { + // When creating SubConn, the original address with attributes is + // passed through. So that connection configurations in attributes + // (like creds) will be used. 
+ sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + scs := &subConn{addr: a.Addr, sc: sc} + scs.setState(connectivity.Idle) + b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) + b.subConns[aNoAttrs] = scs + b.scStates[sc] = scs + addrsUpdated = true + } else { + // Always update the subconn's address in case the attributes + // changed. The SubConn does a reflect.DeepEqual of the new and old + // addresses. So this is a noop if the current address is the same + // as the old one (including attributes). + b.subConns[aNoAttrs] = scInfo + b.cc.UpdateAddresses(scInfo.sc, []resolver.Address{a}) + } + } + for a, scInfo := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(scInfo.sc) + delete(b.subConns, a) + addrsUpdated = true + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + return addrsUpdated +} + +func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + if b.config == nil { + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + b.config = newConfig + } + + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + if b.updateAddresses(s.ResolverState.Addresses) { + // If addresses were updated, no matter whether it resulted in SubConn + // creation/deletion, or just weight update, we will need to regenerate + // the ring. 
+ var err error + b.ring, err = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) + if err != nil { + panic(err) + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + return nil +} + +func (b *ringhashBalancer) ResolverError(err error) { + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +// UpdateSubConnState updates the per-SubConn state stored in the ring, and also +// the aggregated state. +// +// It triggers an update to cc when: +// - the new state is TransientFailure, to update the error message +// - it's possible that this is a noop, but sending an extra update is easier +// than comparing errors +// - the aggregated state is changed +// - the same picker will be sent again, but this update may trigger a re-pick +// for some RPCs. 
+func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + b.logger.Infof("handle SubConn state change: %p, %v", sc, s) + scs, ok := b.scStates[sc] + if !ok { + b.logger.Infof("got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + oldSCState := scs.effectiveState() + scs.setState(s) + newSCState := scs.effectiveState() + + var sendUpdate bool + oldBalancerState := b.state + b.state = b.csEvltr.recordTransition(oldSCState, newSCState) + if oldBalancerState != b.state { + sendUpdate = true + } + + switch s { + case connectivity.Idle: + // When the overall state is TransientFailure, this will never get picks + // if there's a lower priority. Need to keep the SubConns connecting so + // there's a chance it will recover. + if b.state == connectivity.TransientFailure { + scs.queueConnect() + } + // No need to send an update. No queued RPC can be unblocked. If the + // overall state changed because of this, sendUpdate is already true. + case connectivity.Connecting: + // No need to send an update. No queued RPC can be unblocked. If the + // overall state changed because of this, sendUpdate is already true. + case connectivity.Ready: + // Resend the picker, there's no need to regenerate the picker because + // the ring didn't change. + sendUpdate = true + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + // Regenerate picker to update error message. + b.regeneratePicker() + sendUpdate = true + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + if sendUpdate { + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. 
Must only be called if b.state is TransientFailure. +func (b *ringhashBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *ringhashBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = base.NewErrPicker(b.mergeErrors()) + return + } + b.picker = newPicker(b.ring, b.logger) +} + +func (b *ringhashBalancer) Close() {} + +// connectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type connectivityStateEvaluator struct { + nums [5]uint64 +} + +// recordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If there is at least one subchannel in READY state, report READY. +// - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE. +// - If there is at least one subchannel in CONNECTING state, report CONNECTING. +// - If there is at least one subchannel in Idle state, report Idle. +// - Otherwise, report TRANSIENT_FAILURE. +// +// Note that if there are 1 connecting, 2 transient failure, the overall state +// is transient failure. This is because the second transient failure is a +// fallback of the first failing SubConn, and we want to report transient +// failure to failover to the lower priority. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. 
+ for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + cse.nums[state] += updateVal + } + + if cse.nums[connectivity.Ready] > 0 { + return connectivity.Ready + } + if cse.nums[connectivity.TransientFailure] > 1 { + return connectivity.TransientFailure + } + if cse.nums[connectivity.Connecting] > 0 { + return connectivity.Connecting + } + if cse.nums[connectivity.Idle] > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go index bf5da95bf8b3..fb85367e4a41 100644 --- a/xds/internal/balancer/ringhash/ringhash_test.go +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -19,10 +19,18 @@ package ringhash import ( + "context" + "fmt" + "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/testutils" ) @@ -34,5 +42,417 @@ var ( ) const ( + defaultTestTimeout = 10 * time.Second defaultTestShortTimeout = 10 * time.Millisecond + + testBackendAddrsCount = 12 ) + +var ( + testBackendAddrStrs []string + testConfig = &LBConfig{MinRingSize: 1, MaxRingSize: 10} +) + +func init() { + for i := 0; i < testBackendAddrsCount; i++ { + testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) + } +} + +func ctxWithHash(h uint64) context.Context { + return SetRequestHash(context.Background(), h) +} + +// setupTest creates the balancer, and does an initial sanity check. 
+func setupTest(t *testing.T, addrs []resolver.Address) (*testutils.TestClientConn, balancer.Balancer, balancer.Picker) { + t.Helper() + cc := testutils.NewTestClientConn(t) + builder := balancer.Get(Name) + b := builder.Build(cc, balancer.BuildOptions{}) + if b == nil { + t.Fatalf("builder.Build(%s) failed and returned nil", Name) + } + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: addrs}, + BalancerConfig: testConfig, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + + for _, addr := range addrs { + addr1 := <-cc.NewSubConnAddrsCh + if want := []resolver.Address{addr}; !cmp.Equal(addr1, want, cmp.AllowUnexported(attributes.Attributes{})) { + t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr1, want, cmp.AllowUnexported(attributes.Attributes{}))) + } + sc1 := <-cc.NewSubConnCh + // All the SubConns start in Idle, and should not Connect(). + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + t.Errorf("unexpected Connect() from SubConn %v", sc1) + case <-time.After(defaultTestShortTimeout): + } + } + + // Should also have a picker, with all SubConns in Idle. + p1 := <-cc.NewPickerCh + return cc, b, p1 +} + +func TestOneSubConn(t *testing.T) { + wantAddr1 := resolver.Address{Addr: testBackendAddrStrs[0]} + cc, b, p0 := setupTest(t, []resolver.Address{wantAddr1}) + ring0 := p0.(*picker).ring + + firstHash := ring0.items[0].hash + // firstHash-1 will pick the first (and only) SubConn from the ring. + testHash := firstHash - 1 + // The first pick should be queued, and should trigger Connect() on the only + // SubConn. 
+ if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + sc0 := ring0.items[0].sc.sc + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // Send state updates to Ready. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // Test pick with one backend. + p1 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } +} + +// TestThreeBackendsAffinity covers that there are 3 SubConns, RPCs with the +// same hash always pick the same SubConn. When the one picked is down, another +// one will be picked. +func TestThreeSubConnsAffinity(t *testing.T) { + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + {Addr: testBackendAddrStrs[2]}, + } + cc, b, p0 := setupTest(t, wantAddrs) + // This test doesn't update addresses, so this ring will be used by all the + // pickers. + ring0 := p0.(*picker).ring + + firstHash := ring0.items[0].hash + // firstHash+1 will pick the second SubConn from the ring. + testHash := firstHash + 1 + // The first pick should be queued, and should trigger Connect() on the only + // SubConn. + if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + // The picked SubConn should be the second in the ring. 
+ sc0 := ring0.items[1].sc.sc + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // Send state updates to Ready. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p1 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } + + // Turn down the subConn in use. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + p2 := <-cc.NewPickerCh + // Pick with the same hash should be queued, because the SubConn after the + // first picked is Idle. + if _, err := p2.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + + // The third SubConn in the ring should connect. + sc1 := ring0.items[2].sc.sc + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc1) + } + + // Send state updates to Ready. + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // New picks should all return this SubConn. 
+ p3 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p3.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + + // Now, after backoff, the first picked SubConn will turn Idle. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + // The picks above should have queued Connect() for the first picked + // SubConn, so this Idle state change will trigger a Connect(). + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // After the first picked SubConn turn Ready, new picks should return it + // again (even though the second picked SubConn is also Ready). + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p4 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p4.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } +} + +// TestThreeBackendsAffinity covers that there are 3 SubConns, RPCs with the +// same hash always pick the same SubConn. Then try different hash to pick +// another backend, and verify the first hash still picks the first backend. +func TestThreeSubConnsAffinityMultiple(t *testing.T) { + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + {Addr: testBackendAddrStrs[2]}, + } + cc, b, p0 := setupTest(t, wantAddrs) + // This test doesn't update addresses, so this ring will be used by all the + // pickers. 
+ ring0 := p0.(*picker).ring + + firstHash := ring0.items[0].hash + // firstHash+1 will pick the second SubConn from the ring. + testHash := firstHash + 1 + // The first pick should be queued, and should trigger Connect() on the only + // SubConn. + if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + sc0 := ring0.items[1].sc.sc + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // Send state updates to Ready. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // First hash should always pick sc0. + p1 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } + + secondHash := ring0.items[1].hash + // secondHash+1 will pick the third SubConn from the ring. + testHash2 := secondHash + 1 + if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash2)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + sc1 := ring0.items[2].sc.sc + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc1) + } + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // With the new generated picker, hash2 always picks sc1. 
+ p2 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p2.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash2)}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + // But the first hash still picks sc0. + for i := 0; i < 5; i++ { + gotSCSt, _ := p2.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } +} + +func TestAddrWeightChange(t *testing.T) { + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + {Addr: testBackendAddrStrs[2]}, + } + cc, b, p0 := setupTest(t, wantAddrs) + ring0 := p0.(*picker).ring + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: wantAddrs}, + BalancerConfig: nil, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + select { + case <-cc.NewPickerCh: + t.Fatalf("unexpected picker after UpdateClientConn with the same addresses") + case <-time.After(defaultTestShortTimeout): + } + + // Delete an address, should send a new Picker. + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + }}, + BalancerConfig: nil, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + var p1 balancer.Picker + select { + case p1 = <-cc.NewPickerCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for picker after UpdateClientConn with different addresses") + } + ring1 := p1.(*picker).ring + if ring1 == ring0 { + t.Fatalf("new picker after removing address has the same ring as before, want different") + } + + // Another update with the same addresses, but different weight. 
+ if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + weightedroundrobin.SetAddrInfo( + resolver.Address{Addr: testBackendAddrStrs[1]}, + weightedroundrobin.AddrInfo{Weight: 2}), + }}, + BalancerConfig: nil, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + var p2 balancer.Picker + select { + case p2 = <-cc.NewPickerCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for picker after UpdateClientConn with different addresses") + } + if p2.(*picker).ring == ring1 { + t.Fatalf("new picker after changing address weight has the same ring as before, want different") + } +} + +// TestSubConnToConnectWhenOverallTransientFailure covers the situation when the +// overall state is TransientFailure, the SubConns turning Idle will be +// triggered to Connect(). But not when the overall state is not +// TransientFailure. +func TestSubConnToConnectWhenOverallTransientFailure(t *testing.T) { + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + {Addr: testBackendAddrStrs[2]}, + } + _, b, p0 := setupTest(t, wantAddrs) + ring0 := p0.(*picker).ring + + // Turn all SubConns to TransientFailure. + for _, it := range ring0.items { + b.UpdateSubConnState(it.sc.sc, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + } + + // The next one turning Idle should Connect(). + sc0 := ring0.items[0].sc.sc + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // If this SubConn is ready. Other SubConns turning Idle will not Connect(). 
+ b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // The third SubConn in the ring should connect. + sc1 := ring0.items[1].sc.sc + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + t.Errorf("unexpected Connect() from SubConn %v", sc1) + case <-time.After(defaultTestShortTimeout): + } +} + +func TestConnectivityStateEvaluatorRecordTransition(t *testing.T) { + tests := []struct { + name string + from, to []connectivity.State + want connectivity.State + }{ + { + name: "one ready", + from: []connectivity.State{connectivity.Idle}, + to: []connectivity.State{connectivity.Ready}, + want: connectivity.Ready, + }, + { + name: "one connecting", + from: []connectivity.State{connectivity.Idle}, + to: []connectivity.State{connectivity.Connecting}, + want: connectivity.Connecting, + }, + { + name: "one ready one transient failure", + from: []connectivity.State{connectivity.Idle, connectivity.Idle}, + to: []connectivity.State{connectivity.Ready, connectivity.TransientFailure}, + want: connectivity.Ready, + }, + { + name: "one connecting one transient failure", + from: []connectivity.State{connectivity.Idle, connectivity.Idle}, + to: []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, + want: connectivity.Connecting, + }, + { + name: "one connecting two transient failure", + from: []connectivity.State{connectivity.Idle, connectivity.Idle, connectivity.Idle}, + to: []connectivity.State{connectivity.Connecting, connectivity.TransientFailure, connectivity.TransientFailure}, + want: connectivity.TransientFailure, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cse := &connectivityStateEvaluator{} + var got connectivity.State + for i, fff := range tt.from { + ttt := tt.to[i] + got = 
cse.recordTransition(fff, ttt) + } + if got != tt.want { + t.Errorf("recordTransition() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/xds/internal/test/xds_client_affinity_test.go b/xds/internal/test/xds_client_affinity_test.go new file mode 100644 index 000000000000..e9ddfe157b12 --- /dev/null +++ b/xds/internal/test/xds_client_affinity_test.go @@ -0,0 +1,136 @@ +//go:build !386 +// +build !386 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/xds/env" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/xds/internal/testutils/e2e" +) + +const hashHeaderName = "session_id" + +// hashRouteConfig returns a RouteConfig resource with hash policy set to +// header "session_id". 
+func hashRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{{ + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: hashHeaderName, + }, + }, + Terminal: true, + }}, + }}, + }}, + }}, + } +} + +// ringhashCluster returns a Cluster resource that picks ringhash as the lb +// policy. +func ringhashCluster(clusterName, edsServiceName string) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: edsServiceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + } +} + +// TestClientSideAffinitySanityCheck tests that the affinity config can be +// propagated to pick the ring_hash policy. It doesn't test the affinity +// behavior in ring_hash policy. 
+func (s) TestClientSideAffinitySanityCheck(t *testing.T) { + defer func() func() { + old := env.RingHashSupport + env.RingHashSupport = true + return func() { env.RingHashSupport = old } + }()() + + managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + port, cleanup2 := clientSetup(t, &testService{}) + defer cleanup2() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + // Replace RDS and CDS resources with ringhash config, but keep the resource + // names. + resources.Routes = []*v3routepb.RouteConfiguration{hashRouteConfig( + resources.Routes[0].Name, + resources.Listeners[0].Name, + resources.Clusters[0].Name, + )} + resources.Clusters = []*v3clusterpb.Cluster{ringhashCluster( + resources.Clusters[0].Name, + resources.Clusters[0].EdsClusterConfig.ServiceName, + )} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. 
+ cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} From 7cf9689be2d2b1e7f00dfc15d2516b7635c65c45 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 15 Sep 2021 15:38:01 -0700 Subject: [PATCH 239/998] xds: validations for security config, as specified in A29 (#4762) * xds: validations for security config, as specified in A29 * make vet happy * fix error log * fix error msg in test --- xds/internal/xdsclient/cds_test.go | 240 ++++++++++++++++++++ xds/internal/xdsclient/filter_chain.go | 11 + xds/internal/xdsclient/filter_chain_test.go | 63 +++++ xds/internal/xdsclient/xds.go | 82 +++++-- 4 files changed, 378 insertions(+), 18 deletions(-) diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index dc29cffac3b5..177665bd615d 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -20,6 +20,7 @@ package xdsclient import ( "regexp" + "strings" "testing" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" @@ -480,6 +481,176 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { } } +func (s) TestSecurityConfigFromCommonTLSContextUsingNewFields_ErrorCases(t *testing.T) { + tests := []struct { + name string + common *v3tlspb.CommonTlsContext + server bool + wantErr string + }{ + { + name: "unsupported-tls_certificates-field-for-identity-certs", + common: &v3tlspb.CommonTlsContext{ + TlsCertificates: []*v3tlspb.TlsCertificate{ + {CertificateChain: &v3corepb.DataSource{}}, + }, + }, + wantErr: "unsupported field tls_certificates is set in CommonTlsContext message", + }, + { 
+ name: "unsupported-tls_certificates_sds_secret_configs-field-for-identity-certs", + common: &v3tlspb.CommonTlsContext{ + TlsCertificateSdsSecretConfigs: []*v3tlspb.SdsSecretConfig{ + {Name: "sds-secrets-config"}, + }, + }, + wantErr: "unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message", + }, + { + name: "unsupported-sds-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + wantErr: "validation context contains unexpected type", + }, + { + name: "missing-ca_certificate_provider_instance-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{}, + }, + }, + wantErr: "expected field ca_certificate_provider_instance is missing in CommonTlsContext message", + }, + { + name: "unsupported-field-verify_certificate_spki-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + VerifyCertificateSpki: []string{"spki"}, + }, + }, + }, + wantErr: "unsupported verify_certificate_spki field in CommonTlsContext message", + }, + { + name: "unsupported-field-verify_certificate_hash-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + 
VerifyCertificateHash: []string{"hash"}, + }, + }, + }, + wantErr: "unsupported verify_certificate_hash field in CommonTlsContext message", + }, + { + name: "unsupported-field-require_signed_certificate_timestamp-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + RequireSignedCertificateTimestamp: &wrapperspb.BoolValue{Value: true}, + }, + }, + }, + wantErr: "unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message", + }, + { + name: "unsupported-field-crl-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + Crl: &v3corepb.DataSource{}, + }, + }, + }, + wantErr: "unsupported crl field in CommonTlsContext message", + }, + { + name: "unsupported-field-custom_validator_config-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + CustomValidatorConfig: &v3corepb.TypedExtensionConfig{}, + }, + }, + }, + wantErr: "unsupported custom_validator_config field in CommonTlsContext message", + }, + { + name: "invalid-match_subject_alt_names-field-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + 
ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: ""}}, + }, + }, + }, + }, + wantErr: "empty prefix is not allowed in StringMatcher", + }, + { + name: "unsupported-field-matching-subject-alt-names-in-validation-context-of-server", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "sanPrefix"}}, + }, + }, + }, + }, + server: true, + wantErr: "match_subject_alt_names field in validation context is not supported on the server", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := securityConfigFromCommonTLSContextUsingNewFields(test.common, test.server) + if err == nil { + t.Fatal("securityConfigFromCommonTLSContextUsingNewFields() succeeded when expected to fail") + } + if !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("securityConfigFromCommonTLSContextUsingNewFields() returned err: %v, wantErr: %v", err, test.wantErr) + } + }) + } +} + func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { const ( identityPluginInstance = "identityPluginInstance" @@ -503,6 +674,25 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { wantUpdate ClusterUpdate wantErr bool }{ + { + name: "transport-socket-matches", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: 
&v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocketMatches: []*v3clusterpb.Cluster_TransportSocketMatch{ + {Name: "transport-socket-match-1"}, + }, + }, + wantErr: true, + }, { name: "transport-socket-unsupported-name", cluster: &v3clusterpb.Cluster{ @@ -574,6 +764,56 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, wantErr: true, }, + { + name: "transport-socket-unsupported-tls-params-field", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsParams: &v3tlspb.TlsParameters{}, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-unsupported-custom-handshaker-field", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: 
&v3tlspb.CommonTlsContext{ + CustomHandshaker: &v3corepb.TypedExtensionConfig{}, + }, + }), + }, + }, + }, + wantErr: true, + }, { name: "transport-socket-unsupported-validation-context", cluster: &v3clusterpb.Cluster{ diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 5b2fd79973ad..87aea9e46e1d 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -519,6 +519,17 @@ func (fci *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) } + if downstreamCtx.GetRequireSni().GetValue() { + return nil, fmt.Errorf("require_sni field set to true in DownstreamTlsContext message: %v", downstreamCtx) + } + if downstreamCtx.GetOcspStaplePolicy() != v3tlspb.DownstreamTlsContext_LENIENT_STAPLING { + return nil, fmt.Errorf("ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message: %v", downstreamCtx) + } + // The following fields from `DownstreamTlsContext` are ignore: + // - disable_stateless_session_resumption + // - session_ticket_keys + // - session_ticket_keys_sds_secret_config + // - session_timeout if downstreamCtx.GetCommonTlsContext() == nil { return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") } diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index fd6b3c224276..5219e34cb105 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -372,6 +372,44 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { }, wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", }, + { + desc: "require_sni-set-to-true-in-downstreamTlsContext", + lis: &v3listenerpb.Listener{ + FilterChains: 
[]*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireSni: &wrapperspb.BoolValue{Value: true}, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "require_sni field set to true in DownstreamTlsContext message", + }, + { + desc: "unsupported-ocsp_staple_policy-in-downstreamTlsContext", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + OcspStaplePolicy: v3tlspb.DownstreamTlsContext_STRICT_STAPLING, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message", + }, { desc: "unsupported validation context in transport socket", lis: &v3listenerpb.Listener{ @@ -397,6 +435,31 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { }, wantErr: "validation context contains unexpected type", }, + { + desc: "unsupported match_subject_alt_names field in transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "validation context contains unexpected type", + }, 
{ desc: "no root certificate provider with require_client_cert", lis: &v3listenerpb.Listener{ diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index c2d627c4f25f..686c52a350ff 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -794,6 +794,9 @@ func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { // securityConfigFromCluster extracts the relevant security configuration from // the received Cluster resource. func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { + if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { + return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm) + } // The Cluster resource contains a `transport_socket` field, which contains // a oneof `typed_config` field of type `protobuf.Any`. The any proto // contains a marshaled representation of an `UpstreamTlsContext` message. @@ -812,6 +815,10 @@ func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, e if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) } + // The following fields from `UpstreamTlsContext` are ignored: + // - sni + // - allow_renegotiation + // - max_session_keys if upstreamCtx.GetCommonTlsContext() == nil { return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") } @@ -820,16 +827,22 @@ func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, e } // common is expected to be not nil. +// The `alpn_protocols` field is ignored. 
func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { - sc, err := securityConfigFromCommonTLSContextUsingNewFields(common) - if err != nil { - return nil, err + if common.GetTlsParams() != nil { + return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common) + } + if common.GetCustomHandshaker() != nil { + return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common) } + + // For now, if we can't get a valid security config from the new fields, we + // fallback to the old deprecated fields. + // TODO: Drop support for deprecated fields. NACK if err != nil here. + sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server) if sc == nil || sc.Equal(&SecurityConfig{}) { - // If we can't get a valid security config from the new fields, we - // fallback to the old deprecated fields. - // TODO(easwars): Remove this once TD starts populating the new fields. 
- sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common) + var err error + sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server) if err != nil { return nil, err } @@ -850,7 +863,7 @@ func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server return sc, nil } -func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext) (*SecurityConfig, error) { +func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { // The `CommonTlsContext` contains a // `tls_certificate_certificate_provider_instance` field of type // `CertificateProviderInstance`, which contains the provider instance name @@ -883,6 +896,9 @@ func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.Comm matchers = append(matchers, matcher) } } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } sc.SubjectAltNameMatchers = matchers if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { sc.RootInstanceName = pi.GetInstanceName() @@ -909,15 +925,20 @@ func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.Comm // // This helper function attempts to fetch security configuration from the // `CertificateProviderPluginInstance` message, given a CommonTlsContext. -func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext) (*SecurityConfig, error) { +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { // The `tls_certificate_provider_instance` field of type // `CertificateProviderPluginInstance` is used to fetch the identity // certificate provider. 
sc := &SecurityConfig{} - if identity := common.GetTlsCertificateProviderInstance(); identity != nil { - sc.IdentityInstanceName = identity.GetInstanceName() - sc.IdentityCertName = identity.GetCertificateName() + identity := common.GetTlsCertificateProviderInstance() + if identity == nil && len(common.GetTlsCertificates()) != 0 { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) + } + if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) } + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() // The `CommonTlsContext` contains a oneof field `validation_context_type`, // which contains the `CertificateValidationContext` message in one of the @@ -943,7 +964,7 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC // - certificate_name // - this is an opaque name passed to the certificate provider var validationCtx *v3tlspb.CertificateValidationContext - switch common.GetValidationContextType().(type) { + switch typ := common.GetValidationContextType().(type) { case *v3tlspb.CommonTlsContext_ValidationContext: validationCtx = common.GetValidationContext() case *v3tlspb.CommonTlsContext_CombinedValidationContext: @@ -951,11 +972,33 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC case nil: // It is valid for the validation context to be nil on the server side. return sc, nil - } - if validationCtx == nil || validationCtx.GetCaCertificateProviderInstance() == nil { - // Bail out if the `CertificateProviderPluginInstance` message is not - // found through one of the way detailed above. 
- return nil, nil + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) + } + // If we get here, it means that the `CertificateValidationContext` message + // was found through one of the supported ways. It is an error if the + // validation context is specified, but it does not contain the + // ca_certificate_provider_instance field which contains information about + // the certificate provider to be used for the root certificates. + if validationCtx.GetCaCertificateProviderInstance() == nil { + return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common) + } + // The following fields are ignored: + // - trusted_ca + // - watched_directory + // - allow_expired_certificate + // - trust_chain_verification + switch { + case len(validationCtx.GetVerifyCertificateSpki()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common) + case len(validationCtx.GetVerifyCertificateHash()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) + case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): + return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common) + case validationCtx.GetCrl() != nil: + return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) + case validationCtx.GetCustomValidatorConfig() != nil: + return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common) } if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { @@ -970,6 +1013,9 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC } matchers = append(matchers, matcher) } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in 
validation context is not supported on the server: %v", common) + } sc.SubjectAltNameMatchers = matchers return sc, nil } From b186ee8975f3c69bc36333a99fc82d1388977012 Mon Sep 17 00:00:00 2001 From: Ed Warnicke Date: Thu, 16 Sep 2021 09:59:36 -0500 Subject: [PATCH 240/998] test/bufconn: add Listener.DialContext(context.Context) (#4763) --- test/bufconn/bufconn.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/bufconn/bufconn.go b/test/bufconn/bufconn.go index 168cdb8578dd..3f77f4876eb8 100644 --- a/test/bufconn/bufconn.go +++ b/test/bufconn/bufconn.go @@ -21,6 +21,7 @@ package bufconn import ( + "context" "fmt" "io" "net" @@ -86,8 +87,17 @@ func (l *Listener) Addr() net.Addr { return addr{} } // providing it the server half of the connection, and returns the client half // of the connection. func (l *Listener) Dial() (net.Conn, error) { + return l.DialContext(context.Background()) +} + +// DialContext creates an in-memory full-duplex network connection, unblocks Accept by +// providing it the server half of the connection, and returns the client half +// of the connection. 
If ctx is Done, returns ctx.Err() +func (l *Listener) DialContext(ctx context.Context) (net.Conn, error) { p1, p2 := newPipe(l.sz), newPipe(l.sz) select { + case <-ctx.Done(): + return nil, ctx.Err() case <-l.done: return nil, errClosed case l.ch <- &conn{p1, p2}: From 03b2ebe5080c2b521c742cf6e06bd0824b75fc52 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 16 Sep 2021 11:07:04 -0700 Subject: [PATCH 241/998] xds: enable ringhash and retry by default (#4776) --- .github/workflows/testing.yml | 4 --- internal/xds/env/env.go | 22 +++--------- test/retry_test.go | 64 ----------------------------------- 3 files changed, 5 insertions(+), 85 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 9a45ba040853..5e1e09cdc697 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -47,10 +47,6 @@ jobs: goversion: 1.17 testflags: -race - - type: tests - goversion: 1.17 - grpcenv: GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY=true - - type: extras goversion: 1.17 diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 7f879b44da00..5bd1e4e857ff 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -62,16 +62,16 @@ var ( // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) // RingHashSupport indicates whether ring hash support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "true". - RingHashSupport = strings.EqualFold(os.Getenv(ringHashSupportEnv), "true") + // be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + RingHashSupport = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") // ClientSideSecuritySupport is used to control processing of security // configuration on the client-side. 
// // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. - ClientSideSecuritySupport = strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "true") + ClientSideSecuritySupport = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") // AggregateAndDNSSupportEnv indicates whether processing of aggregated // cluster and DNS cluster is enabled, which can be enabled by setting the // environment variable @@ -80,7 +80,7 @@ var ( AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") // RetrySupport indicates whether xDS retry is enabled. - RetrySupport = strings.EqualFold(os.Getenv(retrySupportEnv), "true") + RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") // C2PResolverSupport indicates whether support for C2P resolver is enabled. // This can be enabled by setting the environment variable @@ -89,15 +89,3 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) - -func init() { - // Set the env var used to control processing of security configuration on - // the client-side to true by default. - // TODO(easwars): Remove this env var completely in 1.42.x release. - // - // If the env var is set explicitly, honor it. 
- ClientSideSecuritySupport = true - if val, ok := os.LookupEnv(clientSideSecuritySupportEnv); ok { - ClientSideSecuritySupport = strings.EqualFold(val, "true") - } -} diff --git a/test/retry_test.go b/test/retry_test.go index 156f92fa5454..dcd3a2158db8 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -23,7 +23,6 @@ import ( "fmt" "io" "net" - "os" "reflect" "strconv" "strings" @@ -116,69 +115,6 @@ func (s) TestRetryUnary(t *testing.T) { } } -func (s) TestRetryDisabledByDefault(t *testing.T) { - if strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on") || - strings.EqualFold(os.Getenv("GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY"), "true") { - return - } - i := -1 - ss := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - i++ - switch i { - case 0: - return nil, status.New(codes.AlreadyExists, "retryable error").Err() - } - return &testpb.Empty{}, nil - }, - } - if err := ss.Start([]grpc.ServerOption{}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ - "methodConfig": [{ - "name": [{"service": "grpc.testing.TestService"}], - "waitForReady": true, - "retryPolicy": { - "MaxAttempts": 4, - "InitialBackoff": ".01s", - "MaxBackoff": ".01s", - "BackoffMultiplier": 1.0, - "RetryableStatusCodes": [ "ALREADY_EXISTS" ] - } - }]}`) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - for { - if ctx.Err() != nil { - t.Fatalf("Timed out waiting for service config update") - } - if ss.CC.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { - break - } - time.Sleep(time.Millisecond) - } - cancel() - - testCases := []struct { - code codes.Code - count int - }{ - {codes.AlreadyExists, 0}, - } - for _, tc := range testCases { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) - cancel() - if status.Code(err) != tc.code { - t.Fatalf("EmptyCall(_, 
_) = _, %v; want _, ", err, tc.code) - } - if i != tc.count { - t.Fatalf("i = %v; want %v", i, tc.count) - } - } -} - func (s) TestRetryThrottling(t *testing.T) { defer enableRetry()() i := -1 From 567da6b86340a83d509467638c91e68168bc1921 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 16 Sep 2021 13:38:35 -0700 Subject: [PATCH 242/998] tlogger: print log type (#4774) Error logs cause tests to fail. This makes it easier (possible) to find the error log --- internal/grpctest/tlogger.go | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/internal/grpctest/tlogger.go b/internal/grpctest/tlogger.go index f1712245ec4a..bbb2a2ff4fb0 100644 --- a/internal/grpctest/tlogger.go +++ b/internal/grpctest/tlogger.go @@ -41,8 +41,23 @@ const callingFrame = 4 type logType int +func (l logType) String() string { + switch l { + case infoLog: + return "INFO" + case warningLog: + return "WARNING" + case errorLog: + return "ERROR" + case fatalLog: + return "FATAL" + } + return "UNKNOWN" +} + const ( - logLog logType = iota + infoLog logType = iota + warningLog errorLog fatalLog ) @@ -83,7 +98,7 @@ func (g *tLogger) log(ltype logType, depth int, format string, args ...interface g.t.Error(err) return } - args = append([]interface{}{prefix}, args...) + args = append([]interface{}{ltype.String() + " " + prefix}, args...) args = append(args, fmt.Sprintf(" (t=+%s)", time.Since(g.start))) if format == "" { @@ -180,35 +195,35 @@ func (g *tLogger) expected(s string) bool { } func (g *tLogger) Info(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(infoLog, 0, "", args...) } func (g *tLogger) Infoln(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(infoLog, 0, "", args...) } func (g *tLogger) Infof(format string, args ...interface{}) { - g.log(logLog, 0, format, args...) + g.log(infoLog, 0, format, args...) } func (g *tLogger) InfoDepth(depth int, args ...interface{}) { - g.log(logLog, depth, "", args...) 
+ g.log(infoLog, depth, "", args...) } func (g *tLogger) Warning(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(warningLog, 0, "", args...) } func (g *tLogger) Warningln(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(warningLog, 0, "", args...) } func (g *tLogger) Warningf(format string, args ...interface{}) { - g.log(logLog, 0, format, args...) + g.log(warningLog, 0, format, args...) } func (g *tLogger) WarningDepth(depth int, args ...interface{}) { - g.log(logLog, depth, "", args...) + g.log(warningLog, depth, "", args...) } func (g *tLogger) Error(args ...interface{}) { From e469f0d5f5bcc1324dc3940c584e0969e2ea1f90 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 17 Sep 2021 01:01:07 -0400 Subject: [PATCH 243/998] xds: Add env var protection for RBAC HTTP Filter (#4765) * xds: Add env var protection for RBAC HTTP Filter --- internal/xds/env/env.go | 4 + xds/internal/httpfilter/httpfilter.go | 5 + xds/internal/httpfilter/rbac/rbac.go | 19 ++ xds/internal/server/listener_wrapper.go | 10 +- xds/internal/server/listener_wrapper_test.go | 6 + .../test/xds_server_integration_test.go | 226 +++++++++++++++++- xds/internal/xdsclient/filter_chain.go | 4 + xds/internal/xdsclient/filter_chain_test.go | 36 +++ xds/internal/xdsclient/lds_test.go | 6 + xds/server.go | 13 +- 10 files changed, 322 insertions(+), 7 deletions(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 5bd1e4e857ff..b171ac91f177 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -43,6 +43,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RBAC" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = 
"GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -82,6 +83,9 @@ var ( // RetrySupport indicates whether xDS retry is enabled. RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") + // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled. + RBACSupport = strings.EqualFold(os.Getenv(rbacSupportEnv), "true") + // C2PResolverSupport indicates whether support for C2P resolver is enabled. // This can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". diff --git a/xds/internal/httpfilter/httpfilter.go b/xds/internal/httpfilter/httpfilter.go index 3e10e4f34867..b4399f9faeb3 100644 --- a/xds/internal/httpfilter/httpfilter.go +++ b/xds/internal/httpfilter/httpfilter.go @@ -94,6 +94,11 @@ func Register(b Filter) { } } +// UnregisterForTesting unregisters the HTTP Filter for testing purposes. +func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} + // Get returns the HTTPFilter registered with typeURL. // // If no filter is register with typeURL, nil will be returned. diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index 969111b619a4..98bcd73ffa41 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -28,6 +28,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/internal/xds/rbac" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/protobuf/types/known/anypb" @@ -37,9 +38,27 @@ import ( ) func init() { + if env.RBACSupport { + httpfilter.Register(builder{}) + } +} + +// RegisterForTesting registers the RBAC HTTP Filter for testing purposes, regardless +// of the RBAC environment variable. 
This is needed because there is no way to set the RBAC +// environment variable to true in a test before init() in this package is run. +func RegisterForTesting() { httpfilter.Register(builder{}) } +// UnregisterForTesting unregisters the RBAC HTTP Filter for testing purposes. This is needed because +// there is no way to unregister the HTTP Filter after registering it solely for testing purposes using +// rbac.RegisterForTesting() +func UnregisterForTesting() { + for _, typeURL := range builder.TypeURLs(builder{}) { + httpfilter.UnregisterForTesting(typeURL) + } +} + type builder struct { } diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 0d1173324bb6..99c9a7532307 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -35,6 +35,7 @@ import ( internalbackoff "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -272,6 +273,9 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } + if !env.RBACSupport { + return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil + } var rc xdsclient.RouteConfigUpdate if fc.InlineRouteConfig != nil { rc = *fc.InlineRouteConfig @@ -410,8 +414,10 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { // Server's state to ServingModeNotServing. That prevents new connections // from being accepted, whereas here we simply want the clients to reconnect // to get the updated configuration. 
- if l.drainCallback != nil { - l.drainCallback(l.Listener.Addr()) + if env.RBACSupport { + if l.drainCallback != nil { + l.drainCallback(l.Listener.Addr()) + } } l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames) // If there are no dynamic RDS Configurations still needed to be received diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 010b2044d405..383729363665 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -34,6 +34,7 @@ import ( wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/env" _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/testutils/fakeclient" @@ -325,6 +326,11 @@ func (s) TestNewListenerWrapper(t *testing.T) { // the update from the rds handler should it move the server to // ServingModeServing. 
func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() _, readyCh, xdsC, _, cleanup := newListenerWrapper(t) defer cleanup() diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 89122e0647c3..8baa3fbe2a32 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -34,9 +34,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" "google.golang.org/grpc/xds" - _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" + "google.golang.org/grpc/xds/internal/httpfilter/rbac" "google.golang.org/grpc/xds/internal/testutils/e2e" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -374,6 +375,11 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { // (NonForwardingAction), and the RPC's matching those routes should proceed as // normal. func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) defer cleanup1() @@ -711,6 +717,13 @@ func serverListenerWithRBACHTTPFilters(host string, port uint32, rbacCfg *rpb.RB // as normal and certain RPC's are denied by the RBAC HTTP Filter which gets // called by hooked xds interceptors. 
func (s) TestRBACHTTPFilter(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() + rbac.RegisterForTesting() + defer rbac.UnregisterForTesting() tests := []struct { name string rbacCfg *rpb.RBAC @@ -823,7 +836,218 @@ func (s) TestRBACHTTPFilter(t *testing.T) { if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != test.wantStatusUnaryCall { t.Fatalf("UnaryCall() returned err with status: %v, wantStatusUnaryCall: %v", err, test.wantStatusUnaryCall) } + + // Toggle the RBAC Env variable off, this should disable RBAC and allow any RPC"s through (will not go through + // routing or processed by HTTP Filters and thus will never get denied by RBAC). + env.RBACSupport = false + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + t.Fatalf("EmptyCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK { + t.Fatalf("UnaryCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + // Toggle RBAC back on for next iterations. + env.RBACSupport = true }() }) } } + +// serverListenerWithBadRouteConfiguration returns an xds Listener resource with +// a Route Configuration that will never successfully match in order to test +// RBAC Environment variable being toggled on and off. 
+func serverListenerWithBadRouteConfiguration(host string, port uint32) *v3listenerpb.Listener { + return &v3listenerpb.Listener{ + Name: fmt.Sprintf(e2e.ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + // Incoming RPC's will try and match to Virtual Hosts based on their :authority header. + // Thus, incoming RPC's will never match to a Virtual Host (server side requires matching + // to a VH/Route of type Non Forwarding Action to proceed normally), and all incoming RPC's + // with this route configuration will be denied. 
+ Domains: []string{"will-never-match"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + // Incoming RPC's will try and match to Virtual Hosts based on their :authority header. + // Thus, incoming RPC's will never match to a Virtual Host (server side requires matching + // to a VH/Route of type Non Forwarding Action to proceed normally), and all incoming RPC's + // with this route configuration will be denied. 
+ Domains: []string{"will-never-match"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + }, + } +} + +// TestRBACToggledOffThenToggledOnWithBadRouteConfiguration tests a scenario +// where the server gets a listener configuration with a route table that is +// garbage, with incoming RPC's never matching to a VH/Route of type Non +// Forwarding Action, thus never proceeding as normal. In the default scenario +// (RBAC Env Var turned off, thus all logic related to Route Configuration +// protected), the RPC's should simply proceed as normal due to ignoring the +// route configuration. Once toggling the route configuration on, the RPC's +// should all fail after updating the Server. +func (s) TestRBACToggledOffThenToggledOnWithBadRouteConfiguration(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + + // The inbound listener needs a route table that will never match on a VH, + // and thus shouldn't allow incoming RPC's to proceed. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + // This bad route configuration shouldn't affect incoming RPC's from + // proceeding as normal, as the configuration shouldn't be parsed due to the + // RBAC Environment variable not being set to true. 
+ inboundLis := serverListenerWithBadRouteConfiguration(host, port) + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + + // The default setting of RBAC being disabled should allow any RPC's to + // proceed as normal. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK { + t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + + // After toggling RBAC support on, all the RPC's should get denied with + // status code Unavailable due to not matching to a route of type Non + // Forwarding Action (Route Table not configured properly). + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() + // Update the server with the same configuration, this is blocking on server + // side so no raciness here. 
+ if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { + t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } +} diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 87aea9e46e1d..3b010ebdb92b 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -29,6 +29,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" ) @@ -598,6 +599,9 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) // TODO: Implement terminal filter logic, as per A36. 
filterChain.HTTPFilters = filters seenHCM = true + if !env.RBACSupport { + continue + } switch hcm.RouteSpecifier.(type) { case *v3httppb.HttpConnectionManager_Rds: if hcm.GetRds().GetConfigSource().GetAds() == nil { diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index 5219e34cb105..2cc73b0a5119 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -40,6 +40,7 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" @@ -519,6 +520,11 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { // TestNewFilterChainImpl_Success_RouteUpdate tests the construction of the // filter chain with valid HTTP Filters present. func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() tests := []struct { name string lis *v3listenerpb.Listener @@ -754,6 +760,11 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { // TestNewFilterChainImpl_Failure_BadRouteUpdate verifies cases where the Route // Update in the filter chain are invalid. func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() tests := []struct { name string lis *v3listenerpb.Listener @@ -927,6 +938,11 @@ func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_HTTPFilters tests the construction of the // filter chain with valid HTTP Filters present. 
func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() tests := []struct { name string lis *v3listenerpb.Listener @@ -1245,6 +1261,11 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the // security configuration in the filter chain contains valid data. func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() tests := []struct { desc string lis *v3listenerpb.Listener @@ -1472,6 +1493,11 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { // success at config validation time and the filter chains which contains // unsupported match fields will be skipped at lookup time. func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() unspecifiedEntry := &destPrefixEntry{ srcTypeArr: [3]*sourcePrefixes{ { @@ -1637,6 +1663,11 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { // TestNewFilterChainImpl_Success_AllCombinations verifies different // combinations of the supported match criteria. 
func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() tests := []struct { desc string lis *v3listenerpb.Listener @@ -2283,6 +2314,11 @@ func TestLookup_Failures(t *testing.T) { } func TestLookup_Successes(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() lisWithDefaultChain := &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index d43cbaa80870..9045c0b90872 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" @@ -601,6 +602,11 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { } func (s) TestUnmarshalListener_ServerSide(t *testing.T) { + oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() const ( v3LDSTarget = "grpc/server?xds.resource.listening_address=0.0.0.0:9999" testVersion = "test-version-lds-server" diff --git a/xds/server.go b/xds/server.go index 33a490957995..b36fa64b5008 100644 --- a/xds/server.go +++ b/xds/server.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/server" @@ -381,8 +382,10 @@ func routeAndProcess(ctx context.Context) error { // xdsUnaryInterceptor is the unary interceptor 
added to the gRPC server to // perform any xDS specific functionality on unary RPCs. func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - if err := routeAndProcess(ctx); err != nil { - return nil, err + if env.RBACSupport { + if err := routeAndProcess(ctx); err != nil { + return nil, err + } } return handler(ctx, req) } @@ -390,8 +393,10 @@ func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServ // xdsStreamInterceptor is the stream interceptor added to the gRPC server to // perform any xDS specific functionality on streaming RPCs. func xdsStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if err := routeAndProcess(ss.Context()); err != nil { - return err + if env.RBACSupport { + if err := routeAndProcess(ss.Context()); err != nil { + return err + } } return handler(srv, ss) } From 1109452fd118ec20164e859f71c0bb59fd209d21 Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Fri, 17 Sep 2021 15:19:26 -0700 Subject: [PATCH 244/998] [Backport grpc#27373] add testing_version flag (#4783) --- test/kokoro/xds_url_map.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh index 865119a57def..cce3fff18b65 100755 --- a/test/kokoro/xds_url_map.sh +++ b/test/kokoro/xds_url_map.sh @@ -89,6 +89,7 @@ run_test() { --flagfile="${TEST_DRIVER_FLAGFILE}" \ --kube_context="${KUBE_CONTEXT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --testing_version=$(echo "$KOKORO_JOB_NAME" | sed -E 's|^grpc/go/([^/]+)/.*|\1|') \ --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ --flagfile="config/url-map.cfg" set +x From 5417cf809116a5e3e8ca06b15cb48cbffb946204 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 20 Sep 2021 13:27:27 -0700 Subject: [PATCH 245/998] xds/test: delete use of removed types (#4784) They were 
deprecated, and removed later. --- internal/xds/matcher/string_matcher_test.go | 7 ------- xds/internal/xdsclient/rds_test.go | 2 +- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/internal/xds/matcher/string_matcher_test.go b/internal/xds/matcher/string_matcher_test.go index b634aa041963..389963b94e9e 100644 --- a/internal/xds/matcher/string_matcher_test.go +++ b/internal/xds/matcher/string_matcher_test.go @@ -67,13 +67,6 @@ func TestStringMatcherFromProto(t *testing.T) { }, wantErr: true, }, - { - desc: "invalid deprecated regex", - inputProto: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_HiddenEnvoyDeprecatedRegex{}, - }, - wantErr: true, - }, { desc: "happy case exact", inputProto: &v3matcherpb.StringMatcher{ diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index 6d1f8588f2df..f5f906375c7c 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -1146,7 +1146,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { Headers: []*v3routepb.HeaderMatcher{ { Name: "th", - HeaderMatchSpecifier: &v3routepb.HeaderMatcher_HiddenEnvoyDeprecatedRegexMatch{}, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_StringMatch{}, }, }, }, From d53469981f2356f7c270d4b3beaafc6d1a653817 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 21 Sep 2021 10:39:59 -0700 Subject: [PATCH 246/998] transport: fix transparent retries when per-RPC credentials are in use (#4785) --- internal/transport/http2_client.go | 42 ++++++++++++++---------------- stream.go | 2 +- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 0cd6da1e73f7..75586307435a 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -616,12 +616,22 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// NewStreamError wraps an error and 
reports additional information. +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. In this case there is no reason to retry at all, as it is +// assumed the RPC would continue to fail on subsequent attempts. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error - DoNotRetry bool - PerformedIO bool + DoNotRetry bool + DoNotTransparentRetry bool } func (e NewStreamError) Error() string { @@ -631,24 +641,10 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { - defer func() { - if err != nil { - nse, ok := err.(*NewStreamError) - if !ok { - nse = &NewStreamError{Err: err} - } - if len(t.perRPCCreds) > 0 || callHdr.Creds != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. 
- nse.PerformedIO = true - } - err = nse - } - }() ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -748,7 +744,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, err + return nil, &NewStreamError{Err: err} } if success { break @@ -759,12 +755,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing} } } if t.statsHandler != nil { diff --git a/stream.go b/stream.go index 78b7e9e80153..625d47b34e59 100644 --- a/stream.go +++ b/stream.go @@ -547,7 +547,7 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { // In the event of a non-IO operation error from NewStream, we never // attempted to write anything to the wire, so we can retry // indefinitely. - if !nse.PerformedIO { + if !nse.DoNotTransparentRetry { return true, nil } } From 4ddf8ceaa7b5de2170b082bfc7162c4887ddaeb5 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 21 Sep 2021 10:55:00 -0700 Subject: [PATCH 247/998] Revert "transport/server: add :method POST to incoming metadata (#4770)" (#4790) This reverts commit c84a5de06496bf8416cebf9d0058f481e37c165e. 
--- binarylog/binarylog_end2end_test.go | 16 ------------- internal/transport/http2_server.go | 1 - test/end2end_test.go | 37 ----------------------------- 3 files changed, 54 deletions(-) diff --git a/binarylog/binarylog_end2end_test.go b/binarylog/binarylog_end2end_test.go index 7da91ad1a8da..61eeb68edae8 100644 --- a/binarylog/binarylog_end2end_test.go +++ b/binarylog/binarylog_end2end_test.go @@ -850,27 +850,11 @@ func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) { tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...) h.Metadata.Entry = tmp sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) - // Delete headers that have POST values here since we cannot control - // this. - for i, entry := range h.Metadata.Entry { - if entry.Key == ":method" { - h.Metadata.Entry = append(h.Metadata.Entry[:i], h.Metadata.Entry[i+1:]...) - break - } - } } if h := e.GetServerHeader(); h != nil { tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...) h.Metadata.Entry = tmp sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) - // Delete headers that have POST values here since we cannot control - // this. - for i, entry := range h.Metadata.Entry { - if entry.Key == ":method" { - h.Metadata.Entry = append(h.Metadata.Entry[:i], h.Metadata.Entry[i+1:]...) 
- break - } - } } if h := e.GetTrailer(); h != nil { sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 0ecfe09ceecc..cd0ebed98845 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -380,7 +380,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.recvCompress = hf.Value case ":method": httpMethod = hf.Value - mdata[":method"] = append(mdata[":method"], hf.Value) case ":path": s.method = hf.Value case "grpc-timeout": diff --git a/test/end2end_test.go b/test/end2end_test.go index 4c7e2f1fc75c..bce752701dab 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7847,40 +7847,3 @@ func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) { t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF) } } - -func unaryInterceptorVerifyPost(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, status.Error(codes.NotFound, "metadata was not in context") - } - method := md.Get(":method") - if len(method) != 1 { - return nil, status.Error(codes.InvalidArgument, ":method value had more than one value") - } - if method[0] != "POST" { - return nil, status.Error(codes.InvalidArgument, ":method value was not post") - } - return handler(ctx, req) -} - -// TestUnaryInterceptorGetsPost verifies that the server transport adds a -// :method POST header to metadata, and that that added Header is visibile at -// the grpc layer. 
-func (s) TestUnaryInterceptorGetsPost(t *testing.T) { - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil - }, - } - if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyPost)}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { - t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK) - } -} From 616977cc7974d6cbec50399297db7026d791c9dd Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 21 Sep 2021 11:32:51 -0700 Subject: [PATCH 248/998] Change version to 1.42.0-dev (#4793) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 9717a560b9ba..6ba1fd5bdb76 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.41.0-dev" +const Version = "1.42.0-dev" From 606403ded29c7b922a66b4c5a449a1643269bc96 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 21 Sep 2021 19:33:18 -0400 Subject: [PATCH 249/998] transport: fix log spam from Server Authentication Handshake errors (#4798) * transport: fix log spam from Server Authentication Handshake errors --- internal/transport/http2_server.go | 8 +++++--- server.go | 7 ++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index cd0ebed98845..19c13e041d3b 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -139,9 +139,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, var err error conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err == credentials.ErrConnDispatched { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { return nil, err } return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) diff --git a/server.go b/server.go index d6155c0e8543..557f29559de1 100644 --- a/server.go +++ b/server.go @@ -887,7 +887,12 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { c.Close() } - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
+ if err != credentials.ErrConnDispatched { + if err != io.EOF { + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + } return nil } From 1f12bf44284e6ba4be72cd028a2a1eb01c2d18bb Mon Sep 17 00:00:00 2001 From: Yury Frolov <57130330+EinKrebs@users.noreply.github.com> Date: Wed, 22 Sep 2021 23:04:45 +0500 Subject: [PATCH 250/998] transport: fix a typo in http2_server.go (#4745) --- internal/transport/http2_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 19c13e041d3b..3d1d5c1d4cdb 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -129,7 +129,7 @@ type http2Server struct { // options from config. // // It returns a non-nil transport and a nil error on success. On failure, it -// returns a non-nil transport and a nil-error. For a special case where the +// returns a nil transport and a non-nil error. For a special case where the // underlying conn gets closed before the client preface could be read, it // returns a nil transport and a nil error. 
func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { From 458ea7640a92039aad37edc67b63e6d040a93320 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 22 Sep 2021 15:08:44 -0400 Subject: [PATCH 251/998] xds: Added validations for HCM to support xDS RBAC Filter (#4786) * xds: Added validations for HCM to support xDS RBAC Filter --- internal/xds/rbac/matchers.go | 2 +- xds/internal/xdsclient/filter_chain.go | 12 +++ xds/internal/xdsclient/lds_test.go | 144 +++++++++++++++++++++++++ xds/internal/xdsclient/xds.go | 9 ++ 4 files changed, 166 insertions(+), 1 deletion(-) diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 25d1cc0e8b98..4cea3ebe9f78 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -183,7 +183,7 @@ func matchersFromPrincipals(principals []*v3rbacpb.Principal) ([]matcher, error) // The source ip principal identifier is deprecated. Thus, a // principal typed as a source ip in the identifier will be a no-op. // The config should use DirectRemoteIp instead. - case *v3rbacpb.Principal_RemoteIp: + case *v3rbacpb.Principal_RemoteIp: // Allow equating RBAC's direct_remote_ip...do we need this? // Not supported in gRPC RBAC currently - a principal typed as // Remote Ip in the initial config will be a no-op. 
case *v3rbacpb.Principal_Metadata: diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index 3b010ebdb92b..f2b29f52a445 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -596,6 +596,18 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}: %v", filters, hcm.GetHttpFilters(), err) } if !seenHCM { + // Validate for RBAC in only the HCM that will be used, since this isn't a logical validation failure, + // it's simply a validation to support RBAC HTTP Filter. + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." - A41 + if hcm.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", hcm) + } + if len(hcm.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", hcm) + } + // TODO: Implement terminal filter logic, as per A36. 
filterChain.HTTPFilters = filters seenHCM = true diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index 9045c0b90872..e24eda1e789f 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -173,6 +173,30 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, }) } + v3LisToTestRBAC = func(xffNumTrustedHops uint32, originalIpDetectionExtensions []*v3corepb.TypedExtensionConfig) *anypb.Any { + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + XffNumTrustedHops: xffNumTrustedHops, + OriginalIpDetectionExtensions: originalIpDetectionExtensions, + }), + }, + }) + } errMD = UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, @@ -529,6 +553,49 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Version: testVersion, }, }, + // "To allow equating RBAC's direct_remote_ip and + // remote_ip...HttpConnectionManager.xff_num_trusted_hops must be unset + // or zero and HttpConnectionManager.original_ip_detection_extensions + // must be empty." 
- A41 + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", + resources: []*anypb.Any{v3LisToTestRBAC(0, nil)}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{routerFilter}, + Raw: v3LisToTestRBAC(0, nil), + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, + // In order to support xDS Configured RBAC HTTPFilter equating direct + // remote ip and remote ip, xffNumTrustedHops cannot be greater than + // zero. This is because if you can trust a ingress proxy hop when + // determining an origin clients ip address, direct remote ip != remote + // ip. + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", + resources: []*anypb.Any{v3LisToTestRBAC(1, nil)}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, + wantMD: errMD, + wantErr: true, + }, + // In order to support xDS Configured RBAC HTTPFilter equating direct + // remote ip and remote ip, originalIpDetectionExtensions must be empty. + // This is because if you have to ask ip-detection-extension for the + // original ip, direct remote ip might not equal remote ip. 
+ { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", + resources: []*anypb.Any{v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}})}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, + wantMD: errMD, + wantErr: true, + }, { name: "v3 listener with inline route configuration", resources: []*anypb.Any{v3LisWithInlineRoute}, @@ -868,6 +935,32 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, } ) + v3LisToTestRBAC := func(xffNumTrustedHops uint32, originalIpDetectionExtensions []*v3corepb.TypedExtensionConfig) *anypb.Any { + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + XffNumTrustedHops: xffNumTrustedHops, + OriginalIpDetectionExtensions: originalIpDetectionExtensions, + }), + }, + }, + }, + }, + }, + }) + } tests := []struct { name string @@ -1272,6 +1365,57 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { wantMD: errMD, wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", }, + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", + resources: []*anypb.Any{v3LisToTestRBAC(0, nil)}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: 
map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Raw: listenerEmptyTransportSocket, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", + resources: []*anypb.Any{v3LisToTestRBAC(1, nil)}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, + wantMD: errMD, + wantErr: "xff_num_trusted_hops must be unset or zero", + }, + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", + resources: []*anypb.Any{v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}})}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, + wantMD: errMD, + wantErr: "original_ip_detection_extensions must be empty", + }, { name: "unsupported validation context in transport socket", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 686c52a350ff..6b172ac8aa2a 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -104,6 +104,15 @@ func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil { return nil, fmt.Errorf("failed to unmarshal api_listner: %v", err) } + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." 
- A41 + if apiLis.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis) + } + if len(apiLis.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) + } switch apiLis.RouteSpecifier.(type) { case *v3httppb.HttpConnectionManager_Rds: From e6246c22eb0440d525ce1c226b0c9f1ea9ea693a Mon Sep 17 00:00:00 2001 From: Evan Jones Date: Wed, 22 Sep 2021 16:30:27 -0400 Subject: [PATCH 252/998] server: optimize chain interceptors (-1 allocation, -10% time/call) (#4746) --- server.go | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/server.go b/server.go index 557f29559de1..1e82c955529a 100644 --- a/server.go +++ b/server.go @@ -1106,16 +1106,21 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - var i int - var next UnaryHandler - next = func(ctx context.Context, req interface{}) (interface{}, error) { - if i == len(interceptors)-1 { - return interceptors[i](ctx, req, info, handler) + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. 
+ var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) } - i++ - return interceptors[i-1](ctx, req, info, next) + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) } - return next(ctx, req) + return state.next(ctx, req) } } @@ -1391,16 +1396,21 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - var i int - var next StreamHandler - next = func(srv interface{}, ss ServerStream) error { - if i == len(interceptors)-1 { - return interceptors[i](srv, ss, info, handler) + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. 
+ var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) } - i++ - return interceptors[i-1](srv, ss, info, next) + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) } - return next(srv, ss) + return state.next(srv, ss) } } From d623accd30f0f13047e6e2b7147aee41691054c3 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 22 Sep 2021 16:01:18 -0700 Subject: [PATCH 253/998] xds: fix parent balancers to handle Idle children (#4801) --- .../clustermanager/balancerstateaggregator.go | 9 ++- .../clustermanager/clustermanager_test.go | 65 +++++++++++++++++++ .../weightedaggregator/aggregator.go | 8 ++- .../weightedtarget/weightedtarget_test.go | 61 +++++++++++++++++ 4 files changed, 141 insertions(+), 2 deletions(-) diff --git a/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/xds/internal/balancer/clustermanager/balancerstateaggregator.go index 35eb86c3590c..6e0e03299f95 100644 --- a/xds/internal/balancer/clustermanager/balancerstateaggregator.go +++ b/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -183,13 +183,18 @@ func (bsa *balancerStateAggregator) build() balancer.State { // handling the special connecting after ready, as in UpdateState(). Then a // function to calculate the aggregated connectivity state as in this // function. - var readyN, connectingN int + // + // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated + // state. 
+ var readyN, connectingN, idleN int for _, ps := range bsa.idToPickerState { switch ps.stateToAggregate { case connectivity.Ready: readyN++ case connectivity.Connecting: connectingN++ + case connectivity.Idle: + idleN++ } } var aggregatedState connectivity.State @@ -198,6 +203,8 @@ func (bsa *balancerStateAggregator) build() balancer.State { aggregatedState = connectivity.Ready case connectingN > 0: aggregatedState = connectivity.Connecting + case idleN > 0: + aggregatedState = connectivity.Idle default: aggregatedState = connectivity.TransientFailure } diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index a40d954ad64f..d3475ea3f5d8 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -565,3 +565,68 @@ func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { t.Fatal(err2) } } + +const initIdleBalancerName = "test-init-Idle-balancer" + +var errTestInitIdle = fmt.Errorf("init Idle balancer error 0") + +func init() { + stub.Register(initIdleBalancerName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { + bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + err := fmt.Errorf("wrong picker error") + if state.ConnectivityState == connectivity.Idle { + err = errTestInitIdle + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &testutils.TestConstPicker{Err: err}, + }) + }, + }) +} + +// TestInitialIdle covers the case that if the child reports Idle, the overall +// state will be Idle. 
+func TestInitialIdle(t *testing.T) { + cc := testutils.NewTestClientConn(t) + rtb := rtBuilder.Build(cc, balancer.BuildOptions{}) + + configJSON1 := `{ +"children": { + "cds:cluster_1":{ "childPolicy": [{"test-init-Idle-balancer":""}] } +} +}` + + config1, err := rtParser.ParseConfig([]byte(configJSON1)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0], Attributes: nil}, + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), + }}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that a subconn is created with the address, and the hierarchy path + // in the address is cleared. + for range wantAddrs { + sc := <-cc.NewSubConnCh + rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + } + + if state1 := <-cc.NewStateCh; state1 != connectivity.Idle { + t.Fatalf("Received aggregated state: %v, want Idle", state1) + } +} diff --git a/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go b/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go index 6c36e2a69cd9..7e1d106e9ff9 100644 --- a/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -200,7 +200,9 @@ func (wbsa *Aggregator) BuildAndUpdate() { func (wbsa *Aggregator) build() balancer.State { wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState) m := wbsa.idToPickerState - var readyN, connectingN int + // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated + // state. 
+ var readyN, connectingN, idleN int readyPickerWithWeights := make([]weightedPickerState, 0, len(m)) for _, ps := range m { switch ps.stateToAggregate { @@ -209,6 +211,8 @@ func (wbsa *Aggregator) build() balancer.State { readyPickerWithWeights = append(readyPickerWithWeights, *ps) case connectivity.Connecting: connectingN++ + case connectivity.Idle: + idleN++ } } var aggregatedState connectivity.State @@ -217,6 +221,8 @@ func (wbsa *Aggregator) build() balancer.State { aggregatedState = connectivity.Ready case connectingN > 0: aggregatedState = connectivity.Connecting + case idleN > 0: + aggregatedState = connectivity.Idle default: aggregatedState = connectivity.TransientFailure } diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_test.go index eeebab733d61..b0e4df895885 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_test.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget_test.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/hierarchy" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -263,3 +264,63 @@ func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { return scst.SubConn } } + +const initIdleBalancerName = "test-init-Idle-balancer" + +var errTestInitIdle = fmt.Errorf("init Idle balancer error 0") + +func init() { + stub.Register(initIdleBalancerName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { + bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + err := fmt.Errorf("wrong picker error") + if state.ConnectivityState == 
connectivity.Idle { + err = errTestInitIdle + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &testutils.TestConstPicker{Err: err}, + }) + }, + }) +} + +// TestInitialIdle covers the case that if the child reports Idle, the overall +// state will be Idle. +func TestInitialIdle(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + + // Start with "cluster_1: round_robin". + config1, err := wtbParser.ParseConfig([]byte(`{"targets":{"cluster_1":{"weight":1,"childPolicy":[{"test-init-Idle-balancer":""}]}}}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0], Attributes: nil}, + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), + }}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that a subconn is created with the address, and the hierarchy path + // in the address is cleared. 
+ for range wantAddrs { + sc := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + } + + if state1 := <-cc.NewStateCh; state1 != connectivity.Idle { + t.Fatalf("Received aggregated state: %v, want Idle", state1) + } +} From 32cd3d617642c49c435ab2a435e716efd4a5e949 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Wed, 22 Sep 2021 16:08:17 -0700 Subject: [PATCH 254/998] interop: don't use WithBlock dial option in the client (#4805) --- interop/client/client.go | 1 - 1 file changed, 1 deletion(-) diff --git a/interop/client/client.go b/interop/client/client.go index 456b74e101db..f41f56fbbd55 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -202,7 +202,6 @@ func main() { if len(*serviceConfigJSON) > 0 { opts = append(opts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(*serviceConfigJSON)) } - opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(serverAddr, opts...) if err != nil { logger.Fatalf("Fail to dial: %v", err) From d7208f02ca7721bef504d100b61c1ef8cd569390 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 22 Sep 2021 16:35:39 -0700 Subject: [PATCH 255/998] github: set a shorter timeout on testing jobs (#4806) --- .github/workflows/codeql-analysis.yml | 1 + .github/workflows/testing.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5251f01db088..05f393ef1a75 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -19,6 +19,7 @@ jobs: analyze: name: Analyze runs-on: ubuntu-latest + timeout-minutes: 20 strategy: fail-fast: false diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 5e1e09cdc697..37544060ea28 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -18,6 +18,7 @@ jobs: # Check generated protos match their source repos (optional for PRs). 
vet-proto: runs-on: ubuntu-latest + timeout-minutes: 20 steps: # Setup the environment. - name: Setup Go @@ -37,6 +38,7 @@ jobs: env: VET_SKIP_PROTO: 1 runs-on: ubuntu-latest + timeout-minutes: 20 strategy: matrix: include: From 83a3461520f69c1896990dfae724101c1ed6a1d2 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 22 Sep 2021 17:43:36 -0700 Subject: [PATCH 256/998] xds: have separate tests for RBAC on and off (#4807) --- .../test/xds_server_integration_test.go | 82 +++++++++++++------ 1 file changed, 57 insertions(+), 25 deletions(-) diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 8baa3fbe2a32..e95186ae0fb1 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -970,15 +970,14 @@ func serverListenerWithBadRouteConfiguration(host string, port uint32) *v3listen } } -// TestRBACToggledOffThenToggledOnWithBadRouteConfiguration tests a scenario -// where the server gets a listener configuration with a route table that is -// garbage, with incoming RPC's never matching to a VH/Route of type Non -// Forwarding Action, thus never proceeding as normal. In the default scenario -// (RBAC Env Var turned off, thus all logic related to Route Configuration -// protected), the RPC's should simply proceed as normal due to ignoring the -// route configuration. Once toggling the route configuration on, the RPC's -// should all fail after updating the Server. -func (s) TestRBACToggledOffThenToggledOnWithBadRouteConfiguration(t *testing.T) { +func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { + // Turn RBAC support on. 
+ oldRBAC := env.RBACSupport + env.RBACSupport = true + defer func() { + env.RBACSupport = oldRBAC + }() + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) defer cleanup1() @@ -1000,9 +999,9 @@ func (s) TestRBACToggledOffThenToggledOnWithBadRouteConfiguration(t *testing.T) Port: port, SecLevel: e2e.SecurityLevelNone, }) - // This bad route configuration shouldn't affect incoming RPC's from - // proceeding as normal, as the configuration shouldn't be parsed due to the - // RBAC Environment variable not being set to true. + // Since RBAC support is turned ON, all the RPC's should get denied with + // status code Unavailable due to not matching to a route of type Non + // Forwarding Action (Route Table not configured properly). inboundLis := serverListenerWithBadRouteConfiguration(host, port) resources.Listeners = append(resources.Listeners, inboundLis) @@ -1020,34 +1019,67 @@ func (s) TestRBACToggledOffThenToggledOnWithBadRouteConfiguration(t *testing.T) defer cc.Close() client := testpb.NewTestServiceClient(cc) - - // The default setting of RBAC being disabled should allow any RPC's to - // proceed as normal. 
- if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } - if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK { + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } +} - // After toggling RBAC support on, all the RPC's should get denied with - // status code Unavailable due to not matching to a route of type Non - // Forwarding Action (Route Table not configured properly). +func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { + // Turn RBAC support off. oldRBAC := env.RBACSupport - env.RBACSupport = true + env.RBACSupport = false defer func() { env.RBACSupport = oldRBAC }() - // Update the server with the same configuration, this is blocking on server - // side so no raciness here. + + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + + // The inbound listener needs a route table that will never match on a VH, + // and thus shouldn't allow incoming RPC's to proceed. 
+ resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + // This bad route configuration shouldn't affect incoming RPC's from + // proceeding as normal, as the configuration shouldn't be parsed due to the + // RBAC Environment variable not being set to true. + inboundLis := serverListenerWithBadRouteConfiguration(host, port) + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } - if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK { t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } } From 78d3aa8b3ed1b59bf84db4242ac7c316e8943797 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 23 Sep 2021 07:43:14 -0700 Subject: [PATCH 257/998] grpc: cleanup parse target and authority tests (#4787) --- clientconn_authority_test.go | 122 +++++++++++++++++++++ clientconn_parsed_target_test.go | 183 
+++++++++++++++++++++++++++++++ clientconn_test.go | 56 ---------- internal/grpcutil/target_test.go | 114 ------------------- 4 files changed, 305 insertions(+), 170 deletions(-) create mode 100644 clientconn_authority_test.go create mode 100644 clientconn_parsed_target_test.go delete mode 100644 internal/grpcutil/target_test.go diff --git a/clientconn_authority_test.go b/clientconn_authority_test.go new file mode 100644 index 000000000000..5cd705e2d4f0 --- /dev/null +++ b/clientconn_authority_test.go @@ -0,0 +1,122 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "net" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/testdata" +) + +func (s) TestClientConnAuthority(t *testing.T) { + serverNameOverride := "over.write.server.name" + creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), serverNameOverride) + if err != nil { + t.Fatalf("credentials.NewClientTLSFromFile(_, %q) failed: %v", err, serverNameOverride) + } + + tests := []struct { + name string + target string + opts []DialOption + wantAuthority string + }{ + { + name: "default", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithInsecure()}, + wantAuthority: "Non-Existent.Server:8080", + }, + { + name: "override-via-creds", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithTransportCredentials(creds)}, + wantAuthority: serverNameOverride, + }, + { + name: "override-via-WithAuthority", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithInsecure(), WithAuthority("authority-override")}, + wantAuthority: "authority-override", + }, + { + name: "override-via-creds-and-WithAuthority", + target: "Non-Existent.Server:8080", + // WithAuthority override works only for insecure creds. 
+ opts: []DialOption{WithTransportCredentials(creds), WithAuthority("authority-override")}, + wantAuthority: serverNameOverride, + }, + { + name: "unix relative", + target: "unix:sock.sock", + opts: []DialOption{WithInsecure()}, + wantAuthority: "localhost", + }, + { + name: "unix relative with custom dialer", + target: "unix:sock.sock", + opts: []DialOption{WithInsecure(), WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "", addr) + })}, + wantAuthority: "localhost", + }, + { + name: "unix absolute", + target: "unix:/sock.sock", + opts: []DialOption{WithInsecure()}, + wantAuthority: "localhost", + }, + { + name: "unix absolute with custom dialer", + target: "unix:///sock.sock", + opts: []DialOption{WithInsecure(), WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "", addr) + })}, + wantAuthority: "localhost", + }, + { + name: "localhost colon port", + target: "localhost:50051", + opts: []DialOption{WithInsecure()}, + wantAuthority: "localhost:50051", + }, + { + name: "colon port", + target: ":50051", + opts: []DialOption{WithInsecure()}, + wantAuthority: "localhost:50051", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cc, err := Dial(test.target, test.opts...) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", test.target, err) + } + defer cc.Close() + if cc.authority != test.wantAuthority { + t.Fatalf("cc.authority = %q, want %q", cc.authority, test.wantAuthority) + } + }) + } +} diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go new file mode 100644 index 000000000000..fda06f9fa147 --- /dev/null +++ b/clientconn_parsed_target_test.go @@ -0,0 +1,183 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "net" + "testing" + "time" + + "google.golang.org/grpc/resolver" +) + +func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { + defScheme := resolver.GetDefaultScheme() + tests := []struct { + target string + wantParsed resolver.Target + }{ + // No scheme is specified. + {target: "", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ""}}, + {target: "://", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://"}}, + {target: ":///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///"}}, + {target: "://a/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/"}}, + {target: ":///a", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///a"}}, + {target: "://a/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/b"}}, + {target: "/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/"}}, + {target: "a/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a/b"}}, + {target: "a//b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a//b"}}, + {target: "google.com", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com"}}, + {target: "google.com/?a=b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com/?a=b"}}, + {target: "/unix/socket/address", wantParsed: resolver.Target{Scheme: defScheme, Authority: 
"", Endpoint: "/unix/socket/address"}}, + + // An unregistered scheme is specified. + {target: "a:///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///"}}, + {target: "a://b/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/"}}, + {target: "a:///b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///b"}}, + {target: "a://b/c", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/c"}}, + {target: "a:b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:b"}}, + {target: "a:/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:/b"}}, + {target: "a://b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b"}}, + + // A registered scheme is specified. + {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com"}}, + {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}}, + {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}}, + {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}}, + {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, + {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b"}}, + {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b"}}, + {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a-b"}}, + {target: "unix-abstract:/ a///://::!@#$%^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/ a///://::!@#$%^&*()b"}}, 
+ {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "passthrough:abc"}}, + {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc"}}, + {target: "unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/a/b/c"}}, + {target: "unix-abstract:///", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/"}}, + {target: "unix-abstract://authority", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "//authority"}}, + {target: "unix://domain", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unix://domain"}}, + {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "", Endpoint: "unix:///a/b/c"}}, + } + + for _, test := range tests { + t.Run(test.target, func(t *testing.T) { + cc, err := Dial(test.target, WithInsecure()) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", test.target, err) + } + defer cc.Close() + + if gotParsed := cc.parsedTarget; gotParsed != test.wantParsed { + t.Errorf("cc.parsedTarget = %+v, want %+v", gotParsed, test.wantParsed) + } + }) + } +} + +func (s) TestParsedTarget_Failure_WithoutCustomDialer(t *testing.T) { + targets := []string{ + "unix://a/b/c", + "unix-abstract://authority/a/b/c", + } + + for _, target := range targets { + t.Run(target, func(t *testing.T) { + if cc, err := Dial(target, WithInsecure()); err == nil { + defer cc.Close() + t.Fatalf("Dial(%q) succeeded cc.parsedTarget = %+v, expected to fail", target, cc.parsedTarget) + } + }) + } +} + +func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { + defScheme := resolver.GetDefaultScheme() + tests := []struct { + target string + wantParsed resolver.Target + wantDialerAddress string + }{ + // unix:[local_path], unix:[/absolute], and unix://[/absolute] have + // 
different behaviors with a custom dialer. + { + target: "unix:a/b/c", + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unix:a/b/c"}, + wantDialerAddress: "unix:a/b/c", + }, + { + target: "unix:/a/b/c", + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unix:/a/b/c"}, + wantDialerAddress: "unix:/a/b/c", + }, + { + target: "unix:///a/b/c", + wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}, + wantDialerAddress: "unix:///a/b/c", + }, + { + target: "dns:///127.0.0.1:50051", + wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "127.0.0.1:50051"}, + wantDialerAddress: "127.0.0.1:50051", + }, + { + target: ":///127.0.0.1:50051", + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///127.0.0.1:50051"}, + wantDialerAddress: ":///127.0.0.1:50051", + }, + { + target: "dns://authority/127.0.0.1:50051", + wantParsed: resolver.Target{Scheme: "dns", Authority: "authority", Endpoint: "127.0.0.1:50051"}, + wantDialerAddress: "127.0.0.1:50051", + }, + { + target: "://authority/127.0.0.1:50051", + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://authority/127.0.0.1:50051"}, + wantDialerAddress: "://authority/127.0.0.1:50051", + }, + } + + for _, test := range tests { + t.Run(test.target, func(t *testing.T) { + addrCh := make(chan string, 1) + dialer := func(ctx context.Context, address string) (net.Conn, error) { + addrCh <- address + return nil, errors.New("dialer error") + } + + cc, err := Dial(test.target, WithInsecure(), WithContextDialer(dialer)) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", test.target, err) + } + defer cc.Close() + + select { + case addr := <-addrCh: + if addr != test.wantDialerAddress { + t.Fatalf("address in custom dialer is %q, want %q", addr, test.wantDialerAddress) + } + case <-time.After(time.Second): + t.Fatal("timeout when waiting for custom dialer to be invoked") + } + if gotParsed := 
cc.parsedTarget; gotParsed != test.wantParsed { + t.Errorf("cc.parsedTarget for dial target %q = %+v, want %+v", test.target, gotParsed, test.wantParsed) + } + }) + } +} diff --git a/clientconn_test.go b/clientconn_test.go index e7381b8e6770..f0c55a9a1326 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -376,62 +376,6 @@ func (s) TestWithTransportCredentialsTLS(t *testing.T) { } } -func (s) TestDefaultAuthority(t *testing.T) { - target := "Non-Existent.Server:8080" - conn, err := Dial(target, WithInsecure()) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != target { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, target) - } -} - -func (s) TestTLSServerNameOverwrite(t *testing.T) { - overwriteServerName := "over.write.server.name" - creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), overwriteServerName) - if err != nil { - t.Fatalf("Failed to create credentials %v", err) - } - conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(creds)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != overwriteServerName { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) - } -} - -func (s) TestWithAuthority(t *testing.T) { - overwriteServerName := "over.write.server.name" - conn, err := Dial("passthrough:///Non-Existent.Server:80", WithInsecure(), WithAuthority(overwriteServerName)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != overwriteServerName { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) - } -} - -func (s) TestWithAuthorityAndTLS(t *testing.T) { - overwriteServerName := "over.write.server.name" - creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), overwriteServerName) - if err != 
nil { - t.Fatalf("Failed to create credentials %v", err) - } - conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(creds), WithAuthority("no.effect.authority")) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != overwriteServerName { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) - } -} - // When creating a transport configured with n addresses, only calculate the // backoff once per "round" of attempts instead of once per address (n times // per "round" of attempts). diff --git a/internal/grpcutil/target_test.go b/internal/grpcutil/target_test.go deleted file mode 100644 index f6c586dd0808..000000000000 --- a/internal/grpcutil/target_test.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpcutil - -import ( - "testing" - - "google.golang.org/grpc/resolver" -) - -func TestParseTarget(t *testing.T) { - for _, test := range []resolver.Target{ - {Scheme: "dns", Authority: "", Endpoint: "google.com"}, - {Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}, - {Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}, - {Scheme: "passthrough", Authority: "", Endpoint: "/unix/socket/address"}, - } { - str := test.Scheme + "://" + test.Authority + "/" + test.Endpoint - got := ParseTarget(str, false) - if got != test { - t.Errorf("ParseTarget(%q, false) = %+v, want %+v", str, got, test) - } - got = ParseTarget(str, true) - if got != test { - t.Errorf("ParseTarget(%q, true) = %+v, want %+v", str, got, test) - } - } -} - -func TestParseTargetString(t *testing.T) { - for _, test := range []struct { - targetStr string - want resolver.Target - wantWithDialer resolver.Target - }{ - {targetStr: "", want: resolver.Target{Scheme: "", Authority: "", Endpoint: ""}}, - {targetStr: ":///", want: resolver.Target{Scheme: "", Authority: "", Endpoint: ""}}, - {targetStr: "a:///", want: resolver.Target{Scheme: "a", Authority: "", Endpoint: ""}}, - {targetStr: "://a/", want: resolver.Target{Scheme: "", Authority: "a", Endpoint: ""}}, - {targetStr: ":///a", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a"}}, - {targetStr: "a://b/", want: resolver.Target{Scheme: "a", Authority: "b", Endpoint: ""}}, - {targetStr: "a:///b", want: resolver.Target{Scheme: "a", Authority: "", Endpoint: "b"}}, - {targetStr: "://a/b", want: resolver.Target{Scheme: "", Authority: "a", Endpoint: "b"}}, - {targetStr: "a://b/c", want: resolver.Target{Scheme: "a", Authority: "b", Endpoint: "c"}}, - {targetStr: "dns:///google.com", want: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com"}}, - {targetStr: "dns://a.server.com/google.com", want: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}}, - 
{targetStr: "dns://a.server.com/google.com/?a=b", want: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}}, - - {targetStr: "/", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "/"}}, - {targetStr: "google.com", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "google.com"}}, - {targetStr: "google.com/?a=b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "google.com/?a=b"}}, - {targetStr: "/unix/socket/address", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "/unix/socket/address"}}, - - // If we can only parse part of the target. - {targetStr: "://", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "://"}}, - {targetStr: "unix://domain", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "unix://domain"}}, - {targetStr: "unix://a/b/c", want: resolver.Target{Scheme: "unix", Authority: "a", Endpoint: "/b/c"}}, - {targetStr: "a:b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a:b"}}, - {targetStr: "a/b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a/b"}}, - {targetStr: "a:/b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a:/b"}}, - {targetStr: "a//b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a//b"}}, - {targetStr: "a://b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a://b"}}, - - // Unix cases without custom dialer. - // unix:[local_path], unix:[/absolute], and unix://[/absolute] have different - // behaviors with a custom dialer, to prevent behavior changes with custom dialers. 
- {targetStr: "unix:a/b/c", want: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, wantWithDialer: resolver.Target{Scheme: "", Authority: "", Endpoint: "unix:a/b/c"}}, - {targetStr: "unix:/a/b/c", want: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}, wantWithDialer: resolver.Target{Scheme: "", Authority: "", Endpoint: "unix:/a/b/c"}}, - {targetStr: "unix:///a/b/c", want: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}}, - - {targetStr: "unix-abstract:a/b/c", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, - {targetStr: "unix-abstract:a b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b"}}, - {targetStr: "unix-abstract:a:b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b"}}, - {targetStr: "unix-abstract:a-b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a-b"}}, - {targetStr: "unix-abstract:/ a///://::!@#$%^&*()b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/ a///://::!@#$%^&*()b"}}, - {targetStr: "unix-abstract:passthrough:abc", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "passthrough:abc"}}, - {targetStr: "unix-abstract:unix:///abc", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc"}}, - {targetStr: "unix-abstract:///a/b/c", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/a/b/c"}}, - {targetStr: "unix-abstract://authority/a/b/c", want: resolver.Target{Scheme: "unix-abstract", Authority: "authority", Endpoint: "/a/b/c"}}, - {targetStr: "unix-abstract:///", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/"}}, - {targetStr: "unix-abstract://authority", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "//authority"}}, - - {targetStr: "passthrough:///unix:///a/b/c", want: resolver.Target{Scheme: "passthrough", Authority: "", 
Endpoint: "unix:///a/b/c"}}, - } { - got := ParseTarget(test.targetStr, false) - if got != test.want { - t.Errorf("ParseTarget(%q, false) = %+v, want %+v", test.targetStr, got, test.want) - } - wantWithDialer := test.wantWithDialer - if wantWithDialer == (resolver.Target{}) { - wantWithDialer = test.want - } - got = ParseTarget(test.targetStr, true) - if got != wantWithDialer { - t.Errorf("ParseTarget(%q, true) = %+v, want %+v", test.targetStr, got, wantWithDialer) - } - } -} From 6ff68b489ecba2884aff152835d745389598935a Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 23 Sep 2021 14:40:18 -0700 Subject: [PATCH 258/998] channelz: recommend using admin.Register instead (#4797) --- admin/admin.go | 1 + channelz/service/service.go | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/admin/admin.go b/admin/admin.go index 5212250b7d4e..803a4b935340 100644 --- a/admin/admin.go +++ b/admin/admin.go @@ -20,6 +20,7 @@ // administration services to a gRPC server. The services registered are: // // - Channelz: https://github.com/grpc/proposal/blob/master/A14-channelz.md +// // - CSDS: https://github.com/grpc/proposal/blob/master/A40-csds-support.md // // Experimental diff --git a/channelz/service/service.go b/channelz/service/service.go index c1639de8b267..9e325376f6cd 100644 --- a/channelz/service/service.go +++ b/channelz/service/service.go @@ -43,6 +43,10 @@ func init() { var logger = grpclog.Component("channelz") // RegisterChannelzServiceToServer registers the channelz service to the given server. +// +// Note: it is preferred to use the admin API +// (https://pkg.go.dev/google.golang.org/grpc/admin#Register) instead to +// register Channelz and other administrative services. 
func RegisterChannelzServiceToServer(s grpc.ServiceRegistrar) { channelzgrpc.RegisterChannelzServer(s, newCZServer()) } From 11437f66f20f3473e09fcf3fb5c23d4388af936f Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 24 Sep 2021 15:29:25 -0700 Subject: [PATCH 259/998] test: add option to make httpServer wait for END_STREAM; fix RetryStats race (#4811) --- internal/transport/http2_client.go | 2 +- internal/transport/http2_server.go | 2 +- test/end2end_test.go | 28 ++++++++++++++++++++++++---- test/retry_test.go | 15 ++++----------- 4 files changed, 30 insertions(+), 17 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 75586307435a..ea3babb118bd 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1073,7 +1073,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. - if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 3d1d5c1d4cdb..f1594d663af3 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -734,7 +734,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { s.write(recvMsg{buffer: buffer}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. 
s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) diff --git a/test/end2end_test.go b/test/end2end_test.go index bce752701dab..e84b9e99170b 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7352,8 +7352,11 @@ type httpServerResponse struct { } type httpServer struct { - refuseStream func(uint32) bool - responses []httpServerResponse + // If waitForEndStream is set, wait for the client to send a frame with end + // stream in it before sending a response/refused stream. + waitForEndStream bool + refuseStream func(uint32) bool + responses []httpServerResponse } func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error { @@ -7416,8 +7419,25 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { } return } - if hframe, ok := frame.(*http2.HeadersFrame); ok { - sid = hframe.Header().StreamID + sid = 0 + switch fr := frame.(type) { + case *http2.HeadersFrame: + // Respond after this if we are not waiting for an end + // stream or if this frame ends it. + if !s.waitForEndStream || fr.StreamEnded() { + sid = fr.Header().StreamID + } + + case *http2.DataFrame: + // Respond after this if we were waiting for an end stream + // and this frame ends it. (If we were not waiting for an + // end stream, this stream was already responded to when + // the headers were received.) 
+ if s.waitForEndStream && fr.StreamEnded() { + sid = fr.Header().StreamID + } + } + if sid != 0 { if s.refuseStream == nil || !s.refuseStream(sid) { break } diff --git a/test/retry_test.go b/test/retry_test.go index dcd3a2158db8..7f068d79f44d 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -517,6 +517,7 @@ func (s) TestRetryStats(t *testing.T) { } defer lis.Close() server := &httpServer{ + waitForEndStream: true, responses: []httpServerResponse{{ trailers: [][]string{{ ":status", "200", @@ -588,13 +589,6 @@ func (s) TestRetryStats(t *testing.T) { &stats.End{}, } - // There is a race between noticing the RST_STREAM during the first RPC - // attempt and writing the payload. If we detect that the client did not - // send the OutPayload, we remove it from want. - if _, ok := handler.s[2].(*stats.End); ok { - want = append(want[:2], want[3:]...) - } - toString := func(ss []stats.RPCStats) (ret []string) { for _, s := range ss { ret = append(ret, fmt.Sprintf("%T - %v", s, s)) @@ -612,8 +606,7 @@ func (s) TestRetryStats(t *testing.T) { // There is a race between receiving the payload (triggered by the // application / gRPC library) and receiving the trailer (triggered at the // transport layer). Adjust the received stats accordingly if necessary. - // Note: we measure from the end of the RPCStats due to the race above. - tIdx, pIdx := len(handler.s)-3, len(handler.s)-2 + const tIdx, pIdx = 13, 14 _, okT := handler.s[tIdx].(*stats.InTrailer) _, okP := handler.s[pIdx].(*stats.InPayload) if okT && okP { @@ -654,8 +647,8 @@ func (s) TestRetryStats(t *testing.T) { } // Validate timings between last Begin and preceding End. 
- end := handler.s[len(handler.s)-8].(*stats.End) - begin := handler.s[len(handler.s)-7].(*stats.Begin) + end := handler.s[8].(*stats.End) + begin := handler.s[9].(*stats.Begin) diff := begin.BeginTime.Sub(end.EndTime) if diff < 10*time.Millisecond || diff > 50*time.Millisecond { t.Fatalf("pushback time before final attempt = %v; want ~10ms", diff) From 689f7b154ee8a3f3ab6a6107ff7ad78189baae06 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 27 Sep 2021 16:55:46 -0400 Subject: [PATCH 260/998] transport: logic specified in A41 to support RBAC xDS HTTP Filter (#4803) * transport: logic specified in A41 to support RBAC xDS HTTP Filter --- internal/transport/controlbuf.go | 8 +- internal/transport/http2_server.go | 40 ++++ internal/transport/transport_test.go | 336 ++++++++++++++++++++++----- test/end2end_test.go | 123 ++++++++++ 4 files changed, 442 insertions(+), 65 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 45532f8aeaab..8394d252df03 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -133,6 +133,7 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM type earlyAbortStream struct { + httpStatus uint32 streamID uint32 contentSubtype string status *status.Status @@ -771,9 +772,12 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if l.side == clientSide { return errors.New("earlyAbortStream not handled on client") } - + // In case the caller forgets to set the http status, default to 200. 
+ if eas.httpStatus == 0 { + eas.httpStatus = 200 + } headerFields := []hpack.HeaderField{ - {Name: ":status", Value: "200"}, + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index f1594d663af3..3e4655e8e65d 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -390,6 +390,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if timeout, err = decodeTimeout(hf.Value); err != nil { headerError = true } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + } + headerError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -404,6 +411,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } } + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 400, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + }) + return false + } + if !isGRPC || headerError { t.controlBuf.put(&cleanupStream{ streamID: streamID, @@ -414,6 +440,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return false } + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone @@ -494,6 +533,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 28cace0ba5d0..4e561a73c4cb 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1663,81 +1663,291 @@ func (s) TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) { } } -// If the client sends an HTTP/2 request with a :method header with a value other than POST, as specified in -// the gRPC over HTTP/2 specification, the server should close the stream. 
-func (s) TestServerWithClientSendingWrongMethod(t *testing.T) { - server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) - defer server.stop() - // Create a client directly to not couple what you can send to API of http2_client.go. - mconn, err := net.Dial("tcp", server.lis.Addr().String()) - if err != nil { - t.Fatalf("Client failed to dial: %v", err) +// TestHeadersCausingStreamError tests headers that should cause a stream protocol +// error, which would end up with a RST_STREAM being sent to the client and also +// the server closing the stream. +func (s) TestHeadersCausingStreamError(t *testing.T) { + tests := []struct { + name string + headers []struct { + name string + values []string + } + }{ + // If the client sends an HTTP/2 request with a :method header with a + // value other than POST, as specified in the gRPC over HTTP/2 + // specification, the server should close the stream. + { + name: "Client Sending Wrong Method", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"PUT"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + }, + }, + // "Transports must consider requests containing the Connection header + // as malformed" - A41 Malformed requests map to a stream error of type + // PROTOCOL_ERROR. + { + name: "Connection header present", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "connection", values: []string{"not-supported"}}, + }, + }, + // multiple :authority or multiple Host headers would make the eventual + // :authority ambiguous as per A41. 
Since these headers won't have a + // content-type that corresponds to a grpc-client, the server should + // simply write a RST_STREAM to the wire. + { + // Note: multiple authority headers are handled by the framer + // itself, which will cause a stream error. Thus, it will never get + // to operateHeaders with the check in operateHeaders for stream + // error, but the server transport will still send a stream error. + name: "Multiple authority headers", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost", "localhost2"}}, + {name: "host", values: []string{"localhost"}}, + }, + }, } - defer mconn.Close() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) + defer server.stop() + // Create a client directly to not tie what you can send to API of + // http2_client.go (i.e. control headers being sent). + mconn, err := net.Dial("tcp", server.lis.Addr().String()) + if err != nil { + t.Fatalf("Client failed to dial: %v", err) + } + defer mconn.Close() - if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { - t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) + if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { + t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) + } + + framer := http2.NewFramer(mconn, mconn) + if err := framer.WriteSettings(); err != nil { + t.Fatalf("Error while writing settings: %v", err) + } + + // result chan indicates that reader received a RSTStream from server. + // An error will be passed on it if any other frame is received. + result := testutils.NewChannel() + + // Launch a reader goroutine. 
+ go func() { + for { + frame, err := framer.ReadFrame() + if err != nil { + return + } + switch frame := frame.(type) { + case *http2.SettingsFrame: + // Do nothing. A settings frame is expected from server preface. + case *http2.RSTStreamFrame: + if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeProtocol { + // Client only created a single stream, so RST Stream should be for that single stream. + result.Send(fmt.Errorf("RST stream received with streamID: %d and code %v, want streamID: 1 and code: http.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))) + } + // Records that client successfully received RST Stream frame. + result.Send(nil) + return + default: + // The server should send nothing but a single RST Stream frame. + result.Send(errors.New("the client received a frame other than RST Stream")) + } + } + }() + + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + + // Needs to build headers deterministically to conform to gRPC over + // HTTP/2 spec. 
+ for _, header := range test.headers { + for _, value := range header.values { + if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil { + t.Fatalf("Error while encoding header: %v", err) + } + } + } + + if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { + t.Fatalf("Error while writing headers: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r, err := result.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from channel: %v", err) + } + if r != nil { + t.Fatalf("want nil, got %v", r) + } + }) } +} - framer := http2.NewFramer(mconn, mconn) - if err := framer.WriteSettings(); err != nil { - t.Fatalf("Error while writing settings: %v", err) +// TestHeadersMultipleHosts tests that a request with multiple hosts gets +// rejected with HTTP Status 400 and gRPC status Internal, regardless of whether +// the client is speaking gRPC or not. +func (s) TestHeadersMultipleHosts(t *testing.T) { + tests := []struct { + name string + headers []struct { + name string + values []string + } + }{ + // Note: multiple authority headers are handled by the framer itself, + // which will cause a stream error. Thus, it will never get to + // operateHeaders with the check in operateHeaders for possible grpc-status sent back. + + // multiple :authority or multiple Host headers would make the eventual + // :authority ambiguous as per A41. This takes precedence even over the + // fact a request is non grpc. All of these requests should be rejected + // with grpc-status Internal. 
+ { + name: "Multiple host headers non grpc", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "host", values: []string{"localhost", "localhost2"}}, + }, + }, + { + name: "Multiple host headers grpc", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "host", values: []string{"localhost", "localhost2"}}, + }, + }, } + for _, test := range tests { + server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) + defer server.stop() + // Create a client directly to not tie what you can send to API of + // http2_client.go (i.e. control headers being sent). + mconn, err := net.Dial("tcp", server.lis.Addr().String()) + if err != nil { + t.Fatalf("Client failed to dial: %v", err) + } + defer mconn.Close() - // success chan indicates that reader received a RSTStream from server. - // An error will be passed on it if any other frame is received. - success := testutils.NewChannel() + if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { + t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) + } - // Launch a reader goroutine. - go func() { - for { - frame, err := framer.ReadFrame() - if err != nil { - return + framer := http2.NewFramer(mconn, mconn) + framer.ReadMetaHeaders = hpack.NewDecoder(4096, nil) + if err := framer.WriteSettings(); err != nil { + t.Fatalf("Error while writing settings: %v", err) + } + + // result chan indicates that reader received a Headers Frame with + // desired grpc status and message from server. An error will be passed + // on it if any other frame is received. 
+ result := testutils.NewChannel() + + // Launch a reader goroutine. + go func() { + for { + frame, err := framer.ReadFrame() + if err != nil { + return + } + switch frame := frame.(type) { + case *http2.SettingsFrame: + // Do nothing. A settings frame is expected from server preface. + case *http2.MetaHeadersFrame: + var status, grpcStatus, grpcMessage string + for _, header := range frame.Fields { + if header.Name == ":status" { + status = header.Value + } + if header.Name == "grpc-status" { + grpcStatus = header.Value + } + if header.Name == "grpc-message" { + grpcMessage = header.Value + } + } + if status != "400" { + result.Send(fmt.Errorf("incorrect HTTP Status got %v, want 200", status)) + return + } + if grpcStatus != "13" { // grpc status code internal + result.Send(fmt.Errorf("incorrect gRPC Status got %v, want 13", grpcStatus)) + return + } + if !strings.Contains(grpcMessage, "both must only have 1 value as per HTTP/2 spec") { + result.Send(fmt.Errorf("incorrect gRPC message")) + return + } + + // Records that client successfully received a HeadersFrame + // with expected Trailers-Only response. + result.Send(nil) + return + default: + // The server should send nothing but a single Settings and Headers frame. + result.Send(errors.New("the client received a frame other than Settings or Headers")) + } } - switch frame := frame.(type) { - case *http2.SettingsFrame: - // Do nothing. A settings frame is expected from server preface. - case *http2.RSTStreamFrame: - if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeProtocol { - // Client only created a single stream, so RST Stream should be for that single stream. 
- t.Errorf("RST stream received with streamID: %d and code %v, want streamID: 1 and code: http.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode)) + }() + + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + + // Needs to build headers deterministically to conform to gRPC over + // HTTP/2 spec. + for _, header := range test.headers { + for _, value := range header.values { + if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil { + t.Fatalf("Error while encoding header: %v", err) } - // Records that client successfully received RST Stream frame. - success.Send(nil) - return - default: - // The server should send nothing but a single RST Stream frame. - success.Send(errors.New("The client received a frame other than RST Stream")) } } - }() - - // Done with HTTP/2 setup - now create a stream with a bad method header. - var buf bytes.Buffer - henc := hpack.NewEncoder(&buf) - // Method is required to be POST in a gRPC call. - if err := henc.WriteField(hpack.HeaderField{Name: ":method", Value: "PUT"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) - } - // Have the rest of the headers be ok and within the gRPC over HTTP/2 spec. 
- if err := henc.WriteField(hpack.HeaderField{Name: ":path", Value: "foo"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) - } - if err := henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "localhost"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) - } - if err := henc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) - } - if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { - t.Fatalf("Error while writing headers: %v", err) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if e, err := success.Receive(ctx); e != nil || err != nil { - t.Fatalf("Error in frame server should send: %v. Error receiving from channel: %v", e, err) + if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { + t.Fatalf("Error while writing headers: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r, err := result.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from channel: %v", err) + } + if r != nil { + t.Fatalf("want nil, got %v", r) + } } } diff --git a/test/end2end_test.go b/test/end2end_test.go index e84b9e99170b..9d7fcb23206c 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7867,3 +7867,126 @@ func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) { t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF) } } + +// unaryInterceptorVerifyAuthority verifies there is an unambiguous :authority +// once the request gets to an interceptor. An unambiguous :authority is defined +// as at most a single :authority header, and no host header according to A41. 
+func unaryInterceptorVerifyAuthority(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.NotFound, "metadata was not in context") + } + authority := md.Get(":authority") + if len(authority) > 1 { // Should be an unambiguous authority by the time it gets to interceptor. + return nil, status.Error(codes.NotFound, ":authority value had more than one value") + } + // Host header shouldn't be present by the time it gets to the interceptor + // level (should either be renamed to :authority or explicitly deleted). + host := md.Get("host") + if len(host) != 0 { + return nil, status.Error(codes.NotFound, "host header should not be present in metadata") + } + // Pass back the authority for verification on client - NotFound so + // grpc-message will be available to read for verification. + if len(authority) == 0 { + // Represent no :authority header present with an empty string. + return nil, status.Error(codes.NotFound, "") + } + return nil, status.Error(codes.NotFound, authority[0]) +} + +// TestAuthorityHeader tests that the eventual :authority that reaches the grpc +// layer is unambiguous due to logic added in A41. +func (s) TestAuthorityHeader(t *testing.T) { + tests := []struct { + name string + headers []string + wantAuthority string + }{ + // "If :authority is missing, Host must be renamed to :authority." - A41 + { + name: "Missing :authority", + // Codepath triggered by incoming headers with no authority but with + // a host. + headers: []string{ + ":method", "POST", + ":path", "/grpc.testing.TestService/UnaryCall", + "content-type", "application/grpc", + "te", "trailers", + "host", "localhost", + }, + wantAuthority: "localhost", + }, + { + name: "Missing :authority and host", + // Codepath triggered by incoming headers with no :authority and no + // host. 
+ headers: []string{ + ":method", "POST", + ":path", "/grpc.testing.TestService/UnaryCall", + "content-type", "application/grpc", + "te", "trailers", + }, + wantAuthority: "", + }, + // "If :authority is present, Host must be discarded." - A41 + { + name: ":authority and host present", + // Codepath triggered by incoming headers with both an authority + // header and a host header. + headers: []string{ + ":method", "POST", + ":path", "/grpc.testing.TestService/UnaryCall", + ":authority", "localhost", + "content-type", "application/grpc", + "host", "localhost2", + }, + wantAuthority: "localhost", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + te := newTest(t, tcpClearRREnv) + ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }} + te.unaryServerInt = unaryInterceptorVerifyAuthority + te.startServer(ts) + defer te.tearDown() + success := testutils.NewChannel() + te.withServerTester(func(st *serverTester) { + st.writeHeaders(http2.HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(test.headers...), + EndStream: false, + EndHeaders: true, + }) + st.writeData(1, true, []byte{0, 0, 0, 0, 0}) + + for { + frame := st.wantAnyFrame() + f, ok := frame.(*http2.MetaHeadersFrame) + if !ok { + continue + } + for _, header := range f.Fields { + if header.Name == "grpc-message" { + success.Send(header.Value) + return + } + } + } + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotAuthority, err := success.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from channel: %v", err) + } + if gotAuthority != test.wantAuthority { + t.Fatalf("gotAuthority: %v, wantAuthority %v", gotAuthority, test.wantAuthority) + } + }) + } +} From 4555155af248cab3368e5c5e650bd216366c8bb5 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> 
Date: Mon, 27 Sep 2021 17:36:16 -0400 Subject: [PATCH 261/998] xds: Small changes at xDS RBAC Layer (#4815) * xds: Small changes at xDS RBAC Layer --- internal/xds/rbac/rbac_engine.go | 5 + xds/internal/httpfilter/rbac/rbac.go | 26 ++++- .../test/xds_server_integration_test.go | 102 ++++++++++++++++++ 3 files changed, 132 insertions(+), 1 deletion(-) diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index 1642d26b1ae8..775ed622d578 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -148,6 +148,11 @@ func newRPCData(ctx context.Context) (*rpcData, error) { if !ok { return nil, errors.New("missing metadata in incoming context") } + // ":method can be hard-coded to POST if unavailable" - A41 + md[":method"] = []string{"POST"} + // "If the transport exposes TE in Metadata, then RBAC must special-case the + // header to treat it as not present." - A41 + delete(md, "TE") pi, ok := peer.FromContext(ctx) if !ok { diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index 98bcd73ffa41..91700ca733d5 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -106,7 +106,6 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { } } } - return config{config: rbacCfg}, nil } @@ -174,6 +173,31 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http return nil, nil } + // "Envoy aliases :authority and Host in its header map implementation, so + // they should be treated equivalent for the RBAC matchers; there must be no + // behavior change depending on which of the two header names is used in the + // RBAC policy." - A41. Loop through config's principals and policies, change + // any header matcher with value "host" to :authority", as that is what + // grpc-go shifts both headers to in transport layer. 
+ for _, policy := range icfg.Rules.GetPolicies() { + for _, principal := range policy.Principals { + if principal.GetHeader() != nil { + name := principal.GetHeader().GetName() + if name == "host" { + principal.GetHeader().Name = ":authority" + } + } + } + for _, permission := range policy.Permissions { + if permission.GetHeader() != nil { + name := permission.GetHeader().GetName() + if name == "host" { + permission.GetHeader().Name = ":authority" + } + } + } + } + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{icfg.Rules}) if err != nil { return nil, fmt.Errorf("error constructing matching engine: %v", err) diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index e95186ae0fb1..65abc90e19a8 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -788,6 +788,108 @@ func (s) TestRBACHTTPFilter(t *testing.T) { wantStatusEmptyCall: codes.OK, wantStatusUnaryCall: codes.OK, }, + // The two tests below test that configuring the xDS RBAC HTTP Filter + // with :authority and host header matchers end up being logically + // equivalent. This represents functionality from this line in A41 - + // "As documented for HeaderMatcher, Envoy aliases :authority and Host + // in its header map implementation, so they should be treated + // equivalent for the RBAC matchers; there must be no behavior change + // depending on which of the two header names is used in the RBAC + // policy." + + // This test tests an xDS RBAC Filter with an :authority header matcher. 
+ { + name: "match-on-authority", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "match-on-authority": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":authority", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "my-service-fallback"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + // This test tests that configuring an xDS RBAC Filter with a host + // header matcher has the same behavior as if it was configured with + // :authority. Since host and authority are aliased, this should still + // continue to match on incoming RPC's :authority, just as the test + // above. + { + name: "match-on-host", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "match-on-authority": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: "host", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "my-service-fallback"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + // This test tests that the RBAC HTTP Filter hard codes the :method + // header to POST. Since the RBAC Configuration says to deny every RPC + // with a method :POST, every RPC tried should be denied. 
+ { + name: "deny-post", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "post-method": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "POST"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.PermissionDenied, + wantStatusUnaryCall: codes.PermissionDenied, + }, + // This test tests that RBAC ignores the TE: trailers header (which is + // hardcoded in http2_client.go for every RPC). Since the RBAC + // Configuration says to only ALLOW RPC's with a TE: Trailers, every RPC + // tried should be denied. + { + name: "allow-only-te", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "post-method": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: "TE", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "trailers"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.PermissionDenied, + wantStatusUnaryCall: codes.PermissionDenied, + }, } for _, test := range tests { From 710419d32bfd469509bae5b73274f5825ad13554 Mon Sep 17 00:00:00 2001 From: ZhenLian Date: Mon, 27 Sep 2021 16:42:32 -0700 Subject: [PATCH 262/998] advancedtls: add revocation support to client/server options (#4781) --- security/advancedtls/advancedtls.go | 71 ++++++++++++------- .../advancedtls_integration_test.go | 4 +- security/advancedtls/advancedtls_test.go | 36 +++++++++- 3 files changed, 82 insertions(+), 29 deletions(-) diff --git a/security/advancedtls/advancedtls.go b/security/advancedtls/advancedtls.go index 
534a3ed417ba..1892c5ed7661 100644 --- a/security/advancedtls/advancedtls.go +++ b/security/advancedtls/advancedtls.go @@ -181,6 +181,9 @@ type ClientOptions struct { RootOptions RootCertificateOptions // VType is the verification type on the client side. VType VerificationType + // RevocationConfig is the configurations for certificate revocation checks. + // It could be nil if such checks are not needed. + RevocationConfig *RevocationConfig } // ServerOptions contains the fields needed to be filled by the server. @@ -199,6 +202,9 @@ type ServerOptions struct { RequireClientCert bool // VType is the verification type on the server side. VType VerificationType + // RevocationConfig is the configurations for certificate revocation checks. + // It could be nil if such checks are not needed. + RevocationConfig *RevocationConfig } func (o *ClientOptions) config() (*tls.Config, error) { @@ -356,11 +362,12 @@ func (o *ServerOptions) config() (*tls.Config, error) { // advancedTLSCreds is the credentials required for authenticating a connection // using TLS. 
type advancedTLSCreds struct { - config *tls.Config - verifyFunc CustomVerificationFunc - getRootCAs func(params *GetRootCAsParams) (*GetRootCAsResults, error) - isClient bool - vType VerificationType + config *tls.Config + verifyFunc CustomVerificationFunc + getRootCAs func(params *GetRootCAsParams) (*GetRootCAsResults, error) + isClient bool + vType VerificationType + revocationConfig *RevocationConfig } func (c advancedTLSCreds) Info() credentials.ProtocolInfo { @@ -451,6 +458,14 @@ func buildVerifyFunc(c *advancedTLSCreds, return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { chains := verifiedChains var leafCert *x509.Certificate + rawCertList := make([]*x509.Certificate, len(rawCerts)) + for i, asn1Data := range rawCerts { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + return err + } + rawCertList[i] = cert + } if c.vType == CertAndHostVerification || c.vType == CertVerification { // perform possible trust credential reloading and certificate check rootCAs := c.config.RootCAs @@ -469,14 +484,6 @@ func buildVerifyFunc(c *advancedTLSCreds, rootCAs = results.TrustCerts } // Verify peers' certificates against RootCAs and get verifiedChains. - certs := make([]*x509.Certificate, len(rawCerts)) - for i, asn1Data := range rawCerts { - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - return err - } - certs[i] = cert - } keyUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} if !c.isClient { keyUsages = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} @@ -487,7 +494,7 @@ func buildVerifyFunc(c *advancedTLSCreds, Intermediates: x509.NewCertPool(), KeyUsages: keyUsages, } - for _, cert := range certs[1:] { + for _, cert := range rawCertList[1:] { opts.Intermediates.AddCert(cert) } // Perform default hostname check if specified. 
@@ -501,11 +508,21 @@ func buildVerifyFunc(c *advancedTLSCreds, opts.DNSName = parsedName } var err error - chains, err = certs[0].Verify(opts) + chains, err = rawCertList[0].Verify(opts) if err != nil { return err } - leafCert = certs[0] + leafCert = rawCertList[0] + } + // Perform certificate revocation check if specified. + if c.revocationConfig != nil { + verifiedChains := chains + if verifiedChains == nil { + verifiedChains = [][]*x509.Certificate{rawCertList} + } + if err := CheckChainRevocation(verifiedChains, *c.revocationConfig); err != nil { + return err + } } // Perform custom verification check if specified. if c.verifyFunc != nil { @@ -529,11 +546,12 @@ func NewClientCreds(o *ClientOptions) (credentials.TransportCredentials, error) return nil, err } tc := &advancedTLSCreds{ - config: conf, - isClient: true, - getRootCAs: o.RootOptions.GetRootCertificates, - verifyFunc: o.VerifyPeer, - vType: o.VType, + config: conf, + isClient: true, + getRootCAs: o.RootOptions.GetRootCertificates, + verifyFunc: o.VerifyPeer, + vType: o.VType, + revocationConfig: o.RevocationConfig, } tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) return tc, nil @@ -547,11 +565,12 @@ func NewServerCreds(o *ServerOptions) (credentials.TransportCredentials, error) return nil, err } tc := &advancedTLSCreds{ - config: conf, - isClient: false, - getRootCAs: o.RootOptions.GetRootCertificates, - verifyFunc: o.VerifyPeer, - vType: o.VType, + config: conf, + isClient: false, + getRootCAs: o.RootOptions.GetRootCertificates, + verifyFunc: o.VerifyPeer, + vType: o.VType, + revocationConfig: o.RevocationConfig, } tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) return tc, nil diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index 4bb9e645b0a1..8cddfc234b12 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ 
b/security/advancedtls/advancedtls_integration_test.go @@ -380,7 +380,7 @@ func (s) TestEnd2End(t *testing.T) { } clientTLSCreds, err := NewClientCreds(clientOptions) if err != nil { - t.Fatalf("clientTLSCreds failed to create") + t.Fatalf("clientTLSCreds failed to create: %v", err) } // ------------------------Scenario 1------------------------------------ // stage = 0, initial connection should succeed @@ -796,7 +796,7 @@ func (s) TestDefaultHostNameCheck(t *testing.T) { } clientTLSCreds, err := NewClientCreds(clientOptions) if err != nil { - t.Fatalf("clientTLSCreds failed to create") + t.Fatalf("clientTLSCreds failed to create: %v", err) } shouldFail := false if test.expectError { diff --git a/security/advancedtls/advancedtls_test.go b/security/advancedtls/advancedtls_test.go index 64da81a1700c..7092d46e60fa 100644 --- a/security/advancedtls/advancedtls_test.go +++ b/security/advancedtls/advancedtls_test.go @@ -27,10 +27,12 @@ import ( "net" "testing" + lru "github.com/hashicorp/golang-lru" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/security/advancedtls/internal/testutils" + "google.golang.org/grpc/security/advancedtls/testdata" ) type s struct { @@ -339,6 +341,10 @@ func (s) TestClientServerHandshake(t *testing.T) { getRootCAsForServerBad := func(params *GetRootCAsParams) (*GetRootCAsResults, error) { return nil, fmt.Errorf("bad root certificate reloading") } + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } for _, test := range []struct { desc string clientCert []tls.Certificate @@ -349,6 +355,7 @@ func (s) TestClientServerHandshake(t *testing.T) { clientVType VerificationType clientRootProvider certprovider.Provider clientIdentityProvider certprovider.Provider + clientRevocationConfig *RevocationConfig clientExpectHandshakeError bool serverMutualTLS bool serverCert []tls.Certificate @@ -359,6 
+366,7 @@ func (s) TestClientServerHandshake(t *testing.T) { serverVType VerificationType serverRootProvider certprovider.Provider serverIdentityProvider certprovider.Provider + serverRevocationConfig *RevocationConfig serverExpectError bool }{ // Client: nil setting except verifyFuncGood @@ -642,6 +650,30 @@ func (s) TestClientServerHandshake(t *testing.T) { serverRootProvider: fakeProvider{isClient: false}, serverVType: CertVerification, }, + // Client: set valid credentials with the revocation config + // Server: set valid credentials with the revocation config + // Expected Behavior: success, because non of the certificate chains sent in the connection are revoked + { + desc: "Client sets peer cert, reload root function with verifyFuncGood; Server sets peer cert, reload root function; mutualTLS", + clientCert: []tls.Certificate{cs.ClientCert1}, + clientGetRoot: getRootCAsForClient, + clientVerifyFunc: clientVerifyFuncGood, + clientVType: CertVerification, + clientRevocationConfig: &RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: true, + Cache: cache, + }, + serverMutualTLS: true, + serverCert: []tls.Certificate{cs.ServerCert1}, + serverGetRoot: getRootCAsForServer, + serverVType: CertVerification, + serverRevocationConfig: &RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: true, + Cache: cache, + }, + }, } { test := test t.Run(test.desc, func(t *testing.T) { @@ -665,6 +697,7 @@ func (s) TestClientServerHandshake(t *testing.T) { RequireClientCert: test.serverMutualTLS, VerifyPeer: test.serverVerifyFunc, VType: test.serverVType, + RevocationConfig: test.serverRevocationConfig, } go func(done chan credentials.AuthInfo, lis net.Listener, serverOptions *ServerOptions) { serverRawConn, err := lis.Accept() @@ -706,7 +739,8 @@ func (s) TestClientServerHandshake(t *testing.T) { GetRootCertificates: test.clientGetRoot, RootProvider: test.clientRootProvider, }, - VType: test.clientVType, + VType: test.clientVType, + 
RevocationConfig: test.clientRevocationConfig, } clientTLS, err := NewClientCreds(clientOptions) if err != nil { From 08927214a41e3a2d937658689167363942c06426 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 28 Sep 2021 10:11:52 -0700 Subject: [PATCH 263/998] xds/rds: NACK unknown route action cluster specifier (#4788) --- xds/internal/test/xds_server_integration_test.go | 12 +++++++++--- xds/internal/xdsclient/rds_test.go | 14 ++++++++++++++ xds/internal/xdsclient/xds.go | 2 ++ 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 65abc90e19a8..d39b728b1265 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -439,7 +439,9 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { // "Fully-qualified RPC method name with leading slash. Same as :path header". }, // Incorrect Action, so RPC's that match this route should get denied. - Action: &v3routepb.Route_Route{}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: ""}}, + }, }, // Another routing rule that can be selectively triggered based on incoming RPC. { @@ -447,7 +449,9 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/UnaryCall"}, }, // Wrong action (!Non_Forwarding_Action) so RPC's that match this route should get denied. - Action: &v3routepb.Route_Route{}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: ""}}, + }, }, // Another routing rule that can be selectively triggered based on incoming RPC. 
{ @@ -455,7 +459,9 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/StreamingInputCall"}, }, // Wrong action (!Non_Forwarding_Action) so RPC's that match this route should get denied. - Action: &v3routepb.Route_Route{}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: ""}}, + }, }, // Not matching route, this is be able to get invoked logically (i.e. doesn't have to match the Route configurations above). }}, diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index f5f906375c7c..ca87c0b97715 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -1231,6 +1231,20 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, wantErr: true, }, + { + name: "unsupported cluster specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{}}}, + }, + }, + wantErr: true, + }, { name: "default totalWeight is 100 in weighted clusters action", routes: []*v3routepb.Route{ diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 6b172ac8aa2a..28070d88cd5f 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -564,6 +564,8 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, } case *v3routepb.RouteAction_ClusterHeader: continue + default: + return nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) } msd := action.GetMaxStreamDuration() From 75f1d4b986342c24fab707fc6be37c51f9f8ee50 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 28 Sep 2021 12:20:57 -0700 Subject: [PATCH 264/998] transport: call stats handler for trailers before 
closeStream (#4816) --- internal/transport/http2_client.go | 39 +++++++++++++++--------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index ea3babb118bd..dc369212dc5a 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1403,26 +1403,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) - } - } - }() // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1444,6 +1424,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } From 34df1b42aecf459d913a1b6aaf835e1d4eea22d3 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 28 Sep 2021 15:27:00 -0400 Subject: [PATCH 265/998] xds: Small RBAC Changes defined in A41 (#4818) * xds: Small RBAC Changes defined in A41 --- internal/xds/rbac/matchers.go | 54 ++++++----- internal/xds/rbac/rbac_engine.go | 6 +- 
internal/xds/rbac/rbac_engine_test.go | 90 ++++++++++++++++++- xds/internal/httpfilter/rbac/rbac.go | 6 ++ .../test/xds_server_integration_test.go | 24 +++++ 5 files changed, 151 insertions(+), 29 deletions(-) diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 4cea3ebe9f78..28dabf465919 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -106,7 +106,9 @@ func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, err } matchers = append(matchers, m) case *v3rbacpb.Permission_DestinationIp: - m, err := newDestinationIPMatcher(permission.GetDestinationIp()) + // Due to this being on server side, the destination IP is the local + // IP. + m, err := newLocalIPMatcher(permission.GetDestinationIp()) if err != nil { return nil, err } @@ -155,7 +157,7 @@ func matchersFromPrincipals(principals []*v3rbacpb.Principal) ([]matcher, error) } matchers = append(matchers, authenticatedMatcher) case *v3rbacpb.Principal_DirectRemoteIp: - m, err := newSourceIPMatcher(principal.GetDirectRemoteIp()) + m, err := newRemoteIPMatcher(principal.GetDirectRemoteIp()) if err != nil { return nil, err } @@ -183,9 +185,14 @@ func matchersFromPrincipals(principals []*v3rbacpb.Principal) ([]matcher, error) // The source ip principal identifier is deprecated. Thus, a // principal typed as a source ip in the identifier will be a no-op. // The config should use DirectRemoteIp instead. - case *v3rbacpb.Principal_RemoteIp: // Allow equating RBAC's direct_remote_ip...do we need this? - // Not supported in gRPC RBAC currently - a principal typed as - // Remote Ip in the initial config will be a no-op. + case *v3rbacpb.Principal_RemoteIp: + // RBAC in gRPC treats direct_remote_ip and remote_ip as logically + // equivalent, as per A41. 
+ m, err := newRemoteIPMatcher(principal.GetRemoteIp()) + if err != nil { + return nil, err + } + matchers = append(matchers, m) case *v3rbacpb.Principal_Metadata: // Not supported in gRPC RBAC currently - a principal typed as // Metadata in the initial config will be a no-op. @@ -307,18 +314,19 @@ func (upm *urlPathMatcher) match(data *rpcData) bool { return upm.stringMatcher.Match(data.fullMethod) } -// sourceIPMatcher and destinationIPMatcher both are matchers that match against -// a CIDR Range. Two different matchers are needed as the source and ip address -// come from different parts of the data about incoming RPC's passed in. -// Matching a CIDR Range means to determine whether the IP Address falls within -// the CIDR Range or not. They both implement the matcher interface. -type sourceIPMatcher struct { +// remoteIPMatcher and localIPMatcher both are matchers that match against +// a CIDR Range. Two different matchers are needed as the remote and destination +// ip addresses come from different parts of the data about incoming RPC's +// passed in. Matching a CIDR Range means to determine whether the IP Address +// falls within the CIDR Range or not. They both implement the matcher +// interface. +type remoteIPMatcher struct { // ipNet represents the CidrRange that this matcher was configured with. - // This is what will source and destination IP's will be matched against. + // This is what will remote and destination IP's will be matched against. ipNet *net.IPNet } -func newSourceIPMatcher(cidrRange *v3corepb.CidrRange) (*sourceIPMatcher, error) { +func newRemoteIPMatcher(cidrRange *v3corepb.CidrRange) (*remoteIPMatcher, error) { // Convert configuration to a cidrRangeString, as Go standard library has // methods that parse cidr string. 
cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) @@ -326,28 +334,28 @@ func newSourceIPMatcher(cidrRange *v3corepb.CidrRange) (*sourceIPMatcher, error) if err != nil { return nil, err } - return &sourceIPMatcher{ipNet: ipNet}, nil + return &remoteIPMatcher{ipNet: ipNet}, nil } -func (sim *sourceIPMatcher) match(data *rpcData) bool { +func (sim *remoteIPMatcher) match(data *rpcData) bool { return sim.ipNet.Contains(net.IP(net.ParseIP(data.peerInfo.Addr.String()))) } -type destinationIPMatcher struct { +type localIPMatcher struct { ipNet *net.IPNet } -func newDestinationIPMatcher(cidrRange *v3corepb.CidrRange) (*destinationIPMatcher, error) { +func newLocalIPMatcher(cidrRange *v3corepb.CidrRange) (*localIPMatcher, error) { cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) _, ipNet, err := net.ParseCIDR(cidrRangeString) if err != nil { return nil, err } - return &destinationIPMatcher{ipNet: ipNet}, nil + return &localIPMatcher{ipNet: ipNet}, nil } -func (dim *destinationIPMatcher) match(data *rpcData) bool { - return dim.ipNet.Contains(net.IP(net.ParseIP(data.destinationAddr.String()))) +func (dim *localIPMatcher) match(data *rpcData) bool { + return dim.ipNet.Contains(net.IP(net.ParseIP(data.localAddr.String()))) } // portMatcher matches on whether the destination port of the RPC matches the @@ -395,9 +403,11 @@ func (am *authenticatedMatcher) match(data *rpcData) bool { if am.stringMatcher == nil { return len(data.certs) != 0 } - // No certificate present, so will never successfully match. + // "If there is no client certificate (thus no SAN nor Subject), check if "" + // (empty string) matches. 
If it matches, the principal_name is said to + // match" - A41 if len(data.certs) == 0 { - return false + return am.stringMatcher.Match("") } cert := data.certs[0] // The order of matching as per the RBAC documentation (see package-level comments) diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index 775ed622d578..b3d372e75b7d 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -195,7 +195,7 @@ func newRPCData(ctx context.Context) (*rpcData, error) { peerInfo: pi, fullMethod: mn, destinationPort: uint32(dp), - destinationAddr: conn.LocalAddr(), + localAddr: conn.LocalAddr(), certs: peerCertificates, }, nil } @@ -212,8 +212,8 @@ type rpcData struct { // destinationPort is the port that the RPC is being sent to on the // server. destinationPort uint32 - // destinationAddr is the address that the RPC is being sent to. - destinationAddr net.Addr + // localAddr is the address that the RPC is being sent to. + localAddr net.Addr // certs are the certificates presented by the peer during a TLS // handshake. certs []*x509.Certificate diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go index 807df9b87a81..17832458209a 100644 --- a/internal/xds/rbac/rbac_engine_test.go +++ b/internal/xds/rbac/rbac_engine_test.go @@ -686,11 +686,11 @@ func (s) TestChainEngine(t *testing.T) { }, }, { - name: "SourceIpMatcher", + name: "DirectRemoteIpMatcher", rbacConfigs: []*v3rbacpb.RBAC{ { Policies: map[string]*v3rbacpb.Policy{ - "certain-source-ip": { + "certain-direct-remote-ip": { Permissions: []*v3rbacpb.Permission{ {Rule: &v3rbacpb.Permission_Any{Any: true}}, }, @@ -705,7 +705,51 @@ func (s) TestChainEngine(t *testing.T) { rpcData *rpcData wantStatusCode codes.Code }{ - // This incoming RPC Call should match with the certain-source-ip policy. + // This incoming RPC Call should match with the certain-direct-remote-ip policy. 
+ { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This incoming RPC Call shouldn't match with the certain-direct-remote-ip policy. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // This test tests a RBAC policy configured with a remote-ip policy. + // This should be logically equivalent to configuring a Engine with a + // direct-remote-ip policy, as per A41 - "allow equating RBAC's + // direct_remote_ip and remote_ip." + { + name: "RemoteIpMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-remote-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_RemoteIp{RemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *rpcData + wantStatusCode codes.Code + }{ + // This incoming RPC Call should match with the certain-remote-ip policy. { rpcData: &rpcData{ peerInfo: &peer.Peer{ @@ -714,7 +758,7 @@ func (s) TestChainEngine(t *testing.T) { }, wantStatusCode: codes.OK, }, - // This incoming RPC Call shouldn't match with the certain-source-ip policy. + // This incoming RPC Call shouldn't match with the certain-remote-ip policy. { rpcData: &rpcData{ peerInfo: &peer.Peer{ @@ -840,6 +884,44 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, + // This test tests that when there are no SANs or Subject's + // distinguished name in incoming RPC's, that authenticated matchers + // match against the empty string. 
+ { + name: "default-matching-no-credentials", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}}}}}, + }, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *rpcData + wantStatusCode codes.Code + }{ + // This incoming RPC Call should match with the service admin + // policy. No authentication info is provided, so the + // authenticated matcher should match to the string matcher on + // the empty string, matching to the service-admin policy. + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + }, + }, } for _, test := range tests { diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index 91700ca733d5..e92e2e64421b 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -173,6 +173,12 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http return nil, nil } + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configurated." 
- A41 + if icfg.Rules.Action == v3rbacpb.RBAC_LOG { + return nil, nil + } + // "Envoy aliases :authority and Host in its header map implementation, so // they should be treated equivalent for the RBAC matchers; there must be no // behavior change depending on which of the two header names is used in the diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index d39b728b1265..707a9605d82f 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -896,6 +896,30 @@ func (s) TestRBACHTTPFilter(t *testing.T) { wantStatusEmptyCall: codes.PermissionDenied, wantStatusUnaryCall: codes.PermissionDenied, }, + // This test tests that an RBAC Config with Action.LOG configured allows + // every RPC through. This maps to the line "At this time, if the + // RBAC.action is Action.LOG then the policy will be completely ignored, + // as if RBAC was not configurated." from A41 + { + name: "action-log", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_LOG, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, } for _, test := range tests { From e6d0d2818a7380920b806ae629320500e739bd5c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 28 Sep 2021 13:55:29 -0700 Subject: [PATCH 266/998] internal: log SubConn type if it's not the expected type (#4813) --- picker_wrapper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/picker_wrapper.go b/picker_wrapper.go index 0878ada9dbb2..e8367cb8993b 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -144,7 +144,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } if t := acw.getAddrConn().getReadyTransport(); t != nil { From adb21c46100568b95ab41e97e3b267923a0a92a0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 29 Sep 2021 16:58:46 -0700 Subject: [PATCH 267/998] rls: improve config parsing (#4819) --- balancer/rls/internal/config.go | 262 ++++++++++++--------------- balancer/rls/internal/config_test.go | 157 +++++++--------- 2 files changed, 182 insertions(+), 237 deletions(-) diff --git a/balancer/rls/internal/config.go b/balancer/rls/internal/config.go index a3deb8906c9a..b27a2970f083 100644 --- a/balancer/rls/internal/config.go +++ b/balancer/rls/internal/config.go @@ -22,15 +22,16 @@ import ( "bytes" "encoding/json" "fmt" + "net/url" "time" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/ptypes" durationpb "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/keys" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -62,9 +63,10 @@ type lbConfig struct { staleAge time.Duration cacheSizeBytes int64 defaultTarget string - cpName string - cpTargetField string - cpConfig map[string]json.RawMessage + + childPolicyName string + childPolicyConfig map[string]json.RawMessage + childPolicyTargetField string } func (lbCfg *lbConfig) Equal(other *lbConfig) bool { @@ -75,21 +77,21 @@ func (lbCfg *lbConfig) Equal(other *lbConfig) bool { lbCfg.staleAge == other.staleAge && lbCfg.cacheSizeBytes == other.cacheSizeBytes && lbCfg.defaultTarget == other.defaultTarget && - lbCfg.cpName == other.cpName && - 
lbCfg.cpTargetField == other.cpTargetField && - cpConfigEqual(lbCfg.cpConfig, other.cpConfig) + lbCfg.childPolicyName == other.childPolicyName && + lbCfg.childPolicyTargetField == other.childPolicyTargetField && + childPolicyConfigEqual(lbCfg.childPolicyConfig, other.childPolicyConfig) } -func cpConfigEqual(am, bm map[string]json.RawMessage) bool { - if (bm == nil) != (am == nil) { +func childPolicyConfigEqual(a, b map[string]json.RawMessage) bool { + if (b == nil) != (a == nil) { return false } - if len(bm) != len(am) { + if len(b) != len(a) { return false } - for k, jsonA := range am { - jsonB, ok := bm[k] + for k, jsonA := range a { + jsonB, ok := b[k] if !ok { return false } @@ -100,71 +102,18 @@ func cpConfigEqual(am, bm map[string]json.RawMessage) bool { return true } -// This struct resembles the JSON respresentation of the loadBalancing config +// This struct resembles the JSON representation of the loadBalancing config // and makes it easier to unmarshal. type lbConfigJSON struct { RouteLookupConfig json.RawMessage - ChildPolicy []*loadBalancingConfig + ChildPolicy []map[string]json.RawMessage ChildPolicyConfigTargetFieldName string } -// loadBalancingConfig represents a single load balancing config, -// stored in JSON format. -// -// TODO(easwars): This code seems to be repeated in a few places -// (service_config.go and in the xds code as well). Refactor and re-use. -type loadBalancingConfig struct { - Name string - Config json.RawMessage -} - -// MarshalJSON returns a JSON encoding of l. -func (l *loadBalancingConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("rls: loadBalancingConfig.MarshalJSON() is unimplemented") -} - -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. 
-func (l *loadBalancingConfig) UnmarshalJSON(data []byte) error { - var cfg map[string]json.RawMessage - if err := json.Unmarshal(data, &cfg); err != nil { - return err - } - for name, config := range cfg { - l.Name = name - l.Config = config - } - return nil -} - // ParseConfig parses and validates the JSON representation of the service // config and returns the loadBalancingConfig to be used by the RLS LB policy. // // Helps implement the balancer.ConfigParser interface. -// -// The following validation checks are performed: -// * routeLookupConfig: -// ** grpc_keybuilders field: -// - must have at least one entry -// - must not have two entries with the same Name -// - must not have any entry with a Name with the service field unset or -// empty -// - must not have any entries without a Name -// - must not have a headers entry that has required_match set -// - must not have two headers entries with the same key within one entry -// ** lookup_service field: -// - must be set and non-empty and must parse as a target URI -// ** max_age field: -// - if not specified or is greater than maxMaxAge, it will be reset to -// maxMaxAge -// ** stale_age field: -// - if the value is greater than or equal to max_age, it is ignored -// - if set, then max_age must also be set -// ** valid_targets field: -// - will be ignored -// ** cache_size_bytes field: -// - must be greater than zero -// - TODO(easwars): Define a minimum value for this field, to be used when -// left unspecified // * childPolicy field: // - must find a valid child policy with a valid config (the child policy must // be able to parse the provided config successfully when we pass it a dummy @@ -178,20 +127,58 @@ func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, return nil, fmt.Errorf("rls: json unmarshal failed for service config {%+v}: %v", string(c), err) } + // Unmarshal and validate contents of the RLS proto. 
m := jsonpb.Unmarshaler{AllowUnknownFields: true} rlsProto := &rlspb.RouteLookupConfig{} if err := m.Unmarshal(bytes.NewReader(cfgJSON.RouteLookupConfig), rlsProto); err != nil { return nil, fmt.Errorf("rls: bad RouteLookupConfig proto {%+v}: %v", string(cfgJSON.RouteLookupConfig), err) } + lbCfg, err := parseRLSProto(rlsProto) + if err != nil { + return nil, err + } - var childPolicy *loadBalancingConfig - for _, lbcfg := range cfgJSON.ChildPolicy { - if balancer.Get(lbcfg.Name) != nil { - childPolicy = lbcfg - break - } + // Unmarshal and validate child policy configs. + if cfgJSON.ChildPolicyConfigTargetFieldName == "" { + return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config {%+v}", string(c)) } + name, config, err := parseChildPolicyConfigs(cfgJSON.ChildPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) + if err != nil { + return nil, err + } + lbCfg.childPolicyName = name + lbCfg.childPolicyConfig = config + lbCfg.childPolicyTargetField = cfgJSON.ChildPolicyConfigTargetFieldName + return lbCfg, nil +} +// parseRLSProto fetches relevant information from the RouteLookupConfig proto +// and validates the values in the process. 
+// +// The following validation checks are performed: +// ** grpc_keybuilders field: +// - must have at least one entry +// - must not have two entries with the same Name +// - must not have any entry with a Name with the service field unset or +// empty +// - must not have any entries without a Name +// - must not have a headers entry that has required_match set +// - must not have two headers entries with the same key within one entry +// ** lookup_service field: +// - must be set and non-empty and must parse as a target URI +// ** max_age field: +// - if not specified or is greater than maxMaxAge, it will be reset to +// maxMaxAge +// ** stale_age field: +// - if the value is greater than or equal to max_age, it is ignored +// - if set, then max_age must also be set +// ** valid_targets field: +// - will be ignored +// ** cache_size_bytes field: +// - must be greater than zero +// - TODO(easwars): Define a minimum value for this field, to be used when +// left unspecified +func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { kbMap, err := keys.MakeBuilderMap(rlsProto) if err != nil { return nil, err @@ -199,64 +186,54 @@ func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, lookupService := rlsProto.GetLookupService() if lookupService == "" { - return nil, fmt.Errorf("rls: empty lookup_service in service config {%+v}", string(c)) + return nil, fmt.Errorf("rls: empty lookup_service in route lookup config {%+v}", rlsProto) + } + parsedTarget, err := url.Parse(lookupService) + if err != nil { + // If the first attempt failed because of a missing scheme, try again + // with the default scheme. 
+ parsedTarget, err = url.Parse(resolver.GetDefaultScheme() + ":///" + lookupService) + if err != nil { + return nil, fmt.Errorf("rls: invalid target URI in lookup_service {%s}", lookupService) + } } - parsedTarget := grpcutil.ParseTarget(lookupService, false) if parsedTarget.Scheme == "" { parsedTarget.Scheme = resolver.GetDefaultScheme() } if resolver.Get(parsedTarget.Scheme) == nil { - return nil, fmt.Errorf("rls: invalid target URI in lookup_service {%s}", lookupService) + return nil, fmt.Errorf("rls: unregistered scheme in lookup_service {%s}", lookupService) } lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) if err != nil { - return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in service config {%+v}: %v", string(c), err) + return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in route lookup config {%+v}: %v", rlsProto, err) } if lookupServiceTimeout == 0 { lookupServiceTimeout = defaultLookupServiceTimeout } maxAge, err := convertDuration(rlsProto.GetMaxAge()) if err != nil { - return nil, fmt.Errorf("rls: failed to parse max_age in service config {%+v}: %v", string(c), err) + return nil, fmt.Errorf("rls: failed to parse max_age in route lookup config {%+v}: %v", rlsProto, err) } staleAge, err := convertDuration(rlsProto.GetStaleAge()) if err != nil { - return nil, fmt.Errorf("rls: failed to parse staleAge in service config {%+v}: %v", string(c), err) + return nil, fmt.Errorf("rls: failed to parse staleAge in route lookup config {%+v}: %v", rlsProto, err) } if staleAge != 0 && maxAge == 0 { - return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in service config {%+v}", string(c)) + return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in route lookup config {%+v}", rlsProto) } if staleAge >= maxAge { logger.Info("rls: stale_age {%v} is greater than max_age {%v}, ignoring it", staleAge, maxAge) staleAge = 0 } if maxAge == 0 || maxAge > maxMaxAge { - logger.Infof("rls: 
max_age in service config is %v, using %v", maxAge, maxMaxAge) + logger.Infof("rls: max_age in route lookup config is %v, using %v", maxAge, maxMaxAge) maxAge = maxMaxAge } cacheSizeBytes := rlsProto.GetCacheSizeBytes() if cacheSizeBytes <= 0 { - return nil, fmt.Errorf("rls: cache_size_bytes must be greater than 0 in service config {%+v}", string(c)) + return nil, fmt.Errorf("rls: cache_size_bytes must be greater than 0 in route lookup config {%+v}", rlsProto) } - if childPolicy == nil { - return nil, fmt.Errorf("rls: childPolicy is invalid in service config {%+v}", string(c)) - } - if cfgJSON.ChildPolicyConfigTargetFieldName == "" { - return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config {%+v}", string(c)) - } - // TODO(easwars): When we start instantiating the child policy from the - // parent RLS LB policy, we could make this function a method on the - // lbConfig object and share the code. We would be parsing the child policy - // config again during that time. The only difference betweeen now and then - // would be that we would be using real targetField name instead of the - // dummy. So, we could make the targetName field a parameter to this - // function during the refactor. - cpCfg, err := validateChildPolicyConfig(childPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) - if err != nil { - return nil, err - } - return &lbConfig{ kbMap: kbMap, lookupService: lookupService, @@ -265,57 +242,50 @@ func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, staleAge: staleAge, cacheSizeBytes: cacheSizeBytes, defaultTarget: rlsProto.GetDefaultTarget(), - // TODO(easwars): Once we refactor validateChildPolicyConfig and make - // it a method on the lbConfig object, we could directly store the - // balancer.Builder and/or balancer.ConfigParser here instead of the - // Name. 
That would mean that we would have to create the lbConfig - // object here first before validating the childPolicy config, but - // that's a minor detail. - cpName: childPolicy.Name, - cpTargetField: cfgJSON.ChildPolicyConfigTargetFieldName, - cpConfig: cpCfg, }, nil } -// validateChildPolicyConfig validates the child policy config received in the -// service config. This makes it possible for us to reject service configs -// which contain invalid child policy configs which we know will fail for sure. -// -// It does the following: -// * Unmarshals the provided child policy config into a map of string to -// json.RawMessage. This allows us to add an entry to the map corresponding -// to the targetFieldName that we received in the service config. -// * Marshals the map back into JSON, finds the config parser associated with -// the child policy and asks it to validate the config. -// * If the validation succeeded, removes the dummy entry from the map and -// returns it. If any of the above steps failed, it returns an error. -func validateChildPolicyConfig(cp *loadBalancingConfig, cpTargetField string) (map[string]json.RawMessage, error) { - var childConfig map[string]json.RawMessage - if err := json.Unmarshal(cp.Config, &childConfig); err != nil { - return nil, fmt.Errorf("rls: json unmarshal failed for child policy config {%+v}: %v", cp.Config, err) - } - childConfig[cpTargetField], _ = json.Marshal(dummyChildPolicyTarget) +// parseChildPolicyConfigs iterates through the list of child policies and picks +// the first registered policy and validates its config. 
+func parseChildPolicyConfigs(childPolicies []map[string]json.RawMessage, targetFieldName string) (string, map[string]json.RawMessage, error) { + for i, config := range childPolicies { + if len(config) != 1 { + return "", nil, fmt.Errorf("rls: invalid childPolicy: entry %v does not contain exactly 1 policy/config pair: %q", i, config) + } - jsonCfg, err := json.Marshal(childConfig) - if err != nil { - return nil, fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) - } - builder := balancer.Get(cp.Name) - if builder == nil { - // This should never happen since we already made sure that the child - // policy name mentioned in the service config is a valid one. - return nil, fmt.Errorf("rls: balancer builder not found for child_policy %v", cp.Name) - } - parser, ok := builder.(balancer.ConfigParser) - if !ok { - return nil, fmt.Errorf("rls: balancer builder for child_policy does not implement balancer.ConfigParser: %v", cp.Name) - } - _, err = parser.ParseConfig(jsonCfg) - if err != nil { - return nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) + var name string + var rawCfg json.RawMessage + for name, rawCfg = range config { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + parser, ok := builder.(balancer.ConfigParser) + if !ok { + return "", nil, fmt.Errorf("rls: childPolicy %q with config %q does not support config parsing", name, string(rawCfg)) + } + + // To validate child policy configs we do the following: + // - unmarshal the raw JSON bytes of the child policy config into a map + // - add an entry with key set to `target_field_name` and a dummy value + // - marshal the map back to JSON and parse the config using the parser + // retrieved previously + var childConfig map[string]json.RawMessage + if err := json.Unmarshal(rawCfg, &childConfig); err != nil { + return "", nil, fmt.Errorf("rls: json unmarshal failed for child policy config %q: %v", string(rawCfg), err) + } + 
childConfig[targetFieldName], _ = json.Marshal(dummyChildPolicyTarget) + jsonCfg, err := json.Marshal(childConfig) + if err != nil { + return "", nil, fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) + } + if _, err := parser.ParseConfig(jsonCfg); err != nil { + return "", nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) + } + return name, childConfig, nil } - delete(childConfig, cpTargetField) - return childConfig, nil + return "", nil, fmt.Errorf("rls: invalid childPolicy config: no supported policies found in %+v", childPolicies) } func convertDuration(d *durationpb.Duration) (time.Duration, error) { diff --git a/balancer/rls/internal/config_test.go b/balancer/rls/internal/config_test.go index 1efd054512b2..41d330c604e2 100644 --- a/balancer/rls/internal/config_test.go +++ b/balancer/rls/internal/config_test.go @@ -25,8 +25,6 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/grpclb" // grpclb for config parsing. _ "google.golang.org/grpc/internal/resolver/passthrough" // passthrough resolver. 
@@ -58,12 +56,13 @@ func testEqual(a, b *lbConfig) bool { a.staleAge == b.staleAge && a.cacheSizeBytes == b.cacheSizeBytes && a.defaultTarget == b.defaultTarget && - a.cpName == b.cpName && - a.cpTargetField == b.cpTargetField && - cmp.Equal(a.cpConfig, b.cpConfig) + a.childPolicyName == b.childPolicyName && + a.childPolicyTargetField == b.childPolicyTargetField && + childPolicyConfigEqual(a.childPolicyConfig, b.childPolicyConfig) } func TestParseConfig(t *testing.T) { + childPolicyTargetFieldVal, _ := json.Marshal(dummyChildPolicyTarget) tests := []struct { desc string input []byte @@ -85,7 +84,7 @@ func TestParseConfig(t *testing.T) { "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], - "lookupService": "passthrough:///target", + "lookupService": ":///target", "maxAge" : "500s", "staleAge": "600s", "cacheSizeBytes": 1000, @@ -99,15 +98,18 @@ func TestParseConfig(t *testing.T) { "childPolicyConfigTargetFieldName": "service_name" }`), wantCfg: &lbConfig{ - lookupService: "passthrough:///target", - lookupServiceTimeout: 10 * time.Second, // This is the default value. - maxAge: 5 * time.Minute, // This is max maxAge. - staleAge: time.Duration(0), // StaleAge is ignore because it was higher than maxAge. - cacheSizeBytes: 1000, - defaultTarget: "passthrough:///default", - cpName: "grpclb", - cpTargetField: "service_name", - cpConfig: map[string]json.RawMessage{"childPolicy": json.RawMessage(`[{"pickfirst": {}}]`)}, + lookupService: ":///target", + lookupServiceTimeout: 10 * time.Second, // This is the default value. + maxAge: 5 * time.Minute, // This is max maxAge. + staleAge: time.Duration(0), // StaleAge is ignore because it was higher than maxAge. 
+ cacheSizeBytes: 1000, + defaultTarget: "passthrough:///default", + childPolicyName: "grpclb", + childPolicyTargetField: "service_name", + childPolicyConfig: map[string]json.RawMessage{ + "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), + "service_name": json.RawMessage(childPolicyTargetFieldVal), + }, }, }, { @@ -118,7 +120,7 @@ func TestParseConfig(t *testing.T) { "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], - "lookupService": "passthrough:///target", + "lookupService": "target", "lookupServiceTimeout" : "100s", "maxAge": "60s", "staleAge" : "50s", @@ -129,15 +131,18 @@ func TestParseConfig(t *testing.T) { "childPolicyConfigTargetFieldName": "service_name" }`), wantCfg: &lbConfig{ - lookupService: "passthrough:///target", - lookupServiceTimeout: 100 * time.Second, - maxAge: 60 * time.Second, - staleAge: 50 * time.Second, - cacheSizeBytes: 1000, - defaultTarget: "passthrough:///default", - cpName: "grpclb", - cpTargetField: "service_name", - cpConfig: map[string]json.RawMessage{"childPolicy": json.RawMessage(`[{"pickfirst": {}}]`)}, + lookupService: "target", + lookupServiceTimeout: 100 * time.Second, + maxAge: 60 * time.Second, + staleAge: 50 * time.Second, + cacheSizeBytes: 1000, + defaultTarget: "passthrough:///default", + childPolicyName: "grpclb", + childPolicyTargetField: "service_name", + childPolicyConfig: map[string]json.RawMessage{ + "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), + "service_name": json.RawMessage(childPolicyTargetFieldVal), + }, }, }, } @@ -191,10 +196,10 @@ func TestParseConfigErrors(t *testing.T) { }] } }`), - wantErr: "rls: empty lookup_service in service config", + wantErr: "rls: empty lookup_service in route lookup config", }, { - desc: "invalid lookup service URI", + desc: "unregistered scheme in lookup service URI", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ @@ -204,7 +209,7 @@ func TestParseConfigErrors(t *testing.T) { "lookupService": 
"badScheme:///target" } }`), - wantErr: "rls: invalid target URI in lookup_service", + wantErr: "rls: unregistered scheme in lookup_service", }, { desc: "invalid lookup service timeout", @@ -264,7 +269,7 @@ func TestParseConfigErrors(t *testing.T) { "staleAge" : "10s" } }`), - wantErr: "rls: stale_age is set, but max_age is not in service config", + wantErr: "rls: stale_age is set, but max_age is not in route lookup config", }, { desc: "invalid cache size", @@ -280,7 +285,7 @@ func TestParseConfigErrors(t *testing.T) { "staleAge" : "25s" } }`), - wantErr: "rls: cache_size_bytes must be greater than 0 in service config", + wantErr: "rls: cache_size_bytes must be greater than 0 in route lookup config", }, { desc: "no child policy", @@ -296,9 +301,10 @@ func TestParseConfigErrors(t *testing.T) { "staleAge" : "25s", "cacheSizeBytes": 1000, "defaultTarget": "passthrough:///default" - } + }, + "childPolicyConfigTargetFieldName": "service_name" }`), - wantErr: "rls: childPolicy is invalid in service config", + wantErr: "rls: invalid childPolicy config: no supported policies found", }, { desc: "no known child policy", @@ -318,9 +324,35 @@ func TestParseConfigErrors(t *testing.T) { "childPolicy": [ {"cds_experimental": {"Cluster": "my-fav-cluster"}}, {"unknown-policy": {"unknown-field": "unknown-value"}} - ] + ], + "childPolicyConfigTargetFieldName": "service_name" + }`), + wantErr: "rls: invalid childPolicy config: no supported policies found", + }, + { + desc: "invalid child policy config - more than one entry in map", + input: []byte(`{ + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "service", "method": "method"}], + "headers": [{"key": "k1", "names": ["v1"]}] + }], + "lookupService": "passthrough:///target", + "lookupServiceTimeout" : "10s", + "maxAge": "30s", + "staleAge" : "25s", + "cacheSizeBytes": 1000, + "defaultTarget": "passthrough:///default" + }, + "childPolicy": [ + { + "cds_experimental": {"Cluster": "my-fav-cluster"}, + 
"unknown-policy": {"unknown-field": "unknown-value"} + } + ], + "childPolicyConfigTargetFieldName": "service_name" }`), - wantErr: "rls: childPolicy is invalid in service config", + wantErr: "does not contain exactly 1 policy/config pair", }, { desc: "no childPolicyConfigTargetFieldName", @@ -381,60 +413,3 @@ func TestParseConfigErrors(t *testing.T) { }) } } - -func TestValidateChildPolicyConfig(t *testing.T) { - jsonCfg := json.RawMessage(`[{"round_robin" : {}}, {"pick_first" : {}}]`) - wantChildConfig := map[string]json.RawMessage{"childPolicy": jsonCfg} - cp := &loadBalancingConfig{ - Name: "grpclb", - Config: []byte(`{"childPolicy": [{"round_robin" : {}}, {"pick_first" : {}}]}`), - } - cpTargetField := "serviceName" - - gotChildConfig, err := validateChildPolicyConfig(cp, cpTargetField) - if err != nil || !cmp.Equal(gotChildConfig, wantChildConfig) { - t.Errorf("validateChildPolicyConfig(%v, %v) = {%v, %v}, want {%v, nil}", cp, cpTargetField, gotChildConfig, err, wantChildConfig) - } -} - -func TestValidateChildPolicyConfigErrors(t *testing.T) { - tests := []struct { - desc string - cp *loadBalancingConfig - wantErrPrefix string - }{ - { - desc: "unknown child policy", - cp: &loadBalancingConfig{ - Name: "unknown", - Config: []byte(`{}`), - }, - wantErrPrefix: "rls: balancer builder not found for child_policy", - }, - { - desc: "balancer builder does not implement ConfigParser", - cp: &loadBalancingConfig{ - Name: balancerWithoutConfigParserName, - Config: []byte(`{}`), - }, - wantErrPrefix: "rls: balancer builder for child_policy does not implement balancer.ConfigParser", - }, - { - desc: "child policy config parsing failure", - cp: &loadBalancingConfig{ - Name: "grpclb", - Config: []byte(`{"childPolicy": "not-an-array"}`), - }, - wantErrPrefix: "rls: childPolicy config validation failed", - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - gotChildConfig, gotErr := validateChildPolicyConfig(test.cp, "") - if gotChildConfig != 
nil || !strings.HasPrefix(fmt.Sprint(gotErr), test.wantErrPrefix) { - t.Errorf("validateChildPolicyConfig(%v) = {%v, %v}, want {nil, %v}", test.cp, gotChildConfig, gotErr, test.wantErrPrefix) - } - }) - } -} From 2ae5ac1637d68d584d20815d965538481a3c11c7 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 30 Sep 2021 10:04:19 -0700 Subject: [PATCH 268/998] xds: nack if certprovider instance name is missing in bootstrap config (#4799) --- .../test/xds_security_config_nack_test.go | 372 ++++++++++++++++++ xds/internal/xdsclient/cds_test.go | 8 +- xds/internal/xdsclient/client.go | 67 ++++ xds/internal/xdsclient/eds_test.go | 8 +- xds/internal/xdsclient/lds_test.go | 18 +- xds/internal/xdsclient/rds_test.go | 8 +- xds/internal/xdsclient/v2/client.go | 42 +- xds/internal/xdsclient/v3/client.go | 42 +- xds/internal/xdsclient/xds.go | 59 ++- 9 files changed, 575 insertions(+), 49 deletions(-) create mode 100644 xds/internal/test/xds_security_config_nack_test.go diff --git a/xds/internal/test/xds_security_config_nack_test.go b/xds/internal/test/xds_security_config_nack_test.go new file mode 100644 index 000000000000..f2f641fe8055 --- /dev/null +++ b/xds/internal/test/xds_security_config_nack_test.go @@ -0,0 +1,372 @@ +//go:build !386 +// +build !386 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/e2e" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { + const ( + serviceName = "my-service-client-side-xds" + missingIdentityProviderInstance = "missing-identity-provider-instance" + missingRootProviderInstance = "missing-root-provider-instance" + ) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS + // resources corresponding to it. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + + // Create xDS resources to be consumed on the client side. This + // includes the listener, route configuration, cluster (with + // security configuration) and endpoint resources. 
+ resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) + + tests := []struct { + name string + securityConfig *v3corepb.TransportSocket + wantErr bool + }{ + { + name: "both identity and root providers are not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only identity provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only root provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: 
&v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "both identity and root providers are present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: false, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Create an inbound xDS listener resource for the server side. + inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) + for _, fc := range inboundLis.GetFilterChains() { + fc.TransportSocket = test.securityConfig + } + + // Setup the management server with client and server resources. 
+ if len(resources.Listeners) == 1 { + resources.Listeners = append(resources.Listeners, inboundLis) + } else { + resources.Listeners[1] = inboundLis + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make an RPC. + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Make a context with a shorter timeout from the top level test + // context for cases where we expect failures. + timeout := defaultTestTimeout + if test.wantErr { + timeout = defaultTestShortTimeout + } + ctx2, cancel2 := context.WithTimeout(ctx, timeout) + defer cancel2() + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx2, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { + t.Fatalf("EmptyCall() returned err: %v, wantErr %v", err, test.wantErr) + } + }) + } +} + +func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { + const ( + serviceName = "my-service-client-side-xds" + missingIdentityProviderInstance = "missing-identity-provider-instance" + missingRootProviderInstance = "missing-root-provider-instance" + ) + + // setupManagementServer() sets up a bootstrap file with certificate + // provider instance names: `e2e.ServerSideCertProviderInstance` and + // `e2e.ClientSideCertProviderInstance`. 
+ managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + port, cleanup2 := clientSetup(t, &testService{}) + defer cleanup2() + + // This creates a `Cluster` resource with a security config which refers to + // `e2e.ClientSideCertProviderInstance` for both root and identity certs. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) + + tests := []struct { + name string + securityConfig *v3corepb.TransportSocket + wantErr bool + }{ + { + name: "both identity and root providers are not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only identity provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: 
&v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only root provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "both identity and root providers are present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: false, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + resources.Clusters[0].TransportSocket = test.securityConfig + if err := managementServer.Update(ctx, resources); err != nil { + 
t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Make a context with a shorter timeout from the top level test + // context for cases where we expect failures. + timeout := defaultTestTimeout + if test.wantErr { + timeout = defaultTestShortTimeout + } + ctx2, cancel2 := context.WithTimeout(ctx, timeout) + defer cancel2() + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx2, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { + t.Fatalf("EmptyCall() returned err: %v, wantErr %v", err, test.wantErr) + } + }) + } +} diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/cds_test.go index 177665bd615d..21e3b05b9089 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/cds_test.go @@ -1571,9 +1571,13 @@ func (s) TestUnmarshalCluster(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - update, md, err := UnmarshalCluster(testVersion, test.resources, nil) + opts := &UnmarshalOptions{ + Version: testVersion, + Resources: test.resources, + } + update, md, err := UnmarshalCluster(opts) if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalCluster(), got err: %v, wantErr: %v", err, test.wantErr) + t.Fatalf("UnmarshalCluster(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) } if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { t.Errorf("got unexpected update, diff (-got +want): %v", diff) diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index e549d558954f..3230c66c06e3 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -72,12 +72,20 @@ func getAPIClientBuilder(version version.TransportAPI) APIClientBuilder { return nil } +// 
UpdateValidatorFunc performs validations on update structs using +// context/logic available at the xdsClient layer. Since these validation are +// performed on internal update structs, they can be shared between different +// API clients. +type UpdateValidatorFunc func(interface{}) error + // BuildOptions contains options to be passed to client builders. type BuildOptions struct { // Parent is a top-level xDS client which has the intelligence to take // appropriate action based on xDS responses received from the management // server. Parent UpdateHandler + // Validator performs post unmarshal validation checks. + Validator UpdateValidatorFunc // NodeProto contains the Node proto to be used in xDS requests. The actual // type depends on the transport protocol version used. NodeProto proto.Message @@ -680,6 +688,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( apiClient, err := newAPIClient(config.TransportAPI, cc, BuildOptions{ Parent: c, + Validator: c.updateValidator, NodeProto: config.NodeProto, Backoff: backoff.DefaultExponential.Backoff, Logger: c.logger, @@ -733,6 +742,64 @@ func (c *clientImpl) Close() { c.logger.Infof("Shutdown") } +func (c *clientImpl) filterChainUpdateValidator(fc *FilterChain) error { + if fc == nil { + return nil + } + return c.securityConfigUpdateValidator(fc.SecurityCfg) +} + +func (c *clientImpl) securityConfigUpdateValidator(sc *SecurityConfig) error { + if sc == nil { + return nil + } + if sc.IdentityInstanceName != "" { + if _, ok := c.config.CertProviderConfigs[sc.IdentityInstanceName]; !ok { + return fmt.Errorf("identitiy certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) + } + } + if sc.RootInstanceName != "" { + if _, ok := c.config.CertProviderConfigs[sc.RootInstanceName]; !ok { + return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) + } + } + return nil +} + +func (c 
*clientImpl) updateValidator(u interface{}) error { + switch update := u.(type) { + case ListenerUpdate: + if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil { + return nil + } + + fcm := update.InboundListenerCfg.FilterChains + for _, dst := range fcm.dstPrefixMap { + for _, srcType := range dst.srcTypeArr { + if srcType == nil { + continue + } + for _, src := range srcType.srcPrefixMap { + for _, fc := range src.srcPortMap { + if err := c.filterChainUpdateValidator(fc); err != nil { + return err + } + } + } + } + } + return c.filterChainUpdateValidator(fcm.def) + case ClusterUpdate: + return c.securityConfigUpdateValidator(update.SecurityCfg) + default: + // We currently invoke this update validation function only for LDS and + // CDS updates. In the future, if we wish to invoke it for other xDS + // updates, corresponding plumbing needs to be added to those unmarshal + // functions. + } + return nil +} + // ResourceType identifies resources in a transport protocol agnostic way. These // will be used in transport version agnostic code, while the versioned API // clients will map these to appropriate version URLs. 
diff --git a/xds/internal/xdsclient/eds_test.go b/xds/internal/xdsclient/eds_test.go index d09134f58206..d0af8a988d83 100644 --- a/xds/internal/xdsclient/eds_test.go +++ b/xds/internal/xdsclient/eds_test.go @@ -282,9 +282,13 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - update, md, err := UnmarshalEndpoints(testVersion, test.resources, nil) + opts := &UnmarshalOptions{ + Version: testVersion, + Resources: test.resources, + } + update, md, err := UnmarshalEndpoints(opts) if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalEndpoints(), got err: %v, wantErr: %v", err, test.wantErr) + t.Fatalf("UnmarshalEndpoints(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) } if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { t.Errorf("got unexpected update, diff (-got +want): %v", diff) diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index e24eda1e789f..18e2f55ede46 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -654,9 +654,13 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - update, md, err := UnmarshalListener(testVersion, test.resources, nil) + opts := &UnmarshalOptions{ + Version: testVersion, + Resources: test.resources, + } + update, md, err := UnmarshalListener(opts) if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalListener(), got err: %v, wantErr: %v", err, test.wantErr) + t.Fatalf("UnmarshalListener(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) } if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { t.Errorf("got unexpected update, diff (-got +want): %v", diff) @@ -1779,12 +1783,16 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - gotUpdate, md, err := UnmarshalListener(testVersion, test.resources, 
nil) + opts := &UnmarshalOptions{ + Version: testVersion, + Resources: test.resources, + } + gotUpdate, md, err := UnmarshalListener(opts) if (err != nil) != (test.wantErr != "") { - t.Fatalf("UnmarshalListener(), got err: %v, wantErr: %v", err, test.wantErr) + t.Fatalf("UnmarshalListener(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) } if err != nil && !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("UnmarshalListener() = %v wantErr: %q", err, test.wantErr) + t.Fatalf("UnmarshalListener(%+v) = %v wantErr: %q", opts, err, test.wantErr) } if diff := cmp.Diff(gotUpdate, test.wantUpdate, cmpOpts); diff != "" { t.Errorf("got unexpected update, diff (-got +want): %v", diff) diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index ca87c0b97715..c89e8cddca41 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -872,9 +872,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - update, md, err := UnmarshalRouteConfig(testVersion, test.resources, nil) + opts := &UnmarshalOptions{ + Version: testVersion, + Resources: test.resources, + } + update, md, err := UnmarshalRouteConfig(opts) if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalRouteConfig(), got err: %v, wantErr: %v", err, test.wantErr) + t.Fatalf("UnmarshalRouteConfig(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) } if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { t.Errorf("got unexpected update, diff (-got +want): %v", diff) diff --git a/xds/internal/xdsclient/v2/client.go b/xds/internal/xdsclient/v2/client.go index 766db2564b8d..dc137f63e5f5 100644 --- a/xds/internal/xdsclient/v2/client.go +++ b/xds/internal/xdsclient/v2/client.go @@ -66,10 +66,11 @@ func newClient(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIC return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, 
(*v2corepb.Node)(nil)) } v2c := &client{ - cc: cc, - parent: opts.Parent, - nodeProto: nodeProto, - logger: opts.Logger, + cc: cc, + parent: opts.Parent, + nodeProto: nodeProto, + logger: opts.Logger, + updateValidator: opts.Validator, } v2c.ctx, v2c.cancelCtx = context.WithCancel(context.Background()) v2c.TransportHelper = xdsclient.NewTransportHelper(v2c, opts.Logger, opts.Backoff) @@ -90,8 +91,9 @@ type client struct { logger *grpclog.PrefixLogger // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. - cc *grpc.ClientConn - nodeProto *v2corepb.Node + cc *grpc.ClientConn + nodeProto *v2corepb.Node + updateValidator xdsclient.UpdateValidatorFunc } func (v2c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { @@ -186,7 +188,12 @@ func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri // server. On receipt of a good response, it also invokes the registered watcher // callback. func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) + update, md, err := xdsclient.UnmarshalListener(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v2c.logger, + UpdateValidator: v2c.updateValidator, + }) v2c.parent.NewListeners(update, md) return err } @@ -195,7 +202,12 @@ func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { // server. On receipt of a good response, it caches validated resources and also // invokes the registered watcher callback. 
func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) + update, md, err := xdsclient.UnmarshalRouteConfig(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v2c.logger, + UpdateValidator: v2c.updateValidator, + }) v2c.parent.NewRouteConfigs(update, md) return err } @@ -204,13 +216,23 @@ func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { // server. On receipt of a good response, it also invokes the registered watcher // callback. func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) + update, md, err := xdsclient.UnmarshalCluster(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v2c.logger, + UpdateValidator: v2c.updateValidator, + }) v2c.parent.NewClusters(update, md) return err } func (v2c *client) handleEDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) + update, md, err := xdsclient.UnmarshalEndpoints(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v2c.logger, + UpdateValidator: v2c.updateValidator, + }) v2c.parent.NewEndpoints(update, md) return err } diff --git a/xds/internal/xdsclient/v3/client.go b/xds/internal/xdsclient/v3/client.go index 6088189f97f1..827c06b741b7 100644 --- a/xds/internal/xdsclient/v3/client.go +++ b/xds/internal/xdsclient/v3/client.go @@ -66,10 +66,11 @@ func newClient(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIC return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, v3corepb.Node{}) } v3c := &client{ - cc: cc, - parent: 
opts.Parent, - nodeProto: nodeProto, - logger: opts.Logger, + cc: cc, + parent: opts.Parent, + nodeProto: nodeProto, + logger: opts.Logger, + updateValidator: opts.Validator, } v3c.ctx, v3c.cancelCtx = context.WithCancel(context.Background()) v3c.TransportHelper = xdsclient.NewTransportHelper(v3c, opts.Logger, opts.Backoff) @@ -90,8 +91,9 @@ type client struct { logger *grpclog.PrefixLogger // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. - cc *grpc.ClientConn - nodeProto *v3corepb.Node + cc *grpc.ClientConn + nodeProto *v3corepb.Node + updateValidator xdsclient.UpdateValidatorFunc } func (v3c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { @@ -186,7 +188,12 @@ func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri // server. On receipt of a good response, it also invokes the registered watcher // callback. func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) + update, md, err := xdsclient.UnmarshalListener(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v3c.logger, + UpdateValidator: v3c.updateValidator, + }) v3c.parent.NewListeners(update, md) return err } @@ -195,7 +202,12 @@ func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) erro // server. On receipt of a good response, it caches validated resources and also // invokes the registered watcher callback. 
func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) + update, md, err := xdsclient.UnmarshalRouteConfig(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v3c.logger, + UpdateValidator: v3c.updateValidator, + }) v3c.parent.NewRouteConfigs(update, md) return err } @@ -204,13 +216,23 @@ func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) erro // server. On receipt of a good response, it also invokes the registered watcher // callback. func (v3c *client) handleCDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) + update, md, err := xdsclient.UnmarshalCluster(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v3c.logger, + UpdateValidator: v3c.updateValidator, + }) v3c.parent.NewClusters(update, md) return err } func (v3c *client) handleEDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) + update, md, err := xdsclient.UnmarshalEndpoints(&xdsclient.UnmarshalOptions{ + Version: resp.GetVersionInfo(), + Resources: resp.GetResources(), + Logger: v3c.logger, + UpdateValidator: v3c.updateValidator, + }) v3c.parent.NewEndpoints(update, md) return err } diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 28070d88cd5f..732c4e6addc2 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -55,16 +55,29 @@ import ( // to this value by the management server. const transportSocketName = "envoy.transport_sockets.tls" +// UnmarshalOptions wraps the input parameters for `UnmarshalXxx` functions. 
+type UnmarshalOptions struct { + // Version is the version of the received response. + Version string + // Resources are the xDS resources resources in the received response. + Resources []*anypb.Any + // Logger is the prefix logger to be used during unmarshaling. + Logger *grpclog.PrefixLogger + // UpdateValidator is a post unmarshal validation check provided by the + // upper layer. + UpdateValidator UpdateValidatorFunc +} + // UnmarshalListener processes resources received in an LDS response, validates // them, and transforms them into a native struct which contains only fields we // are interested in. -func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) { +func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) { update := make(map[string]ListenerUpdateErrTuple) - md, err := processAllResources(version, resources, logger, update) + md, err := processAllResources(opts, update) return update, md, err } -func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { +func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { if !IsListenerResource(r.GetTypeUrl()) { return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } @@ -80,6 +93,11 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri if err != nil { return lis.GetName(), ListenerUpdate{}, err } + if f != nil { + if err := f(*lu); err != nil { + return lis.GetName(), ListenerUpdate{}, err + } + } lu.Raw = r return lis.GetName(), *lu, nil } @@ -305,9 +323,9 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err // validates them, and transforms them into a native struct which contains only // fields we are interested in. 
The provided hostname determines the route // configuration resources of interest. -func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) { +func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) { update := make(map[string]RouteConfigUpdateErrTuple) - md, err := processAllResources(version, resources, logger, update) + md, err := processAllResources(opts, update) return update, md, err } @@ -642,13 +660,13 @@ func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logg // UnmarshalCluster processes resources received in an CDS response, validates // them, and transforms them into a native struct which contains only fields we // are interested in. -func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) { +func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) { update := make(map[string]ClusterUpdateErrTuple) - md, err := processAllResources(version, resources, logger, update) + md, err := processAllResources(opts, update) return update, md, err } -func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { +func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { if !IsClusterResource(r.GetTypeUrl()) { return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } @@ -663,6 +681,11 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin return cluster.GetName(), ClusterUpdate{}, err } cu.Raw = r + if f != nil { + if err := f(cu); err != nil { + return "", ClusterUpdate{}, err + } + } return cluster.GetName(), cu, nil } @@ -1052,9 +1075,9 @@ func 
circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { // UnmarshalEndpoints processes resources received in an EDS response, // validates them, and transforms them into a native struct which contains only // fields we are interested in. -func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { +func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { update := make(map[string]EndpointsUpdateErrTuple) - md, err := processAllResources(version, resources, logger, update) + md, err := processAllResources(opts, update) return update, md, err } @@ -1188,19 +1211,19 @@ type EndpointsUpdateErrTuple struct { // // The type of the resource is determined by the type of ret. E.g. // map[string]ListenerUpdate means this is for LDS. -func processAllResources(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger, ret interface{}) (UpdateMetadata, error) { +func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadata, error) { timestamp := time.Now() md := UpdateMetadata{ - Version: version, + Version: opts.Version, Timestamp: timestamp, } var topLevelErrors []error perResourceErrors := make(map[string]error) - for _, r := range resources { + for _, r := range opts.Resources { switch ret2 := ret.(type) { case map[string]ListenerUpdateErrTuple: - name, update, err := unmarshalListenerResource(r, logger) + name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger) if err == nil { ret2[name] = ListenerUpdateErrTuple{Update: update} continue @@ -1214,7 +1237,7 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog // the response. 
ret2[name] = ListenerUpdateErrTuple{Err: err} case map[string]RouteConfigUpdateErrTuple: - name, update, err := unmarshalRouteConfigResource(r, logger) + name, update, err := unmarshalRouteConfigResource(r, opts.Logger) if err == nil { ret2[name] = RouteConfigUpdateErrTuple{Update: update} continue @@ -1228,7 +1251,7 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog // the response. ret2[name] = RouteConfigUpdateErrTuple{Err: err} case map[string]ClusterUpdateErrTuple: - name, update, err := unmarshalClusterResource(r, logger) + name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger) if err == nil { ret2[name] = ClusterUpdateErrTuple{Update: update} continue @@ -1242,7 +1265,7 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog // the response. ret2[name] = ClusterUpdateErrTuple{Err: err} case map[string]EndpointsUpdateErrTuple: - name, update, err := unmarshalEndpointsResource(r, logger) + name, update, err := unmarshalEndpointsResource(r, opts.Logger) if err == nil { ret2[name] = EndpointsUpdateErrTuple{Update: update} continue @@ -1278,7 +1301,7 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog md.Status = ServiceStatusNACKed errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) md.ErrState = &UpdateErrorMetadata{ - Version: version, + Version: opts.Version, Err: errRet, Timestamp: timestamp, } From 127c052c701b81daa5970c695438f6ef08c76040 Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Thu, 30 Sep 2021 13:06:50 -0700 Subject: [PATCH 269/998] credentials/google: introduce a new API `NewComputeEngineCredsWithOptions` (#4767) --- credentials/google/google.go | 22 ++++++++++++++++++++-- credentials/google/google_test.go | 5 +++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/credentials/google/google.go b/credentials/google/google.go index 
265d193c7c37..07d0d0dc29cc 100644 --- a/credentials/google/google.go +++ b/credentials/google/google.go @@ -64,14 +64,32 @@ func NewDefaultCredentials() credentials.Bundle { // // This API is experimental. func NewComputeEngineCredentials() credentials.Bundle { + return NewComputeEngineCredsWithOptions(ComputeEngineCredsOptions{}) +} + +// ComputeEngineCredsOptions constructs compite engine credentials with options. +type ComputeEngineCredsOptions struct { + // PerRPCCreds is a per RPC credentials that is passed to a bundle. + PerRPCCreds credentials.PerRPCCredentials +} + +// NewComputeEngineCredsWithOptions returns a credentials bundle that is configured to work +// with google services. This API must only be used when running on GCE. +// +// This API is experimental. +func NewComputeEngineCredsWithOptions(perRPCOpts ComputeEngineCredsOptions) credentials.Bundle { + perRPC := oauth.NewComputeEngine() + if perRPCOpts.PerRPCCreds != nil { + perRPC = perRPCOpts.PerRPCCreds + } c := &creds{ newPerRPCCreds: func() credentials.PerRPCCredentials { - return oauth.NewComputeEngine() + return perRPC }, } bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { - logger.Warningf("compute engine creds: failed to create new creds: %v", err) + logger.Warningf("compute engine creds with per rpc: failed to create new creds: %v", err) } return bundle } diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index fee51f945016..647f8a16fed5 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -76,8 +76,9 @@ func overrideNewCredsFuncs() func() { func TestClientHandshakeBasedOnClusterName(t *testing.T) { defer overrideNewCredsFuncs()() for bundleTyp, tc := range map[string]credentials.Bundle{ - "defaultCreds": NewDefaultCredentials(), - "computeCreds": NewComputeEngineCredentials(), + "defaultCreds": NewDefaultCredentials(), + "computeCreds": NewComputeEngineCredentials(), + "computeCredsPerRPC": 
NewComputeEngineCredsWithOptions(ComputeEngineCredsOptions{}), } { tests := []struct { name string From 69e1b54deb77c76b6832f36e4b31d4316a34f17d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 1 Oct 2021 11:09:12 -0700 Subject: [PATCH 270/998] test: fix stayConnected to call Connect after state reports IDLE (#4821) --- clientconn_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/clientconn_test.go b/clientconn_test.go index f0c55a9a1326..d276c7b5f2ff 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -1067,8 +1067,14 @@ func stayConnected(cc *ClientConn) { defer cancel() for { - cc.Connect() - if state := cc.GetState(); state == connectivity.Shutdown || !cc.WaitForStateChange(ctx, state) { + state := cc.GetState() + switch state { + case connectivity.Idle: + cc.Connect() + case connectivity.Shutdown: + return + } + if !cc.WaitForStateChange(ctx, state) { return } } From b9f62538f003011893c70991c36bdb5b680eed8e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 1 Oct 2021 11:09:26 -0700 Subject: [PATCH 271/998] rls: pull proto changes made in grpc-proto/pull/98 (#4832) --- .../internal/proto/grpc_lookup_v1/rls.pb.go | 76 +++++++++++-------- 1 file changed, 44 insertions(+), 32 deletions(-) diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go b/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go index 9de23aae645c..9f063fd3d620 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go @@ -111,6 +111,8 @@ type RouteLookupRequest struct { TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` // Reason for making this request. Reason RouteLookupRequest_Reason `protobuf:"varint,5,opt,name=reason,proto3,enum=grpc.lookup.v1.RouteLookupRequest_Reason" json:"reason,omitempty"` + // For REASON_STALE, the header_data from the stale response, if any. 
+ StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"` // Map of key values extracted via key builders for the gRPC or HTTP request. KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -177,6 +179,13 @@ func (x *RouteLookupRequest) GetReason() RouteLookupRequest_Reason { return RouteLookupRequest_REASON_UNKNOWN } +func (x *RouteLookupRequest) GetStaleHeaderData() string { + if x != nil { + return x.StaleHeaderData + } + return "" +} + func (x *RouteLookupRequest) GetKeyMap() map[string]string { if x != nil { return x.KeyMap @@ -250,7 +259,7 @@ var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0xf1, 0x02, 0x0a, 0x12, 0x52, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x9d, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x0a, @@ -261,38 +270,41 @@ var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x47, 
0x0a, 0x07, 0x6b, 0x65, 0x79, - 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, - 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, - 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, - 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, - 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, - 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, - 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x22, 0x5e, - 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, - 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, - 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, - 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, - 0x6b, 
0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, + 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, + 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, + 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, + 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, + 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 
0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, + 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, + 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, - 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, + 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 
0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( From 09970207abb5f88aeb1b31fd1190a0934e302e0f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 1 Oct 2021 15:27:28 -0700 Subject: [PATCH 272/998] xds: remove race in TestUnmarshalCluster_WithUpdateValidatorFunc (#4836) --- .../test/xds_security_config_nack_test.go | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/xds/internal/test/xds_security_config_nack_test.go b/xds/internal/test/xds_security_config_nack_test.go index f2f641fe8055..7b8e36c3f3a4 100644 --- a/xds/internal/test/xds_security_config_nack_test.go +++ b/xds/internal/test/xds_security_config_nack_test.go @@ -222,25 +222,6 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { missingRootProviderInstance = "missing-root-provider-instance" ) - // setupManagementServer() sets up a bootstrap file with certificate - // provider instance names: `e2e.ServerSideCertProviderInstance` and - // `e2e.ClientSideCertProviderInstance`. - managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) - defer cleanup1() - - port, cleanup2 := clientSetup(t, &testService{}) - defer cleanup2() - - // This creates a `Cluster` resource with a security config which refers to - // `e2e.ClientSideCertProviderInstance` for both root and identity certs. 
- resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: "localhost", - Port: port, - SecLevel: e2e.SecurityLevelMTLS, - }) - tests := []struct { name string securityConfig *v3corepb.TransportSocket @@ -340,11 +321,30 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { }, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() for _, test := range tests { t.Run(test.name, func(t *testing.T) { + // setupManagementServer() sets up a bootstrap file with certificate + // provider instance names: `e2e.ServerSideCertProviderInstance` and + // `e2e.ClientSideCertProviderInstance`. + managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + + port, cleanup2 := clientSetup(t, &testService{}) + defer cleanup2() + + // This creates a `Cluster` resource with a security config which + // refers to `e2e.ClientSideCertProviderInstance` for both root and + // identity certs. 
+ resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) resources.Clusters[0].TransportSocket = test.securityConfig + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } From f068a13ef05d63510828ed59e7cf3651a02c7118 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 4 Oct 2021 11:22:00 -0700 Subject: [PATCH 273/998] server: add missing conn.Close if the connection dies before reading the HTTP/2 preface (#4837) --- internal/transport/http2_server.go | 7 ++-- server.go | 6 ++-- test/end2end_test.go | 58 ++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 7 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 3e4655e8e65d..f2cad9ebc311 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -290,10 +290,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if _, err := io.ReadFull(t.conn, preface); err != nil { // In deployments where a gRPC server runs behind a cloud load balancer // which performs regular TCP level health checks, the connection is - // closed immediately by the latter. Skipping the error here will help - // reduce log clutter. + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
if err == io.EOF { - return nil, nil + return nil, io.EOF } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } diff --git a/server.go b/server.go index 1e82c955529a..eadf9e05fd18 100644 --- a/server.go +++ b/server.go @@ -885,13 +885,11 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { // ErrConnDispatched means that the connection was dispatched away from // gRPC; those connections should be left open. if err != credentials.ErrConnDispatched { - c.Close() - } - // Don't log on ErrConnDispatched and io.EOF to prevent log spam. - if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. if err != io.EOF { channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } + c.Close() } return nil } diff --git a/test/end2end_test.go b/test/end2end_test.go index 9d7fcb23206c..957d13f731f7 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7990,3 +7990,61 @@ func (s) TestAuthorityHeader(t *testing.T) { }) } } + +// wrapCloseListener tracks Accepts/Closes and maintains a counter of the +// number of open connections. +type wrapCloseListener struct { + net.Listener + connsOpen int32 +} + +// wrapCloseListener is returned by wrapCloseListener.Accept and decrements its +// connsOpen when Close is called. 
+type wrapCloseConn struct { + net.Conn + lis *wrapCloseListener + closeOnce sync.Once +} + +func (w *wrapCloseListener) Accept() (net.Conn, error) { + conn, err := w.Listener.Accept() + if err != nil { + return nil, err + } + atomic.AddInt32(&w.connsOpen, 1) + return &wrapCloseConn{Conn: conn, lis: w}, nil +} + +func (w *wrapCloseConn) Close() error { + defer w.closeOnce.Do(func() { atomic.AddInt32(&w.lis.connsOpen, -1) }) + return w.Conn.Close() +} + +// TestServerClosesConn ensures conn.Close is always closed even if the client +// doesn't complete the HTTP/2 handshake. +func (s) TestServerClosesConn(t *testing.T) { + lis := bufconn.Listen(20) + wrapLis := &wrapCloseListener{Listener: lis} + + s := grpc.NewServer() + go s.Serve(wrapLis) + defer s.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + for i := 0; i < 10; i++ { + conn, err := lis.DialContext(ctx) + if err != nil { + t.Fatalf("Dial = _, %v; want _, nil", err) + } + conn.Close() + } + for ctx.Err() == nil { + if atomic.LoadInt32(&wrapLis.connsOpen) == 0 { + return + } + time.Sleep(50 * time.Millisecond) + } + t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen)) +} From f2974e7778b189c094a2d5fe087d680f7a72050e Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 4 Oct 2021 11:54:27 -0700 Subject: [PATCH 274/998] kokoro: remove expired letsencrypt.org cert and update (#4840) --- test/kokoro/xds.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index f9cb7dab7332..7b7f48dba309 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -3,6 +3,12 @@ set -exu -o pipefail [[ -f /VERSION ]] && cat /VERSION + +echo "Remove the expired letsencrypt.org cert and update the CA certificates" +sudo apt-get install -y ca-certificates +sudo rm /usr/share/ca-certificates/mozilla/DST_Root_CA_X3.crt +sudo update-ca-certificates + cd github export 
GOPATH="${HOME}/gopath" From 02da625150e8ee126d4b84dfed27d2453f2617f4 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 4 Oct 2021 14:01:09 -0700 Subject: [PATCH 275/998] github: increase timeout for codeql and disable for PRs (#4841) --- .github/workflows/codeql-analysis.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 05f393ef1a75..2a73b94079c5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -3,9 +3,6 @@ name: "CodeQL" on: push: branches: [ master ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ master ] schedule: - cron: '24 20 * * 3' @@ -19,7 +16,7 @@ jobs: analyze: name: Analyze runs-on: ubuntu-latest - timeout-minutes: 20 + timeout-minutes: 30 strategy: fail-fast: false From ee479e630f859849f23d70ebf2fa3021f5ad2658 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 5 Oct 2021 14:49:15 -0700 Subject: [PATCH 276/998] creds/google: replace NewComputeEngineCredsWithOptions with NewDefaultCredentialsWithOptions (#4830) --- credentials/google/google.go | 86 +++++++++++++------------------ credentials/google/google_test.go | 6 +-- 2 files changed, 40 insertions(+), 52 deletions(-) diff --git a/credentials/google/google.go b/credentials/google/google.go index 07d0d0dc29cc..63625a4b6803 100644 --- a/credentials/google/google.go +++ b/credentials/google/google.go @@ -35,75 +35,63 @@ const tokenRequestTimeout = 30 * time.Second var logger = grpclog.Component("credentials") -// NewDefaultCredentials returns a credentials bundle that is configured to work -// with google services. +// DefaultCredentialsOptions constructs options to build DefaultCredentials. +type DefaultCredentialsOptions struct { + // PerRPCCreds is a per RPC credentials that is passed to a bundle. 
+ PerRPCCreds credentials.PerRPCCredentials +} + +// NewDefaultCredentialsWithOptions returns a credentials bundle that is +// configured to work with google services. // // This API is experimental. -func NewDefaultCredentials() credentials.Bundle { - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) - defer cancel() - perRPCCreds, err := oauth.NewApplicationDefault(ctx) - if err != nil { - logger.Warningf("google default creds: failed to create application oauth: %v", err) - } - return perRPCCreds - }, +func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { + if opts.PerRPCCreds == nil { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + var err error + opts.PerRPCCreds, err = oauth.NewApplicationDefault(ctx) + if err != nil { + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) + } } + c := &creds{opts: opts} bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { - logger.Warningf("google default creds: failed to create new creds: %v", err) + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create new creds: %v", err) } return bundle } -// NewComputeEngineCredentials returns a credentials bundle that is configured to work -// with google services. This API must only be used when running on GCE. Authentication configured -// by this API represents the GCE VM's default service account. +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. // // This API is experimental. -func NewComputeEngineCredentials() credentials.Bundle { - return NewComputeEngineCredsWithOptions(ComputeEngineCredsOptions{}) -} - -// ComputeEngineCredsOptions constructs compite engine credentials with options. 
-type ComputeEngineCredsOptions struct { - // PerRPCCreds is a per RPC credentials that is passed to a bundle. - PerRPCCreds credentials.PerRPCCredentials +func NewDefaultCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}) } -// NewComputeEngineCredsWithOptions returns a credentials bundle that is configured to work -// with google services. This API must only be used when running on GCE. +// NewComputeEngineCredentials returns a credentials bundle that is configured to work +// with google services. This API must only be used when running on GCE. Authentication configured +// by this API represents the GCE VM's default service account. // // This API is experimental. -func NewComputeEngineCredsWithOptions(perRPCOpts ComputeEngineCredsOptions) credentials.Bundle { - perRPC := oauth.NewComputeEngine() - if perRPCOpts.PerRPCCreds != nil { - perRPC = perRPCOpts.PerRPCCreds - } - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - return perRPC - }, - } - bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) - if err != nil { - logger.Warningf("compute engine creds with per rpc: failed to create new creds: %v", err) - } - return bundle +func NewComputeEngineCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{ + PerRPCCreds: oauth.NewComputeEngine(), + }) } // creds implements credentials.Bundle. type creds struct { + opts DefaultCredentialsOptions + // Supported modes are defined in internal/internal.go. mode string - // The transport credentials associated with this bundle. + // The active transport credentials associated with this bundle. transportCreds credentials.TransportCredentials - // The per RPC credentials associated with this bundle. + // The active per RPC credentials associated with this bundle. 
perRPCCreds credentials.PerRPCCredentials - // Creates new per RPC credentials - newPerRPCCreds func() credentials.PerRPCCredentials } func (c *creds) TransportCredentials() credentials.TransportCredentials { @@ -130,8 +118,8 @@ var ( // existing Bundle may cause races. func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { newCreds := &creds{ - mode: mode, - newPerRPCCreds: c.newPerRPCCreds, + opts: c.opts, + mode: mode, } // Create transport credentials. @@ -147,7 +135,7 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { } if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { - newCreds.perRPCCreds = newCreds.newPerRPCCreds() + newCreds.perRPCCreds = newCreds.opts.PerRPCCreds } return newCreds, nil diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index 647f8a16fed5..6a6e492ee77d 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -76,9 +76,9 @@ func overrideNewCredsFuncs() func() { func TestClientHandshakeBasedOnClusterName(t *testing.T) { defer overrideNewCredsFuncs()() for bundleTyp, tc := range map[string]credentials.Bundle{ - "defaultCreds": NewDefaultCredentials(), - "computeCreds": NewComputeEngineCredentials(), - "computeCredsPerRPC": NewComputeEngineCredsWithOptions(ComputeEngineCredsOptions{}), + "defaultCredsWithOptions": NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}), + "defaultCreds": NewDefaultCredentials(), + "computeCreds": NewComputeEngineCredentials(), } { tests := []struct { name string From 4bd99953513f3d9de6a75075cfb51bc2224429e0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 5 Oct 2021 16:55:25 -0700 Subject: [PATCH 277/998] xds: suppress redundant resource updates using proto.Equal (#4831) --- .../test/xds_server_serving_mode_test.go | 125 ++++++++++-- xds/internal/xdsclient/callback.go | 45 +++-- xds/internal/xdsclient/client_test.go | 13 +- 
.../xdsclient/watchers_cluster_test.go | 56 ++++-- .../xdsclient/watchers_endpoints_test.go | 47 +++-- .../xdsclient/watchers_listener_test.go | 179 ++++++++++++++++-- xds/internal/xdsclient/watchers_route_test.go | 48 +++-- 7 files changed, 414 insertions(+), 99 deletions(-) diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 7282aa94dd26..ac4b3929cb65 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -24,6 +24,7 @@ package xds_test import ( "context" + "fmt" "net" "testing" "time" @@ -39,6 +40,118 @@ import ( "google.golang.org/grpc/xds/internal/testutils/e2e" ) +// TestServerSideXDS_RedundantUpdateSuppression tests the scenario where the +// control plane sends the same resource update. It verifies that the mode +// change callback is not invoked and client connections to the server are not +// recycled. +func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { + managementServer, nodeID, bootstrapContents, _, cleanup := setupManagementServer(t) + defer cleanup() + + creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + t.Fatal(err) + } + lis, err := xdstestutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + updateCh := make(chan connectivity.ServingMode, 1) + + // Create a server option to get notified about serving mode changes. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + updateCh <- args.Mode + }) + + // Initialize an xDS-enabled gRPC server and register the stubServer on it. 
+ server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) + defer server.Stop() + testpb.RegisterTestServiceServer(server, &testService{}) + + // Setup the management server to respond with the listener resources. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + listener := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener}, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Wait for the listener to move to "serving" mode. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh: + if mode != connectivity.ServingModeServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + + // Create a ClientConn and make a successful RPCs. + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + waitForSuccessfulRPC(ctx, t, cc) + + // Start a goroutine to make sure that we do not see any connectivity state + // changes on the client connection. If redundant updates are not + // suppressed, server will recycle client connections. + errCh := make(chan error, 1) + go func() { + if cc.WaitForStateChange(ctx, connectivity.Ready) { + errCh <- fmt.Errorf("unexpected connectivity state change {%s --> %s} on the client connection", connectivity.Ready, cc.GetState()) + return + } + errCh <- nil + }() + + // Update the management server with the same listener resource. 
This will + // update the resource version though, and should result in a the management + // server sending the same resource to the xDS-enabled gRPC server. + if err := managementServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener}, + }); err != nil { + t.Fatal(err) + } + + // Since redundant resource updates are suppressed, we should not see the + // mode change callback being invoked. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case mode := <-updateCh: + t.Fatalf("unexpected mode change callback with new mode %v", mode) + } + + // Make sure RPCs continue to succeed. + waitForSuccessfulRPC(ctx, t, cc) + + // Cancel the context to ensure that the WaitForStateChange call exits early + // and returns false. + cancel() + if err := <-errCh; err != nil { + t.Fatal(err) + } +} + // TestServerSideXDS_ServingModeChanges tests the serving mode functionality in // xDS enabled gRPC servers. It verifies that appropriate mode changes happen in // the server, and also verifies behavior of clientConns under these modes. @@ -163,17 +276,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Error(err) } - // Wait for lis2 to move to "not-serving" mode. lis1 also receives an update - // here even though it stays in "serving" mode. - // See https://github.com/grpc/grpc-go/issues/4695. - select { - case <-ctx.Done(): - t.Fatalf("timed out waiting for a mode change update: %v", err) - case mode := <-updateCh1: - if mode != connectivity.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) - } - } + // Wait for lis2 to move to "not-serving" mode. 
select { case <-ctx.Done(): t.Fatalf("timed out waiting for a mode change update: %v", err) diff --git a/xds/internal/xdsclient/callback.go b/xds/internal/xdsclient/callback.go index 0374389fbca1..0c2665e84c0e 100644 --- a/xds/internal/xdsclient/callback.go +++ b/xds/internal/xdsclient/callback.go @@ -18,7 +18,10 @@ package xdsclient -import "google.golang.org/grpc/internal/pretty" +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/protobuf/proto" +) type watcherInfoWithUpdate struct { wi *watchInfo @@ -98,9 +101,13 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met } continue } - // If the resource is valid, send the update. - for wi := range s { - wi.newUpdate(uErr.Update) + // If we get here, it means that the update is a valid one. Notify + // watchers only if this is a first time update or it is different + // from the one currently cached. + if cur, ok := c.ldsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { + for wi := range s { + wi.newUpdate(uErr.Update) + } } // Sync cache. c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) @@ -164,9 +171,13 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTupl } continue } - // If the resource is valid, send the update. - for wi := range s { - wi.newUpdate(uErr.Update) + // If we get here, it means that the update is a valid one. Notify + // watchers only if this is a first time update or it is different + // from the one currently cached. + if cur, ok := c.rdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { + for wi := range s { + wi.newUpdate(uErr.Update) + } } // Sync cache. c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) @@ -214,9 +225,13 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad } continue } - // If the resource is valid, send the update. 
- for wi := range s { - wi.newUpdate(uErr.Update) + // If we get here, it means that the update is a valid one. Notify + // watchers only if this is a first time update or it is different + // from the one currently cached. + if cur, ok := c.cdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { + for wi := range s { + wi.newUpdate(uErr.Update) + } } // Sync cache. c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) @@ -281,9 +296,13 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, me } continue } - // If the resource is valid, send the update. - for wi := range s { - wi.newUpdate(uErr.Update) + // If we get here, it means that the update is a valid one. Notify + // watchers only if this is a first time update or it is different + // from the one currently cached. + if cur, ok := c.edsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { + for wi := range s { + wi.newUpdate(uErr.Update) + } } // Sync cache. c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index d8e942196311..7c3423cd5ad7 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -181,7 +182,9 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { t.Fatal(err) } - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} + // The second update needs to be different in the underlying resource proto + // for the watch callback to be invoked. 
+ wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil { t.Fatal(err) @@ -200,7 +203,7 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want } return nil } - if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate) { + if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, protocmp.Transform()) { return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil @@ -218,7 +221,7 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, w } return nil } - if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate) { + if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, protocmp.Transform()) { return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil @@ -236,7 +239,7 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU } return nil } - if !cmp.Equal(gotUpdate.Update, wantUpdate) { + if !cmp.Equal(gotUpdate.Update, wantUpdate, protocmp.Transform()) { return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil @@ -254,7 +257,7 @@ func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wan } return nil } - if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, cmpopts.EquateEmpty()) { + if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, cmpopts.EquateEmpty(), protocmp.Transform()) { return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) } return nil diff --git 
a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index bbfc1e96dcf7..c06319e959c6 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" ) @@ -64,12 +65,16 @@ func (s) TestClusterWatch(t *testing.T) { t.Fatal(err) } - // Another update, with an extra resource for a different resource name. + // Push an update, with an extra resource for a different resource name. + // Specify a non-nil raw proto in the original resource to ensure that the + // new update is not considered equal to the old one. + newUpdate := wantUpdate + newUpdate.Raw = &anypb.Any{} client.NewClusters(map[string]ClusterUpdateErrTuple{ - testCDSName: {Update: wantUpdate}, + testCDSName: {Update: newUpdate}, "randomName": {}, }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { + if err := verifyClusterUpdate(ctx, clusterUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } @@ -130,19 +135,28 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { } } - // Cancel the last watch, and send update again. + // Cancel the last watch, and send update again. None of the watchers should + // be notified because one has been cancelled, and the other is receiving + // the same update. 
cancelLastWatch() client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } + for i := 0; i < count; i++ { + func() { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := clusterUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected ClusterUpdate: %v, %v, want channel recv timeout", u, err) + } + }() } - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) + // Push a new update and make sure the uncancelled watcher is invoked. + // Specify a non-nil raw proto to ensure that the new update is not + // considered equal to the old one. + newUpdate := ClusterUpdate{ClusterName: testEDSName, Raw: &anypb.Any{}} + client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: newUpdate}}, UpdateMetadata{}) + if err := verifyClusterUpdate(ctx, clusterUpdateChs[0], newUpdate, nil); err != nil { + t.Fatal(err) } } @@ -417,22 +431,26 @@ func (s) TestClusterResourceRemoved(t *testing.T) { t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } - // Watcher 2 should get the same update again. - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) + // Watcher 2 should not see an update since the resource has not changed. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := clusterUpdateCh2.Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected ClusterUpdate: %v, want receiving from channel timeout", u) } - // Send one more update without resource 1. + // Send another update with resource 2 modified. Specify a non-nil raw proto + // to ensure that the new update is not considered equal to the old one. + wantUpdate2 = ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) // Watcher 1 should not see an update. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := clusterUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) + t.Errorf("unexpected Cluster: %v, want receiving from channel timeout", u) } - // Watcher 2 should get the same update again. + // Watcher 2 should get the update. if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index e9b726c003e5..b87723e5086e 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" @@ -82,18 +83,23 @@ func (s) TestEndpointsWatch(t *testing.T) { t.Fatal(err) } - // Another update for a different resource name. 
- client.NewEndpoints(map[string]EndpointsUpdateErrTuple{"randomName": {}}, UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) + // Push an update, with an extra resource for a different resource name. + // Specify a non-nil raw proto in the original resource to ensure that the + // new update is not considered equal to the old one. + newUpdate := wantUpdate + newUpdate.Raw = &anypb.Any{} + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ + testCDSName: {Update: newUpdate}, + "randomName": {}, + }, UpdateMetadata{}) + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, newUpdate, nil); err != nil { + t.Fatal(err) } // Cancel watch, and send update again. cancelWatch() client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) @@ -149,19 +155,28 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { } } - // Cancel the last watch, and send update again. + // Cancel the last watch, and send update again. None of the watchers should + // be notified because one has been cancelled, and the other is receiving + // the same update. 
cancelLastWatch() client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } + for i := 0; i < count; i++ { + func() { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := endpointsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) + } + }() } - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) + // Push a new update and make sure the uncancelled watcher is invoked. + // Specify a non-nil raw proto to ensure that the new update is not + // considered equal to the old one. 
+ newUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}, Raw: &anypb.Any{}} + client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: newUpdate}}, UpdateMetadata{}) + if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[0], newUpdate, nil); err != nil { + t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index 62acf24b80d9..176e6bbcb7b4 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -23,7 +23,10 @@ import ( "fmt" "testing" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "google.golang.org/grpc/internal/testutils" + "google.golang.org/protobuf/types/known/anypb" ) // TestLDSWatch covers the cases: @@ -62,12 +65,15 @@ func (s) TestLDSWatch(t *testing.T) { t.Fatal(err) } - // Another update, with an extra resource for a different resource name. + // Push an update, with an extra resource for a different resource name. + // Specify a non-nil raw proto in the original resource to ensure that the + // new update is not considered equal to the old one. 
+ newUpdate := ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} client.NewListeners(map[string]ListenerUpdateErrTuple{ - testLDSName: {Update: wantUpdate}, + testLDSName: {Update: newUpdate}, "randomName": {}, }, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { + if err := verifyListenerUpdate(ctx, ldsUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } @@ -77,7 +83,7 @@ func (s) TestLDSWatch(t *testing.T) { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) + t.Fatalf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) } } @@ -131,19 +137,28 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { } } - // Cancel the last watch, and send update again. + // Cancel the last watch, and send update again. None of the watchers should + // be notified because one has been cancelled, and the other is receiving + // the same update. 
cancelLastWatch() client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } + for i := 0; i < count; i++ { + func() { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := ldsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) + } + }() } - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) + // Push a new update and make sure the uncancelled watcher is invoked. + // Specify a non-nil raw proto to ensure that the new update is not + // considered equal to the old one. + newUpdate := ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: newUpdate}}, UpdateMetadata{}) + if err := verifyListenerUpdate(ctx, ldsUpdateChs[0], newUpdate, nil); err != nil { + t.Fatal(err) } } @@ -332,22 +347,26 @@ func (s) TestLDSResourceRemoved(t *testing.T) { t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } - // Watcher 2 should get the same update again. - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) + // Watcher 2 should not see an update since the resource has not changed. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := ldsUpdateCh2.Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u) } - // Send one more update without resource 1. + // Send another update with resource 2 modified. Specify a non-nil raw proto + // to ensure that the new update is not considered equal to the old one. + wantUpdate2 = ListenerUpdate{RouteConfigName: testEDSName + "2", Raw: &anypb.Any{}} client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) // Watcher 1 should not see an update. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := ldsUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded { t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u) } - // Watcher 2 should get the same update again. + // Watcher 2 should get the update. if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { t.Fatal(err) } @@ -448,3 +467,125 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { t.Fatal(err) } } + +// TestListenerWatch_RedundantUpdateSupression tests scenarios where an update +// with an unmodified resource is suppressed, and modified resource is not. 
+func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { + apiClientCh, cleanup := overrideNewAPIClient() + defer cleanup() + + client, err := newWithConfig(clientOpts(testXDSServer, false)) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + c, err := apiClientCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + apiClient := c.(*testAPIClient) + + ldsUpdateCh := testutils.NewChannel() + client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { + ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + }) + if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + + basicListener := testutils.MarshalAny(&v3listenerpb.Listener{ + Name: testLDSName, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{RouteConfigName: "route-config-name"}, + }, + }), + }, + }) + listenerWithFilter1 := testutils.MarshalAny(&v3listenerpb.Listener{ + Name: testLDSName, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{RouteConfigName: "route-config-name"}, + }, + HttpFilters: []*v3httppb.HttpFilter{ + { + Name: "customFilter1", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + }, + }, + }), + }, + }) + listenerWithFilter2 := testutils.MarshalAny(&v3listenerpb.Listener{ + Name: testLDSName, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: 
&v3httppb.Rds{RouteConfigName: "route-config-name"}, + }, + HttpFilters: []*v3httppb.HttpFilter{ + { + Name: "customFilter2", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + }, + }, + }), + }, + }) + + tests := []struct { + update ListenerUpdate + wantCallback bool + }{ + { + // First update. Callback should be invoked. + update: ListenerUpdate{Raw: basicListener}, + wantCallback: true, + }, + { + // Same update as previous. Callback should be skipped. + update: ListenerUpdate{Raw: basicListener}, + wantCallback: false, + }, + { + // New update. Callback should be invoked. + update: ListenerUpdate{Raw: listenerWithFilter1}, + wantCallback: true, + }, + { + // Same update as previous. Callback should be skipped. + update: ListenerUpdate{Raw: listenerWithFilter1}, + wantCallback: false, + }, + { + // New update. Callback should be invoked. + update: ListenerUpdate{Raw: listenerWithFilter2}, + wantCallback: true, + }, + { + // Same update as previous. Callback should be skipped. 
+ update: ListenerUpdate{Raw: listenerWithFilter2}, + wantCallback: false, + }, + } + for _, test := range tests { + client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, UpdateMetadata{}) + if test.wantCallback { + if err := verifyListenerUpdate(ctx, ldsUpdateCh, test.update, nil); err != nil { + t.Fatal(err) + } + } else { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u) + } + } + } +} diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index cfb5449befd8..70c8dd829e9e 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" ) @@ -71,18 +72,23 @@ func (s) TestRDSWatch(t *testing.T) { t.Fatal(err) } - // Another update for a different resource name. - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{"randomName": {}}, UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) + // Push an update, with an extra resource for a different resource name. + // Specify a non-nil raw proto in the original resource to ensure that the + // new update is not considered equal to the old one. 
+ newUpdate := wantUpdate + newUpdate.Raw = &anypb.Any{} + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ + testRDSName: {Update: newUpdate}, + "randomName": {}, + }, UpdateMetadata{}) + if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, newUpdate, nil); err != nil { + t.Fatal(err) } // Cancel watch, and send update again. cancelWatch() client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) @@ -145,19 +151,29 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { } } - // Cancel the last watch, and send update again. + // Cancel the last watch, and send update again. None of the watchers should + // be notified because one has been cancelled, and the other is receiving + // the same update. 
cancelLastWatch() client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } + for i := 0; i < count; i++ { + func() { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := rdsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) + } + }() } - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) + // Push a new update and make sure the uncancelled watcher is invoked. + // Specify a non-nil raw proto to ensure that the new update is not + // considered equal to the old one. 
+ newUpdate := wantUpdate + newUpdate.Raw = &anypb.Any{} + client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: newUpdate}}, UpdateMetadata{}) + if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[0], newUpdate, nil); err != nil { + t.Fatal(err) } } From d16cfedb5f31caad933f6bb4f3aa3a85177fb989 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 6 Oct 2021 19:26:22 -0400 Subject: [PATCH 278/998] Rename env var (#4849) Rename env var --- internal/xds/env/env.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index b171ac91f177..2977bfa62857 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -43,7 +43,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RBAC" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" From 404d8fd5139bfb7be9b4bf675e76eed53207dd8c Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 6 Oct 2021 19:26:43 -0400 Subject: [PATCH 279/998] Added imports for HTTP Filters (#4850) Added imports for HTTP Filters --- xds/xds.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/xds/xds.go b/xds/xds.go index ec16c9f520bc..27547b56d226 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -39,6 +39,8 @@ import ( _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. 
_ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. xdsresolver "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 xDS API client. _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register the v3 xDS API client. From b9d7c74e01f89a52880332f584776ccfe0c27756 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 7 Oct 2021 11:47:53 -0700 Subject: [PATCH 280/998] xds: local interop tests (#4823) --- .github/workflows/testing.yml | 1 + interop/xds/server/server.go | 17 +- xds/csds/csds_test.go | 2 +- xds/internal/test/e2e/README.md | 19 ++ xds/internal/test/e2e/controlplane.go | 62 +++++ xds/internal/test/e2e/e2e.go | 176 ++++++++++++ xds/internal/test/e2e/e2e_test.go | 262 ++++++++++++++++++ xds/internal/test/e2e/e2e_utils.go | 36 +++ xds/internal/test/e2e/run.sh | 6 + xds/internal/testutils/e2e/clientresources.go | 30 +- 10 files changed, 591 insertions(+), 20 deletions(-) create mode 100644 xds/internal/test/e2e/README.md create mode 100644 xds/internal/test/e2e/controlplane.go create mode 100644 xds/internal/test/e2e/e2e.go create mode 100644 xds/internal/test/e2e/e2e_test.go create mode 100644 xds/internal/test/e2e/e2e_utils.go create mode 100755 xds/internal/test/e2e/run.sh diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 37544060ea28..b687fdb3dc39 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -114,3 +114,4 @@ jobs: examples/examples_test.sh security/advancedtls/examples/examples_test.sh interop/interop_test.sh + xds/internal/test/e2e/run.sh diff --git a/interop/xds/server/server.go b/interop/xds/server/server.go index 2f33799f9404..afbbc56af89e 100644 --- 
a/interop/xds/server/server.go +++ b/interop/xds/server/server.go @@ -43,15 +43,19 @@ import ( ) var ( - port = flag.Int("port", 8080, "Listening port for test service") - maintenancePort = flag.Int("maintenance_port", 8081, "Listening port for maintenance services like health, reflection, channelz etc when -secure_mode is true. When -secure_mode is false, all these services will be registered on -port") - serverID = flag.String("server_id", "go_server", "Server ID included in response") - secureMode = flag.Bool("secure_mode", false, "If true, retrieve security configuration from the management server. Else, use insecure credentials.") + port = flag.Int("port", 8080, "Listening port for test service") + maintenancePort = flag.Int("maintenance_port", 8081, "Listening port for maintenance services like health, reflection, channelz etc when -secure_mode is true. When -secure_mode is false, all these services will be registered on -port") + serverID = flag.String("server_id", "go_server", "Server ID included in response") + secureMode = flag.Bool("secure_mode", false, "If true, retrieve security configuration from the management server. 
Else, use insecure credentials.") + hostNameOverride = flag.String("host_name_override", "", "If set, use this as the hostname instead of the real hostname") logger = grpclog.Component("interop") ) func getHostname() string { + if *hostNameOverride != "" { + return *hostNameOverride + } hostname, err := os.Hostname() if err != nil { log.Fatalf("failed to get hostname: %v", err) @@ -64,6 +68,7 @@ func getHostname() string { type testServiceImpl struct { testgrpc.UnimplementedTestServiceServer hostname string + serverID string } func (s *testServiceImpl) EmptyCall(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { @@ -73,7 +78,7 @@ func (s *testServiceImpl) EmptyCall(ctx context.Context, _ *testpb.Empty) (*test func (s *testServiceImpl) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { grpc.SetHeader(ctx, metadata.Pairs("hostname", s.hostname)) - return &testpb.SimpleResponse{ServerId: *serverID, Hostname: s.hostname}, nil + return &testpb.SimpleResponse{ServerId: s.serverID, Hostname: s.hostname}, nil } // xdsUpdateHealthServiceImpl provides an implementation of the @@ -108,7 +113,7 @@ func main() { logger.Fatal("-port and -maintenance_port must be different when -secure_mode is set") } - testService := &testServiceImpl{hostname: getHostname()} + testService := &testServiceImpl{hostname: getHostname(), serverID: *serverID} healthServer := health.NewServer() updateHealthService := &xdsUpdateHealthServiceImpl{healthServer: healthServer} diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index da7144f7eecc..9de83d37fecb 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -153,7 +153,7 @@ func init() { clusterAnys[i] = testutils.MarshalAny(clusters[i]) } for i := range edsTargets { - endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i]) + endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i:i+1]) endpointAnys[i] = testutils.MarshalAny(endpoints[i]) } } diff --git 
a/xds/internal/test/e2e/README.md b/xds/internal/test/e2e/README.md new file mode 100644 index 000000000000..33cffa0da56f --- /dev/null +++ b/xds/internal/test/e2e/README.md @@ -0,0 +1,19 @@ +Build client and server binaries. + +```sh +go build -o ./binaries/client ../../../../interop/xds/client/ +go build -o ./binaries/server ../../../../interop/xds/server/ +``` + +Run the test + +```sh +go test . -v +``` + +The client/server paths are flags + +```sh +go test . -v -client=$HOME/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-client +``` +Note that grpc logs are only turned on for Go. diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go new file mode 100644 index 000000000000..247991b83d33 --- /dev/null +++ b/xds/internal/test/e2e/controlplane.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e + +import ( + "fmt" + + "github.com/google/uuid" + xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/xds/internal/testutils/e2e" +) + +type controlPlane struct { + server *e2e.ManagementServer + nodeID string + bootstrapContent string +} + +func newControlPlane(testName string) (*controlPlane, error) { + // Spin up an xDS management server on a local port. 
+ server, err := e2e.StartManagementServer() + if err != nil { + return nil, fmt.Errorf("failed to spin up the xDS management server: %v", err) + } + + nodeID := uuid.New().String() + bootstrapContentBytes, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ + Version: xdsinternal.TransportV3, + NodeID: nodeID, + ServerURI: server.Address, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + }) + if err != nil { + server.Stop() + return nil, fmt.Errorf("failed to create bootstrap file: %v", err) + } + + return &controlPlane{ + server: server, + nodeID: nodeID, + bootstrapContent: string(bootstrapContentBytes), + }, nil +} + +func (cp *controlPlane) stop() { + cp.server.Stop() +} diff --git a/xds/internal/test/e2e/e2e.go b/xds/internal/test/e2e/e2e.go new file mode 100644 index 000000000000..82c7f9dfb25a --- /dev/null +++ b/xds/internal/test/e2e/e2e.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package e2e implements xds e2e tests using go-control-plane. +package e2e + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + + "google.golang.org/grpc" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func cmd(path string, logger io.Writer, args []string, env []string) (*exec.Cmd, error) { + cmd := exec.Command(path, args...) + cmd.Env = append(os.Environ(), env...) 
+ cmd.Stdout = logger + cmd.Stderr = logger + return cmd, nil +} + +const ( + clientStatsPort = 60363 // TODO: make this different per-test, only needed for parallel tests. +) + +type client struct { + cmd *exec.Cmd + + target string + statsCC *grpc.ClientConn +} + +// newClient create a client with the given target and bootstrap content. +func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ...string) (*client, error) { + cmd, err := cmd( + binaryPath, + logger, + append([]string{ + "--server=" + target, + "--print_response=true", + "--qps=100", + fmt.Sprintf("--stats_port=%d", clientStatsPort), + }, flags...), // Append any flags from caller. + []string{ + "GRPC_GO_LOG_VERBOSITY_LEVEL=99", + "GRPC_GO_LOG_SEVERITY_LEVEL=info", + "GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted. + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to run client cmd: %v", err) + } + cmd.Start() + + cc, err := grpc.Dial(fmt.Sprintf("localhost:%d", clientStatsPort), grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.WaitForReady(true))) + if err != nil { + return nil, err + } + return &client{ + cmd: cmd, + target: target, + statsCC: cc, + }, nil +} + +func (c *client) clientStats(ctx context.Context) (*testpb.LoadBalancerStatsResponse, error) { + ccc := testpb.NewLoadBalancerStatsServiceClient(c.statsCC) + return ccc.GetClientStats(ctx, &testpb.LoadBalancerStatsRequest{ + NumRpcs: 100, + TimeoutSec: 10, + }) +} + +func (c *client) configRPCs(ctx context.Context, req *testpb.ClientConfigureRequest) error { + ccc := testpb.NewXdsUpdateClientConfigureServiceClient(c.statsCC) + _, err := ccc.Configure(ctx, req) + return err +} + +func (c *client) channelzSubChannels(ctx context.Context) ([]*channelzpb.Subchannel, error) { + ccc := channelzpb.NewChannelzClient(c.statsCC) + r, err := ccc.GetTopChannels(ctx, &channelzpb.GetTopChannelsRequest{}) + if err != nil { + return nil, err + } + + var ret 
[]*channelzpb.Subchannel + for _, cc := range r.Channel { + if cc.Data.Target != c.target { + continue + } + for _, sc := range cc.SubchannelRef { + rr, err := ccc.GetSubchannel(ctx, &channelzpb.GetSubchannelRequest{SubchannelId: sc.SubchannelId}) + if err != nil { + return nil, err + } + ret = append(ret, rr.Subchannel) + } + } + return ret, nil +} + +func (c *client) stop() { + c.cmd.Process.Kill() + c.cmd.Wait() +} + +const ( + serverPort = 50051 // TODO: make this different per-test, only needed for parallel tests. +) + +type server struct { + cmd *exec.Cmd + port int +} + +// newServer creates multiple servers with the given bootstrap content. +// +// Each server gets a different hostname, in the format of +// -. +func newServers(hostnamePrefix, binaryPath, bootstrap string, logger io.Writer, count int) (_ []*server, err error) { + var ret []*server + defer func() { + if err != nil { + for _, s := range ret { + s.stop() + } + } + }() + for i := 0; i < count; i++ { + port := serverPort + i + cmd, err := cmd( + binaryPath, + logger, + []string{ + fmt.Sprintf("--port=%d", port), + fmt.Sprintf("--host_name_override=%s-%d", hostnamePrefix, i), + }, + []string{ + "GRPC_GO_LOG_VERBOSITY_LEVEL=99", + "GRPC_GO_LOG_SEVERITY_LEVEL=info", + "GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted., + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to run server cmd: %v", err) + } + cmd.Start() + ret = append(ret, &server{cmd: cmd, port: port}) + } + return ret, nil +} + +func (s *server) stop() { + s.cmd.Process.Kill() + s.cmd.Wait() +} diff --git a/xds/internal/test/e2e/e2e_test.go b/xds/internal/test/e2e/e2e_test.go new file mode 100644 index 000000000000..5c116b478fe9 --- /dev/null +++ b/xds/internal/test/e2e/e2e_test.go @@ -0,0 +1,262 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "strconv" + "testing" + "time" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/xds/internal/testutils/e2e" +) + +var ( + clientPath = flag.String("client", "./binaries/client", "The interop client") + serverPath = flag.String("server", "./binaries/server", "The interop server") +) + +func TestMain(m *testing.M) { + flag.Parse() + if _, err := os.Stat(*clientPath); os.IsNotExist(err) { + return + } + if _, err := os.Stat(*serverPath); os.IsNotExist(err) { + return + } + os.Exit(m.Run()) +} + +type testOpts struct { + testName string + backendCount int + clientFlags []string +} + +func setup(t *testing.T, opts testOpts) (*controlPlane, *client, []*server) { + t.Helper() + backendCount := 1 + if opts.backendCount != 0 { + backendCount = opts.backendCount + } + + cp, err := newControlPlane(opts.testName) + if err != nil { + t.Fatalf("failed to start control-plane: %v", err) + } + t.Cleanup(cp.stop) + + var clientLog bytes.Buffer + c, err := newClient(fmt.Sprintf("xds:///%s", opts.testName), *clientPath, cp.bootstrapContent, &clientLog, opts.clientFlags...) 
+ if err != nil { + t.Fatalf("failed to start client: %v", err) + } + t.Cleanup(c.stop) + + var serverLog bytes.Buffer + servers, err := newServers(opts.testName, *serverPath, cp.bootstrapContent, &serverLog, backendCount) + if err != nil { + t.Fatalf("failed to start server: %v", err) + } + t.Cleanup(func() { + for _, s := range servers { + s.stop() + } + }) + t.Cleanup(func() { + // TODO: find a better way to print the log. They are long, and hide the failure. + t.Logf("\n----- client logs -----\n%v", clientLog.String()) + t.Logf("\n----- server logs -----\n%v", serverLog.String()) + }) + return cp, c, servers +} + +func TestPingPong(t *testing.T) { + const testName = "pingpong" + cp, c, _ := setup(t, testOpts{testName: testName}) + + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: testName, + NodeID: cp.nodeID, + Host: "localhost", + Port: serverPort, + SecLevel: e2e.SecurityLevelNone, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := cp.server.Update(ctx, resources); err != nil { + t.Fatalf("failed to update control plane resources: %v", err) + } + + st, err := c.clientStats(ctx) + if err != nil { + t.Fatalf("failed to get client stats: %v", err) + } + if st.NumFailures != 0 { + t.Fatalf("Got %v failures: %+v", st.NumFailures, st) + } +} + +// TestAffinity covers the affinity tests with ringhash policy. 
+// - client is configured to use ringhash, with 3 backends +// - all RPCs will hash a specific metadata header +// - verify that +// - all RPCs with the same metadata value are sent to the same backend +// - only one backend is Ready +// - send more RPCs with different metadata values until a new backend is picked, and verify that +// - only two backends are in Ready +func TestAffinity(t *testing.T) { + const ( + testName = "affinity" + backendCount = 3 + testMDKey = "xds_md" + testMDValue = "unary_yranu" + ) + cp, c, servers := setup(t, testOpts{ + testName: testName, + backendCount: backendCount, + clientFlags: []string{"--rpc=EmptyCall", fmt.Sprintf("--metadata=EmptyCall:%s:%s", testMDKey, testMDValue)}, + }) + + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: testName, + NodeID: cp.nodeID, + Host: "localhost", + Port: serverPort, + SecLevel: e2e.SecurityLevelNone, + }) + + // Update EDS to multiple backends. + var ports []uint32 + for _, s := range servers { + ports = append(ports, uint32(s.port)) + } + edsMsg := resources.Endpoints[0] + resources.Endpoints[0] = e2e.DefaultEndpoint( + edsMsg.ClusterName, + "localhost", + ports, + ) + + // Update CDS lbpolicy to ringhash. + cdsMsg := resources.Clusters[0] + cdsMsg.LbPolicy = v3clusterpb.Cluster_RING_HASH + + // Update RDS to hash the header. 
+ rdsMsg := resources.Routes[0] + rdsMsg.VirtualHosts[0].Routes[0].Action = &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: cdsMsg.Name}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{{ + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: testMDKey, + }, + }, + }}, + }} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := cp.server.Update(ctx, resources); err != nil { + t.Fatalf("failed to update control plane resources: %v", err) + } + + // Note: We can skip CSDS check because there's no long delay as in TD. + // + // The client stats check doesn't race with the xds resource update because + // there's only one version of xds resource, updated at the beginning of the + // test. So there's no need to retry the stats call. + // + // In the future, we may add tests that update xds in the middle. Then we + // either need to retry clientStats(), or make a CSDS check before so the + // result is stable. + + st, err := c.clientStats(ctx) + if err != nil { + t.Fatalf("failed to get client stats: %v", err) + } + if st.NumFailures != 0 { + t.Fatalf("Got %v failures: %+v", st.NumFailures, st) + } + if len(st.RpcsByPeer) != 1 { + t.Fatalf("more than 1 backends got traffic: %v, want 1", st.RpcsByPeer) + } + + // Call channelz to verify that only one subchannel is in state Ready. + scs, err := c.channelzSubChannels(ctx) + if err != nil { + t.Fatalf("failed to fetch channelz: %v", err) + } + verifySubConnStates(t, scs, map[channelzpb.ChannelConnectivityState_State]int{ + channelzpb.ChannelConnectivityState_READY: 1, + channelzpb.ChannelConnectivityState_IDLE: 2, + }) + + // Send Unary call with different metadata value with integers starting from + // 0. Stop when a second peer is picked. 
+ var ( + diffPeerPicked bool + mdValue int + ) + for !diffPeerPicked { + if err := c.configRPCs(ctx, &testpb.ClientConfigureRequest{ + Types: []testpb.ClientConfigureRequest_RpcType{ + testpb.ClientConfigureRequest_EMPTY_CALL, + testpb.ClientConfigureRequest_UNARY_CALL, + }, + Metadata: []*testpb.ClientConfigureRequest_Metadata{ + {Type: testpb.ClientConfigureRequest_EMPTY_CALL, Key: testMDKey, Value: testMDValue}, + {Type: testpb.ClientConfigureRequest_UNARY_CALL, Key: testMDKey, Value: strconv.Itoa(mdValue)}, + }, + }); err != nil { + t.Fatalf("failed to configure RPC: %v", err) + } + + st, err := c.clientStats(ctx) + if err != nil { + t.Fatalf("failed to get client stats: %v", err) + } + if st.NumFailures != 0 { + t.Fatalf("Got %v failures: %+v", st.NumFailures, st) + } + if len(st.RpcsByPeer) == 2 { + break + } + + mdValue++ + } + + // Call channelz to verify that only one subchannel is in state Ready. + scs2, err := c.channelzSubChannels(ctx) + if err != nil { + t.Fatalf("failed to fetch channelz: %v", err) + } + verifySubConnStates(t, scs2, map[channelzpb.ChannelConnectivityState_State]int{ + channelzpb.ChannelConnectivityState_READY: 2, + channelzpb.ChannelConnectivityState_IDLE: 1, + }) +} diff --git a/xds/internal/test/e2e/e2e_utils.go b/xds/internal/test/e2e/e2e_utils.go new file mode 100644 index 000000000000..34b0ee9eb092 --- /dev/null +++ b/xds/internal/test/e2e/e2e_utils.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" +) + +func verifySubConnStates(t *testing.T, scs []*channelzpb.Subchannel, want map[channelzpb.ChannelConnectivityState_State]int) { + t.Helper() + var scStatsCount = map[channelzpb.ChannelConnectivityState_State]int{} + for _, sc := range scs { + scStatsCount[sc.Data.State.State]++ + } + if diff := cmp.Diff(scStatsCount, want); diff != "" { + t.Fatalf("got unexpected number of subchannels in state Ready, %v, scs: %v", diff, scs) + } +} diff --git a/xds/internal/test/e2e/run.sh b/xds/internal/test/e2e/run.sh new file mode 100755 index 000000000000..4363d6cbd94f --- /dev/null +++ b/xds/internal/test/e2e/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +mkdir binaries +go build -o ./binaries/client ../../../../interop/xds/client/ +go build -o ./binaries/server ../../../../interop/xds/server/ +go test . 
-v diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go index daae1aea8004..f3f7f6307c53 100644 --- a/xds/internal/testutils/e2e/clientresources.go +++ b/xds/internal/testutils/e2e/clientresources.go @@ -93,7 +93,7 @@ func DefaultClientResources(params ResourceParams) UpdateOptions { Listeners: []*v3listenerpb.Listener{DefaultClientListener(params.DialTarget, routeConfigName)}, Routes: []*v3routepb.RouteConfiguration{DefaultRouteConfig(routeConfigName, params.DialTarget, clusterName)}, Clusters: []*v3clusterpb.Cluster{DefaultCluster(clusterName, endpointsName, params.SecLevel)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{DefaultEndpoint(endpointsName, params.Host, params.Port)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{DefaultEndpoint(endpointsName, params.Host, []uint32{params.Port})}, } } @@ -358,21 +358,25 @@ func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) } // DefaultEndpoint returns a basic xds Endpoint resource. 
-func DefaultEndpoint(clusterName string, host string, port uint32) *v3endpointpb.ClusterLoadAssignment { +func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpointpb.ClusterLoadAssignment { + var lbEndpoints []*v3endpointpb.LbEndpoint + for _, port := range ports { + lbEndpoints = append(lbEndpoints, &v3endpointpb.LbEndpoint{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, + }}, + }}, + }) + } return &v3endpointpb.ClusterLoadAssignment{ ClusterName: clusterName, Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ - Locality: &v3corepb.Locality{SubZone: "subzone"}, - LbEndpoints: []*v3endpointpb.LbEndpoint{{ - HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ - Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Protocol: v3corepb.SocketAddress_TCP, - Address: host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: uint32(port)}}, - }}, - }}, - }}, + Locality: &v3corepb.Locality{SubZone: "subzone"}, + LbEndpoints: lbEndpoints, LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, Priority: 0, }}, From 524d10cbce3e1c597c48589341c01332f71c3d93 Mon Sep 17 00:00:00 2001 From: Terry Wilson Date: Thu, 7 Oct 2021 11:58:49 -0700 Subject: [PATCH 281/998] kokoro: source test driver install script from core repo (#4825) --- test/kokoro/xds_k8s.sh | 13 +- test/kokoro/xds_k8s_install_test_driver.sh | 380 --------------------- test/kokoro/xds_url_map.sh | 13 +- 3 files changed, 16 insertions(+), 390 deletions(-) delete mode 100755 test/kokoro/xds_k8s_install_test_driver.sh diff --git a/test/kokoro/xds_k8s.sh b/test/kokoro/xds_k8s.sh index 67f35ae850a5..f91d1d026d67 100755 --- 
a/test/kokoro/xds_k8s.sh +++ b/test/kokoro/xds_k8s.sh @@ -17,9 +17,7 @@ set -eo pipefail # Constants readonly GITHUB_REPOSITORY_NAME="grpc-go" -# GKE Cluster -readonly GKE_CLUSTER_NAME="interop-test-psm-sec-v2-us-central1-a" -readonly GKE_CLUSTER_ZONE="us-central1-a" +readonly TEST_DRIVER_INSTALL_SCRIPT_URL="https://raw.githubusercontent.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc/${TEST_DRIVER_BRANCH:-master}/tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh" ## xDS test server/client Docker images readonly SERVER_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-server" readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" @@ -134,8 +132,13 @@ run_test() { main() { local script_dir script_dir="$(dirname "$0")" - # shellcheck source=test/kokoro/xds_k8s_install_test_driver.sh - source "${script_dir}/xds_k8s_install_test_driver.sh" + + # Source the test driver from the master branch. + echo "Sourcing test driver install script from: ${TEST_DRIVER_INSTALL_SCRIPT_URL}" + source /dev/stdin <<< "$(curl -s "${TEST_DRIVER_INSTALL_SCRIPT_URL}")" + + activate_gke_cluster GKE_CLUSTER_PSM_SECURITY + set -x if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}" diff --git a/test/kokoro/xds_k8s_install_test_driver.sh b/test/kokoro/xds_k8s_install_test_driver.sh deleted file mode 100755 index aacaaf1ef56a..000000000000 --- a/test/kokoro/xds_k8s_install_test_driver.sh +++ /dev/null @@ -1,380 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# TODO(sergiitk): move to grpc/grpc when implementing support of other languages -set -eo pipefail - -# Constants -readonly PYTHON_VERSION="3.6" -# Test driver -readonly TEST_DRIVER_REPO_NAME="grpc" -readonly TEST_DRIVER_REPO_URL="https://github.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc.git" -readonly TEST_DRIVER_BRANCH="${TEST_DRIVER_BRANCH:-master}" -readonly TEST_DRIVER_PATH="tools/run_tests/xds_k8s_test_driver" -readonly TEST_DRIVER_PROTOS_PATH="src/proto/grpc/testing" - -####################################### -# Run command end report its exit code. Doesn't exit on non-zero exit code. -# Globals: -# None -# Arguments: -# Command to execute -# Outputs: -# Writes the output of given command to stdout, stderr -####################################### -run_ignore_exit_code() { - local exit_code=-1 - "$@" || exit_code=$? - echo "Exit code: ${exit_code}" -} - -####################################### -# Parses information about git repository at given path to global variables. -# Globals: -# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build -# GIT_COMMIT: Populated with the SHA-1 of git commit being built -# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built -# Arguments: -# Git source dir -####################################### -parse_src_repo_git_info() { - local src_dir="${SRC_DIR:?SRC_DIR must be set}" - readonly GIT_ORIGIN_URL=$(git -C "${src_dir}" remote get-url origin) - readonly GIT_COMMIT=$(git -C "${src_dir}" rev-parse HEAD) - readonly GIT_COMMIT_SHORT=$(git -C "${src_dir}" rev-parse --short HEAD) -} - -####################################### -# List GCR image tags matching given tag name. -# Arguments: -# Image name -# Tag name -# Outputs: -# Writes the table with the list of found tags to stdout. -# If no tags found, the output is an empty string. 
-####################################### -gcloud_gcr_list_image_tags() { - gcloud container images list-tags --format="table[box](tags,digest,timestamp.date())" --filter="tags:$2" "$1" -} - -####################################### -# A helper to execute `gcloud -q components update`. -# Arguments: -# None -# Outputs: -# Writes the output of `gcloud` command to stdout, stderr -####################################### -gcloud_update() { - echo "Update gcloud components:" - gcloud -q components update -} - -####################################### -# Create kube context authenticated with GKE cluster, saves context name. -# to KUBE_CONTEXT -# Globals: -# GKE_CLUSTER_NAME -# GKE_CLUSTER_ZONE -# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access -# Arguments: -# None -# Outputs: -# Writes the output of `gcloud` command to stdout, stderr -# Writes authorization info $HOME/.kube/config -####################################### -gcloud_get_cluster_credentials() { - gcloud container clusters get-credentials "${GKE_CLUSTER_NAME}" --zone "${GKE_CLUSTER_ZONE}" - readonly KUBE_CONTEXT="$(kubectl config current-context)" -} - -####################################### -# Clone the source code of the test driver to $TEST_DRIVER_REPO_DIR, unless -# given folder exists. -# Globals: -# TEST_DRIVER_REPO_URL -# TEST_DRIVER_BRANCH -# TEST_DRIVER_REPO_DIR: path to the repo containing the test driver -# TEST_DRIVER_REPO_DIR_USE_EXISTING: set non-empty value to use exiting -# clone of the driver repo located at $TEST_DRIVER_REPO_DIR. -# Useful for debugging the build script locally. -# Arguments: -# None -# Outputs: -# Writes the output of `git` command to stdout, stderr -# Writes driver source code to $TEST_DRIVER_REPO_DIR -####################################### -test_driver_get_source() { - if [[ -n "${TEST_DRIVER_REPO_DIR_USE_EXISTING}" && -d "${TEST_DRIVER_REPO_DIR}" ]]; then - echo "Using exiting driver directory: ${TEST_DRIVER_REPO_DIR}." 
- else - echo "Cloning driver to ${TEST_DRIVER_REPO_URL} branch ${TEST_DRIVER_BRANCH} to ${TEST_DRIVER_REPO_DIR}" - git clone -b "${TEST_DRIVER_BRANCH}" --depth=1 "${TEST_DRIVER_REPO_URL}" "${TEST_DRIVER_REPO_DIR}" - fi -} - -####################################### -# Install Python modules from required in $TEST_DRIVER_FULL_DIR/requirements.txt -# to Python virtual environment. Creates and activates Python venv if necessary. -# Globals: -# TEST_DRIVER_FULL_DIR -# PYTHON_VERSION -# Arguments: -# None -# Outputs: -# Writes the output of `python`, `pip` commands to stdout, stderr -# Writes the list of installed modules to stdout -####################################### -test_driver_pip_install() { - echo "Install python dependencies" - cd "${TEST_DRIVER_FULL_DIR}" - - # Create and activate virtual environment unless already using one - if [[ -z "${VIRTUAL_ENV}" ]]; then - local venv_dir="${TEST_DRIVER_FULL_DIR}/venv" - if [[ -d "${venv_dir}" ]]; then - echo "Found python virtual environment directory: ${venv_dir}" - else - echo "Creating python virtual environment: ${venv_dir}" - "python${PYTHON_VERSION}" -m venv "${venv_dir}" - fi - # Intentional: No need to check python venv activate script. 
- # shellcheck source=/dev/null - source "${venv_dir}/bin/activate" - fi - - pip install -r requirements.txt - echo "Installed Python packages:" - pip list -} - -####################################### -# Compile proto-files needed for the test driver -# Globals: -# TEST_DRIVER_REPO_DIR -# TEST_DRIVER_FULL_DIR -# TEST_DRIVER_PROTOS_PATH -# Arguments: -# None -# Outputs: -# Writes the output of `python -m grpc_tools.protoc` to stdout, stderr -# Writes the list if compiled python code to stdout -# Writes compiled python code with proto messages and grpc services to -# $TEST_DRIVER_FULL_DIR/src/proto -####################################### -test_driver_compile_protos() { - declare -a protos - protos=( - "${TEST_DRIVER_PROTOS_PATH}/test.proto" - "${TEST_DRIVER_PROTOS_PATH}/messages.proto" - "${TEST_DRIVER_PROTOS_PATH}/empty.proto" - ) - echo "Generate python code from grpc.testing protos: ${protos[*]}" - cd "${TEST_DRIVER_REPO_DIR}" - python -m grpc_tools.protoc \ - --proto_path=. \ - --python_out="${TEST_DRIVER_FULL_DIR}" \ - --grpc_python_out="${TEST_DRIVER_FULL_DIR}" \ - "${protos[@]}" - local protos_out_dir="${TEST_DRIVER_FULL_DIR}/${TEST_DRIVER_PROTOS_PATH}" - echo "Generated files ${protos_out_dir}:" - ls -Fl "${protos_out_dir}" -} - -####################################### -# Installs the test driver and it's requirements. 
-# https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#installation -# Globals: -# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing -# the test driver -# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code -# Arguments: -# The directory for test driver's source code -# Outputs: -# Writes the output to stdout, stderr -####################################### -test_driver_install() { - readonly TEST_DRIVER_REPO_DIR="${1:?Usage test_driver_install TEST_DRIVER_REPO_DIR}" - readonly TEST_DRIVER_FULL_DIR="${TEST_DRIVER_REPO_DIR}/${TEST_DRIVER_PATH}" - test_driver_get_source - test_driver_pip_install - test_driver_compile_protos -} - -####################################### -# Outputs Kokoro image version and Ubuntu's lsb_release -# Arguments: -# None -# Outputs: -# Writes the output to stdout -####################################### -kokoro_print_version() { - echo "Kokoro VM version:" - if [[ -f /VERSION ]]; then - cat /VERSION - fi - run_ignore_exit_code lsb_release -a -} - -####################################### -# Report extra information about the job via sponge properties. -# Globals: -# KOKORO_ARTIFACTS_DIR -# GIT_ORIGIN_URL -# GIT_COMMIT_SHORT -# TESTGRID_EXCLUDE -# Arguments: -# None -# Outputs: -# Writes the output to stdout -# Writes job properties to $KOKORO_ARTIFACTS_DIR/custom_sponge_config.csv -####################################### -kokoro_write_sponge_properties() { - # CSV format: "property_name","property_value" - # Bump TESTS_FORMAT_VERSION when reported test name changed enough to when it - # makes more sense to discard previous test results from a testgrid board. - # Use GIT_ORIGIN_URL to exclude test runs executed against repo forks from - # testgrid reports. 
- cat >"${KOKORO_ARTIFACTS_DIR}/custom_sponge_config.csv" < Date: Thu, 7 Oct 2021 22:46:49 -0400 Subject: [PATCH 282/998] Added logs to rbac (#4853) Added logs to rbac --- internal/xds/rbac/rbac_engine.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index b3d372e75b7d..a25f9cfdeefa 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -39,6 +39,8 @@ import ( "google.golang.org/grpc/status" ) +const logLevel = 2 + var logger = grpclog.Component("rbac") var getConnection = transport.GetConnection @@ -77,6 +79,9 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { } for _, engine := range cre.chainedEngines { matchingPolicyName, ok := engine.findMatchingPolicy(rpcData) + if logger.V(logLevel) && ok { + logger.Infof("incoming RPC matched to policy %v in engine with action %v", matchingPolicyName, engine.action) + } switch { case engine.action == v3rbacpb.RBAC_ALLOW && !ok: From b99d1040b71caf9b22be570edb68d85dcb6c515c Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Fri, 8 Oct 2021 17:09:55 -0700 Subject: [PATCH 283/998] authz: create file watcher interceptor for gRPC SDK API (#4760) * authz: create file watcher interceptor for gRPC SDK API --- authz/sdk_end2end_test.go | 327 ++++++++++++++++++++++---- authz/sdk_server_interceptors.go | 97 ++++++++ authz/sdk_server_interceptors_test.go | 75 +++++- 3 files changed, 451 insertions(+), 48 deletions(-) diff --git a/authz/sdk_end2end_test.go b/authz/sdk_end2end_test.go index 92a5e4f4b210..093b2bb437d2 100644 --- a/authz/sdk_end2end_test.go +++ b/authz/sdk_end2end_test.go @@ -21,13 +21,16 @@ package authz_test import ( "context" "io" + "io/ioutil" "net" + "os" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/authz" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpctest" 
"google.golang.org/grpc/metadata" "google.golang.org/grpc/status" pb "google.golang.org/grpc/test/grpc_testing" @@ -53,15 +56,21 @@ func (s *testServer) StreamingInputCall(stream pb.TestService_StreamingInputCall } } -func TestSDKEnd2End(t *testing.T) { - tests := map[string]struct { - authzPolicy string - md metadata.MD - wantStatusCode codes.Code - wantErr string - }{ - "DeniesRpcRequestMatchInDenyNoMatchInAllow": { - authzPolicy: `{ +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +var sdkTests = map[string]struct { + authzPolicy string + md metadata.MD + wantStatus *status.Status +}{ + "DeniesRpcMatchInDenyNoMatchInAllow": { + authzPolicy: `{ "name": "authz", "allow_rules": [ @@ -100,12 +109,11 @@ func TestSDKEnd2End(t *testing.T) { } ] }`, - md: metadata.Pairs("key-abc", "val-abc"), - wantStatusCode: codes.PermissionDenied, - wantErr: "unauthorized RPC request rejected", - }, - "DeniesRpcRequestMatchInDenyAndAllow": { - authzPolicy: `{ + md: metadata.Pairs("key-abc", "val-abc"), + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "DeniesRpcMatchInDenyAndAllow": { + authzPolicy: `{ "name": "authz", "allow_rules": [ @@ -132,11 +140,10 @@ func TestSDKEnd2End(t *testing.T) { } ] }`, - wantStatusCode: codes.PermissionDenied, - wantErr: "unauthorized RPC request rejected", - }, - "AllowsRpcRequestNoMatchInDenyMatchInAllow": { - authzPolicy: `{ + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "AllowsRpcNoMatchInDenyMatchInAllow": { + authzPolicy: `{ "name": "authz", "allow_rules": [ @@ -169,11 +176,11 @@ func TestSDKEnd2End(t *testing.T) { } ] }`, - md: metadata.Pairs("key-xyz", "val-xyz"), - wantStatusCode: codes.OK, - }, - "AllowsRpcRequestNoMatchInDenyAndAllow": { - authzPolicy: `{ + md: metadata.Pairs("key-xyz", "val-xyz"), + wantStatus: status.New(codes.OK, ""), + }, + "DeniesRpcNoMatchInDenyAndAllow": { + authzPolicy: `{ 
"name": "authz", "allow_rules": [ @@ -200,11 +207,10 @@ func TestSDKEnd2End(t *testing.T) { } ] }`, - wantStatusCode: codes.PermissionDenied, - wantErr: "unauthorized RPC request rejected", - }, - "AllowsRpcRequestEmptyDenyMatchInAllow": { - authzPolicy: `{ + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "AllowsRpcEmptyDenyMatchInAllow": { + authzPolicy: `{ "name": "authz", "allow_rules": [ @@ -230,10 +236,10 @@ func TestSDKEnd2End(t *testing.T) { } ] }`, - wantStatusCode: codes.OK, - }, - "DeniesRpcRequestEmptyDenyNoMatchInAllow": { - authzPolicy: `{ + wantStatus: status.New(codes.OK, ""), + }, + "DeniesRpcEmptyDenyNoMatchInAllow": { + authzPolicy: `{ "name": "authz", "allow_rules": [ @@ -249,22 +255,85 @@ func TestSDKEnd2End(t *testing.T) { } ] }`, - wantStatusCode: codes.PermissionDenied, - wantErr: "unauthorized RPC request rejected", - }, - } - for name, test := range tests { + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, +} + +func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { + for name, test := range sdkTests { t.Run(name, func(t *testing.T) { // Start a gRPC server with SDK unary and stream server interceptors. i, _ := authz.NewStatic(test.authzPolicy) + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor), + grpc.ChainStreamInterceptor(i.StreamInterceptor)) + defer s.Stop() + pb.RegisterTestServiceServer(s, &testServer{}) + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("error listening: %v", err) } + go s.Serve(lis) + + // Establish a connection to the server. 
+ clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := pb.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + ctx = metadata.NewOutgoingContext(ctx, test.md) + + // Verifying authorization decision for Unary RPC. + _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[UnaryCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) + } + + // Verifying authorization decision for Streaming RPC. + stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("failed StreamingInputCall err: %v", err) + } + req := &pb.StreamingInputCallRequest{ + Payload: &pb.Payload{ + Body: []byte("hi"), + }, + } + if err := stream.Send(req); err != nil && err != io.EOF { + t.Fatalf("failed stream.Send err: %v", err) + } + _, err = stream.CloseAndRecv() + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[StreamingCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) + } + }) + } +} + +func (s) TestSDKFileWatcherEnd2End(t *testing.T) { + for name, test := range sdkTests { + t.Run(name, func(t *testing.T) { + file := createTmpPolicyFile(t, name, []byte(test.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 1*time.Second) + defer i.Close() + + // Start a gRPC server with SDK unary and stream server interceptors. 
s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor), grpc.ChainStreamInterceptor(i.StreamInterceptor)) + defer s.Stop() pb.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() go s.Serve(lis) // Establish a connection to the server. @@ -281,8 +350,8 @@ func TestSDKEnd2End(t *testing.T) { // Verifying authorization decision for Unary RPC. _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) - if got := status.Convert(err); got.Code() != test.wantStatusCode || got.Message() != test.wantErr { - t.Fatalf("[UnaryCall] error want:{%v %v} got:{%v %v}", test.wantStatusCode, test.wantErr, got.Code(), got.Message()) + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[UnaryCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) } // Verifying authorization decision for Streaming RPC. 
@@ -299,9 +368,181 @@ func TestSDKEnd2End(t *testing.T) { t.Fatalf("failed stream.Send err: %v", err) } _, err = stream.CloseAndRecv() - if got := status.Convert(err); got.Code() != test.wantStatusCode || got.Message() != test.wantErr { - t.Fatalf("[StreamingCall] error want:{%v %v} got:{%v %v}", test.wantStatusCode, test.wantErr, got.Code(), got.Message()) + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[StreamingCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) } }) } } + +func retryUntil(ctx context.Context, tsc pb.TestServiceClient, want *status.Status) (lastErr error) { + for ctx.Err() == nil { + _, lastErr = tsc.UnaryCall(ctx, &pb.SimpleRequest{}) + if s := status.Convert(lastErr); s.Code() == want.Code() && s.Message() == want.Message() { + return nil + } + time.Sleep(20 * time.Millisecond) + } + return lastErr +} + +func (s) TestSDKFileWatcher_ValidPolicyRefresh(t *testing.T) { + valid1 := sdkTests["DeniesRpcMatchInDenyAndAllow"] + file := createTmpPolicyFile(t, "valid_policy_refresh", []byte(valid1.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 100*time.Millisecond) + defer i.Close() + + // Start a gRPC server with SDK unary server interceptor. + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + pb.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := pb.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. 
+ _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { + t.Fatalf("error want:{%v} got:{%v}", valid1.wantStatus.Err(), got.Err()) + } + + // Rewrite the file with a different valid authorization policy. + valid2 := sdkTests["AllowsRpcEmptyDenyMatchInAllow"] + if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { + t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + } + + // Verifying authorization decision. + if got := retryUntil(ctx, client, valid2.wantStatus); got != nil { + t.Fatalf("error want:{%v} got:{%v}", valid2.wantStatus.Err(), got) + } +} + +func (s) TestSDKFileWatcher_InvalidPolicySkipReload(t *testing.T) { + valid := sdkTests["DeniesRpcMatchInDenyAndAllow"] + file := createTmpPolicyFile(t, "invalid_policy_skip_reload", []byte(valid.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 20*time.Millisecond) + defer i.Close() + + // Start a gRPC server with SDK unary server interceptors. + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + pb.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := pb.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. 
+ _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { + t.Fatalf("error want:{%v} got:{%v}", valid.wantStatus.Err(), got.Err()) + } + + // Skips the invalid policy update, and continues to use the valid policy. + if err := ioutil.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { + t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + } + + // Wait 40 ms for background go routine to read updated files. + time.Sleep(40 * time.Millisecond) + + // Verifying authorization decision. + _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { + t.Fatalf("error want:{%v} got:{%v}", valid.wantStatus.Err(), got.Err()) + } +} + +func (s) TestSDKFileWatcher_RecoversFromReloadFailure(t *testing.T) { + valid1 := sdkTests["DeniesRpcMatchInDenyAndAllow"] + file := createTmpPolicyFile(t, "recovers_from_reload_failure", []byte(valid1.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 100*time.Millisecond) + defer i.Close() + + // Start a gRPC server with SDK unary server interceptors. + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + pb.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := pb.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. 
+ _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { + t.Fatalf("error want:{%v} got:{%v}", valid1.wantStatus.Err(), got.Err()) + } + + // Skips the invalid policy update, and continues to use the valid policy. + if err := ioutil.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { + t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + } + + // Wait 120 ms for background go routine to read updated files. + time.Sleep(120 * time.Millisecond) + + // Verifying authorization decision. + _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { + t.Fatalf("error want:{%v} got:{%v}", valid1.wantStatus.Err(), got.Err()) + } + + // Rewrite the file with a different valid authorization policy. + valid2 := sdkTests["AllowsRpcEmptyDenyMatchInAllow"] + if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { + t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + } + + // Verifying authorization decision. + if got := retryUntil(ctx, client, valid2.wantStatus); got != nil { + t.Fatalf("error want:{%v} got:{%v}", valid2.wantStatus.Err(), got) + } +} diff --git a/authz/sdk_server_interceptors.go b/authz/sdk_server_interceptors.go index a2f992b5f263..72dc14ed85e4 100644 --- a/authz/sdk_server_interceptors.go +++ b/authz/sdk_server_interceptors.go @@ -17,14 +17,23 @@ package authz import ( + "bytes" "context" + "fmt" + "io/ioutil" + "sync/atomic" + "time" + "unsafe" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/xds/rbac" "google.golang.org/grpc/status" ) +var logger = grpclog.Component("authz") + // StaticInterceptor contains engines used to make authorization decisions. 
It // either contains two engines deny engine followed by an allow engine or only // one allow engine. @@ -73,3 +82,91 @@ func (i *StaticInterceptor) StreamInterceptor(srv interface{}, ss grpc.ServerStr } return handler(srv, ss) } + +// FileWatcherInterceptor contains details used to make authorization decisions +// by watching a file path that contains authorization policy in JSON format. +type FileWatcherInterceptor struct { + internalInterceptor unsafe.Pointer // *StaticInterceptor + policyFile string + policyContents []byte + refreshDuration time.Duration + cancel context.CancelFunc +} + +// NewFileWatcher returns a new FileWatcherInterceptor from a policy file +// that contains JSON string of authorization policy and a refresh duration to +// specify the amount of time between policy refreshes. +func NewFileWatcher(file string, duration time.Duration) (*FileWatcherInterceptor, error) { + if file == "" { + return nil, fmt.Errorf("authorization policy file path is empty") + } + if duration <= time.Duration(0) { + return nil, fmt.Errorf("requires refresh interval(%v) greater than 0s", duration) + } + i := &FileWatcherInterceptor{policyFile: file, refreshDuration: duration} + if err := i.updateInternalInterceptor(); err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + i.cancel = cancel + // Create a background go routine for policy refresh. + go i.run(ctx) + return i, nil +} + +func (i *FileWatcherInterceptor) run(ctx context.Context) { + ticker := time.NewTicker(i.refreshDuration) + for { + if err := i.updateInternalInterceptor(); err != nil { + logger.Warningf("authorization policy reload status err: %v", err) + } + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + } + } +} + +// updateInternalInterceptor checks if the policy file that is watching has changed, +// and if so, updates the internalInterceptor with the policy. 
Unlike the +// constructor, if there is an error in reading the file or parsing the policy, the +// previous internalInterceptors will not be replaced. +func (i *FileWatcherInterceptor) updateInternalInterceptor() error { + policyContents, err := ioutil.ReadFile(i.policyFile) + if err != nil { + return fmt.Errorf("policyFile(%s) read failed: %v", i.policyFile, err) + } + if bytes.Equal(i.policyContents, policyContents) { + return nil + } + i.policyContents = policyContents + policyContentsString := string(policyContents) + interceptor, err := NewStatic(policyContentsString) + if err != nil { + return err + } + atomic.StorePointer(&i.internalInterceptor, unsafe.Pointer(interceptor)) + logger.Infof("authorization policy reload status: successfully loaded new policy %v", policyContentsString) + return nil +} + +// Close cleans up resources allocated by the interceptor. +func (i *FileWatcherInterceptor) Close() { + i.cancel() +} + +// UnaryInterceptor intercepts incoming Unary RPC requests. +// Only authorized requests are allowed to pass. Otherwise, an unauthorized +// error is returned to the client. +func (i *FileWatcherInterceptor) UnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return ((*StaticInterceptor)(atomic.LoadPointer(&i.internalInterceptor))).UnaryInterceptor(ctx, req, info, handler) +} + +// StreamInterceptor intercepts incoming Stream RPC requests. +// Only authorized requests are allowed to pass. Otherwise, an unauthorized +// error is returned to the client. 
+func (i *FileWatcherInterceptor) StreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return ((*StaticInterceptor)(atomic.LoadPointer(&i.internalInterceptor))).StreamInterceptor(srv, ss, info, handler) +} diff --git a/authz/sdk_server_interceptors_test.go b/authz/sdk_server_interceptors_test.go index e2c1072e7d86..f43f9807612e 100644 --- a/authz/sdk_server_interceptors_test.go +++ b/authz/sdk_server_interceptors_test.go @@ -19,19 +19,43 @@ package authz_test import ( + "fmt" + "io/ioutil" + "os" + "path" "testing" + "time" "google.golang.org/grpc/authz" ) -func TestNewStatic(t *testing.T) { +func createTmpPolicyFile(t *testing.T, dirSuffix string, policy []byte) string { + t.Helper() + + // Create a temp directory. Passing an empty string for the first argument + // uses the system temp directory. + dir, err := ioutil.TempDir("", dirSuffix) + if err != nil { + t.Fatalf("ioutil.TempDir() failed: %v", err) + } + t.Logf("Using tmpdir: %s", dir) + // Write policy into file. 
+ filename := path.Join(dir, "policy.json") + if err := ioutil.WriteFile(filename, policy, os.ModePerm); err != nil { + t.Fatalf("ioutil.WriteFile(%q) failed: %v", filename, err) + } + t.Logf("Wrote policy %s to file at %s", string(policy), filename) + return filename +} + +func (s) TestNewStatic(t *testing.T) { tests := map[string]struct { authzPolicy string - wantErr bool + wantErr error }{ "InvalidPolicyFailsToCreateInterceptor": { authzPolicy: `{}`, - wantErr: true, + wantErr: fmt.Errorf(`"name" is not present`), }, "ValidPolicyCreatesInterceptor": { authzPolicy: `{ @@ -43,14 +67,55 @@ func TestNewStatic(t *testing.T) { } ] }`, - wantErr: false, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - if _, err := authz.NewStatic(test.authzPolicy); (err != nil) != test.wantErr { + if _, err := authz.NewStatic(test.authzPolicy); fmt.Sprint(err) != fmt.Sprint(test.wantErr) { t.Fatalf("NewStatic(%v) returned err: %v, want err: %v", test.authzPolicy, err, test.wantErr) } }) } } + +func (s) TestNewFileWatcher(t *testing.T) { + tests := map[string]struct { + authzPolicy string + refreshDuration time.Duration + wantErr error + }{ + "InvalidRefreshDurationFailsToCreateInterceptor": { + refreshDuration: time.Duration(0), + wantErr: fmt.Errorf("requires refresh interval(0s) greater than 0s"), + }, + "InvalidPolicyFailsToCreateInterceptor": { + authzPolicy: `{}`, + refreshDuration: time.Duration(1), + wantErr: fmt.Errorf(`"name" is not present`), + }, + "ValidPolicyCreatesInterceptor": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_all" + } + ] + }`, + refreshDuration: time.Duration(1), + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + file := createTmpPolicyFile(t, name, []byte(test.authzPolicy)) + i, err := authz.NewFileWatcher(file, test.refreshDuration) + if fmt.Sprint(err) != fmt.Sprint(test.wantErr) { + t.Fatalf("NewFileWatcher(%v) returned err: %v, want err: %v", test.authzPolicy, err, 
test.wantErr) + } + if i != nil { + i.Close() + } + }) + } +} From 49f638878973e2ccc20859209a73e1c3a02de015 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 11 Oct 2021 11:06:15 -0700 Subject: [PATCH 284/998] grpclog: support formatting output as JSON (#4854) --- grpclog/loggerv2.go | 86 +++++++++++++++++++++++++---------- grpclog/loggerv2_test.go | 4 +- interop/xds/client/Dockerfile | 1 + 3 files changed, 65 insertions(+), 26 deletions(-) diff --git a/grpclog/loggerv2.go b/grpclog/loggerv2.go index 4ee33171e008..34098bb8eb59 100644 --- a/grpclog/loggerv2.go +++ b/grpclog/loggerv2.go @@ -19,11 +19,14 @@ package grpclog import ( + "encoding/json" + "fmt" "io" "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +98,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,19 +109,32 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -142,58 +159,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. 
+ b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). 
+ g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { diff --git a/grpclog/loggerv2_test.go b/grpclog/loggerv2_test.go index 756f215f9c86..0b2c8b23d668 100644 --- a/grpclog/loggerv2_test.go +++ b/grpclog/loggerv2_test.go @@ -52,9 +52,9 @@ func TestLoggerV2Severity(t *testing.T) { } // check if b is in the format of: -// WARNING: 2017/04/07 14:55:42 WARNING +// 2017/04/07 14:55:42 WARNING: WARNING func checkLogForSeverity(s int, b []byte) error { - expected := regexp.MustCompile(fmt.Sprintf(`^%s: [0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s\n$`, severityName[s], severityName[s])) + expected := regexp.MustCompile(fmt.Sprintf(`^[0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s: %s\n$`, severityName[s], severityName[s])) if m := expected.Match(b); !m { return fmt.Errorf("got: %v, want string in format of: %v", string(b), severityName[s]+": 2016/10/05 17:09:26 "+severityName[s]) } diff --git a/interop/xds/client/Dockerfile b/interop/xds/client/Dockerfile index 59cacb94a26a..533bb6adb3ed 100644 --- a/interop/xds/client/Dockerfile +++ b/interop/xds/client/Dockerfile @@ -33,4 +33,5 @@ FROM alpine COPY --from=build /go/src/grpc-go/client . 
ENV GRPC_GO_LOG_VERBOSITY_LEVEL=2 ENV GRPC_GO_LOG_SEVERITY_LEVEL="info" +ENV GRPC_GO_LOG_FORMATTER="json" ENTRYPOINT ["./client"] From 6c56e211a0691f83bb59c5100e79f25d04cd4bb0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 11 Oct 2021 14:55:12 -0700 Subject: [PATCH 285/998] grpclb: add `target_field` to service config (#4847) --- balancer/grpclb/grpclb.go | 32 +- balancer/grpclb/grpclb_config.go | 1 + balancer/grpclb/grpclb_config_test.go | 40 +- balancer/grpclb/grpclb_remote_balancer.go | 43 +- balancer/grpclb/grpclb_test.go | 618 ++++++++++++---------- balancer/grpclb/grpclb_test_util_test.go | 25 +- 6 files changed, 450 insertions(+), 309 deletions(-) diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index adf596111604..fe423af182a4 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -135,6 +135,7 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal lb := &lbBalancer{ cc: newLBCacheClientConn(cc), + dialTarget: opt.Target.Endpoint, target: opt.Target.Endpoint, opt: opt, fallbackTimeout: b.fallbackTimeout, @@ -164,9 +165,10 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal } type lbBalancer struct { - cc *lbCacheClientConn - target string - opt balancer.BuildOptions + cc *lbCacheClientConn + dialTarget string // user's dial target + target string // same as dialTarget unless overridden in service config + opt balancer.BuildOptions usePickFirst bool @@ -398,6 +400,30 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { lb.mu.Lock() defer lb.mu.Unlock() + // grpclb uses the user's dial target to populate the `Name` field of the + // `InitialLoadBalanceRequest` message sent to the remote balancer. But when + // grpclb is used a child policy in the context of RLS, we want the `Name` + // field to be populated with the value received from the RLS server. 
To + // support this use case, an optional "target_name" field has been added to + // the grpclb LB policy's config. If specified, it overrides the name of + // the target to be sent to the remote balancer; if not, the target to be + // sent to the balancer will continue to be obtained from the target URI + // passed to the gRPC client channel. Whenever that target to be sent to the + // balancer is updated, we need to restart the stream to the balancer as + // this target is sent in the first message on the stream. + if gc != nil { + target := lb.dialTarget + if gc.TargetName != "" { + target = gc.TargetName + } + if target != lb.target { + lb.target = target + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.cancelRemoteBalancerCall() + } + } + } + newUsePickFirst := childIsPickFirst(gc) if lb.usePickFirst == newUsePickFirst { return diff --git a/balancer/grpclb/grpclb_config.go b/balancer/grpclb/grpclb_config.go index aac3719631b4..b4e23dee0172 100644 --- a/balancer/grpclb/grpclb_config.go +++ b/balancer/grpclb/grpclb_config.go @@ -34,6 +34,7 @@ const ( type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage + TargetName string } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { diff --git a/balancer/grpclb/grpclb_config_test.go b/balancer/grpclb/grpclb_config_test.go index 5a45de90494b..0db2299157ec 100644 --- a/balancer/grpclb/grpclb_config_test.go +++ b/balancer/grpclb/grpclb_config_test.go @@ -20,52 +20,68 @@ package grpclb import ( "encoding/json" - "errors" - "fmt" - "reflect" - "strings" "testing" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc/serviceconfig" ) func (s) TestParse(t *testing.T) { tests := []struct { name string - s string + sc string want serviceconfig.LoadBalancingConfig - wantErr error + wantErr bool }{ { name: "empty", - s: "", + sc: "", want: nil, - wantErr: errors.New("unexpected end of JSON input"), + wantErr: 
true, }, { name: "success1", - s: `{"childPolicy":[{"pick_first":{}}]}`, + sc: ` +{ + "childPolicy": [ + {"pick_first":{}} + ], + "targetName": "foo-service" +}`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"pick_first": json.RawMessage("{}")}, }, + TargetName: "foo-service", }, }, { name: "success2", - s: `{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`, + sc: ` +{ + "childPolicy": [ + {"round_robin":{}}, + {"pick_first":{}} + ], + "targetName": "foo-service" +}`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"round_robin": json.RawMessage("{}")}, {"pick_first": json.RawMessage("{}")}, }, + TargetName: "foo-service", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got, err := (&lbBuilder{}).ParseConfig(json.RawMessage(tt.s)); !reflect.DeepEqual(got, tt.want) || !strings.Contains(fmt.Sprint(err), fmt.Sprint(tt.wantErr)) { - t.Errorf("parseFullServiceConfig() = %+v, %+v, want %+v, ", got, err, tt.want, tt.wantErr) + got, err := (&lbBuilder{}).ParseConfig(json.RawMessage(tt.sc)) + if (err != nil) != (tt.wantErr) { + t.Fatalf("ParseConfig(%q) returned error: %v, wantErr: %v", tt.sc, err, tt.wantErr) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Fatalf("ParseConfig(%q) returned unexpected difference (-want +got):\n%s", tt.sc, diff) } }) } diff --git a/balancer/grpclb/grpclb_remote_balancer.go b/balancer/grpclb/grpclb_remote_balancer.go index 5ac8d86bd570..0210c012d7b0 100644 --- a/balancer/grpclb/grpclb_remote_balancer.go +++ b/balancer/grpclb/grpclb_remote_balancer.go @@ -206,6 +206,9 @@ type remoteBalancerCCWrapper struct { backoff backoff.Strategy done chan struct{} + streamMu sync.Mutex + streamCancel func() + // waitgroup to wait for all goroutines to exit. 
wg sync.WaitGroup } @@ -319,10 +322,8 @@ func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, i } } -func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) { +func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (backoff bool, _ error) { lbClient := &loadBalancerClient{cc: ccw.cc} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) if err != nil { return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) @@ -362,11 +363,43 @@ func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) return false, ccw.readServerList(stream) } +// cancelRemoteBalancerCall cancels the context used by the stream to the remote +// balancer. watchRemoteBalancer() takes care of restarting this call after the +// stream fails. +func (ccw *remoteBalancerCCWrapper) cancelRemoteBalancerCall() { + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() +} + func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { - defer ccw.wg.Done() + defer func() { + ccw.wg.Done() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + // This is to make sure that we don't leak the context when we are + // directly returning from inside of the below `for` loop. 
+ ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() + }() + var retryCount int + var ctx context.Context for { - doBackoff, err := ccw.callRemoteBalancer() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ctx, ccw.streamCancel = context.WithCancel(context.Background()) + ccw.streamMu.Unlock() + + doBackoff, err := ccw.callRemoteBalancer(ctx) select { case <-ccw.done: return diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index d6275b657f90..3b666764728f 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -31,12 +31,16 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc" "google.golang.org/grpc/balancer" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -60,6 +64,13 @@ var ( fakeName = "fake.Name" ) +const ( + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond + testUserAgent = "test-user-agent" + grpclbConfig = `{"loadBalancingConfig": [{"grpclb": {}}]}` +) + type s struct { grpctest.Tester } @@ -136,18 +147,6 @@ func (s *rpcStats) merge(cs *lbpb.ClientStats) { s.mu.Unlock() } -func mapsEqual(a, b map[string]int64) bool { - if len(a) != len(b) { - return false - } - for k, v1 := range a { - if v2, ok := b[k]; !ok || v1 != v2 { - return false - } - } - return true -} - func atomicEqual(a, b *int64) bool { return atomic.LoadInt64(a) == atomic.LoadInt64(b) } @@ -172,7 +171,7 @@ func (s *rpcStats) equal(o *rpcStats) bool { defer s.mu.Unlock() o.mu.Lock() defer o.mu.Unlock() - return mapsEqual(s.numCallsDropped, o.numCallsDropped) + 
return cmp.Equal(s.numCallsDropped, o.numCallsDropped, cmpopts.EquateEmpty()) } func (s *rpcStats) String() string { @@ -188,24 +187,28 @@ func (s *rpcStats) String() string { type remoteBalancer struct { lbgrpc.UnimplementedLoadBalancerServer - sls chan *lbpb.ServerList - statsDura time.Duration - done chan struct{} - stats *rpcStats - statsChan chan *lbpb.ClientStats - fbChan chan struct{} - - customUserAgent string + sls chan *lbpb.ServerList + statsDura time.Duration + done chan struct{} + stats *rpcStats + statsChan chan *lbpb.ClientStats + fbChan chan struct{} + balanceLoadCh chan struct{} // notify successful invocation of BalanceLoad + + wantUserAgent string // expected user-agent in metadata of BalancerLoad + wantServerName string // expected server name in InitialLoadBalanceRequest } -func newRemoteBalancer(customUserAgent string, statsChan chan *lbpb.ClientStats) *remoteBalancer { +func newRemoteBalancer(wantUserAgent, wantServerName string, statsChan chan *lbpb.ClientStats) *remoteBalancer { return &remoteBalancer{ - sls: make(chan *lbpb.ServerList, 1), - done: make(chan struct{}), - stats: newRPCStats(), - statsChan: statsChan, - fbChan: make(chan struct{}), - customUserAgent: customUserAgent, + sls: make(chan *lbpb.ServerList, 1), + done: make(chan struct{}), + stats: newRPCStats(), + statsChan: statsChan, + fbChan: make(chan struct{}), + balanceLoadCh: make(chan struct{}, 1), + wantUserAgent: wantUserAgent, + wantServerName: wantServerName, } } @@ -218,15 +221,18 @@ func (b *remoteBalancer) fallbackNow() { b.fbChan <- struct{}{} } +func (b *remoteBalancer) updateServerName(name string) { + b.wantServerName = name +} + func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if !ok { return status.Error(codes.Internal, "failed to receive metadata") } - if b.customUserAgent != "" { - ua := md["user-agent"] - if len(ua) == 0 || !strings.HasPrefix(ua[0], 
b.customUserAgent) { - return status.Errorf(codes.InvalidArgument, "received unexpected user-agent: %v, want prefix %q", ua, b.customUserAgent) + if b.wantUserAgent != "" { + if ua := md["user-agent"]; len(ua) == 0 || !strings.HasPrefix(ua[0], b.wantUserAgent) { + return status.Errorf(codes.InvalidArgument, "received unexpected user-agent: %v, want prefix %q", ua, b.wantUserAgent) } } @@ -235,9 +241,10 @@ func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServe return err } initReq := req.GetInitialRequest() - if initReq.Name != beServerName { - return status.Errorf(codes.InvalidArgument, "invalid service name: %v", initReq.Name) + if initReq.Name != b.wantServerName { + return status.Errorf(codes.InvalidArgument, "invalid service name: %q, want: %q", initReq.Name, b.wantServerName) } + b.balanceLoadCh <- struct{}{} resp := &lbpb.LoadBalanceResponse{ LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ InitialResponse: &lbpb.InitialLoadBalanceResponse{ @@ -253,11 +260,8 @@ func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServe } go func() { for { - var ( - req *lbpb.LoadBalanceRequest - err error - ) - if req, err = stream.Recv(); err != nil { + req, err := stream.Recv() + if err != nil { return } b.stats.merge(req.GetClientStats()) @@ -347,7 +351,7 @@ type testServers struct { beListeners []net.Listener } -func newLoadBalancer(numberOfBackends int, customUserAgent string, statsChan chan *lbpb.ClientStats) (tss *testServers, cleanup func(), err error) { +func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent string, statsChan chan *lbpb.ClientStats) (tss *testServers, cleanup func(), err error) { var ( beListeners []net.Listener ls *remoteBalancer @@ -380,7 +384,7 @@ func newLoadBalancer(numberOfBackends int, customUserAgent string, statsChan cha sn: lbServerName, } lb = grpc.NewServer(grpc.Creds(lbCreds)) - ls = newRemoteBalancer(customUserAgent, statsChan) + ls = 
newRemoteBalancer(customUserAgent, beServerName, statsChan) lbgrpc.RegisterLoadBalancerServer(lb, ls) go func() { lb.Serve(lbLis) @@ -407,34 +411,29 @@ func newLoadBalancer(numberOfBackends int, customUserAgent string, statsChan cha return } -var grpclbConfig = `{"loadBalancingConfig": [{"grpclb": {}}]}` - func (s) TestGRPCLB(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - const testUserAgent = "test-user-agent" - tss, cleanup, err := newLoadBalancer(1, testUserAgent, nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, testUserAgent, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, + tss.ls.sls <- &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, } - tss.ls.sls <- sl - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer), + + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), grpc.WithUserAgent(testUserAgent)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) @@ -445,12 +444,11 @@ func (s) TestGRPCLB(t *testing.T) { rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, &grpclbstate.State{BalancerAddresses: []resolver.Address{{ Addr: tss.lbAddr, - Type: resolver.Backend, ServerName: lbServerName, }}}) r.UpdateState(rs) - ctx, cancel = context.WithTimeout(context.Background(), 
5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) @@ -461,7 +459,7 @@ func (s) TestGRPCLB(t *testing.T) { func (s) TestGRPCLBWeighted(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := newLoadBalancer(2, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(2, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -481,23 +479,25 @@ func (s) TestGRPCLBWeighted(t *testing.T) { portsToIndex[tss.bePorts[i]] = i } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}}) + rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{{ + Addr: tss.lbAddr, + ServerName: lbServerName, + }}}) + r.UpdateState(rs) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() sequences := []string{"00101", "00011"} for _, seq := range sequences { var ( @@ -526,7 +526,7 @@ func (s) TestGRPCLBWeighted(t *testing.T) { func (s) TestDropRequest(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := 
newLoadBalancer(2, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(2, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -546,22 +546,23 @@ func (s) TestDropRequest(t *testing.T) { Drop: true, }}, } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}}) + rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{{ + Addr: tss.lbAddr, + ServerName: lbServerName, + }}}) + r.UpdateState(rs) var ( i int @@ -573,6 +574,8 @@ func (s) TestDropRequest(t *testing.T) { sleepEachLoop = time.Millisecond loopCount = int(time.Second / sleepEachLoop) ) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() // Make a non-fail-fast RPC and wait for it to succeed. 
for i = 0; i < loopCount; i++ { if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err == nil { @@ -681,49 +684,51 @@ func (s) TestBalancerDisconnects(t *testing.T) { lbs []*grpc.Server ) for i := 0; i < 2; i++ { - tss, cleanup, err := newLoadBalancer(1, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, + tss.ls.sls <- &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, } - tss.ls.sls <- sl tests = append(tests, tss) lbs = append(lbs, tss.lb) } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tests[0].lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: tests[1].lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}}) + rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{ + { + Addr: tests[0].lbAddr, + ServerName: lbServerName, + }, + { + Addr: tests[1].lbAddr, + 
ServerName: lbServerName, + }, + }}) + r.UpdateState(rs) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() var p peer.Peer if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) @@ -750,16 +755,27 @@ func (s) TestBalancerDisconnects(t *testing.T) { func (s) TestFallback(t *testing.T) { balancer.Register(newLBBuilderWithFallbackTimeout(100 * time.Millisecond)) defer balancer.Register(newLBBuilder()) - r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := newLoadBalancer(1, "", nil) + // Start a remote balancer and a backend. Push the backend address to the + // remote balancer. + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } + tss.ls.sls <- sl - // Start a standalone backend. + // Start a standalone backend for fallback. 
beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) @@ -768,37 +784,29 @@ func (s) TestFallback(t *testing.T) { standaloneBEs := startBackends(beServerName, true, beLis) defer stopBackends(standaloneBEs) - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, - } - tss.ls.sls <- sl - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: "invalid.address", - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}}) + // Push an update to the resolver with fallback backend address stored in + // the `Addresses` field and an invalid remote balancer address stored in + // attributes, which will cause fallback behavior to be invoked. + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: "invalid.address", ServerName: lbServerName}}}) + r.UpdateState(rs) + // Make an RPC and verify that it got routed to the fallback backend. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() var p peer.Peer if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) @@ -807,15 +815,21 @@ func (s) TestFallback(t *testing.T) { t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr()) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}}) + // Push another update to the resolver, this time with a valid balancer + // address in the attributes field. + rs = resolver.State{ + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.UpdateState(rs) + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: + } + // Wait for RPCs to get routed to the backend behind the remote balancer. var backendUsed bool for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { @@ -856,7 +870,7 @@ func (s) TestFallback(t *testing.T) { t.Fatalf("No RPC sent to fallback after 2 seconds") } - // Restart backend and remote balancer, should not use backends. + // Restart backend and remote balancer, should not use fallback backend. 
tss.beListeners[0].(*restartableListener).restart() tss.lbListener.(*restartableListener).restart() tss.ls.sls <- sl @@ -880,13 +894,25 @@ func (s) TestFallback(t *testing.T) { func (s) TestExplicitFallback(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := newLoadBalancer(1, "", nil) + // Start a remote balancer and a backend. Push the backend address to the + // remote balancer. + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } + tss.ls.sls <- sl - // Start a standalone backend. + // Start a standalone backend for fallback. beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) @@ -895,37 +921,25 @@ func (s) TestExplicitFallback(t *testing.T) { standaloneBEs := startBackends(beServerName, true, beLis) defer stopBackends(standaloneBEs) - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, - } - tss.ls.sls <- sl - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: 
tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}}) + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.UpdateState(rs) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() var p peer.Peer var backendUsed bool for i := 0; i < 2000; i++ { @@ -980,23 +994,34 @@ func (s) TestExplicitFallback(t *testing.T) { } func (s) TestFallBackWithNoServerAddress(t *testing.T) { - resolveNowCh := make(chan struct{}, 1) + resolveNowCh := testutils.NewChannel() r := manual.NewBuilderWithScheme("whatever") r.ResolveNowCallback = func(resolver.ResolveNowOptions) { - select { - case <-resolveNowCh: - default: + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if err := resolveNowCh.SendContext(ctx, nil); err != nil { + t.Error("timeout when attemping to send on resolverNowCh") } - resolveNowCh <- struct{}{} } - tss, cleanup, err := newLoadBalancer(1, "", nil) + // Start a remote balancer and a backend. Push the backend address to the + // remote balancer yet. + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } - // Start a standalone backend. + // Start a standalone backend for fallback. 
beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) @@ -1005,81 +1030,61 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { standaloneBEs := startBackends(beServerName, true, beLis) defer stopBackends(standaloneBEs) - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, - } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - // Select grpclb with service config. - const pfc = `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"round_robin":{}}]}}]}` - scpr := r.CC.ParseServiceConfig(pfc) - if scpr.Err != nil { - t.Fatalf("Error parsing config %q: %v", pfc, scpr.Err) - } - + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() for i := 0; i < 2; i++ { - // Send an update with only backend address. grpclb should enter fallback - // and use the fallback backend. + // Send an update with only backend address. grpclb should enter + // fallback and use the fallback backend. 
r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}, - ServiceConfig: scpr, + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), }) - select { - case <-resolveNowCh: - t.Errorf("unexpected resolveNow when grpclb gets no balancer address 1111, %d", i) - case <-time.After(time.Second): + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := resolveNowCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("unexpected resolveNow when grpclb gets no balancer address 1111, %d", i) } var p peer.Peer - rpcCtx, rpcCancel := context.WithTimeout(context.Background(), time.Second) - defer rpcCancel() - if _, err := testC.EmptyCall(rpcCtx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } if p.Addr.String() != beLis.Addr().String() { t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr()) } - select { - case <-resolveNowCh: + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := resolveNowCh.Receive(sCtx); err != context.DeadlineExceeded { t.Errorf("unexpected resolveNow when grpclb gets no balancer address 2222, %d", i) - case <-time.After(time.Second): } tss.ls.sls <- sl // Send an update with balancer address. The backends behind grpclb should // be used. 
- r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}, - ServiceConfig: scpr, - }) + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.UpdateState(rs) + + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: + } var backendUsed bool for i := 0; i < 1000; i++ { @@ -1101,7 +1106,7 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { func (s) TestGRPCLBPickFirst(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := newLoadBalancer(3, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(3, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1125,11 +1130,10 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { portsToIndex[tss.bePorts[i]] = i } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } @@ -1143,21 +1147,11 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} // Start with sub policy pick_first. 
- const pfc = `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}` - scpr := r.CC.ParseServiceConfig(pfc) - if scpr.Err != nil { - t.Fatalf("Error parsing config %q: %v", pfc, scpr.Err) - } - - r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}, - ServiceConfig: scpr, - }) + rs := resolver.State{ServiceConfig: r.CC.ParseServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)} + r.UpdateState(grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}})) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() result = "" for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { @@ -1194,19 +1188,12 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { } // Switch sub policy to roundrobin. - grpclbServiceConfigEmpty := r.CC.ParseServiceConfig(`{}`) - if grpclbServiceConfigEmpty.Err != nil { - t.Fatalf("Error parsing config %q: %v", `{}`, grpclbServiceConfigEmpty.Err) - } - - r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ + rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{{ Addr: tss.lbAddr, - Type: resolver.GRPCLB, ServerName: lbServerName, - }}, - ServiceConfig: grpclbServiceConfigEmpty, - }) + }}}) + r.UpdateState(rs) result = "" for i := 0; i < 1000; i++ { @@ -1235,9 +1222,8 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - // Start up an LB which will tell the client to fall back - // right away. - tss, cleanup, err := newLoadBalancer(0, "", nil) + // Start up an LB which will tells the client to fall back right away. 
+ tss, cleanup, err := startBackendsAndRemoteLoadBalancer(0, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1254,30 +1240,27 @@ func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { standaloneBEs := startBackends("arbitrary.invalid.name", true, beLis) defer stopBackends(standaloneBEs) - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}}) + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.UpdateState(rs) // If https://github.com/grpc/grpc-go/blob/65cabd74d8e18d7347fecd414fa8d83a00035f5f/balancer/grpclb/grpclb_test.go#L103 // changes, then expectedErrMsg may need to be updated. 
const expectedErrMsg = "received unexpected server name" - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() var wg sync.WaitGroup wg.Add(1) @@ -1291,6 +1274,87 @@ func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { wg.Wait() } +func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { + r := manual.NewBuilderWithScheme("whatever") + + // Start a remote balancer and a backend. Push the backend address to the + // remote balancer. + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } + tss.ls.sls <- sl + + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + grpc.WithUserAgent(testUserAgent)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) + + // Push a resolver update with grpclb configuration which does not contain the + // target_name field. Our fake remote balancer is configured to always + // expect `beServerName` as the server name in the initial request. 
+ rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{{ + Addr: tss.lbAddr, + ServerName: lbServerName, + }}}) + r.UpdateState(rs) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: + } + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } + + // When the value of target_field changes, grpclb will recreate the stream + // to the remote balancer. So, we need to update the fake remote balancer to + // expect a new server name in the initial request. + const newServerName = "new-server-name" + tss.ls.updateServerName(newServerName) + tss.ls.sls <- sl + + // Push the resolver update with target_field changed. + // Push a resolver update with grpclb configuration containing the + // target_name field. Our fake remote balancer has been updated above to expect the newServerName in the initial request. 
+ lbCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"grpclb": {"targetName": "%s"}}]}`, newServerName) + rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(lbCfg)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{{ + Addr: tss.lbAddr, + ServerName: lbServerName, + }}}) + r.UpdateState(rs) + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: + } + + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + } +} + type failPreRPCCred struct{} func (failPreRPCCred) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { @@ -1314,7 +1378,7 @@ func checkStats(stats, expected *rpcStats) error { func runAndCheckStats(t *testing.T, drop bool, statsChan chan *lbpb.ClientStats, runRPCs func(*grpc.ClientConn), statsWant *rpcStats) error { r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := newLoadBalancer(1, "", statsChan) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", statsChan) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } diff --git a/balancer/grpclb/grpclb_test_util_test.go b/balancer/grpclb/grpclb_test_util_test.go index 5d3e6ba7fed9..c143e9617543 100644 --- a/balancer/grpclb/grpclb_test_util_test.go +++ b/balancer/grpclb/grpclb_test_util_test.go @@ -48,19 +48,20 @@ func newRestartableListener(l net.Listener) *restartableListener { } } -func (l *restartableListener) Accept() (conn net.Conn, err error) { - conn, err = l.Listener.Accept() - if err == nil { - l.mu.Lock() - if l.closed { - conn.Close() - l.mu.Unlock() - return nil, &tempError{} - } - l.conns = append(l.conns, conn) - l.mu.Unlock() +func (l *restartableListener) Accept() (net.Conn, error) { + conn, err := l.Listener.Accept() + if err != nil { + return nil, err } - return + + l.mu.Lock() + defer l.mu.Unlock() + if 
l.closed { + conn.Close() + return nil, &tempError{} + } + l.conns = append(l.conns, conn) + return conn, nil } func (l *restartableListener) Close() error { From ea41fbfa10817592c85b4ada15d3d1ba3d6fdae7 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 11 Oct 2021 14:55:45 -0700 Subject: [PATCH 286/998] examples: unix abstract socket (#4848) --- examples/examples_test.sh | 3 + examples/features/unix_abstract/README.md | 29 ++++++++ .../features/unix_abstract/client/main.go | 68 +++++++++++++++++++ .../features/unix_abstract/server/main.go | 58 ++++++++++++++++ 4 files changed, 158 insertions(+) create mode 100644 examples/features/unix_abstract/README.md create mode 100644 examples/features/unix_abstract/client/main.go create mode 100644 examples/features/unix_abstract/server/main.go diff --git a/examples/examples_test.sh b/examples/examples_test.sh index 9015272f33e0..f5c82d062b22 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -58,6 +58,7 @@ EXAMPLES=( "features/metadata" "features/multiplex" "features/name_resolving" + "features/unix_abstract" ) declare -A EXPECTED_SERVER_OUTPUT=( @@ -73,6 +74,7 @@ declare -A EXPECTED_SERVER_OUTPUT=( ["features/metadata"]="message:\"this is examples/metadata\", sending echo" ["features/multiplex"]=":50051" ["features/name_resolving"]="serving on localhost:50051" + ["features/unix_abstract"]="serving on @abstract-unix-socket" ) declare -A EXPECTED_CLIENT_OUTPUT=( @@ -88,6 +90,7 @@ declare -A EXPECTED_CLIENT_OUTPUT=( ["features/metadata"]="this is examples/metadata" ["features/multiplex"]="Greeting: Hello multiplex" ["features/name_resolving"]="calling helloworld.Greeter/SayHello to \"example:///resolver.example.grpc.io\"" + ["features/unix_abstract"]="calling echo.Echo/UnaryEcho to unix-abstract:abstract-unix-socket" ) cd ./examples diff --git a/examples/features/unix_abstract/README.md b/examples/features/unix_abstract/README.md new file mode 100644 index 000000000000..32b3bd5f262c --- 
/dev/null +++ b/examples/features/unix_abstract/README.md @@ -0,0 +1,29 @@ +# Unix abstract sockets + +This examples shows how to start a gRPC server listening on a unix abstract +socket and how to get a gRPC client to connect to it. + +## What is a unix abstract socket + +An abstract socket address is distinguished from a regular unix socket by the +fact that the first byte of the address is a null byte ('\0'). The address has +no connection with filesystem path names. + +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +The gRPC server in this example listens on an address starting with a null byte +and the network is `unix`. The client uses the `unix-abstract` scheme with the +endpoint set to the abstract unix socket address without the null byte. The +`unix` resolver takes care of adding the null byte on the client. See +https://github.com/grpc/grpc/blob/master/doc/naming.md for the more details. + diff --git a/examples/features/unix_abstract/client/main.go b/examples/features/unix_abstract/client/main.go new file mode 100644 index 000000000000..4f48aca9bdfd --- /dev/null +++ b/examples/features/unix_abstract/client/main.go @@ -0,0 +1,68 @@ +//go:build linux +// +build linux + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client which dials a server on an abstract unix +// socket. 
+package main + +import ( + "context" + "fmt" + "log" + "time" + + "google.golang.org/grpc" + ecpb "google.golang.org/grpc/examples/features/proto/echo" +) + +func callUnaryEcho(c ecpb.EchoClient, message string) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + r, err := c.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + fmt.Println(r.Message) +} + +func makeRPCs(cc *grpc.ClientConn, n int) { + hwc := ecpb.NewEchoClient(cc) + for i := 0; i < n; i++ { + callUnaryEcho(hwc, "this is examples/unix_abstract") + } +} + +func main() { + // A dial target of `unix:@abstract-unix-socket` should also work fine for + // this example because of golang conventions (net.Dial behavior). But we do + // not recommend this since we explicitly added the `unix-abstract` scheme + // for cross-language compatibility. + addr := "unix-abstract:abstract-unix-socket" + cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + log.Fatalf("grpc.Dial(%q) failed: %v", addr, err) + } + defer cc.Close() + + fmt.Printf("--- calling echo.Echo/UnaryEcho to %s\n", addr) + makeRPCs(cc, 10) + fmt.Println() +} diff --git a/examples/features/unix_abstract/server/main.go b/examples/features/unix_abstract/server/main.go new file mode 100644 index 000000000000..a82b957c1f07 --- /dev/null +++ b/examples/features/unix_abstract/server/main.go @@ -0,0 +1,58 @@ +//go:build linux +// +build linux + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server listening for gRPC connections on an +// abstract unix socket. +package main + +import ( + "context" + "fmt" + "log" + "net" + + "google.golang.org/grpc" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +type ecServer struct { + pb.UnimplementedEchoServer + addr string +} + +func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { + return &pb.EchoResponse{Message: fmt.Sprintf("%s (from %s)", req.Message, s.addr)}, nil +} + +func main() { + netw, addr := "unix", "\x00abstract-unix-socket" + lis, err := net.Listen(netw, addr) + if err != nil { + log.Fatalf("net.Listen(%q, %q) failed: %v", netw, addr, err) + } + s := grpc.NewServer() + pb.RegisterEchoServer(s, &ecServer{addr: addr}) + log.Printf("serving on %s\n", lis.Addr().String()) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} From 2fe71180762478c66b0027f780b95c40fc563a55 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 11 Oct 2021 15:42:10 -0700 Subject: [PATCH 287/998] xds/e2e: move flag check to each test, and call t.Skip() (#4861) --- xds/internal/test/e2e/e2e.go | 8 +++++--- xds/internal/test/e2e/e2e_test.go | 17 ++++++----------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/xds/internal/test/e2e/e2e.go b/xds/internal/test/e2e/e2e.go index 82c7f9dfb25a..ade6339bf534 100644 --- a/xds/internal/test/e2e/e2e.go +++ b/xds/internal/test/e2e/e2e.go @@ -26,7 +26,9 @@ import ( "os/exec" "google.golang.org/grpc" + channelzgrpc 
"google.golang.org/grpc/channelz/grpc_channelz_v1" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) @@ -83,7 +85,7 @@ func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ... } func (c *client) clientStats(ctx context.Context) (*testpb.LoadBalancerStatsResponse, error) { - ccc := testpb.NewLoadBalancerStatsServiceClient(c.statsCC) + ccc := testgrpc.NewLoadBalancerStatsServiceClient(c.statsCC) return ccc.GetClientStats(ctx, &testpb.LoadBalancerStatsRequest{ NumRpcs: 100, TimeoutSec: 10, @@ -91,13 +93,13 @@ func (c *client) clientStats(ctx context.Context) (*testpb.LoadBalancerStatsResp } func (c *client) configRPCs(ctx context.Context, req *testpb.ClientConfigureRequest) error { - ccc := testpb.NewXdsUpdateClientConfigureServiceClient(c.statsCC) + ccc := testgrpc.NewXdsUpdateClientConfigureServiceClient(c.statsCC) _, err := ccc.Configure(ctx, req) return err } func (c *client) channelzSubChannels(ctx context.Context) ([]*channelzpb.Subchannel, error) { - ccc := channelzpb.NewChannelzClient(c.statsCC) + ccc := channelzgrpc.NewChannelzClient(c.statsCC) r, err := ccc.GetTopChannels(ctx, &channelzpb.GetTopChannelsRequest{}) if err != nil { return nil, err diff --git a/xds/internal/test/e2e/e2e_test.go b/xds/internal/test/e2e/e2e_test.go index 5c116b478fe9..6984566db2e7 100644 --- a/xds/internal/test/e2e/e2e_test.go +++ b/xds/internal/test/e2e/e2e_test.go @@ -39,17 +39,6 @@ var ( serverPath = flag.String("server", "./binaries/server", "The interop server") ) -func TestMain(m *testing.M) { - flag.Parse() - if _, err := os.Stat(*clientPath); os.IsNotExist(err) { - return - } - if _, err := os.Stat(*serverPath); os.IsNotExist(err) { - return - } - os.Exit(m.Run()) -} - type testOpts struct { testName string backendCount int @@ -58,6 +47,12 @@ type testOpts struct { func setup(t *testing.T, opts testOpts) (*controlPlane, 
*client, []*server) { t.Helper() + if _, err := os.Stat(*clientPath); os.IsNotExist(err) { + t.Skip("skipped because client is not found") + } + if _, err := os.Stat(*serverPath); os.IsNotExist(err) { + t.Skip("skipped because server is not found") + } backendCount := 1 if opts.backendCount != 0 { backendCount = opts.backendCount From 45097a8aa69aa220b3471bd736389136afd35f24 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 13 Oct 2021 10:50:52 -0700 Subject: [PATCH 288/998] Revert "kokoro: remove expired letsencrypt.org cert and update" (#4860) --- test/kokoro/xds.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index 7b7f48dba309..f9cb7dab7332 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -3,12 +3,6 @@ set -exu -o pipefail [[ -f /VERSION ]] && cat /VERSION - -echo "Remove the expired letsencrypt.org cert and update the CA certificates" -sudo apt-get install -y ca-certificates -sudo rm /usr/share/ca-certificates/mozilla/DST_Root_CA_X3.crt -sudo update-ca-certificates - cd github export GOPATH="${HOME}/gopath" From aaff9e7ab90624f6be3da4555eeb27c68ba29e9d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 14 Oct 2021 14:54:02 -0700 Subject: [PATCH 289/998] grpc: better RFC 3986 compliant target parsing (#4817) --- clientconn.go | 184 ++++++++++++++++++----- clientconn_authority_test.go | 20 ++- clientconn_parsed_target_test.go | 111 ++++++++------ credentials/credentials.go | 15 +- dialoptions.go | 3 +- internal/grpcutil/grpcutil.go | 20 +++ internal/grpcutil/target.go | 89 ----------- internal/resolver/unix/unix.go | 12 +- internal/transport/http2_client.go | 20 ++- resolver/resolver.go | 40 +++-- resolver_conn_wrapper_test.go | 33 ---- test/authority_test.go | 9 +- xds/internal/xdsclient/v2/client_test.go | 2 +- 13 files changed, 314 insertions(+), 244 deletions(-) create mode 100644 internal/grpcutil/grpcutil.go delete mode 100644 internal/grpcutil/target.go diff --git a/clientconn.go 
b/clientconn.go index 34cc4c948db0..eacf5fd8fccf 100644 --- a/clientconn.go +++ b/clientconn.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "math" + "net/url" "reflect" "strings" "sync" @@ -37,7 +38,6 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -248,38 +248,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. 
- channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { - cc.authority = "localhost" - } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { - cc.authority = "localhost" + cc.parsedTarget.Endpoint - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. @@ -902,10 +879,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { - // a.ServerName takes precedent over ClientConn authority, if present. 
- if a.ServerName == "" { - a.ServerName = ac.cc.authority - } + a.ServerName = ac.cc.getServerName(a) if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -919,6 +893,26 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return curAddrFound } +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { if sc == nil { return MethodConfig{} @@ -1275,11 +1269,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne prefaceReceived := grpcsync.NewEvent() connClosed := grpcsync.NewEvent() - // addr.ServerName takes precedent over ClientConn authority, if present. 
- if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } - + addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) hcStarted := false // protected by ac.mu @@ -1621,3 +1611,117 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + return rb, nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. 
+ defScheme := resolver.GetDefaultScheme() + if cc.dopts.copts.Dialer != nil { + defScheme = "passthrough" + } + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return nil, err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + } + cc.parsedTarget = parsedTarget + return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. + endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil +} + +// Determine channel authority. 
The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. 
+ return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil + } +} diff --git a/clientconn_authority_test.go b/clientconn_authority_test.go index 5cd705e2d4f0..7a77de64c570 100644 --- a/clientconn_authority_test.go +++ b/clientconn_authority_test.go @@ -59,10 +59,9 @@ func (s) TestClientConnAuthority(t *testing.T) { wantAuthority: "authority-override", }, { - name: "override-via-creds-and-WithAuthority", - target: "Non-Existent.Server:8080", - // WithAuthority override works only for insecure creds. - opts: []DialOption{WithTransportCredentials(creds), WithAuthority("authority-override")}, + name: "override-via-creds-and-WithAuthority", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithTransportCredentials(creds), WithAuthority(serverNameOverride)}, wantAuthority: serverNameOverride, }, { @@ -120,3 +119,16 @@ func (s) TestClientConnAuthority(t *testing.T) { }) } } + +func (s) TestClientConnAuthority_CredsAndDialOptionMismatch(t *testing.T) { + serverNameOverride := "over.write.server.name" + creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), serverNameOverride) + if err != nil { + t.Fatalf("credentials.NewClientTLSFromFile(_, %q) failed: %v", err, serverNameOverride) + } + opts := []DialOption{WithTransportCredentials(creds), WithAuthority("authority-override")} + if cc, err := Dial("Non-Existent.Server:8000", opts...); err == nil { + cc.Close() + t.Fatal("grpc.Dial() succeeded when expected to fail") + } +} diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go index fda06f9fa147..e41fafe09666 100644 --- a/clientconn_parsed_target_test.go 
+++ b/clientconn_parsed_target_test.go @@ -22,9 +22,12 @@ import ( "context" "errors" "net" + "net/url" "testing" "time" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/resolver" ) @@ -35,45 +38,47 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { wantParsed resolver.Target }{ // No scheme is specified. - {target: "", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ""}}, - {target: "://", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://"}}, - {target: ":///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///"}}, - {target: "://a/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/"}}, - {target: ":///a", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///a"}}, - {target: "://a/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/b"}}, - {target: "/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/"}}, - {target: "a/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a/b"}}, - {target: "a//b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a//b"}}, - {target: "google.com", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com"}}, - {target: "google.com/?a=b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com/?a=b"}}, - {target: "/unix/socket/address", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address"}}, + {target: "", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "", URL: url.URL{Scheme: defScheme, Path: "/"}}}, + {target: "://", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://", URL: url.URL{Scheme: defScheme, Path: "/://"}}}, + {target: ":///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: 
":///", URL: url.URL{Scheme: defScheme, Path: "/:///"}}}, + {target: "://a/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/", URL: url.URL{Scheme: defScheme, Path: "/://a/"}}}, + {target: ":///a", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///a", URL: url.URL{Scheme: defScheme, Path: "/:///a"}}}, + {target: "://a/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/b", URL: url.URL{Scheme: defScheme, Path: "/://a/b"}}}, + {target: "/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/", URL: url.URL{Scheme: defScheme, Path: "//"}}}, + {target: "a/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a/b", URL: url.URL{Scheme: defScheme, Path: "/a/b"}}}, + {target: "a//b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a//b", URL: url.URL{Scheme: defScheme, Path: "/a//b"}}}, + {target: "google.com", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com", URL: url.URL{Scheme: defScheme, Path: "/google.com"}}}, + {target: "google.com/?a=b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com/", URL: url.URL{Scheme: defScheme, Path: "/google.com/", RawQuery: "a=b"}}}, + {target: "/unix/socket/address", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address", URL: url.URL{Scheme: defScheme, Path: "//unix/socket/address"}}}, // An unregistered scheme is specified. 
- {target: "a:///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///"}}, - {target: "a://b/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/"}}, - {target: "a:///b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///b"}}, - {target: "a://b/c", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/c"}}, - {target: "a:b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:b"}}, - {target: "a:/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:/b"}}, - {target: "a://b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b"}}, + {target: "a:///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///", URL: url.URL{Scheme: defScheme, Path: "/a:///"}}}, + {target: "a://b/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/", URL: url.URL{Scheme: defScheme, Path: "/a://b/"}}}, + {target: "a:///b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///b", URL: url.URL{Scheme: defScheme, Path: "/a:///b"}}}, + {target: "a://b/c", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/c", URL: url.URL{Scheme: defScheme, Path: "/a://b/c"}}}, + {target: "a:b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:b", URL: url.URL{Scheme: defScheme, Path: "/a:b"}}}, + {target: "a:/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:/b", URL: url.URL{Scheme: defScheme, Path: "/a:/b"}}}, + {target: "a://b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b", URL: url.URL{Scheme: defScheme, Path: "/a://b"}}}, // A registered scheme is specified. 
- {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com"}}, - {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}}, - {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}}, - {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}}, - {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, - {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b"}}, - {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b"}}, - {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a-b"}}, - {target: "unix-abstract:/ a///://::!@#$%^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/ a///://::!@#$%^&*()b"}}, - {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "passthrough:abc"}}, - {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc"}}, - {target: "unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/a/b/c"}}, - {target: "unix-abstract:///", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/"}}, - {target: "unix-abstract://authority", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "//authority"}}, - {target: "unix://domain", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unix://domain"}}, - {target: "passthrough:///unix:///a/b/c", wantParsed: 
resolver.Target{Scheme: "passthrough", Authority: "", Endpoint: "unix:///a/b/c"}}, + {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com", URL: url.URL{Scheme: "dns", Path: "/google.com"}}}, + {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com", URL: url.URL{Scheme: "dns", Host: "a.server.com", Path: "/google.com"}}}, + {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/", URL: url.URL{Scheme: "dns", Host: "a.server.com", Path: "/google.com/", RawQuery: "a=b"}}}, + {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Path: "/a/b/c"}}}, + {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a/b/c"}}}, + {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a b"}}}, + {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a:b"}}}, + {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a-b", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a-b"}}}, + {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: " a///://::!@", URL: url.URL{Scheme: "unix-abstract", Path: "/ a///://::!@", RawPath: "/ a///://::!@", Fragment: "$%^&*()b", RawFragment: "$%25^&*()b"}}}, + {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: 
"passthrough:abc", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "passthrough:abc"}}}, + {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "unix:///abc"}}}, + {target: "unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix-abstract", Path: "/a/b/c"}}}, + {target: "unix-abstract:///", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "", URL: url.URL{Scheme: "unix-abstract", Path: "/"}}}, + {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "", Endpoint: "unix:///a/b/c", URL: url.URL{Scheme: "passthrough", Path: "/unix:///a/b/c"}}}, + + // Cases for `scheme:absolute-path`. + {target: "dns:/a/b/c", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "dns", Path: "/a/b/c"}}}, + {target: "unregistered:/a/b/c", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unregistered:/a/b/c", URL: url.URL{Scheme: defScheme, Path: "/unregistered:/a/b/c"}}}, } for _, test := range tests { @@ -84,8 +89,8 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { } defer cc.Close() - if gotParsed := cc.parsedTarget; gotParsed != test.wantParsed { - t.Errorf("cc.parsedTarget = %+v, want %+v", gotParsed, test.wantParsed) + if !cmp.Equal(cc.parsedTarget, test.wantParsed) { + t.Errorf("cc.parsedTarget for dial target %q = %+v, want %+v", test.target, cc.parsedTarget, test.wantParsed) } }) } @@ -94,7 +99,9 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { func (s) TestParsedTarget_Failure_WithoutCustomDialer(t *testing.T) { targets := []string{ "unix://a/b/c", + "unix://authority", "unix-abstract://authority/a/b/c", + "unix-abstract://authority", } for _, target := range targets 
{ @@ -118,39 +125,49 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { // different behaviors with a custom dialer. { target: "unix:a/b/c", - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unix:a/b/c"}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Opaque: "a/b/c"}}, wantDialerAddress: "unix:a/b/c", }, { target: "unix:/a/b/c", - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unix:/a/b/c"}, - wantDialerAddress: "unix:/a/b/c", + wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Path: "/a/b/c"}}, + wantDialerAddress: "unix:///a/b/c", }, { target: "unix:///a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Path: "/a/b/c"}}, wantDialerAddress: "unix:///a/b/c", }, { target: "dns:///127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "127.0.0.1:50051", URL: url.URL{Scheme: "dns", Path: "/127.0.0.1:50051"}}, wantDialerAddress: "127.0.0.1:50051", }, { target: ":///127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///127.0.0.1:50051", URL: url.URL{Scheme: defScheme, Path: "/:///127.0.0.1:50051"}}, wantDialerAddress: ":///127.0.0.1:50051", }, { target: "dns://authority/127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "authority", Endpoint: "127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: "dns", Authority: "authority", Endpoint: "127.0.0.1:50051", URL: url.URL{Scheme: "dns", Host: "authority", Path: "/127.0.0.1:50051"}}, wantDialerAddress: 
"127.0.0.1:50051", }, { target: "://authority/127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://authority/127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://authority/127.0.0.1:50051", URL: url.URL{Scheme: defScheme, Path: "/://authority/127.0.0.1:50051"}}, wantDialerAddress: "://authority/127.0.0.1:50051", }, + { + target: "/unix/socket/address", + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address", URL: url.URL{Scheme: defScheme, Path: "//unix/socket/address"}}, + wantDialerAddress: "/unix/socket/address", + }, + { + target: "passthrough://a.server.com/google.com", + wantParsed: resolver.Target{Scheme: "passthrough", Authority: "a.server.com", Endpoint: "google.com", URL: url.URL{Scheme: "passthrough", Host: "a.server.com", Path: "/google.com"}}, + wantDialerAddress: "google.com", + }, } for _, test := range tests { @@ -175,8 +192,8 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { case <-time.After(time.Second): t.Fatal("timeout when waiting for custom dialer to be invoked") } - if gotParsed := cc.parsedTarget; gotParsed != test.wantParsed { - t.Errorf("cc.parsedTarget for dial target %q = %+v, want %+v", test.target, gotParsed, test.wantParsed) + if !cmp.Equal(cc.parsedTarget, test.wantParsed) { + t.Errorf("cc.parsedTarget for dial target %q = %+v, want %+v", test.target, cc.parsedTarget, test.wantParsed) } }) } diff --git a/credentials/credentials.go b/credentials/credentials.go index 7eee7e4ec126..a671107584f5 100644 --- a/credentials/credentials.go +++ b/credentials/credentials.go @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. 
Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } diff --git a/dialoptions.go b/dialoptions.go index 7a497237bbd3..5f7b7a164cea 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -482,8 +482,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. 
func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/internal/grpcutil/grpcutil.go b/internal/grpcutil/grpcutil.go new file mode 100644 index 000000000000..e2f948e8f4f4 --- /dev/null +++ b/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/internal/grpcutil/target.go b/internal/grpcutil/target.go deleted file mode 100644 index 8833021da02e..000000000000 --- a/internal/grpcutil/target.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. 
-package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. skipUnixColonParsing indicates that the parse should -// not parse "unix:[path]" cases. This should be true in cases where a custom -// dialer is present, to prevent a behavior change. -// -// If target is not a valid scheme://authority/endpoint as specified in -// https://github.com/grpc/grpc/blob/master/doc/naming.md, -// it returns {Endpoint: target}. -func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { - var ok bool - if strings.HasPrefix(target, "unix-abstract:") { - if strings.HasPrefix(target, "unix-abstract://") { - // Maybe, with Authority specified, try to parse it - var remain string - ret.Scheme, remain, _ = split2(target, "://") - ret.Authority, ret.Endpoint, ok = split2(remain, "/") - if !ok { - // No Authority, add the "//" back - ret.Endpoint = "//" + remain - } else { - // Found Authority, add the "/" back - ret.Endpoint = "/" + ret.Endpoint - } - } else { - // Without Authority specified, split target on ":" - ret.Scheme, ret.Endpoint, _ = split2(target, ":") - } - return ret - } - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { - // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, - // because splitting on :// only handles the - // "unix://[/absolute/path]" case. Only handle if the dialer is nil, - // to avoid a behavior change with custom dialers. 
- return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} - } - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - if ret.Scheme == "unix" { - // Add the "/" back in the unix case, so the unix resolver receives the - // actual endpoint in the "unix://[/absolute/path]" case. - ret.Endpoint = "/" + ret.Endpoint - } - return ret -} diff --git a/internal/resolver/unix/unix.go b/internal/resolver/unix/unix.go index 0d5a811ddfad..20852e59df29 100644 --- a/internal/resolver/unix/unix.go +++ b/internal/resolver/unix/unix.go @@ -37,7 +37,17 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv if target.Authority != "" { return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) } - addr := resolver.Address{Addr: target.Endpoint} + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. 
+ endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { // prepend "\x00" to address for unix-abstract addr.Addr = "\x00" + addr.Addr diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index dc369212dc5a..2521a7d7a408 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -25,6 +25,7 @@ import ( "math" "net" "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -146,13 +147,20 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. if networkType == "unix" && !strings.HasPrefix(address, "\x00") { - // For backward compatibility, if the user dialed "unix:///path", - // the passthrough resolver would be used and the user's custom - // dialer would see "unix:///path". Since the unix resolver is used - // and the address is now "/path", prepend "unix://" so the user's - // custom dialer sees the same address. - return fn(ctx, "unix://"+address) + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". 
+ if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) } return fn(ctx, address) } diff --git a/resolver/resolver.go b/resolver/resolver.go index 6a9d234a597a..9116897b463e 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -23,6 +23,7 @@ package resolver import ( "context" "net" + "net/url" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -204,25 +205,36 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. // -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// Examples: // -// If the parsed scheme is not registered (i.e. 
no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - Scheme string + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. Authority string - Endpoint string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. + Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL } // Builder creates a resolver that will be used to watch name resolution updates. diff --git a/resolver_conn_wrapper_test.go b/resolver_conn_wrapper_test.go index 81c5b9ea874f..1036946ad1e4 100644 --- a/resolver_conn_wrapper_test.go +++ b/resolver_conn_wrapper_test.go @@ -21,8 +21,6 @@ package grpc import ( "context" "errors" - "fmt" - "net" "strings" "testing" "time" @@ -36,37 +34,6 @@ import ( "google.golang.org/grpc/status" ) -// The target string with unknown scheme should be kept unchanged and passed to -// the dialer. -func (s) TestDialParseTargetUnknownScheme(t *testing.T) { - for _, test := range []struct { - targetStr string - want string - }{ - {"/unix/socket/address", "/unix/socket/address"}, - - // For known scheme. 
- {"passthrough://a.server.com/google.com", "google.com"}, - } { - dialStrCh := make(chan string, 1) - cc, err := Dial(test.targetStr, WithInsecure(), WithDialer(func(addr string, _ time.Duration) (net.Conn, error) { - select { - case dialStrCh <- addr: - default: - } - return nil, fmt.Errorf("test dialer, always error") - })) - if err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - got := <-dialStrCh - cc.Close() - if got != test.want { - t.Errorf("Dial(%q), dialer got %q, want %q", test.targetStr, got, test.want) - } - } -} - const happyBalancerName = "happy balancer" func init() { diff --git a/test/authority_test.go b/test/authority_test.go index 15afa759c907..0f823bdbd1b0 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -103,10 +103,11 @@ var authorityTests = []authorityTest{ authority: "localhost", }, { - name: "UnixAbsolute", - address: "/tmp/sock.sock", - target: "unix:/tmp/sock.sock", - authority: "localhost", + name: "UnixAbsolute", + address: "/tmp/sock.sock", + target: "unix:/tmp/sock.sock", + authority: "localhost", + dialTargetWant: "unix:///tmp/sock.sock", }, { name: "UnixAbsoluteAlternate", diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index ed4322b0dc51..fc3fa821a157 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -608,7 +608,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { } defer sCleanup() - const scheme = "xds_client_test_whatever" + const scheme = "xds-client-test-whatever" rb := manual.NewBuilderWithScheme(scheme) rb.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "no.such.server"}}}) From 3bae5f5b658c8795d4cfa636082b02ef051b56dd Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 14 Oct 2021 16:51:55 -0700 Subject: [PATCH 290/998] xds: use protos from cncf/xds instead of cncf/udpa (#4866) --- examples/go.sum | 3 +- go.mod | 1 + go.sum | 3 +- 
security/advancedtls/examples/go.sum | 1 + security/advancedtls/go.sum | 1 + .../balancer/balancergroup/balancergroup.go | 2 +- .../balancergroup/balancergroup_test.go | 3 +- xds/internal/balancer/clusterimpl/picker.go | 2 +- xds/internal/balancer/orca/orca.go | 2 +- xds/internal/balancer/orca/orca_test.go | 2 +- xds/internal/httpfilter/httpfilter.go | 13 ++-- xds/internal/xdsclient/lds_test.go | 67 +++++++++++++++---- xds/internal/xdsclient/rds_test.go | 15 +++-- xds/internal/xdsclient/xds.go | 29 +++++--- 14 files changed, 102 insertions(+), 42 deletions(-) diff --git a/examples/go.sum b/examples/go.sum index a359cfc183f8..1724c644936d 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -7,8 +7,9 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158 h1:CevA8fI91PAnP8vpnXuB8ZYAZ5wqY86nAbxfgK8tWO4= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs= 
diff --git a/go.mod b/go.mod index 022cc9828fed..9ebdecac8dd4 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.14 require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 + github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.4.3 diff --git a/go.sum b/go.sum index 6e7ae0db2b37..634b30ff6415 100644 --- a/go.sum +++ b/go.sum @@ -11,8 +11,9 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158 h1:CevA8fI91PAnP8vpnXuB8ZYAZ5wqY86nAbxfgK8tWO4= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= diff --git a/security/advancedtls/examples/go.sum 
b/security/advancedtls/examples/go.sum index 272f1afa4079..4dee8b2615e7 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -4,6 +4,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 272f1afa4079..4dee8b2615e7 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -4,6 +4,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/xds/internal/balancer/balancergroup/balancergroup.go index 5798b03ac506..749c6b36e717 100644 --- a/xds/internal/balancer/balancergroup/balancergroup.go +++ b/xds/internal/balancer/balancergroup/balancergroup.go @@ -23,7 +23,7 @@ import ( "sync" "time" - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" + orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/balancer" diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go index 9cc7bd072ecf..82d0c2dfb3ea 100644 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ b/xds/internal/balancer/balancergroup/balancergroup_test.go @@ -30,10 +30,9 @@ import ( "testing" "time" - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" + orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index db29c550be11..bcade35cba56 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -19,7 +19,7 @@ package clusterimpl import ( - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" + orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "google.golang.org/grpc/balancer" 
"google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" diff --git a/xds/internal/balancer/orca/orca.go b/xds/internal/balancer/orca/orca.go index 28016806eec4..75b9439d4dba 100644 --- a/xds/internal/balancer/orca/orca.go +++ b/xds/internal/balancer/orca/orca.go @@ -18,7 +18,7 @@ package orca import ( - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" + orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "github.com/golang/protobuf/proto" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" diff --git a/xds/internal/balancer/orca/orca_test.go b/xds/internal/balancer/orca/orca_test.go index d7a44134e22b..9979c1a9d107 100644 --- a/xds/internal/balancer/orca/orca_test.go +++ b/xds/internal/balancer/orca/orca_test.go @@ -20,7 +20,7 @@ import ( "strings" "testing" - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" + orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/grpctest" diff --git a/xds/internal/httpfilter/httpfilter.go b/xds/internal/httpfilter/httpfilter.go index b4399f9faeb3..dd9a278389b5 100644 --- a/xds/internal/httpfilter/httpfilter.go +++ b/xds/internal/httpfilter/httpfilter.go @@ -40,15 +40,16 @@ type Filter interface { // will be registered by each of its supported message types. TypeURLs() []string // ParseFilterConfig parses the provided configuration proto.Message from - // the LDS configuration of this filter. This may be an anypb.Any or a - // udpa.type.v1.TypedStruct for filters that do not accept a custom type. - // The resulting FilterConfig will later be passed to Build. + // the LDS configuration of this filter. This may be an anypb.Any, a + // udpa.type.v1.TypedStruct, or an xds.type.v3.TypedStruct for filters that + // do not accept a custom type. The resulting FilterConfig will later be + // passed to Build. 
ParseFilterConfig(proto.Message) (FilterConfig, error) // ParseFilterConfigOverride parses the provided override configuration // proto.Message from the RDS override configuration of this filter. This - // may be an anypb.Any or a udpa.type.v1.TypedStruct for filters that do - // not accept a custom type. The resulting FilterConfig will later be - // passed to Build. + // may be an anypb.Any, a udpa.type.v1.TypedStruct, or an + // xds.type.v3.TypedStruct for filters that do not accept a custom type. + // The resulting FilterConfig will later be passed to Build. ParseFilterConfigOverride(proto.Message) (FilterConfig, error) // IsTerminal returns whether this Filter is terminal or not (i.e. it must // be last filter in the filter chain). diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/lds_test.go index 18e2f55ede46..f889e380eab3 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/lds_test.go @@ -24,30 +24,30 @@ import ( "testing" "time" - v1typepb "github.com/cncf/udpa/go/udpa/type/v1" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" "github.com/golang/protobuf/proto" - spb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/protobuf/types/known/durationpb" - "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/protobuf/types/known/durationpb" + v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb 
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v2httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" v2listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" anypb "github.com/golang/protobuf/ptypes/any" + spb "github.com/golang/protobuf/ptypes/struct" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" ) @@ -81,9 +81,13 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Name: "customFilter", ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, } - typedStructFilter = &v3httppb.HttpFilter{ + oldTypedStructFilter = &v3httppb.HttpFilter{ + Name: "customFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: wrappedCustomFilterOldTypedStructConfig}, + } + newTypedStructFilter = &v3httppb.HttpFilter{ Name: "customFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: wrappedCustomFilterTypedStructConfig}, + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: wrappedCustomFilterNewTypedStructConfig}, } customOptionalFilter = &v3httppb.HttpFilter{ Name: "customFilter", @@ -375,8 +379,8 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, }, { - name: "v3 with custom filter in typed struct", - resources: []*anypb.Any{v3LisWithFilters(typedStructFilter)}, + name: "v3 with custom filter in old typed struct", + resources: 
[]*anypb.Any{v3LisWithFilters(oldTypedStructFilter)}, wantUpdate: map[string]ListenerUpdateErrTuple{ v3LDSTarget: {Update: ListenerUpdate{ RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, @@ -384,11 +388,33 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { { Name: "customFilter", Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterTypedStructConfig}, + Config: filterConfig{Cfg: customFilterOldTypedStructConfig}, }, routerFilter, }, - Raw: v3LisWithFilters(typedStructFilter), + Raw: v3LisWithFilters(oldTypedStructFilter), + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, + { + name: "v3 with custom filter in new typed struct", + resources: []*anypb.Any{v3LisWithFilters(newTypedStructFilter)}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterNewTypedStructConfig}, + }, + routerFilter, + }, + Raw: v3LisWithFilters(newTypedStructFilter), }}, }, wantMD: UpdateMetadata{ @@ -1914,7 +1940,19 @@ var clientOnlyCustomFilterConfig = &anypb.Any{ Value: []byte{1, 2, 3}, } -var customFilterTypedStructConfig = &v1typepb.TypedStruct{ +// This custom filter uses the old TypedStruct message from the cncf/udpa repo. +var customFilterOldTypedStructConfig = &v1udpatypepb.TypedStruct{ + TypeUrl: "custom.filter", + Value: &spb.Struct{ + Fields: map[string]*spb.Value{ + "foo": {Kind: &spb.Value_StringValue{StringValue: "bar"}}, + }, + }, +} +var wrappedCustomFilterOldTypedStructConfig *anypb.Any + +// This custom filter uses the new TypedStruct message from the cncf/xds repo. 
+var customFilterNewTypedStructConfig = &v3cncftypepb.TypedStruct{ TypeUrl: "custom.filter", Value: &spb.Struct{ Fields: map[string]*spb.Value{ @@ -1922,10 +1960,11 @@ var customFilterTypedStructConfig = &v1typepb.TypedStruct{ }, }, } -var wrappedCustomFilterTypedStructConfig *anypb.Any +var wrappedCustomFilterNewTypedStructConfig *anypb.Any func init() { - wrappedCustomFilterTypedStructConfig = testutils.MarshalAny(customFilterTypedStructConfig) + wrappedCustomFilterOldTypedStructConfig = testutils.MarshalAny(customFilterOldTypedStructConfig) + wrappedCustomFilterNewTypedStructConfig = testutils.MarshalAny(customFilterNewTypedStructConfig) } var unknownFilterConfig = &anypb.Any{ diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/rds_test.go index c89e8cddca41..8b419244d672 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/rds_test.go @@ -500,9 +500,14 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), }, { - name: "good-route-config-with-http-filter-config-typed-struct", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterTypedStructConfig}), - wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterTypedStructConfig}}), + name: "good-route-config-with-http-filter-config-in-old-typed-struct", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterOldTypedStructConfig}), + wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterOldTypedStructConfig}}), + }, + { + name: "good-route-config-with-http-filter-config-in-new-typed-struct", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterNewTypedStructConfig}), + wantUpdate: 
goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterNewTypedStructConfig}}), }, { name: "good-route-config-with-optional-http-filter-config", @@ -1422,8 +1427,8 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, { name: "with custom HTTP filter config in typed struct", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterTypedStructConfig}), - wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterTypedStructConfig}}), + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterOldTypedStructConfig}), + wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterOldTypedStructConfig}}), }, { name: "with optional custom HTTP filter config", diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go index 732c4e6addc2..4b4f0680de67 100644 --- a/xds/internal/xdsclient/xds.go +++ b/xds/internal/xdsclient/xds.go @@ -27,7 +27,8 @@ import ( "strings" "time" - v1typepb "github.com/cncf/udpa/go/udpa/type/v1" + v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" @@ -39,10 +40,11 @@ import ( v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/matcher" - 
"google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/xds/env" @@ -171,15 +173,24 @@ func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi } func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { - // The real type name is inside the TypedStruct. - s := new(v1typepb.TypedStruct) - if !ptypes.Is(config, s) { + switch { + case ptypes.Is(config, &v3cncftypepb.TypedStruct{}): + // The real type name is inside the new TypedStruct message. + s := new(v3cncftypepb.TypedStruct) + if err := ptypes.UnmarshalAny(config, s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + case ptypes.Is(config, &v1udpatypepb.TypedStruct{}): + // The real type name is inside the old TypedStruct message. + s := new(v1udpatypepb.TypedStruct) + if err := ptypes.UnmarshalAny(config, s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + default: return config, config.GetTypeUrl(), nil } - if err := ptypes.UnmarshalAny(config, s); err != nil { - return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) - } - return s, s.GetTypeUrl(), nil } func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { From 7e5fcc68f6a52f4589b3acfe683e8ef761484d30 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 14 Oct 2021 19:48:43 -0700 Subject: [PATCH 291/998] rls: update rls proto (#4877) --- balancer/rls/internal/client.go | 4 +- balancer/rls/internal/client_test.go | 4 +- .../internal/proto/grpc_lookup_v1/rls.pb.go | 116 +++++++----------- 3 files changed, 47 insertions(+), 77 deletions(-) diff --git a/balancer/rls/internal/client.go b/balancer/rls/internal/client.go index 0e8a1c932f11..c233e6e35289 100644 --- 
a/balancer/rls/internal/client.go +++ b/balancer/rls/internal/client.go @@ -68,8 +68,8 @@ func (c *rlsClient) lookup(path string, keyMap map[string]string, cb lookupCallb go func() { ctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout) resp, err := c.stub.RouteLookup(ctx, &rlspb.RouteLookupRequest{ - Server: c.origDialTarget, - Path: path, + // TODO(easwars): Use extra_keys field to populate host, service and + // method keys. TargetType: grpcTargetType, KeyMap: keyMap, }) diff --git a/balancer/rls/internal/client_test.go b/balancer/rls/internal/client_test.go index 4bf0303a2769..f5289f7a1ac0 100644 --- a/balancer/rls/internal/client_test.go +++ b/balancer/rls/internal/client_test.go @@ -131,8 +131,8 @@ func (s) TestLookupSuccess(t *testing.T) { "k2": "v2", } wantLookupRequest := &rlspb.RouteLookupRequest{ - Server: defaultDialTarget, - Path: rlsReqPath, + // TODO(easwars): Use extra_keys field to populate host, service and + // method keys. TargetType: "grpc", KeyMap: rlsReqKeyMap, } diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go b/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go index 9f063fd3d620..437dff2201c9 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go @@ -94,18 +94,6 @@ type RouteLookupRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Full host name of the target server, e.g. firestore.googleapis.com. - // Only set for gRPC requests; HTTP requests must use key_map explicitly. - // Deprecated in favor of setting key_map keys with GrpcKeyBuilder.extra_keys. - // - // Deprecated: Do not use. - Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` - // Full path of the request, i.e. "/service/method". - // Only set for gRPC requests; HTTP requests must use key_map explicitly. - // Deprecated in favor of setting key_map keys with GrpcKeyBuilder.extra_keys. 
- // - // Deprecated: Do not use. - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` // Target type allows the client to specify what kind of target format it // would like from RLS to allow it to find the regional server, e.g. "grpc". TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` @@ -149,22 +137,6 @@ func (*RouteLookupRequest) Descriptor() ([]byte, []int) { return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0} } -// Deprecated: Do not use. -func (x *RouteLookupRequest) GetServer() string { - if x != nil { - return x.Server - } - return "" -} - -// Deprecated: Do not use. -func (x *RouteLookupRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - func (x *RouteLookupRequest) GetTargetType() string { if x != nil { return x.TargetType @@ -259,52 +231,50 @@ var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x9d, 0x03, 0x0a, 0x12, 0x52, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x83, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x0a, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x54, 
0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, - 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, - 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, - 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, - 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, - 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, - 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, - 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 
0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, + 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, + 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, + 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x22, 0x5e, 0x0a, 0x13, 
0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, + 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( From 3db1cb09ea4400876343a34342f116dbcbcdb490 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 15 Oct 2021 10:15:34 -0700 Subject: [PATCH 292/998] xds/clusterimpl: fix SubConn wrapper returned by picker 
during race (#4876) --- xds/internal/balancer/clusterimpl/picker.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index bcade35cba56..8cce07553082 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -102,17 +102,15 @@ func newPicker(s balancer.State, config *dropConfigs, loadStore load.PerClusterR func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // Don't drop unless the inner picker is READY. Similar to // https://github.com/grpc/grpc-go/issues/2622. - if d.s.ConnectivityState != connectivity.Ready { - return d.s.Picker.Pick(info) - } - - // Check if this RPC should be dropped by category. - for _, dp := range d.drops { - if dp.drop() { - if d.loadStore != nil { - d.loadStore.CallDropped(dp.category) + if d.s.ConnectivityState == connectivity.Ready { + // Check if this RPC should be dropped by category. 
+ for _, dp := range d.drops { + if dp.drop() { + if d.loadStore != nil { + d.loadStore.CallDropped(dp.category) + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") } } From 36d87572dba304d444361ca3ddc77d5a9c0f831c Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 15 Oct 2021 10:39:56 -0700 Subject: [PATCH 293/998] attributes: add Equal method; resolver: add AddressMap and State.BalancerAttributes (#4855) --- attributes/attributes.go | 78 +++++---- attributes/attributes_test.go | 54 +++++-- balancer/base/balancer.go | 67 +++----- balancer/base/balancer_test.go | 28 ---- balancer/grpclb/state/state.go | 2 +- balancer/roundrobin/roundrobin_test.go | 84 ++++++---- .../weightedroundrobin/weightedroundrobin.go | 10 +- ...bin_test.go => weightedroundrobin_test.go} | 2 +- internal/credentials/xds/handshake_info.go | 10 +- internal/hierarchy/hierarchy.go | 26 ++- internal/hierarchy/hierarchy_test.go | 52 +++--- internal/metadata/metadata.go | 30 +++- internal/metadata/metadata_test.go | 4 +- internal/resolver/config_selector.go | 2 +- internal/transport/networktype/networktype.go | 2 +- internal/xds_handshake_cluster.go | 2 +- resolver/map.go | 103 ++++++++++++ resolver/map_test.go | 153 ++++++++++++++++++ resolver/resolver.go | 16 +- resolver/resolver_test.go | 33 ++++ .../clustermanager/clustermanager_test.go | 38 ++--- xds/internal/internal.go | 13 +- xds/internal/xdsclient/attributes.go | 2 +- 23 files changed, 594 insertions(+), 217 deletions(-) rename balancer/weightedroundrobin/{weightedwoundrobin_test.go => weightedroundrobin_test.go} (97%) create mode 100644 resolver/map.go create mode 100644 resolver/map_test.go create mode 100644 resolver/resolver_test.go diff --git a/attributes/attributes.go b/attributes/attributes.go index 3220d87be403..6ff2792ee4fa 100644 --- a/attributes/attributes.go +++ b/attributes/attributes.go @@ -25,55 +25,75 @@ 
// later release. package attributes -import "fmt" - // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own -// types for keys. +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. type Attributes struct { m map[interface{}]interface{} } -// New returns a new Attributes containing all key/value pairs in kvs. If the -// same key appears multiple times, the last value overwrites all previous -// values for that key. Panics if len(kvs) is not even. -func New(kvs ...interface{}) *Attributes { - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} - for i := 0; i < len(kvs)/2; i++ { - a.m[kvs[i*2]] = kvs[i*2+1] - } - return a +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } -// WithValues returns a new Attributes containing all key/value pairs in a and -// kvs. Panics if len(kvs) is not even. If the same key appears multiple -// times, the last value overwrites all previous values for that key. To -// remove an existing key, use a nil value. -func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. 
+func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) + return New(key, value) } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. +// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. 
+ return false + } + } + return true +} diff --git a/attributes/attributes_test.go b/attributes/attributes_test.go index 1174e2371a5f..02d5b24f3df1 100644 --- a/attributes/attributes_test.go +++ b/attributes/attributes_test.go @@ -20,41 +20,71 @@ package attributes_test import ( "fmt" - "reflect" "testing" "google.golang.org/grpc/attributes" ) +type stringVal struct { + s string +} + +func (s stringVal) Equal(o interface{}) bool { + os, ok := o.(stringVal) + return ok && s.s == os.s +} + func ExampleAttributes() { type keyOne struct{} type keyTwo struct{} - a := attributes.New(keyOne{}, 1, keyTwo{}, "two") + a := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) fmt.Println("Key one:", a.Value(keyOne{})) fmt.Println("Key two:", a.Value(keyTwo{})) // Output: // Key one: 1 - // Key two: two + // Key two: {two} } -func ExampleAttributes_WithValues() { +func ExampleAttributes_WithValue() { type keyOne struct{} type keyTwo struct{} a := attributes.New(keyOne{}, 1) - a = a.WithValues(keyTwo{}, "two") + a = a.WithValue(keyTwo{}, stringVal{s: "two"}) fmt.Println("Key one:", a.Value(keyOne{})) fmt.Println("Key two:", a.Value(keyTwo{})) // Output: // Key one: 1 - // Key two: two + // Key two: {two} } -// Test that two attributes with the same content are `reflect.DeepEqual`. -func TestDeepEqual(t *testing.T) { +// Test that two attributes with the same content are Equal. 
+func TestEqual(t *testing.T) { type keyOne struct{} - a1 := attributes.New(keyOne{}, 1) - a2 := attributes.New(keyOne{}, 1) - if !reflect.DeepEqual(a1, a2) { - t.Fatalf("reflect.DeepEqual(%+v, %+v), want true, got false", a1, a2) + type keyTwo struct{} + a1 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) + a2 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) + if !a1.Equal(a2) { + t.Fatalf("%+v.Equals(%+v) = false; want true", a1, a2) + } + if !a2.Equal(a1) { + t.Fatalf("%+v.Equals(%+v) = false; want true", a2, a1) + } +} + +// Test that two attributes with different content are not Equal. +func TestNotEqual(t *testing.T) { + type keyOne struct{} + type keyTwo struct{} + a1 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) + a2 := attributes.New(keyOne{}, 2).WithValue(keyTwo{}, stringVal{s: "two"}) + a3 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "one"}) + if a1.Equal(a2) { + t.Fatalf("%+v.Equals(%+v) = true; want false", a1, a2) + } + if a2.Equal(a1) { + t.Fatalf("%+v.Equals(%+v) = true; want false", a2, a1) + } + if a3.Equal(a1) { + t.Fatalf("%+v.Equals(%+v) = true; want false", a3, a1) } } diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index 8dd504299fee..908c6e3376e0 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -42,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]subConnInfo), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -58,11 +57,6 @@ func (bb *baseBuilder) Name() string { return bb.name } -type 
subConnInfo struct { - subConn balancer.SubConn - attrs *attributes.Attributes -} - type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -70,7 +64,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -81,7 +75,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -105,57 +99,32 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - // Strip attributes from addresses before using them as map keys. So - // that when two addresses only differ in attributes pointers (but with - // the same attribute content), they are considered the same address. - // - // Note that this doesn't handle the case where the attribute content is - // different. So if users want to set different attributes to create - // duplicate connections to the same backend, it doesn't work. This is - // fine for now, because duplicate is done by setting Metadata today. - // - // TODO: read attributes to handle duplicate connections. - aNoAttrs := a - aNoAttrs.Attributes = nil - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). 
- // - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() - } else { - // Always update the subconn's address in case the attributes - // changed. - // - // The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). - scInfo.attrs = a.Attributes - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, scInfo := range b.subConns { + b.subConns.Range(func(a resolver.Address, sci interface{}) { + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(scInfo.subConn) - delete(b.subConns, a) + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. } - } + }) // If resolver state contains no addresses, return an error so ClientConn // will trigger re-resolve. Also records this as an resolver error, so when // the overall state turns transient failure, the error message will have @@ -193,12 +162,12 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. 
- for addr, scInfo := range b.subConns { - if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { - addr.Attributes = scInfo.attrs - readySCs[scInfo.subConn] = SubConnInfo{Address: addr} + b.subConns.Range(func(addr resolver.Address, sci interface{}) { + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } - } + }) b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } diff --git a/balancer/base/balancer_test.go b/balancer/base/balancer_test.go index f8ff8cf98444..3a3ccd6ba71a 100644 --- a/balancer/base/balancer_test.go +++ b/balancer/base/balancer_test.go @@ -54,34 +54,6 @@ func (p *testPickBuilder) Build(info PickerBuildInfo) balancer.Picker { return nil } -func TestBaseBalancerStripAttributes(t *testing.T) { - b := (&baseBuilder{}).Build(&testClientConn{ - newSubConn: func(addrs []resolver.Address, _ balancer.NewSubConnOptions) (balancer.SubConn, error) { - for _, addr := range addrs { - if addr.Attributes == nil { - t.Errorf("in NewSubConn, got address %+v with nil attributes, want not nil", addr) - } - } - return &testSubConn{}, nil - }, - }, balancer.BuildOptions{}).(*baseBalancer) - - b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: []resolver.Address{ - {Addr: "1.1.1.1", Attributes: &attributes.Attributes{}}, - {Addr: "2.2.2.2", Attributes: &attributes.Attributes{}}, - }, - }, - }) - - for addr := range b.subConns { - if addr.Attributes != nil { - t.Errorf("in b.subConns, got address %+v with not nil attributes, want nil", addr) - } - } -} - func TestBaseBalancerReserveAttributes(t *testing.T) { var v = func(info PickerBuildInfo) { for _, sc := range info.ReadySCs { diff --git a/balancer/grpclb/state/state.go b/balancer/grpclb/state/state.go index a24264a34f5f..4ecfa1c21511 100644 --- a/balancer/grpclb/state/state.go +++ b/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { 
// Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/balancer/roundrobin/roundrobin_test.go b/balancer/roundrobin/roundrobin_test.go index b89cdb4a30f3..eb25055ff78c 100644 --- a/balancer/roundrobin/roundrobin_test.go +++ b/balancer/roundrobin/roundrobin_test.go @@ -59,18 +59,26 @@ type testServer struct { testMDChan chan []string } -func newTestServer() *testServer { - return &testServer{testMDChan: make(chan []string, 1)} +func newTestServer(mdchan bool) *testServer { + t := &testServer{} + if mdchan { + t.testMDChan = make(chan []string, 1) + } + return t } func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if s.testMDChan == nil { + return &testpb.Empty{}, nil + } md, ok := metadata.FromIncomingContext(ctx) - if ok && len(md[testMDKey]) != 0 { - select { - case s.testMDChan <- md[testMDKey]: - case <-ctx.Done(): - return nil, ctx.Err() - } + if !ok { + return nil, status.Errorf(codes.Internal, "no metadata in context") + } + select { + case s.testMDChan <- md[testMDKey]: + case <-ctx.Done(): + return nil, ctx.Err() } return &testpb.Empty{}, nil } @@ -91,7 +99,7 @@ func (t *test) cleanup() { } } -func startTestServers(count int) (_ *test, err error) { +func startTestServers(count int, mdchan bool) (_ *test, err error) { t := &test{} defer func() { @@ -106,7 +114,7 @@ func startTestServers(count int) (_ *test, err error) { } s := grpc.NewServer() - sImpl := newTestServer() + sImpl := newTestServer(mdchan) testpb.RegisterTestServiceServer(s, sImpl) t.servers = append(t.servers, s) t.serverImpls = append(t.serverImpls, sImpl) @@ -123,7 +131,7 @@ func startTestServers(count int) (_ *test, err error) { func (s) TestOneBackend(t *testing.T) { r := 
manual.NewBuilderWithScheme("whatever") - test, err := startTestServers(1) + test, err := startTestServers(1, false) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -153,7 +161,7 @@ func (s) TestBackendsRoundRobin(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") backendCount := 5 - test, err := startTestServers(backendCount) + test, err := startTestServers(backendCount, false) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -210,7 +218,7 @@ func (s) TestBackendsRoundRobin(t *testing.T) { func (s) TestAddressesRemoved(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - test, err := startTestServers(1) + test, err := startTestServers(1, false) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -255,7 +263,7 @@ func (s) TestAddressesRemoved(t *testing.T) { func (s) TestCloseWithPendingRPC(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - test, err := startTestServers(1) + test, err := startTestServers(1, false) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -287,7 +295,7 @@ func (s) TestCloseWithPendingRPC(t *testing.T) { func (s) TestNewAddressWhileBlocking(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - test, err := startTestServers(1) + test, err := startTestServers(1, false) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -334,7 +342,7 @@ func (s) TestOneServerDown(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") backendCount := 3 - test, err := startTestServers(backendCount) + test, err := startTestServers(backendCount, false) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -430,7 +438,7 @@ func (s) TestAllServersDown(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") backendCount := 3 - test, err := startTestServers(backendCount) + test, err := startTestServers(backendCount, false) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -500,7 +508,7 @@ func (s) 
TestAllServersDown(t *testing.T) { func (s) TestUpdateAddressAttributes(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - test, err := startTestServers(1) + test, err := startTestServers(1, true) if err != nil { t.Fatalf("failed to start servers: %v", err) } @@ -512,23 +520,26 @@ func (s) TestUpdateAddressAttributes(t *testing.T) { } defer cc.Close() testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + + // The first RPC should fail because there's no address. + ctxShort, cancel2 := context.WithTimeout(ctx, time.Millisecond) + defer cancel2() + if _, err := testc.EmptyCall(ctxShort, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) // The second RPC should succeed. - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } // The second RPC should not set metadata, so there's no md in the channel. - select { - case md1 := <-test.serverImpls[0].testMDChan: + md1 := <-test.serverImpls[0].testMDChan + if md1 != nil { t.Fatalf("got md: %v, want empty metadata", md1) - case <-time.After(time.Microsecond * 100): } const testMDValue = "test-md-value" @@ -536,14 +547,21 @@ func (s) TestUpdateAddressAttributes(t *testing.T) { r.UpdateState(resolver.State{Addresses: []resolver.Address{ imetadata.Set(resolver.Address{Addr: test.addresses[0]}, metadata.Pairs(testMDKey, testMDValue)), }}) - // The third RPC should succeed. 
- if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - // The third RPC should send metadata with it. - md2 := <-test.serverImpls[0].testMDChan - if len(md2) == 0 || md2[0] != testMDValue { - t.Fatalf("got md: %v, want %v", md2, []string{testMDValue}) + // A future RPC should send metadata with it. The update doesn't + // necessarily happen synchronously, so we wait some time before failing if + // some RPCs do not contain it. + for { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + if status.Code(err) == codes.DeadlineExceeded { + t.Fatalf("timed out waiting for metadata in response") + } + t.Fatalf("EmptyCall() = _, %v, want _, ", err) + } + md2 := <-test.serverImpls[0].testMDChan + if len(md2) == 1 && md2[0] == testMDValue { + return + } + time.Sleep(10 * time.Millisecond) } } diff --git a/balancer/weightedroundrobin/weightedroundrobin.go b/balancer/weightedroundrobin/weightedroundrobin.go index 4b7d3bfedff2..f15dddb56218 100644 --- a/balancer/weightedroundrobin/weightedroundrobin.go +++ b/balancer/weightedroundrobin/weightedroundrobin.go @@ -36,6 +36,12 @@ type AddrInfo struct { Weight uint32 } +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o interface{}) bool { + oa, ok := o.(AddrInfo) + return ok && oa.Weight == a.Weight +} + // SetAddrInfo returns a copy of addr in which the Attributes field is updated // with addrInfo. // @@ -44,7 +50,7 @@ type AddrInfo struct { // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(attributeKey{}, addrInfo) + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) return addr } @@ -55,7 +61,7 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func GetAddrInfo(addr resolver.Address) AddrInfo { - v := addr.Attributes.Value(attributeKey{}) + v := addr.BalancerAttributes.Value(attributeKey{}) ai, _ := v.(AddrInfo) return ai } diff --git a/balancer/weightedroundrobin/weightedwoundrobin_test.go b/balancer/weightedroundrobin/weightedroundrobin_test.go similarity index 97% rename from balancer/weightedroundrobin/weightedwoundrobin_test.go rename to balancer/weightedroundrobin/weightedroundrobin_test.go index aa46c449a13d..d83619da2e6a 100644 --- a/balancer/weightedroundrobin/weightedwoundrobin_test.go +++ b/balancer/weightedroundrobin/weightedroundrobin_test.go @@ -73,7 +73,7 @@ func TestAddrInfoToAndFromAttributes(t *testing.T) { } func TestGetAddInfoEmpty(t *testing.T) { - addr := resolver.Address{Attributes: attributes.New()} + addr := resolver.Address{} gotAddrInfo := GetAddrInfo(addr) wantAddrInfo := AddrInfo{} if !cmp.Equal(gotAddrInfo, wantAddrInfo) { diff --git a/internal/credentials/xds/handshake_info.go b/internal/credentials/xds/handshake_info.go index 6ef43cc89fa3..9fa0c94f41e8 100644 --- a/internal/credentials/xds/handshake_info.go +++ b/internal/credentials/xds/handshake_info.go @@ -43,10 +43,18 @@ func init() { // the Attributes field of resolver.Address. type handshakeAttrKey struct{} +// Equal reports whether the handshake info structs are identical (have the +// same pointer). This is sufficient as all subconns from one CDS balancer use +// the same one. 
+func (hi *HandshakeInfo) Equal(o interface{}) bool { + oh, ok := o.(*HandshakeInfo) + return ok && oh == hi +} + // SetHandshakeInfo returns a copy of addr in which the Attributes field is // updated with hInfo. func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo) + addr.Attributes = addr.Attributes.WithValue(handshakeAttrKey{}, hInfo) return addr } diff --git a/internal/hierarchy/hierarchy.go b/internal/hierarchy/hierarchy.go index a2f990f552e6..341d3405dc6c 100644 --- a/internal/hierarchy/hierarchy.go +++ b/internal/hierarchy/hierarchy.go @@ -30,19 +30,37 @@ type pathKeyType string const pathKey = pathKeyType("grpc.internal.address.hierarchical_path") +type pathValue []string + +func (p pathValue) Equal(o interface{}) bool { + op, ok := o.(pathValue) + if !ok { + return false + } + if len(op) != len(p) { + return false + } + for i, v := range p { + if v != op[i] { + return false + } + } + return true +} + // Get returns the hierarchical path of addr. func Get(addr resolver.Address) []string { - attrs := addr.Attributes + attrs := addr.BalancerAttributes if attrs == nil { return nil } - path, _ := attrs.Value(pathKey).([]string) - return path + path, _ := attrs.Value(pathKey).(pathValue) + return ([]string)(path) } // Set overrides the hierarchical path in addr with path. 
func Set(addr resolver.Address, path []string) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(pathKey, path) + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(pathKey, pathValue(path)) return addr } diff --git a/internal/hierarchy/hierarchy_test.go b/internal/hierarchy/hierarchy_test.go index fc62f82b0850..1043d5f81dfa 100644 --- a/internal/hierarchy/hierarchy_test.go +++ b/internal/hierarchy/hierarchy_test.go @@ -40,7 +40,7 @@ func TestGet(t *testing.T) { { name: "set", addr: resolver.Address{ - Attributes: attributes.New(pathKey, []string{"a", "b"}), + BalancerAttributes: attributes.New(pathKey, pathValue{"a", "b"}), }, want: []string{"a", "b"}, }, @@ -68,7 +68,7 @@ func TestSet(t *testing.T) { { name: "before is set", addr: resolver.Address{ - Attributes: attributes.New(pathKey, []string{"before", "a", "b"}), + BalancerAttributes: attributes.New(pathKey, pathValue{"before", "a", "b"}), }, path: []string{"a", "b"}, }, @@ -93,19 +93,19 @@ func TestGroup(t *testing.T) { { name: "all with hierarchy", addrs: []resolver.Address{ - {Addr: "a0", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "b0", Attributes: attributes.New(pathKey, []string{"b"})}, - {Addr: "b1", Attributes: attributes.New(pathKey, []string{"b"})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "b0", BalancerAttributes: attributes.New(pathKey, pathValue{"b"})}, + {Addr: "b1", BalancerAttributes: attributes.New(pathKey, pathValue{"b"})}, }, want: map[string][]resolver.Address{ "a": { - {Addr: "a0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{})}, }, "b": { - {Addr: 
"b0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "b1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "b0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "b1", BalancerAttributes: attributes.New(pathKey, pathValue{})}, }, }, }, @@ -113,15 +113,15 @@ func TestGroup(t *testing.T) { // Addresses without hierarchy are ignored. name: "without hierarchy", addrs: []resolver.Address{ - {Addr: "a0", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "b0", Attributes: nil}, - {Addr: "b1", Attributes: nil}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "b0", BalancerAttributes: nil}, + {Addr: "b1", BalancerAttributes: nil}, }, want: map[string][]resolver.Address{ "a": { - {Addr: "a0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{})}, }, }, }, @@ -130,15 +130,15 @@ func TestGroup(t *testing.T) { // the address is ignored. 
name: "wrong type", addrs: []resolver.Address{ - {Addr: "a0", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "b0", Attributes: attributes.New(pathKey, "b")}, - {Addr: "b1", Attributes: attributes.New(pathKey, 314)}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "b0", BalancerAttributes: attributes.New(pathKey, "b")}, + {Addr: "b1", BalancerAttributes: attributes.New(pathKey, 314)}, }, want: map[string][]resolver.Address{ "a": { - {Addr: "a0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{})}, }, }, }, @@ -167,14 +167,14 @@ func TestGroupE2E(t *testing.T) { var addrsWithHierarchy []resolver.Address for p, wts := range hierarchy { - path1 := []string{p} + path1 := pathValue{p} for wt, addrs := range wts { - path2 := append([]string(nil), path1...) + path2 := append(pathValue(nil), path1...) 
path2 = append(path2, wt) for _, addr := range addrs { a := resolver.Address{ - Addr: addr, - Attributes: attributes.New(pathKey, path2), + Addr: addr, + BalancerAttributes: attributes.New(pathKey, path2), } addrsWithHierarchy = append(addrsWithHierarchy, a) } diff --git a/internal/metadata/metadata.go b/internal/metadata/metadata.go index 302262613a02..b8733dbf340d 100644 --- a/internal/metadata/metadata.go +++ b/internal/metadata/metadata.go @@ -30,14 +30,38 @@ type mdKeyType string const mdKey = mdKeyType("grpc.internal.address.metadata") +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + // Get returns the metadata of addr. func Get(addr resolver.Address) metadata.MD { attrs := addr.Attributes if attrs == nil { return nil } - md, _ := attrs.Value(mdKey).(metadata.MD) - return md + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) } // Set sets (overrides) the metadata in addr. @@ -45,6 +69,6 @@ func Get(addr resolver.Address) metadata.MD { // When a SubConn is created with this address, the RPCs sent on it will all // have this metadata. 
func Set(addr resolver.Address, md metadata.MD) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(mdKey, md) + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } diff --git a/internal/metadata/metadata_test.go b/internal/metadata/metadata_test.go index 68c2ca5808c7..1aa0f9798e8c 100644 --- a/internal/metadata/metadata_test.go +++ b/internal/metadata/metadata_test.go @@ -41,7 +41,7 @@ func TestGet(t *testing.T) { { name: "not set", addr: resolver.Address{ - Attributes: attributes.New(mdKey, metadata.Pairs("k", "v")), + Attributes: attributes.New(mdKey, mdValue(metadata.Pairs("k", "v"))), }, want: metadata.Pairs("k", "v"), }, @@ -69,7 +69,7 @@ func TestSet(t *testing.T) { { name: "set before", addr: resolver.Address{ - Attributes: attributes.New(mdKey, metadata.Pairs("bef", "ore")), + Attributes: attributes.New(mdKey, mdValue(metadata.Pairs("bef", "ore"))), }, md: metadata.Pairs("k", "v"), }, diff --git a/internal/resolver/config_selector.go b/internal/resolver/config_selector.go index be7e13d58597..c7a18a948adb 100644 --- a/internal/resolver/config_selector.go +++ b/internal/resolver/config_selector.go @@ -132,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector") // SetConfigSelector sets the config selector in state and returns the new // state. func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { - state.Attributes = state.Attributes.WithValues(csKey, cs) + state.Attributes = state.Attributes.WithValue(csKey, cs) return state } diff --git a/internal/transport/networktype/networktype.go b/internal/transport/networktype/networktype.go index 7bb53cff1011..c11b5278274f 100644 --- a/internal/transport/networktype/networktype.go +++ b/internal/transport/networktype/networktype.go @@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype") // Set returns a copy of the provided address with attributes containing networkType. 
func Set(address resolver.Address, networkType string) resolver.Address { - address.Attributes = address.Attributes.WithValues(key, networkType) + address.Attributes = address.Attributes.WithValue(key, networkType) return address } diff --git a/internal/xds_handshake_cluster.go b/internal/xds_handshake_cluster.go index 3677c3f04f84..e8b492774d1a 100644 --- a/internal/xds_handshake_cluster.go +++ b/internal/xds_handshake_cluster.go @@ -28,7 +28,7 @@ type handshakeClusterNameKey struct{} // SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field // is updated with the cluster name. func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) return addr } diff --git a/resolver/map.go b/resolver/map.go new file mode 100644 index 000000000000..bfde61b331c5 --- /dev/null +++ b/resolver/map.go @@ -0,0 +1,103 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. 
Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + m map[string]addressMapEntryList +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[string]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + if len(l) == 0 { + return -1 + } + for i, entry := range l { + if entry.addr.ServerName == addr.ServerName && + entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + a.m[addr.Addr][entry].value = value + return + } + a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + entryList := a.m[addr.Addr] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addr.Addr] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + return len(a.m) +} + +// Range invokes f for each entry in the map. 
+func (a *AddressMap) Range(f func(addr Address, value interface{})) { + for _, entryList := range a.m { + for _, entry := range entryList { + f(entry.addr, entry.value) + } + } +} diff --git a/resolver/map_test.go b/resolver/map_test.go new file mode 100644 index 000000000000..86191d82bbb3 --- /dev/null +++ b/resolver/map_test.go @@ -0,0 +1,153 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +import ( + "testing" + + "google.golang.org/grpc/attributes" +) + +// Note: each address is different from addr1 by one value. addr7 matches +// addr1, since the only difference is BalancerAttributes, which are not +// compared. 
+var ( + addr1 = Address{Addr: "a1", Attributes: attributes.New("a1", 3), ServerName: "s1"} + addr2 = Address{Addr: "a2", Attributes: attributes.New("a1", 3), ServerName: "s1"} + addr3 = Address{Addr: "a1", Attributes: attributes.New("a2", 3), ServerName: "s1"} + addr4 = Address{Addr: "a1", Attributes: attributes.New("a1", 2), ServerName: "s1"} + addr5 = Address{Addr: "a1", Attributes: attributes.New("a1", "3"), ServerName: "s1"} + addr6 = Address{Addr: "a1", Attributes: attributes.New("a1", 3), ServerName: "s2"} + addr7 = Address{Addr: "a1", Attributes: attributes.New("a1", 3), ServerName: "s1", BalancerAttributes: attributes.New("xx", 3)} +) + +func (s) TestAddressMap_Length(t *testing.T) { + addrMap := NewAddressMap() + if got := addrMap.Len(); got != 0 { + t.Fatalf("addrMap.Len() = %v; want 0", got) + } + for i := 0; i < 10; i++ { + addrMap.Set(addr1, nil) + if got, want := addrMap.Len(), 1; got != want { + t.Fatalf("addrMap.Len() = %v; want %v", got, want) + } + addrMap.Set(addr7, nil) // aliases addr1 + } + for i := 0; i < 10; i++ { + addrMap.Set(addr2, nil) + if got, want := addrMap.Len(), 2; got != want { + t.Fatalf("addrMap.Len() = %v; want %v", got, want) + } + } +} + +func (s) TestAddressMap_Get(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + + if got, ok := addrMap.Get(addr2); ok || got != nil { + t.Fatalf("addrMap.Get(addr1) = %v, %v; want nil, false", got, ok) + } + + addrMap.Set(addr2, 2) + addrMap.Set(addr3, 3) + addrMap.Set(addr4, 4) + addrMap.Set(addr5, 5) + addrMap.Set(addr6, 6) + addrMap.Set(addr7, 7) // aliases addr1 + if got, ok := addrMap.Get(addr1); !ok || got.(int) != 7 { + t.Fatalf("addrMap.Get(addr1) = %v, %v; want %v, true", got, ok, 7) + } + if got, ok := addrMap.Get(addr2); !ok || got.(int) != 2 { + t.Fatalf("addrMap.Get(addr2) = %v, %v; want %v, true", got, ok, 2) + } + if got, ok := addrMap.Get(addr3); !ok || got.(int) != 3 { + t.Fatalf("addrMap.Get(addr3) = %v, %v; want %v, true", got, ok, 3) + } + if got, ok 
:= addrMap.Get(addr4); !ok || got.(int) != 4 { + t.Fatalf("addrMap.Get(addr4) = %v, %v; want %v, true", got, ok, 4) + } + if got, ok := addrMap.Get(addr5); !ok || got.(int) != 5 { + t.Fatalf("addrMap.Get(addr5) = %v, %v; want %v, true", got, ok, 5) + } + if got, ok := addrMap.Get(addr6); !ok || got.(int) != 6 { + t.Fatalf("addrMap.Get(addr6) = %v, %v; want %v, true", got, ok, 6) + } + if got, ok := addrMap.Get(addr7); !ok || got.(int) != 7 { + t.Fatalf("addrMap.Get(addr7) = %v, %v; want %v, true", got, ok, 7) + } +} + +func (s) TestAddressMap_Delete(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + addrMap.Set(addr2, 2) + if got, want := addrMap.Len(), 2; got != want { + t.Fatalf("addrMap.Len() = %v; want %v", got, want) + } + addrMap.Delete(addr3) + addrMap.Delete(addr4) + addrMap.Delete(addr5) + addrMap.Delete(addr6) + addrMap.Delete(addr7) // aliases addr1 + if got, ok := addrMap.Get(addr1); ok || got != nil { + t.Fatalf("addrMap.Get(addr1) = %v, %v; want nil, false", got, ok) + } + if got, ok := addrMap.Get(addr7); ok || got != nil { + t.Fatalf("addrMap.Get(addr7) = %v, %v; want nil, false", got, ok) + } + if got, ok := addrMap.Get(addr2); !ok || got.(int) != 2 { + t.Fatalf("addrMap.Get(addr2) = %v, %v; want %v, true", got, ok, 2) + } +} + +func (s) TestAddressMap_Range(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + addrMap.Set(addr2, 2) + addrMap.Set(addr3, 3) + addrMap.Set(addr4, 4) + addrMap.Set(addr5, 5) + addrMap.Set(addr6, 6) + addrMap.Set(addr7, 7) // aliases addr1 + + want := map[int]bool{2: true, 3: true, 4: true, 5: true, 6: true, 7: true} + test := func(a1, a2 Address, n int, v interface{}) { + if a1.Addr == a2.Addr && a1.Attributes == a2.Attributes && a1.ServerName == a2.ServerName { + if ok := want[n]; !ok { + t.Fatal("matched address multiple times:", a1, n, want) + } + if n != v.(int) { + t.Fatalf("%v read value %v; want %v:", a1, v, n) + } + delete(want, n) + } + } + addrMap.Range(func(a Address, v 
interface{}) { + test(a, addr1, 7, v) + test(a, addr2, 2, v) + test(a, addr3, 3, v) + test(a, addr4, 4, v) + test(a, addr5, 5, v) + test(a, addr6, 6, v) + }) + if len(want) != 0 { + t.Fatalf("did not find expected addresses; remaining: %v", want) + } +} diff --git a/resolver/resolver.go b/resolver/resolver.go index 9116897b463e..873b932b20d6 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -117,9 +117,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. + // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attribes do not affect SubConn + // creation, connection establishment, handshaking, etc. + BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -132,6 +137,15 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +func (a *Address) Equal(o *Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { diff --git a/resolver/resolver_test.go b/resolver/resolver_test.go new file mode 100644 index 000000000000..8d061f9b66d2 --- /dev/null +++ b/resolver/resolver_test.go @@ -0,0 +1,33 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +import ( + "testing" + + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index d3475ea3f5d8..191a5d56b692 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -85,7 +85,7 @@ type ignoreAttrsRRBalancer struct { func (trrb *ignoreAttrsRRBalancer) UpdateClientConnState(s balancer.ClientConnState) error { var newAddrs []resolver.Address for _, a := range s.ResolverState.Addresses { - a.Attributes = nil + a.BalancerAttributes = nil newAddrs = append(newAddrs, a) } s.ResolverState.Addresses = newAddrs @@ -137,8 +137,8 @@ func TestClusterPicks(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. 
wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, - {Addr: testBackendAddrStrs[1], Attributes: nil}, + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ @@ -156,11 +156,11 @@ func TestClusterPicks(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -215,8 +215,8 @@ func TestConfigUpdateAddCluster(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. 
wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, - {Addr: testBackendAddrStrs[1], Attributes: nil}, + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ @@ -234,11 +234,11 @@ func TestConfigUpdateAddCluster(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -285,7 +285,7 @@ func TestConfigUpdateAddCluster(t *testing.T) { if err != nil { t.Fatalf("failed to parse balancer config: %v", err) } - wantAddrs = append(wantAddrs, resolver.Address{Addr: testBackendAddrStrs[2], Attributes: nil}) + wantAddrs = append(wantAddrs, resolver.Address{Addr: testBackendAddrStrs[2], BalancerAttributes: nil}) if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), @@ -300,11 +300,11 @@ func TestConfigUpdateAddCluster(t *testing.T) { // Expect exactly one new subconn. 
addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -372,8 +372,8 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, - {Addr: testBackendAddrStrs[1], Attributes: nil}, + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ @@ -391,11 +391,11 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. 
- addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -475,11 +475,11 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m2[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -608,7 +608,7 @@ func TestInitialIdle(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, } if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ diff --git a/xds/internal/internal.go b/xds/internal/internal.go index 0cccd3824101..8df20a1f9c0a 100644 --- a/xds/internal/internal.go +++ b/xds/internal/internal.go @@ -46,6 +46,15 @@ func (l LocalityID) ToString() (string, error) { return string(b), nil } +// Equal allows the values to be compared by Attributes.Equal. 
+func (l LocalityID) Equal(o interface{}) bool { + ol, ok := o.(LocalityID) + if !ok { + return false + } + return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone +} + // LocalityIDFromString converts a json representation of locality, into a // LocalityID struct. func LocalityIDFromString(s string) (ret LocalityID, _ error) { @@ -62,12 +71,12 @@ const localityKey = localityKeyType("grpc.xds.internal.address.locality") // GetLocalityID returns the locality ID of addr. func GetLocalityID(addr resolver.Address) LocalityID { - path, _ := addr.Attributes.Value(localityKey).(LocalityID) + path, _ := addr.BalancerAttributes.Value(localityKey).(LocalityID) return path } // SetLocalityID sets locality ID in addr to l. func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(localityKey, l) + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(localityKey, l) return addr } diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index d2357df0727c..467c205a2559 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -54,6 +54,6 @@ func FromResolverState(state resolver.State) XDSClient { // SetClient sets c in state and returns the new state. 
func SetClient(state resolver.State, c XDSClient) resolver.State { - state.Attributes = state.Attributes.WithValues(clientKey, c) + state.Attributes = state.Attributes.WithValue(clientKey, c) return state } From d590071c10a9ed4e4a453307a11b213827c1fb81 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 15 Oct 2021 11:26:46 -0700 Subject: [PATCH 294/998] status: clarify FromError docstring (#4880) --- status/status.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/status/status.go b/status/status.go index 54d187186b8f..af2cffe985c0 100644 --- a/status/status.go +++ b/status/status.go @@ -73,11 +73,16 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced by this -// package or has a method `GRPCStatus() *Status`. -// If err is nil, a Status is returned with codes.OK and no message. -// Otherwise, ok is false and a Status is returned with codes.Unknown and -// the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. +// +// - If err is nil, a Status is returned with codes.OK and no message. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. 
func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true From 4757d0249e2d5d16f259ce4224f7ec5fb7f284ee Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 15 Oct 2021 16:14:49 -0400 Subject: [PATCH 295/998] xds: Make regex matchers match on full string, not just partial match (#4875) * xds: Make regex matchers match on full string, not just partial match --- internal/grpcutil/regex.go | 28 ++++++++++ internal/grpcutil/regex_test.go | 60 +++++++++++++++++++++ internal/xds/matcher/matcher_header.go | 3 +- internal/xds/matcher/matcher_header_test.go | 14 +++++ internal/xds/matcher/string_matcher.go | 3 +- internal/xds/matcher/string_matcher_test.go | 6 +++ xds/internal/xdsclient/matcher_path.go | 4 +- xds/internal/xdsclient/matcher_path_test.go | 2 + 8 files changed, 117 insertions(+), 3 deletions(-) create mode 100644 internal/grpcutil/regex.go create mode 100644 internal/grpcutil/regex_test.go diff --git a/internal/grpcutil/regex.go b/internal/grpcutil/regex.go new file mode 100644 index 000000000000..2810a8ba2fdf --- /dev/null +++ b/internal/grpcutil/regex.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full string matches the regex provided. 
+func FullMatchWithRegex(re *regexp.Regexp, string string) bool { + re.Longest() + rem := re.FindString(string) + return len(rem) == len(string) +} diff --git a/internal/grpcutil/regex_test.go b/internal/grpcutil/regex_test.go new file mode 100644 index 000000000000..1b2299858daa --- /dev/null +++ b/internal/grpcutil/regex_test.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "regexp" + "testing" +) + +func TestFullMatchWithRegex(t *testing.T) { + tests := []struct { + name string + regexStr string + string string + want bool + }{ + { + name: "not match because only partial", + regexStr: "^a+$", + string: "ab", + want: false, + }, + { + name: "match because fully match", + regexStr: "^a+$", + string: "aa", + want: true, + }, + { + name: "longest", + regexStr: "a(|b)", + string: "ab", + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hrm := regexp.MustCompile(tt.regexStr) + if got := FullMatchWithRegex(hrm, tt.string); got != tt.want { + t.Errorf("match() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go index 35a22adadcf2..c3944373cd7f 100644 --- a/internal/xds/matcher/matcher_header.go +++ b/internal/xds/matcher/matcher_header.go @@ -24,6 +24,7 @@ import ( "strconv" "strings" + "google.golang.org/grpc/internal/grpcutil" 
"google.golang.org/grpc/metadata" ) @@ -91,7 +92,7 @@ func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { if !ok { return false } - return hrm.re.MatchString(v) + return grpcutil.FullMatchWithRegex(hrm.re, v) } func (hrm *HeaderRegexMatcher) String() string { diff --git a/internal/xds/matcher/matcher_header_test.go b/internal/xds/matcher/matcher_header_test.go index 9a0d51300d0a..7e78065212cf 100644 --- a/internal/xds/matcher/matcher_header_test.go +++ b/internal/xds/matcher/matcher_header_test.go @@ -107,6 +107,20 @@ func TestHeaderRegexMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, + { + name: "no match because only part of value matches with regex", + key: "header", + regexStr: "^a+$", + md: metadata.Pairs("header", "ab"), + want: false, + }, + { + name: "match because full value matches with regex", + key: "header", + regexStr: "^a+$", + md: metadata.Pairs("header", "aa"), + want: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/xds/matcher/string_matcher.go b/internal/xds/matcher/string_matcher.go index d7df6a1e2b40..c138f78735bc 100644 --- a/internal/xds/matcher/string_matcher.go +++ b/internal/xds/matcher/string_matcher.go @@ -27,6 +27,7 @@ import ( "strings" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + "google.golang.org/grpc/internal/grpcutil" ) // StringMatcher contains match criteria for matching a string, and is an @@ -58,7 +59,7 @@ func (sm StringMatcher) Match(input string) bool { case sm.suffixMatch != nil: return strings.HasSuffix(input, *sm.suffixMatch) case sm.regexMatch != nil: - return sm.regexMatch.MatchString(input) + return grpcutil.FullMatchWithRegex(sm.regexMatch, input) case sm.containsMatch != nil: return strings.Contains(input, *sm.containsMatch) } diff --git a/internal/xds/matcher/string_matcher_test.go b/internal/xds/matcher/string_matcher_test.go index 389963b94e9e..9528b57e44a5 100644 --- 
a/internal/xds/matcher/string_matcher_test.go +++ b/internal/xds/matcher/string_matcher_test.go @@ -266,6 +266,12 @@ func TestMatch(t *testing.T) { input: "goodregex", wantMatch: true, }, + { + desc: "regex match failure because only part match", + matcher: regexMatcher, + input: "goodregexa", + wantMatch: false, + }, { desc: "regex match failure", matcher: regexMatcher, diff --git a/xds/internal/xdsclient/matcher_path.go b/xds/internal/xdsclient/matcher_path.go index a00c6954ef53..2ca0e4bbcc44 100644 --- a/xds/internal/xdsclient/matcher_path.go +++ b/xds/internal/xdsclient/matcher_path.go @@ -21,6 +21,8 @@ package xdsclient import ( "regexp" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) type pathMatcher interface { @@ -93,7 +95,7 @@ func newPathRegexMatcher(re *regexp.Regexp) *pathRegexMatcher { } func (prm *pathRegexMatcher) match(path string) bool { - return prm.re.MatchString(path) + return grpcutil.FullMatchWithRegex(prm.re, path) } func (prm *pathRegexMatcher) String() string { diff --git a/xds/internal/xdsclient/matcher_path_test.go b/xds/internal/xdsclient/matcher_path_test.go index a211034a60dd..003d6db72e29 100644 --- a/xds/internal/xdsclient/matcher_path_test.go +++ b/xds/internal/xdsclient/matcher_path_test.go @@ -80,6 +80,8 @@ func TestPathRegexMatcherMatch(t *testing.T) { }{ {name: "match", regexPath: "^/s+/m.*$", path: "/sss/me", want: true}, {name: "not match", regexPath: "^/s+/m*$", path: "/sss/b", want: false}, + {name: "no match because only part of path matches with regex", regexPath: "^a+$", path: "ab", want: false}, + {name: "match because full path matches with regex", regexPath: "^a+$", path: "aa", want: true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 3d9e9c42dcee664dfa4ed149eff07bd766517dfe Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 18 Oct 2021 14:57:49 -0700 Subject: [PATCH 296/998] logging: enable json logging in xds server container (#4885) --- interop/xds/server/Dockerfile | 1 + 1 
file changed, 1 insertion(+) diff --git a/interop/xds/server/Dockerfile b/interop/xds/server/Dockerfile index cd8dcb5ccaad..21dadb918810 100644 --- a/interop/xds/server/Dockerfile +++ b/interop/xds/server/Dockerfile @@ -33,4 +33,5 @@ FROM alpine COPY --from=build /go/src/grpc-go/server . ENV GRPC_GO_LOG_VERBOSITY_LEVEL=2 ENV GRPC_GO_LOG_SEVERITY_LEVEL="info" +ENV GRPC_GO_LOG_FORMATTER="json" ENTRYPOINT ["./server"] From 01ed64857e3146000ec99cdea4f2932204f17cdd Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 18 Oct 2021 15:12:44 -0700 Subject: [PATCH 297/998] update go.mod to point to latest cncf/udpa repo (#4884) --- examples/go.sum | 5 +++-- go.mod | 2 +- go.sum | 4 +++- security/advancedtls/examples/go.sum | 3 ++- security/advancedtls/go.sum | 3 ++- 5 files changed, 11 insertions(+), 6 deletions(-) diff --git a/examples/go.sum b/examples/go.sum index 1724c644936d..1eff984d41bb 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -5,9 +5,10 @@ github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= diff --git a/go.mod b/go.mod index 9ebdecac8dd4..fcffdceef25c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.14 require ( github.com/cespare/xxhash/v2 v2.1.1 - github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 + github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b diff --git a/go.sum b/go.sum index 634b30ff6415..8b542e0beb65 100644 --- a/go.sum +++ b/go.sum @@ -9,9 +9,11 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= 
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 4dee8b2615e7..d926d5ffba71 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -2,8 +2,9 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 4dee8b2615e7..d926d5ffba71 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -2,8 +2,9 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= From 3b1d3e48c925e24950daaf3401a584b5224daca1 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 19 Oct 2021 16:47:48 -0700 Subject: [PATCH 298/998] examples: update load balancing example to use 
loadBalancingConfig (#4887) --- examples/features/load_balancing/client/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/features/load_balancing/client/main.go b/examples/features/load_balancing/client/main.go index 1578df16671b..72c5ad9c1468 100644 --- a/examples/features/load_balancing/client/main.go +++ b/examples/features/load_balancing/client/main.go @@ -55,7 +55,7 @@ func makeRPCs(cc *grpc.ClientConn, n int) { } func main() { - // "pick_first" is the default, so there's no need to set the load balancer. + // "pick_first" is the default, so there's no need to set the load balancing policy. pickfirstConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), grpc.WithInsecure(), @@ -74,7 +74,7 @@ func main() { // Make another ClientConn with round_robin policy. roundrobinConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), - grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`), // This sets the initial balancing policy. + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), // This sets the initial balancing policy. grpc.WithInsecure(), grpc.WithBlock(), ) From fbf9b56376584af6f55ae5c719a4e8ee9dad744a Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 20 Oct 2021 09:31:50 -0700 Subject: [PATCH 299/998] grpc: stabilize WithDefaultServiceConfig and improve godoc (#4888) --- dialoptions.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 5f7b7a164cea..2af1b6c2f14b 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -518,14 +518,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. 
Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// Experimental +// 2. The name resolver does not provide a service config or provides an +// invalid service config. // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s From 0d5030755c3d452f35f5eef98684f728f9f54b9f Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 20 Oct 2021 10:51:48 -0700 Subject: [PATCH 300/998] xds/priority: handle new low priority when high priority is in Idle (#4889) --- .../balancer/priority/balancer_priority.go | 1 + .../balancer/priority/balancer_test.go | 81 +++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index bd2c6724ea5c..37cd44560437 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -88,6 +88,7 @@ func (b *priorityBalancer) syncPriority() { if !child.started || child.state.ConnectivityState == connectivity.Ready || + child.state.ConnectivityState == connectivity.Idle || p == len(b.priorities)-1 { if b.childInUse != "" && b.childInUse != child.name { // childInUse was set and is different from this child, will diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index b884035442e4..29b712c0f55d 100644 --- a/xds/internal/balancer/priority/balancer_test.go 
+++ b/xds/internal/balancer/priority/balancer_test.go @@ -1874,3 +1874,84 @@ func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[1]) } } + +// If the high priorities send initial pickers with Idle state, their pickers +// should get picks, because policies like ringhash starts in Idle, and doesn't +// connect. In this case, if a lower priority is added, it shouldn't switch to +// the lower priority. +// +// Init 0; 0 is Idle, use 0; add 1, use 0. +func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) { + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // One child, with priorities [0], one backend. + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}}, + }, + Priorities: []string{"child-0"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + + // Send an Idle state update to trigger an Idle picker update. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + p0 := <-cc.NewPickerCh + if pr, err := p0.Pick(balancer.PickInfo{}); err != errsTestInitIdle[0] { + t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[0]) + } + + // Add 1, should keep using 0. 
+ if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 1)}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // The ClientConn state update triggers a priority switch, from p0 -> p0 + // (since p0 is still in use). Along with this the update, p0 also gets a + // ClientConn state update, with the addresses, which didn't change in this + // test (this update to the child is necessary in case the addresses are + // different). + // + // The test child policy, initIdleBalancer, blindly calls NewSubConn with + // all the addresses it receives, so this will trigger a NewSubConn with the + // old p0 addresses. (Note that in a real balancer, like roundrobin, no new + // SubConn will be created because the addresses didn't change). + // + // The check below makes sure that the addresses are still from p0, and not + // from p1. This is good enough for the purpose of this test. + addrsNew := <-cc.NewSubConnAddrsCh + if got, want := addrsNew[0].Addr, testBackendAddrStrs[0]; got != want { + // Fail if p1 is started and creates a SubConn. 
+ t.Fatalf("got unexpected call to NewSubConn with addr: %v, want %v", addrsNew, want) + } +} From bd0f88150dfdee1a5a316f2f64ecb9cf05c27fb7 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 20 Oct 2021 10:52:03 -0700 Subject: [PATCH 301/998] grpclb: recover after receiving an empty server list (#4879) --- balancer/grpclb/grpclb_remote_balancer.go | 12 +++- balancer/grpclb/grpclb_test.go | 67 +++++++++++++++++++++++ 2 files changed, 77 insertions(+), 2 deletions(-) diff --git a/balancer/grpclb/grpclb_remote_balancer.go b/balancer/grpclb/grpclb_remote_balancer.go index 0210c012d7b0..330df4baa218 100644 --- a/balancer/grpclb/grpclb_remote_balancer.go +++ b/balancer/grpclb/grpclb_remote_balancer.go @@ -135,11 +135,19 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } if lb.usePickFirst { - var sc balancer.SubConn - for _, sc = range lb.subConns { + var ( + scKey resolver.Address + sc balancer.SubConn + ) + for scKey, sc = range lb.subConns { break } if sc != nil { + if len(backendAddrs) == 0 { + lb.cc.cc.RemoveSubConn(sc) + delete(lb.subConns, scKey) + return + } lb.cc.cc.UpdateAddresses(sc, backendAddrs) sc.Connect() return diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 3b666764728f..22aa8f1b868a 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -1274,6 +1274,73 @@ func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { wg.Wait() } +func testGRPCLBEmptyServerList(t *testing.T, svcfg string) { + r := manual.NewBuilderWithScheme("whatever") + + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + + beServers := []*lbpb.Server{{ + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }} + + creds := serverNameCheckCreds{} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer 
cancel() + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&creds), + grpc.WithContextDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) + + tss.ls.sls <- &lbpb.ServerList{Servers: beServers} + + rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(svcfg)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{{ + Addr: tss.lbAddr, + ServerName: lbServerName, + }}}) + r.UpdateState(rs) + t.Log("Perform an initial RPC and expect it to succeed...") + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("Initial _.EmptyCall(_, _) = _, %v, want _, ", err) + } + t.Log("Now send an empty server list. Wait until we see an RPC failure to make sure the client got it...") + tss.ls.sls <- &lbpb.ServerList{} + gotError := false + for i := 0; i < 100; i++ { + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { + gotError = true + break + } + } + if !gotError { + t.Fatalf("Expected to eventually see an RPC fail after the grpclb sends an empty server list, but none did.") + } + t.Log("Now send a non-empty server list. 
A wait-for-ready RPC should now succeed...") + tss.ls.sls <- &lbpb.ServerList{Servers: beServers} + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("Final _.EmptyCall(_, _) = _, %v, want _, ", err) + } +} + +func (s) TestGRPCLBEmptyServerListRoundRobin(t *testing.T) { + testGRPCLBEmptyServerList(t, `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"round_robin":{}}]}}]}`) +} + +func (s) TestGRPCLBEmptyServerListPickFirst(t *testing.T) { + testGRPCLBEmptyServerList(t, `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`) +} + func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") From 2a312458e6bd0d3fd9ffccee9b6906b6e753bb8f Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 20 Oct 2021 14:01:46 -0700 Subject: [PATCH 302/998] client: don't force passthrough as default resolver (#4890) --- clientconn.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/clientconn.go b/clientconn.go index eacf5fd8fccf..5a9e7d754fe2 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1633,9 +1633,6 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { // scheme, except when a custom dialer is specified in which case, we should // always use passthrough scheme. 
defScheme := resolver.GetDefaultScheme() - if cc.dopts.copts.Dialer != nil { - defScheme = "passthrough" - } channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target From f00baa6c3c8455ef1db2ac64f7b89a63ec7d2776 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 20 Oct 2021 15:07:37 -0700 Subject: [PATCH 303/998] resolver: replace AddressMap.Range with Keys (#4891) Co-authored-by: Menghan Li --- balancer/base/balancer.go | 10 ++++++---- resolver/map.go | 14 ++++++++++---- resolver/map_test.go | 35 ++++++++++++----------------------- resolver/resolver.go | 2 +- 4 files changed, 29 insertions(+), 32 deletions(-) diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index 908c6e3376e0..a67074a3ad06 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -115,7 +115,8 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc.Connect() } } - b.subConns.Range(func(a resolver.Address, sci interface{}) { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { @@ -124,7 +125,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. } - }) + } // If resolver state contains no addresses, return an error so ClientConn // will trigger re-resolve. Also records this as an resolver error, so when // the overall state turns transient failure, the error message will have @@ -162,12 +163,13 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. 
- b.subConns.Range(func(addr resolver.Address, sci interface{}) { + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) sc := sci.(balancer.SubConn) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } - }) + } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } diff --git a/resolver/map.go b/resolver/map.go index bfde61b331c5..e87ecd0eeb38 100644 --- a/resolver/map.go +++ b/resolver/map.go @@ -90,14 +90,20 @@ func (a *AddressMap) Delete(addr Address) { // Len returns the number of entries in the map. func (a *AddressMap) Len() int { - return len(a.m) + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret } -// Range invokes f for each entry in the map. -func (a *AddressMap) Range(f func(addr Address, value interface{})) { +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { - f(entry.addr, entry.value) + ret = append(ret, entry.addr) } } + return ret } diff --git a/resolver/map_test.go b/resolver/map_test.go index 86191d82bbb3..26d539bcd6d3 100644 --- a/resolver/map_test.go +++ b/resolver/map_test.go @@ -19,8 +19,11 @@ package resolver import ( + "fmt" + "sort" "testing" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc/attributes" ) @@ -117,7 +120,7 @@ func (s) TestAddressMap_Delete(t *testing.T) { } } -func (s) TestAddressMap_Range(t *testing.T) { +func (s) TestAddressMap_Keys(t *testing.T) { addrMap := NewAddressMap() addrMap.Set(addr1, 1) addrMap.Set(addr2, 2) @@ -127,27 +130,13 @@ func (s) TestAddressMap_Range(t *testing.T) { addrMap.Set(addr6, 6) addrMap.Set(addr7, 7) // aliases addr1 - want := map[int]bool{2: true, 3: true, 4: true, 5: true, 6: true, 7: true} - test := func(a1, a2 Address, n int, v interface{}) { - if a1.Addr == a2.Addr && a1.Attributes == a2.Attributes && 
a1.ServerName == a2.ServerName { - if ok := want[n]; !ok { - t.Fatal("matched address multiple times:", a1, n, want) - } - if n != v.(int) { - t.Fatalf("%v read value %v; want %v:", a1, v, n) - } - delete(want, n) - } - } - addrMap.Range(func(a Address, v interface{}) { - test(a, addr1, 7, v) - test(a, addr2, 2, v) - test(a, addr3, 3, v) - test(a, addr4, 4, v) - test(a, addr5, 5, v) - test(a, addr6, 6, v) - }) - if len(want) != 0 { - t.Fatalf("did not find expected addresses; remaining: %v", want) + want := []Address{addr1, addr2, addr3, addr4, addr5, addr6} + got := addrMap.Keys() + if d := cmp.Diff(want, got, cmp.Transformer("sort", func(in []Address) []Address { + out := append([]Address(nil), in...) + sort.Slice(out, func(i, j int) bool { return fmt.Sprint(out[i]) < fmt.Sprint(out[j]) }) + return out + })); d != "" { + t.Fatalf("addrMap.Keys returned unexpected elements (-want, +got):\n%v", d) } } diff --git a/resolver/resolver.go b/resolver/resolver.go index 873b932b20d6..e28b68026062 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -139,7 +139,7 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o *Address) bool { +func (a *Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && From 4f21cde702d9f9b1c874791e1c3751b1f7d192ce Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Thu, 21 Oct 2021 15:39:02 -0700 Subject: [PATCH 304/998] authz: support empty principals and fix rbac authenticated matcher (#4883) * authz: support empty principals in SDK and fixes to rbac authenticated matcher. 
* Minor formatting * Remove pointer from principals fields * resolving comments --- authz/rbac_translator.go | 20 ++- authz/rbac_translator_test.go | 26 ++++ authz/sdk_end2end_test.go | 209 +++++++++++++++++++++++--- internal/xds/rbac/matchers.go | 12 +- internal/xds/rbac/rbac_engine.go | 5 + internal/xds/rbac/rbac_engine_test.go | 14 ++ 6 files changed, 255 insertions(+), 31 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index 039d76bc99d9..4a790b1a702c 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -155,14 +155,20 @@ func parsePrincipalNames(principalNames []string) []*v3rbacpb.Principal { } func parsePeer(source peer) (*v3rbacpb.Principal, error) { - if len(source.Principals) > 0 { - return principalOr(parsePrincipalNames(source.Principals)), nil + if source.Principals == nil { + return &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_Any{ + Any: true, + }, + }, nil } - return &v3rbacpb.Principal{ - Identifier: &v3rbacpb.Principal_Any{ - Any: true, - }, - }, nil + if len(source.Principals) == 0 { + return &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{}, + }}, nil + } + return principalOr(parsePrincipalNames(source.Principals)), nil } func parsePaths(paths []string) []*v3rbacpb.Permission { diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index 9a883e9d78d5..9b88362ea90b 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -205,6 +205,32 @@ func TestTranslatePolicy(t *testing.T) { }, }, }, + "empty principal field": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [{ + "name": "allow_authenticated", + "source": {"principals":[]} + }] + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ 
+ Authenticated: &v3rbacpb.Principal_Authenticated{}, + }}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + }, + }, + }, "unknown field": { authzPolicy: `{"random": 123}`, wantErr: "failed to unmarshal policy", diff --git a/authz/sdk_end2end_test.go b/authz/sdk_end2end_test.go index 093b2bb437d2..79fa379bceac 100644 --- a/authz/sdk_end2end_test.go +++ b/authz/sdk_end2end_test.go @@ -20,6 +20,8 @@ package authz_test import ( "context" + "crypto/tls" + "crypto/x509" "io" "io/ioutil" "net" @@ -30,10 +32,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/authz" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" pb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/testdata" ) type testServer struct { @@ -69,7 +73,7 @@ var sdkTests = map[string]struct { md metadata.MD wantStatus *status.Status }{ - "DeniesRpcMatchInDenyNoMatchInAllow": { + "DeniesRPCMatchInDenyNoMatchInAllow": { authzPolicy: `{ "name": "authz", "allow_rules": @@ -112,7 +116,7 @@ var sdkTests = map[string]struct { md: metadata.Pairs("key-abc", "val-abc"), wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), }, - "DeniesRpcMatchInDenyAndAllow": { + "DeniesRPCMatchInDenyAndAllow": { authzPolicy: `{ "name": "authz", "allow_rules": @@ -142,7 +146,7 @@ var sdkTests = map[string]struct { }`, wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), }, - "AllowsRpcNoMatchInDenyMatchInAllow": { + "AllowsRPCNoMatchInDenyMatchInAllow": { authzPolicy: `{ "name": "authz", "allow_rules": @@ -179,7 +183,7 @@ var sdkTests = map[string]struct { md: metadata.Pairs("key-xyz", "val-xyz"), wantStatus: status.New(codes.OK, ""), }, - "DeniesRpcNoMatchInDenyAndAllow": { + "DeniesRPCNoMatchInDenyAndAllow": { authzPolicy: `{ "name": "authz", "allow_rules": 
@@ -209,7 +213,7 @@ var sdkTests = map[string]struct { }`, wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), }, - "AllowsRpcEmptyDenyMatchInAllow": { + "AllowsRPCEmptyDenyMatchInAllow": { authzPolicy: `{ "name": "authz", "allow_rules": @@ -238,7 +242,7 @@ var sdkTests = map[string]struct { }`, wantStatus: status.New(codes.OK, ""), }, - "DeniesRpcEmptyDenyNoMatchInAllow": { + "DeniesRPCEmptyDenyNoMatchInAllow": { authzPolicy: `{ "name": "authz", "allow_rules": @@ -257,6 +261,45 @@ var sdkTests = map[string]struct { }`, wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), }, + "DeniesRPCRequestWithPrincipalsFieldOnUnauthenticatedConnection": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_TestServiceCalls", + "source": { + "principals": + [ + "foo" + ] + }, + "request": { + "paths": + [ + "/grpc.testing.TestService/*" + ] + } + } + ] + }`, + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "DeniesRPCRequestWithEmptyPrincipalsOnUnauthenticatedConnection": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_authenticated", + "source": { + "principals": [] + } + } + ] + }`, + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, } func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { @@ -315,6 +358,136 @@ func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { } } +func (s) TestSDKAllowsRPCRequestWithEmptyPrincipalsOnTLSAuthenticatedConnection(t *testing.T) { + authzPolicy := `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_authenticated", + "source": { + "principals": [] + } + } + ] + }` + // Start a gRPC server with SDK unary server interceptor. 
+ i, _ := authz.NewStatic(authzPolicy) + creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) + if err != nil { + t.Fatalf("failed to generate credentials: %v", err) + } + s := grpc.NewServer( + grpc.Creds(creds), + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + pb.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + go s.Serve(lis) + + // Establish a connection to the server. + creds, err = credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") + if err != nil { + t.Fatalf("failed to load credentials: %v", err) + } + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(creds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := pb.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. + if _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}); err != nil { + t.Fatalf("client.UnaryCall(_, _) = %v; want nil", err) + } +} + +func (s) TestSDKAllowsRPCRequestWithEmptyPrincipalsOnMTLSAuthenticatedConnection(t *testing.T) { + authzPolicy := `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_authenticated", + "source": { + "principals": [] + } + } + ] + }` + // Start a gRPC server with SDK unary server interceptor. 
+ i, _ := authz.NewStatic(authzPolicy) + cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(x509/server1_cert.pem, x509/server1_key.pem) failed: %v", err) + } + ca, err := ioutil.ReadFile(testdata.Path("x509/client_ca_cert.pem")) + if err != nil { + t.Fatalf("ioutil.ReadFile(x509/client_ca_cert.pem) failed: %v", err) + } + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(ca) { + t.Fatal("failed to append certificates") + } + creds := credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: []tls.Certificate{cert}, + ClientCAs: certPool, + }) + s := grpc.NewServer( + grpc.Creds(creds), + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + pb.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + go s.Serve(lis) + + // Establish a connection to the server. 
+ cert, err = tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) + } + ca, err = ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + if err != nil { + t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(ca) { + t.Fatal("failed to append certificates") + } + creds = credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + ServerName: "x.test.example.com", + }) + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(creds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := pb.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. + if _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}); err != nil { + t.Fatalf("client.UnaryCall(_, _) = %v; want nil", err) + } +} + func (s) TestSDKFileWatcherEnd2End(t *testing.T) { for name, test := range sdkTests { t.Run(name, func(t *testing.T) { @@ -387,7 +560,7 @@ func retryUntil(ctx context.Context, tsc pb.TestServiceClient, want *status.Stat } func (s) TestSDKFileWatcher_ValidPolicyRefresh(t *testing.T) { - valid1 := sdkTests["DeniesRpcMatchInDenyAndAllow"] + valid1 := sdkTests["DeniesRPCMatchInDenyAndAllow"] file := createTmpPolicyFile(t, "valid_policy_refresh", []byte(valid1.authzPolicy)) i, _ := authz.NewFileWatcher(file, 100*time.Millisecond) defer i.Close() @@ -419,23 +592,23 @@ func (s) TestSDKFileWatcher_ValidPolicyRefresh(t *testing.T) { // Verifying authorization decision. 
_, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { - t.Fatalf("error want:{%v} got:{%v}", valid1.wantStatus.Err(), got.Err()) + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) } // Rewrite the file with a different valid authorization policy. - valid2 := sdkTests["AllowsRpcEmptyDenyMatchInAllow"] + valid2 := sdkTests["AllowsRPCEmptyDenyMatchInAllow"] if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) } // Verifying authorization decision. if got := retryUntil(ctx, client, valid2.wantStatus); got != nil { - t.Fatalf("error want:{%v} got:{%v}", valid2.wantStatus.Err(), got) + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got, valid2.wantStatus.Err()) } } func (s) TestSDKFileWatcher_InvalidPolicySkipReload(t *testing.T) { - valid := sdkTests["DeniesRpcMatchInDenyAndAllow"] + valid := sdkTests["DeniesRPCMatchInDenyAndAllow"] file := createTmpPolicyFile(t, "invalid_policy_skip_reload", []byte(valid.authzPolicy)) i, _ := authz.NewFileWatcher(file, 20*time.Millisecond) defer i.Close() @@ -467,7 +640,7 @@ func (s) TestSDKFileWatcher_InvalidPolicySkipReload(t *testing.T) { // Verifying authorization decision. _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { - t.Fatalf("error want:{%v} got:{%v}", valid.wantStatus.Err(), got.Err()) + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid.wantStatus.Err()) } // Skips the invalid policy update, and continues to use the valid policy. @@ -481,12 +654,12 @@ func (s) TestSDKFileWatcher_InvalidPolicySkipReload(t *testing.T) { // Verifying authorization decision. 
_, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { - t.Fatalf("error want:{%v} got:{%v}", valid.wantStatus.Err(), got.Err()) + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid.wantStatus.Err()) } } func (s) TestSDKFileWatcher_RecoversFromReloadFailure(t *testing.T) { - valid1 := sdkTests["DeniesRpcMatchInDenyAndAllow"] + valid1 := sdkTests["DeniesRPCMatchInDenyAndAllow"] file := createTmpPolicyFile(t, "recovers_from_reload_failure", []byte(valid1.authzPolicy)) i, _ := authz.NewFileWatcher(file, 100*time.Millisecond) defer i.Close() @@ -518,7 +691,7 @@ func (s) TestSDKFileWatcher_RecoversFromReloadFailure(t *testing.T) { // Verifying authorization decision. _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { - t.Fatalf("error want:{%v} got:{%v}", valid1.wantStatus.Err(), got.Err()) + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) } // Skips the invalid policy update, and continues to use the valid policy. @@ -532,17 +705,17 @@ func (s) TestSDKFileWatcher_RecoversFromReloadFailure(t *testing.T) { // Verifying authorization decision. _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { - t.Fatalf("error want:{%v} got:{%v}", valid1.wantStatus.Err(), got.Err()) + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) } // Rewrite the file with a different valid authorization policy. 
- valid2 := sdkTests["AllowsRpcEmptyDenyMatchInAllow"] + valid2 := sdkTests["AllowsRPCEmptyDenyMatchInAllow"] if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) } // Verifying authorization decision. if got := retryUntil(ctx, client, valid2.wantStatus); got != nil { - t.Fatalf("error want:{%v} got:{%v}", valid2.wantStatus.Err(), got) + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got, valid2.wantStatus.Err()) } } diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 28dabf465919..6129a292d23e 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -395,13 +395,13 @@ func newAuthenticatedMatcher(authenticatedMatcherConfig *v3rbacpb.Principal_Auth } func (am *authenticatedMatcher) match(data *rpcData) bool { - // Represents this line in the RBAC documentation = "If unset, it applies to - // any user that is authenticated" (see package-level comments). An - // authenticated downstream in a stateful TLS connection will have to - // provide a certificate to prove their identity. Thus, you can simply check - // if there is a certificate present. + if data.authType != "tls" { + // Connection is not authenticated. + return false + } if am.stringMatcher == nil { - return len(data.certs) != 0 + // Allows any authenticated user. + return true } // "If there is no client certificate (thus no SAN nor Subject), check if "" // (empty string) matches. 
If it matches, the principal_name is said to diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index a25f9cfdeefa..ecb8512ac51a 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -187,10 +187,12 @@ func newRPCData(ctx context.Context) (*rpcData, error) { return nil, fmt.Errorf("error parsing local address: %v", err) } + var authType string var peerCertificates []*x509.Certificate if pi.AuthInfo != nil { tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo) if ok { + authType = pi.AuthInfo.AuthType() peerCertificates = tlsInfo.State.PeerCertificates } } @@ -201,6 +203,7 @@ func newRPCData(ctx context.Context) (*rpcData, error) { fullMethod: mn, destinationPort: uint32(dp), localAddr: conn.LocalAddr(), + authType: authType, certs: peerCertificates, }, nil } @@ -219,6 +222,8 @@ type rpcData struct { destinationPort uint32 // localAddr is the address that the RPC is being sent to. localAddr net.Addr + // authType is the type of authentication e.g. "tls". + authType string // certs are the certificates presented by the peer during a TLS // handshake. 
certs []*x509.Certificate diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go index 17832458209a..e2e5d98c2a88 100644 --- a/internal/xds/rbac/rbac_engine_test.go +++ b/internal/xds/rbac/rbac_engine_test.go @@ -916,6 +916,20 @@ func (s) TestChainEngine(t *testing.T) { fullMethod: "some method", peerInfo: &peer.Peer{ Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + }, }, }, wantStatusCode: codes.OK, From 03753f593ccb9034846f7a17db5165a4827b35a2 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 25 Oct 2021 17:42:07 -0700 Subject: [PATCH 305/998] creds/google: fix CFE cluster name check (#4893) --- credentials/google/google_test.go | 2 +- credentials/google/xds.go | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index 6a6e492ee77d..8c08712087df 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -101,7 +101,7 @@ func TestClientHandshakeBasedOnClusterName(t *testing.T) { { name: "with CFE cluster name", ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ - Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, cfeClusterName).Attributes, + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "google_cfe_bigtable.googleapis.com").Attributes, }), // CFE should use tls. 
wantTyp: "tls", diff --git a/credentials/google/xds.go b/credentials/google/xds.go index 588c685e2592..b8c2e8f9204c 100644 --- a/credentials/google/xds.go +++ b/credentials/google/xds.go @@ -21,18 +21,19 @@ package google import ( "context" "net" + "strings" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" ) -const cfeClusterName = "google-cfe" +const cfeClusterNamePrefix = "google_cfe_" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. // - if attributes has cluster name -// - if cluster name is "google_cfe", use TLS +// - if cluster name has prefix "google_cfe_", use TLS // - otherwise, use ALTS // - else, do TLS // @@ -55,7 +56,7 @@ func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority s return c.tls.ClientHandshake(ctx, authority, rawConn) } cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || cn == cfeClusterName { + if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { return c.tls.ClientHandshake(ctx, authority, rawConn) } // If attributes have cluster name, and cluster name is not cfe, it's a From f1d87c14c2165a7ddbc26f6319e777c88c1b3c85 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 26 Oct 2021 10:33:08 -0700 Subject: [PATCH 306/998] client: properly disable retry if GRPC_GO_RETRY=off (#4899) --- internal/envconfig/envconfig.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index e766ac04af21..9f25a67fc6bd 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -33,8 +33,9 @@ const ( ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on" or if XDS retry support is enabled. 
- Retry = strings.EqualFold(os.Getenv(retryStr), "on") || xdsenv.RetrySupport + // Retry is enabled unless explicitly disabled via "GRPC_GO_RETRY=off" or + // if XDS retry support is explicitly disabled. + Retry = !strings.EqualFold(os.Getenv(retryStr), "off") && xdsenv.RetrySupport // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) From 6e8625df635bd7afe589d38a1559ebacf4273b07 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 26 Oct 2021 14:43:44 -0700 Subject: [PATCH 307/998] doc: promote WithDisableRetry to stable; clarify retry is enabled by default (#4901) --- dialoptions.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 2af1b6c2f14b..40d8ba6596ab 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -539,14 +539,8 @@ func WithDefaultServiceConfig(s string) DialOption { // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. // -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Retry support is currently enabled by default, but may be disabled by +// setting the environment variable "GRPC_GO_RETRY" to "off". 
func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true From 9fa26982649594f2b84a442644d3c8995d4dac1b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 26 Oct 2021 15:11:36 -0700 Subject: [PATCH 308/998] xds/csds: populate new GenericXdsConfig field (#4898) --- xds/csds/csds.go | 148 +++---------- xds/csds/csds_test.go | 487 +++++++++++++++--------------------------- 2 files changed, 201 insertions(+), 434 deletions(-) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index c4477a55d1a8..1d817fbcc865 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -26,7 +26,6 @@ package csds import ( "context" "io" - "time" v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -56,6 +55,13 @@ var ( } ) +const ( + listenerTypeURL = "envoy.config.listener.v3.Listener" + routeConfigTypeURL = "envoy.config.route.v3.RouteConfiguration" + clusterTypeURL = "envoy.config.cluster.v3.Cluster" + endpointsTypeURL = "envoy.config.endpoint.v3.ClusterLoadAssignment" +) + // ClientStatusDiscoveryServer implementations interface ClientStatusDiscoveryServiceServer. type ClientStatusDiscoveryServer struct { // xdsClient will always be the same in practice. 
But we keep a copy in each @@ -108,16 +114,21 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) } + lds := dumpToGenericXdsConfig(listenerTypeURL, s.xdsClient.DumpLDS) + rds := dumpToGenericXdsConfig(routeConfigTypeURL, s.xdsClient.DumpRDS) + cds := dumpToGenericXdsConfig(clusterTypeURL, s.xdsClient.DumpCDS) + eds := dumpToGenericXdsConfig(endpointsTypeURL, s.xdsClient.DumpEDS) + configs := make([]*v3statuspb.ClientConfig_GenericXdsConfig, 0, len(lds)+len(rds)+len(cds)+len(eds)) + configs = append(configs, lds...) + configs = append(configs, rds...) + configs = append(configs, cds...) + configs = append(configs, eds...) + ret := &v3statuspb.ClientStatusResponse{ Config: []*v3statuspb.ClientConfig{ { - Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().NodeProto), - XdsConfig: []*v3statuspb.PerXdsConfig{ - s.buildLDSPerXDSConfig(), - s.buildRDSPerXDSConfig(), - s.buildCDSPerXDSConfig(), - s.buildEDSPerXDSConfig(), - }, + Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().NodeProto), + GenericXdsConfigs: configs, }, }, } @@ -162,129 +173,28 @@ func nodeProtoToV3(n proto.Message) *v3corepb.Node { return node } -func (s *ClientStatusDiscoveryServer) buildLDSPerXDSConfig() *v3statuspb.PerXdsConfig { - version, dump := s.xdsClient.DumpLDS() - resources := make([]*v3adminpb.ListenersConfigDump_DynamicListener, 0, len(dump)) +func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xdsclient.UpdateWithMD)) []*v3statuspb.ClientConfig_GenericXdsConfig { + _, dump := dumpF() + ret := make([]*v3statuspb.ClientConfig_GenericXdsConfig, 0, len(dump)) for name, d := range dump { - configDump := &v3adminpb.ListenersConfigDump_DynamicListener{ + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, Name: name, - ClientStatus: serviceStatusToProto(d.MD.Status), - } - if 
(d.MD.Timestamp != time.Time{}) { - configDump.ActiveState = &v3adminpb.ListenersConfigDump_DynamicListenerState{ - VersionInfo: d.MD.Version, - Listener: d.Raw, - LastUpdated: timestamppb.New(d.MD.Timestamp), - } - } - if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, - } - } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - PerXdsConfig: &v3statuspb.PerXdsConfig_ListenerConfig{ - ListenerConfig: &v3adminpb.ListenersConfigDump{ - VersionInfo: version, - DynamicListeners: resources, - }, - }, - } -} - -func (s *ClientStatusDiscoveryServer) buildRDSPerXDSConfig() *v3statuspb.PerXdsConfig { - _, dump := s.xdsClient.DumpRDS() - resources := make([]*v3adminpb.RoutesConfigDump_DynamicRouteConfig, 0, len(dump)) - for _, d := range dump { - configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ VersionInfo: d.MD.Version, + XdsConfig: d.Raw, + LastUpdated: timestamppb.New(d.MD.Timestamp), ClientStatus: serviceStatusToProto(d.MD.Status), } - if (d.MD.Timestamp != time.Time{}) { - configDump.RouteConfig = d.Raw - configDump.LastUpdated = timestamppb.New(d.MD.Timestamp) - } if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ + config.ErrorState = &v3adminpb.UpdateFailureState{ LastUpdateAttempt: timestamppb.New(errState.Timestamp), Details: errState.Err.Error(), VersionInfo: errState.Version, } } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - PerXdsConfig: &v3statuspb.PerXdsConfig_RouteConfig{ - RouteConfig: &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: resources, - }, - }, - } -} - -func (s *ClientStatusDiscoveryServer) buildCDSPerXDSConfig() *v3statuspb.PerXdsConfig { - version, dump := s.xdsClient.DumpCDS() - resources := 
make([]*v3adminpb.ClustersConfigDump_DynamicCluster, 0, len(dump)) - for _, d := range dump { - configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{ - VersionInfo: d.MD.Version, - ClientStatus: serviceStatusToProto(d.MD.Status), - } - if (d.MD.Timestamp != time.Time{}) { - configDump.Cluster = d.Raw - configDump.LastUpdated = timestamppb.New(d.MD.Timestamp) - } - if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, - } - } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - PerXdsConfig: &v3statuspb.PerXdsConfig_ClusterConfig{ - ClusterConfig: &v3adminpb.ClustersConfigDump{ - VersionInfo: version, - DynamicActiveClusters: resources, - }, - }, - } -} - -func (s *ClientStatusDiscoveryServer) buildEDSPerXDSConfig() *v3statuspb.PerXdsConfig { - _, dump := s.xdsClient.DumpEDS() - resources := make([]*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig, 0, len(dump)) - for _, d := range dump { - configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - VersionInfo: d.MD.Version, - ClientStatus: serviceStatusToProto(d.MD.Status), - } - if (d.MD.Timestamp != time.Time{}) { - configDump.EndpointConfig = d.Raw - configDump.LastUpdated = timestamppb.New(d.MD.Timestamp) - } - if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, - } - } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - PerXdsConfig: &v3statuspb.PerXdsConfig_EndpointConfig{ - EndpointConfig: &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: resources, - }, - }, + ret = append(ret, config) } + return ret } func serviceStatusToProto(serviceStatus xdsclient.ServiceStatus) 
v3adminpb.ClientResourceStatus { diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 9de83d37fecb..ed1088b4339d 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -21,15 +21,13 @@ package csds import ( "context" "fmt" + "sort" "strings" "testing" "time" - "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/internal/testutils" @@ -40,7 +38,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/timestamppb" v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -58,65 +55,42 @@ const ( ) var cmpOpts = cmp.Options{ - cmpopts.EquateEmpty(), - cmp.Comparer(func(a, b *timestamppb.Timestamp) bool { return true }), - protocmp.IgnoreFields(&v3adminpb.UpdateFailureState{}, "last_update_attempt", "details"), - protocmp.SortRepeated(func(a, b *v3adminpb.ListenersConfigDump_DynamicListener) bool { - return strings.Compare(a.Name, b.Name) < 0 - }), - protocmp.SortRepeated(func(a, b *v3adminpb.RoutesConfigDump_DynamicRouteConfig) bool { - if a.RouteConfig == nil { - return false - } - if b.RouteConfig == nil { - return true - } - var at, bt v3routepb.RouteConfiguration - if err := ptypes.UnmarshalAny(a.RouteConfig, &at); err != nil { - panic("failed to unmarshal RouteConfig" + err.Error()) - } - if err := ptypes.UnmarshalAny(b.RouteConfig, &bt); err != nil { - panic("failed to unmarshal RouteConfig" + err.Error()) - } - return strings.Compare(at.Name, bt.Name) < 0 - }), - protocmp.SortRepeated(func(a, b 
*v3adminpb.ClustersConfigDump_DynamicCluster) bool { - if a.Cluster == nil { - return false - } - if b.Cluster == nil { - return true - } - var at, bt v3clusterpb.Cluster - if err := ptypes.UnmarshalAny(a.Cluster, &at); err != nil { - panic("failed to unmarshal Cluster" + err.Error()) - } - if err := ptypes.UnmarshalAny(b.Cluster, &bt); err != nil { - panic("failed to unmarshal Cluster" + err.Error()) - } - return strings.Compare(at.Name, bt.Name) < 0 + cmp.Transformer("sort", func(in []*v3statuspb.ClientConfig_GenericXdsConfig) []*v3statuspb.ClientConfig_GenericXdsConfig { + out := append([]*v3statuspb.ClientConfig_GenericXdsConfig(nil), in...) + sort.Slice(out, func(i, j int) bool { + a, b := out[i], out[j] + if a == nil { + return true + } + if b == nil { + return false + } + if strings.Compare(a.TypeUrl, b.TypeUrl) == 0 { + return strings.Compare(a.Name, b.Name) < 0 + } + return strings.Compare(a.TypeUrl, b.TypeUrl) < 0 + }) + return out }), - protocmp.SortRepeated(func(a, b *v3adminpb.EndpointsConfigDump_DynamicEndpointConfig) bool { - if a.EndpointConfig == nil { - return false - } - if b.EndpointConfig == nil { - return true - } - var at, bt v3endpointpb.ClusterLoadAssignment - if err := ptypes.UnmarshalAny(a.EndpointConfig, &at); err != nil { - panic("failed to unmarshal Endpoints" + err.Error()) + protocmp.Transform(), +} + +// filterFields clears unimportant fields in the proto messages. +// +// protocmp.IgnoreFields() doesn't work on nil messages (it panics). +func filterFields(ms []*v3statuspb.ClientConfig_GenericXdsConfig) []*v3statuspb.ClientConfig_GenericXdsConfig { + out := append([]*v3statuspb.ClientConfig_GenericXdsConfig{}, ms...) 
+ for _, m := range out { + if m == nil { + continue } - if err := ptypes.UnmarshalAny(b.EndpointConfig, &bt); err != nil { - panic("failed to unmarshal Endpoints" + err.Error()) + m.LastUpdated = nil + if m.ErrorState != nil { + m.ErrorState.Details = "blahblah" + m.ErrorState.LastUpdateAttempt = nil } - return strings.Compare(at.ClusterName, bt.ClusterName) < 0 - }), - protocmp.IgnoreFields(&v3adminpb.ListenersConfigDump_DynamicListenerState{}, "last_updated"), - protocmp.IgnoreFields(&v3adminpb.RoutesConfigDump_DynamicRouteConfig{}, "last_updated"), - protocmp.IgnoreFields(&v3adminpb.ClustersConfigDump_DynamicCluster{}, "last_updated"), - protocmp.IgnoreFields(&v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{}, "last_updated"), - protocmp.Transform(), + } + return out } var ( @@ -329,67 +303,31 @@ func checkForRequested(stream v3statuspbgrpc.ClientStatusDiscoveryService_Stream if n := len(r.Config); n != 1 { return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) } - if n := len(r.Config[0].XdsConfig); n != 4 { - return fmt.Errorf("got %d xds configs (one for each type), want 4: %v", n, proto.MarshalTextString(r)) - } - for _, cfg := range r.Config[0].XdsConfig { - switch config := cfg.PerXdsConfig.(type) { - case *v3statuspb.PerXdsConfig_ListenerConfig: - var wantLis []*v3adminpb.ListenersConfigDump_DynamicListener - for i := range ldsTargets { - wantLis = append(wantLis, &v3adminpb.ListenersConfigDump_DynamicListener{ - Name: ldsTargets[i], - ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.ListenersConfigDump{ - DynamicListeners: wantLis, - } - if diff := cmp.Diff(config.ListenerConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_RouteConfig: - var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig - for range rdsTargets { - wantRoutes = append(wantRoutes, &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - ClientStatus: 
v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: wantRoutes, - } - if diff := cmp.Diff(config.RouteConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_ClusterConfig: - var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster - for range cdsTargets { - wantCluster = append(wantCluster, &v3adminpb.ClustersConfigDump_DynamicCluster{ - ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.ClustersConfigDump{ - DynamicActiveClusters: wantCluster, - } - if diff := cmp.Diff(config.ClusterConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_EndpointConfig: - var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig - for range cdsTargets { - wantEndpoint = append(wantEndpoint, &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: wantEndpoint, - } - if diff := cmp.Diff(config.EndpointConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - default: - return fmt.Errorf("unexpected PerXdsConfig: %+v; %v", cfg.PerXdsConfig, protoToJSON(r)) - } + + var want []*v3statuspb.ClientConfig_GenericXdsConfig + // Status is Requested, but version and xds config are all unset. 
+ for i := range ldsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: listenerTypeURL, Name: ldsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, + }) + } + for i := range rdsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: routeConfigTypeURL, Name: rdsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, + }) + } + for i := range cdsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: clusterTypeURL, Name: cdsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, + }) + } + for i := range edsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: endpointsTypeURL, Name: edsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, + }) + } + if diff := cmp.Diff(filterFields(r.Config[0].GenericXdsConfigs), want, cmpOpts); diff != "" { + return fmt.Errorf(diff) } return nil } @@ -409,84 +347,47 @@ func checkForACKed(stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClie if n := len(r.Config); n != 1 { return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) } - if n := len(r.Config[0].XdsConfig); n != 4 { - return fmt.Errorf("got %d xds configs (one for each type), want 4: %v", n, proto.MarshalTextString(r)) - } - for _, cfg := range r.Config[0].XdsConfig { - switch config := cfg.PerXdsConfig.(type) { - case *v3statuspb.PerXdsConfig_ListenerConfig: - var wantLis []*v3adminpb.ListenersConfigDump_DynamicListener - for i := range ldsTargets { - wantLis = append(wantLis, &v3adminpb.ListenersConfigDump_DynamicListener{ - Name: ldsTargets[i], - ActiveState: &v3adminpb.ListenersConfigDump_DynamicListenerState{ - VersionInfo: wantVersion, - Listener: listenerAnys[i], - LastUpdated: nil, - }, - ErrorState: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.ListenersConfigDump{ - VersionInfo: wantVersion, - 
DynamicListeners: wantLis, - } - if diff := cmp.Diff(config.ListenerConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_RouteConfig: - var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig - for i := range rdsTargets { - wantRoutes = append(wantRoutes, &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - VersionInfo: wantVersion, - RouteConfig: routeAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: wantRoutes, - } - if diff := cmp.Diff(config.RouteConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_ClusterConfig: - var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster - for i := range cdsTargets { - wantCluster = append(wantCluster, &v3adminpb.ClustersConfigDump_DynamicCluster{ - VersionInfo: wantVersion, - Cluster: clusterAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.ClustersConfigDump{ - VersionInfo: wantVersion, - DynamicActiveClusters: wantCluster, - } - if diff := cmp.Diff(config.ClusterConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_EndpointConfig: - var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig - for i := range cdsTargets { - wantEndpoint = append(wantEndpoint, &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - VersionInfo: wantVersion, - EndpointConfig: endpointAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: wantEndpoint, - } - if diff := cmp.Diff(config.EndpointConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - default: - return fmt.Errorf("unexpected PerXdsConfig: %+v; %v", cfg.PerXdsConfig, protoToJSON(r)) - } + + var want 
[]*v3statuspb.ClientConfig_GenericXdsConfig + // Status is Acked, config is filled with the prebuilt Anys. + for i := range ldsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: listenerTypeURL, + Name: ldsTargets[i], + VersionInfo: wantVersion, + XdsConfig: listenerAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + }) + } + for i := range rdsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: routeConfigTypeURL, + Name: rdsTargets[i], + VersionInfo: wantVersion, + XdsConfig: routeAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + }) + } + for i := range cdsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: clusterTypeURL, + Name: cdsTargets[i], + VersionInfo: wantVersion, + XdsConfig: clusterAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + }) + } + for i := range edsTargets { + want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: endpointsTypeURL, + Name: edsTargets[i], + VersionInfo: wantVersion, + XdsConfig: endpointAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + }) + } + if diff := cmp.Diff(filterFields(r.Config[0].GenericXdsConfigs), want, cmpOpts); diff != "" { + return fmt.Errorf(diff) } return nil } @@ -508,129 +409,85 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco if n := len(r.Config); n != 1 { return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) } - if n := len(r.Config[0].XdsConfig); n != 4 { - return fmt.Errorf("got %d xds configs (one for each type), want 4: %v", n, proto.MarshalTextString(r)) - } - for _, cfg := range r.Config[0].XdsConfig { - switch config := cfg.PerXdsConfig.(type) { - case *v3statuspb.PerXdsConfig_ListenerConfig: - var wantLis []*v3adminpb.ListenersConfigDump_DynamicListener - for i := range ldsTargets { - configDump := &v3adminpb.ListenersConfigDump_DynamicListener{ - Name: 
ldsTargets[i], - ActiveState: &v3adminpb.ListenersConfigDump_DynamicListenerState{ - VersionInfo: nackVersion, - Listener: listenerAnys[i], - LastUpdated: nil, - }, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.ActiveState.VersionInfo = ackVersion - configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantLis = append(wantLis, configDump) - } - wantDump := &v3adminpb.ListenersConfigDump{ - VersionInfo: nackVersion, - DynamicListeners: wantLis, - } - if diff := cmp.Diff(config.ListenerConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_RouteConfig: - var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig - for i := range rdsTargets { - configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - VersionInfo: nackVersion, - RouteConfig: routeAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.VersionInfo = ackVersion - configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantRoutes = append(wantRoutes, configDump) - } - wantDump := &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: wantRoutes, - } - if diff := cmp.Diff(config.RouteConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_ClusterConfig: - var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster - for i := range cdsTargets { - configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{ - VersionInfo: nackVersion, - Cluster: clusterAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.VersionInfo = ackVersion - configDump.ClientStatus = 
v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantCluster = append(wantCluster, configDump) - } - wantDump := &v3adminpb.ClustersConfigDump{ - VersionInfo: nackVersion, - DynamicActiveClusters: wantCluster, - } - if diff := cmp.Diff(config.ClusterConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) + + var want []*v3statuspb.ClientConfig_GenericXdsConfig + // Resources with the nackIdx are NACKed. + for i := range ldsTargets { + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: listenerTypeURL, + Name: ldsTargets[i], + VersionInfo: nackVersion, + XdsConfig: listenerAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + } + if i == nackResourceIdx { + config.VersionInfo = ackVersion + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{ + Details: "blahblah", + VersionInfo: nackVersion, } - case *v3statuspb.PerXdsConfig_EndpointConfig: - var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig - for i := range cdsTargets { - configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - VersionInfo: nackVersion, - EndpointConfig: endpointAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.VersionInfo = ackVersion - configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantEndpoint = append(wantEndpoint, configDump) + } + want = append(want, config) + } + for i := range rdsTargets { + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: routeConfigTypeURL, + Name: rdsTargets[i], + VersionInfo: nackVersion, + XdsConfig: routeAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + } + if i == nackResourceIdx { + 
config.VersionInfo = ackVersion + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{ + Details: "blahblah", + VersionInfo: nackVersion, } - wantDump := &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: wantEndpoint, + } + want = append(want, config) + } + for i := range cdsTargets { + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: clusterTypeURL, + Name: cdsTargets[i], + VersionInfo: nackVersion, + XdsConfig: clusterAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + } + if i == nackResourceIdx { + config.VersionInfo = ackVersion + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{ + Details: "blahblah", + VersionInfo: nackVersion, } - if diff := cmp.Diff(config.EndpointConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) + } + want = append(want, config) + } + for i := range edsTargets { + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: endpointsTypeURL, + Name: edsTargets[i], + VersionInfo: nackVersion, + XdsConfig: endpointAnys[i], + ClientStatus: v3adminpb.ClientResourceStatus_ACKED, + } + if i == nackResourceIdx { + config.VersionInfo = ackVersion + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{ + Details: "blahblah", + VersionInfo: nackVersion, } - default: - return fmt.Errorf("unexpected PerXdsConfig: %+v; %v", cfg.PerXdsConfig, protoToJSON(r)) } + want = append(want, config) } - return nil -} - -func protoToJSON(p proto.Message) string { - mm := jsonpb.Marshaler{ - Indent: " ", + if diff := cmp.Diff(filterFields(r.Config[0].GenericXdsConfigs), want, cmpOpts); diff != "" { + return fmt.Errorf(diff) } - ret, _ := mm.MarshalToString(p) - return ret + return nil } func TestCSDSNoXDSClient(t *testing.T) { From d47437c91e798b5370e019660cd74c9c0c8e32fb Mon Sep 17 00:00:00 2001 From: Zach Reyes 
<39203661+zasweq@users.noreply.github.com> Date: Thu, 28 Oct 2021 12:16:33 -0400 Subject: [PATCH 309/998] xds: Fix invert functionality for header matcher (#4902) * Fix invert functionality for header matcher --- internal/xds/matcher/matcher_header.go | 79 ++++----- internal/xds/matcher/matcher_header_test.go | 186 ++++++++++++++++---- internal/xds/rbac/matchers.go | 17 +- xds/internal/xdsclient/matcher.go | 16 +- xds/internal/xdsclient/matcher_test.go | 12 +- 5 files changed, 208 insertions(+), 102 deletions(-) diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go index c3944373cd7f..fd4833d3fff8 100644 --- a/internal/xds/matcher/matcher_header.go +++ b/internal/xds/matcher/matcher_header.go @@ -50,13 +50,14 @@ func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { // HeaderExactMatcher matches on an exact match of the value of the header. type HeaderExactMatcher struct { - key string - exact string + key string + exact string + invert bool } // NewHeaderExactMatcher returns a new HeaderExactMatcher. -func NewHeaderExactMatcher(key, exact string) *HeaderExactMatcher { - return &HeaderExactMatcher{key: key, exact: exact} +func NewHeaderExactMatcher(key, exact string, invert bool) *HeaderExactMatcher { + return &HeaderExactMatcher{key: key, exact: exact, invert: invert} } // Match returns whether the passed in HTTP Headers match according to the @@ -66,7 +67,7 @@ func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { if !ok { return false } - return v == hem.exact + return (v == hem.exact) != hem.invert } func (hem *HeaderExactMatcher) String() string { @@ -76,13 +77,14 @@ func (hem *HeaderExactMatcher) String() string { // HeaderRegexMatcher matches on whether the entire request header value matches // the regex. type HeaderRegexMatcher struct { - key string - re *regexp.Regexp + key string + re *regexp.Regexp + invert bool } // NewHeaderRegexMatcher returns a new HeaderRegexMatcher. 
-func NewHeaderRegexMatcher(key string, re *regexp.Regexp) *HeaderRegexMatcher { - return &HeaderRegexMatcher{key: key, re: re} +func NewHeaderRegexMatcher(key string, re *regexp.Regexp, invert bool) *HeaderRegexMatcher { + return &HeaderRegexMatcher{key: key, re: re, invert: invert} } // Match returns whether the passed in HTTP Headers match according to the @@ -92,7 +94,7 @@ func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { if !ok { return false } - return grpcutil.FullMatchWithRegex(hrm.re, v) + return grpcutil.FullMatchWithRegex(hrm.re, v) != hrm.invert } func (hrm *HeaderRegexMatcher) String() string { @@ -104,11 +106,12 @@ func (hrm *HeaderRegexMatcher) String() string { type HeaderRangeMatcher struct { key string start, end int64 // represents [start, end). + invert bool } // NewHeaderRangeMatcher returns a new HeaderRangeMatcher. -func NewHeaderRangeMatcher(key string, start, end int64) *HeaderRangeMatcher { - return &HeaderRangeMatcher{key: key, start: start, end: end} +func NewHeaderRangeMatcher(key string, start, end int64, invert bool) *HeaderRangeMatcher { + return &HeaderRangeMatcher{key: key, start: start, end: end, invert: invert} } // Match returns whether the passed in HTTP Headers match according to the @@ -119,9 +122,9 @@ func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { return false } if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { - return true + return !hrm.invert } - return false + return hrm.invert } func (hrm *HeaderRangeMatcher) String() string { @@ -136,7 +139,10 @@ type HeaderPresentMatcher struct { } // NewHeaderPresentMatcher returns a new HeaderPresentMatcher. 
-func NewHeaderPresentMatcher(key string, present bool) *HeaderPresentMatcher { +func NewHeaderPresentMatcher(key string, present bool, invert bool) *HeaderPresentMatcher { + if invert { + present = !present + } return &HeaderPresentMatcher{key: key, present: present} } @@ -144,7 +150,7 @@ func NewHeaderPresentMatcher(key string, present bool) *HeaderPresentMatcher { // HeaderPresentMatcher. func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) - present := ok && len(vs) > 0 + present := ok && len(vs) > 0 // TODO: Are we sure we need this len(vs) > 0? return present == hpm.present } @@ -157,11 +163,12 @@ func (hpm *HeaderPresentMatcher) String() string { type HeaderPrefixMatcher struct { key string prefix string + invert bool } // NewHeaderPrefixMatcher returns a new HeaderPrefixMatcher. -func NewHeaderPrefixMatcher(key string, prefix string) *HeaderPrefixMatcher { - return &HeaderPrefixMatcher{key: key, prefix: prefix} +func NewHeaderPrefixMatcher(key string, prefix string, invert bool) *HeaderPrefixMatcher { + return &HeaderPrefixMatcher{key: key, prefix: prefix, invert: invert} } // Match returns whether the passed in HTTP Headers match according to the @@ -171,7 +178,7 @@ func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool { if !ok { return false } - return strings.HasPrefix(v, hpm.prefix) + return strings.HasPrefix(v, hpm.prefix) != hpm.invert } func (hpm *HeaderPrefixMatcher) String() string { @@ -183,11 +190,12 @@ func (hpm *HeaderPrefixMatcher) String() string { type HeaderSuffixMatcher struct { key string suffix string + invert bool } // NewHeaderSuffixMatcher returns a new HeaderSuffixMatcher. 
-func NewHeaderSuffixMatcher(key string, suffix string) *HeaderSuffixMatcher { - return &HeaderSuffixMatcher{key: key, suffix: suffix} +func NewHeaderSuffixMatcher(key string, suffix string, invert bool) *HeaderSuffixMatcher { + return &HeaderSuffixMatcher{key: key, suffix: suffix, invert: invert} } // Match returns whether the passed in HTTP Headers match according to the @@ -197,7 +205,7 @@ func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool { if !ok { return false } - return strings.HasSuffix(v, hsm.suffix) + return strings.HasSuffix(v, hsm.suffix) != hsm.invert } func (hsm *HeaderSuffixMatcher) String() string { @@ -209,14 +217,15 @@ func (hsm *HeaderSuffixMatcher) String() string { type HeaderContainsMatcher struct { key string contains string + invert bool } // NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP // Header key to match on, and contains is the value that the header should // should contain for a successful match. An empty contains string does not // work, use HeaderPresentMatcher in that case. -func NewHeaderContainsMatcher(key string, contains string) *HeaderContainsMatcher { - return &HeaderContainsMatcher{key: key, contains: contains} +func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderContainsMatcher { + return &HeaderContainsMatcher{key: key, contains: contains, invert: invert} } // Match returns whether the passed in HTTP Headers match according to the @@ -226,29 +235,9 @@ func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { if !ok { return false } - return strings.Contains(v, hcm.contains) + return strings.Contains(v, hcm.contains) != hcm.invert } func (hcm *HeaderContainsMatcher) String() string { return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains) } - -// InvertMatcher inverts the match result of the underlying header matcher. -type InvertMatcher struct { - m HeaderMatcher -} - -// NewInvertMatcher returns a new InvertMatcher. 
-func NewInvertMatcher(m HeaderMatcher) *InvertMatcher { - return &InvertMatcher{m: m} -} - -// Match returns whether the passed in HTTP Headers match according to the -// InvertMatcher. -func (i *InvertMatcher) Match(md metadata.MD) bool { - return !i.m.Match(md) -} - -func (i *InvertMatcher) String() string { - return fmt.Sprintf("invert{%s}", i.m) -} diff --git a/internal/xds/matcher/matcher_header_test.go b/internal/xds/matcher/matcher_header_test.go index 7e78065212cf..f567f3198242 100644 --- a/internal/xds/matcher/matcher_header_test.go +++ b/internal/xds/matcher/matcher_header_test.go @@ -31,6 +31,7 @@ func TestHeaderExactMatcherMatch(t *testing.T) { key, exact string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -61,10 +62,34 @@ func TestHeaderExactMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, + { + name: "invert header not present", + key: "th", + exact: "tv", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { + name: "invert header match", + key: "th", + exact: "tv", + md: metadata.Pairs("th", "tv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + exact: "tv", + md: metadata.Pairs("th", "tvv"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hem := NewHeaderExactMatcher(tt.key, tt.exact) + hem := NewHeaderExactMatcher(tt.key, tt.exact, tt.invert) if got := hem.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } @@ -78,6 +103,7 @@ func TestHeaderRegexMatcherMatch(t *testing.T) { key, regexStr string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -121,10 +147,34 @@ func TestHeaderRegexMatcherMatch(t *testing.T) { md: metadata.Pairs("header", "aa"), want: true, }, + { + name: "invert header not present", + key: "th", + regexStr: "^t+v*$", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { 
+ name: "invert header match", + key: "th", + regexStr: "^t+v*$", + md: metadata.Pairs("th", "tttvv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + regexStr: "^t+v*$", + md: metadata.Pairs("th", "abc"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hrm := NewHeaderRegexMatcher(tt.key, regexp.MustCompile(tt.regexStr)) + hrm := NewHeaderRegexMatcher(tt.key, regexp.MustCompile(tt.regexStr), tt.invert) if got := hrm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } @@ -139,6 +189,7 @@ func TestHeaderRangeMatcherMatch(t *testing.T) { start, end int64 md metadata.MD want bool + invert bool }{ { name: "match", @@ -168,10 +219,34 @@ func TestHeaderRangeMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "-5"), want: true, }, + { + name: "invert header not present", + key: "th", + start: 1, end: 10, + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { + name: "invert header match", + key: "th", + start: 1, end: 10, + md: metadata.Pairs("th", "5"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + start: 1, end: 9, + md: metadata.Pairs("th", "10"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hrm := NewHeaderRangeMatcher(tt.key, tt.start, tt.end) + hrm := NewHeaderRangeMatcher(tt.key, tt.start, tt.end, tt.invert) if got := hrm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } @@ -186,6 +261,7 @@ func TestHeaderPresentMatcherMatch(t *testing.T) { present bool md metadata.MD want bool + invert bool }{ { name: "want present is present", @@ -215,10 +291,34 @@ func TestHeaderPresentMatcherMatch(t *testing.T) { md: metadata.Pairs("abc", "tv"), want: true, }, + { + name: "invert header not present", + key: "th", + present: true, + md: metadata.Pairs(":method", "GET"), + want: true, + invert: true, 
+ }, + { + name: "invert header match", + key: "th", + present: true, + md: metadata.Pairs("th", "tv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + present: true, + md: metadata.Pairs(":method", "GET"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hpm := NewHeaderPresentMatcher(tt.key, tt.present) + hpm := NewHeaderPresentMatcher(tt.key, tt.present, tt.invert) if got := hpm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } @@ -232,6 +332,7 @@ func TestHeaderPrefixMatcherMatch(t *testing.T) { key, prefix string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -261,10 +362,34 @@ func TestHeaderPrefixMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, + { + name: "invert header not present", + key: "th", + prefix: "tv", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { + name: "invert header match", + key: "th", + prefix: "tv", + md: metadata.Pairs("th", "tv123"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + prefix: "tv", + md: metadata.Pairs("th", "abc"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hpm := NewHeaderPrefixMatcher(tt.key, tt.prefix) + hpm := NewHeaderPrefixMatcher(tt.key, tt.prefix, tt.invert) if got := hpm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } @@ -278,6 +403,7 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { key, suffix string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -307,40 +433,36 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - hsm := NewHeaderSuffixMatcher(tt.key, tt.suffix) - if got := hsm.Match(tt.md); got != tt.want { - 
t.Errorf("match() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestInvertMatcherMatch(t *testing.T) { - tests := []struct { - name string - m HeaderMatcher - md metadata.MD - }{ { - name: "true->false", - m: NewHeaderExactMatcher("th", "tv"), - md: metadata.Pairs("th", "tv"), + name: "invert header not present", + key: "th", + suffix: "tv", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, }, { - name: "false->true", - m: NewHeaderExactMatcher("th", "abc"), - md: metadata.Pairs("th", "tv"), + name: "invert header match", + key: "th", + suffix: "tv", + md: metadata.Pairs("th", "123tv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + suffix: "tv", + md: metadata.Pairs("th", "abc"), + want: true, + invert: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := NewInvertMatcher(tt.m).Match(tt.md) - want := !tt.m.Match(tt.md) - if got != want { - t.Errorf("match() = %v, want %v", got, want) + hsm := NewHeaderSuffixMatcher(tt.key, tt.suffix, tt.invert) + if got := hsm.Match(tt.md); got != tt.want { + t.Errorf("match() = %v, want %v", got, tt.want) } }) } diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 6129a292d23e..6f30c8016e2b 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -265,29 +265,26 @@ func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) ( var m internalmatcher.HeaderMatcher switch headerMatcherConfig.HeaderMatchSpecifier.(type) { case *v3route_componentspb.HeaderMatcher_ExactMatch: - m = internalmatcher.NewHeaderExactMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetExactMatch()) + m = internalmatcher.NewHeaderExactMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetExactMatch(), headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_SafeRegexMatch: regex, err := regexp.Compile(headerMatcherConfig.GetSafeRegexMatch().Regex) if err != nil { 
return nil, err } - m = internalmatcher.NewHeaderRegexMatcher(headerMatcherConfig.Name, regex) + m = internalmatcher.NewHeaderRegexMatcher(headerMatcherConfig.Name, regex, headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_RangeMatch: - m = internalmatcher.NewHeaderRangeMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetRangeMatch().Start, headerMatcherConfig.GetRangeMatch().End) + m = internalmatcher.NewHeaderRangeMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetRangeMatch().Start, headerMatcherConfig.GetRangeMatch().End, headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_PresentMatch: - m = internalmatcher.NewHeaderPresentMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPresentMatch()) + m = internalmatcher.NewHeaderPresentMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPresentMatch(), headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_PrefixMatch: - m = internalmatcher.NewHeaderPrefixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPrefixMatch()) + m = internalmatcher.NewHeaderPrefixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPrefixMatch(), headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_SuffixMatch: - m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch()) + m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch(), headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_ContainsMatch: - m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch()) + m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch(), headerMatcherConfig.InvertMatch) default: return nil, errors.New("unknown header matcher type") } - if headerMatcherConfig.InvertMatch { - m = internalmatcher.NewInvertMatcher(m) - } return 
&headerMatcher{matcher: m}, nil } diff --git a/xds/internal/xdsclient/matcher.go b/xds/internal/xdsclient/matcher.go index e663e02769f8..85fff30638e6 100644 --- a/xds/internal/xdsclient/matcher.go +++ b/xds/internal/xdsclient/matcher.go @@ -46,25 +46,23 @@ func RouteToMatcher(r *Route) (*CompositeMatcher, error) { headerMatchers := make([]matcher.HeaderMatcher, 0, len(r.Headers)) for _, h := range r.Headers { var matcherT matcher.HeaderMatcher + invert := h.InvertMatch != nil && *h.InvertMatch switch { case h.ExactMatch != nil && *h.ExactMatch != "": - matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch) + matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch, invert) case h.RegexMatch != nil: - matcherT = matcher.NewHeaderRegexMatcher(h.Name, h.RegexMatch) + matcherT = matcher.NewHeaderRegexMatcher(h.Name, h.RegexMatch, invert) case h.PrefixMatch != nil && *h.PrefixMatch != "": - matcherT = matcher.NewHeaderPrefixMatcher(h.Name, *h.PrefixMatch) + matcherT = matcher.NewHeaderPrefixMatcher(h.Name, *h.PrefixMatch, invert) case h.SuffixMatch != nil && *h.SuffixMatch != "": - matcherT = matcher.NewHeaderSuffixMatcher(h.Name, *h.SuffixMatch) + matcherT = matcher.NewHeaderSuffixMatcher(h.Name, *h.SuffixMatch, invert) case h.RangeMatch != nil: - matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End) + matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End, invert) case h.PresentMatch != nil: - matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch) + matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch, invert) default: return nil, fmt.Errorf("illegal route: missing header_match_specifier") } - if h.InvertMatch != nil && *h.InvertMatch { - matcherT = matcher.NewInvertMatcher(matcherT) - } headerMatchers = append(headerMatchers, matcherT) } diff --git a/xds/internal/xdsclient/matcher_test.go b/xds/internal/xdsclient/matcher_test.go index f750d07d6e4f..724fa8269582 
100644 --- a/xds/internal/xdsclient/matcher_test.go +++ b/xds/internal/xdsclient/matcher_test.go @@ -40,7 +40,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "both match", pm: newPathExactMatcher("/a/b", false), - hm: matcher.NewHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -50,7 +50,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "both match with path case insensitive", pm: newPathExactMatcher("/A/B", true), - hm: matcher.NewHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -60,7 +60,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "only one match", pm: newPathExactMatcher("/a/b", false), - hm: matcher.NewHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv", false), info: iresolver.RPCInfo{ Method: "/z/y", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -70,7 +70,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "both not match", pm: newPathExactMatcher("/z/y", false), - hm: matcher.NewHeaderExactMatcher("th", "abc"), + hm: matcher.NewHeaderExactMatcher("th", "abc", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -80,7 +80,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "fake header", pm: newPathPrefixMatcher("/", false), - hm: matcher.NewHeaderExactMatcher("content-type", "fake"), + hm: matcher.NewHeaderExactMatcher("content-type", "fake", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs( @@ -92,7 +92,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "binary 
header", pm: newPathPrefixMatcher("/", false), - hm: matcher.NewHeaderPresentMatcher("t-bin", true), + hm: matcher.NewHeaderPresentMatcher("t-bin", true, false), info: iresolver.RPCInfo{ Method: "/a/b", Context: grpcutil.WithExtraMetadata( From 2d7bdf2d2327ce888df559ca90db6bac53ef8b99 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 29 Oct 2021 13:17:49 -0400 Subject: [PATCH 310/998] xds: Set RBAC on by default (#4909) --- internal/xds/env/env.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 2977bfa62857..87d3c2433a4f 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -83,8 +83,10 @@ var ( // RetrySupport indicates whether xDS retry is enabled. RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") - // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled. - RBACSupport = strings.EqualFold(os.Getenv(rbacSupportEnv), "true") + // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + RBACSupport = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") // C2PResolverSupport indicates whether support for C2P resolver is enabled. 
// This can be enabled by setting the environment variable From bb655a914d276cab79dbd0769c4892f204d234d4 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Oct 2021 11:45:00 -0700 Subject: [PATCH 311/998] internal: update rls.pb.go (#4913) --- .../proto/grpc_lookup_v1/rls_config.pb.go | 141 +++++++++++++----- 1 file changed, 106 insertions(+), 35 deletions(-) diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go b/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go index 414b74cdb3b5..7e4c932e20ff 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -486,6 +486,56 @@ func (x *RouteLookupConfig) GetDefaultTarget() string { return "" } +// RouteLookupClusterSpecifier is used in xDS to represent a cluster specifier +// plugin for RLS. +type RouteLookupClusterSpecifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The RLS config for this cluster specifier plugin instance. 
+ RouteLookupConfig *RouteLookupConfig `protobuf:"bytes,1,opt,name=route_lookup_config,json=routeLookupConfig,proto3" json:"route_lookup_config,omitempty"` +} + +func (x *RouteLookupClusterSpecifier) Reset() { + *x = RouteLookupClusterSpecifier{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupClusterSpecifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupClusterSpecifier) ProtoMessage() {} + +func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupClusterSpecifier.ProtoReflect.Descriptor instead. +func (*RouteLookupClusterSpecifier) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{4} +} + +func (x *RouteLookupClusterSpecifier) GetRouteLookupConfig() *RouteLookupConfig { + if x != nil { + return x.RouteLookupConfig + } + return nil +} + // To match, one of the given Name fields must match; the service and method // fields are specified as fixed strings. The service name is required and // includes the proto package name. 
The method name may be omitted, in @@ -502,7 +552,7 @@ type GrpcKeyBuilder_Name struct { func (x *GrpcKeyBuilder_Name) Reset() { *x = GrpcKeyBuilder_Name{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -515,7 +565,7 @@ func (x *GrpcKeyBuilder_Name) String() string { func (*GrpcKeyBuilder_Name) ProtoMessage() {} func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -564,7 +614,7 @@ type GrpcKeyBuilder_ExtraKeys struct { func (x *GrpcKeyBuilder_ExtraKeys) Reset() { *x = GrpcKeyBuilder_ExtraKeys{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -577,7 +627,7 @@ func (x *GrpcKeyBuilder_ExtraKeys) String() string { func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {} func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -716,13 +766,20 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 
0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, - 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, + 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, + 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x33, } var ( @@ -737,36 +794,38 @@ func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte { return file_grpc_lookup_v1_rls_config_proto_rawDescData } -var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_grpc_lookup_v1_rls_config_proto_goTypes = []interface{}{ - (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher - (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder - (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder - (*RouteLookupConfig)(nil), // 3: grpc.lookup.v1.RouteLookupConfig - (*GrpcKeyBuilder_Name)(nil), // 4: grpc.lookup.v1.GrpcKeyBuilder.Name - (*GrpcKeyBuilder_ExtraKeys)(nil), // 5: grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys - nil, // 6: grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry - nil, // 7: grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry - (*durationpb.Duration)(nil), // 8: google.protobuf.Duration + (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher + (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder + (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder + (*RouteLookupConfig)(nil), // 3: grpc.lookup.v1.RouteLookupConfig + (*RouteLookupClusterSpecifier)(nil), // 4: grpc.lookup.v1.RouteLookupClusterSpecifier + (*GrpcKeyBuilder_Name)(nil), // 5: grpc.lookup.v1.GrpcKeyBuilder.Name + (*GrpcKeyBuilder_ExtraKeys)(nil), // 6: grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + nil, // 7: grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + nil, // 8: grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration } var file_grpc_lookup_v1_rls_config_proto_depIdxs = []int32{ - 4, // 0: grpc.lookup.v1.GrpcKeyBuilder.names:type_name -> grpc.lookup.v1.GrpcKeyBuilder.Name - 5, // 1: grpc.lookup.v1.GrpcKeyBuilder.extra_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + 5, // 0: grpc.lookup.v1.GrpcKeyBuilder.names:type_name -> 
grpc.lookup.v1.GrpcKeyBuilder.Name + 6, // 1: grpc.lookup.v1.GrpcKeyBuilder.extra_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys 0, // 2: grpc.lookup.v1.GrpcKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher - 6, // 3: grpc.lookup.v1.GrpcKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + 7, // 3: grpc.lookup.v1.GrpcKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry 0, // 4: grpc.lookup.v1.HttpKeyBuilder.query_parameters:type_name -> grpc.lookup.v1.NameMatcher 0, // 5: grpc.lookup.v1.HttpKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher - 7, // 6: grpc.lookup.v1.HttpKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + 8, // 6: grpc.lookup.v1.HttpKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry 2, // 7: grpc.lookup.v1.RouteLookupConfig.http_keybuilders:type_name -> grpc.lookup.v1.HttpKeyBuilder 1, // 8: grpc.lookup.v1.RouteLookupConfig.grpc_keybuilders:type_name -> grpc.lookup.v1.GrpcKeyBuilder - 8, // 9: grpc.lookup.v1.RouteLookupConfig.lookup_service_timeout:type_name -> google.protobuf.Duration - 8, // 10: grpc.lookup.v1.RouteLookupConfig.max_age:type_name -> google.protobuf.Duration - 8, // 11: grpc.lookup.v1.RouteLookupConfig.stale_age:type_name -> google.protobuf.Duration - 12, // [12:12] is the sub-list for method output_type - 12, // [12:12] is the sub-list for method input_type - 12, // [12:12] is the sub-list for extension type_name - 12, // [12:12] is the sub-list for extension extendee - 0, // [0:12] is the sub-list for field type_name + 9, // 9: grpc.lookup.v1.RouteLookupConfig.lookup_service_timeout:type_name -> google.protobuf.Duration + 9, // 10: grpc.lookup.v1.RouteLookupConfig.max_age:type_name -> google.protobuf.Duration + 9, // 11: grpc.lookup.v1.RouteLookupConfig.stale_age:type_name -> google.protobuf.Duration + 3, // 12: 
grpc.lookup.v1.RouteLookupClusterSpecifier.route_lookup_config:type_name -> grpc.lookup.v1.RouteLookupConfig + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name } func init() { file_grpc_lookup_v1_rls_config_proto_init() } @@ -824,7 +883,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { } } file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcKeyBuilder_Name); i { + switch v := v.(*RouteLookupClusterSpecifier); i { case 0: return &v.state case 1: @@ -836,6 +895,18 @@ func file_grpc_lookup_v1_rls_config_proto_init() { } } file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcKeyBuilder_Name); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { case 0: return &v.state @@ -854,7 +925,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_lookup_v1_rls_config_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 9, NumExtensions: 0, NumServices: 0, }, From d6aca733b397729771c5869b785523269696d2d6 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Oct 2021 11:58:36 -0700 Subject: [PATCH 312/998] Change version to 1.43.0-dev (#4912) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 6ba1fd5bdb76..e0d75bb7d94f 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.42.0-dev" +const Version = "1.43.0-dev" From 6d465fe912e45eb50cac4788b336f950bb244425 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 1 Nov 2021 11:04:24 -0700 Subject: [PATCH 313/998] grpclb: move restartableListener to testutils (#4919) --- balancer/grpclb/grpclb_test.go | 12 +-- balancer/grpclb/grpclb_test_util_test.go | 86 ------------------- internal/testutils/restartable_listener.go | 99 ++++++++++++++++++++++ 3 files changed, 105 insertions(+), 92 deletions(-) delete mode 100644 balancer/grpclb/grpclb_test_util_test.go create mode 100644 internal/testutils/restartable_listener.go diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 22aa8f1b868a..1807edb9cb71 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -369,7 +369,7 @@ func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent st beIPs = append(beIPs, beLis.Addr().(*net.TCPAddr).IP) bePorts = append(bePorts, beLis.Addr().(*net.TCPAddr).Port) - beListeners = append(beListeners, newRestartableListener(beLis)) + beListeners = append(beListeners, testutils.NewRestartableListener(beLis)) } backends := startBackends(beServerName, false, beListeners...) @@ -379,7 +379,7 @@ func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent st err = fmt.Errorf("failed to create the listener for the load balancer %v", err) return } - lbLis = newRestartableListener(lbLis) + lbLis = testutils.NewRestartableListener(lbLis) lbCreds := &serverNameCheckCreds{ sn: lbServerName, } @@ -846,8 +846,8 @@ func (s) TestFallback(t *testing.T) { } // Close backend and remote balancer connections, should use fallback. 
- tss.beListeners[0].(*restartableListener).stopPreviousConns() - tss.lbListener.(*restartableListener).stopPreviousConns() + tss.beListeners[0].(*testutils.RestartableListener).Stop() + tss.lbListener.(*testutils.RestartableListener).Stop() var fallbackUsed bool for i := 0; i < 2000; i++ { @@ -871,8 +871,8 @@ func (s) TestFallback(t *testing.T) { } // Restart backend and remote balancer, should not use fallback backend. - tss.beListeners[0].(*restartableListener).restart() - tss.lbListener.(*restartableListener).restart() + tss.beListeners[0].(*testutils.RestartableListener).Restart() + tss.lbListener.(*testutils.RestartableListener).Restart() tss.ls.sls <- sl var backendUsed2 bool diff --git a/balancer/grpclb/grpclb_test_util_test.go b/balancer/grpclb/grpclb_test_util_test.go deleted file mode 100644 index c143e9617543..000000000000 --- a/balancer/grpclb/grpclb_test_util_test.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpclb - -import ( - "net" - "sync" -) - -type tempError struct{} - -func (*tempError) Error() string { - return "grpclb test temporary error" -} -func (*tempError) Temporary() bool { - return true -} - -type restartableListener struct { - net.Listener - addr string - - mu sync.Mutex - closed bool - conns []net.Conn -} - -func newRestartableListener(l net.Listener) *restartableListener { - return &restartableListener{ - Listener: l, - addr: l.Addr().String(), - } -} - -func (l *restartableListener) Accept() (net.Conn, error) { - conn, err := l.Listener.Accept() - if err != nil { - return nil, err - } - - l.mu.Lock() - defer l.mu.Unlock() - if l.closed { - conn.Close() - return nil, &tempError{} - } - l.conns = append(l.conns, conn) - return conn, nil -} - -func (l *restartableListener) Close() error { - return l.Listener.Close() -} - -func (l *restartableListener) stopPreviousConns() { - l.mu.Lock() - l.closed = true - tmp := l.conns - l.conns = nil - l.mu.Unlock() - for _, conn := range tmp { - conn.Close() - } -} - -func (l *restartableListener) restart() { - l.mu.Lock() - l.closed = false - l.mu.Unlock() -} diff --git a/internal/testutils/restartable_listener.go b/internal/testutils/restartable_listener.go new file mode 100644 index 000000000000..1f5019391911 --- /dev/null +++ b/internal/testutils/restartable_listener.go @@ -0,0 +1,99 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package testutils + +import ( + "net" + "sync" +) + +type tempError struct{} + +func (*tempError) Error() string { + return "restartable listener temporary error" +} +func (*tempError) Temporary() bool { + return true +} + +// RestartableListener wraps a net.Listener and supports stopping and restarting +// the latter. +type RestartableListener struct { + lis net.Listener + + mu sync.Mutex + stopped bool + conns []net.Conn +} + +// NewRestartableListener returns a new RestartableListener wrapping l. +func NewRestartableListener(l net.Listener) *RestartableListener { + return &RestartableListener{lis: l} +} + +// Accept waits for and returns the next connection to the listener. +// +// If the listener is currently not accepting new connections, because `Stop` +// was called on it, the connection is immediately closed after accepting +// without any bytes being sent on it. +func (l *RestartableListener) Accept() (net.Conn, error) { + conn, err := l.lis.Accept() + if err != nil { + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stopped { + conn.Close() + return nil, &tempError{} + } + l.conns = append(l.conns, conn) + return conn, nil +} + +// Close closes the listener. +func (l *RestartableListener) Close() error { + return l.lis.Close() +} + +// Addr returns the listener's network address. +func (l *RestartableListener) Addr() net.Addr { + return l.lis.Addr() +} + +// Stop closes existing connections on the listener and prevents new connections +// from being accepted. +func (l *RestartableListener) Stop() { + l.mu.Lock() + l.stopped = true + tmp := l.conns + l.conns = nil + l.mu.Unlock() + for _, conn := range tmp { + conn.Close() + } +} + +// Restart gets a previously stopped listener to start accepting connections. 
+func (l *RestartableListener) Restart() { + l.mu.Lock() + l.stopped = false + l.mu.Unlock() +} From 467630fc2490f88997b233c309109e51eaa0ba25 Mon Sep 17 00:00:00 2001 From: Hrishi Hiraskar Date: Mon, 1 Nov 2021 23:36:39 +0530 Subject: [PATCH 314/998] examples: remove usage of WithBlock in examples (#4858) --- examples/examples_test.sh | 48 +++++++++++++++++-- .../features/authentication/client/main.go | 1 - examples/features/compression/client/main.go | 2 +- examples/features/deadline/client/main.go | 2 +- examples/features/debugging/client/main.go | 16 ++++--- .../features/encryption/ALTS/client/main.go | 2 +- .../features/encryption/TLS/client/main.go | 2 +- examples/features/errors/client/main.go | 2 +- examples/features/interceptor/client/main.go | 2 +- .../features/load_balancing/client/main.go | 2 - examples/features/metadata/client/main.go | 2 +- examples/features/multiplex/client/main.go | 2 +- .../features/name_resolving/client/main.go | 2 - .../features/unix_abstract/client/main.go | 22 +++++---- .../features/unix_abstract/server/main.go | 15 ++++-- examples/helloworld/greeter_client/main.go | 17 +++---- examples/helloworld/greeter_server/main.go | 9 ++-- examples/route_guide/client/client.go | 3 +- 18 files changed, 101 insertions(+), 50 deletions(-) diff --git a/examples/examples_test.sh b/examples/examples_test.sh index f5c82d062b22..bde2837f659b 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -20,6 +20,9 @@ set +e export TMPDIR=$(mktemp -d) trap "rm -rf ${TMPDIR}" EXIT +export SERVER_PORT=50051 +export UNIX_ADDR=abstract-unix-socket + clean () { for i in {1..10}; do jobs -p | xargs -n1 pkill -P @@ -61,6 +64,36 @@ EXAMPLES=( "features/unix_abstract" ) +declare -A SERVER_ARGS=( + ["features/unix_abstract"]="-addr $UNIX_ADDR" + ["default"]="-port $SERVER_PORT" +) + +declare -A CLIENT_ARGS=( + ["features/unix_abstract"]="-addr $UNIX_ADDR" + ["default"]="-addr localhost:$SERVER_PORT" +) + +declare -A SERVER_WAIT_COMMAND=( + 
["features/unix_abstract"]="lsof -U | grep $UNIX_ADDR" + ["default"]="lsof -i :$SERVER_PORT | grep $SERVER_PORT" +) + +wait_for_server () { + example=$1 + wait_command=${SERVER_WAIT_COMMAND[$example]:-${SERVER_WAIT_COMMAND["default"]}} + echo "$(tput setaf 4) waiting for server to start $(tput sgr 0)" + for i in {1..10}; do + eval "$wait_command" 2>&1 &>/dev/null + if [ $? -eq 0 ]; then + pass "server started" + return + fi + sleep 1 + done + fail "cannot determine if server started" +} + declare -A EXPECTED_SERVER_OUTPUT=( ["helloworld"]="Received: world" ["route_guide"]="" @@ -105,6 +138,13 @@ for example in ${EXAMPLES[@]}; do pass "successfully built server" fi + # Start server + SERVER_LOG="$(mktemp)" + server_args=${SERVER_ARGS[$example]:-${SERVER_ARGS["default"]}} + go run ./$example/*server/*.go $server_args &> $SERVER_LOG & + + wait_for_server $example + # Build client if ! go build -o /dev/null ./${example}/*client/*.go; then fail "failed to build client" @@ -112,12 +152,10 @@ for example in ${EXAMPLES[@]}; do pass "successfully built client" fi - # Start server - SERVER_LOG="$(mktemp)" - go run ./$example/*server/*.go &> $SERVER_LOG & - + # Start client CLIENT_LOG="$(mktemp)" - if ! timeout 20 go run ${example}/*client/*.go &> $CLIENT_LOG; then + client_args=${CLIENT_ARGS[$example]:-${CLIENT_ARGS["default"]}} + if ! timeout 20 go run ${example}/*client/*.go $client_args &> $CLIENT_LOG; then fail "client failed to communicate with server got server log: $(cat $SERVER_LOG) diff --git a/examples/features/authentication/client/main.go b/examples/features/authentication/client/main.go index 0c5c9d948e37..ec46f2c52da4 100644 --- a/examples/features/authentication/client/main.go +++ b/examples/features/authentication/client/main.go @@ -66,7 +66,6 @@ func main() { grpc.WithTransportCredentials(creds), } - opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(*addr, opts...) 
if err != nil { log.Fatalf("did not connect: %v", err) diff --git a/examples/features/compression/client/main.go b/examples/features/compression/client/main.go index df6d825a3ee5..4375c5d7ef93 100644 --- a/examples/features/compression/client/main.go +++ b/examples/features/compression/client/main.go @@ -37,7 +37,7 @@ func main() { flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/deadline/client/main.go b/examples/features/deadline/client/main.go index 026ce96f429a..0e2626130cec 100644 --- a/examples/features/deadline/client/main.go +++ b/examples/features/deadline/client/main.go @@ -72,7 +72,7 @@ func streamingCall(c pb.EchoClient, requestID int, message string, want codes.Co func main() { flag.Parse() - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/debugging/client/main.go b/examples/features/debugging/client/main.go index faf6b5d5fa2f..373f7db08e91 100644 --- a/examples/features/debugging/client/main.go +++ b/examples/features/debugging/client/main.go @@ -21,9 +21,9 @@ package main import ( "context" + "flag" "log" "net" - "os" "time" "google.golang.org/grpc" @@ -38,9 +38,15 @@ const ( defaultName = "world" ) +var ( + addr = flag.String("addr", "localhost:50051", "the address to connect to") + name = flag.String("name", defaultName, "Name to greet") +) + func main() { + flag.Parse() /***** Set up the server serving channelz service. *****/ - lis, err := net.Listen("tcp", ":50052") + lis, err := net.Listen("tcp", *addr) if err != nil { log.Fatalf("failed to listen: %v", err) } @@ -64,17 +70,13 @@ func main() { c := pb.NewGreeterClient(conn) // Contact the server and print out its response. 
- name := defaultName - if len(os.Args) > 1 { - name = os.Args[1] - } /***** Make 100 SayHello RPCs *****/ for i := 0; i < 100; i++ { // Setting a 150ms timeout on the RPC. ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond) defer cancel() - r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) if err != nil { log.Printf("could not greet: %v", err) } else { diff --git a/examples/features/encryption/ALTS/client/main.go b/examples/features/encryption/ALTS/client/main.go index e2654f5865ff..aa090807ba34 100644 --- a/examples/features/encryption/ALTS/client/main.go +++ b/examples/features/encryption/ALTS/client/main.go @@ -50,7 +50,7 @@ func main() { altsTC := alts.NewClientCreds(alts.DefaultClientOptions()) // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(altsTC), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(altsTC)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/encryption/TLS/client/main.go b/examples/features/encryption/TLS/client/main.go index 718196b1bb41..4f78ccca0366 100644 --- a/examples/features/encryption/TLS/client/main.go +++ b/examples/features/encryption/TLS/client/main.go @@ -54,7 +54,7 @@ func main() { } // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/errors/client/main.go b/examples/features/errors/client/main.go index b87fb48a9bbd..4bacff5f3e5b 100644 --- a/examples/features/errors/client/main.go +++ b/examples/features/errors/client/main.go @@ -38,7 +38,7 @@ func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/interceptor/client/main.go b/examples/features/interceptor/client/main.go index 0c2015169d17..eba69b3c9887 100644 --- a/examples/features/interceptor/client/main.go +++ b/examples/features/interceptor/client/main.go @@ -153,7 +153,7 @@ func main() { } // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/load_balancing/client/main.go b/examples/features/load_balancing/client/main.go index 72c5ad9c1468..5dd0ddfc7d08 100644 --- a/examples/features/load_balancing/client/main.go +++ b/examples/features/load_balancing/client/main.go @@ -59,7 +59,6 @@ func main() { pickfirstConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), grpc.WithInsecure(), - grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -76,7 +75,6 @@ func main() { fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), // This sets the initial balancing policy. 
grpc.WithInsecure(), - grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) diff --git a/examples/features/metadata/client/main.go b/examples/features/metadata/client/main.go index 715fb6f5acbd..3aa3a599c2dd 100644 --- a/examples/features/metadata/client/main.go +++ b/examples/features/metadata/client/main.go @@ -286,7 +286,7 @@ const message = "this is examples/metadata" func main() { flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/multiplex/client/main.go b/examples/features/multiplex/client/main.go index 72d6cd56775b..e25bb7a838bb 100644 --- a/examples/features/multiplex/client/main.go +++ b/examples/features/multiplex/client/main.go @@ -58,7 +58,7 @@ func callUnaryEcho(client ecpb.EchoClient, message string) { func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/name_resolving/client/main.go b/examples/features/name_resolving/client/main.go index 1c56dcce15df..25bd5fd46a75 100644 --- a/examples/features/name_resolving/client/main.go +++ b/examples/features/name_resolving/client/main.go @@ -58,7 +58,6 @@ func main() { passthroughConn, err := grpc.Dial( fmt.Sprintf("passthrough:///%s", backendAddr), // Dial to "passthrough:///localhost:50051" grpc.WithInsecure(), - grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -73,7 +72,6 @@ func main() { exampleConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), // Dial to "example:///resolver.example.grpc.io" grpc.WithInsecure(), - grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) diff --git a/examples/features/unix_abstract/client/main.go b/examples/features/unix_abstract/client/main.go index 4f48aca9bdfd..96c6f82bf19b 100644 --- a/examples/features/unix_abstract/client/main.go +++ b/examples/features/unix_abstract/client/main.go @@ -25,6 +25,7 @@ package main import ( "context" + "flag" "fmt" "log" "time" @@ -33,6 +34,14 @@ import ( ecpb "google.golang.org/grpc/examples/features/proto/echo" ) +var ( + // A dial target of `unix:@abstract-unix-socket` should also work fine for + // this example because of golang conventions (net.Dial behavior). But we do + // not recommend this since we explicitly added the `unix-abstract` scheme + // for cross-language compatibility. 
+ addr = flag.String("addr", "abstract-unix-socket", "The unix abstract socket address") +) + func callUnaryEcho(c ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -51,18 +60,15 @@ func makeRPCs(cc *grpc.ClientConn, n int) { } func main() { - // A dial target of `unix:@abstract-unix-socket` should also work fine for - // this example because of golang conventions (net.Dial behavior). But we do - // not recommend this since we explicitly added the `unix-abstract` scheme - // for cross-language compatibility. - addr := "unix-abstract:abstract-unix-socket" - cc, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + flag.Parse() + sockAddr := fmt.Sprintf("unix-abstract:%v", *addr) + cc, err := grpc.Dial(sockAddr, grpc.WithInsecure()) if err != nil { - log.Fatalf("grpc.Dial(%q) failed: %v", addr, err) + log.Fatalf("grpc.Dial(%q) failed: %v", sockAddr, err) } defer cc.Close() - fmt.Printf("--- calling echo.Echo/UnaryEcho to %s\n", addr) + fmt.Printf("--- calling echo.Echo/UnaryEcho to %s\n", sockAddr) makeRPCs(cc, 10) fmt.Println() } diff --git a/examples/features/unix_abstract/server/main.go b/examples/features/unix_abstract/server/main.go index a82b957c1f07..4ef4ff5b7637 100644 --- a/examples/features/unix_abstract/server/main.go +++ b/examples/features/unix_abstract/server/main.go @@ -25,6 +25,7 @@ package main import ( "context" + "flag" "fmt" "log" "net" @@ -34,6 +35,10 @@ import ( pb "google.golang.org/grpc/examples/features/proto/echo" ) +var ( + addr = flag.String("addr", "abstract-unix-socket", "The unix abstract socket address") +) + type ecServer struct { pb.UnimplementedEchoServer addr string @@ -44,13 +49,15 @@ func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.Echo } func main() { - netw, addr := "unix", "\x00abstract-unix-socket" - lis, err := net.Listen(netw, addr) + flag.Parse() + netw := "unix" + socketAddr := fmt.Sprintf("\x00%v", *addr) + lis, 
err := net.Listen(netw, socketAddr) if err != nil { - log.Fatalf("net.Listen(%q, %q) failed: %v", netw, addr, err) + log.Fatalf("net.Listen(%q, %q) failed: %v", netw, socketAddr, err) } s := grpc.NewServer() - pb.RegisterEchoServer(s, &ecServer{addr: addr}) + pb.RegisterEchoServer(s, &ecServer{addr: socketAddr}) log.Printf("serving on %s\n", lis.Addr().String()) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) diff --git a/examples/helloworld/greeter_client/main.go b/examples/helloworld/greeter_client/main.go index 0ca4cbaa344c..b27b7da43dd1 100644 --- a/examples/helloworld/greeter_client/main.go +++ b/examples/helloworld/greeter_client/main.go @@ -21,8 +21,8 @@ package main import ( "context" + "flag" "log" - "os" "time" "google.golang.org/grpc" @@ -30,13 +30,18 @@ import ( ) const ( - address = "localhost:50051" defaultName = "world" ) +var ( + addr = flag.String("addr", "localhost:50051", "the address to connect to") + name = flag.String("name", defaultName, "Name to greet") +) + func main() { + flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } @@ -44,13 +49,9 @@ func main() { c := pb.NewGreeterClient(conn) // Contact the server and print out its response. 
- name := defaultName - if len(os.Args) > 1 { - name = os.Args[1] - } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) if err != nil { log.Fatalf("could not greet: %v", err) } diff --git a/examples/helloworld/greeter_server/main.go b/examples/helloworld/greeter_server/main.go index 4d077db92cfb..728bb19fd118 100644 --- a/examples/helloworld/greeter_server/main.go +++ b/examples/helloworld/greeter_server/main.go @@ -21,6 +21,8 @@ package main import ( "context" + "flag" + "fmt" "log" "net" @@ -28,8 +30,8 @@ import ( pb "google.golang.org/grpc/examples/helloworld/helloworld" ) -const ( - port = ":50051" +var ( + port = flag.Int("port", 50051, "The server port") ) // server is used to implement helloworld.GreeterServer. @@ -44,7 +46,8 @@ func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloRe } func main() { - lis, err := net.Listen("tcp", port) + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } diff --git a/examples/route_guide/client/client.go b/examples/route_guide/client/client.go index f18c10af8b14..4d705e9ca99b 100644 --- a/examples/route_guide/client/client.go +++ b/examples/route_guide/client/client.go @@ -39,7 +39,7 @@ import ( var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") caFile = flag.String("ca_file", "", "The file containing the CA root cert file") - serverAddr = flag.String("server_addr", "localhost:10000", "The server address in the format of host:port") + serverAddr = flag.String("addr", "localhost:50051", "The server address in the format of host:port") serverHostOverride = flag.String("server_host_override", "x.test.example.com", "The server name used to verify the hostname returned by the TLS handshake") ) @@ -167,7 +167,6 @@ func main() { opts = 
append(opts, grpc.WithInsecure()) } - opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(*serverAddr, opts...) if err != nil { log.Fatalf("fail to dial: %v", err) From 1163cfabe9a0c8869e5919f9b418a455e72dfff4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 1 Nov 2021 14:32:55 -0700 Subject: [PATCH 315/998] xds: move LocalTCPListener to internal/testutils (#4920) --- .../testutils/local_listener.go | 0 xds/csds/csds_test.go | 9 ++-- xds/internal/httpfilter/fault/fault_test.go | 5 +-- .../test/xds_client_integration_test.go | 2 +- .../test/xds_server_integration_test.go | 3 +- .../test/xds_server_serving_mode_test.go | 13 +++--- xds/server_test.go | 41 ++++++++++--------- 7 files changed, 36 insertions(+), 37 deletions(-) rename {xds/internal => internal}/testutils/local_listener.go (100%) diff --git a/xds/internal/testutils/local_listener.go b/internal/testutils/local_listener.go similarity index 100% rename from xds/internal/testutils/local_listener.go rename to internal/testutils/local_listener.go diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index ed1088b4339d..88df77a62242 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds" _ "google.golang.org/grpc/xds/internal/httpfilter/router" - xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/protobuf/testing/protocmp" @@ -258,9 +257,9 @@ func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.M } v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) // Create a local listener and pass it to Serve(). 
- lis, err := xtestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xtestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } go func() { if err := server.Serve(lis); err != nil { @@ -504,9 +503,9 @@ func TestCSDSNoXDSClient(t *testing.T) { defer csdss.Close() v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) // Create a local listener and pass it to Serve(). - lis, err := xtestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xtestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } go func() { if err := server.Serve(lis); err != nil { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index c2959054da9a..1bad1f92d65d 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -42,7 +42,6 @@ import ( "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/protobuf/types/known/wrapperspb" @@ -122,9 +121,9 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { testpb.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). 
- lis, err := xtestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xtestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } go func() { diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 23ea1546935e..e26e3e08f4c9 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -31,9 +31,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 707a9605d82f..6641a678db3c 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -52,7 +52,6 @@ import ( wrapperspb "github.com/golang/protobuf/ptypes/wrappers" xdscreds "google.golang.org/grpc/credentials/xds" testpb "google.golang.org/grpc/test/grpc_testing" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" ) const ( @@ -86,7 +85,7 @@ func setupGRPCServer(t *testing.T, bootstrapContents []byte) (net.Listener, func testpb.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). 
- lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index ac4b3929cb65..5fa17546e81d 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -29,15 +29,16 @@ import ( "testing" "time" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" - testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/e2e" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + testpb "google.golang.org/grpc/test/grpc_testing" ) // TestServerSideXDS_RedundantUpdateSuppression tests the scenario where the @@ -52,7 +53,7 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { if err != nil { t.Fatal(err) } - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } @@ -168,11 +169,11 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } // Create two local listeners and pass it to Serve(). 
- lis1, err := xdstestutils.LocalTCPListener() + lis1, err := testutils.LocalTCPListener() if err != nil { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } - lis2, err := xdstestutils.LocalTCPListener() + lis2, err := testutils.LocalTCPListener() if err != nil { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } diff --git a/xds/server_test.go b/xds/server_test.go index 0866e0414ae2..63cb6878ee7a 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -28,12 +28,6 @@ import ( "testing" "time" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" @@ -47,6 +41,13 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" ) const ( @@ -389,9 
+390,9 @@ func (s) TestServeSuccess(t *testing.T) { server := NewGRPCServer(modeChangeOption) defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. @@ -506,9 +507,9 @@ func (s) TestServeWithStop(t *testing.T) { // it after the LDS watch has been registered. server := NewGRPCServer() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. @@ -565,9 +566,9 @@ func (s) TestServeBootstrapFailure(t *testing.T) { server := NewGRPCServer() defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } serveDone := testutils.NewChannel() @@ -636,9 +637,9 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { server := NewGRPCServer(grpc.Creds(xdsCreds)) defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } serveDone := testutils.NewChannel() @@ -672,9 +673,9 @@ func (s) TestServeNewClientFailure(t *testing.T) { server := NewGRPCServer() defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } serveDone := 
testutils.NewChannel() @@ -704,9 +705,9 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { server := NewGRPCServer() defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. @@ -818,9 +819,9 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { server := NewGRPCServer(grpc.Creds(xdsCreds)) defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. From 29deb6bfa173768ba53b14c0b5c8af13854eb3a2 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 2 Nov 2021 10:26:08 -0700 Subject: [PATCH 316/998] xds/bootstrap: refactor to support top level and per-authority server config (#4892) --- xds/csds/csds.go | 2 +- xds/googledirectpath/googlec2p.go | 10 +- xds/googledirectpath/googlec2p_test.go | 13 +- xds/internal/xdsclient/bootstrap/bootstrap.go | 321 ++++++++++++------ .../xdsclient/bootstrap/bootstrap_test.go | 125 ++++--- xds/internal/xdsclient/client.go | 34 +- xds/internal/xdsclient/client_test.go | 16 +- xds/internal/xdsclient/dump_test.go | 32 +- xds/internal/xdsclient/loadreport.go | 4 +- xds/internal/xdsclient/loadreport_test.go | 10 +- xds/internal/xdsclient/xdsclient_test.go | 36 +- xds/server_test.go | 32 +- 12 files changed, 384 insertions(+), 251 deletions(-) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 1d817fbcc865..23f9c760b637 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -127,7 +127,7 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp ret := 
&v3statuspb.ClientStatusResponse{ Config: []*v3statuspb.ClientConfig{ { - Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().NodeProto), + Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().XDSServer.NodeProto), GenericXdsConfigs: configs, }, }, diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index b9f1c712014e..4f753e49c71a 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -103,10 +103,12 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts balancerName = tdURL } config := &bootstrap.Config{ - BalancerName: balancerName, - Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), - TransportAPI: version.TransportV3, - NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), + XDSServer: &bootstrap.ServerConfig{ + ServerURI: balancerName, + Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), + TransportAPI: version.TransportV3, + NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), + }, } // Create singleton xds client with this config. 
The xds client will be diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index a208fad66c58..c8162317cc30 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -211,15 +211,18 @@ func TestBuildXDS(t *testing.T) { } } wantConfig := &bootstrap.Config{ - BalancerName: tdURL, - TransportAPI: version.TransportV3, - NodeProto: wantNode, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: tdURL, + TransportAPI: version.TransportV3, + NodeProto: wantNode, + }, } if tt.tdURI != "" { - wantConfig.BalancerName = tt.tdURI + wantConfig.XDSServer.ServerURI = tt.tdURI } cmpOpts := cmp.Options{ - cmpopts.IgnoreFields(bootstrap.Config{}, "Creds"), + cmpopts.IgnoreFields(bootstrap.ServerConfig{}, "Creds"), + cmp.AllowUnexported(bootstrap.ServerConfig{}), protocmp.Transform(), } select { diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index fa229d99593e..7f75525cc631 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -25,6 +25,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "strings" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -58,34 +59,164 @@ var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) // For overriding in unit tests. var bootstrapFileReadFunc = ioutil.ReadFile -// Config provides the xDS client with several key bits of information that it -// requires in its interaction with the management server. The Config is -// initialized from the bootstrap file. -type Config struct { - // BalancerName is the name of the management server to connect to. +// ServerConfig contains the configuration to connect to a server, including +// URI, creds, and transport API version (e.g. v2 or v3). 
+type ServerConfig struct { + // ServerURI is the management server to connect to. // - // The bootstrap file contains a list of servers (with name+creds), but we - // pick the first one. - BalancerName string + // The bootstrap file contains an ordered list of xDS servers to contact for + // this authority. The first one is picked. + ServerURI string // Creds contains the credentials to be used while talking to the xDS // server, as a grpc.DialOption. Creds grpc.DialOption + // credsType is the type of the creds. It will be used to dedup servers. + credsType string // TransportAPI indicates the API version of xDS transport protocol to use. // This describes the xDS gRPC endpoint and version of // DiscoveryRequest/Response used on the wire. TransportAPI version.TransportAPI // NodeProto contains the Node proto to be used in xDS requests. The actual // type depends on the transport protocol version used. + // + // Note that it's specified in the bootstrap globally for all the servers, + // but we keep it in each server config so that its type (e.g. *v2pb.Node or + // *v3pb.Node) is consistent with the transport API version. NodeProto proto.Message +} + +// UnmarshalJSON takes the json data (a list of servers) and unmarshals the +// first one in the list. +func (sc *ServerConfig) UnmarshalJSON(data []byte) error { + var servers []*xdsServer + if err := json.Unmarshal(data, &servers); err != nil { + return fmt.Errorf("xds: json.Unmarshal(data) for field xds_servers failed during bootstrap: %v", err) + } + if len(servers) < 1 { + return fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any management server to connect to") + } + xs := servers[0] + sc.ServerURI = xs.ServerURI + for _, cc := range xs.ChannelCreds { + // We stop at the first credential type that we support. 
+ sc.credsType = cc.Type + if cc.Type == credsGoogleDefault { + sc.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials()) + break + } else if cc.Type == credsInsecure { + sc.Creds = grpc.WithTransportCredentials(insecure.NewCredentials()) + break + } + } + for _, f := range xs.ServerFeatures { + if f == serverFeaturesV3 { + sc.TransportAPI = version.TransportV3 + } + } + return nil +} + +// Authority contains configuration for an Authority for an xDS control plane +// server. See the Authorities field in the Config struct for how it's used. +type Authority struct { + // ClientListenerResourceNameTemplate is template for the name of the + // Listener resource to subscribe to for a gRPC client channel. Used only + // when the channel is created using an "xds:" URI with this authority name. + // + // The token "%s", if present in this string, will be replaced + // with %-encoded service authority (i.e., the path part of the target + // URI used to create the gRPC channel). + // + // Must start with "xdstp:///". If it does not, + // that is considered a bootstrap file parsing error. + // + // If not present in the bootstrap file, defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + ClientListenerResourceNameTemplate string + // XDSServer contains the management server and config to connect to for + // this authority. + XDSServer *ServerConfig +} + +// UnmarshalJSON implement json unmarshaller. 
+func (a *Authority) UnmarshalJSON(data []byte) error { + var jsonData map[string]json.RawMessage + if err := json.Unmarshal(data, &jsonData); err != nil { + return fmt.Errorf("xds: failed to parse authority: %v", err) + } + + for k, v := range jsonData { + switch k { + case "xds_servers": + if err := json.Unmarshal(v, &a.XDSServer); err != nil { + return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "client_listener_resource_name_template": + if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil { + return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + } + } + return nil +} + +// Config provides the xDS client with several key bits of information that it +// requires in its interaction with the management server. The Config is +// initialized from the bootstrap file. +type Config struct { + // XDSServer is the management server to connect to. + // + // The bootstrap file contains a list of servers (with name+creds), but we + // pick the first one. + XDSServer *ServerConfig // CertProviderConfigs contains a mapping from certificate provider plugin // instance names to parsed buildable configs. CertProviderConfigs map[string]*certprovider.BuildableConfig // ServerListenerResourceNameTemplate is a template for the name of the - // Listener resource to subscribe to for a gRPC server. If the token `%s` is - // present in the string, it will be replaced with the server's listening - // "IP:port" (e.g., "0.0.0.0:8080", "[::]:8080"). For example, a value of - // "example/resource/%s" could become "example/resource/0.0.0.0:8080". + // Listener resource to subscribe to for a gRPC server. + // + // If starts with "xdstp:", will be interpreted as a new-style name, + // in which case the authority of the URI will be used to select the + // relevant configuration in the "authorities" map. 
+ // + // The token "%s", if present in this string, will be replaced with the IP + // and port on which the server is listening. (e.g., "0.0.0.0:8080", + // "[::]:8080"). For example, a value of "example/resource/%s" could become + // "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", + // the replaced string will be %-encoded. + // + // There is no default; if unset, xDS-based server creation fails. ServerListenerResourceNameTemplate string + // A template for the name of the Listener resource to subscribe to + // for a gRPC client channel. Used only when the channel is created + // with an "xds:" URI with no authority. + // + // If starts with "xdstp:", will be interpreted as a new-style name, + // in which case the authority of the URI will be used to select the + // relevant configuration in the "authorities" map. + // + // The token "%s", if present in this string, will be replaced with + // the service authority (i.e., the path part of the target URI + // used to create the gRPC channel). If the template starts with + // "xdstp:", the replaced string will be %-encoded. + // + // Defaults to "%s". + ClientDefaultListenerResourceNameTemplate string + + // Authorities is a map of authority name to corresponding configuration. + // + // This is used in the following cases: + // - A gRPC client channel is created using an "xds:" URI that includes + // an authority. + // - A gRPC client channel is created using an "xds:" URI with no + // authority, but the "client_default_listener_resource_name_template" + // field above turns it into an "xdstp:" URI. + // - A gRPC server is created and the + // "server_listener_resource_name_template" field is an "xdstp:" URI. + // + // In any of those cases, it is an error if the specified authority is + // not present in this map. 
+ Authorities map[string]*Authority } type channelCreds struct { @@ -125,34 +256,6 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // NewConfig returns a new instance of Config initialized by reading the // bootstrap file found at ${GRPC_XDS_BOOTSTRAP}. // -// The format of the bootstrap file will be as follows: -// { -// "xds_servers": [ -// { -// "server_uri": , -// "channel_creds": [ -// { -// "type": , -// "config": -// } -// ], -// "server_features": [ ... ], -// } -// ], -// "node": , -// "certificate_providers" : { -// "default": { -// "plugin_name": "default-plugin-name", -// "config": { default plugin config in JSON } -// }, -// "foo": { -// "plugin_name": "foo", -// "config": { foo plugin config in JSON } -// } -// }, -// "server_listener_resource_name_template": "grpc/server?xds.resource.listening_address=%s" -// } -// // Currently, we support exactly one type of credential, which is // "google_default", where we use the host's default certs for transport // credentials and a Google oauth token for call credentials. @@ -162,6 +265,8 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // fields left unspecified, in which case the caller should use some sane // defaults. func NewConfig() (*Config, error) { + // Examples of the bootstrap json can be found in the generator tests + // https://github.com/GoogleCloudPlatform/traffic-director-grpc-bootstrap/blob/master/main_test.go. data, err := bootstrapConfigFromEnvVariable() if err != nil { return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) @@ -181,7 +286,7 @@ func NewConfigFromContents(data []byte) (*Config, error) { return nil, fmt.Errorf("xds: Failed to parse bootstrap config: %v", err) } - serverSupportsV3 := false + var node *v3corepb.Node m := jsonpb.Unmarshaler{AllowUnknownFields: true} for k, v := range jsonData { switch k { @@ -192,37 +297,14 @@ func NewConfigFromContents(data []byte) (*Config, error) { // "build_version" field. 
In any case, the unmarshal will succeed // because we have set the `AllowUnknownFields` option on the // unmarshaler. - n := &v3corepb.Node{} - if err := m.Unmarshal(bytes.NewReader(v), n); err != nil { + node = &v3corepb.Node{} + if err := m.Unmarshal(bytes.NewReader(v), node); err != nil { return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } - config.NodeProto = n case "xds_servers": - var servers []*xdsServer - if err := json.Unmarshal(v, &servers); err != nil { + if err := json.Unmarshal(v, &config.XDSServer); err != nil { return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } - if len(servers) < 1 { - return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any management server to connect to") - } - xs := servers[0] - config.BalancerName = xs.ServerURI - for _, cc := range xs.ChannelCreds { - // We stop at the first credential type that we support. - if cc.Type == credsGoogleDefault { - config.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials()) - break - } else if cc.Type == credsInsecure { - config.Creds = grpc.WithTransportCredentials(insecure.NewCredentials()) - break - } - } - for _, f := range xs.ServerFeatures { - switch f { - case serverFeaturesV3: - serverSupportsV3 = true - } - } case "certificate_providers": var providerInstances map[string]json.RawMessage if err := json.Unmarshal(v, &providerInstances); err != nil { @@ -256,27 +338,42 @@ func NewConfigFromContents(data []byte) (*Config, error) { if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil { return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } + default: + logger.Warningf("Bootstrap content has unknown field: %s", k) } // Do not fail the xDS bootstrap when an unknown field is seen. 
This can // happen when an older version client reads a newer version bootstrap // file with new fields. } - if config.BalancerName == "" { + if config.ClientDefaultListenerResourceNameTemplate == "" { + // Default value of the default client listener name template is "%s". + config.ClientDefaultListenerResourceNameTemplate = "%s" + } + if config.XDSServer == nil { + return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"]) + } + if config.XDSServer.ServerURI == "" { return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"]) } - if config.Creds == nil { + if config.XDSServer.Creds == nil { return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) } - - // We end up using v3 transport protocol version only if the server supports - // v3, indicated by the presence of "xds_v3" in server_features. The default - // value of the enum type "version.TransportAPI" is v2. 
- if serverSupportsV3 { - config.TransportAPI = version.TransportV3 + // Post-process the authorities' client listener resource template field: + // - if set, it must start with "xdstp:///" + // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" + for name, authority := range config.Authorities { + prefix := fmt.Sprintf("xdstp://%s", name) + if authority.ClientListenerResourceNameTemplate == "" { + authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" + continue + } + if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) { + return nil, fmt.Errorf("xds: field ClientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix) + } } - if err := config.updateNodeProto(); err != nil { + if err := config.updateNodeProto(node); err != nil { return nil, err } logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) @@ -285,47 +382,57 @@ func NewConfigFromContents(data []byte) (*Config, error) { // updateNodeProto updates the node proto read from the bootstrap file. // -// Node proto in Config contains a v3.Node protobuf message corresponding to the -// JSON contents found in the bootstrap file. This method performs some post +// The input node is a v3.Node protobuf message corresponding to the JSON +// contents found in the bootstrap file. This method performs some post // processing on it: -// 1. If we don't find a nodeProto in the bootstrap file, we create an empty one -// here. That way, callers of this function can always expect that the NodeProto -// field is non-nil. -// 2. If the transport protocol version to be used is not v3, we convert the -// current v3.Node proto in a v2.Node proto. -// 3. Some additional fields which are not expected to be set in the bootstrap +// 1. If the node is nil, we create an empty one here. 
That way, callers of this +// function can always expect that the NodeProto field is non-nil. +// 2. Some additional fields which are not expected to be set in the bootstrap // file are populated here. -func (c *Config) updateNodeProto() error { - if c.TransportAPI == version.TransportV3 { - v3, _ := c.NodeProto.(*v3corepb.Node) - if v3 == nil { - v3 = &v3corepb.Node{} - } - v3.UserAgentName = gRPCUserAgentName - v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning) - c.NodeProto = v3 - return nil +// 3. For each server config (both top level and in each authority), we set its +// node field to the v3.Node, or a v2.Node with the same content, depending on +// the server's transprot API version. +func (c *Config) updateNodeProto(node *v3corepb.Node) error { + v3 := node + if v3 == nil { + v3 = &v3corepb.Node{} } + v3.UserAgentName = gRPCUserAgentName + v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} + v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning) v2 := &v2corepb.Node{} - if c.NodeProto != nil { - v3, err := proto.Marshal(c.NodeProto) - if err != nil { - return fmt.Errorf("xds: proto.Marshal(%v): %v", c.NodeProto, err) - } - if err := proto.Unmarshal(v3, v2); err != nil { - return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3, err) - } + v3bytes, err := proto.Marshal(v3) + if err != nil { + return fmt.Errorf("xds: proto.Marshal(%v): %v", v3, err) + } + if err := proto.Unmarshal(v3bytes, v2); err != nil { + return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3bytes, err) } - c.NodeProto = v2 - // BuildVersion is deprecated, and is replaced by user_agent_name and // user_agent_version. But the management servers are still using the old // field, so we will keep both set. 
v2.BuildVersion = gRPCVersion - v2.UserAgentName = gRPCUserAgentName v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v2.ClientFeatures = append(v2.ClientFeatures, clientFeatureNoOverprovisioning) + + switch c.XDSServer.TransportAPI { + case version.TransportV2: + c.XDSServer.NodeProto = v2 + case version.TransportV3: + c.XDSServer.NodeProto = v3 + } + + for _, a := range c.Authorities { + if a.XDSServer == nil { + continue + } + switch a.XDSServer.TransportAPI { + case version.TransportV2: + a.XDSServer.NodeProto = v2 + case version.TransportV3: + a.XDSServer.NodeProto = v3 + } + } + return nil } diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 501d62102d21..9d6ead0ff5b5 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -30,6 +30,7 @@ import ( "github.com/golang/protobuf/proto" structpb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" "google.golang.org/grpc/credentials/google" @@ -207,59 +208,44 @@ var ( ClientFeatures: []string{clientFeatureNoOverprovisioning}, } nilCredsConfigV2 = &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: v2NodeProto, + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + credsType: "insecure", + NodeProto: v2NodeProto, + }, + ClientDefaultListenerResourceNameTemplate: "%s", } nonNilCredsConfigV2 = &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - NodeProto: v2NodeProto, + XDSServer: &ServerConfig{ + ServerURI: 
"trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + NodeProto: v2NodeProto, + }, + ClientDefaultListenerResourceNameTemplate: "%s", } nonNilCredsConfigV3 = &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - TransportAPI: version.TransportV3, - NodeProto: v3NodeProto, + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + TransportAPI: version.TransportV3, + NodeProto: v3NodeProto, + }, + ClientDefaultListenerResourceNameTemplate: "%s", } ) func (c *Config) compare(want *Config) error { - if c.BalancerName != want.BalancerName { - return fmt.Errorf("config.BalancerName is %s, want %s", c.BalancerName, want.BalancerName) - } - // Since Creds is of type grpc.DialOption interface, where the - // implementation is provided by a function, it is not possible to compare. - if (c.Creds != nil) != (want.Creds != nil) { - return fmt.Errorf("config.Creds is %#v, want %#v", c.Creds, want.Creds) - } - if c.TransportAPI != want.TransportAPI { - return fmt.Errorf("config.TransportAPI is %v, want %v", c.TransportAPI, want.TransportAPI) - - } - if diff := cmp.Diff(want.NodeProto, c.NodeProto, cmp.Comparer(proto.Equal)); diff != "" { - return fmt.Errorf("config.NodeProto diff (-want, +got):\n%s", diff) - } - if c.ServerListenerResourceNameTemplate != want.ServerListenerResourceNameTemplate { - return fmt.Errorf("config.ServerListenerResourceNameTemplate is %q, want %q", c.ServerListenerResourceNameTemplate, want.ServerListenerResourceNameTemplate) - } - - // A vanilla cmp.Equal or cmp.Diff will not produce useful error message - // here. So, we iterate through the list of configs and compare them one at - // a time. 
- gotCfgs := c.CertProviderConfigs - wantCfgs := want.CertProviderConfigs - if len(gotCfgs) != len(wantCfgs) { - return fmt.Errorf("config.CertProviderConfigs is %d entries, want %d", len(gotCfgs), len(wantCfgs)) - } - for instance, gotCfg := range gotCfgs { - wantCfg, ok := wantCfgs[instance] - if !ok { - return fmt.Errorf("config.CertProviderConfigs has unexpected plugin instance %q with config %q", instance, gotCfg.String()) - } - if got, want := gotCfg.String(), wantCfg.String(); got != want { - return fmt.Errorf("config.CertProviderConfigs for plugin instance %q has config %q, want %q", instance, got, want) - } + if diff := cmp.Diff(c, want, + cmpopts.EquateEmpty(), + cmp.AllowUnexported(ServerConfig{}), + cmp.Comparer(proto.Equal), + cmp.Comparer(func(a, b grpc.DialOption) bool { return (a != nil) == (b != nil) }), + cmp.Transformer("certproviderconfigstring", func(a *certprovider.BuildableConfig) string { return a.String() }), + ); diff != "" { + return fmt.Errorf("diff: %v", diff) } return nil } @@ -285,6 +271,7 @@ func setupBootstrapOverride(bootstrapFileMap map[string]string) func() { // This function overrides the bootstrap file NAME env variable, to test the // code that reads file with the given fileName. func testNewConfigWithFileNameEnv(t *testing.T, fileName string, wantError bool, wantConfig *Config) { + t.Helper() origBootstrapFileName := env.BootstrapFileName env.BootstrapFileName = fileName defer func() { env.BootstrapFileName = origBootstrapFileName }() @@ -304,10 +291,10 @@ func testNewConfigWithFileNameEnv(t *testing.T, fileName string, wantError bool, // This function overrides the bootstrap file CONTENT env variable, to test the // code that uses the content from env directly. func testNewConfigWithFileContentEnv(t *testing.T, fileName string, wantError bool, wantConfig *Config) { + t.Helper() b, err := bootstrapFileReadFunc(fileName) if err != nil { - // If file reading failed, skip this test. 
- return + t.Skip(err) } origBootstrapContent := env.BootstrapFileContent env.BootstrapFileContent = string(b) @@ -404,14 +391,18 @@ func TestNewConfigV2ProtoSuccess(t *testing.T) { }{ { "emptyNodeProto", &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v2corepb.Node{ - BuildVersion: gRPCVersion, - UserAgentName: gRPCUserAgentName, - UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning}, + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + credsType: "insecure", + NodeProto: &v2corepb.Node{ + BuildVersion: gRPCVersion, + UserAgentName: gRPCUserAgentName, + UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, + ClientFeatures: []string{clientFeatureNoOverprovisioning}, + }, }, + ClientDefaultListenerResourceNameTemplate: "%s", }, }, {"unknownTopLevelFieldInFile", nilCredsConfigV2}, @@ -670,13 +661,17 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { defer cancel() goodConfig := &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - TransportAPI: version.TransportV3, - NodeProto: v3NodeProto, + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + TransportAPI: version.TransportV3, + NodeProto: v3NodeProto, + }, CertProviderConfigs: map[string]*certprovider.BuildableConfig{ "fakeProviderInstance": wantCfg, }, + ClientDefaultListenerResourceNameTemplate: "%s", } tests := []struct { name string @@ -760,11 +755,15 @@ func TestNewConfigWithServerListenerResourceNameTemplate(t *testing.T) { { name: 
"goodServerListenerResourceNameTemplate", wantConfig: &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - TransportAPI: version.TransportV2, - NodeProto: v2NodeProto, - ServerListenerResourceNameTemplate: "grpc/server?xds.resource.listening_address=%s", + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + TransportAPI: version.TransportV2, + NodeProto: v2NodeProto, + }, + ServerListenerResourceNameTemplate: "grpc/server?xds.resource.listening_address=%s", + ClientDefaultListenerResourceNameTemplate: "%s", }, }, } diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 3230c66c06e3..8b7d4bad04f9 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -28,8 +28,6 @@ import ( "sync" "time" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/golang/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" @@ -629,27 +627,18 @@ type clientImpl struct { // newWithConfig returns a new xdsClient with the given config. 
func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (*clientImpl, error) { switch { - case config.BalancerName == "": + case config.XDSServer == nil: + return nil, errors.New("xds: no xds_server provided") + case config.XDSServer.ServerURI == "": return nil, errors.New("xds: no xds_server name provided in options") - case config.Creds == nil: + case config.XDSServer.Creds == nil: return nil, errors.New("xds: no credentials provided in options") - case config.NodeProto == nil: + case config.XDSServer.NodeProto == nil: return nil, errors.New("xds: no node_proto provided in options") } - switch config.TransportAPI { - case version.TransportV2: - if _, ok := config.NodeProto.(*v2corepb.Node); !ok { - return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", config.NodeProto, config.TransportAPI) - } - case version.TransportV3: - if _, ok := config.NodeProto.(*v3corepb.Node); !ok { - return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", config.NodeProto, config.TransportAPI) - } - } - dopts := []grpc.DialOption{ - config.Creds, + config.XDSServer.Creds, grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 5 * time.Minute, Timeout: 20 * time.Second, @@ -677,23 +666,24 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( lrsClients: make(map[string]*lrsClient), } - cc, err := grpc.Dial(config.BalancerName, dopts...) + cc, err := grpc.Dial(config.XDSServer.ServerURI, dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. 
- return nil, fmt.Errorf("xds: failed to dial balancer {%s}: %v", config.BalancerName, err) + return nil, fmt.Errorf("xds: failed to dial balancer {%s}: %v", config.XDSServer.ServerURI, err) } c.cc = cc c.logger = prefixLogger((c)) - c.logger.Infof("Created ClientConn to xDS management server: %s", config.BalancerName) + c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) - apiClient, err := newAPIClient(config.TransportAPI, cc, BuildOptions{ + apiClient, err := newAPIClient(config.XDSServer.TransportAPI, cc, BuildOptions{ Parent: c, Validator: c.updateValidator, - NodeProto: config.NodeProto, + NodeProto: config.XDSServer.NodeProto, Backoff: backoff.DefaultExponential.Backoff, Logger: c.logger, }) if err != nil { + cc.Close() return nil, err } c.apiClient = apiClient diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 7c3423cd5ad7..2a6d6ae2a536 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -82,9 +82,11 @@ func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstra watchExpiryTimeout = defaultTestWatchExpiryTimeout } return &bootstrap.Config{ - BalancerName: balancerName, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: balancerName, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, }, watchExpiryTimeout } @@ -269,9 +271,11 @@ func (s) TestClientNewSingleton(t *testing.T) { oldBootstrapNewConfig := bootstrapNewConfig bootstrapNewConfig = func() (*bootstrap.Config, error) { return &bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithInsecure(), - NodeProto: xdstestutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithInsecure(), + NodeProto: xdstestutils.EmptyNodeProtoV2, + 
}, }, nil } defer func() { bootstrapNewConfig = oldBootstrapNewConfig }() diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index d03479ca4ade..c162a9418f23 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -75,9 +75,11 @@ func (s) TestLDSConfigDump(t *testing.T) { } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, }, defaultTestWatchExpiryTimeout) if err != nil { t.Fatalf("failed to create client: %v", err) @@ -189,9 +191,11 @@ func (s) TestRDSConfigDump(t *testing.T) { } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, }, defaultTestWatchExpiryTimeout) if err != nil { t.Fatalf("failed to create client: %v", err) @@ -303,9 +307,11 @@ func (s) TestCDSConfigDump(t *testing.T) { } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, }, defaultTestWatchExpiryTimeout) if err != nil { t.Fatalf("failed to create client: %v", err) @@ -403,9 +409,11 @@ func (s) TestEDSConfigDump(t 
*testing.T) { } client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, }, defaultTestWatchExpiryTimeout) if err != nil { t.Fatalf("failed to create client: %v", err) diff --git a/xds/internal/xdsclient/loadreport.go b/xds/internal/xdsclient/loadreport.go index 32a71dada7fb..4df9a5c0c3a4 100644 --- a/xds/internal/xdsclient/loadreport.go +++ b/xds/internal/xdsclient/loadreport.go @@ -115,12 +115,12 @@ func (lrsC *lrsClient) startStream() { var cc *grpc.ClientConn lrsC.parent.logger.Infof("Starting load report to server: %s", lrsC.server) - if lrsC.server == "" || lrsC.server == lrsC.parent.config.BalancerName { + if lrsC.server == "" || lrsC.server == lrsC.parent.config.XDSServer.ServerURI { // Reuse the xDS client if server is the same. cc = lrsC.parent.cc } else { lrsC.parent.logger.Infof("LRS server is different from management server, starting a new ClientConn") - ccNew, err := grpc.Dial(lrsC.server, lrsC.parent.config.Creds) + ccNew, err := grpc.Dial(lrsC.server, lrsC.parent.config.XDSServer.Creds) if err != nil { // An error from a non-blocking dial indicates something serious. 
lrsC.parent.logger.Infof("xds: failed to dial load report server {%s}: %v", lrsC.server, err) diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 88a08eb43fda..db31de6cf78b 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -55,10 +55,12 @@ func (s) TestLRSClient(t *testing.T) { defer sCleanup() xdsC, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: fs.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v2corepb.Node{}, - TransportAPI: version.TransportV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: fs.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV2, + NodeProto: &v2corepb.Node{}, + }, }, defaultClientWatchExpiryTimeout) if err != nil { t.Fatalf("failed to create xds client: %v", err) diff --git a/xds/internal/xdsclient/xdsclient_test.go b/xds/internal/xdsclient/xdsclient_test.go index f348df481615..23d6d6f54183 100644 --- a/xds/internal/xdsclient/xdsclient_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -56,34 +56,42 @@ func (s) TestNew(t *testing.T) { { name: "empty-balancer-name", config: &bootstrap.Config{ - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: testutils.EmptyNodeProtoV2, + }, }, wantErr: true, }, { name: "empty-dial-creds", config: &bootstrap.Config{ - BalancerName: testXDSServer, - NodeProto: testutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + NodeProto: testutils.EmptyNodeProtoV2, + }, }, wantErr: true, }, { name: "empty-node-proto", config: &bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + XDSServer: 
&bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + }, }, wantErr: true, }, { name: "node-proto-version-mismatch", config: &bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV3, - TransportAPI: version.TransportV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV2, + NodeProto: testutils.EmptyNodeProtoV3, + }, }, wantErr: true, }, @@ -91,9 +99,11 @@ func (s) TestNew(t *testing.T) { { name: "happy-case", config: &bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithInsecure(), - NodeProto: testutils.EmptyNodeProtoV2, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithInsecure(), + NodeProto: testutils.EmptyNodeProtoV2, + }, }, }, } diff --git a/xds/server_test.go b/xds/server_test.go index 63cb6878ee7a..501b8ba76e20 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -322,9 +322,11 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { newXDSClient = func() (xdsclient.XDSClient, error) { c := fakeclient.NewClient() c.SetBootstrapConfig(&bootstrap.Config{ - BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummyBalancer", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV3, + }, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, CertProviderConfigs: certProviderConfigs, }) @@ -352,9 +354,11 @@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, newXDSClient = func() (xdsclient.XDSClient, error) { c := fakeclient.NewClient() bc := &bootstrap.Config{ - 
BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummyBalancer", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV3, + }, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, } if includeCertProviderCfg { @@ -599,18 +603,22 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { { desc: "certificate provider config is missing", bootstrapConfig: &bootstrap.Config{ - BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummyBalancer", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV3, + }, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, }, }, { desc: "server_listener_resource_name_template is missing", bootstrapConfig: &bootstrap.Config{ - BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummyBalancer", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV3, + }, CertProviderConfigs: certProviderConfigs, }, }, From 670c133e568e89c901d05b898af9f5ce72a88c6d Mon Sep 17 00:00:00 2001 From: Uddeshya Singh Date: Tue, 2 Nov 2021 23:36:24 +0530 Subject: [PATCH 317/998] transport/http2_server : Move up streamID validation in operate headers (#4873) --- internal/transport/http2_server.go | 36 +++++++++++++++++++----------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index f2cad9ebc311..2c6eaf0e59cf 100644 --- 
a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -73,7 +73,6 @@ type http2Server struct { writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer @@ -123,6 +122,11 @@ type http2Server struct { bufferPool *bufferPool connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen } // NewServerTransport creates a http2 transport with conn and configuration @@ -334,6 +338,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID // frame.Truncated is set to true when framer detects that the current header @@ -348,6 +356,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return false } + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + return true + } + t.maxStreamID = streamID + buf := newRecvBuffer() s := &Stream{ id: streamID, @@ -355,7 +372,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf: buf, fc: &inFlow{limit: uint32(t.initialWindowSize)}, } - var ( // If a gRPC Response-Headers has already been received, then it means // that the peer is speaking gRPC and we are in gRPC mode. 
@@ -498,16 +514,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.cancel() return false } - if streamID%2 != 1 || streamID <= t.maxStreamID { - t.mu.Unlock() - // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - s.cancel() - return true - } - t.maxStreamID = streamID if httpMethod != http.MethodPost { t.mu.Unlock() if logger.V(logLevel) { @@ -1293,20 +1299,23 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining + sid := t.maxStreamID if len(t.activeStreams) == 0 { g.closeConn = true } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } @@ -1319,6 +1328,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. 
During this time accept new streams since they might have From c105005da2bc5faa27ece6a2ba48c6ba0d62f53d Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 3 Nov 2021 13:25:05 -0400 Subject: [PATCH 318/998] xds: NACK missing route specifier server side (#4925) * xds: NACK missing route specifier server side --- xds/internal/xdsclient/client.go | 4 +-- xds/internal/xdsclient/filter_chain.go | 3 +- xds/internal/xdsclient/filter_chain_test.go | 34 +++++++++++++++++++++ 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 8b7d4bad04f9..39f1df215d1a 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -206,12 +206,12 @@ type ListenerUpdate struct { // RouteConfigName is the route configuration name corresponding to the // target which is being watched through LDS. // - // Only one of RouteConfigName and InlineRouteConfig is set. + // Exactly one of RouteConfigName and InlineRouteConfig is set. RouteConfigName string // InlineRouteConfig is the inline route configuration (RDS response) // returned inside LDS. // - // Only one of RouteConfigName and InlineRouteConfig is set. + // Exactly one of RouteConfigName and InlineRouteConfig is set. InlineRouteConfig *RouteConfigUpdate // MaxStreamDuration contains the HTTP connection manager's diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/filter_chain.go index f2b29f52a445..7503e0e48761 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/filter_chain.go @@ -637,8 +637,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) } filterChain.InlineRouteConfig = &routeU case nil: - // No-op, as no route specifier is a valid configuration on - // the server side. 
+ return nil, fmt.Errorf("no RouteSpecifier: %+v", hcm) default: return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", hcm.RouteSpecifier) } diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/filter_chain_test.go index 2cc73b0a5119..ae1035e76409 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/filter_chain_test.go @@ -770,6 +770,40 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { lis *v3listenerpb.Listener wantErr string }{ + { + name: "missing-route-specifier", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + wantErr: "no RouteSpecifier", + }, { name: "not-ads", lis: &v3listenerpb.Listener{ From 3b94303f375441714f9f4c488781e8928e000537 Mon Sep 17 00:00:00 2001 From: Igor Zibarev Date: Wed, 3 Nov 2021 23:20:53 +0300 Subject: [PATCH 319/998] grpc: stabilize WithConnectParams DialOption (#4915) --- dialoptions.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 40d8ba6596ab..280df92e74ba 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -228,18 +228,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } -// WithConnectParams configures the dialer to use the provided ConnectParams. 
+// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} From 5841c8c78316e5b2ab44d37698034b146cb8c091 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 5 Nov 2021 09:30:57 -0700 Subject: [PATCH 320/998] xds/balancergroup: remove xds dependency from balancergroup (#4955) --- .../balancer/balancergroup/balancergroup.go | 71 +--------- .../balancergroup/balancergroup_test.go | 129 ++++-------------- .../balancer/balancergroup/testutils_test.go | 33 ----- .../balancer/clustermanager/clustermanager.go | 2 +- xds/internal/balancer/priority/balancer.go | 2 +- .../balancer/weightedtarget/weightedtarget.go | 2 +- 6 files changed, 30 insertions(+), 209 deletions(-) delete mode 100644 xds/internal/balancer/balancergroup/testutils_test.go diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/xds/internal/balancer/balancergroup/balancergroup.go index 749c6b36e717..d516f215323b 100644 --- a/xds/internal/balancer/balancergroup/balancergroup.go +++ b/xds/internal/balancer/balancergroup/balancergroup.go @@ -23,9 +23,6 @@ import ( "sync" "time" - orcapb "github.com/cncf/xds/go/xds/data/orca/v3" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/cache" @@ -178,7 
+175,6 @@ func (sbc *subBalancerWrapper) stopBalancer() { // // Updates from ClientConn are forwarded to sub-balancers // - service config update -// - Not implemented // - address update // - subConn state change // - find the corresponding balancer and forward @@ -199,7 +195,6 @@ type BalancerGroup struct { cc balancer.ClientConn buildOpts balancer.BuildOptions logger *grpclog.PrefixLogger - loadStore load.PerClusterReporter // TODO: delete this, no longer needed. It was used by EDS. // stateAggregator is where the state/picker updates will be sent to. It's // provided by the parent balancer, to build a picker with all the @@ -254,15 +249,11 @@ var DefaultSubBalancerCloseTimeout = 15 * time.Minute // New creates a new BalancerGroup. Note that the BalancerGroup // needs to be started to work. -// -// TODO(easwars): Pass an options struct instead of N args. -func New(cc balancer.ClientConn, bOpts balancer.BuildOptions, stateAggregator BalancerStateAggregator, loadStore load.PerClusterReporter, logger *grpclog.PrefixLogger) *BalancerGroup { +func New(cc balancer.ClientConn, bOpts balancer.BuildOptions, stateAggregator BalancerStateAggregator, logger *grpclog.PrefixLogger) *BalancerGroup { return &BalancerGroup{ - cc: cc, - buildOpts: bOpts, - logger: logger, - loadStore: loadStore, - + cc: cc, + buildOpts: bOpts, + logger: logger, stateAggregator: stateAggregator, idToBalancerConfig: make(map[string]*subBalancerWrapper), @@ -467,10 +458,6 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver // state, then forward to ClientConn. func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) { bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state) - if bg.loadStore != nil { - // Only wrap the picker to do load reporting if loadStore was set. - state.Picker = newLoadReportPicker(state.Picker, id, bg.loadStore) - } // Send new state to the aggregator, without holding the incomingMu. 
// incomingMu is to protect all calls to the parent ClientConn, this update @@ -519,53 +506,3 @@ func (bg *BalancerGroup) ExitIdle() { } bg.outgoingMu.Unlock() } - -const ( - serverLoadCPUName = "cpu_utilization" - serverLoadMemoryName = "mem_utilization" -) - -type loadReportPicker struct { - p balancer.Picker - - locality string - loadStore load.PerClusterReporter -} - -func newLoadReportPicker(p balancer.Picker, id string, loadStore load.PerClusterReporter) *loadReportPicker { - return &loadReportPicker{ - p: p, - locality: id, - loadStore: loadStore, - } -} - -func (lrp *loadReportPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - res, err := lrp.p.Pick(info) - if err != nil { - return res, err - } - - lrp.loadStore.CallStarted(lrp.locality) - oldDone := res.Done - res.Done = func(info balancer.DoneInfo) { - if oldDone != nil { - oldDone(info) - } - lrp.loadStore.CallFinished(lrp.locality, info.Err) - - load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) - if !ok { - return - } - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadCPUName, load.CpuUtilization) - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadMemoryName, load.MemUtilization) - for n, d := range load.RequestCost { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) - } - for n, d := range load.Utilization { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) - } - } - return res, err -} diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go index 82d0c2dfb3ea..60c74a16b6ba 100644 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ b/xds/internal/balancer/balancergroup/balancergroup_test.go @@ -30,19 +30,17 @@ import ( "testing" "time" - orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" "google.golang.org/grpc/balancer" 
"google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/load" ) var ( @@ -64,6 +62,14 @@ func init() { DefaultSubBalancerCloseTimeout = time.Millisecond } +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { return func() balancer.SubConn { scst, _ := p.Pick(balancer.PickInfo{}) @@ -71,18 +77,18 @@ func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { } } -func newTestBalancerGroup(t *testing.T, loadStore load.PerClusterReporter) (*testutils.TestClientConn, *weightedaggregator.Aggregator, *BalancerGroup) { +func newTestBalancerGroup(t *testing.T) (*testutils.TestClientConn, *weightedaggregator.Aggregator, *BalancerGroup) { cc := testutils.NewTestClientConn(t) gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, loadStore, nil) + bg := New(cc, balancer.BuildOptions{}, gator, nil) bg.Start() return cc, gator, bg } // 1 balancer, 1 backend -> 2 backends -> 1 backend. func (s) TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add one balancer to group. gator.Add(testBalancerIDs[0], 1) @@ -138,7 +144,7 @@ func (s) TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) { // 2 balancers, each with 1 backend. 
func (s) TestBalancerGroup_TwoRR_OneBackend(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add two balancers to group and send one resolved address to both // balancers. @@ -168,7 +174,7 @@ func (s) TestBalancerGroup_TwoRR_OneBackend(t *testing.T) { // 2 balancers, each with more than 1 backends. func (s) TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add two balancers to group and send one resolved address to both // balancers. @@ -253,7 +259,7 @@ func (s) TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) { // 2 balancers with different weights. func (s) TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add two balancers to group and send two resolved addresses to both // balancers. @@ -289,7 +295,7 @@ func (s) TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) { // totally 3 balancers, add/remove balancer. func (s) TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add three balancers to group and send one resolved address to both // balancers. @@ -356,7 +362,7 @@ func (s) TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) { // 2 balancers, change balancer weight. func (s) TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add two balancers to group and send two resolved addresses to both // balancers. 
@@ -400,95 +406,6 @@ func (s) TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) { } } -func (s) TestBalancerGroup_LoadReport(t *testing.T) { - loadStore := load.NewStore() - const ( - testCluster = "test-cluster" - testEDSService = "test-eds-service" - ) - cc, gator, bg := newTestBalancerGroup(t, loadStore.PerCluster(testCluster, testEDSService)) - - backendToBalancerID := make(map[balancer.SubConn]string) - - // Add two balancers to group and send two resolved addresses to both - // balancers. - gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - backendToBalancerID[sc1] = testBalancerIDs[0] - backendToBalancerID[sc2] = testBalancerIDs[0] - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - backendToBalancerID[sc3] = testBalancerIDs[1] - backendToBalancerID[sc4] = testBalancerIDs[1] - - // Send state changes for both subconns. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - // bg1 has a weight of 2, while bg2 has a weight of 1. So, we expect 20 of - // these picks to go to bg1 and 10 of them to bg2. And since there are two - // subConns in each group, we expect the picks to be equally split between - // the subConns. We do not call Done() on picks routed to sc1, so we expect - // these to show up as pending rpcs. 
- wantStoreData := []*load.Data{{ - Cluster: testCluster, - Service: testEDSService, - LocalityStats: map[string]load.LocalityData{ - testBalancerIDs[0]: { - RequestStats: load.RequestData{Succeeded: 10, InProgress: 10}, - LoadStats: map[string]load.ServerLoadData{ - "cpu_utilization": {Count: 10, Sum: 100}, - "mem_utilization": {Count: 10, Sum: 50}, - "pic": {Count: 10, Sum: 31.4}, - "piu": {Count: 10, Sum: 31.4}, - }, - }, - testBalancerIDs[1]: { - RequestStats: load.RequestData{Succeeded: 10}, - LoadStats: map[string]load.ServerLoadData{ - "cpu_utilization": {Count: 10, Sum: 100}, - "mem_utilization": {Count: 10, Sum: 50}, - "pic": {Count: 10, Sum: 31.4}, - "piu": {Count: 10, Sum: 31.4}, - }, - }, - }, - }} - for i := 0; i < 30; i++ { - scst, _ := p1.Pick(balancer.PickInfo{}) - if scst.Done != nil && scst.SubConn != sc1 { - scst.Done(balancer.DoneInfo{ - ServerLoad: &orcapb.OrcaLoadReport{ - CpuUtilization: 10, - MemUtilization: 5, - RequestCost: map[string]float64{"pic": 3.14}, - Utilization: map[string]float64{"piu": 3.14}, - }, - }) - } - } - - gotStoreData := loadStore.Stats([]string{testCluster}) - if diff := cmp.Diff(wantStoreData, gotStoreData, cmpopts.EquateEmpty(), cmpopts.EquateApprox(0, 0.1), cmpopts.IgnoreFields(load.Data{}, "ReportInterval")); diff != "" { - t.Errorf("store.stats() returned unexpected diff (-want +got):\n%s", diff) - } -} - // Create a new balancer group, add balancer and backends, but not start. // - b1, weight 2, backends [0,1] // - b2, weight 1, backends [2,3] @@ -502,7 +419,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { cc := testutils.NewTestClientConn(t) gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil, nil) + bg := New(cc, balancer.BuildOptions{}, gator, nil) // Add two balancers to group and send two resolved addresses to both // balancers. 
@@ -598,7 +515,7 @@ func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { cc := testutils.NewTestClientConn(t) gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil, nil) + bg := New(cc, balancer.BuildOptions{}, gator, nil) gator.Add(testBalancerIDs[0], 2) bg.Add(testBalancerIDs[0], builder) @@ -614,7 +531,7 @@ func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { // transient_failure, the picks won't fail with transient_failure, and should // instead wait for the other sub-balancer. func (s) TestBalancerGroup_InitOneSubBalancerTransientFailure(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add two balancers to group and send one resolved address to both // balancers. @@ -645,7 +562,7 @@ func (s) TestBalancerGroup_InitOneSubBalancerTransientFailure(t *testing.T) { // connecting, the overall state stays in transient_failure, and all picks // return transient failure error. func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) + cc, gator, bg := newTestBalancerGroup(t) // Add two balancers to group and send one resolved address to both // balancers. @@ -700,7 +617,7 @@ func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregat cc := testutils.NewTestClientConn(t) gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil, nil) + bg := New(cc, balancer.BuildOptions{}, gator, nil) // Add two balancers to group and send two resolved addresses to both // balancers. @@ -993,7 +910,7 @@ func (s) TestBalancerGroupBuildOptions(t *testing.T) { }, }) cc := testutils.NewTestClientConn(t) - bg := New(cc, bOpts, nil, nil, nil) + bg := New(cc, bOpts, nil, nil) bg.Start() // Add the stub balancer build above as a child policy. 
diff --git a/xds/internal/balancer/balancergroup/testutils_test.go b/xds/internal/balancer/balancergroup/testutils_test.go deleted file mode 100644 index 1429fa87b3f2..000000000000 --- a/xds/internal/balancer/balancergroup/testutils_test.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package balancergroup - -import ( - "testing" - - "google.golang.org/grpc/internal/grpctest" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index 318545d79b01..188b39d467af 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -46,7 +46,7 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.logger = prefixLogger(b) b.stateAggregator = newBalancerStateAggregator(cc, b.logger) b.stateAggregator.start() - b.bg = balancergroup.New(cc, opts, b.stateAggregator, nil, b.logger) + b.bg = balancergroup.New(cc, opts, b.stateAggregator, b.logger) b.bg.Start() b.logger.Infof("Created") return b diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 23e8aa775030..39053dbc1bfe 100644 --- a/xds/internal/balancer/priority/balancer.go +++ 
b/xds/internal/balancer/priority/balancer.go @@ -59,7 +59,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } b.logger = prefixLogger(b) - b.bg = balancergroup.New(cc, bOpts, b, nil, b.logger) + b.bg = balancergroup.New(cc, bOpts, b, b.logger) b.bg.Start() go b.run() b.logger.Infof("Created") diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/xds/internal/balancer/weightedtarget/weightedtarget.go index f05e0aca19f3..d036c9b5bd5b 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -52,7 +52,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba b.logger = prefixLogger(b) b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) b.stateAggregator.Start() - b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, nil, b.logger) + b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, b.logger) b.bg.Start() b.logger.Infof("Created") return b From 878cea23105641ab36ceb688af326139cf60b842 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 5 Nov 2021 12:03:53 -0700 Subject: [PATCH 321/998] kokoro: Enable xds authz_test (#4954) * kokoro: Enable xds authz_test --- test/kokoro/xds_k8s.cfg | 2 +- test/kokoro/xds_k8s.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/kokoro/xds_k8s.cfg b/test/kokoro/xds_k8s.cfg index 4d5e019991f6..4b6bcb58d75d 100644 --- a/test/kokoro/xds_k8s.cfg +++ b/test/kokoro/xds_k8s.cfg @@ -2,7 +2,7 @@ # Location of the continuous shell script in repository. 
build_file: "grpc-go/test/kokoro/xds_k8s.sh" -timeout_mins: 120 +timeout_mins: 180 action { define_artifacts { diff --git a/test/kokoro/xds_k8s.sh b/test/kokoro/xds_k8s.sh index f91d1d026d67..c76f0860df9f 100755 --- a/test/kokoro/xds_k8s.sh +++ b/test/kokoro/xds_k8s.sh @@ -150,6 +150,7 @@ main() { cd "${TEST_DRIVER_FULL_DIR}" run_test baseline_test run_test security_test + run_test authz_test } main "$@" From 3fa19881780d55772d5d71cf88b4965d28f69af8 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 8 Nov 2021 10:14:30 -0800 Subject: [PATCH 322/998] internal: update pb.go (#4962) --- channelz/grpc_channelz_v1/channelz.pb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index 416b3528d5c6..4caf9e76e54c 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -167,7 +167,7 @@ type Channel struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The identifier for this channel. This should bet set. + // The identifier for this channel. This should be set. Ref *ChannelRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // Data specific to this channel. Data *ChannelData `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. 
From 79e9c9571a1949d3abae203a127fa5d4f02fb071 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 8 Nov 2021 11:11:24 -0800 Subject: [PATCH 323/998] xds/client: move unmarshal functions and types to a separate package (#4904) --- xds/csds/csds.go | 13 +- xds/csds/csds_test.go | 9 +- .../balancer/cdsbalancer/cdsbalancer.go | 7 +- .../cdsbalancer/cdsbalancer_security_test.go | 24 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 31 +- .../balancer/cdsbalancer/cluster_handler.go | 21 +- .../cdsbalancer/cluster_handler_test.go | 120 +- .../clusterresolver/clusterresolver_test.go | 15 +- .../balancer/clusterresolver/configbuilder.go | 20 +- .../clusterresolver/configbuilder_test.go | 184 +-- .../balancer/clusterresolver/eds_impl_test.go | 5 +- .../clusterresolver/resource_resolver.go | 7 +- .../clusterresolver/resource_resolver_test.go | 8 +- .../balancer/clusterresolver/testutil_test.go | 26 +- xds/internal/resolver/serviceconfig.go | 24 +- xds/internal/resolver/serviceconfig_test.go | 16 +- xds/internal/resolver/watch_service.go | 13 +- xds/internal/resolver/watch_service_test.go | 138 +- xds/internal/resolver/xds_resolver_test.go | 161 +- xds/internal/server/conn_wrapper.go | 8 +- xds/internal/server/listener_wrapper.go | 19 +- xds/internal/server/listener_wrapper_test.go | 28 +- xds/internal/server/rds_handler.go | 12 +- xds/internal/server/rds_handler_test.go | 30 +- xds/internal/testutils/fakeclient/client.go | 31 +- xds/internal/xdsclient/attributes.go | 9 +- xds/internal/xdsclient/callback.go | 29 +- xds/internal/xdsclient/client.go | 533 +------ xds/internal/xdsclient/client_test.go | 49 +- xds/internal/xdsclient/dump.go | 17 +- xds/internal/xdsclient/dump_test.go | 121 +- xds/internal/xdsclient/v2/ack_test.go | 3 +- xds/internal/xdsclient/v2/cds_test.go | 39 +- xds/internal/xdsclient/v2/client.go | 19 +- xds/internal/xdsclient/v2/client_test.go | 47 +- xds/internal/xdsclient/v2/eds_test.go | 45 +- xds/internal/xdsclient/v2/lds_test.go | 68 +- 
xds/internal/xdsclient/v2/rds_test.go | 72 +- xds/internal/xdsclient/v3/client.go | 19 +- xds/internal/xdsclient/watchers.go | 29 +- .../xdsclient/watchers_cluster_test.go | 125 +- .../xdsclient/watchers_endpoints_test.go | 93 +- .../xdsclient/watchers_listener_test.go | 139 +- xds/internal/xdsclient/watchers_route_test.go | 103 +- xds/internal/xdsclient/xds.go | 1345 ----------------- .../{ => xdsresource}/filter_chain.go | 41 +- .../{ => xdsresource}/filter_chain_test.go | 53 +- .../xdsclient/{ => xdsresource}/matcher.go | 3 +- .../{ => xdsresource}/matcher_path.go | 3 +- .../{ => xdsresource}/matcher_path_test.go | 9 +- .../{ => xdsresource}/matcher_test.go | 7 +- .../xdsclient/xdsresource/test_utils_test.go | 52 + xds/internal/xdsclient/xdsresource/type.go | 107 ++ .../xdsclient/xdsresource/type_cds.go | 87 ++ .../xdsclient/xdsresource/type_eds.go | 80 + .../xdsclient/xdsresource/type_lds.go | 87 ++ .../xdsclient/xdsresource/type_rds.go | 245 +++ .../xdsclient/xdsresource/unmarshal.go | 174 +++ .../xdsclient/xdsresource/unmarshal_cds.go | 456 ++++++ .../unmarshal_cds_test.go} | 5 +- .../xdsclient/xdsresource/unmarshal_eds.go | 131 ++ .../unmarshal_eds_test.go} | 5 +- .../xdsclient/xdsresource/unmarshal_lds.go | 297 ++++ .../unmarshal_lds_test.go} | 5 +- .../xdsclient/xdsresource/unmarshal_rds.go | 373 +++++ .../unmarshal_rds_test.go} | 5 +- xds/server.go | 9 +- xds/server_test.go | 23 +- 68 files changed, 3218 insertions(+), 2913 deletions(-) delete mode 100644 xds/internal/xdsclient/xds.go rename xds/internal/xdsclient/{ => xdsresource}/filter_chain.go (95%) rename xds/internal/xdsclient/{ => xdsresource}/filter_chain_test.go (98%) rename xds/internal/xdsclient/{ => xdsresource}/matcher.go (99%) rename xds/internal/xdsclient/{ => xdsresource}/matcher_path.go (99%) rename xds/internal/xdsclient/{ => xdsresource}/matcher_path_test.go (94%) rename xds/internal/xdsclient/{ => xdsresource}/matcher_test.go (98%) create mode 100644 
xds/internal/xdsclient/xdsresource/test_utils_test.go create mode 100644 xds/internal/xdsclient/xdsresource/type.go create mode 100644 xds/internal/xdsclient/xdsresource/type_cds.go create mode 100644 xds/internal/xdsclient/xdsresource/type_eds.go create mode 100644 xds/internal/xdsclient/xdsresource/type_lds.go create mode 100644 xds/internal/xdsclient/xdsresource/type_rds.go create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal.go create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_cds.go rename xds/internal/xdsclient/{cds_test.go => xdsresource/unmarshal_cds_test.go} (99%) create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_eds.go rename xds/internal/xdsclient/{eds_test.go => xdsresource/unmarshal_eds_test.go} (99%) create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_lds.go rename xds/internal/xdsclient/{lds_test.go => xdsresource/unmarshal_lds_test.go} (99%) create mode 100644 xds/internal/xdsclient/xdsresource/unmarshal_rds.go rename xds/internal/xdsclient/{rds_test.go => xdsresource/unmarshal_rds_test.go} (99%) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 23f9c760b637..5b9d1c467cb2 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/timestamppb" _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register v2 xds_client. 
@@ -197,17 +198,17 @@ func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xds return ret } -func serviceStatusToProto(serviceStatus xdsclient.ServiceStatus) v3adminpb.ClientResourceStatus { +func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { switch serviceStatus { - case xdsclient.ServiceStatusUnknown: + case xdsresource.ServiceStatusUnknown: return v3adminpb.ClientResourceStatus_UNKNOWN - case xdsclient.ServiceStatusRequested: + case xdsresource.ServiceStatusRequested: return v3adminpb.ClientResourceStatus_REQUESTED - case xdsclient.ServiceStatusNotExist: + case xdsresource.ServiceStatusNotExist: return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST - case xdsclient.ServiceStatusACKed: + case xdsresource.ServiceStatusACKed: return v3adminpb.ClientResourceStatus_ACKED - case xdsclient.ServiceStatusNACKed: + case xdsresource.ServiceStatusNACKed: return v3adminpb.ClientResourceStatus_NACKED default: return v3adminpb.ClientResourceStatus_UNKNOWN diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 88df77a62242..0bf305899de8 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -35,6 +35,7 @@ import ( _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" @@ -140,16 +141,16 @@ func TestCSDS(t *testing.T) { defer cleanup() for _, target := range ldsTargets { - xdsC.WatchListener(target, func(xdsclient.ListenerUpdate, error) {}) + xdsC.WatchListener(target, func(xdsresource.ListenerUpdate, error) {}) } for _, target := range rdsTargets { - xdsC.WatchRouteConfig(target, func(xdsclient.RouteConfigUpdate, error) {}) + xdsC.WatchRouteConfig(target, func(xdsresource.RouteConfigUpdate, error) {}) } for _, target := range 
cdsTargets { - xdsC.WatchCluster(target, func(xdsclient.ClusterUpdate, error) {}) + xdsC.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) } for _, target := range edsTargets { - xdsC.WatchEndpoints(target, func(xdsclient.EndpointsUpdate, error) {}) + xdsC.WatchEndpoints(target, func(xdsresource.EndpointsUpdate, error) {}) } for i := 0; i < retryCount; i++ { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 82d2a96958e2..9c128dfb4639 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clusterresolver" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -185,7 +186,7 @@ func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { // management server, creates appropriate certificate provider plugins, and // updates the HandhakeInfo which is added as an address attribute in // NewSubConn() calls. -func (b *cdsBalancer) handleSecurityConfig(config *xdsclient.SecurityConfig) error { +func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { // If xdsCredentials are not in use, i.e, the user did not want to get // security configuration from an xDS server, we should not be acting on the // received security config here. Doing so poses a security threat. 
@@ -310,7 +311,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates)) for i, cu := range update.updates { switch cu.ClusterType { - case xdsclient.ClusterTypeEDS: + case xdsresource.ClusterTypeEDS: dms[i] = clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: cu.ClusterName, @@ -324,7 +325,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { dms[i].LoadReportingServerName = new(string) } - case xdsclient.ClusterTypeLogicalDNS: + case xdsresource.ClusterTypeLogicalDNS: dms[i] = clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, DNSHostname: cu.DNSHostName, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 9483818e306e..778d711f2190 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -36,8 +36,8 @@ import ( "google.golang.org/grpc/resolver" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -57,17 +57,17 @@ var ( } fpb1, fpb2 *fakeProviderBuilder bootstrapConfig *bootstrap.Config - cdsUpdateWithGoodSecurityCfg = xdsclient.ClusterUpdate{ + cdsUpdateWithGoodSecurityCfg = xdsresource.ClusterUpdate{ ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default1", IdentityInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, } - cdsUpdateWithMissingSecurityCfg = xdsclient.ClusterUpdate{ + cdsUpdateWithMissingSecurityCfg = xdsresource.ClusterUpdate{ 
ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "not-default", }, } @@ -250,7 +250,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -306,7 +306,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -566,7 +566,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // an update which contains bad security config. So, we expect the CDS // balancer to forward this error to the EDS balancer and eventually the // channel needs to be put in a bad state. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -671,9 +671,9 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ + cdsUpdate := xdsresource.ClusterUpdate{ ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default1", SubjectAltNameMatchers: testSANMatchers, }, @@ -696,9 +696,9 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { } // Push another update with a new security configuration. - cdsUpdate = xdsclient.ClusterUpdate{ + cdsUpdate = xdsresource.ClusterUpdate{ ClusterName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 30b612fc7d01..7979f82e8f6e 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -39,6 +39,7 @@ import ( xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -58,7 +59,7 @@ func Test(t *testing.T) { // cdsWatchInfo wraps the update and the error sent in a CDS watch callback. 
type cdsWatchInfo struct { - update xdsclient.ClusterUpdate + update xdsresource.ClusterUpdate err error } @@ -361,25 +362,25 @@ func (s) TestHandleClusterUpdate(t *testing.T) { tests := []struct { name string - cdsUpdate xdsclient.ClusterUpdate + cdsUpdate xdsresource.ClusterUpdate updateErr error wantCCS balancer.ClientConnState }{ { name: "happy-case-with-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, + cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, wantCCS: edsCCS(serviceName, nil, true, nil), }, { name: "happy-case-without-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ClusterName: serviceName}, + cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName}, wantCCS: edsCCS(serviceName, nil, false, nil), }, { name: "happy-case-with-ring-hash-lb-policy", - cdsUpdate: xdsclient.ClusterUpdate{ + cdsUpdate: xdsresource.ClusterUpdate{ ClusterName: serviceName, - LBPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, wantCCS: edsCCS(serviceName, nil, false, &internalserviceconfig.BalancerConfig{ Name: ringhash.Name, @@ -417,7 +418,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // resolver error at this point should result in the CDS balancer returning // an error picker. watcherErr := errors.New("cdsBalancer watcher error") - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, watcherErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, watcherErr) // Since the error being pushed here is not a resource-not-found-error, the // registered watch should not be cancelled. @@ -451,14 +452,14 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } // Again push a non-resource-not-found-error through the watcher callback. - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, watcherErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, watcherErr) // Make sure the registered watch is not cancelled. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() @@ -472,7 +473,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // Push a resource-not-found-error this time around. resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, resourceErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, resourceErr) // Make sure that the watch is not cancelled. This error indicates that the // request cluster resource is not found. We should continue to watch it. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -536,7 +537,7 @@ func (s) TestResolverError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) @@ -585,7 +586,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -620,7 +621,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. var maxRequests uint32 = 1 - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} wantCCS := edsCCS(clusterName, &maxRequests, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -653,7 +654,7 @@ func (s) TestClose(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -724,7 +725,7 @@ func (s) TestExitIdle(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- cdsUpdate := xdsclient.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} wantCCS := edsCCS(serviceName, nil, false, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index 163a8c0a2e18..a10d8d772f2b 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -21,6 +21,7 @@ import ( "sync" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") @@ -31,17 +32,17 @@ var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a // (if one doesn't already exist) and pushing the update to it. type clusterHandlerUpdate struct { // securityCfg is the Security Config from the top (root) cluster. - securityCfg *xdsclient.SecurityConfig + securityCfg *xdsresource.SecurityConfig // lbPolicy is the lb policy from the top (root) cluster. // // Currently, we only support roundrobin or ringhash, and since roundrobin // does need configs, this is only set to the ringhash config, if the policy // is ringhash. In the future, if we support more policies, we can make this // an interface, and set it to config of the other policies. - lbPolicy *xdsclient.ClusterLBPolicyRingHash + lbPolicy *xdsresource.ClusterLBPolicyRingHash // updates is a list of ClusterUpdates from all the leaf clusters. - updates []xdsclient.ClusterUpdate + updates []xdsresource.ClusterUpdate err error } @@ -139,7 +140,7 @@ type clusterNode struct { // A ClusterUpdate in order to build a list of cluster updates for CDS to // send down to child XdsClusterResolverLoadBalancingPolicy. 
- clusterUpdate xdsclient.ClusterUpdate + clusterUpdate xdsresource.ClusterUpdate // This boolean determines whether this Node has received an update or not. // This isn't the best practice, but this will protect a list of Cluster @@ -176,7 +177,7 @@ func (c *clusterNode) delete() { } // Construct cluster update (potentially a list of ClusterUpdates) for a node. -func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error) { +func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, error) { // If the cluster has not yet received an update, the cluster update is not // yet ready. if !c.receivedUpdate { @@ -185,13 +186,13 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error // Base case - LogicalDNS or EDS. Both of these cluster types will be tied // to a single ClusterUpdate. - if c.clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { - return []xdsclient.ClusterUpdate{c.clusterUpdate}, nil + if c.clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { + return []xdsresource.ClusterUpdate{c.clusterUpdate}, nil } // If an aggregate construct a list by recursively calling down to all of // it's children. - var childrenUpdates []xdsclient.ClusterUpdate + var childrenUpdates []xdsresource.ClusterUpdate for _, child := range c.children { childUpdateList, err := child.constructClusterUpdate() if err != nil { @@ -206,7 +207,7 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error // also handles any logic with regards to any child state that may have changed. // At the end of the handleResp(), the clusterUpdate will be pinged in certain // situations to try and construct an update to send back to CDS. 
-func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err error) { +func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err error) { c.clusterHandler.clusterMutex.Lock() defer c.clusterHandler.clusterMutex.Unlock() if err != nil { // Write this error for run() to pick up in CDS LB policy. @@ -230,7 +231,7 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro // handler to return. Also, if there was any children from previously, // delete the children, as the cluster type is no longer an aggregate // cluster. - if clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { + if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { for _, child := range c.children { child.delete() } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index cb9b4e14da3c..4a00fe7d542a 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -23,7 +23,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -50,32 +50,32 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { tests := []struct { name string clusterName string - clusterUpdate xdsclient.ClusterUpdate - lbPolicy *xdsclient.ClusterLBPolicyRingHash + clusterUpdate xdsresource.ClusterUpdate + lbPolicy *xdsresource.ClusterLBPolicyRingHash }{ { name: "test-update-root-cluster-EDS-success", clusterName: edsService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, }, { name: "test-update-root-cluster-EDS-with-ring-hash", clusterName: logicalDNSService, - 
clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, - LBPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, - lbPolicy: &xdsclient.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + lbPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, { name: "test-update-root-cluster-Logical-DNS-success", clusterName: logicalDNSService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, }, @@ -107,7 +107,7 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{test.clusterUpdate}); diff != "" { + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.clusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } if diff := cmp.Diff(chu.lbPolicy, test.lbPolicy); diff != "" { @@ -140,29 +140,29 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { tests := []struct { name string clusterName string - clusterUpdate xdsclient.ClusterUpdate - newClusterUpdate xdsclient.ClusterUpdate + clusterUpdate xdsresource.ClusterUpdate + newClusterUpdate xdsresource.ClusterUpdate }{ {name: "test-update-root-cluster-then-new-update-EDS-success", clusterName: edsService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, - 
newClusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + newClusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService2, }, }, { name: "test-update-root-cluster-then-new-update-Logical-DNS-success", clusterName: logicalDNSService, - clusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, - newClusterUpdate: xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + newClusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService2, }, }, @@ -204,7 +204,7 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { fakeClient.InvokeWatchClusterCallback(test.newClusterUpdate, nil) select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{test.newClusterUpdate}); diff != "" { + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.newClusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -242,8 +242,8 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // start watches for the aggregate cluster's children. The ping to the // clusterHandler at the end of handleResp should be a no-op, as neither the // EDS or LogicalDNS child clusters have received an update yet. 
- fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) @@ -290,8 +290,8 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { } // Send callback for the EDS child cluster. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, nil) @@ -306,8 +306,8 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // Invoke callback for Logical DNS child cluster. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, nil) @@ -320,11 +320,11 @@ func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { // ordered as per the cluster update. 
select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ - ClusterType: xdsclient.ClusterTypeEDS, + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, { - ClusterType: xdsclient.ClusterTypeLogicalDNS, + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) @@ -352,19 +352,19 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) fakeClient.WaitForWatchCluster(ctx) fakeClient.WaitForWatchCluster(ctx) - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, nil) - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, nil) @@ -374,8 +374,8 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, 
ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService2}, }, nil) @@ -414,8 +414,8 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { // Invoke a callback for the new logicalDNSService2 - this will fill out the // tree with successful updates. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService2, }, nil) @@ -427,11 +427,11 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ - ClusterType: xdsclient.ClusterTypeEDS, + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, { - ClusterType: xdsclient.ClusterTypeLogicalDNS, + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService2, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) @@ -459,19 +459,19 @@ func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: aggregateClusterService, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) fakeClient.WaitForWatchCluster(ctx) fakeClient.WaitForWatchCluster(ctx) - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService, }, nil) 
- fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeLogicalDNS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, ClusterName: logicalDNSService, }, nil) @@ -536,7 +536,7 @@ func (s) TestHandleRespInvokedWithError(t *testing.T) { if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, errors.New("some error")) + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, errors.New("some error")) select { case chu := <-ch.updateChannel: if chu.err.Error() != "some error" { @@ -563,8 +563,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService2, }, nil) select { @@ -574,8 +574,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { } // Switch the cluster to an aggregate cluster, this should cause two new // child watches to be created. - fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeAggregate, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, ClusterName: edsService2, PrioritizedClusterNames: []string{edsService, logicalDNSService}, }, nil) @@ -632,8 +632,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // Switch the cluster back to an EDS Cluster. This should cause the two // children to be deleted. 
- fakeClient.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{ - ClusterType: xdsclient.ClusterTypeEDS, + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService2, }, nil) @@ -673,8 +673,8 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // Then an update should successfully be written to the update buffer. select { case chu := <-ch.updateChannel: - if diff := cmp.Diff(chu.updates, []xdsclient.ClusterUpdate{{ - ClusterType: xdsclient.ClusterTypeEDS, + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, ClusterName: edsService2, }}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 6af81f89f1f3..2cd692bbade9 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // V2 client registration. ) @@ -47,10 +48,10 @@ const ( var ( // A non-empty endpoints update which is expected to be accepted by the EDS // LB policy. 
- defaultEndpointsUpdate = xdsclient.EndpointsUpdate{ - Localities: []xdsclient.Locality{ + defaultEndpointsUpdate = xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{{Address: "endpoint1"}}, + Endpoints: []xdsresource.Endpoint{{Address: "endpoint1"}}, ID: internal.LocalityID{Zone: "zone"}, Priority: 1, Weight: 100, @@ -270,7 +271,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) edsLB, err := waitForNewChildLB(ctx, edsLBCh) if err != nil { t.Fatal(err) @@ -280,7 +281,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { } connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, connectionErr) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, connectionErr) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() @@ -298,7 +299,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { } resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, resourceErr) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, resourceErr) // Even if error is resource not found, watch shouldn't be canceled, because // this is an EDS resource removed (and xds client actually never sends this // error, but we still handles it). 
@@ -359,7 +360,7 @@ func (s) TestErrorFromResolver(t *testing.T) { if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) edsLB, err := waitForNewChildLB(ctx, edsLBCh) if err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 475497d48950..741744ee3fc1 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -33,7 +33,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const million = 1000000 @@ -48,7 +48,7 @@ const million = 1000000 type priorityConfig struct { mechanism DiscoveryMechanism // edsResp is set only if type is EDS. - edsResp xdsclient.EndpointsUpdate + edsResp xdsresource.EndpointsUpdate // addresses is set only if type is DNS. 
addresses []string } @@ -169,7 +169,7 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { +func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -205,9 +205,9 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.Endpoint // For example, for L0-p0, L1-p0, L2-p1, results will be // - ["p0", "p1"] // - map{"p0":[L0, L1], "p1":[L2]} -func groupLocalitiesByPriority(localities []xdsclient.Locality) ([]string, map[string][]xdsclient.Locality) { +func groupLocalitiesByPriority(localities []xdsresource.Locality) ([]string, map[string][]xdsresource.Locality) { var priorityIntSlice []int - priorities := make(map[string][]xdsclient.Locality) + priorities := make(map[string][]xdsresource.Locality) for _, locality := range localities { if locality.Weight == 0 { continue @@ -252,7 +252,7 @@ var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Na // priorityLocalitiesToClusterImpl takes a list of localities (with the same // priority), and generates a cluster impl policy config, and a list of // addresses. 
-func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { +func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { clusterImplCfg := &clusterimpl.LBConfig{ Cluster: mechanism.Cluster, EDSServiceName: mechanism.EDSServiceName, @@ -293,7 +293,7 @@ func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityNa // // The addresses have path hierarchy set to [priority-name], so priority knows // which child policy they are for. -func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) []resolver.Address { +func localitiesToRingHash(localities []xdsresource.Locality, priorityName string) []resolver.Address { var addrs []resolver.Address for _, locality := range localities { var lw uint32 = 1 @@ -308,7 +308,7 @@ func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) // Filter out all "unhealthy" endpoints (unknown and healthy are // both considered to be healthy: // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). - if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { + if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } @@ -333,7 +333,7 @@ func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) // // The addresses have path hierarchy set to [priority-name, locality-name], so // priority and weighted target know which child policy they are for. 
-func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) { +func localitiesToWeightedTarget(localities []xdsresource.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) { weightedTargets := make(map[string]weightedtarget.Target) var addrs []resolver.Address for _, locality := range localities { @@ -346,7 +346,7 @@ func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName st // Filter out all "unhealthy" endpoints (unknown and healthy are // both considered to be healthy: // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). - if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { + if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 3e2ad8a2e64e..c2b68b946f06 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/weightedtarget" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -55,9 +55,9 @@ const ( var ( testLocalityIDs []internal.LocalityID testAddressStrs [][]string - testEndpoints [][]xdsclient.Endpoint + testEndpoints [][]xdsresource.Endpoint - testLocalitiesP0, testLocalitiesP1 []xdsclient.Locality + testLocalitiesP0, 
testLocalitiesP1 []xdsresource.Locality addrCmpOpts = cmp.Options{ cmp.AllowUnexported(attributes.Attributes{}), @@ -75,21 +75,21 @@ func init() { testLocalityIDs = append(testLocalityIDs, internal.LocalityID{Zone: fmt.Sprintf("test-zone-%d", i)}) var ( addrs []string - ends []xdsclient.Endpoint + ends []xdsresource.Endpoint ) for j := 0; j < addressPerLocality; j++ { addr := fmt.Sprintf("addr-%d-%d", i, j) addrs = append(addrs, addr) - ends = append(ends, xdsclient.Endpoint{ + ends = append(ends, xdsresource.Endpoint{ Address: addr, - HealthStatus: xdsclient.EndpointHealthStatusHealthy, + HealthStatus: xdsresource.EndpointHealthStatusHealthy, }) } testAddressStrs = append(testAddressStrs, addrs) testEndpoints = append(testEndpoints, ends) } - testLocalitiesP0 = []xdsclient.Locality{ + testLocalitiesP0 = []xdsresource.Locality{ { Endpoints: testEndpoints[0], ID: testLocalityIDs[0], @@ -103,7 +103,7 @@ func init() { Priority: 0, }, } - testLocalitiesP1 = []xdsclient.Locality{ + testLocalitiesP1 = []xdsresource.Locality{ { Endpoints: testEndpoints[2], ID: testLocalityIDs[2], @@ -131,15 +131,15 @@ func TestBuildPriorityConfigJSON(t *testing.T) { Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServiceName, }, - edsResp: xdsclient.EndpointsUpdate{ - Drops: []xdsclient.OverloadDropConfig{ + edsResp: xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ { Category: testDropCategory, Numerator: testDropOverMillion, Denominator: million, }, }, - Localities: []xdsclient.Locality{ + Localities: []xdsresource.Locality{ testLocalitiesP0[0], testLocalitiesP0[1], testLocalitiesP1[0], @@ -181,15 +181,15 @@ func TestBuildPriorityConfig(t *testing.T) { Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServiceName, }, - edsResp: xdsclient.EndpointsUpdate{ - Drops: []xdsclient.OverloadDropConfig{ + edsResp: xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ { Category: testDropCategory, Numerator: testDropOverMillion, Denominator: 
million, }, }, - Localities: []xdsclient.Locality{ + Localities: []xdsresource.Locality{ testLocalitiesP0[0], testLocalitiesP0[1], testLocalitiesP1[0], @@ -333,15 +333,15 @@ func TestBuildClusterImplConfigForDNS(t *testing.T) { func TestBuildClusterImplConfigForEDS(t *testing.T) { gotNames, gotConfigs, gotAddrs, _ := buildClusterImplConfigForEDS( 2, - xdsclient.EndpointsUpdate{ - Drops: []xdsclient.OverloadDropConfig{ + xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ { Category: testDropCategory, Numerator: testDropOverMillion, Denominator: million, }, }, - Localities: []xdsclient.Locality{ + Localities: []xdsresource.Locality{ { Endpoints: testEndpoints[3], ID: testLocalityIDs[3], @@ -461,42 +461,42 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { func TestGroupLocalitiesByPriority(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality wantPriorities []string - wantLocalities map[string][]xdsclient.Locality + wantLocalities map[string][]xdsresource.Locality }{ { name: "1 locality 1 priority", - localities: []xdsclient.Locality{testLocalitiesP0[0]}, + localities: []xdsresource.Locality{testLocalitiesP0[0]}, wantPriorities: []string{"0"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0]}, }, }, { name: "2 locality 1 priority", - localities: []xdsclient.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, + localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, wantPriorities: []string{"0"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, }, }, { name: "1 locality in each", - localities: []xdsclient.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, + localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, wantPriorities: []string{"0", "1"}, - 
wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0]}, "1": {testLocalitiesP1[0]}, }, }, { name: "2 localities in each sorted", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ testLocalitiesP0[0], testLocalitiesP0[1], testLocalitiesP1[0], testLocalitiesP1[1]}, wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, "1": {testLocalitiesP1[0], testLocalitiesP1[1]}, }, @@ -506,11 +506,11 @@ func TestGroupLocalitiesByPriority(t *testing.T) { // returned priority list must be sorted [p0, p1], because the list // order is the priority order. name: "2 localities in each needs to sort", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ testLocalitiesP1[1], testLocalitiesP0[1], testLocalitiesP1[0], testLocalitiesP0[0]}, wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsclient.Locality{ + wantLocalities: map[string][]xdsresource.Locality{ "0": {testLocalitiesP0[1], testLocalitiesP0[0]}, "1": {testLocalitiesP1[1], testLocalitiesP1[0]}, }, @@ -563,7 +563,7 @@ func TestDedupSortedIntSlice(t *testing.T) { func TestPriorityLocalitiesToClusterImpl(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality priorityName string mechanism DiscoveryMechanism childPolicy *internalserviceconfig.BalancerConfig @@ -572,19 +572,19 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { wantErr bool }{{ name: "round robin as child, no LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: 
[]xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -630,19 +630,19 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, { name: "ring_hash as child", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -666,10 +666,10 @@ func 
TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, { name: "unsupported child", - localities: []xdsclient.Locality{{ - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + localities: []xdsresource.Locality{{ + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, @@ -698,7 +698,7 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { func TestLocalitiesToWeightedTarget(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality priorityName string childPolicy *internalserviceconfig.BalancerConfig lrsServer *string @@ -707,19 +707,19 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }{ { name: "roundrobin as child, with LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: 
xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -749,19 +749,19 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }, { name: "roundrobin as child, no LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -795,19 +795,19 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { }, { name: "weighted round robin as child, no LRS", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: 
xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -856,26 +856,26 @@ func TestLocalitiesToWeightedTarget(t *testing.T) { func TestLocalitiesToRingHash(t *testing.T) { tests := []struct { name string - localities []xdsclient.Locality + localities []xdsresource.Locality priorityName string wantAddrs []resolver.Address }{ { // Check that address weights are locality_weight * endpoint_weight. name: "with locality and endpoint weight", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -892,19 +892,19 @@ func TestLocalitiesToRingHash(t *testing.T) { { // Check that endpoint_weight is 0, weight 
is the locality weight. name: "locality weight only", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, Weight: 20, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, Weight: 80, @@ -921,18 +921,18 @@ func TestLocalitiesToRingHash(t *testing.T) { { // Check that locality_weight is 0, weight is the endpoint weight. 
name: "endpoint weight only", - localities: []xdsclient.Locality{ + localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-1"}, }, { - Endpoints: []xdsclient.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsclient.EndpointHealthStatusHealthy, Weight: 10}, + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, }, ID: internal.LocalityID{Zone: "test-zone-2"}, }, diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index 00814a6212b2..feb96cfa56b5 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -405,7 +406,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { defer func() { balancergroup.DefaultSubBalancerCloseTimeout = oldCacheTimeout }() // The first update is an empty update. 
- xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { t.Fatal(err) @@ -425,7 +426,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { t.Fatal(err) } - xdsC.InvokeWatchEDSCallback("", xdsclient.EndpointsUpdate{}, nil) + xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) // Pick should fail with transient failure, and all priority removed error. if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 2125bd2326f2..9d7db26ad14a 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -22,6 +22,7 @@ import ( "sync" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // resourceUpdate is a combined update from all the resources, in the order of @@ -186,7 +187,7 @@ func (rr *resourceResolver) generate() { return } switch uu := u.(type) { - case xdsclient.EndpointsUpdate: + case xdsresource.EndpointsUpdate: ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu}) case []string: ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu}) @@ -202,7 +203,7 @@ func (rr *resourceResolver) generate() { type edsDiscoveryMechanism struct { cancel func() - update xdsclient.EndpointsUpdate + update xdsresource.EndpointsUpdate updateReceived bool } @@ -224,7 +225,7 @@ func (er *edsDiscoveryMechanism) stop() { func newEDSResolver(nameToWatch string, xdsc xdsclient.XDSClient, topLevelResolver *resourceResolver) *edsDiscoveryMechanism { ret := &edsDiscoveryMechanism{} 
topLevelResolver.parent.logger.Infof("EDS watch started on %v", nameToWatch) - cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsclient.EndpointsUpdate, err error) { + cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsresource.EndpointsUpdate, err error) { topLevelResolver.mu.Lock() defer topLevelResolver.mu.Unlock() if err != nil { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index 2a365850cd78..432fdd9ceb65 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -28,7 +28,7 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - xdsclient "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -36,7 +36,7 @@ const ( ) var ( - testEDSUpdates []xdsclient.EndpointsUpdate + testEDSUpdates []xdsresource.EndpointsUpdate ) func init() { @@ -54,7 +54,7 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { name string clusterName, edsName string wantName string - edsUpdate xdsclient.EndpointsUpdate + edsUpdate xdsresource.EndpointsUpdate want []priorityConfig }{ {name: "watch EDS", @@ -779,7 +779,7 @@ func (s) TestResourceResolverError(t *testing.T) { // Invoke callback with an error, should get an update. 
edsErr := fmt.Errorf("EDS error") - fakeClient.InvokeWatchEDSCallback(gotEDSName1, xdsclient.EndpointsUpdate{}, edsErr) + fakeClient.InvokeWatchEDSCallback(gotEDSName1, xdsresource.EndpointsUpdate{}, edsErr) select { case u := <-rr.updateChannel: if u.err != edsErr { diff --git a/xds/internal/balancer/clusterresolver/testutil_test.go b/xds/internal/balancer/clusterresolver/testutil_test.go index 48759603827a..999621a7b3e4 100644 --- a/xds/internal/balancer/clusterresolver/testutil_test.go +++ b/xds/internal/balancer/clusterresolver/testutil_test.go @@ -30,14 +30,14 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // parseEDSRespProtoForTesting parses EDS response, and panic if parsing fails. // // TODO: delete this. The EDS balancer tests should build an EndpointsUpdate // directly, instead of building and parsing a proto message. -func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsclient.EndpointsUpdate { +func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsresource.EndpointsUpdate { u, err := parseEDSRespProto(m) if err != nil { panic(err.Error()) @@ -46,8 +46,8 @@ func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsclient.Endpo } // parseEDSRespProto turns EDS response proto message to EndpointsUpdate. 
-func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdate, error) { - ret := xdsclient.EndpointsUpdate{} +func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsresource.EndpointsUpdate, error) { + ret := xdsresource.EndpointsUpdate{} for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) } @@ -55,7 +55,7 @@ func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdat for _, locality := range m.Endpoints { l := locality.GetLocality() if l == nil { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) + return xdsresource.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) } lid := internal.LocalityID{ Region: l.Region, @@ -64,7 +64,7 @@ func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdat } priority := locality.GetPriority() priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, xdsclient.Locality{ + ret.Localities = append(ret.Localities, xdsresource.Locality{ ID: lid, Endpoints: parseEndpoints(locality.GetLbEndpoints()), Weight: locality.GetLoadBalancingWeight().GetValue(), @@ -73,7 +73,7 @@ func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdat } for i := 0; i < len(priorities); i++ { if _, ok := priorities[uint32(i)]; !ok { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) + return xdsresource.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) } } return ret, nil @@ -83,7 +83,7 @@ func parseAddress(socketAddress *corepb.SocketAddress) string { return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) } -func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) 
xdsclient.OverloadDropConfig { +func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) xdsresource.OverloadDropConfig { percentage := dropPolicy.GetDropPercentage() var ( numerator = percentage.GetNumerator() @@ -97,18 +97,18 @@ func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload case typepb.FractionalPercent_MILLION: denominator = 1000000 } - return xdsclient.OverloadDropConfig{ + return xdsresource.OverloadDropConfig{ Category: dropPolicy.GetCategory(), Numerator: numerator, Denominator: denominator, } } -func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsclient.Endpoint { - endpoints := make([]xdsclient.Endpoint, 0, len(lbEndpoints)) +func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsresource.Endpoint { + endpoints := make([]xdsresource.Endpoint, 0, len(lbEndpoints)) for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, xdsclient.Endpoint{ - HealthStatus: xdsclient.EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + endpoints = append(endpoints, xdsresource.Endpoint{ + HealthStatus: xdsresource.EndpointHealthStatus(lbEndpoint.GetHealthStatus()), Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), }) diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index ddf699f938b3..c418bc5d758c 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -40,7 +40,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -109,7 +109,7 @@ type virtualHost struct { // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig // retry policy 
present in virtual host - retryConfig *xdsclient.RetryConfig + retryConfig *xdsresource.RetryConfig } // routeCluster holds information about a cluster as referenced by a route. @@ -120,13 +120,13 @@ type routeCluster struct { } type route struct { - m *xdsclient.CompositeMatcher // converted from route matchers - clusters wrr.WRR // holds *routeCluster entries + m *xdsresource.CompositeMatcher // converted from route matchers + clusters wrr.WRR // holds *routeCluster entries maxStreamDuration time.Duration // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig - retryConfig *xdsclient.RetryConfig - hashPolicies []*xdsclient.HashPolicy + retryConfig *xdsresource.RetryConfig + hashPolicies []*xdsresource.HashPolicy } func (r route) String() string { @@ -138,7 +138,7 @@ type configSelector struct { virtualHost virtualHost routes []route clusters map[string]*clusterInfo - httpFilterConfig []xdsclient.HTTPFilter + httpFilterConfig []xdsresource.HTTPFilter } var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found") @@ -208,7 +208,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP return config, nil } -func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPolicy { +func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPolicy { return &serviceconfig.RetryPolicy{ MaxAttempts: int(config.NumRetries) + 1, InitialBackoff: config.RetryBackoff.BaseInterval, @@ -218,14 +218,14 @@ func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPoli } } -func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsclient.HashPolicy) uint64 { +func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 { var hash uint64 var generatedHash bool for _, policy := range hashPolicies { var policyHash uint64 var generatedPolicyHash bool switch 
policy.HashPolicyType { - case xdsclient.HashPolicyTypeHeader: + case xdsresource.HashPolicyTypeHeader: md, ok := metadata.FromOutgoingContext(rpcInfo.Context) if !ok { continue @@ -242,7 +242,7 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ policyHash = xxhash.Sum64String(joinedValues) generatedHash = true generatedPolicyHash = true - case xdsclient.HashPolicyTypeChannelID: + case xdsresource.HashPolicyTypeChannelID: // Hash the ClientConn pointer which logically uniquely // identifies the client. policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)) @@ -372,7 +372,7 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro cs.routes[i].clusters = clusters var err error - cs.routes[i].m, err = xdsclient.RouteToMatcher(rt) + cs.routes[i].m, err = xdsresource.RouteToMatcher(rt) if err != nil { return nil, err } diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index a1a48944dc46..98d633a9e190 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -29,7 +29,7 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/metadata" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) func (s) TestPruneActiveClusters(t *testing.T) { @@ -57,7 +57,7 @@ func (s) TestGenerateRequestHash(t *testing.T) { } tests := []struct { name string - hashPolicies []*xdsclient.HashPolicy + hashPolicies []*xdsresource.HashPolicy requestHashWant uint64 rpcInfo iresolver.RPCInfo }{ @@ -65,8 +65,8 @@ func (s) TestGenerateRequestHash(t *testing.T) { // hash policies that specify to hash headers. 
{ name: "test-generate-request-hash-headers", - hashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeHeader, + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, HeaderName: ":path", Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), // Will replace /products with /new-products, to test find and replace functionality. RegexSubstitution: "/new-products", @@ -82,8 +82,8 @@ func (s) TestGenerateRequestHash(t *testing.T) { // ClientConn (the pointer). { name: "test-generate-request-hash-channel-id", - hashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeChannelID, + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeChannelID, }}, requestHashWant: xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)), rpcInfo: iresolver.RPCInfo{}, @@ -93,8 +93,8 @@ func (s) TestGenerateRequestHash(t *testing.T) { // strings in the headers. { name: "test-generate-request-hash-empty-string", - hashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeHeader, + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, HeaderName: ":path", Regex: func() *regexp.Regexp { return regexp.MustCompile("") }(), RegexSubstitution: "e", diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index da0bf95f3b9f..4801fc40e43d 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // serviceUpdate contains information received from the LDS/RDS responses which @@ -33,7 +34,7 @@ import ( // making a LDS to get the RouteConfig name. 
type serviceUpdate struct { // virtualHost contains routes and other configuration to route RPCs. - virtualHost *xdsclient.VirtualHost + virtualHost *xdsresource.VirtualHost // ldsConfig contains configuration that applies to all routes. ldsConfig ldsConfig } @@ -44,7 +45,7 @@ type ldsConfig struct { // maxStreamDuration is from the HTTP connection manager's // common_http_protocol_options field. maxStreamDuration time.Duration - httpFilterConfig []xdsclient.HTTPFilter + httpFilterConfig []xdsresource.HTTPFilter } // watchService uses LDS and RDS to discover information about the provided @@ -81,7 +82,7 @@ type serviceUpdateWatcher struct { rdsCancel func() } -func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, err error) { +func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, err error) { w.logger.Infof("received LDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() @@ -150,8 +151,8 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp) } -func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteConfigUpdate) { - matchVh := xdsclient.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) +func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.RouteConfigUpdate) { + matchVh := xdsresource.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) if matchVh == nil { // No matching virtual host found. 
w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) @@ -162,7 +163,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteC w.serviceCb(w.lastUpdate, nil) } -func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, err error) { +func (w *serviceUpdateWatcher) handleRDSResp(update xdsresource.RouteConfigUpdate, err error) { w.logger.Infof("received RDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 1bf65c4d4506..1a4b45bc8ad2 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -28,38 +28,38 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/proto" ) func (s) TestFindBestMatchingVirtualHost(t *testing.T) { var ( - oneExactMatch = &xdsclient.VirtualHost{ + oneExactMatch = &xdsresource.VirtualHost{ Domains: []string{"foo.bar.com"}, } - oneSuffixMatch = &xdsclient.VirtualHost{ + oneSuffixMatch = &xdsresource.VirtualHost{ Domains: []string{"*.bar.com"}, } - onePrefixMatch = &xdsclient.VirtualHost{ + onePrefixMatch = &xdsresource.VirtualHost{ Domains: []string{"foo.bar.*"}, } - oneUniversalMatch = &xdsclient.VirtualHost{ + oneUniversalMatch = &xdsresource.VirtualHost{ Domains: []string{"*"}, } - longExactMatch = &xdsclient.VirtualHost{ + longExactMatch = &xdsresource.VirtualHost{ Domains: []string{"v2.foo.bar.com"}, } - multipleMatch = &xdsclient.VirtualHost{ + multipleMatch = &xdsresource.VirtualHost{ Domains: []string{"pi.foo.bar.com", "314.*", "*.159"}, } - vhs = []*xdsclient.VirtualHost{oneExactMatch, oneSuffixMatch, 
onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch} + vhs = []*xdsresource.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch} ) tests := []struct { name string host string - vHosts []*xdsclient.VirtualHost - want *xdsclient.VirtualHost + vHosts []*xdsresource.VirtualHost + want *xdsresource.VirtualHost }{ {name: "exact-match", host: "foo.bar.com", vHosts: vhs, want: oneExactMatch}, {name: "suffix-match", host: "123.bar.com", vHosts: vhs, want: oneSuffixMatch}, @@ -75,7 +75,7 @@ func (s) TestFindBestMatchingVirtualHost(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := xdsclient.FindBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { + if got := xdsresource.FindBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { t.Errorf("findBestMatchingxdsclient.VirtualHost() = %v, want %v", got, tt.want) } }) @@ -117,15 +117,15 @@ func (s) TestServiceWatch(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: 
map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -133,22 +133,22 @@ func (s) TestServiceWatch(t *testing.T) { t.Fatal(err) } - wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, - Routes: []*xdsclient.Route{{ + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, + Routes: []*xdsresource.Route{{ Path: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}, }}, }} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Path: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Path: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, { // Another virtual host, with different domains. 
Domains: []string{"random"}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -171,15 +171,15 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -188,19 +188,19 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { } // Another LDS update with a different RDS_name. 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr + "2"}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr + "2"}, nil) if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } waitForWatchRouteConfig(ctx, t, xdsC, routeStr+"2") // RDS update for the new name. - wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(routeStr+"2", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback(routeStr+"2", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }, }, }, nil) @@ -223,19 +223,19 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second}, nil) waitForWatchRouteConfig(ctx, t, xdsC, 
routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}, ldsConfig: ldsConfig{maxStreamDuration: time.Second}, } - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -244,22 +244,22 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { } // Another LDS update with the same RDS_name but different MaxStreamDuration (zero in this case). 
- wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { t.Fatal(err) } // RDS update. - wantUpdate3 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate3 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }, }, }, nil) @@ -282,18 +282,18 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, 
targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -303,7 +303,7 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { } // Another LDS update with a the same RDS_name. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if _, err := xdsC.WaitForCancelRouteConfigWatch(sCtx); err != context.DeadlineExceeded { @@ -327,14 +327,14 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { // First LDS update is LDS with RDS name to watch. 
waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -343,15 +343,15 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { } // Switch LDS resp to a LDS with inline RDS resource - wantVirtualHosts2 := &xdsclient.VirtualHost{Domains: []string{"target"}, - Routes: []*xdsclient.Route{{ + wantVirtualHosts2 := &xdsresource.VirtualHost{Domains: []string{"target"}, + Routes: []*xdsresource.Route{{ Path: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}, }}, } wantUpdate2 := serviceUpdate{virtualHost: wantVirtualHosts2} - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{InlineRouteConfig: &xdsclient.RouteConfigUpdate{ - 
VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{wantVirtualHosts2}, }}, nil) // This inline RDS resource should cause the RDS watch to be canceled. if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { @@ -362,13 +362,13 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { } // Switch LDS update back to LDS with RDS name to watch. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -377,8 +377,8 @@ func (s) TestServiceWatchInlineRDS(t *testing.T) { } // Switch LDS resp to a LDS with inline RDS resource again. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{InlineRouteConfig: &xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{wantVirtualHosts2}, + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{wantVirtualHosts2}, }}, nil) // This inline RDS resource should cause the RDS watch to be canceled. 
if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 90e6c1d4db05..c05a7422904a 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -51,6 +51,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -63,8 +64,8 @@ const ( var target = resolver.Target{Endpoint: targetStr} -var routerFilter = xdsclient.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} -var routerFilterList = []xdsclient.HTTPFilter{routerFilter} +var routerFilter = xdsresource.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} +var routerFilterList = []xdsresource.HTTPFilter{routerFilter} type s struct { grpctest.Tester @@ -262,17 +263,17 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Call the watchAPI callback after closing the resolver, and make sure no // update is triggerred on the ClientConn. 
xdsR.Close() - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -309,13 +310,13 @@ func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) @@ -335,17 +336,17 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer replaceRandNumGenerator(0)() for _, tt := range []struct { - routes []*xdsclient.Route + routes []*xdsresource.Route wantJSON string wantClusters map[string]bool }{ { - routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, wantJSON: `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ @@ -357,7 +358,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantClusters: map[string]bool{"test-cluster-1": true}, }, { - routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }}}, @@ -381,7 +382,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantClusters: map[string]bool{"cluster_1": true, "cluster_2": 
true}, }, { - routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }}}, @@ -404,8 +405,8 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { } { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, Routes: tt.routes, @@ -474,22 +475,22 @@ func (s) TestXDSResolverRequestHash(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke watchAPI callback with a good service update (with hash policies // specified) and wait for UpdateState method to be called on ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{ + WeightedClusters: map[string]xdsresource.WeightedCluster{ "cluster_1": {Weight: 75}, "cluster_2": {Weight: 25}, }, - HashPolicies: []*xdsclient.HashPolicy{{ - HashPolicyType: xdsclient.HashPolicyTypeHeader, + HashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, HeaderName: ":path", }}, }}, @@ -534,16 +535,16 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -570,7 +571,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { // Delete the resource suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if _, err = tcc.stateCh.Receive(ctx); err != nil { t.Fatalf("Error waiting for UpdateState to be called: %v", err) @@ -594,16 +595,16 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -649,7 +650,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { // Delete the resource. The channel should receive a service config with the // original cluster but with an erroring config selector. suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotState, err = tcc.stateCh.Receive(ctx); err != nil { t.Fatalf("Error waiting for UpdateState to be called: %v", err) @@ -702,7 +703,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) @@ -710,11 +711,11 @@ func (s) TestXDSResolverWRR(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 5}, "B": {Weight: 10}, }}}, @@ -762,7 +763,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) @@ -770,21 +771,21 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP("/foo"), - WeightedClusters: map[string]xdsclient.WeightedCluster{"A": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"A": {Weight: 1}}, MaxStreamDuration: newDurationP(5 * time.Second), }, { Prefix: newStringP("/bar"), - WeightedClusters: map[string]xdsclient.WeightedCluster{"B": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"B": {Weight: 1}}, MaxStreamDuration: newDurationP(0), }, { Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{"C": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"C": {Weight: 1}}, }}, }, }, @@ -855,16 +856,16 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, }, }, }, nil) @@ -910,21 +911,21 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // Perform TWO updates to ensure the old config selector does not hold a // reference to test-cluster-1. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) tcc.stateCh.Receive(ctx) // Ignore the first update. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) @@ -959,11 +960,11 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // test-cluster-1. 
res.OnCommitted() - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, }, }, }, nil) @@ -1004,13 +1005,13 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) @@ -1018,11 +1019,11 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -1038,7 +1039,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr2 := errors.New("bad serviceupdate 2") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr2) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr2) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr2 { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr2) } @@ -1058,13 +1059,13 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{}, suErr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != context.DeadlineExceeded { t.Fatalf("ClientConn.ReportError() received %v, %v, want channel recv timeout", gotErrVal, gotErr) @@ -1104,12 +1105,12 @@ func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer replaceRandNumGenerator(0)() // Send a new LDS update, with the same fields. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer cancel() // Should NOT trigger a state update. @@ -1119,7 +1120,7 @@ func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { } // Send a new LDS update, with the same RDS name, but different fields. 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer cancel() gotState, err = tcc.stateCh.Receive(ctx) @@ -1187,7 +1188,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { var path []string testCases := []struct { name string - ldsFilters []xdsclient.HTTPFilter + ldsFilters []xdsresource.HTTPFilter vhOverrides map[string]httpfilter.FilterConfig rtOverrides map[string]httpfilter.FilterConfig clOverrides map[string]httpfilter.FilterConfig @@ -1197,7 +1198,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }{ { name: "no router filter", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, }, rpcRes: map[string][][]string{ @@ -1209,7 +1210,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "ignored after router filter", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, routerFilter, {Name: "foo2", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo2"}}, @@ -1227,7 +1228,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "NewStream error; ensure earlier interceptor Done is still called", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, {Name: "bar", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "bar1", newStreamErr: errors.New("bar newstream err")}}, routerFilter, @@ -1244,7 +1245,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, 
{ name: "all overrides", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1", newStreamErr: errors.New("this is overridden to nil")}}, {Name: "bar", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "bar1"}}, routerFilter, @@ -1280,7 +1281,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: routeStr, HTTPFilters: tc.ldsFilters, }, nil) @@ -1293,17 +1294,17 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{ - Prefix: newStringP("1"), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Routes: []*xdsresource.Route{{ + Prefix: newStringP("1"), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 1}, "B": {Weight: 1}, }, }, { - Prefix: newStringP("2"), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Prefix: newStringP("2"), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 1}, "B": {Weight: 1, HTTPFilterConfigOverride: tc.clOverrides}, }, @@ -1391,13 +1392,13 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { func replaceRandNumGenerator(start int64) func() { nextInt := start - xdsclient.RandInt63n = func(int64) (ret int64) { + xdsresource.RandInt63n = func(int64) (ret int64) { ret = nextInt nextInt++ return } return func() { - xdsclient.RandInt63n = grpcrand.Int63n + 
xdsresource.RandInt63n = grpcrand.Int63n } } diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index dd0374dc88e4..f1ee06e7b553 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -27,7 +27,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" xdsinternal "google.golang.org/grpc/internal/credentials/xds" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // connWrapper is a thin wrapper around a net.Conn returned by Accept(). It @@ -43,7 +43,7 @@ type connWrapper struct { net.Conn // The specific filter chain picked for handling this connection. - filterChain *xdsclient.FilterChain + filterChain *xdsresource.FilterChain // A reference fo the listenerWrapper on which this connection was accepted. parent *listenerWrapper @@ -61,11 +61,11 @@ type connWrapper struct { // The virtual hosts with matchable routes and instantiated HTTP Filters per // route. - virtualHosts []xdsclient.VirtualHostWithInterceptors + virtualHosts []xdsresource.VirtualHostWithInterceptors } // VirtualHosts returns the virtual hosts to be used for server side routing. 
-func (c *connWrapper) VirtualHosts() []xdsclient.VirtualHostWithInterceptors { +func (c *connWrapper) VirtualHosts() []xdsresource.VirtualHostWithInterceptors { return c.virtualHosts } diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 99c9a7532307..045baf00f8c4 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -73,8 +74,8 @@ func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { // XDSClient wraps the methods on the XDSClient which are required by // the listenerWrapper. type XDSClient interface { - WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() - WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func() + WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() BootstrapConfig() *bootstrap.Config } @@ -136,7 +137,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru } type ldsUpdateWithError struct { - update xdsclient.ListenerUpdate + update xdsresource.ListenerUpdate err error } @@ -182,7 +183,7 @@ type listenerWrapper struct { // Current serving mode. mode connectivity.ServingMode // Filter chains received as part of the last good update. - filterChains *xdsclient.FilterChainManager + filterChains *xdsresource.FilterChainManager // rdsHandler is used for any dynamic RDS resources specified in a LDS // update. 
@@ -250,7 +251,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } - fc, err := l.filterChains.Lookup(xdsclient.FilterChainLookupParams{ + fc, err := l.filterChains.Lookup(xdsresource.FilterChainLookupParams{ IsUnspecifiedListener: l.isUnspecifiedAddr, DestAddr: destAddr.IP, SourceAddr: srcAddr.IP, @@ -276,12 +277,12 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { if !env.RBACSupport { return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil } - var rc xdsclient.RouteConfigUpdate + var rc xdsresource.RouteConfigUpdate if fc.InlineRouteConfig != nil { rc = *fc.InlineRouteConfig } else { rcPtr := atomic.LoadPointer(&l.rdsUpdates) - rcuPtr := (*map[string]xdsclient.RouteConfigUpdate)(rcPtr) + rcuPtr := (*map[string]xdsresource.RouteConfigUpdate)(rcPtr) // This shouldn't happen, but this error protects against a panic. if rcuPtr == nil { return nil, errors.New("route configuration pointer is nil") @@ -340,7 +341,7 @@ func (l *listenerWrapper) run() { // handleLDSUpdate is the callback which handles LDS Updates. It writes the // received update to the update channel, which is picked up by the run // goroutine. 
-func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, err error) { +func (l *listenerWrapper) handleListenerUpdate(update xdsresource.ListenerUpdate, err error) { if l.closed.HasFired() { l.logger.Warningf("Resource %q received update: %v with error: %v, after listener was closed", l.name, update, err) return @@ -429,7 +430,7 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { } } -func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode connectivity.ServingMode, err error) { +func (l *listenerWrapper) switchMode(fcs *xdsresource.FilterChainManager, newMode connectivity.ServingMode, err error) { l.mu.Lock() defer l.mu.Unlock() diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 383729363665..1dba999008a5 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -38,7 +38,7 @@ import ( _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -272,7 +272,7 @@ func (s) TestNewListenerWrapper(t *testing.T) { } // Push an error to the listener update handler. 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("bad listener update")) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, errors.New("bad listener update")) timer := time.NewTimer(defaultTestShortTimeout) select { case <-timer.C: @@ -281,15 +281,15 @@ func (s) TestNewListenerWrapper(t *testing.T) { t.Fatalf("ready channel written to after receipt of a bad Listener update") } - fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } // Push an update whose address does not match the address to which our // listener is bound, and verify that the ready channel is not written to. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: "10.0.0.1", Port: "50051", FilterChains: fcm, @@ -306,8 +306,8 @@ func (s) TestNewListenerWrapper(t *testing.T) { // Since there are no dynamic RDS updates needed to be received, the // ListenerWrapper does not have to wait for anything else before telling // that it is ready. 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: fakeListenerHost, Port: strconv.Itoa(fakeListenerPort), FilterChains: fcm, @@ -345,7 +345,7 @@ func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { if name != testListenerResourceName { t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", name, testListenerResourceName) } - fcm, err := xdsclient.NewFilterChainManager(listenerWithRouteConfiguration) + fcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } @@ -354,8 +354,8 @@ func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { // RDS Resources that need to be received. This should ping rds handler // about which rds names to start, which will eventually start a watch on // xds client for rds name "route-1". - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: fakeListenerHost, Port: strconv.Itoa(fakeListenerPort), FilterChains: fcm, @@ -383,7 +383,7 @@ func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { // should trigger the listener wrapper to fire GoodUpdate, as it has // received both it's LDS Configuration and also RDS Configuration, // specified in LDS Configuration. - xdsC.InvokeWatchRouteConfigCallback("route-1", xdsclient.RouteConfigUpdate{}, nil) + xdsC.InvokeWatchRouteConfigCallback("route-1", xdsresource.RouteConfigUpdate{}, nil) // All of the xDS updates have completed, so can expect to send a ping on // good update channel. 
@@ -408,12 +408,12 @@ func (s) TestListenerWrapper_Accept(t *testing.T) { // Push a good update with a filter chain which accepts local connections on // 192.168.0.0/16 subnet and port 80. - fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: fakeListenerHost, Port: strconv.Itoa(fakeListenerPort), FilterChains: fcm, diff --git a/xds/internal/server/rds_handler.go b/xds/internal/server/rds_handler.go index cc676c4ca05f..722748cbd526 100644 --- a/xds/internal/server/rds_handler.go +++ b/xds/internal/server/rds_handler.go @@ -21,13 +21,13 @@ package server import ( "sync" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // rdsHandlerUpdate wraps the full RouteConfigUpdate that are dynamically // queried for a given server side listener. 
type rdsHandlerUpdate struct { - updates map[string]xdsclient.RouteConfigUpdate + updates map[string]xdsresource.RouteConfigUpdate err error } @@ -37,7 +37,7 @@ type rdsHandler struct { xdsC XDSClient mu sync.Mutex - updates map[string]xdsclient.RouteConfigUpdate + updates map[string]xdsresource.RouteConfigUpdate cancels map[string]func() // For a rdsHandler update, the only update wrapped listener cares about is @@ -53,7 +53,7 @@ func newRDSHandler(xdsC XDSClient, ch chan rdsHandlerUpdate) *rdsHandler { return &rdsHandler{ xdsC: xdsC, updateChannel: ch, - updates: make(map[string]xdsclient.RouteConfigUpdate), + updates: make(map[string]xdsresource.RouteConfigUpdate), cancels: make(map[string]func()), } } @@ -70,7 +70,7 @@ func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) for routeName := range routeNamesToWatch { if _, ok := rh.cancels[routeName]; !ok { func(routeName string) { - rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsclient.RouteConfigUpdate, err error) { + rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsresource.RouteConfigUpdate, err error) { rh.handleRouteUpdate(routeName, update, err) }) }(routeName) @@ -97,7 +97,7 @@ func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) // handleRouteUpdate persists the route config for a given route name, and also // sends an update to the Listener Wrapper on an error received or if the rds // handler has a full collection of updates. 
-func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsclient.RouteConfigUpdate, err error) { +func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsresource.RouteConfigUpdate, err error) { if err != nil { drainAndPush(rh.updateChannel, rdsHandlerUpdate{err: err}) return diff --git a/xds/internal/server/rds_handler_test.go b/xds/internal/server/rds_handler_test.go index d1daffd940c0..fc622851cfa2 100644 --- a/xds/internal/server/rds_handler_test.go +++ b/xds/internal/server/rds_handler_test.go @@ -26,7 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -86,12 +86,12 @@ func (s) TestSuccessCaseOneRDSWatch(t *testing.T) { if gotRoute != route1 { t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) } - rdsUpdate := xdsclient.RouteConfigUpdate{} + rdsUpdate := xdsresource.RouteConfigUpdate{} // Invoke callback with the xds client with a certain route update. Due to // this route update updating every route name that rds handler handles, // this should write to the update channel to send to the listener. fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -147,7 +147,7 @@ func (s) TestSuccessCaseTwoUpdates(t *testing.T) { // Invoke the callback with an update for route 1. This shouldn't cause the // handler to write an update, as it has not received RouteConfigurations // for every RouteName. 
- rdsUpdate1 := xdsclient.RouteConfigUpdate{} + rdsUpdate1 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate1, nil) // The RDS Handler should not send an update. @@ -162,12 +162,12 @@ func (s) TestSuccessCaseTwoUpdates(t *testing.T) { // Invoke the callback with an update for route 2. This should cause the // handler to write an update, as it has received RouteConfigurations for // every RouteName. - rdsUpdate2 := xdsclient.RouteConfigUpdate{} + rdsUpdate2 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) // The RDS Handler should then update the listener wrapper with an update // with two route configurations, as both route names the RDS Handler handles // have received an update. - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate1, route2: rdsUpdate2} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate1, route2: rdsUpdate2} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -213,12 +213,12 @@ func (s) TestSuccessCaseDeletedRoute(t *testing.T) { t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route2) } - rdsUpdate := xdsclient.RouteConfigUpdate{} + rdsUpdate := xdsresource.RouteConfigUpdate{} // Invoke callback with the xds client with a certain route update. Due to // this route update updating every route name that rds handler handles, // this should write to the update channel to send to the listener. fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: rdsUpdate} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -281,7 +281,7 @@ func (s) TestSuccessCaseTwoUpdatesAddAndDeleteRoute(t *testing.T) { // Invoke the callback with an update for route 2. 
This shouldn't cause the // handler to write an update, as it has not received RouteConfigurations // for every RouteName. - rdsUpdate2 := xdsclient.RouteConfigUpdate{} + rdsUpdate2 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) // The RDS Handler should not send an update. @@ -296,12 +296,12 @@ func (s) TestSuccessCaseTwoUpdatesAddAndDeleteRoute(t *testing.T) { // Invoke the callback with an update for route 3. This should cause the // handler to write an update, as it has received RouteConfigurations for // every RouteName. - rdsUpdate3 := xdsclient.RouteConfigUpdate{} + rdsUpdate3 := xdsresource.RouteConfigUpdate{} fakeClient.InvokeWatchRouteConfigCallback(route3, rdsUpdate3, nil) // The RDS Handler should then update the listener wrapper with an update // with two route configurations, as both route names the RDS Handler handles // have received an update. - rhuWant := map[string]xdsclient.RouteConfigUpdate{route2: rdsUpdate2, route3: rdsUpdate3} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route2: rdsUpdate2, route3: rdsUpdate3} select { case rhu := <-rh.updateChannel: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -335,8 +335,8 @@ func (s) TestSuccessCaseSecondUpdateMakesRouteFull(t *testing.T) { // Invoke the callbacks for two of the three watches. Since RDS is not full, // this shouldn't trigger rds handler to write an update to update buffer. - fakeClient.InvokeWatchRouteConfigCallback(route1, xdsclient.RouteConfigUpdate{}, nil) - fakeClient.InvokeWatchRouteConfigCallback(route2, xdsclient.RouteConfigUpdate{}, nil) + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsresource.RouteConfigUpdate{}, nil) + fakeClient.InvokeWatchRouteConfigCallback(route2, xdsresource.RouteConfigUpdate{}, nil) // The RDS Handler should not send an update. 
sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -360,7 +360,7 @@ func (s) TestSuccessCaseSecondUpdateMakesRouteFull(t *testing.T) { if routeNameDeleted != route3 { t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) } - rhuWant := map[string]xdsclient.RouteConfigUpdate{route1: {}, route2: {}} + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: {}, route2: {}} select { case rhu := <-ch: if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { @@ -389,7 +389,7 @@ func (s) TestErrorReceived(t *testing.T) { } rdsErr := errors.New("some error") - fakeClient.InvokeWatchRouteConfigCallback(route1, xdsclient.RouteConfigUpdate{}, rdsErr) + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsresource.RouteConfigUpdate{}, rdsErr) select { case rhu := <-ch: if rhu.err.Error() != "some error" { diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index b582fd9bee91..132fa413a7e3 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // Client is a fake implementation of an xds client. 
It exposes a bunch of @@ -51,16 +52,16 @@ type Client struct { loadStore *load.Store bootstrapCfg *bootstrap.Config - ldsCb func(xdsclient.ListenerUpdate, error) - rdsCbs map[string]func(xdsclient.RouteConfigUpdate, error) - cdsCbs map[string]func(xdsclient.ClusterUpdate, error) - edsCbs map[string]func(xdsclient.EndpointsUpdate, error) + ldsCb func(xdsresource.ListenerUpdate, error) + rdsCbs map[string]func(xdsresource.RouteConfigUpdate, error) + cdsCbs map[string]func(xdsresource.ClusterUpdate, error) + edsCbs map[string]func(xdsresource.EndpointsUpdate, error) Closed *grpcsync.Event // fired when Close is called. } // WatchListener registers a LDS watch. -func (xdsC *Client) WatchListener(serviceName string, callback func(xdsclient.ListenerUpdate, error)) func() { +func (xdsC *Client) WatchListener(serviceName string, callback func(xdsresource.ListenerUpdate, error)) func() { xdsC.ldsCb = callback xdsC.ldsWatchCh.Send(serviceName) return func() { @@ -82,7 +83,7 @@ func (xdsC *Client) WaitForWatchListener(ctx context.Context) (string, error) { // // Not thread safe with WatchListener. Only call this after // WaitForWatchListener. -func (xdsC *Client) InvokeWatchListenerCallback(update xdsclient.ListenerUpdate, err error) { +func (xdsC *Client) InvokeWatchListenerCallback(update xdsresource.ListenerUpdate, err error) { xdsC.ldsCb(update, err) } @@ -94,7 +95,7 @@ func (xdsC *Client) WaitForCancelListenerWatch(ctx context.Context) error { } // WatchRouteConfig registers a RDS watch. -func (xdsC *Client) WatchRouteConfig(routeName string, callback func(xdsclient.RouteConfigUpdate, error)) func() { +func (xdsC *Client) WatchRouteConfig(routeName string, callback func(xdsresource.RouteConfigUpdate, error)) func() { xdsC.rdsCbs[routeName] = callback xdsC.rdsWatchCh.Send(routeName) return func() { @@ -116,7 +117,7 @@ func (xdsC *Client) WaitForWatchRouteConfig(ctx context.Context) (string, error) // // Not thread safe with WatchRouteConfig. 
Only call this after // WaitForWatchRouteConfig. -func (xdsC *Client) InvokeWatchRouteConfigCallback(name string, update xdsclient.RouteConfigUpdate, err error) { +func (xdsC *Client) InvokeWatchRouteConfigCallback(name string, update xdsresource.RouteConfigUpdate, err error) { if len(xdsC.rdsCbs) != 1 { xdsC.rdsCbs[name](update, err) return @@ -141,7 +142,7 @@ func (xdsC *Client) WaitForCancelRouteConfigWatch(ctx context.Context) (string, } // WatchCluster registers a CDS watch. -func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsclient.ClusterUpdate, error)) func() { +func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsresource.ClusterUpdate, error)) func() { // Due to the tree like structure of aggregate clusters, there can be multiple callbacks persisted for each cluster // node. However, the client doesn't care about the parent child relationship between the nodes, only that it invokes // the right callback for a particular cluster. @@ -166,7 +167,7 @@ func (xdsC *Client) WaitForWatchCluster(ctx context.Context) (string, error) { // // Not thread safe with WatchCluster. Only call this after // WaitForWatchCluster. -func (xdsC *Client) InvokeWatchClusterCallback(update xdsclient.ClusterUpdate, err error) { +func (xdsC *Client) InvokeWatchClusterCallback(update xdsresource.ClusterUpdate, err error) { // Keeps functionality with previous usage of this, if single callback call that callback. if len(xdsC.cdsCbs) == 1 { var clusterName string @@ -192,7 +193,7 @@ func (xdsC *Client) WaitForCancelClusterWatch(ctx context.Context) (string, erro } // WatchEndpoints registers an EDS watch for provided clusterName. 
-func (xdsC *Client) WatchEndpoints(clusterName string, callback func(xdsclient.EndpointsUpdate, error)) (cancel func()) { +func (xdsC *Client) WatchEndpoints(clusterName string, callback func(xdsresource.EndpointsUpdate, error)) (cancel func()) { xdsC.edsCbs[clusterName] = callback xdsC.edsWatchCh.Send(clusterName) return func() { @@ -214,7 +215,7 @@ func (xdsC *Client) WaitForWatchEDS(ctx context.Context) (string, error) { // // Not thread safe with WatchEndpoints. Only call this after // WaitForWatchEDS. -func (xdsC *Client) InvokeWatchEDSCallback(name string, update xdsclient.EndpointsUpdate, err error) { +func (xdsC *Client) InvokeWatchEDSCallback(name string, update xdsresource.EndpointsUpdate, err error) { if len(xdsC.edsCbs) != 1 { // This may panic if name isn't found. But it's fine for tests. xdsC.edsCbs[name](update, err) @@ -316,9 +317,9 @@ func NewClientWithName(name string) *Client { loadReportCh: testutils.NewChannel(), lrsCancelCh: testutils.NewChannel(), loadStore: load.NewStore(), - rdsCbs: make(map[string]func(xdsclient.RouteConfigUpdate, error)), - cdsCbs: make(map[string]func(xdsclient.ClusterUpdate, error)), - edsCbs: make(map[string]func(xdsclient.EndpointsUpdate, error)), + rdsCbs: make(map[string]func(xdsresource.RouteConfigUpdate, error)), + cdsCbs: make(map[string]func(xdsresource.ClusterUpdate, error)), + edsCbs: make(map[string]func(xdsresource.EndpointsUpdate, error)), Closed: grpcsync.NewEvent(), } } diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index 467c205a2559..52507bd83699 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -21,6 +21,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) type clientKeyType string @@ -31,10 +32,10 @@ const clientKey = 
clientKeyType("grpc.xds.internal.client.Client") // (collectively termed as xDS) on a remote management server, to discover // various dynamic resources. type XDSClient interface { - WatchListener(string, func(ListenerUpdate, error)) func() - WatchRouteConfig(string, func(RouteConfigUpdate, error)) func() - WatchCluster(string, func(ClusterUpdate, error)) func() - WatchEndpoints(clusterName string, edsCb func(EndpointsUpdate, error)) (cancel func()) + WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() + WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() + WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func()) ReportLoad(server string) (*load.Store, func()) DumpLDS() (string, map[string]UpdateWithMD) diff --git a/xds/internal/xdsclient/callback.go b/xds/internal/xdsclient/callback.go index 0c2665e84c0e..6643d1d4e824 100644 --- a/xds/internal/xdsclient/callback.go +++ b/xds/internal/xdsclient/callback.go @@ -20,6 +20,7 @@ package xdsclient import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/proto" ) @@ -52,19 +53,19 @@ func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { switch wiu.wi.rType { case ListenerResource: if s, ok := c.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.ldsCallback(wiu.update.(ListenerUpdate), wiu.err) } + ccb = func() { wiu.wi.ldsCallback(wiu.update.(xdsresource.ListenerUpdate), wiu.err) } } case RouteConfigResource: if s, ok := c.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.rdsCallback(wiu.update.(RouteConfigUpdate), wiu.err) } + ccb = func() { wiu.wi.rdsCallback(wiu.update.(xdsresource.RouteConfigUpdate), wiu.err) } } case ClusterResource: if s, ok := c.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { 
wiu.wi.cdsCallback(wiu.update.(ClusterUpdate), wiu.err) } + ccb = func() { wiu.wi.cdsCallback(wiu.update.(xdsresource.ClusterUpdate), wiu.err) } } case EndpointsResource: if s, ok := c.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.edsCallback(wiu.update.(EndpointsUpdate), wiu.err) } + ccb = func() { wiu.wi.edsCallback(wiu.update.(xdsresource.EndpointsUpdate), wiu.err) } } } c.mu.Unlock() @@ -79,7 +80,7 @@ func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -116,7 +117,7 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version @@ -132,7 +133,7 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met // the resource from cache, and also send an resource not found // error to indicate resource removed. delete(c.ldsCache, name) - c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist} + c.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for wi := range c.ldsWatchers[name] { wi.resourceNotFound() } @@ -148,7 +149,7 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. 
-func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -186,7 +187,7 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTupl // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version @@ -201,7 +202,7 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTupl // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -240,7 +241,7 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version @@ -256,7 +257,7 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad // from cache, and also send an resource not found error to indicate // resource removed. 
delete(c.cdsCache, name) - c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist} + c.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for wi := range c.cdsWatchers[name] { wi.resourceNotFound() } @@ -272,7 +273,7 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, metadata UpdateMetadata) { +func (c *clientImpl) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() @@ -311,7 +312,7 @@ func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, me // NACK metadata because some other resources in the same response // are invalid. mdCopy := metadata - mdCopy.Status = ServiceStatusACKed + mdCopy.Status = xdsresource.ServiceStatusACKed mdCopy.ErrState = nil if metadata.ErrState != nil { mdCopy.Version = metadata.ErrState.Version diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 39f1df215d1a..74dab87742a5 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -24,27 +24,20 @@ import ( "context" "errors" "fmt" - "regexp" "sync" "time" "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/xds/matcher" - "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/xds/internal" 
"google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -70,12 +63,6 @@ func getAPIClientBuilder(version version.TransportAPI) APIClientBuilder { return nil } -// UpdateValidatorFunc performs validations on update structs using -// context/logic available at the xdsClient layer. Since these validation are -// performed on internal update structs, they can be shared between different -// API clients. -type UpdateValidatorFunc func(interface{}) error - // BuildOptions contains options to be passed to client builders. type BuildOptions struct { // Parent is a top-level xDS client which has the intelligence to take @@ -83,7 +70,7 @@ type BuildOptions struct { // server. Parent UpdateHandler // Validator performs post unmarshal validation checks. - Validator UpdateValidatorFunc + Validator xdsresource.UpdateValidatorFunc // NodeProto contains the Node proto to be used in xDS requests. The actual // type depends on the transport protocol version used. NodeProto proto.Message @@ -140,437 +127,19 @@ type loadReportingOptions struct { // resource updates from an APIClient for a specific version. type UpdateHandler interface { // NewListeners handles updates to xDS listener resources. - NewListeners(map[string]ListenerUpdateErrTuple, UpdateMetadata) + NewListeners(map[string]xdsresource.ListenerUpdateErrTuple, xdsresource.UpdateMetadata) // NewRouteConfigs handles updates to xDS RouteConfiguration resources. - NewRouteConfigs(map[string]RouteConfigUpdateErrTuple, UpdateMetadata) + NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple, xdsresource.UpdateMetadata) // NewClusters handles updates to xDS Cluster resources. 
- NewClusters(map[string]ClusterUpdateErrTuple, UpdateMetadata) + NewClusters(map[string]xdsresource.ClusterUpdateErrTuple, xdsresource.UpdateMetadata) // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely // referred to as Endpoints) resources. - NewEndpoints(map[string]EndpointsUpdateErrTuple, UpdateMetadata) + NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple, xdsresource.UpdateMetadata) // NewConnectionError handles connection errors from the xDS stream. The // error will be reported to all the resource watchers. NewConnectionError(err error) } -// ServiceStatus is the status of the update. -type ServiceStatus int - -const ( - // ServiceStatusUnknown is the default state, before a watch is started for - // the resource. - ServiceStatusUnknown ServiceStatus = iota - // ServiceStatusRequested is when the watch is started, but before and - // response is received. - ServiceStatusRequested - // ServiceStatusNotExist is when the resource doesn't exist in - // state-of-the-world responses (e.g. LDS and CDS), which means the resource - // is removed by the management server. - ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS. - // ServiceStatusACKed is when the resource is ACKed. - ServiceStatusACKed - // ServiceStatusNACKed is when the resource is NACKed. - ServiceStatusNACKed -) - -// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state -// when a response is NACKed. -type UpdateErrorMetadata struct { - // Version is the version of the NACKed response. - Version string - // Err contains why the response was NACKed. - Err error - // Timestamp is when the NACKed response was received. - Timestamp time.Time -} - -// UpdateMetadata contains the metadata for each update, including timestamp, -// raw message, and so on. -type UpdateMetadata struct { - // Status is the status of this resource, e.g. ACKed, NACKed, or - // Not_exist(removed). 
- Status ServiceStatus - // Version is the version of the xds response. Note that this is the version - // of the resource in use (previous ACKed). If a response is NACKed, the - // NACKed version is in ErrState. - Version string - // Timestamp is when the response is received. - Timestamp time.Time - // ErrState is set when the update is NACKed. - ErrState *UpdateErrorMetadata -} - -// ListenerUpdate contains information received in an LDS response, which is of -// interest to the registered LDS watcher. -type ListenerUpdate struct { - // RouteConfigName is the route configuration name corresponding to the - // target which is being watched through LDS. - // - // Exactly one of RouteConfigName and InlineRouteConfig is set. - RouteConfigName string - // InlineRouteConfig is the inline route configuration (RDS response) - // returned inside LDS. - // - // Exactly one of RouteConfigName and InlineRouteConfig is set. - InlineRouteConfig *RouteConfigUpdate - - // MaxStreamDuration contains the HTTP connection manager's - // common_http_protocol_options.max_stream_duration field, or zero if - // unset. - MaxStreamDuration time.Duration - // HTTPFilters is a list of HTTP filters (name, config) from the LDS - // response. - HTTPFilters []HTTPFilter - // InboundListenerCfg contains inbound listener configuration. - InboundListenerCfg *InboundListenerConfig - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection -// manager field. -type HTTPFilter struct { - // Name is an arbitrary name of the filter. Used for applying override - // settings in virtual host / route / weighted cluster configuration (not - // yet supported). - Name string - // Filter is the HTTP filter found in the registry for the config type. 
- Filter httpfilter.Filter - // Config contains the filter's configuration - Config httpfilter.FilterConfig -} - -// InboundListenerConfig contains information about the inbound listener, i.e -// the server-side listener. -type InboundListenerConfig struct { - // Address is the local address on which the inbound listener is expected to - // accept incoming connections. - Address string - // Port is the local port on which the inbound listener is expected to - // accept incoming connections. - Port string - // FilterChains is the list of filter chains associated with this listener. - FilterChains *FilterChainManager -} - -// RouteConfigUpdate contains information received in an RDS response, which is -// of interest to the registered RDS watcher. -type RouteConfigUpdate struct { - VirtualHosts []*VirtualHost - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// VirtualHost contains the routes for a list of Domains. -// -// Note that the domains in this slice can be a wildcard, not an exact string. -// The consumer of this struct needs to find the best match for its hostname. -type VirtualHost struct { - Domains []string - // Routes contains a list of routes, each containing matchers and - // corresponding action. - Routes []*Route - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the virtual host which may be present. An individual filter's override - // may be unused if the matching Route contains an override for that - // filter. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig - RetryConfig *RetryConfig -} - -// RetryConfig contains all retry-related configuration in either a VirtualHost -// or Route. -type RetryConfig struct { - // RetryOn is a set of status codes on which to retry. Only Canceled, - // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are - // supported; any other values will be omitted. 
- RetryOn map[codes.Code]bool - NumRetries uint32 // maximum number of retry attempts - RetryBackoff RetryBackoff // retry backoff policy -} - -// RetryBackoff describes the backoff policy for retries. -type RetryBackoff struct { - BaseInterval time.Duration // initial backoff duration between attempts - MaxInterval time.Duration // maximum backoff duration -} - -// HashPolicyType specifies the type of HashPolicy from a received RDS Response. -type HashPolicyType int - -const ( - // HashPolicyTypeHeader specifies to hash a Header in the incoming request. - HashPolicyTypeHeader HashPolicyType = iota - // HashPolicyTypeChannelID specifies to hash a unique Identifier of the - // Channel. In grpc-go, this will be done using the ClientConn pointer. - HashPolicyTypeChannelID -) - -// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing -// load balancer. -type HashPolicy struct { - HashPolicyType HashPolicyType - Terminal bool - // Fields used for type HEADER. - HeaderName string - Regex *regexp.Regexp - RegexSubstitution string -} - -// RouteAction is the action of the route from a received RDS response. -type RouteAction int - -const ( - // RouteActionUnsupported are routing types currently unsupported by grpc. - // According to A36, "A Route with an inappropriate action causes RPCs - // matching that route to fail." - RouteActionUnsupported RouteAction = iota - // RouteActionRoute is the expected route type on the client side. Route - // represents routing a request to some upstream cluster. On the client - // side, if an RPC matches to a route that is not RouteActionRoute, the RPC - // will fail according to A36. - RouteActionRoute - // RouteActionNonForwardingAction is the expected route type on the server - // side. NonForwardingAction represents when a route will generate a - // response directly, without forwarding to an upstream host. 
- RouteActionNonForwardingAction -) - -// Route is both a specification of how to match a request as well as an -// indication of the action to take upon match. -type Route struct { - Path *string - Prefix *string - Regex *regexp.Regexp - // Indicates if prefix/path matching should be case insensitive. The default - // is false (case sensitive). - CaseInsensitive bool - Headers []*HeaderMatcher - Fraction *uint32 - - HashPolicies []*HashPolicy - - // If the matchers above indicate a match, the below configuration is used. - WeightedClusters map[string]WeightedCluster - // If MaxStreamDuration is nil, it indicates neither of the route action's - // max_stream_duration fields (grpc_timeout_header_max nor - // max_stream_duration) were set. In this case, the ListenerUpdate's - // MaxStreamDuration field should be used. If MaxStreamDuration is set to - // an explicit zero duration, the application's deadline should be used. - MaxStreamDuration *time.Duration - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the route which may be present. An individual filter's override may be - // unused if the matching WeightedCluster contains an override for that - // filter. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig - RetryConfig *RetryConfig - - RouteAction RouteAction -} - -// WeightedCluster contains settings for an xds RouteAction.WeightedCluster. -type WeightedCluster struct { - // Weight is the relative weight of the cluster. It will never be zero. - Weight uint32 - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the weighted cluster which may be present. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig -} - -// HeaderMatcher represents header matchers. 
-type HeaderMatcher struct { - Name string - InvertMatch *bool - ExactMatch *string - RegexMatch *regexp.Regexp - PrefixMatch *string - SuffixMatch *string - RangeMatch *Int64Range - PresentMatch *bool -} - -// Int64Range is a range for header range match. -type Int64Range struct { - Start int64 - End int64 -} - -// SecurityConfig contains the security configuration received as part of the -// Cluster resource on the client-side, and as part of the Listener resource on -// the server-side. -type SecurityConfig struct { - // RootInstanceName identifies the certProvider plugin to be used to fetch - // root certificates. This instance name will be resolved to the plugin name - // and its associated configuration from the certificate_providers field of - // the bootstrap file. - RootInstanceName string - // RootCertName is the certificate name to be passed to the plugin (looked - // up from the bootstrap file) while fetching root certificates. - RootCertName string - // IdentityInstanceName identifies the certProvider plugin to be used to - // fetch identity certificates. This instance name will be resolved to the - // plugin name and its associated configuration from the - // certificate_providers field of the bootstrap file. - IdentityInstanceName string - // IdentityCertName is the certificate name to be passed to the plugin - // (looked up from the bootstrap file) while fetching identity certificates. - IdentityCertName string - // SubjectAltNameMatchers is an optional list of match criteria for SANs - // specified on the peer certificate. Used only on the client-side. - // - // Some intricacies: - // - If this field is empty, then any peer certificate is accepted. - // - If the peer certificate contains a wildcard DNS SAN, and an `exact` - // matcher is configured, a wildcard DNS match is performed instead of a - // regular string comparison. 
- SubjectAltNameMatchers []matcher.StringMatcher - // RequireClientCert indicates if the server handshake process expects the - // client to present a certificate. Set to true when performing mTLS. Used - // only on the server-side. - RequireClientCert bool -} - -// Equal returns true if sc is equal to other. -func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { - switch { - case sc == nil && other == nil: - return true - case (sc != nil) != (other != nil): - return false - } - switch { - case sc.RootInstanceName != other.RootInstanceName: - return false - case sc.RootCertName != other.RootCertName: - return false - case sc.IdentityInstanceName != other.IdentityInstanceName: - return false - case sc.IdentityCertName != other.IdentityCertName: - return false - case sc.RequireClientCert != other.RequireClientCert: - return false - default: - if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { - return false - } - for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { - if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { - return false - } - } - } - return true -} - -// ClusterType is the type of cluster from a received CDS response. -type ClusterType int - -const ( - // ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint - // discovery to the management server. - ClusterTypeEDS ClusterType = iota - // ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially - // maps to the gRPC behavior of using the DNS resolver with pick_first LB policy. - ClusterTypeLogicalDNS - // ClusterTypeAggregate represents the Aggregate Cluster type, which provides a - // prioritized list of clusters to use. It is used for failover between clusters - // with a different configuration. - ClusterTypeAggregate -) - -// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its -// config. 
-type ClusterLBPolicyRingHash struct { - MinimumRingSize uint64 - MaximumRingSize uint64 -} - -// ClusterUpdate contains information from a received CDS response, which is of -// interest to the registered CDS watcher. -type ClusterUpdate struct { - ClusterType ClusterType - // ClusterName is the clusterName being watched for through CDS. - ClusterName string - // EDSServiceName is an optional name for EDS. If it's not set, the balancer - // should watch ClusterName for the EDS resources. - EDSServiceName string - // EnableLRS indicates whether or not load should be reported through LRS. - EnableLRS bool - // SecurityCfg contains security configuration sent by the control plane. - SecurityCfg *SecurityConfig - // MaxRequests for circuit breaking, if any (otherwise nil). - MaxRequests *uint32 - // DNSHostName is used only for cluster type DNS. It's the DNS name to - // resolve in "host:port" form - DNSHostName string - // PrioritizedClusterNames is used only for cluster type aggregate. It represents - // a prioritized list of cluster names. - PrioritizedClusterNames []string - - // LBPolicy is the lb policy for this cluster. - // - // This only support round_robin and ring_hash. - // - if it's nil, the lb policy is round_robin - // - if it's not nil, the lb policy is ring_hash, the this field has the config. - // - // When we add more support policies, this can be made an interface, and - // will be set to different types based on the policy type. - LBPolicy *ClusterLBPolicyRingHash - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// OverloadDropConfig contains the config to drop overloads. -type OverloadDropConfig struct { - Category string - Numerator uint32 - Denominator uint32 -} - -// EndpointHealthStatus represents the health status of an endpoint. -type EndpointHealthStatus int32 - -const ( - // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. 
- EndpointHealthStatusUnknown EndpointHealthStatus = iota - // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. - EndpointHealthStatusHealthy - // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. - EndpointHealthStatusUnhealthy - // EndpointHealthStatusDraining represents HealthStatus DRAINING. - EndpointHealthStatusDraining - // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. - EndpointHealthStatusTimeout - // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. - EndpointHealthStatusDegraded -) - -// Endpoint contains information of an endpoint. -type Endpoint struct { - Address string - HealthStatus EndpointHealthStatus - Weight uint32 -} - -// Locality contains information of a locality. -type Locality struct { - Endpoints []Endpoint - ID internal.LocalityID - Priority uint32 - Weight uint32 -} - -// EndpointsUpdate contains an EDS update. -type EndpointsUpdate struct { - Drops []OverloadDropConfig - Localities []Locality - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - // Function to be overridden in tests. var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) { cb := getAPIClientBuilder(apiVersion) @@ -603,20 +172,20 @@ type clientImpl struct { mu sync.Mutex ldsWatchers map[string]map[*watchInfo]bool ldsVersion string // Only used in CSDS. - ldsCache map[string]ListenerUpdate - ldsMD map[string]UpdateMetadata + ldsCache map[string]xdsresource.ListenerUpdate + ldsMD map[string]xdsresource.UpdateMetadata rdsWatchers map[string]map[*watchInfo]bool rdsVersion string // Only used in CSDS. - rdsCache map[string]RouteConfigUpdate - rdsMD map[string]UpdateMetadata + rdsCache map[string]xdsresource.RouteConfigUpdate + rdsMD map[string]xdsresource.UpdateMetadata cdsWatchers map[string]map[*watchInfo]bool cdsVersion string // Only used in CSDS. 
- cdsCache map[string]ClusterUpdate - cdsMD map[string]UpdateMetadata + cdsCache map[string]xdsresource.ClusterUpdate + cdsMD map[string]xdsresource.UpdateMetadata edsWatchers map[string]map[*watchInfo]bool edsVersion string // Only used in CSDS. - edsCache map[string]EndpointsUpdate - edsMD map[string]UpdateMetadata + edsCache map[string]xdsresource.EndpointsUpdate + edsMD map[string]xdsresource.UpdateMetadata // Changes to map lrsClients and the lrsClient inside the map need to be // protected by lrsMu. @@ -652,17 +221,17 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( updateCh: buffer.NewUnbounded(), ldsWatchers: make(map[string]map[*watchInfo]bool), - ldsCache: make(map[string]ListenerUpdate), - ldsMD: make(map[string]UpdateMetadata), + ldsCache: make(map[string]xdsresource.ListenerUpdate), + ldsMD: make(map[string]xdsresource.UpdateMetadata), rdsWatchers: make(map[string]map[*watchInfo]bool), - rdsCache: make(map[string]RouteConfigUpdate), - rdsMD: make(map[string]UpdateMetadata), + rdsCache: make(map[string]xdsresource.RouteConfigUpdate), + rdsMD: make(map[string]xdsresource.UpdateMetadata), cdsWatchers: make(map[string]map[*watchInfo]bool), - cdsCache: make(map[string]ClusterUpdate), - cdsMD: make(map[string]UpdateMetadata), + cdsCache: make(map[string]xdsresource.ClusterUpdate), + cdsMD: make(map[string]xdsresource.UpdateMetadata), edsWatchers: make(map[string]map[*watchInfo]bool), - edsCache: make(map[string]EndpointsUpdate), - edsMD: make(map[string]UpdateMetadata), + edsCache: make(map[string]xdsresource.EndpointsUpdate), + edsMD: make(map[string]xdsresource.UpdateMetadata), lrsClients: make(map[string]*lrsClient), } @@ -732,14 +301,14 @@ func (c *clientImpl) Close() { c.logger.Infof("Shutdown") } -func (c *clientImpl) filterChainUpdateValidator(fc *FilterChain) error { +func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error { if fc == nil { return nil } return 
c.securityConfigUpdateValidator(fc.SecurityCfg) } -func (c *clientImpl) securityConfigUpdateValidator(sc *SecurityConfig) error { +func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error { if sc == nil { return nil } @@ -758,28 +327,12 @@ func (c *clientImpl) securityConfigUpdateValidator(sc *SecurityConfig) error { func (c *clientImpl) updateValidator(u interface{}) error { switch update := u.(type) { - case ListenerUpdate: + case xdsresource.ListenerUpdate: if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil { return nil } - - fcm := update.InboundListenerCfg.FilterChains - for _, dst := range fcm.dstPrefixMap { - for _, srcType := range dst.srcTypeArr { - if srcType == nil { - continue - } - for _, src := range srcType.srcPrefixMap { - for _, fc := range src.srcPortMap { - if err := c.filterChainUpdateValidator(fc); err != nil { - return err - } - } - } - } - } - return c.filterChainUpdateValidator(fcm.def) - case ClusterUpdate: + return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator) + case xdsresource.ClusterUpdate: return c.securityConfigUpdateValidator(update.SecurityCfg) default: // We currently invoke this update validation function only for LDS and @@ -821,33 +374,3 @@ func (r ResourceType) String() string { return "UnknownResource" } } - -// IsListenerResource returns true if the provider URL corresponds to an xDS -// Listener resource. -func IsListenerResource(url string) bool { - return url == version.V2ListenerURL || url == version.V3ListenerURL -} - -// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS -// HTTPConnManager resource. -func IsHTTPConnManagerResource(url string) bool { - return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL -} - -// IsRouteConfigResource returns true if the provider URL corresponds to an xDS -// RouteConfig resource. 
-func IsRouteConfigResource(url string) bool { - return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL -} - -// IsClusterResource returns true if the provider URL corresponds to an xDS -// Cluster resource. -func IsClusterResource(url string) bool { - return url == version.V2ClusterURL || url == version.V3ClusterURL -} - -// IsEndpointsResource returns true if the provider URL corresponds to an xDS -// Endpoints resource. -func IsEndpointsResource(url string) bool { - return url == version.V2EndpointsURL || url == version.V3EndpointsURL -} diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 2a6d6ae2a536..a668ff1378f3 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc" @@ -60,21 +61,9 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. 
) -var ( - cmpOpts = cmp.Options{ - cmpopts.EquateEmpty(), - cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), - cmp.Comparer(func(a, b time.Time) bool { return true }), - protocmp.Transform(), - } - - cmpOptsIgnoreDetails = cmp.Options{ - cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - return (x == nil) == (y == nil) - }), - } -) +func newStringP(s string) *string { + return &s +} func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstrap.Config, time.Duration) { watchExpiryTimeout := defaultWatchExpiryTimeout @@ -164,10 +153,10 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { clusterUpdateCh := testutils.NewChannel() firstTime := true - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) // Calls another watch inline, to ensure there's deadlock. 
- client.WatchCluster("another-random-name", func(ClusterUpdate, error) {}) + client.WatchCluster("another-random-name", func(xdsresource.ClusterUpdate, error) {}) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); firstTime && err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -178,27 +167,27 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // The second update needs to be different in the underlying resource proto // for the watch callback to be invoked. 
- wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, UpdateMetadata{}) + wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil { t.Fatal(err) } } -func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ListenerUpdate, wantErr error) error { +func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ListenerUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for listener update: %v", err) } - gotUpdate := u.(ListenerUpdateErrTuple) + gotUpdate := u.(xdsresource.ListenerUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) @@ -211,12 +200,12 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want return nil } -func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate RouteConfigUpdate, wantErr error) error { +func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.RouteConfigUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for route configuration update: %v", err) } - gotUpdate := u.(RouteConfigUpdateErrTuple) + gotUpdate := u.(xdsresource.RouteConfigUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) @@ -229,12 +218,12 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, w return nil } 
-func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ClusterUpdate, wantErr error) error { +func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ClusterUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for cluster update: %v", err) } - gotUpdate := u.(ClusterUpdateErrTuple) + gotUpdate := u.(xdsresource.ClusterUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) @@ -247,12 +236,12 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU return nil } -func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate EndpointsUpdate, wantErr error) error { +func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.EndpointsUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for endpoints update: %v", err) } - gotUpdate := u.(EndpointsUpdateErrTuple) + gotUpdate := u.(xdsresource.EndpointsUpdateErrTuple) if wantErr != nil { if gotUpdate.Err != wantErr { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) diff --git a/xds/internal/xdsclient/dump.go b/xds/internal/xdsclient/dump.go index db9b474f370d..dfe83c5b1755 100644 --- a/xds/internal/xdsclient/dump.go +++ b/xds/internal/xdsclient/dump.go @@ -18,7 +18,10 @@ package xdsclient -import anypb "github.com/golang/protobuf/ptypes/any" +import ( + anypb "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) // UpdateWithMD contains the raw message of the update and the metadata, // including version, raw message, timestamp. 
@@ -26,31 +29,31 @@ import anypb "github.com/golang/protobuf/ptypes/any" // This is to be used for config dump and CSDS, not directly by users (like // resolvers/balancers). type UpdateWithMD struct { - MD UpdateMetadata + MD xdsresource.UpdateMetadata Raw *anypb.Any } func rawFromCache(s string, cache interface{}) *anypb.Any { switch c := cache.(type) { - case map[string]ListenerUpdate: + case map[string]xdsresource.ListenerUpdate: v, ok := c[s] if !ok { return nil } return v.Raw - case map[string]RouteConfigUpdate: + case map[string]xdsresource.RouteConfigUpdate: v, ok := c[s] if !ok { return nil } return v.Raw - case map[string]ClusterUpdate: + case map[string]xdsresource.ClusterUpdate: v, ok := c[s] if !ok { return nil } return v.Raw - case map[string]EndpointsUpdate: + case map[string]xdsresource.EndpointsUpdate: v, ok := c[s] if !ok { return nil @@ -67,7 +70,7 @@ func (c *clientImpl) dump(t ResourceType) (string, map[string]UpdateWithMD) { var ( version string - md map[string]UpdateMetadata + md map[string]xdsresource.UpdateMetadata cache interface{} ) switch t { diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index c162a9418f23..2d0b6c17e0b2 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -30,6 +30,7 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/durationpb" @@ -94,25 +95,25 @@ func (s) TestLDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range ldsTargets { - cancel := client.WatchListener(n, func(update xdsclient.ListenerUpdate, err 
error) {}) + cancel := client.WatchListener(n, func(update xdsresource.ListenerUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. if err := compareDump(client.DumpLDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.ListenerUpdateErrTuple) + update0 := make(map[string]xdsresource.ListenerUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range listenerRaws { - update0[n] = xdsclient.ListenerUpdateErrTuple{Update: xdsclient.ListenerUpdate{Raw: r}} + update0[n] = xdsresource.ListenerUpdateErrTuple{Update: xdsresource.ListenerUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewListeners(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewListeners(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. 
if err := compareDump(client.DumpLDS, testVersion, want0); err != nil { @@ -122,13 +123,13 @@ func (s) TestLDSConfigDump(t *testing.T) { const nackVersion = "lds-version-nack" var nackErr = fmt.Errorf("lds nack error") updateHandler.NewListeners( - map[string]xdsclient.ListenerUpdateErrTuple{ + map[string]xdsresource.ListenerUpdateErrTuple{ ldsTargets[0]: {Err: nackErr}, - ldsTargets[1]: {Update: xdsclient.ListenerUpdate{Raw: listenerRaws[ldsTargets[1]]}}, + ldsTargets[1]: {Update: xdsresource.ListenerUpdate{Raw: listenerRaws[ldsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -140,10 +141,10 @@ func (s) TestLDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. 
wantDump[ldsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -152,7 +153,7 @@ func (s) TestLDSConfigDump(t *testing.T) { } wantDump[ldsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: listenerRaws[ldsTargets[1]], } if err := compareDump(client.DumpLDS, nackVersion, wantDump); err != nil { @@ -210,25 +211,25 @@ func (s) TestRDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range rdsTargets { - cancel := client.WatchRouteConfig(n, func(update xdsclient.RouteConfigUpdate, err error) {}) + cancel := client.WatchRouteConfig(n, func(update xdsresource.RouteConfigUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpRDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.RouteConfigUpdateErrTuple) + update0 := make(map[string]xdsresource.RouteConfigUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range routeRaws { - update0[n] = xdsclient.RouteConfigUpdateErrTuple{Update: xdsclient.RouteConfigUpdate{Raw: r}} + update0[n] = xdsresource.RouteConfigUpdateErrTuple{Update: xdsresource.RouteConfigUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewRouteConfigs(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. 
if err := compareDump(client.DumpRDS, testVersion, want0); err != nil { @@ -238,13 +239,13 @@ func (s) TestRDSConfigDump(t *testing.T) { const nackVersion = "rds-version-nack" var nackErr = fmt.Errorf("rds nack error") updateHandler.NewRouteConfigs( - map[string]xdsclient.RouteConfigUpdateErrTuple{ + map[string]xdsresource.RouteConfigUpdateErrTuple{ rdsTargets[0]: {Err: nackErr}, - rdsTargets[1]: {Update: xdsclient.RouteConfigUpdate{Raw: routeRaws[rdsTargets[1]]}}, + rdsTargets[1]: {Update: xdsresource.RouteConfigUpdate{Raw: routeRaws[rdsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -256,10 +257,10 @@ func (s) TestRDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. 
wantDump[rdsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -267,7 +268,7 @@ func (s) TestRDSConfigDump(t *testing.T) { Raw: routeRaws[rdsTargets[0]], } wantDump[rdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: routeRaws[rdsTargets[1]], } if err := compareDump(client.DumpRDS, nackVersion, wantDump); err != nil { @@ -326,25 +327,25 @@ func (s) TestCDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range cdsTargets { - cancel := client.WatchCluster(n, func(update xdsclient.ClusterUpdate, err error) {}) + cancel := client.WatchCluster(n, func(update xdsresource.ClusterUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpCDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.ClusterUpdateErrTuple) + update0 := make(map[string]xdsresource.ClusterUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range clusterRaws { - update0[n] = xdsclient.ClusterUpdateErrTuple{Update: xdsclient.ClusterUpdate{Raw: r}} + update0[n] = xdsresource.ClusterUpdateErrTuple{Update: xdsresource.ClusterUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewClusters(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewClusters(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpCDS, testVersion, want0); err != nil { @@ -354,13 +355,13 @@ func (s) TestCDSConfigDump(t *testing.T) { const nackVersion = "cds-version-nack" var nackErr = fmt.Errorf("cds nack error") updateHandler.NewClusters( - map[string]xdsclient.ClusterUpdateErrTuple{ + map[string]xdsresource.ClusterUpdateErrTuple{ cdsTargets[0]: {Err: nackErr}, - cdsTargets[1]: {Update: xdsclient.ClusterUpdate{Raw: clusterRaws[cdsTargets[1]]}}, + cdsTargets[1]: {Update: xdsresource.ClusterUpdate{Raw: clusterRaws[cdsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -372,10 +373,10 @@ func (s) TestCDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. 
wantDump[cdsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -383,7 +384,7 @@ func (s) TestCDSConfigDump(t *testing.T) { Raw: clusterRaws[cdsTargets[0]], } wantDump[cdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: clusterRaws[cdsTargets[1]], } if err := compareDump(client.DumpCDS, nackVersion, wantDump); err != nil { @@ -428,25 +429,25 @@ func (s) TestEDSConfigDump(t *testing.T) { wantRequested := make(map[string]xdsclient.UpdateWithMD) for _, n := range edsTargets { - cancel := client.WatchEndpoints(n, func(update xdsclient.EndpointsUpdate, err error) {}) + cancel := client.WatchEndpoints(n, func(update xdsresource.EndpointsUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} + wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpEDS, "", wantRequested); err != nil { t.Fatalf(err.Error()) } - update0 := make(map[string]xdsclient.EndpointsUpdateErrTuple) + update0 := make(map[string]xdsresource.EndpointsUpdateErrTuple) want0 := make(map[string]xdsclient.UpdateWithMD) for n, r := range endpointRaws { - update0[n] = xdsclient.EndpointsUpdateErrTuple{Update: xdsclient.EndpointsUpdate{Raw: r}} + update0[n] = xdsresource.EndpointsUpdateErrTuple{Update: xdsresource.EndpointsUpdate{Raw: r}} want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } } - updateHandler.NewEndpoints(update0, xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: testVersion}) + updateHandler.NewEndpoints(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. if err := compareDump(client.DumpEDS, testVersion, want0); err != nil { @@ -456,13 +457,13 @@ func (s) TestEDSConfigDump(t *testing.T) { const nackVersion = "eds-version-nack" var nackErr = fmt.Errorf("eds nack error") updateHandler.NewEndpoints( - map[string]xdsclient.EndpointsUpdateErrTuple{ + map[string]xdsresource.EndpointsUpdateErrTuple{ edsTargets[0]: {Err: nackErr}, - edsTargets[1]: {Update: xdsclient.EndpointsUpdate{Raw: endpointRaws[edsTargets[1]]}}, + edsTargets[1]: {Update: xdsresource.EndpointsUpdate{Raw: endpointRaws[edsTargets[1]]}}, }, - xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -474,10 +475,10 @@ func (s) TestEDSConfigDump(t *testing.T) { // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK 
error. wantDump[edsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ + ErrState: &xdsresource.UpdateErrorMetadata{ Version: nackVersion, Err: nackErr, }, @@ -485,7 +486,7 @@ func (s) TestEDSConfigDump(t *testing.T) { Raw: endpointRaws[edsTargets[0]], } wantDump[edsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusACKed, Version: nackVersion}, + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: endpointRaws[edsTargets[1]], } if err := compareDump(client.DumpEDS, nackVersion, wantDump); err != nil { diff --git a/xds/internal/xdsclient/v2/ack_test.go b/xds/internal/xdsclient/v2/ack_test.go index d2f0605f6d08..21191341306b 100644 --- a/xds/internal/xdsclient/v2/ack_test.go +++ b/xds/internal/xdsclient/v2/ack_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -47,7 +48,7 @@ func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *client, cbLDS, cb cbCDS = testutils.NewChannel() cbEDS = testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { t.Logf("Received %v callback with {%+v}", rType, d) switch rType { case xdsclient.ListenerResource: diff --git a/xds/internal/xdsclient/v2/cds_test.go b/xds/internal/xdsclient/v2/cds_test.go index cef7563017c4..1c368b5a5c3a 100644 --- a/xds/internal/xdsclient/v2/cds_test.go +++ b/xds/internal/xdsclient/v2/cds_test.go 
@@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -101,8 +102,8 @@ func (s) TestCDSHandleResponse(t *testing.T) { name string cdsResponse *xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.ClusterUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.ClusterUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Badly marshaled CDS response. @@ -111,9 +112,9 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: badlyMarshaledCDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -125,9 +126,9 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: badResourceTypeInLDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -139,8 +140,8 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: &xdspb.DiscoveryResponse{}, wantErr: false, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -149,11 +150,11 @@ func (s) TestCDSHandleResponse(t *testing.T) { name: "one-uninteresting-cluster", cdsResponse: goodCDSResponse2, wantErr: false, - wantUpdate: 
map[string]xdsclient.ClusterUpdateErrTuple{ - goodClusterName2: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}}, + wantUpdate: map[string]xdsresource.ClusterUpdateErrTuple{ + goodClusterName2: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -162,11 +163,11 @@ func (s) TestCDSHandleResponse(t *testing.T) { name: "one-good-cluster", cdsResponse: goodCDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.ClusterUpdateErrTuple{ - goodClusterName1: {Update: xdsclient.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}}, + wantUpdate: map[string]xdsresource.ClusterUpdateErrTuple{ + goodClusterName1: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -194,7 +195,7 @@ func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/client.go b/xds/internal/xdsclient/v2/client.go index dc137f63e5f5..60e87761e852 100644 --- a/xds/internal/xdsclient/v2/client.go +++ b/xds/internal/xdsclient/v2/client.go @@ -30,6 +30,7 
@@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -93,7 +94,7 @@ type client struct { // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. cc *grpc.ClientConn nodeProto *v2corepb.Node - updateValidator xdsclient.UpdateValidatorFunc + updateValidator xdsresource.UpdateValidatorFunc } func (v2c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { @@ -164,16 +165,16 @@ func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri var err error url := resp.GetTypeUrl() switch { - case xdsclient.IsListenerResource(url): + case xdsresource.IsListenerResource(url): err = v2c.handleLDSResponse(resp) rType = xdsclient.ListenerResource - case xdsclient.IsRouteConfigResource(url): + case xdsresource.IsRouteConfigResource(url): err = v2c.handleRDSResponse(resp) rType = xdsclient.RouteConfigResource - case xdsclient.IsClusterResource(url): + case xdsresource.IsClusterResource(url): err = v2c.handleCDSResponse(resp) rType = xdsclient.ClusterResource - case xdsclient.IsEndpointsResource(url): + case xdsresource.IsEndpointsResource(url): err = v2c.handleEDSResponse(resp) rType = xdsclient.EndpointsResource default: @@ -188,7 +189,7 @@ func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri // server. On receipt of a good response, it also invokes the registered watcher // callback. 
func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalListener(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, @@ -202,7 +203,7 @@ func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { // server. On receipt of a good response, it caches validated resources and also // invokes the registered watcher callback. func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalRouteConfig(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, @@ -216,7 +217,7 @@ func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { // server. On receipt of a good response, it also invokes the registered watcher // callback. 
func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalCluster(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, @@ -227,7 +228,7 @@ func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error { } func (v2c *client) handleEDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalEndpoints(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v2c.logger, diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index fc3fa821a157..f74e87fb370d 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" @@ -293,15 +294,15 @@ type watchHandleTestcase struct { responseToHandle *xdspb.DiscoveryResponse wantHandleErr bool wantUpdate interface{} - wantUpdateMD xdsclient.UpdateMetadata + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool } type testUpdateReceiver struct { - f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) + f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) } -func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewListeners(d 
map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -309,7 +310,7 @@ func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdateE t.newUpdate(xdsclient.ListenerResource, dd, metadata) } -func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -317,7 +318,7 @@ func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigU t.newUpdate(xdsclient.RouteConfigResource, dd, metadata) } -func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewClusters(d map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -325,7 +326,7 @@ func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdateErr t.newUpdate(xdsclient.ClusterResource, dd, metadata) } -func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdateErrTuple, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { dd := make(map[string]interface{}) for k, v := range d { dd[k] = v @@ -335,7 +336,7 @@ func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdate func (t *testUpdateReceiver) NewConnectionError(error) {} -func (t *testUpdateReceiver) newUpdate(rType xdsclient.ResourceType, d map[string]interface{}, metadata xdsclient.UpdateMetadata) { +func (t *testUpdateReceiver) newUpdate(rType xdsclient.ResourceType, d map[string]interface{}, 
metadata xdsresource.UpdateMetadata) { t.f(rType, d, metadata) } @@ -353,37 +354,37 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { type updateErr struct { u interface{} - md xdsclient.UpdateMetadata + md xdsresource.UpdateMetadata err error } gotUpdateCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == test.rType { switch test.rType { case xdsclient.ListenerResource: - dd := make(map[string]xdsclient.ListenerUpdateErrTuple) + dd := make(map[string]xdsresource.ListenerUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.ListenerUpdateErrTuple) + dd[n] = u.(xdsresource.ListenerUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.RouteConfigResource: - dd := make(map[string]xdsclient.RouteConfigUpdateErrTuple) + dd := make(map[string]xdsresource.RouteConfigUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.RouteConfigUpdateErrTuple) + dd[n] = u.(xdsresource.RouteConfigUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.ClusterResource: - dd := make(map[string]xdsclient.ClusterUpdateErrTuple) + dd := make(map[string]xdsresource.ClusterUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.ClusterUpdateErrTuple) + dd[n] = u.(xdsresource.ClusterUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) case xdsclient.EndpointsResource: - dd := make(map[string]xdsclient.EndpointsUpdateErrTuple) + dd := make(map[string]xdsresource.EndpointsUpdateErrTuple) for n, u := range d { - dd[n] = u.(xdsclient.EndpointsUpdateErrTuple) + dd[n] = u.(xdsresource.EndpointsUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) } @@ -431,8 +432,8 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { wantUpdate := test.wantUpdate cmpOpts := cmp.Options{ 
cmpopts.EquateEmpty(), protocmp.Transform(), - cmpopts.IgnoreFields(xdsclient.UpdateMetadata{}, "Timestamp"), - cmpopts.IgnoreFields(xdsclient.UpdateErrorMetadata{}, "Timestamp"), + cmpopts.IgnoreFields(xdsresource.UpdateMetadata{}, "Timestamp"), + cmpopts.IgnoreFields(xdsresource.UpdateErrorMetadata{}, "Timestamp"), cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), } uErr, err := gotUpdateCh.Receive(ctx) @@ -503,7 +504,7 @@ func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { callbackCh := make(chan struct{}) v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) { close(callbackCh) }, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) { close(callbackCh) }, }, cc, goodNodeProto, clientBackoff, nil) if err != nil { t.Fatal(err) @@ -548,7 +549,7 @@ func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { callbackCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == xdsclient.ListenerResource { if u, ok := d[goodLDSTarget1]; ok { t.Logf("Received LDS callback with ldsUpdate {%+v}", u) @@ -620,7 +621,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { callbackCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { + f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == xdsclient.ListenerResource { if u, ok := d[goodLDSTarget1]; ok { t.Logf("Received LDS callback with ldsUpdate {%+v}", u) @@ -664,7 +665,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { if v, err := callbackCh.Receive(ctx); err != nil { 
t.Fatal("Timeout when expecting LDS update") - } else if _, ok := v.(xdsclient.ListenerUpdateErrTuple); !ok { + } else if _, ok := v.(xdsresource.ListenerUpdateErrTuple); !ok { t.Fatalf("Expect an LDS update from watcher, got %v", v) } } diff --git a/xds/internal/xdsclient/v2/eds_test.go b/xds/internal/xdsclient/v2/eds_test.go index 8176b6dfb93a..d0f355e337b7 100644 --- a/xds/internal/xdsclient/v2/eds_test.go +++ b/xds/internal/xdsclient/v2/eds_test.go @@ -30,6 +30,7 @@ import ( xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var ( @@ -76,8 +77,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { name string edsResponse *v2xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.EndpointsUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.EndpointsUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Any in resource is badly marshaled. 
@@ -86,9 +87,9 @@ func (s) TestEDSHandleResponse(t *testing.T) { edsResponse: badlyMarshaledEDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -100,9 +101,9 @@ func (s) TestEDSHandleResponse(t *testing.T) { edsResponse: badResourceTypeInEDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -113,11 +114,11 @@ func (s) TestEDSHandleResponse(t *testing.T) { name: "one-uninterestring-assignment", edsResponse: goodEDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.EndpointsUpdateErrTuple{ - "not-goodEDSName": {Update: xdsclient.EndpointsUpdate{ - Localities: []xdsclient.Locality{ + wantUpdate: map[string]xdsresource.EndpointsUpdateErrTuple{ + "not-goodEDSName": {Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 0, Weight: 1, @@ -126,8 +127,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { Raw: marshaledGoodCLA2, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -136,17 +137,17 @@ func (s) TestEDSHandleResponse(t *testing.T) { name: "one-good-assignment", edsResponse: goodEDSResponse1, wantErr: false, - wantUpdate: 
map[string]xdsclient.EndpointsUpdateErrTuple{ - goodEDSName: {Update: xdsclient.EndpointsUpdate{ - Localities: []xdsclient.Locality{ + wantUpdate: map[string]xdsresource.EndpointsUpdateErrTuple{ + goodEDSName: {Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ { - Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 1, Weight: 1, }, { - Endpoints: []xdsclient.Endpoint{{Address: "addr2:159"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}}, ID: internal.LocalityID{SubZone: "locality-2"}, Priority: 0, Weight: 1, @@ -155,8 +156,8 @@ func (s) TestEDSHandleResponse(t *testing.T) { Raw: marshaledGoodCLA1, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -183,7 +184,7 @@ func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/lds_test.go b/xds/internal/xdsclient/v2/lds_test.go index a0600550095b..fdb2abeb5132 100644 --- a/xds/internal/xdsclient/v2/lds_test.go +++ b/xds/internal/xdsclient/v2/lds_test.go @@ -24,8 +24,8 @@ import ( v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it, @@ -36,8 +36,8 @@ func (s) TestLDSHandleResponse(t 
*testing.T) { name string ldsResponse *v2xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.ListenerUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.ListenerUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Badly marshaled LDS response. @@ -46,9 +46,9 @@ func (s) TestLDSHandleResponse(t *testing.T) { ldsResponse: badlyMarshaledLDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -60,9 +60,9 @@ func (s) TestLDSHandleResponse(t *testing.T) { ldsResponse: badResourceTypeInLDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -75,12 +75,12 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "no-apiListener-in-response", ldsResponse: noAPIListenerLDSResponse, wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ goodLDSTarget1: {Err: cmpopts.AnyError}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -91,11 +91,11 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "one-good-listener", ldsResponse: goodLDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - 
goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -105,12 +105,12 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "multiple-good-listener", ldsResponse: ldsResponseWithMultipleResources, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, - goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + goodLDSTarget2: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -121,13 +121,13 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "good-bad-ugly-listeners", ldsResponse: goodBadUglyLDSResponse, wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, goodLDSTarget2: {Err: cmpopts.AnyError}, }, - wantUpdateMD: 
xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -138,11 +138,11 @@ func (s) TestLDSHandleResponse(t *testing.T) { name: "one-uninteresting-listener", ldsResponse: goodLDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdateErrTuple{ - goodLDSTarget2: {Update: xdsclient.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, + wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ + goodLDSTarget2: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -153,8 +153,8 @@ func (s) TestLDSHandleResponse(t *testing.T) { ldsResponse: emptyLDSResponse, wantErr: false, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -182,7 +182,7 @@ func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go index 3389f0539469..79e51ab231ea 100644 --- a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -25,9 +25,9 @@ import ( xdspb 
"github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // doLDS makes a LDS watch, and waits for the response and ack to finish. @@ -50,8 +50,8 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name string rdsResponse *xdspb.DiscoveryResponse wantErr bool - wantUpdate map[string]xdsclient.RouteConfigUpdateErrTuple - wantUpdateMD xdsclient.UpdateMetadata + wantUpdate map[string]xdsresource.RouteConfigUpdateErrTuple + wantUpdateMD xdsresource.UpdateMetadata wantUpdateErr bool }{ // Badly marshaled RDS response. @@ -60,9 +60,9 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { rdsResponse: badlyMarshaledRDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -74,9 +74,9 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { rdsResponse: badResourceTypeInRDSResponse, wantErr: true, wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ Err: cmpopts.AnyError, }, }, @@ -89,14 +89,14 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "no-virtual-hosts-in-response", rdsResponse: noVirtualHostsInRDSResponse, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ - goodRouteName1: {Update: xdsclient.RouteConfigUpdate{ + wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ + 
goodRouteName1: {Update: xdsresource.RouteConfigUpdate{ VirtualHosts: nil, Raw: marshaledNoVirtualHostsRouteConfig, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -105,28 +105,28 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: "one-uninteresting-route-config", rdsResponse: goodRDSResponse2, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ - goodRouteName2: {Update: xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ + goodRouteName2: {Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName2: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName2: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig2, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -135,28 +135,28 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { name: 
"one-good-route-config", rdsResponse: goodRDSResponse1, wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdateErrTuple{ - goodRouteName1: {Update: xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ + goodRouteName1: {Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{ + Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName1: {Weight: 1}}, - RouteAction: xdsclient.RouteActionRoute}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName1: {Weight: 1}}, + RouteAction: xdsresource.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig1, }}, }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, + wantUpdateMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, }, wantUpdateErr: false, }, @@ -183,7 +183,7 @@ func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, + f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v3/client.go b/xds/internal/xdsclient/v3/client.go index 
827c06b741b7..21d8809dd33b 100644 --- a/xds/internal/xdsclient/v3/client.go +++ b/xds/internal/xdsclient/v3/client.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" @@ -93,7 +94,7 @@ type client struct { // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. cc *grpc.ClientConn nodeProto *v3corepb.Node - updateValidator xdsclient.UpdateValidatorFunc + updateValidator xdsresource.UpdateValidatorFunc } func (v3c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { @@ -164,16 +165,16 @@ func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri var err error url := resp.GetTypeUrl() switch { - case xdsclient.IsListenerResource(url): + case xdsresource.IsListenerResource(url): err = v3c.handleLDSResponse(resp) rType = xdsclient.ListenerResource - case xdsclient.IsRouteConfigResource(url): + case xdsresource.IsRouteConfigResource(url): err = v3c.handleRDSResponse(resp) rType = xdsclient.RouteConfigResource - case xdsclient.IsClusterResource(url): + case xdsresource.IsClusterResource(url): err = v3c.handleCDSResponse(resp) rType = xdsclient.ClusterResource - case xdsclient.IsEndpointsResource(url): + case xdsresource.IsEndpointsResource(url): err = v3c.handleEDSResponse(resp) rType = xdsclient.EndpointsResource default: @@ -188,7 +189,7 @@ func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri // server. On receipt of a good response, it also invokes the registered watcher // callback. 
func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalListener(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, @@ -202,7 +203,7 @@ func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) erro // server. On receipt of a good response, it caches validated resources and also // invokes the registered watcher callback. func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalRouteConfig(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, @@ -216,7 +217,7 @@ func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) erro // server. On receipt of a good response, it also invokes the registered watcher // callback. 
func (v3c *client) handleCDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalCluster(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, @@ -227,7 +228,7 @@ func (v3c *client) handleCDSResponse(resp *v3discoverypb.DiscoveryResponse) erro } func (v3c *client) handleEDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(&xdsclient.UnmarshalOptions{ + update, md, err := xdsresource.UnmarshalEndpoints(&xdsresource.UnmarshalOptions{ Version: resp.GetVersionInfo(), Resources: resp.GetResources(), Logger: v3c.logger, diff --git a/xds/internal/xdsclient/watchers.go b/xds/internal/xdsclient/watchers.go index e26ed360308a..639a918627b8 100644 --- a/xds/internal/xdsclient/watchers.go +++ b/xds/internal/xdsclient/watchers.go @@ -24,6 +24,7 @@ import ( "time" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) type watchInfoState int @@ -41,10 +42,10 @@ type watchInfo struct { rType ResourceType target string - ldsCallback func(ListenerUpdate, error) - rdsCallback func(RouteConfigUpdate, error) - cdsCallback func(ClusterUpdate, error) - edsCallback func(EndpointsUpdate, error) + ldsCallback func(xdsresource.ListenerUpdate, error) + rdsCallback func(xdsresource.RouteConfigUpdate, error) + cdsCallback func(xdsresource.ClusterUpdate, error) + edsCallback func(xdsresource.EndpointsUpdate, error) expiryTimer *time.Timer @@ -105,13 +106,13 @@ func (wi *watchInfo) sendErrorLocked(err error) { ) switch wi.rType { case ListenerResource: - u = ListenerUpdate{} + u = xdsresource.ListenerUpdate{} case RouteConfigResource: - u = RouteConfigUpdate{} + u = xdsresource.RouteConfigUpdate{} case ClusterResource: - u = ClusterUpdate{} + u = xdsresource.ClusterUpdate{} case EndpointsResource: - u = 
EndpointsUpdate{} + u = xdsresource.EndpointsUpdate{} } wi.c.scheduleCallback(wi, u, err) } @@ -132,7 +133,7 @@ func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { c.logger.Debugf("new watch for type %v, resource name %v", wi.rType, wi.target) var ( watchers map[string]map[*watchInfo]bool - mds map[string]UpdateMetadata + mds map[string]xdsresource.UpdateMetadata ) switch wi.rType { case ListenerResource: @@ -163,7 +164,7 @@ func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { c.logger.Debugf("first watch for type %v, resource name %v, will send a new xDS request", wi.rType, wi.target) s = make(map[*watchInfo]bool) watchers[resourceName] = s - mds[resourceName] = UpdateMetadata{Status: ServiceStatusRequested} + mds[resourceName] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested} c.apiClient.AddWatch(wi.rType, resourceName) } // No matter what, add the new watcher to the set, so it's callback will be @@ -233,7 +234,7 @@ func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchListener(serviceName string, cb func(ListenerUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: ListenerResource, @@ -252,7 +253,7 @@ func (c *clientImpl) WatchListener(serviceName string, cb func(ListenerUpdate, e // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
-func (c *clientImpl) WatchRouteConfig(routeName string, cb func(RouteConfigUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: RouteConfigResource, @@ -275,7 +276,7 @@ func (c *clientImpl) WatchRouteConfig(routeName string, cb func(RouteConfigUpdat // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchCluster(clusterName string, cb func(ClusterUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: ClusterResource, @@ -297,7 +298,7 @@ func (c *clientImpl) WatchCluster(clusterName string, cb func(ClusterUpdate, err // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
-func (c *clientImpl) WatchEndpoints(clusterName string, cb func(EndpointsUpdate, error)) (cancel func()) { +func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { wi := &watchInfo{ c: c, rType: EndpointsResource, diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index c06319e959c6..7ddaf08637e4 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" @@ -52,15 +53,15 @@ func (s) TestClusterWatch(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -70,17 +71,17 @@ func (s) TestClusterWatch(t *testing.T) { // new update is not considered equal to the old one. 
newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. cancelWatch() - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -114,8 +115,8 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { clusterUpdateCh := testutils.NewChannel() clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - cancelLastWatch = client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -127,8 +128,8 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ 
-139,7 +140,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. cancelLastWatch() - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -153,8 +154,8 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { // Push a new update and make sure the uncancelled watcher is invoked. // Specify a non-nil raw proto to ensure that the new update is not // considered equal to the old one. - newUpdate := ClusterUpdate{ClusterName: testEDSName, Raw: &anypb.Any{}} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: {Update: newUpdate}}, UpdateMetadata{}) + newUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName, Raw: &anypb.Any{}} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -186,8 +187,8 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { clusterUpdateCh := testutils.NewChannel() clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -201,19 +202,19 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. 
clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName + "1": {Update: wantUpdate1}, testCDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate1, nil); err != nil { @@ -246,25 +247,25 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + 
client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName: {Update: wantUpdate}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -307,8 +308,8 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err}) + client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -318,8 +319,8 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for cluster update: %v", err) } - gotUpdate := u.(ClusterUpdateErrTuple) - if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, ClusterUpdate{}) { + gotUpdate := u.(xdsresource.ClusterUpdateErrTuple) + if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, xdsresource.ClusterUpdate{}) { t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) } } @@ -346,17 +347,17 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := 
testutils.NewChannel() - client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: u, Err: err}) + client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName: {Update: wantUpdate}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -394,8 +395,8 @@ func (s) TestClusterResourceRemoved(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh1 := testutils.NewChannel() - client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh1.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -403,19 +404,19 @@ func (s) TestClusterResourceRemoved(t *testing.T) { // Another watch for a different name. 
clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdateErrTuple{ + wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"} + wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName + "1": {Update: wantUpdate1}, testCDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) } @@ -424,10 +425,10 @@ func (s) TestClusterResourceRemoved(t *testing.T) { } // Send another update to remove resource 1. - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should get an error. 
- if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ClusterUpdateErrTuple).Err) != ErrorTypeResourceNotFound { + if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(xdsresource.ClusterUpdateErrTuple).Err) != ErrorTypeResourceNotFound { t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -440,8 +441,8 @@ func (s) TestClusterResourceRemoved(t *testing.T) { // Send another update with resource 2 modified. Specify a non-nil raw proto // to ensure that the new update is not considered equal to the old one. - wantUpdate2 = ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + wantUpdate2 = xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should not see an update. 
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) @@ -477,8 +478,8 @@ func (s) TestClusterWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { @@ -486,10 +487,10 @@ func (s) TestClusterWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewClusters(map[string]ClusterUpdateErrTuple{testCDSName: { + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: { Err: wantError, - }}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, ClusterUpdate{}, wantError); err != nil { + }}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyClusterUpdate(ctx, clusterUpdateCh, xdsresource.ClusterUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -521,8 +522,8 @@ func (s) TestClusterWatchPartialValid(t *testing.T) { for _, name := range []string{testCDSName, badResourceName} { clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(name, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(ClusterUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchCluster(name, func(update xdsresource.ClusterUpdate, err error) { + clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) defer func() { cancelWatch() @@ -538,18 +539,18 @@ func (s) TestClusterWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 
:= fmt.Errorf("individual error") - client.NewClusters(map[string]ClusterUpdateErrTuple{ - testCDSName: {Update: ClusterUpdate{ClusterName: testEDSName}}, + client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ + testCDSName: {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyClusterUpdate(ctx, updateChs[testCDSName], ClusterUpdate{ClusterName: testEDSName}, nil); err != nil { + if err := verifyClusterUpdate(ctx, updateChs[testCDSName], xdsresource.ClusterUpdate{ClusterName: testEDSName}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. - if err := verifyClusterUpdate(ctx, updateChs[badResourceName], ClusterUpdate{}, wantError2); err != nil { + if err := verifyClusterUpdate(ctx, updateChs[badResourceName], xdsresource.ClusterUpdate{}, wantError2); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index b87723e5086e..3db3c3efa755 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" @@ -31,15 +32,15 @@ import ( ) var ( - testLocalities = []Locality{ + testLocalities = []xdsresource.Locality{ { - Endpoints: []Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 1, Weight: 1, }, { - Endpoints: []Endpoint{{Address: "addr2:159"}}, + Endpoints: 
[]xdsresource.Endpoint{{Address: "addr2:159"}}, ID: internal.LocalityID{SubZone: "locality-2"}, Priority: 0, Weight: 1, @@ -70,15 +71,15 @@ func (s) TestEndpointsWatch(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -88,17 +89,17 @@ func (s) TestEndpointsWatch(t *testing.T) { // new update is not considered equal to the old one. newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ testCDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. 
cancelWatch() - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -134,8 +135,8 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { endpointsUpdateCh := testutils.NewChannel() endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - cancelLastWatch = client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -147,8 +148,8 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -159,7 +160,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. 
cancelLastWatch() - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -173,8 +174,8 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { // Push a new update and make sure the uncancelled watcher is invoked. // Specify a non-nil raw proto to ensure that the new update is not // considered equal to the old one. - newUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}, Raw: &anypb.Any{}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: newUpdate}}, UpdateMetadata{}) + newUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}, Raw: &anypb.Any{}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -206,8 +207,8 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { endpointsUpdateCh := testutils.NewChannel() endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - client.WatchEndpoints(testCDSName+"1", func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName+"1", func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -221,19 +222,19 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. 
endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName+"2", func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName+"2", func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - wantUpdate2 := EndpointsUpdate{Localities: []Locality{testLocalities[1]}} - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ + wantUpdate1 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + wantUpdate2 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[1]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ testCDSName + "1": {Update: wantUpdate1}, testCDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate1, nil); err != nil { @@ -266,23 +267,23 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - 
client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -325,8 +326,8 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -336,8 +337,8 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for endpoints update: %v", err) } - gotUpdate := u.(EndpointsUpdateErrTuple) - if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, EndpointsUpdate{}) { + gotUpdate := u.(xdsresource.EndpointsUpdateErrTuple) + if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, 
xdsresource.EndpointsUpdate{}) { t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) } } @@ -363,8 +364,8 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { @@ -372,8 +373,8 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, EndpointsUpdate{}, wantError); err != nil { + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, xdsresource.EndpointsUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -405,8 +406,8 @@ func (s) TestEndpointsWatchPartialValid(t *testing.T) { for _, name := range []string{testCDSName, badResourceName} { endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(name, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(EndpointsUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchEndpoints(name, func(update xdsresource.EndpointsUpdate, err error) { + endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) defer 
func() { cancelWatch() @@ -422,18 +423,18 @@ func (s) TestEndpointsWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 := fmt.Errorf("individual error") - client.NewEndpoints(map[string]EndpointsUpdateErrTuple{ - testCDSName: {Update: EndpointsUpdate{Localities: []Locality{testLocalities[0]}}}, + client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ + testCDSName: {Update: xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], EndpointsUpdate{Localities: []Locality{testLocalities[0]}}, nil); err != nil { + if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. 
- if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], EndpointsUpdate{}, wantError2); err != nil { + if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], xdsresource.EndpointsUpdate{}, wantError2); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index 176e6bbcb7b4..cd375639f8ee 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -26,6 +26,7 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" ) @@ -52,15 +53,15 @@ func (s) TestLDSWatch(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) 
} @@ -68,18 +69,18 @@ func (s) TestLDSWatch(t *testing.T) { // Push an update, with an extra resource for a different resource name. // Specify a non-nil raw proto in the original resource to ensure that the // new update is not considered equal to the old one. - newUpdate := ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} - client.NewListeners(map[string]ListenerUpdateErrTuple{ + newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ testLDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. cancelWatch() - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -116,8 +117,8 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { ldsUpdateCh := testutils.NewChannel() ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - cancelLastWatch = client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -129,8 +130,8 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: 
wantUpdate}}, UpdateMetadata{}) + wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -141,7 +142,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. cancelLastWatch() - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -155,8 +156,8 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { // Push a new update and make sure the uncancelled watcher is invoked. // Specify a non-nil raw proto to ensure that the new update is not // considered equal to the old one. 
- newUpdate := ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: newUpdate}}, UpdateMetadata{}) + newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -189,8 +190,8 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { ldsUpdateCh := testutils.NewChannel() ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -204,19 +205,19 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. 
ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ListenerUpdate{RouteConfigName: testRDSName + "1"} - wantUpdate2 := ListenerUpdate{RouteConfigName: testRDSName + "2"} - client.NewListeners(map[string]ListenerUpdateErrTuple{ + wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"} + wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ testLDSName + "1": {Update: wantUpdate1}, testLDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate1, nil); err != nil { @@ -249,23 +250,23 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, UpdateMetadata{}) + wantUpdate := 
xdsresource.ListenerUpdate{RouteConfigName: testRDSName} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -311,27 +312,27 @@ func (s) TestLDSResourceRemoved(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh1 := testutils.NewChannel() - client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh1.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } // Another watch for a different name. 
ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := ListenerUpdate{RouteConfigName: testEDSName + "1"} - wantUpdate2 := ListenerUpdate{RouteConfigName: testEDSName + "2"} - client.NewListeners(map[string]ListenerUpdateErrTuple{ + wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "1"} + wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2"} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ testLDSName + "1": {Update: wantUpdate1}, testLDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1, nil); err != nil { t.Fatal(err) } @@ -340,10 +341,10 @@ func (s) TestLDSResourceRemoved(t *testing.T) { } // Send another update to remove resource 1. - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should get an error. 
- if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ListenerUpdateErrTuple).Err) != ErrorTypeResourceNotFound { + if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(xdsresource.ListenerUpdateErrTuple).Err) != ErrorTypeResourceNotFound { t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -356,8 +357,8 @@ func (s) TestLDSResourceRemoved(t *testing.T) { // Send another update with resource 2 modified. Specify a non-nil raw proto // to ensure that the new update is not considered equal to the old one. - wantUpdate2 = ListenerUpdate{RouteConfigName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, UpdateMetadata{}) + wantUpdate2 = xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2", Raw: &anypb.Any{}} + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should not see an update. 
sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) @@ -393,8 +394,8 @@ func (s) TestListenerWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { @@ -402,8 +403,8 @@ func (s) TestListenerWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, ListenerUpdate{}, wantError); err != nil { + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyListenerUpdate(ctx, ldsUpdateCh, xdsresource.ListenerUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -435,8 +436,8 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { for _, name := range []string{testLDSName, badResourceName} { ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(name, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchListener(name, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) defer func() { cancelWatch() @@ -452,18 +453,18 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 := 
fmt.Errorf("individual error") - client.NewListeners(map[string]ListenerUpdateErrTuple{ - testLDSName: {Update: ListenerUpdate{RouteConfigName: testEDSName}}, + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ + testLDSName: {Update: xdsresource.ListenerUpdate{RouteConfigName: testEDSName}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyListenerUpdate(ctx, updateChs[testLDSName], ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil { + if err := verifyListenerUpdate(ctx, updateChs[testLDSName], xdsresource.ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. - if err := verifyListenerUpdate(ctx, updateChs[badResourceName], ListenerUpdate{}, wantError2); err != nil { + if err := verifyListenerUpdate(ctx, updateChs[badResourceName], xdsresource.ListenerUpdate{}, wantError2); err != nil { t.Fatal(err) } } @@ -489,8 +490,8 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { apiClient := c.(*testAPIClient) ldsUpdateCh := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ListenerUpdateErrTuple{Update: update, Err: err}) + client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { + ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) @@ -515,8 +516,11 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { }, HttpFilters: []*v3httppb.HttpFilter{ { - Name: "customFilter1", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + Name: 
"customFilter1", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: &anypb.Any{ + TypeUrl: "custom.filter", + Value: []byte{1, 2, 3}, + }}, }, }, }), @@ -531,8 +535,11 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { }, HttpFilters: []*v3httppb.HttpFilter{ { - Name: "customFilter2", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + Name: "customFilter2", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: &anypb.Any{ + TypeUrl: "custom.filter", + Value: []byte{1, 2, 3}, + }}, }, }, }), @@ -540,42 +547,42 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { }) tests := []struct { - update ListenerUpdate + update xdsresource.ListenerUpdate wantCallback bool }{ { // First update. Callback should be invoked. - update: ListenerUpdate{Raw: basicListener}, + update: xdsresource.ListenerUpdate{Raw: basicListener}, wantCallback: true, }, { // Same update as previous. Callback should be skipped. - update: ListenerUpdate{Raw: basicListener}, + update: xdsresource.ListenerUpdate{Raw: basicListener}, wantCallback: false, }, { // New update. Callback should be invoked. - update: ListenerUpdate{Raw: listenerWithFilter1}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter1}, wantCallback: true, }, { // Same update as previous. Callback should be skipped. - update: ListenerUpdate{Raw: listenerWithFilter1}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter1}, wantCallback: false, }, { // New update. Callback should be invoked. - update: ListenerUpdate{Raw: listenerWithFilter2}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter2}, wantCallback: true, }, { // Same update as previous. Callback should be skipped. 
- update: ListenerUpdate{Raw: listenerWithFilter2}, + update: xdsresource.ListenerUpdate{Raw: listenerWithFilter2}, wantCallback: false, }, } for _, test := range tests { - client.NewListeners(map[string]ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, UpdateMetadata{}) + client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, xdsresource.UpdateMetadata{}) if test.wantCallback { if err := verifyListenerUpdate(ctx, ldsUpdateCh, test.update, nil); err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index 70c8dd829e9e..9e4e7e43611c 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/testutils" @@ -52,22 +53,22 @@ func (s) TestRDSWatch(t *testing.T) { apiClient := c.(*testAPIClient) rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: 
[]*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -77,17 +78,17 @@ func (s) TestRDSWatch(t *testing.T) { // new update is not considered equal to the old one. newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ testRDSName: {Update: newUpdate}, "randomName": {}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, newUpdate, nil); err != nil { t.Fatal(err) } // Cancel watch, and send update again. 
cancelWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { @@ -123,8 +124,8 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { for i := 0; i < count; i++ { rdsUpdateCh := testutils.NewChannel() rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -136,15 +137,15 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { } } - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { t.Fatal(err) @@ -155,7 +156,7 @@ func (s) TestRDSTwoWatchSameResourceName(t 
*testing.T) { // be notified because one has been cancelled, and the other is receiving // the same update. cancelLastWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { func() { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) @@ -171,7 +172,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { // considered equal to the old one. newUpdate := wantUpdate newUpdate.Raw = &anypb.Any{} - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: newUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[0], newUpdate, nil); err != nil { t.Fatal(err) } @@ -203,8 +204,8 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { for i := 0; i < count; i++ { rdsUpdateCh := testutils.NewChannel() rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - client.WatchRouteConfig(testRDSName+"1", func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName+"1", func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if i == 0 { @@ -218,33 +219,33 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { // Third watch for a different name. 
rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName+"2", func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName+"2", func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate1 := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate1 := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName + "1": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "1": {Weight: 1}}}}, }, }, } - wantUpdate2 := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate2 := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName + "2": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "2": {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ testRDSName + "1": {Update: wantUpdate1}, testRDSName + "2": {Update: wantUpdate2}, - }, UpdateMetadata{}) + }, xdsresource.UpdateMetadata{}) for i := 0; i < count; i++ { if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate1, nil); err != nil { @@ -277,30 +278,30 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { apiClient := c.(*testAPIClient) rdsUpdateCh := 
testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ + wantUpdate := xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, } - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, UpdateMetadata{}) + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } // Another watch for the resource in cache. rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -309,7 +310,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { } // New watch should receives the update. 
- if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, RouteConfigUpdateErrTuple{wantUpdate, nil}, cmp.AllowUnexported(RouteConfigUpdateErrTuple{})) { + if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, xdsresource.RouteConfigUpdateErrTuple{Update: wantUpdate}, cmp.AllowUnexported(xdsresource.RouteConfigUpdateErrTuple{})) { t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err) } @@ -342,8 +343,8 @@ func (s) TestRouteWatchNACKError(t *testing.T) { apiClient := c.(*testAPIClient) rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(testCDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchRouteConfig(testCDSName, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { @@ -351,8 +352,8 @@ func (s) TestRouteWatchNACKError(t *testing.T) { } wantError := fmt.Errorf("testing error") - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{testCDSName: {Err: wantError}}, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, RouteConfigUpdate{}, wantError); err != nil { + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, xdsresource.RouteConfigUpdate{}, wantError); err != nil { t.Fatal(err) } } @@ -384,8 +385,8 @@ func (s) TestRouteWatchPartialValid(t *testing.T) { for _, name := range []string{testRDSName, badResourceName} { rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(name, func(update RouteConfigUpdate, 
err error) { - rdsUpdateCh.Send(RouteConfigUpdateErrTuple{Update: update, Err: err}) + cancelWatch := client.WatchRouteConfig(name, func(update xdsresource.RouteConfigUpdate, err error) { + rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) defer func() { cancelWatch() @@ -401,24 +402,24 @@ func (s) TestRouteWatchPartialValid(t *testing.T) { wantError := fmt.Errorf("testing error") wantError2 := fmt.Errorf("individual error") - client.NewRouteConfigs(map[string]RouteConfigUpdateErrTuple{ - testRDSName: {Update: RouteConfigUpdate{VirtualHosts: []*VirtualHost{{ + client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ + testRDSName: {Update: xdsresource.RouteConfigUpdate{VirtualHosts: []*xdsresource.VirtualHost{{ Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }}}}, badResourceName: {Err: wantError2}, - }, UpdateMetadata{ErrState: &UpdateErrorMetadata{Err: wantError}}) + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) // The valid resource should be sent to the watcher. - if err := verifyRouteConfigUpdate(ctx, updateChs[testRDSName], RouteConfigUpdate{VirtualHosts: []*VirtualHost{{ + if err := verifyRouteConfigUpdate(ctx, updateChs[testRDSName], xdsresource.RouteConfigUpdate{VirtualHosts: []*xdsresource.VirtualHost{{ Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }}}, nil); err != nil { t.Fatal(err) } // The failed watcher should receive an error. 
- if err := verifyRouteConfigUpdate(ctx, updateChs[badResourceName], RouteConfigUpdate{}, wantError2); err != nil { + if err := verifyRouteConfigUpdate(ctx, updateChs[badResourceName], xdsresource.RouteConfigUpdate{}, wantError2); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/xds.go b/xds/internal/xdsclient/xds.go deleted file mode 100644 index 4b4f0680de67..000000000000 --- a/xds/internal/xdsclient/xds.go +++ /dev/null @@ -1,1345 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package xdsclient - -import ( - "errors" - "fmt" - "net" - "regexp" - "strconv" - "strings" - "time" - - v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" - v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/xds/matcher" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/version" -) - -// TransportSocket proto message has a `name` field which is expected to be set -// to this value by the management server. -const transportSocketName = "envoy.transport_sockets.tls" - -// UnmarshalOptions wraps the input parameters for `UnmarshalXxx` functions. -type UnmarshalOptions struct { - // Version is the version of the received response. 
- Version string - // Resources are the xDS resources resources in the received response. - Resources []*anypb.Any - // Logger is the prefix logger to be used during unmarshaling. - Logger *grpclog.PrefixLogger - // UpdateValidator is a post unmarshal validation check provided by the - // upper layer. - UpdateValidator UpdateValidatorFunc -} - -// UnmarshalListener processes resources received in an LDS response, validates -// them, and transforms them into a native struct which contains only fields we -// are interested in. -func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]ListenerUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - -func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { - if !IsListenerResource(r.GetTypeUrl()) { - return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2ListenerURL - lis := &v3listenerpb.Listener{} - if err := proto.Unmarshal(r.GetValue(), lis); err != nil { - return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis)) - - lu, err := processListener(lis, logger, v2) - if err != nil { - return lis.GetName(), ListenerUpdate{}, err - } - if f != nil { - if err := f(*lu); err != nil { - return lis.GetName(), ListenerUpdate{}, err - } - } - lu.Raw = r - return lis.GetName(), *lu, nil -} - -func processListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { - if lis.GetApiListener() != nil { - return processClientSideListener(lis, logger, v2) - } - return processServerSideListener(lis) -} - -// 
processClientSideListener checks if the provided Listener proto meets -// the expected criteria. If so, it returns a non-empty routeConfigName. -func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { - update := &ListenerUpdate{} - - apiLisAny := lis.GetApiListener().GetApiListener() - if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) { - return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl()) - } - apiLis := &v3httppb.HttpConnectionManager{} - if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil { - return nil, fmt.Errorf("failed to unmarshal api_listner: %v", err) - } - // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and - // HttpConnectionManager.original_ip_detection_extensions must be empty. If - // either field has an incorrect value, the Listener must be NACKed." - A41 - if apiLis.XffNumTrustedHops != 0 { - return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis) - } - if len(apiLis.OriginalIpDetectionExtensions) != 0 { - return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) - } - - switch apiLis.RouteSpecifier.(type) { - case *v3httppb.HttpConnectionManager_Rds: - if apiLis.GetRds().GetConfigSource().GetAds() == nil { - return nil, fmt.Errorf("ConfigSource is not ADS: %+v", lis) - } - name := apiLis.GetRds().GetRouteConfigName() - if name == "" { - return nil, fmt.Errorf("empty route_config_name: %+v", lis) - } - update.RouteConfigName = name - case *v3httppb.HttpConnectionManager_RouteConfig: - routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), logger, v2) - if err != nil { - return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) - } - update.InlineRouteConfig = &routeU - case nil: - return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) - default: - return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", 
apiLis.RouteSpecifier) - } - - if v2 { - return update, nil - } - - // The following checks and fields only apply to xDS protocol versions v3+. - - update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() - - var err error - if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { - return nil, err - } - - return update, nil -} - -func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { - switch { - case ptypes.Is(config, &v3cncftypepb.TypedStruct{}): - // The real type name is inside the new TypedStruct message. - s := new(v3cncftypepb.TypedStruct) - if err := ptypes.UnmarshalAny(config, s); err != nil { - return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) - } - return s, s.GetTypeUrl(), nil - case ptypes.Is(config, &v1udpatypepb.TypedStruct{}): - // The real type name is inside the old TypedStruct message. - s := new(v1udpatypepb.TypedStruct) - if err := ptypes.UnmarshalAny(config, s); err != nil { - return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) - } - return s, s.GetTypeUrl(), nil - default: - return config, config.GetTypeUrl(), nil - } -} - -func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { - config, typeURL, err := unwrapHTTPFilterConfig(cfg) - if err != nil { - return nil, nil, err - } - filterBuilder := httpfilter.Get(typeURL) - if filterBuilder == nil { - if optional { - return nil, nil, nil - } - return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) - } - parseFunc := filterBuilder.ParseFilterConfig - if !lds { - parseFunc = filterBuilder.ParseFilterConfigOverride - } - filterConfig, err := parseFunc(config) - if err != nil { - return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) - } - return filterBuilder, filterConfig, nil -} - -func 
processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { - if len(cfgs) == 0 { - return nil, nil - } - m := make(map[string]httpfilter.FilterConfig) - for name, cfg := range cfgs { - optional := false - s := new(v3routepb.FilterConfig) - if ptypes.Is(cfg, s) { - if err := ptypes.UnmarshalAny(cfg, s); err != nil { - return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) - } - cfg = s.GetConfig() - optional = s.GetIsOptional() - } - - httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) - if err != nil { - return nil, fmt.Errorf("filter override %q: %v", name, err) - } - if httpFilter == nil { - // Optional configs are ignored. - continue - } - m[name] = config - } - return m, nil -} - -func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { - ret := make([]HTTPFilter, 0, len(filters)) - seenNames := make(map[string]bool, len(filters)) - for _, filter := range filters { - name := filter.GetName() - if name == "" { - return nil, errors.New("filter missing name field") - } - if seenNames[name] { - return nil, fmt.Errorf("duplicate filter name %q", name) - } - seenNames[name] = true - - httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) - if err != nil { - return nil, err - } - if httpFilter == nil { - // Optional configs are ignored. 
- continue - } - if server { - if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { - if filter.GetIsOptional() { - continue - } - return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) - } - } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { - if filter.GetIsOptional() { - continue - } - return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) - } - - // Save name/config - ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) - } - // "Validation will fail if a terminal filter is not the last filter in the - // chain or if a non-terminal filter is the last filter in the chain." - A39 - if len(ret) == 0 { - return nil, fmt.Errorf("http filters list is empty") - } - var i int - for ; i < len(ret)-1; i++ { - if ret[i].Filter.IsTerminal() { - return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) - } - } - if !ret[i].Filter.IsTerminal() { - return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) - } - return ret, nil -} - -func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { - if n := len(lis.ListenerFilters); n != 0 { - return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) - } - if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { - return nil, errors.New("unsupported field 'use_original_dst' is present and set to true") - } - addr := lis.GetAddress() - if addr == nil { - return nil, fmt.Errorf("no address field in LDS response: %+v", lis) - } - sockAddr := addr.GetSocketAddress() - if sockAddr == nil { - return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) - } - lu := &ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: sockAddr.GetAddress(), - Port: strconv.Itoa(int(sockAddr.GetPortValue())), - }, - } - - fcMgr, err := 
NewFilterChainManager(lis) - if err != nil { - return nil, err - } - lu.InboundListenerCfg.FilterChains = fcMgr - return lu, nil -} - -// UnmarshalRouteConfig processes resources received in an RDS response, -// validates them, and transforms them into a native struct which contains only -// fields we are interested in. The provided hostname determines the route -// configuration resources of interest. -func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]RouteConfigUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - -func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) { - if !IsRouteConfigResource(r.GetTypeUrl()) { - return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - rc := &v3routepb.RouteConfiguration{} - if err := proto.Unmarshal(r.GetValue(), rc); err != nil { - return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, pretty.ToJSON(rc)) - - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2RouteConfigURL - u, err := generateRDSUpdateFromRouteConfiguration(rc, logger, v2) - if err != nil { - return rc.GetName(), RouteConfigUpdate{}, err - } - u.Raw = r - return rc.GetName(), u, nil -} - -// generateRDSUpdateFromRouteConfiguration checks if the provided -// RouteConfiguration meets the expected criteria. If so, it returns a -// RouteConfigUpdate with nil error. -// -// A RouteConfiguration resource is considered valid when only if it contains a -// VirtualHost whose domain field matches the server name from the URI passed -// to the gRPC channel, and it contains a clusterName or a weighted cluster. 
-// -// The RouteConfiguration includes a list of virtualHosts, which may have zero -// or more elements. We are interested in the element whose domains field -// matches the server name specified in the "xds:" URI. The only field in the -// VirtualHost proto that the we are interested in is the list of routes. We -// only look at the last route in the list (the default route), whose match -// field must be empty and whose route field must be set. Inside that route -// message, the cluster field will contain the clusterName or weighted clusters -// we are looking for. -func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { - vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) - for _, vh := range rc.GetVirtualHosts() { - routes, err := routesProtoToSlice(vh.Routes, logger, v2) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) - } - rc, err := generateRetryConfig(vh.GetRetryPolicy()) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) - } - vhOut := &VirtualHost{ - Domains: vh.GetDomains(), - Routes: routes, - RetryConfig: rc, - } - if !v2 { - cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) - } - vhOut.HTTPFilterConfigOverride = cfgs - } - vhs = append(vhs, vhOut) - } - return RouteConfigUpdate{VirtualHosts: vhs}, nil -} - -func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { - if !env.RetrySupport || rp == nil { - return nil, nil - } - - cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} - for _, s := range strings.Split(rp.GetRetryOn(), ",") { - switch strings.TrimSpace(strings.ToLower(s)) { - case "cancelled": - cfg.RetryOn[codes.Canceled] = true - case "deadline-exceeded": - cfg.RetryOn[codes.DeadlineExceeded] = true - case "internal": 
- cfg.RetryOn[codes.Internal] = true - case "resource-exhausted": - cfg.RetryOn[codes.ResourceExhausted] = true - case "unavailable": - cfg.RetryOn[codes.Unavailable] = true - } - } - - if rp.NumRetries == nil { - cfg.NumRetries = 1 - } else { - cfg.NumRetries = rp.GetNumRetries().Value - if cfg.NumRetries < 1 { - return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) - } - } - - backoff := rp.GetRetryBackOff() - if backoff == nil { - cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond - } else { - cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() - if cfg.RetryBackoff.BaseInterval <= 0 { - return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) - } - } - if max := backoff.GetMaxInterval(); max == nil { - cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval - } else { - cfg.RetryBackoff.MaxInterval = max.AsDuration() - if cfg.RetryBackoff.MaxInterval <= 0 { - return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) - } - } - - if len(cfg.RetryOn) == 0 { - return &RetryConfig{}, nil - } - return cfg, nil -} - -func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { - var routesRet []*Route - for _, r := range routes { - match := r.GetMatch() - if match == nil { - return nil, fmt.Errorf("route %+v doesn't have a match", r) - } - - if len(match.GetQueryParameters()) != 0 { - // Ignore route with query parameters. 
- logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r) - continue - } - - pathSp := match.GetPathSpecifier() - if pathSp == nil { - return nil, fmt.Errorf("route %+v doesn't have a path specifier", r) - } - - var route Route - switch pt := pathSp.(type) { - case *v3routepb.RouteMatch_Prefix: - route.Prefix = &pt.Prefix - case *v3routepb.RouteMatch_Path: - route.Path = &pt.Path - case *v3routepb.RouteMatch_SafeRegex: - regex := pt.SafeRegex.GetRegex() - re, err := regexp.Compile(regex) - if err != nil { - return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) - } - route.Regex = re - default: - return nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) - } - - if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { - route.CaseInsensitive = !caseSensitive.Value - } - - for _, h := range match.GetHeaders() { - var header HeaderMatcher - switch ht := h.GetHeaderMatchSpecifier().(type) { - case *v3routepb.HeaderMatcher_ExactMatch: - header.ExactMatch = &ht.ExactMatch - case *v3routepb.HeaderMatcher_SafeRegexMatch: - regex := ht.SafeRegexMatch.GetRegex() - re, err := regexp.Compile(regex) - if err != nil { - return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) - } - header.RegexMatch = re - case *v3routepb.HeaderMatcher_RangeMatch: - header.RangeMatch = &Int64Range{ - Start: ht.RangeMatch.Start, - End: ht.RangeMatch.End, - } - case *v3routepb.HeaderMatcher_PresentMatch: - header.PresentMatch = &ht.PresentMatch - case *v3routepb.HeaderMatcher_PrefixMatch: - header.PrefixMatch = &ht.PrefixMatch - case *v3routepb.HeaderMatcher_SuffixMatch: - header.SuffixMatch = &ht.SuffixMatch - default: - return nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) - } - header.Name = h.GetName() - invert := h.GetInvertMatch() - header.InvertMatch = &invert - route.Headers = append(route.Headers, &header) - } - - if fr := match.GetRuntimeFraction(); fr 
!= nil { - d := fr.GetDefaultValue() - n := d.GetNumerator() - switch d.GetDenominator() { - case v3typepb.FractionalPercent_HUNDRED: - n *= 10000 - case v3typepb.FractionalPercent_TEN_THOUSAND: - n *= 100 - case v3typepb.FractionalPercent_MILLION: - } - route.Fraction = &n - } - - switch r.GetAction().(type) { - case *v3routepb.Route_Route: - route.WeightedClusters = make(map[string]WeightedCluster) - action := r.GetRoute() - - // Hash Policies are only applicable for a Ring Hash LB. - if env.RingHashSupport { - hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) - if err != nil { - return nil, err - } - route.HashPolicies = hp - } - - switch a := action.GetClusterSpecifier().(type) { - case *v3routepb.RouteAction_Cluster: - route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} - case *v3routepb.RouteAction_WeightedClusters: - wcs := a.WeightedClusters - var totalWeight uint32 - for _, c := range wcs.Clusters { - w := c.GetWeight().GetValue() - if w == 0 { - continue - } - wc := WeightedCluster{Weight: w} - if !v2 { - cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) - if err != nil { - return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) - } - wc.HTTPFilterConfigOverride = cfgs - } - route.WeightedClusters[c.GetName()] = wc - totalWeight += w - } - // envoy xds doc - // default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight - wantTotalWeight := uint32(100) - if tw := wcs.GetTotalWeight(); tw != nil { - wantTotalWeight = tw.GetValue() - } - if totalWeight != wantTotalWeight { - return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) - } - if totalWeight == 0 { - return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) 
- } - case *v3routepb.RouteAction_ClusterHeader: - continue - default: - return nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) - } - - msd := action.GetMaxStreamDuration() - // Prefer grpc_timeout_header_max, if set. - dur := msd.GetGrpcTimeoutHeaderMax() - if dur == nil { - dur = msd.GetMaxStreamDuration() - } - if dur != nil { - d := dur.AsDuration() - route.MaxStreamDuration = &d - } - - var err error - route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) - if err != nil { - return nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) - } - - route.RouteAction = RouteActionRoute - - case *v3routepb.Route_NonForwardingAction: - // Expected to be used on server side. - route.RouteAction = RouteActionNonForwardingAction - default: - route.RouteAction = RouteActionUnsupported - } - - if !v2 { - cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) - if err != nil { - return nil, fmt.Errorf("route %+v: %v", r, err) - } - route.HTTPFilterConfigOverride = cfgs - } - routesRet = append(routesRet, &route) - } - return routesRet, nil -} - -func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { - var hashPoliciesRet []*HashPolicy - for _, p := range policies { - policy := HashPolicy{Terminal: p.Terminal} - switch p.GetPolicySpecifier().(type) { - case *v3routepb.RouteAction_HashPolicy_Header_: - policy.HashPolicyType = HashPolicyTypeHeader - policy.HeaderName = p.GetHeader().GetHeaderName() - if rr := p.GetHeader().GetRegexRewrite(); rr != nil { - regex := rr.GetPattern().GetRegex() - re, err := regexp.Compile(regex) - if err != nil { - return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) - } - policy.Regex = re - policy.RegexSubstitution = rr.GetSubstitution() - } - case *v3routepb.RouteAction_HashPolicy_FilterState_: - if p.GetFilterState().GetKey() != "io.grpc.channel_id" { - logger.Infof("hash policy 
%+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) - continue - } - policy.HashPolicyType = HashPolicyTypeChannelID - default: - logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier()) - continue - } - - hashPoliciesRet = append(hashPoliciesRet, &policy) - } - return hashPoliciesRet, nil -} - -// UnmarshalCluster processes resources received in an CDS response, validates -// them, and transforms them into a native struct which contains only fields we -// are interested in. -func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]ClusterUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - -func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { - if !IsClusterResource(r.GetTypeUrl()) { - return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - - cluster := &v3clusterpb.Cluster{} - if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { - return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster)) - cu, err := validateClusterAndConstructClusterUpdate(cluster) - if err != nil { - return cluster.GetName(), ClusterUpdate{}, err - } - cu.Raw = r - if f != nil { - if err := f(cu); err != nil { - return "", ClusterUpdate{}, err - } - } - - return cluster.GetName(), cu, nil -} - -const ( - defaultRingHashMinSize = 1024 - defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M - ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M -) - -func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - var lbPolicy *ClusterLBPolicyRingHash - switch cluster.GetLbPolicy() { - case v3clusterpb.Cluster_ROUND_ROBIN: - 
lbPolicy = nil // The default is round_robin, and there's no config to set. - case v3clusterpb.Cluster_RING_HASH: - if !env.RingHashSupport { - return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) - } - rhc := cluster.GetRingHashLbConfig() - if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH { - return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster) - } - // Minimum defaults to 1024 entries, and limited to 8M entries Maximum - // defaults to 8M entries, and limited to 8M entries - var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize - if min := rhc.GetMinimumRingSize(); min != nil { - if min.GetValue() > ringHashSizeUpperBound { - return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash mininum ring size %v in response: %+v", min.GetValue(), cluster) - } - minSize = min.GetValue() - } - if max := rhc.GetMaximumRingSize(); max != nil { - if max.GetValue() > ringHashSizeUpperBound { - return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maxinum ring size %v in response: %+v", max.GetValue(), cluster) - } - maxSize = max.GetValue() - } - if minSize > maxSize { - return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize) - } - lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize} - default: - return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) - } - - // Process security configuration received from the control plane iff the - // corresponding environment variable is set. 
- var sc *SecurityConfig - if env.ClientSideSecuritySupport { - var err error - if sc, err = securityConfigFromCluster(cluster); err != nil { - return ClusterUpdate{}, err - } - } - - ret := ClusterUpdate{ - ClusterName: cluster.GetName(), - EnableLRS: cluster.GetLrsServer().GetSelf() != nil, - SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), - LBPolicy: lbPolicy, - } - - // Validate and set cluster type from the response. - switch { - case cluster.GetType() == v3clusterpb.Cluster_EDS: - if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { - return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) - } - ret.ClusterType = ClusterTypeEDS - ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() - return ret, nil - case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: - if !env.AggregateAndDNSSupportEnv { - return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) - } - ret.ClusterType = ClusterTypeLogicalDNS - dnsHN, err := dnsHostNameFromCluster(cluster) - if err != nil { - return ClusterUpdate{}, err - } - ret.DNSHostName = dnsHN - return ret, nil - case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": - if !env.AggregateAndDNSSupportEnv { - return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) - } - clusters := &v3aggregateclusterpb.ClusterConfig{} - if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { - return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - ret.ClusterType = ClusterTypeAggregate - ret.PrioritizedClusterNames = clusters.Clusters - return ret, nil - default: - return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), 
cluster.GetClusterType(), cluster) - } -} - -// dnsHostNameFromCluster extracts the DNS host name from the cluster's load -// assignment. -// -// There should be exactly one locality, with one endpoint, whose address -// contains the address and port. -func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { - loadAssignment := cluster.GetLoadAssignment() - if loadAssignment == nil { - return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster") - } - if len(loadAssignment.GetEndpoints()) != 1 { - return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment) - } - endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints() - if len(endpoints) != 1 { - return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints) - } - endpoint := endpoints[0].GetEndpoint() - if endpoint == nil { - return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set") - } - socketAddr := endpoint.GetAddress().GetSocketAddress() - if socketAddr == nil { - return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set") - } - if socketAddr.GetResolverName() != "" { - return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set has unexpected custom resolver name: %v", socketAddr.GetResolverName()) - } - host := socketAddr.GetAddress() - if host == "" { - return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set") - } - port := socketAddr.GetPortValue() - if port == 0 { - return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set") - } - return net.JoinHostPort(host, strconv.Itoa(int(port))), nil -} - -// securityConfigFromCluster extracts the relevant security configuration from -// the received Cluster resource. 
-func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { - if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { - return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm) - } - // The Cluster resource contains a `transport_socket` field, which contains - // a oneof `typed_config` field of type `protobuf.Any`. The any proto - // contains a marshaled representation of an `UpstreamTlsContext` message. - ts := cluster.GetTransportSocket() - if ts == nil { - return nil, nil - } - if name := ts.GetName(); name != transportSocketName { - return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) - } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) - } - upstreamCtx := &v3tlspb.UpstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil { - return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) - } - // The following fields from `UpstreamTlsContext` are ignored: - // - sni - // - allow_renegotiation - // - max_session_keys - if upstreamCtx.GetCommonTlsContext() == nil { - return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") - } - - return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false) -} - -// common is expected to be not nil. -// The `alpn_protocols` field is ignored. 
-func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { - if common.GetTlsParams() != nil { - return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common) - } - if common.GetCustomHandshaker() != nil { - return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common) - } - - // For now, if we can't get a valid security config from the new fields, we - // fallback to the old deprecated fields. - // TODO: Drop support for deprecated fields. NACK if err != nil here. - sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server) - if sc == nil || sc.Equal(&SecurityConfig{}) { - var err error - sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server) - if err != nil { - return nil, err - } - } - if sc != nil { - // sc == nil is a valid case where the control plane has not sent us any - // security configuration. xDS creds will use fallback creds. - if server { - if sc.IdentityInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") - } - } else { - if sc.RootInstanceName == "" { - return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") - } - } - } - return sc, nil -} - -func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { - // The `CommonTlsContext` contains a - // `tls_certificate_certificate_provider_instance` field of type - // `CertificateProviderInstance`, which contains the provider instance name - // and the certificate name to fetch identity certs. 
- sc := &SecurityConfig{} - if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { - sc.IdentityInstanceName = identity.GetInstanceName() - sc.IdentityCertName = identity.GetCertificateName() - } - - // The `CommonTlsContext` contains a `validation_context_type` field which - // is a oneof. We can get the values that we are interested in from two of - // those possible values: - // - combined validation context: - // - contains a default validation context which holds the list of - // matchers for accepted SANs. - // - contains certificate provider instance configuration - // - certificate provider instance configuration - // - in this case, we do not get a list of accepted SANs. - switch t := common.GetValidationContextType().(type) { - case *v3tlspb.CommonTlsContext_CombinedValidationContext: - combined := common.GetCombinedValidationContext() - var matchers []matcher.StringMatcher - if def := combined.GetDefaultValidationContext(); def != nil { - for _, m := range def.GetMatchSubjectAltNames() { - matcher, err := matcher.StringMatcherFromProto(m) - if err != nil { - return nil, err - } - matchers = append(matchers, matcher) - } - } - if server && len(matchers) != 0 { - return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) - } - sc.SubjectAltNameMatchers = matchers - if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { - sc.RootInstanceName = pi.GetInstanceName() - sc.RootCertName = pi.GetCertificateName() - } - case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: - pi := common.GetValidationContextCertificateProviderInstance() - sc.RootInstanceName = pi.GetInstanceName() - sc.RootCertName = pi.GetCertificateName() - case nil: - // It is valid for the validation context to be nil on the server side. 
- default: - return nil, fmt.Errorf("validation context contains unexpected type: %T", t) - } - return sc, nil -} - -// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md -// specifies the new way to fetch security configuration and says the following: -// -// Although there are various ways to obtain certificates as per this proto -// (which are supported by Envoy), gRPC supports only one of them and that is -// the `CertificateProviderPluginInstance` proto. -// -// This helper function attempts to fetch security configuration from the -// `CertificateProviderPluginInstance` message, given a CommonTlsContext. -func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { - // The `tls_certificate_provider_instance` field of type - // `CertificateProviderPluginInstance` is used to fetch the identity - // certificate provider. - sc := &SecurityConfig{} - identity := common.GetTlsCertificateProviderInstance() - if identity == nil && len(common.GetTlsCertificates()) != 0 { - return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) - } - if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { - return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) - } - sc.IdentityInstanceName = identity.GetInstanceName() - sc.IdentityCertName = identity.GetCertificateName() - - // The `CommonTlsContext` contains a oneof field `validation_context_type`, - // which contains the `CertificateValidationContext` message in one of the - // following ways: - // - `validation_context` field - // - this is directly of type `CertificateValidationContext` - // - `combined_validation_context` field - // - this is of type 
`CombinedCertificateValidationContext` and contains - // a `default validation context` field of type - // `CertificateValidationContext` - // - // The `CertificateValidationContext` message has the following fields that - // we are interested in: - // - `ca_certificate_provider_instance` - // - this is of type `CertificateProviderPluginInstance` - // - `match_subject_alt_names` - // - this is a list of string matchers - // - // The `CertificateProviderPluginInstance` message contains two fields - // - instance_name - // - this is the certificate provider instance name to be looked up in - // the bootstrap configuration - // - certificate_name - // - this is an opaque name passed to the certificate provider - var validationCtx *v3tlspb.CertificateValidationContext - switch typ := common.GetValidationContextType().(type) { - case *v3tlspb.CommonTlsContext_ValidationContext: - validationCtx = common.GetValidationContext() - case *v3tlspb.CommonTlsContext_CombinedValidationContext: - validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() - case nil: - // It is valid for the validation context to be nil on the server side. - return sc, nil - default: - return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) - } - // If we get here, it means that the `CertificateValidationContext` message - // was found through one of the supported ways. It is an error if the - // validation context is specified, but it does not contain the - // ca_certificate_provider_instance field which contains information about - // the certificate provider to be used for the root certificates. 
- if validationCtx.GetCaCertificateProviderInstance() == nil { - return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common) - } - // The following fields are ignored: - // - trusted_ca - // - watched_directory - // - allow_expired_certificate - // - trust_chain_verification - switch { - case len(validationCtx.GetVerifyCertificateSpki()) != 0: - return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common) - case len(validationCtx.GetVerifyCertificateHash()) != 0: - return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) - case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): - return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common) - case validationCtx.GetCrl() != nil: - return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) - case validationCtx.GetCustomValidatorConfig() != nil: - return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common) - } - - if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { - sc.RootInstanceName = rootProvider.GetInstanceName() - sc.RootCertName = rootProvider.GetCertificateName() - } - var matchers []matcher.StringMatcher - for _, m := range validationCtx.GetMatchSubjectAltNames() { - matcher, err := matcher.StringMatcherFromProto(m) - if err != nil { - return nil, err - } - matchers = append(matchers, matcher) - } - if server && len(matchers) != 0 { - return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) - } - sc.SubjectAltNameMatchers = matchers - return sc, nil -} - -// circuitBreakersFromCluster extracts the circuit breakers configuration from -// the received cluster resource. 
Returns nil if no CircuitBreakers or no -// Thresholds in CircuitBreakers. -func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { - for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { - if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { - continue - } - maxRequestsPb := threshold.GetMaxRequests() - if maxRequestsPb == nil { - return nil - } - maxRequests := maxRequestsPb.GetValue() - return &maxRequests - } - return nil -} - -// UnmarshalEndpoints processes resources received in an EDS response, -// validates them, and transforms them into a native struct which contains only -// fields we are interested in. -func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]EndpointsUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - -func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { - if !IsEndpointsResource(r.GetTypeUrl()) { - return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - - cla := &v3endpointpb.ClusterLoadAssignment{} - if err := proto.Unmarshal(r.GetValue(), cla); err != nil { - return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) - - u, err := parseEDSRespProto(cla) - if err != nil { - return cla.GetClusterName(), EndpointsUpdate{}, err - } - u.Raw = r - return cla.GetClusterName(), u, nil -} - -func parseAddress(socketAddress *v3corepb.SocketAddress) string { - return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) -} - -func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { - percentage := dropPolicy.GetDropPercentage() - var ( - numerator = 
percentage.GetNumerator() - denominator uint32 - ) - switch percentage.GetDenominator() { - case v3typepb.FractionalPercent_HUNDRED: - denominator = 100 - case v3typepb.FractionalPercent_TEN_THOUSAND: - denominator = 10000 - case v3typepb.FractionalPercent_MILLION: - denominator = 1000000 - } - return OverloadDropConfig{ - Category: dropPolicy.GetCategory(), - Numerator: numerator, - Denominator: denominator, - } -} - -func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint { - endpoints := make([]Endpoint, 0, len(lbEndpoints)) - for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, Endpoint{ - HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), - Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), - }) - } - return endpoints -} - -func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { - ret := EndpointsUpdate{} - for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { - ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) - } - priorities := make(map[uint32]struct{}) - for _, locality := range m.Endpoints { - l := locality.GetLocality() - if l == nil { - return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) - } - lid := internal.LocalityID{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } - priority := locality.GetPriority() - priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, Locality{ - ID: lid, - Endpoints: parseEndpoints(locality.GetLbEndpoints()), - Weight: locality.GetLoadBalancingWeight().GetValue(), - Priority: priority, - }) - } - for i := 0; i < len(priorities); i++ { - if _, ok := priorities[uint32(i)]; !ok { - return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) - } - } - return ret, nil -} - -// ListenerUpdateErrTuple is 
a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. -type ListenerUpdateErrTuple struct { - Update ListenerUpdate - Err error -} - -// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains -// the results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. -type RouteConfigUpdateErrTuple struct { - Update RouteConfigUpdate - Err error -} - -// ClusterUpdateErrTuple is a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. -type ClusterUpdateErrTuple struct { - Update ClusterUpdate - Err error -} - -// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. -type EndpointsUpdateErrTuple struct { - Update EndpointsUpdate - Err error -} - -// processAllResources unmarshals and validates the resources, populates the -// provided ret (a map), and returns metadata and error. -// -// After this function, the ret map will be populated with both valid and -// invalid updates. Invalid resources will have an entry with the key as the -// resource name, value as an empty update. -// -// The type of the resource is determined by the type of ret. E.g. -// map[string]ListenerUpdate means this is for LDS. 
-func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadata, error) { - timestamp := time.Now() - md := UpdateMetadata{ - Version: opts.Version, - Timestamp: timestamp, - } - var topLevelErrors []error - perResourceErrors := make(map[string]error) - - for _, r := range opts.Resources { - switch ret2 := ret.(type) { - case map[string]ListenerUpdateErrTuple: - name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger) - if err == nil { - ret2[name] = ListenerUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = ListenerUpdateErrTuple{Err: err} - case map[string]RouteConfigUpdateErrTuple: - name, update, err := unmarshalRouteConfigResource(r, opts.Logger) - if err == nil { - ret2[name] = RouteConfigUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = RouteConfigUpdateErrTuple{Err: err} - case map[string]ClusterUpdateErrTuple: - name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger) - if err == nil { - ret2[name] = ClusterUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. 
- ret2[name] = ClusterUpdateErrTuple{Err: err} - case map[string]EndpointsUpdateErrTuple: - name, update, err := unmarshalEndpointsResource(r, opts.Logger) - if err == nil { - ret2[name] = EndpointsUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = EndpointsUpdateErrTuple{Err: err} - } - } - - if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { - md.Status = ServiceStatusACKed - return md, nil - } - - var typeStr string - switch ret.(type) { - case map[string]ListenerUpdate: - typeStr = "LDS" - case map[string]RouteConfigUpdate: - typeStr = "RDS" - case map[string]ClusterUpdate: - typeStr = "CDS" - case map[string]EndpointsUpdate: - typeStr = "EDS" - } - - md.Status = ServiceStatusNACKed - errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) - md.ErrState = &UpdateErrorMetadata{ - Version: opts.Version, - Err: errRet, - Timestamp: timestamp, - } - return md, errRet -} - -func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { - var errStrB strings.Builder - errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) - if len(topLevelErrors) > 0 { - errStrB.WriteString("top level errors: ") - for i, err := range topLevelErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - errStrB.WriteString(err.Error()) - } - } - if len(perResourceErrors) > 0 { - var i int - for name, err := range perResourceErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - i++ - errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) - } - } - return errors.New(errStrB.String()) -} diff --git a/xds/internal/xdsclient/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go similarity index 95% rename from xds/internal/xdsclient/filter_chain.go rename to 
xds/internal/xdsclient/xdsresource/filter_chain.go index 7503e0e48761..10c779229622 100644 --- a/xds/internal/xdsclient/filter_chain.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "errors" @@ -28,6 +27,7 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" @@ -61,12 +61,12 @@ type FilterChain struct { HTTPFilters []HTTPFilter // RouteConfigName is the route configuration name for this FilterChain. // - // Only one of RouteConfigName and InlineRouteConfig is set. + // Exactly one of RouteConfigName and InlineRouteConfig is set. RouteConfigName string // InlineRouteConfig is the inline route configuration (RDS response) // returned for this filter chain. // - // Only one of RouteConfigName and InlineRouteConfig is set. + // Exactly one of RouteConfigName and InlineRouteConfig is set. InlineRouteConfig *RouteConfigUpdate } @@ -177,6 +177,7 @@ const ( // 7. Source IP address. // 8. Source port. type FilterChainManager struct { + logger *grpclog.PrefixLogger // Destination prefix is the first match criteria that we support. // Therefore, this multi-stage map is indexed on destination prefixes // specified in the match criteria. @@ -247,9 +248,10 @@ type sourcePrefixEntry struct { // // This function is only exported so that tests outside of this package can // create a FilterChainManager. 
-func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { +func NewFilterChainManager(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*FilterChainManager, error) { // Parse all the filter chains and build the internal data structures. fci := &FilterChainManager{ + logger: logger, dstPrefixMap: make(map[string]*destPrefixEntry), RouteConfigNames: make(map[string]bool), } @@ -303,7 +305,7 @@ func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) if fcm.GetDestinationPort().GetValue() != 0 { // Destination port is the first match criteria and we do not // support filter chains which contains this match criteria. - logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) continue } @@ -352,7 +354,7 @@ func (fci *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefi // Filter chains specifying server names in their match criteria always fail // a match at connection time. So, these filter chains can be dropped now. if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { - logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) return nil } @@ -365,13 +367,13 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de case tp != "" && tp != "raw_buffer": // Only allow filter chains with transport protocol set to empty string // or "raw_buffer". 
- logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp == "" && dstEntry.rawBufferSeen: // If we have already seen filter chains with transport protocol set to // "raw_buffer", we can drop filter chains with transport protocol set // to empty string, since the former takes precedence. - logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp != "" && !dstEntry.rawBufferSeen: // This is the first "raw_buffer" that we are seeing. Set the bit and @@ -385,7 +387,7 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de func (fci *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { - logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) return nil } return fci.addFilterChainsForSourceType(dstEntry, fc) @@ -551,6 +553,25 @@ func (fci *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain return filterChain, nil } +// Validate takes a function to validate the FilterChains in this manager. 
+func (fci *FilterChainManager) Validate(f func(fc *FilterChain) error) error { + for _, dst := range fci.dstPrefixMap { + for _, srcType := range dst.srcTypeArr { + if srcType == nil { + continue + } + for _, src := range srcType.srcPrefixMap { + for _, fc := range src.srcPortMap { + if err := f(fc); err != nil { + return err + } + } + } + } + } + return f(fci.def) +} + func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) { filterChain := &FilterChain{} seenNames := make(map[string]bool, len(filters)) diff --git a/xds/internal/xdsclient/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go similarity index 98% rename from xds/internal/xdsclient/filter_chain_test.go rename to xds/internal/xdsclient/xdsresource/filter_chain_test.go index ae1035e76409..dc1ea75778bf 100644 --- a/xds/internal/xdsclient/filter_chain_test.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "context" @@ -99,7 +98,7 @@ var ( // TestNewFilterChainImpl_Failure_BadMatchFields verifies cases where we have a // single filter chain with match criteria that contains unsupported fields. 
-func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -188,7 +187,7 @@ func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if fci, err := NewFilterChainManager(test.lis); err == nil { + if fci, err := NewFilterChainManager(test.lis, nil); err == nil { t.Fatalf("NewFilterChainManager() returned %v when expected to fail", fci) } }) @@ -197,7 +196,7 @@ func TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { // TestNewFilterChainImpl_Failure_OverlappingMatchingRules verifies cases where // there are multiple filter chains and they have overlapping match rules. -func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -287,7 +286,7 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { const wantErr = "multiple filter chains with overlapping matching rules are defined" for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if _, err := NewFilterChainManager(test.lis); err == nil || !strings.Contains(err.Error(), wantErr) { + if _, err := NewFilterChainManager(test.lis, nil); err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, wantErr) } }) @@ -296,7 +295,7 @@ func TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { // TestNewFilterChainImpl_Failure_BadSecurityConfig verifies cases where the // security configuration in the filter chain is invalid. 
-func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -509,7 +508,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis) + _, err := NewFilterChainManager(test.lis, nil) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -519,7 +518,7 @@ func TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { // TestNewFilterChainImpl_Success_RouteUpdate tests the construction of the // filter chain with valid HTTP Filters present. -func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -746,7 +745,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -759,7 +758,7 @@ func TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { // TestNewFilterChainImpl_Failure_BadRouteUpdate verifies cases where the Route // Update in the filter chain are invalid. 
-func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -887,7 +886,7 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis) + _, err := NewFilterChainManager(test.lis, nil) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -897,7 +896,7 @@ func TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { // TestNewFilterChainImpl_Failure_BadHTTPFilters verifies cases where the HTTP // Filters in the filter chain are invalid. -func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { +func (s) TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { tests := []struct { name string lis *v3listenerpb.Listener @@ -961,7 +960,7 @@ func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis) + _, err := NewFilterChainManager(test.lis, nil) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -971,7 +970,7 @@ func TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_HTTPFilters tests the construction of the // filter chain with valid HTTP Filters present. 
-func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -1281,7 +1280,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1294,7 +1293,7 @@ func TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the // security configuration in the filter chain contains valid data. -func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -1510,7 +1509,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1526,7 +1525,7 @@ func TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { // contains unsupported match fields. These configurations should lead to // success at config validation time and the filter chains which contains // unsupported match fields will be skipped at lookup time. 
-func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -1683,7 +1682,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1696,7 +1695,7 @@ func TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { // TestNewFilterChainImpl_Success_AllCombinations verifies different // combinations of the supported match criteria. -func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { +func (s) TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -2184,7 +2183,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis) + gotFC, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -2195,7 +2194,7 @@ func TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { } } -func TestLookup_Failures(t *testing.T) { +func (s) TestLookup_Failures(t *testing.T) { tests := []struct { desc string lis *v3listenerpb.Listener @@ -2335,7 +2334,7 @@ func TestLookup_Failures(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - fci, err := NewFilterChainManager(test.lis) + fci, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() failed: %v", err) } @@ -2347,7 +2346,7 @@ func TestLookup_Failures(t *testing.T) { } } -func TestLookup_Successes(t *testing.T) { +func 
(s) TestLookup_Successes(t *testing.T) { oldRBAC := env.RBACSupport env.RBACSupport = true defer func() { @@ -2569,7 +2568,7 @@ func TestLookup_Successes(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - fci, err := NewFilterChainManager(test.lis) + fci, err := NewFilterChainManager(test.lis, nil) if err != nil { t.Fatalf("NewFilterChainManager() failed: %v", err) } @@ -2615,7 +2614,7 @@ func (si *serverInterceptor) AllowRPC(context.Context) error { return errors.New(si.level) } -func TestHTTPFilterInstantiation(t *testing.T) { +func (s) TestHTTPFilterInstantiation(t *testing.T) { tests := []struct { name string filters []HTTPFilter diff --git a/xds/internal/xdsclient/matcher.go b/xds/internal/xdsclient/xdsresource/matcher.go similarity index 99% rename from xds/internal/xdsclient/matcher.go rename to xds/internal/xdsclient/xdsresource/matcher.go index 85fff30638e6..d7da32a750e0 100644 --- a/xds/internal/xdsclient/matcher.go +++ b/xds/internal/xdsclient/xdsresource/matcher.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/internal/xdsclient/matcher_path.go b/xds/internal/xdsclient/xdsresource/matcher_path.go similarity index 99% rename from xds/internal/xdsclient/matcher_path.go rename to xds/internal/xdsclient/xdsresource/matcher_path.go index 2ca0e4bbcc44..da487e20c58e 100644 --- a/xds/internal/xdsclient/matcher_path.go +++ b/xds/internal/xdsclient/xdsresource/matcher_path.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * */ -package xdsclient +package xdsresource import ( "regexp" diff --git a/xds/internal/xdsclient/matcher_path_test.go b/xds/internal/xdsclient/xdsresource/matcher_path_test.go similarity index 94% rename from xds/internal/xdsclient/matcher_path_test.go rename to xds/internal/xdsclient/xdsresource/matcher_path_test.go index 003d6db72e29..507cf15bed85 100644 --- a/xds/internal/xdsclient/matcher_path_test.go +++ b/xds/internal/xdsclient/xdsresource/matcher_path_test.go @@ -13,17 +13,16 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "regexp" "testing" ) -func TestPathFullMatcherMatch(t *testing.T) { +func (s) TestPathFullMatcherMatch(t *testing.T) { tests := []struct { name string fullPath string @@ -47,7 +46,7 @@ func TestPathFullMatcherMatch(t *testing.T) { } } -func TestPathPrefixMatcherMatch(t *testing.T) { +func (s) TestPathPrefixMatcherMatch(t *testing.T) { tests := []struct { name string prefix string @@ -71,7 +70,7 @@ func TestPathPrefixMatcherMatch(t *testing.T) { } } -func TestPathRegexMatcherMatch(t *testing.T) { +func (s) TestPathRegexMatcherMatch(t *testing.T) { tests := []struct { name string regexPath string diff --git a/xds/internal/xdsclient/matcher_test.go b/xds/internal/xdsclient/xdsresource/matcher_test.go similarity index 98% rename from xds/internal/xdsclient/matcher_test.go rename to xds/internal/xdsclient/xdsresource/matcher_test.go index 724fa8269582..2746e58e6c77 100644 --- a/xds/internal/xdsclient/matcher_test.go +++ b/xds/internal/xdsclient/xdsresource/matcher_test.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * */ -package xdsclient +package xdsresource import ( "context" @@ -29,7 +28,7 @@ import ( "google.golang.org/grpc/metadata" ) -func TestAndMatcherMatch(t *testing.T) { +func (s) TestAndMatcherMatch(t *testing.T) { tests := []struct { name string pm pathMatcher @@ -114,7 +113,7 @@ func TestAndMatcherMatch(t *testing.T) { } } -func TestFractionMatcherMatch(t *testing.T) { +func (s) TestFractionMatcherMatch(t *testing.T) { const fraction = 500000 fm := newFractionMatcher(fraction) defer func() { diff --git a/xds/internal/xdsclient/xdsresource/test_utils_test.go b/xds/internal/xdsclient/xdsresource/test_utils_test.go new file mode 100644 index 000000000000..b352caa23b75 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/test_utils_test.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/protobuf/testing/protocmp" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +var ( + cmpOpts = cmp.Options{ + cmpopts.EquateEmpty(), + cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), + cmp.Comparer(func(a, b time.Time) bool { return true }), + protocmp.Transform(), + } + + cmpOptsIgnoreDetails = cmp.Options{ + cmp.Comparer(func(a, b time.Time) bool { return true }), + cmp.Comparer(func(x, y error) bool { + return (x == nil) == (y == nil) + }), + } +) diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go new file mode 100644 index 000000000000..3e01d77e4e02 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -0,0 +1,107 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + "google.golang.org/grpc/xds/internal/version" +) + +// UpdateValidatorFunc performs validations on update structs using +// context/logic available at the xdsClient layer. Since these validation are +// performed on internal update structs, they can be shared between different +// API clients. 
+type UpdateValidatorFunc func(interface{}) error + +// UpdateMetadata contains the metadata for each update, including timestamp, +// raw message, and so on. +type UpdateMetadata struct { + // Status is the status of this resource, e.g. ACKed, NACKed, or + // Not_exist(removed). + Status ServiceStatus + // Version is the version of the xds response. Note that this is the version + // of the resource in use (previous ACKed). If a response is NACKed, the + // NACKed version is in ErrState. + Version string + // Timestamp is when the response is received. + Timestamp time.Time + // ErrState is set when the update is NACKed. + ErrState *UpdateErrorMetadata +} + +// IsListenerResource returns true if the provider URL corresponds to an xDS +// Listener resource. +func IsListenerResource(url string) bool { + return url == version.V2ListenerURL || url == version.V3ListenerURL +} + +// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS +// HTTPConnManager resource. +func IsHTTPConnManagerResource(url string) bool { + return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL +} + +// IsRouteConfigResource returns true if the provider URL corresponds to an xDS +// RouteConfig resource. +func IsRouteConfigResource(url string) bool { + return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL +} + +// IsClusterResource returns true if the provider URL corresponds to an xDS +// Cluster resource. +func IsClusterResource(url string) bool { + return url == version.V2ClusterURL || url == version.V3ClusterURL +} + +// IsEndpointsResource returns true if the provider URL corresponds to an xDS +// Endpoints resource. +func IsEndpointsResource(url string) bool { + return url == version.V2EndpointsURL || url == version.V3EndpointsURL +} + +// ServiceStatus is the status of the update. +type ServiceStatus int + +const ( + // ServiceStatusUnknown is the default state, before a watch is started for + // the resource. 
+ ServiceStatusUnknown ServiceStatus = iota
+ // ServiceStatusRequested is when the watch is started, but before a
+ // response is received.
+ ServiceStatusRequested
+ // ServiceStatusNotExist is when the resource doesn't exist in
+ // state-of-the-world responses (e.g. LDS and CDS), which means the resource
+ // is removed by the management server.
+ ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS.
+ // ServiceStatusACKed is when the resource is ACKed.
+ ServiceStatusACKed
+ // ServiceStatusNACKed is when the resource is NACKed.
+ ServiceStatusNACKed
+)
+
+// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state
+// when a response is NACKed.
+type UpdateErrorMetadata struct {
+ // Version is the version of the NACKed response.
+ Version string
+ // Err contains why the response was NACKed.
+ Err error
+ // Timestamp is when the NACKed response was received.
+ Timestamp time.Time
+}
diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go
new file mode 100644
index 000000000000..c200380be26f
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/type_cds.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import "google.golang.org/protobuf/types/known/anypb"
+
+// ClusterType is the type of cluster from a received CDS response.
+type ClusterType int + +const ( + // ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint + // discovery to the management server. + ClusterTypeEDS ClusterType = iota + // ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially + // maps to the gRPC behavior of using the DNS resolver with pick_first LB policy. + ClusterTypeLogicalDNS + // ClusterTypeAggregate represents the Aggregate Cluster type, which provides a + // prioritized list of clusters to use. It is used for failover between clusters + // with a different configuration. + ClusterTypeAggregate +) + +// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its +// config. +type ClusterLBPolicyRingHash struct { + MinimumRingSize uint64 + MaximumRingSize uint64 +} + +// ClusterUpdate contains information from a received CDS response, which is of +// interest to the registered CDS watcher. +type ClusterUpdate struct { + ClusterType ClusterType + // ClusterName is the clusterName being watched for through CDS. + ClusterName string + // EDSServiceName is an optional name for EDS. If it's not set, the balancer + // should watch ClusterName for the EDS resources. + EDSServiceName string + // EnableLRS indicates whether or not load should be reported through LRS. + EnableLRS bool + // SecurityCfg contains security configuration sent by the control plane. + SecurityCfg *SecurityConfig + // MaxRequests for circuit breaking, if any (otherwise nil). + MaxRequests *uint32 + // DNSHostName is used only for cluster type DNS. It's the DNS name to + // resolve in "host:port" form + DNSHostName string + // PrioritizedClusterNames is used only for cluster type aggregate. It represents + // a prioritized list of cluster names. + PrioritizedClusterNames []string + + // LBPolicy is the lb policy for this cluster. + // + // This only support round_robin and ring_hash. 
+ // - if it's nil, the lb policy is round_robin
+ // - if it's not nil, the lb policy is ring_hash, and this field has the config.
+ //
+ // When we add more supported policies, this can be made an interface, and
+ // will be set to different types based on the policy type.
+ LBPolicy *ClusterLBPolicyRingHash
+
+ // Raw is the resource from the xds response.
+ Raw *anypb.Any
+}
+
+// ClusterUpdateErrTuple is a tuple with the update and error. It contains the
+// results from unmarshal functions. It's used to pass unmarshal results of
+// multiple resources together, e.g. in maps like `map[string]{Update,error}`.
+type ClusterUpdateErrTuple struct {
+ Update ClusterUpdate
+ Err error
+}
diff --git a/xds/internal/xdsclient/xdsresource/type_eds.go b/xds/internal/xdsclient/xdsresource/type_eds.go
new file mode 100644
index 000000000000..ad590160f6af
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/type_eds.go
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import (
+ "google.golang.org/grpc/xds/internal"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// OverloadDropConfig contains the config to drop overloads.
+type OverloadDropConfig struct {
+ Category string
+ Numerator uint32
+ Denominator uint32
+}
+
+// EndpointHealthStatus represents the health status of an endpoint.
+type EndpointHealthStatus int32 + +const ( + // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. + EndpointHealthStatusUnknown EndpointHealthStatus = iota + // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. + EndpointHealthStatusHealthy + // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. + EndpointHealthStatusUnhealthy + // EndpointHealthStatusDraining represents HealthStatus DRAINING. + EndpointHealthStatusDraining + // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. + EndpointHealthStatusTimeout + // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. + EndpointHealthStatusDegraded +) + +// Endpoint contains information of an endpoint. +type Endpoint struct { + Address string + HealthStatus EndpointHealthStatus + Weight uint32 +} + +// Locality contains information of a locality. +type Locality struct { + Endpoints []Endpoint + ID internal.LocalityID + Priority uint32 + Weight uint32 +} + +// EndpointsUpdate contains an EDS update. +type EndpointsUpdate struct { + Drops []OverloadDropConfig + Localities []Locality + + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type EndpointsUpdateErrTuple struct { + Update EndpointsUpdate + Err error +} diff --git a/xds/internal/xdsclient/xdsresource/type_lds.go b/xds/internal/xdsclient/xdsresource/type_lds.go new file mode 100644 index 000000000000..a2742fb4371a --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type_lds.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// ListenerUpdate contains information received in an LDS response, which is of +// interest to the registered LDS watcher. +type ListenerUpdate struct { + // RouteConfigName is the route configuration name corresponding to the + // target which is being watched through LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned inside LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate + + // MaxStreamDuration contains the HTTP connection manager's + // common_http_protocol_options.max_stream_duration field, or zero if + // unset. + MaxStreamDuration time.Duration + // HTTPFilters is a list of HTTP filters (name, config) from the LDS + // response. + HTTPFilters []HTTPFilter + // InboundListenerCfg contains inbound listener configuration. + InboundListenerCfg *InboundListenerConfig + + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection +// manager field. +type HTTPFilter struct { + // Name is an arbitrary name of the filter. Used for applying override + // settings in virtual host / route / weighted cluster configuration (not + // yet supported). 
+ Name string + // Filter is the HTTP filter found in the registry for the config type. + Filter httpfilter.Filter + // Config contains the filter's configuration + Config httpfilter.FilterConfig +} + +// InboundListenerConfig contains information about the inbound listener, i.e +// the server-side listener. +type InboundListenerConfig struct { + // Address is the local address on which the inbound listener is expected to + // accept incoming connections. + Address string + // Port is the local port on which the inbound listener is expected to + // accept incoming connections. + Port string + // FilterChains is the list of filter chains associated with this listener. + FilterChains *FilterChainManager +} + +// ListenerUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type ListenerUpdateErrTuple struct { + Update ListenerUpdate + Err error +} diff --git a/xds/internal/xdsclient/xdsresource/type_rds.go b/xds/internal/xdsclient/xdsresource/type_rds.go new file mode 100644 index 000000000000..3c4d971cd245 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type_rds.go @@ -0,0 +1,245 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "regexp" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// RouteConfigUpdate contains information received in an RDS response, which is +// of interest to the registered RDS watcher. +type RouteConfigUpdate struct { + VirtualHosts []*VirtualHost + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// VirtualHost contains the routes for a list of Domains. +// +// Note that the domains in this slice can be a wildcard, not an exact string. +// The consumer of this struct needs to find the best match for its hostname. +type VirtualHost struct { + Domains []string + // Routes contains a list of routes, each containing matchers and + // corresponding action. + Routes []*Route + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the virtual host which may be present. An individual filter's override + // may be unused if the matching Route contains an override for that + // filter. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig +} + +// RetryConfig contains all retry-related configuration in either a VirtualHost +// or Route. +type RetryConfig struct { + // RetryOn is a set of status codes on which to retry. Only Canceled, + // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are + // supported; any other values will be omitted. + RetryOn map[codes.Code]bool + NumRetries uint32 // maximum number of retry attempts + RetryBackoff RetryBackoff // retry backoff policy +} + +// RetryBackoff describes the backoff policy for retries. +type RetryBackoff struct { + BaseInterval time.Duration // initial backoff duration between attempts + MaxInterval time.Duration // maximum backoff duration +} + +// HashPolicyType specifies the type of HashPolicy from a received RDS Response. 
+type HashPolicyType int + +const ( + // HashPolicyTypeHeader specifies to hash a Header in the incoming request. + HashPolicyTypeHeader HashPolicyType = iota + // HashPolicyTypeChannelID specifies to hash a unique Identifier of the + // Channel. In grpc-go, this will be done using the ClientConn pointer. + HashPolicyTypeChannelID +) + +// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing +// load balancer. +type HashPolicy struct { + HashPolicyType HashPolicyType + Terminal bool + // Fields used for type HEADER. + HeaderName string + Regex *regexp.Regexp + RegexSubstitution string +} + +// RouteAction is the action of the route from a received RDS response. +type RouteAction int + +const ( + // RouteActionUnsupported are routing types currently unsupported by grpc. + // According to A36, "A Route with an inappropriate action causes RPCs + // matching that route to fail." + RouteActionUnsupported RouteAction = iota + // RouteActionRoute is the expected route type on the client side. Route + // represents routing a request to some upstream cluster. On the client + // side, if an RPC matches to a route that is not RouteActionRoute, the RPC + // will fail according to A36. + RouteActionRoute + // RouteActionNonForwardingAction is the expected route type on the server + // side. NonForwardingAction represents when a route will generate a + // response directly, without forwarding to an upstream host. + RouteActionNonForwardingAction +) + +// Route is both a specification of how to match a request as well as an +// indication of the action to take upon match. +type Route struct { + Path *string + Prefix *string + Regex *regexp.Regexp + // Indicates if prefix/path matching should be case insensitive. The default + // is false (case sensitive). + CaseInsensitive bool + Headers []*HeaderMatcher + Fraction *uint32 + + HashPolicies []*HashPolicy + + // If the matchers above indicate a match, the below configuration is used. 
+ WeightedClusters map[string]WeightedCluster + // If MaxStreamDuration is nil, it indicates neither of the route action's + // max_stream_duration fields (grpc_timeout_header_max nor + // max_stream_duration) were set. In this case, the ListenerUpdate's + // MaxStreamDuration field should be used. If MaxStreamDuration is set to + // an explicit zero duration, the application's deadline should be used. + MaxStreamDuration *time.Duration + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the route which may be present. An individual filter's override may be + // unused if the matching WeightedCluster contains an override for that + // filter. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig + + RouteAction RouteAction +} + +// WeightedCluster contains settings for an xds RouteAction.WeightedCluster. +type WeightedCluster struct { + // Weight is the relative weight of the cluster. It will never be zero. + Weight uint32 + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the weighted cluster which may be present. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig +} + +// HeaderMatcher represents header matchers. +type HeaderMatcher struct { + Name string + InvertMatch *bool + ExactMatch *string + RegexMatch *regexp.Regexp + PrefixMatch *string + SuffixMatch *string + RangeMatch *Int64Range + PresentMatch *bool +} + +// Int64Range is a range for header range match. +type Int64Range struct { + Start int64 + End int64 +} + +// SecurityConfig contains the security configuration received as part of the +// Cluster resource on the client-side, and as part of the Listener resource on +// the server-side. +type SecurityConfig struct { + // RootInstanceName identifies the certProvider plugin to be used to fetch + // root certificates. 
This instance name will be resolved to the plugin name + // and its associated configuration from the certificate_providers field of + // the bootstrap file. + RootInstanceName string + // RootCertName is the certificate name to be passed to the plugin (looked + // up from the bootstrap file) while fetching root certificates. + RootCertName string + // IdentityInstanceName identifies the certProvider plugin to be used to + // fetch identity certificates. This instance name will be resolved to the + // plugin name and its associated configuration from the + // certificate_providers field of the bootstrap file. + IdentityInstanceName string + // IdentityCertName is the certificate name to be passed to the plugin + // (looked up from the bootstrap file) while fetching identity certificates. + IdentityCertName string + // SubjectAltNameMatchers is an optional list of match criteria for SANs + // specified on the peer certificate. Used only on the client-side. + // + // Some intricacies: + // - If this field is empty, then any peer certificate is accepted. + // - If the peer certificate contains a wildcard DNS SAN, and an `exact` + // matcher is configured, a wildcard DNS match is performed instead of a + // regular string comparison. + SubjectAltNameMatchers []matcher.StringMatcher + // RequireClientCert indicates if the server handshake process expects the + // client to present a certificate. Set to true when performing mTLS. Used + // only on the server-side. + RequireClientCert bool +} + +// Equal returns true if sc is equal to other. 
+func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + } + switch { + case sc.RootInstanceName != other.RootInstanceName: + return false + case sc.RootCertName != other.RootCertName: + return false + case sc.IdentityInstanceName != other.IdentityInstanceName: + return false + case sc.IdentityCertName != other.IdentityCertName: + return false + case sc.RequireClientCert != other.RequireClientCert: + return false + default: + if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { + return false + } + for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { + if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { + return false + } + } + } + return true +} + +// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains +// the results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type RouteConfigUpdateErrTuple struct { + Update RouteConfigUpdate + Err error +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal.go b/xds/internal/xdsclient/xdsresource/unmarshal.go new file mode 100644 index 000000000000..7cd9d32dd6c8 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal.go @@ -0,0 +1,174 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package xdsresource contains functions to process xDS updates (unmarshal from
+// proto), and types for the resource updates.
+package xdsresource
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// UnmarshalOptions wraps the input parameters for `UnmarshalXxx` functions.
+type UnmarshalOptions struct {
+ // Version is the version of the received response.
+ Version string
+ // Resources are the xDS resources in the received response.
+ Resources []*anypb.Any
+ // Logger is the prefix logger to be used during unmarshaling.
+ Logger *grpclog.PrefixLogger
+ // UpdateValidator is a post unmarshal validation check provided by the
+ // upper layer.
+ UpdateValidator UpdateValidatorFunc
+}
+
+// processAllResources unmarshals and validates the resources, populates the
+// provided ret (a map), and returns metadata and error.
+//
+// After this function, the ret map will be populated with both valid and
+// invalid updates. Invalid resources will have an entry with the key as the
+// resource name, value as an empty update.
+//
+// The type of the resource is determined by the type of ret. E.g.
+// map[string]ListenerUpdate means this is for LDS.
+func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadata, error) { + timestamp := time.Now() + md := UpdateMetadata{ + Version: opts.Version, + Timestamp: timestamp, + } + var topLevelErrors []error + perResourceErrors := make(map[string]error) + + for _, r := range opts.Resources { + switch ret2 := ret.(type) { + case map[string]ListenerUpdateErrTuple: + name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger) + if err == nil { + ret2[name] = ListenerUpdateErrTuple{Update: update} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. + ret2[name] = ListenerUpdateErrTuple{Err: err} + case map[string]RouteConfigUpdateErrTuple: + name, update, err := unmarshalRouteConfigResource(r, opts.Logger) + if err == nil { + ret2[name] = RouteConfigUpdateErrTuple{Update: update} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. + ret2[name] = RouteConfigUpdateErrTuple{Err: err} + case map[string]ClusterUpdateErrTuple: + name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger) + if err == nil { + ret2[name] = ClusterUpdateErrTuple{Update: update} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. 
+ ret2[name] = ClusterUpdateErrTuple{Err: err} + case map[string]EndpointsUpdateErrTuple: + name, update, err := unmarshalEndpointsResource(r, opts.Logger) + if err == nil { + ret2[name] = EndpointsUpdateErrTuple{Update: update} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. + ret2[name] = EndpointsUpdateErrTuple{Err: err} + } + } + + if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { + md.Status = ServiceStatusACKed + return md, nil + } + + var typeStr string + switch ret.(type) { + case map[string]ListenerUpdate: + typeStr = "LDS" + case map[string]RouteConfigUpdate: + typeStr = "RDS" + case map[string]ClusterUpdate: + typeStr = "CDS" + case map[string]EndpointsUpdate: + typeStr = "EDS" + } + + md.Status = ServiceStatusNACKed + errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) + md.ErrState = &UpdateErrorMetadata{ + Version: opts.Version, + Err: errRet, + Timestamp: timestamp, + } + return md, errRet +} + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go new file mode 100644 index 000000000000..a1c6c3ea7a62 --- /dev/null +++ 
b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -0,0 +1,456 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "errors" + "fmt" + "net" + "strconv" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/version" + "google.golang.org/protobuf/types/known/anypb" +) + +// TransportSocket proto message has a `name` field which is expected to be set +// to this value by the management server. +const transportSocketName = "envoy.transport_sockets.tls" + +// UnmarshalCluster processes resources received in an CDS response, validates +// them, and transforms them into a native struct which contains only fields we +// are interested in. 
+func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]ClusterUpdateErrTuple) + md, err := processAllResources(opts, update) + return update, md, err +} + +func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { + if !IsClusterResource(r.GetTypeUrl()) { + return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cluster := &v3clusterpb.Cluster{} + if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster)) + cu, err := validateClusterAndConstructClusterUpdate(cluster) + if err != nil { + return cluster.GetName(), ClusterUpdate{}, err + } + cu.Raw = r + if f != nil { + if err := f(cu); err != nil { + return "", ClusterUpdate{}, err + } + } + + return cluster.GetName(), cu, nil +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M +) + +func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { + var lbPolicy *ClusterLBPolicyRingHash + switch cluster.GetLbPolicy() { + case v3clusterpb.Cluster_ROUND_ROBIN: + lbPolicy = nil // The default is round_robin, and there's no config to set. 
+	case v3clusterpb.Cluster_RING_HASH:
+		if !env.RingHashSupport {
+			return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
+		}
+		rhc := cluster.GetRingHashLbConfig()
+		if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH {
+			return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster)
+		}
+		// Minimum defaults to 1024 entries, and is limited to 8M entries.
+		// Maximum defaults to 8M entries, and is limited to 8M entries.
+		var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize
+		if min := rhc.GetMinimumRingSize(); min != nil {
+			if min.GetValue() > ringHashSizeUpperBound {
+				return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash minimum ring size %v in response: %+v", min.GetValue(), cluster)
+			}
+			minSize = min.GetValue()
+		}
+		if max := rhc.GetMaximumRingSize(); max != nil {
+			if max.GetValue() > ringHashSizeUpperBound {
+				return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maximum ring size %v in response: %+v", max.GetValue(), cluster)
+			}
+			maxSize = max.GetValue()
+		}
+		if minSize > maxSize {
+			return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize)
+		}
+		lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize}
+	default:
+		return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster)
+	}
+
+	// Process security configuration received from the control plane iff the
+	// corresponding environment variable is set.
+ var sc *SecurityConfig + if env.ClientSideSecuritySupport { + var err error + if sc, err = securityConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + } + + ret := ClusterUpdate{ + ClusterName: cluster.GetName(), + EnableLRS: cluster.GetLrsServer().GetSelf() != nil, + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + LBPolicy: lbPolicy, + } + + // Validate and set cluster type from the response. + switch { + case cluster.GetType() == v3clusterpb.Cluster_EDS: + if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { + return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) + } + ret.ClusterType = ClusterTypeEDS + ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + return ret, nil + case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: + if !env.AggregateAndDNSSupportEnv { + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } + ret.ClusterType = ClusterTypeLogicalDNS + dnsHN, err := dnsHostNameFromCluster(cluster) + if err != nil { + return ClusterUpdate{}, err + } + ret.DNSHostName = dnsHN + return ret, nil + case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": + if !env.AggregateAndDNSSupportEnv { + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } + clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + ret.ClusterType = ClusterTypeAggregate + ret.PrioritizedClusterNames = clusters.Clusters + return ret, nil + default: + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), 
cluster.GetClusterType(), cluster) + } +} + +// dnsHostNameFromCluster extracts the DNS host name from the cluster's load +// assignment. +// +// There should be exactly one locality, with one endpoint, whose address +// contains the address and port. +func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { + loadAssignment := cluster.GetLoadAssignment() + if loadAssignment == nil { + return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster") + } + if len(loadAssignment.GetEndpoints()) != 1 { + return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment) + } + endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints() + if len(endpoints) != 1 { + return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints) + } + endpoint := endpoints[0].GetEndpoint() + if endpoint == nil { + return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set") + } + socketAddr := endpoint.GetAddress().GetSocketAddress() + if socketAddr == nil { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set") + } + if socketAddr.GetResolverName() != "" { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set has unexpected custom resolver name: %v", socketAddr.GetResolverName()) + } + host := socketAddr.GetAddress() + if host == "" { + return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set") + } + port := socketAddr.GetPortValue() + if port == 0 { + return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set") + } + return net.JoinHostPort(host, strconv.Itoa(int(port))), nil +} + +// securityConfigFromCluster extracts the relevant security configuration from +// the received Cluster resource. 
+func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) {
+	if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 {
+		return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm)
+	}
+	// The Cluster resource contains a `transport_socket` field, which contains
+	// a oneof `typed_config` field of type `protobuf.Any`. The any proto
+	// contains a marshaled representation of an `UpstreamTlsContext` message.
+	ts := cluster.GetTransportSocket()
+	if ts == nil {
+		return nil, nil
+	}
+	if name := ts.GetName(); name != transportSocketName {
+		return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name)
+	}
+	any := ts.GetTypedConfig()
+	if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL {
+		return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl)
+	}
+	upstreamCtx := &v3tlspb.UpstreamTlsContext{}
+	if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err)
+	}
+	// The following fields from `UpstreamTlsContext` are ignored:
+	// - sni
+	// - allow_renegotiation
+	// - max_session_keys
+	if upstreamCtx.GetCommonTlsContext() == nil {
+		return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext")
+	}
+
+	return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false)
+}
+
+// common is expected to be non-nil.
+// The `alpn_protocols` field is ignored.
+func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + if common.GetTlsParams() != nil { + return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common) + } + if common.GetCustomHandshaker() != nil { + return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common) + } + + // For now, if we can't get a valid security config from the new fields, we + // fallback to the old deprecated fields. + // TODO: Drop support for deprecated fields. NACK if err != nil here. + sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server) + if sc == nil || sc.Equal(&SecurityConfig{}) { + var err error + sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server) + if err != nil { + return nil, err + } + } + if sc != nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. + if server { + if sc.IdentityInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + } + } else { + if sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + } + } + } + return sc, nil +} + +func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `CommonTlsContext` contains a + // `tls_certificate_certificate_provider_instance` field of type + // `CertificateProviderInstance`, which contains the provider instance name + // and the certificate name to fetch identity certs. 
+ sc := &SecurityConfig{} + if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + } + + // The `CommonTlsContext` contains a `validation_context_type` field which + // is a oneof. We can get the values that we are interested in from two of + // those possible values: + // - combined validation context: + // - contains a default validation context which holds the list of + // matchers for accepted SANs. + // - contains certificate provider instance configuration + // - certificate provider instance configuration + // - in this case, we do not get a list of accepted SANs. + switch t := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + combined := common.GetCombinedValidationContext() + var matchers []matcher.StringMatcher + if def := combined.GetDefaultValidationContext(); def != nil { + for _, m := range def.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + } + case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: + pi := common.GetValidationContextCertificateProviderInstance() + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + case nil: + // It is valid for the validation context to be nil on the server side. 
+ default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", t) + } + return sc, nil +} + +// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md +// specifies the new way to fetch security configuration and says the following: +// +// Although there are various ways to obtain certificates as per this proto +// (which are supported by Envoy), gRPC supports only one of them and that is +// the `CertificateProviderPluginInstance` proto. +// +// This helper function attempts to fetch security configuration from the +// `CertificateProviderPluginInstance` message, given a CommonTlsContext. +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `tls_certificate_provider_instance` field of type + // `CertificateProviderPluginInstance` is used to fetch the identity + // certificate provider. + sc := &SecurityConfig{} + identity := common.GetTlsCertificateProviderInstance() + if identity == nil && len(common.GetTlsCertificates()) != 0 { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) + } + if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) + } + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + + // The `CommonTlsContext` contains a oneof field `validation_context_type`, + // which contains the `CertificateValidationContext` message in one of the + // following ways: + // - `validation_context` field + // - this is directly of type `CertificateValidationContext` + // - `combined_validation_context` field + // - this is of type 
`CombinedCertificateValidationContext` and contains + // a `default validation context` field of type + // `CertificateValidationContext` + // + // The `CertificateValidationContext` message has the following fields that + // we are interested in: + // - `ca_certificate_provider_instance` + // - this is of type `CertificateProviderPluginInstance` + // - `match_subject_alt_names` + // - this is a list of string matchers + // + // The `CertificateProviderPluginInstance` message contains two fields + // - instance_name + // - this is the certificate provider instance name to be looked up in + // the bootstrap configuration + // - certificate_name + // - this is an opaque name passed to the certificate provider + var validationCtx *v3tlspb.CertificateValidationContext + switch typ := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_ValidationContext: + validationCtx = common.GetValidationContext() + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() + case nil: + // It is valid for the validation context to be nil on the server side. + return sc, nil + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) + } + // If we get here, it means that the `CertificateValidationContext` message + // was found through one of the supported ways. It is an error if the + // validation context is specified, but it does not contain the + // ca_certificate_provider_instance field which contains information about + // the certificate provider to be used for the root certificates. 
+ if validationCtx.GetCaCertificateProviderInstance() == nil { + return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common) + } + // The following fields are ignored: + // - trusted_ca + // - watched_directory + // - allow_expired_certificate + // - trust_chain_verification + switch { + case len(validationCtx.GetVerifyCertificateSpki()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common) + case len(validationCtx.GetVerifyCertificateHash()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) + case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): + return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common) + case validationCtx.GetCrl() != nil: + return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) + case validationCtx.GetCustomValidatorConfig() != nil: + return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common) + } + + if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { + sc.RootInstanceName = rootProvider.GetInstanceName() + sc.RootCertName = rootProvider.GetCertificateName() + } + var matchers []matcher.StringMatcher + for _, m := range validationCtx.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + return sc, nil +} + +// circuitBreakersFromCluster extracts the circuit breakers configuration from +// the received cluster resource. 
Returns nil if no CircuitBreakers or no +// Thresholds in CircuitBreakers. +func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { + for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { + if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { + continue + } + maxRequestsPb := threshold.GetMaxRequests() + if maxRequestsPb == nil { + return nil + } + maxRequests := maxRequestsPb.GetValue() + return &maxRequests + } + return nil +} diff --git a/xds/internal/xdsclient/cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go similarity index 99% rename from xds/internal/xdsclient/cds_test.go rename to xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 21e3b05b9089..3a56965bdc4e 100644 --- a/xds/internal/xdsclient/cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "regexp" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go new file mode 100644 index 000000000000..f1774dedae43 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + "net" + "strconv" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/types/known/anypb" +) + +// UnmarshalEndpoints processes resources received in an EDS response, +// validates them, and transforms them into a native struct which contains only +// fields we are interested in. 
+func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]EndpointsUpdateErrTuple) + md, err := processAllResources(opts, update) + return update, md, err +} + +func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { + if !IsEndpointsResource(r.GetTypeUrl()) { + return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cla := &v3endpointpb.ClusterLoadAssignment{} + if err := proto.Unmarshal(r.GetValue(), cla); err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) + + u, err := parseEDSRespProto(cla) + if err != nil { + return cla.GetClusterName(), EndpointsUpdate{}, err + } + u.Raw = r + return cla.GetClusterName(), u, nil +} + +func parseAddress(socketAddress *v3corepb.SocketAddress) string { + return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) +} + +func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { + percentage := dropPolicy.GetDropPercentage() + var ( + numerator = percentage.GetNumerator() + denominator uint32 + ) + switch percentage.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + denominator = 100 + case v3typepb.FractionalPercent_TEN_THOUSAND: + denominator = 10000 + case v3typepb.FractionalPercent_MILLION: + denominator = 1000000 + } + return OverloadDropConfig{ + Category: dropPolicy.GetCategory(), + Numerator: numerator, + Denominator: denominator, + } +} + +func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint { + endpoints := make([]Endpoint, 0, len(lbEndpoints)) + for _, lbEndpoint := range lbEndpoints { + endpoints = append(endpoints, Endpoint{ + HealthStatus: 
EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), + Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), + }) + } + return endpoints +} + +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { + ret := EndpointsUpdate{} + for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { + ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) + } + priorities := make(map[uint32]struct{}) + for _, locality := range m.Endpoints { + l := locality.GetLocality() + if l == nil { + return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) + } + lid := internal.LocalityID{ + Region: l.Region, + Zone: l.Zone, + SubZone: l.SubZone, + } + priority := locality.GetPriority() + priorities[priority] = struct{}{} + ret.Localities = append(ret.Localities, Locality{ + ID: lid, + Endpoints: parseEndpoints(locality.GetLbEndpoints()), + Weight: locality.GetLoadBalancingWeight().GetValue(), + Priority: priority, + }) + } + for i := 0; i < len(priorities); i++ { + if _, ok := priorities[uint32(i)]; !ok { + return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) + } + } + return ret, nil +} diff --git a/xds/internal/xdsclient/eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go similarity index 99% rename from xds/internal/xdsclient/eds_test.go rename to xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index d0af8a988d83..770dbf4c5253 100644 --- a/xds/internal/xdsclient/eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go new file mode 100644 index 000000000000..3a1d0f63156f --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -0,0 +1,297 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "errors" + "fmt" + "strconv" + + v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/version" + "google.golang.org/protobuf/types/known/anypb" +) + +// UnmarshalListener processes resources received in an LDS response, validates +// them, and transforms them into a native struct which contains only fields we +// are interested in. 
+func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]ListenerUpdateErrTuple) + md, err := processAllResources(opts, update) + return update, md, err +} + +func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { + if !IsListenerResource(r.GetTypeUrl()) { + return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + // TODO: Pass version.TransportAPI instead of relying upon the type URL + v2 := r.GetTypeUrl() == version.V2ListenerURL + lis := &v3listenerpb.Listener{} + if err := proto.Unmarshal(r.GetValue(), lis); err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis)) + + lu, err := processListener(lis, logger, v2) + if err != nil { + return lis.GetName(), ListenerUpdate{}, err + } + if f != nil { + if err := f(*lu); err != nil { + return lis.GetName(), ListenerUpdate{}, err + } + } + lu.Raw = r + return lis.GetName(), *lu, nil +} + +func processListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { + if lis.GetApiListener() != nil { + return processClientSideListener(lis, logger, v2) + } + return processServerSideListener(lis, logger) +} + +// processClientSideListener checks if the provided Listener proto meets +// the expected criteria. If so, it returns a non-empty routeConfigName. 
+func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { + update := &ListenerUpdate{} + + apiLisAny := lis.GetApiListener().GetApiListener() + if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) { + return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl()) + } + apiLis := &v3httppb.HttpConnectionManager{} + if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil { + return nil, fmt.Errorf("failed to unmarshal api_listner: %v", err) + } + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." - A41 + if apiLis.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis) + } + if len(apiLis.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) + } + + switch apiLis.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if apiLis.GetRds().GetConfigSource().GetAds() == nil { + return nil, fmt.Errorf("ConfigSource is not ADS: %+v", lis) + } + name := apiLis.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", lis) + } + update.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), logger, v2) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + update.InlineRouteConfig = &routeU + case nil: + return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) + default: + return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) + } + + if v2 { + return update, nil + } + + // The following checks and fields only apply to xDS protocol versions v3+. 
+ + update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() + + var err error + if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { + return nil, err + } + + return update, nil +} + +func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { + switch { + case ptypes.Is(config, &v3cncftypepb.TypedStruct{}): + // The real type name is inside the new TypedStruct message. + s := new(v3cncftypepb.TypedStruct) + if err := ptypes.UnmarshalAny(config, s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + case ptypes.Is(config, &v1udpatypepb.TypedStruct{}): + // The real type name is inside the old TypedStruct message. + s := new(v1udpatypepb.TypedStruct) + if err := ptypes.UnmarshalAny(config, s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + default: + return config, config.GetTypeUrl(), nil + } +} + +func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { + config, typeURL, err := unwrapHTTPFilterConfig(cfg) + if err != nil { + return nil, nil, err + } + filterBuilder := httpfilter.Get(typeURL) + if filterBuilder == nil { + if optional { + return nil, nil, nil + } + return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) + } + parseFunc := filterBuilder.ParseFilterConfig + if !lds { + parseFunc = filterBuilder.ParseFilterConfigOverride + } + filterConfig, err := parseFunc(config) + if err != nil { + return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) + } + return filterBuilder, filterConfig, nil +} + +func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { + if len(cfgs) == 0 { + return nil, nil + } + m := 
make(map[string]httpfilter.FilterConfig) + for name, cfg := range cfgs { + optional := false + s := new(v3routepb.FilterConfig) + if ptypes.Is(cfg, s) { + if err := ptypes.UnmarshalAny(cfg, s); err != nil { + return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) + } + cfg = s.GetConfig() + optional = s.GetIsOptional() + } + + httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) + if err != nil { + return nil, fmt.Errorf("filter override %q: %v", name, err) + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + m[name] = config + } + return m, nil +} + +func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { + ret := make([]HTTPFilter, 0, len(filters)) + seenNames := make(map[string]bool, len(filters)) + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, errors.New("filter missing name field") + } + if seenNames[name] { + return nil, fmt.Errorf("duplicate filter name %q", name) + } + seenNames[name] = true + + httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) + if err != nil { + return nil, err + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + if server { + if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) + } + } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) + } + + // Save name/config + ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) + } + // "Validation will fail if a terminal filter is not the last filter in the + // chain or if a non-terminal filter is the last filter in the chain." 
- A39 + if len(ret) == 0 { + return nil, fmt.Errorf("http filters list is empty") + } + var i int + for ; i < len(ret)-1; i++ { + if ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) + } + } + if !ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) + } + return ret, nil +} + +func processServerSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*ListenerUpdate, error) { + if n := len(lis.ListenerFilters); n != 0 { + return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) + } + if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { + return nil, errors.New("unsupported field 'use_original_dst' is present and set to true") + } + addr := lis.GetAddress() + if addr == nil { + return nil, fmt.Errorf("no address field in LDS response: %+v", lis) + } + sockAddr := addr.GetSocketAddress() + if sockAddr == nil { + return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) + } + lu := &ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: sockAddr.GetAddress(), + Port: strconv.Itoa(int(sockAddr.GetPortValue())), + }, + } + + fcMgr, err := NewFilterChainManager(lis, logger) + if err != nil { + return nil, err + } + lu.InboundListenerCfg.FilterChains = fcMgr + return lu, nil +} diff --git a/xds/internal/xdsclient/lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go similarity index 99% rename from xds/internal/xdsclient/lds_test.go rename to xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index f889e380eab3..138a8928a684 100644 --- a/xds/internal/xdsclient/lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go new file mode 100644 index 000000000000..0642500f303b --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -0,0 +1,373 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "regexp" + "strings" + "time" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/xds/internal/version" + "google.golang.org/protobuf/types/known/anypb" +) + +// UnmarshalRouteConfig processes resources received in an RDS response, +// validates them, and transforms them into a native struct which contains only +// fields we are interested in. The provided hostname determines the route +// configuration resources of interest. +func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]RouteConfigUpdateErrTuple) + md, err := processAllResources(opts, update) + return update, md, err +} + +func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) { + if !IsRouteConfigResource(r.GetTypeUrl()) { + return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + rc := &v3routepb.RouteConfiguration{} + if err := proto.Unmarshal(r.GetValue(), rc); err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, pretty.ToJSON(rc)) + + // TODO: Pass version.TransportAPI instead of relying upon the type URL + v2 := r.GetTypeUrl() == version.V2RouteConfigURL + u, err := generateRDSUpdateFromRouteConfiguration(rc, logger, v2) + if err != nil { + return rc.GetName(), RouteConfigUpdate{}, err + } + u.Raw = r + return rc.GetName(), u, nil +} + +// generateRDSUpdateFromRouteConfiguration 
checks if the provided +// RouteConfiguration meets the expected criteria. If so, it returns a +// RouteConfigUpdate with nil error. +// +// A RouteConfiguration resource is considered valid when only if it contains a +// VirtualHost whose domain field matches the server name from the URI passed +// to the gRPC channel, and it contains a clusterName or a weighted cluster. +// +// The RouteConfiguration includes a list of virtualHosts, which may have zero +// or more elements. We are interested in the element whose domains field +// matches the server name specified in the "xds:" URI. The only field in the +// VirtualHost proto that the we are interested in is the list of routes. We +// only look at the last route in the list (the default route), whose match +// field must be empty and whose route field must be set. Inside that route +// message, the cluster field will contain the clusterName or weighted clusters +// we are looking for. +func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { + vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) + for _, vh := range rc.GetVirtualHosts() { + routes, err := routesProtoToSlice(vh.Routes, logger, v2) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + rc, err := generateRetryConfig(vh.GetRetryPolicy()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + vhOut := &VirtualHost{ + Domains: vh.GetDomains(), + Routes: routes, + RetryConfig: rc, + } + if !v2 { + cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) + } + vhOut.HTTPFilterConfigOverride = cfgs + } + vhs = append(vhs, vhOut) + } + return RouteConfigUpdate{VirtualHosts: vhs}, nil +} + +func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { + 
if !env.RetrySupport || rp == nil { + return nil, nil + } + + cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} + for _, s := range strings.Split(rp.GetRetryOn(), ",") { + switch strings.TrimSpace(strings.ToLower(s)) { + case "cancelled": + cfg.RetryOn[codes.Canceled] = true + case "deadline-exceeded": + cfg.RetryOn[codes.DeadlineExceeded] = true + case "internal": + cfg.RetryOn[codes.Internal] = true + case "resource-exhausted": + cfg.RetryOn[codes.ResourceExhausted] = true + case "unavailable": + cfg.RetryOn[codes.Unavailable] = true + } + } + + if rp.NumRetries == nil { + cfg.NumRetries = 1 + } else { + cfg.NumRetries = rp.GetNumRetries().Value + if cfg.NumRetries < 1 { + return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) + } + } + + backoff := rp.GetRetryBackOff() + if backoff == nil { + cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond + } else { + cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() + if cfg.RetryBackoff.BaseInterval <= 0 { + return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) + } + } + if max := backoff.GetMaxInterval(); max == nil { + cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval + } else { + cfg.RetryBackoff.MaxInterval = max.AsDuration() + if cfg.RetryBackoff.MaxInterval <= 0 { + return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) + } + } + + if len(cfg.RetryOn) == 0 { + return &RetryConfig{}, nil + } + return cfg, nil +} + +func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { + var routesRet []*Route + for _, r := range routes { + match := r.GetMatch() + if match == nil { + return nil, fmt.Errorf("route %+v doesn't have a match", r) + } + + if len(match.GetQueryParameters()) != 0 { + // Ignore route with query parameters. 
+ logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r) + continue + } + + pathSp := match.GetPathSpecifier() + if pathSp == nil { + return nil, fmt.Errorf("route %+v doesn't have a path specifier", r) + } + + var route Route + switch pt := pathSp.(type) { + case *v3routepb.RouteMatch_Prefix: + route.Prefix = &pt.Prefix + case *v3routepb.RouteMatch_Path: + route.Path = &pt.Path + case *v3routepb.RouteMatch_SafeRegex: + regex := pt.SafeRegex.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + route.Regex = re + default: + return nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) + } + + if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { + route.CaseInsensitive = !caseSensitive.Value + } + + for _, h := range match.GetHeaders() { + var header HeaderMatcher + switch ht := h.GetHeaderMatchSpecifier().(type) { + case *v3routepb.HeaderMatcher_ExactMatch: + header.ExactMatch = &ht.ExactMatch + case *v3routepb.HeaderMatcher_SafeRegexMatch: + regex := ht.SafeRegexMatch.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + header.RegexMatch = re + case *v3routepb.HeaderMatcher_RangeMatch: + header.RangeMatch = &Int64Range{ + Start: ht.RangeMatch.Start, + End: ht.RangeMatch.End, + } + case *v3routepb.HeaderMatcher_PresentMatch: + header.PresentMatch = &ht.PresentMatch + case *v3routepb.HeaderMatcher_PrefixMatch: + header.PrefixMatch = &ht.PrefixMatch + case *v3routepb.HeaderMatcher_SuffixMatch: + header.SuffixMatch = &ht.SuffixMatch + default: + return nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) + } + header.Name = h.GetName() + invert := h.GetInvertMatch() + header.InvertMatch = &invert + route.Headers = append(route.Headers, &header) + } + + if fr := match.GetRuntimeFraction(); fr 
!= nil { + d := fr.GetDefaultValue() + n := d.GetNumerator() + switch d.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + n *= 10000 + case v3typepb.FractionalPercent_TEN_THOUSAND: + n *= 100 + case v3typepb.FractionalPercent_MILLION: + } + route.Fraction = &n + } + + switch r.GetAction().(type) { + case *v3routepb.Route_Route: + route.WeightedClusters = make(map[string]WeightedCluster) + action := r.GetRoute() + + // Hash Policies are only applicable for a Ring Hash LB. + if env.RingHashSupport { + hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) + if err != nil { + return nil, err + } + route.HashPolicies = hp + } + + switch a := action.GetClusterSpecifier().(type) { + case *v3routepb.RouteAction_Cluster: + route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} + case *v3routepb.RouteAction_WeightedClusters: + wcs := a.WeightedClusters + var totalWeight uint32 + for _, c := range wcs.Clusters { + w := c.GetWeight().GetValue() + if w == 0 { + continue + } + wc := WeightedCluster{Weight: w} + if !v2 { + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + } + wc.HTTPFilterConfigOverride = cfgs + } + route.WeightedClusters[c.GetName()] = wc + totalWeight += w + } + // envoy xds doc + // default TotalWeight https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight + wantTotalWeight := uint32(100) + if tw := wcs.GetTotalWeight(); tw != nil { + wantTotalWeight = tw.GetValue() + } + if totalWeight != wantTotalWeight { + return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) + } + if totalWeight == 0 { + return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) 
+ } + case *v3routepb.RouteAction_ClusterHeader: + continue + default: + return nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) + } + + msd := action.GetMaxStreamDuration() + // Prefer grpc_timeout_header_max, if set. + dur := msd.GetGrpcTimeoutHeaderMax() + if dur == nil { + dur = msd.GetMaxStreamDuration() + } + if dur != nil { + d := dur.AsDuration() + route.MaxStreamDuration = &d + } + + var err error + route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) + if err != nil { + return nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) + } + + route.RouteAction = RouteActionRoute + + case *v3routepb.Route_NonForwardingAction: + // Expected to be used on server side. + route.RouteAction = RouteActionNonForwardingAction + default: + route.RouteAction = RouteActionUnsupported + } + + if !v2 { + cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) + if err != nil { + return nil, fmt.Errorf("route %+v: %v", r, err) + } + route.HTTPFilterConfigOverride = cfgs + } + routesRet = append(routesRet, &route) + } + return routesRet, nil +} + +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { + var hashPoliciesRet []*HashPolicy + for _, p := range policies { + policy := HashPolicy{Terminal: p.Terminal} + switch p.GetPolicySpecifier().(type) { + case *v3routepb.RouteAction_HashPolicy_Header_: + policy.HashPolicyType = HashPolicyTypeHeader + policy.HeaderName = p.GetHeader().GetHeaderName() + if rr := p.GetHeader().GetRegexRewrite(); rr != nil { + regex := rr.GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = rr.GetSubstitution() + } + case *v3routepb.RouteAction_HashPolicy_FilterState_: + if p.GetFilterState().GetKey() != "io.grpc.channel_id" { + logger.Infof("hash policy 
%+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + continue + } + policy.HashPolicyType = HashPolicyTypeChannelID + default: + logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier()) + continue + } + + hashPoliciesRet = append(hashPoliciesRet, &policy) + } + return hashPoliciesRet, nil +} diff --git a/xds/internal/xdsclient/rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go similarity index 99% rename from xds/internal/xdsclient/rds_test.go rename to xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 8b419244d672..38a7e99a9ede 100644 --- a/xds/internal/xdsclient/rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * */ -package xdsclient +package xdsresource import ( "fmt" diff --git a/xds/server.go b/xds/server.go index b36fa64b5008..28abaf84f5f8 100644 --- a/xds/server.go +++ b/xds/server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/server" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const serverPrefix = "[xds-server %p] " @@ -330,7 +331,7 @@ func (s *GRPCServer) GracefulStop() { func routeAndProcess(ctx context.Context) error { conn := transport.GetConnection(ctx) cw, ok := conn.(interface { - VirtualHosts() []xdsclient.VirtualHostWithInterceptors + VirtualHosts() []xdsresource.VirtualHostWithInterceptors }) if !ok { return errors.New("missing virtual hosts in incoming context") @@ -347,12 +348,12 @@ func routeAndProcess(ctx context.Context) error { // the RPC gets to this point, there will be a single, unambiguous authority // present in the header map. authority := md.Get(":authority") - vh := xdsclient.FindBestMatchingVirtualHostServer(authority[0], cw.VirtualHosts()) + vh := xdsresource.FindBestMatchingVirtualHostServer(authority[0], cw.VirtualHosts()) if vh == nil { return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") } - var rwi *xdsclient.RouteWithInterceptors + var rwi *xdsresource.RouteWithInterceptors rpcInfo := iresolver.RPCInfo{ Context: ctx, Method: mn, @@ -361,7 +362,7 @@ func routeAndProcess(ctx context.Context) error { if r.M.Match(rpcInfo) { // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes // RPCs matching that route to fail with UNAVAILABLE." 
- A36 - if r.RouteAction != xdsclient.RouteActionNonForwardingAction { + if r.RouteAction != xdsresource.RouteActionNonForwardingAction { return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") } rwi = &r diff --git a/xds/server_test.go b/xds/server_test.go index 501b8ba76e20..492a2fa6d6ed 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" @@ -429,7 +430,7 @@ func (s) TestServeSuccess(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "LDS resource not found")) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "LDS resource not found")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { @@ -447,14 +448,14 @@ func (s) TestServeSuccess(t *testing.T) { // Push a good LDS response, and wait for Serve() to be invoked on the // underlying grpc.Server. 
- fcm, err := xdsclient.NewFilterChainManager(listenerWithFilterChains) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } addr, port := splitHostPort(lis.Addr().String()) - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: addr, Port: port, FilterChains: fcm, @@ -476,9 +477,9 @@ func (s) TestServeSuccess(t *testing.T) { // Push an update to the registered listener watch callback with a Listener // resource whose host:port does not match the actual listening address and // port. This will push the listener to "not-serving" mode. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: "10.20.30.40", Port: "666", FilterChains: fcm, @@ -749,7 +750,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { // Push a good LDS response with security config, and wait for Serve() to be // invoked on the underlying grpc.Server. Also make sure that certificate // providers are not created. 
- fcm, err := xdsclient.NewFilterChainManager(&v3listenerpb.Listener{ + fcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ { TransportSocket: &v3corepb.TransportSocket{ @@ -789,14 +790,14 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { }, }, }, - }) + }, nil) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } addr, port := splitHostPort(lis.Addr().String()) - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ Address: addr, Port: port, FilterChains: fcm, @@ -862,7 +863,7 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("LDS error")) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, errors.New("LDS error")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { From c53203c581923c256e76e82eba1ec64f9744e684 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 8 Nov 2021 14:17:48 -0800 Subject: [PATCH 324/998] xds/federation: support populating resource template in xds-resolver (#4900) --- xds/internal/resolver/xds_resolver.go | 45 +++- xds/internal/resolver/xds_resolver_test.go | 226 +++++++++++++----- xds/internal/testutils/fakeclient/client.go | 5 +- xds/internal/xdsclient/bootstrap/template.go | 47 ++++ .../xdsclient/bootstrap/template_test.go | 97 ++++++++ xds/server.go | 7 +- 6 files changed, 349 insertions(+), 78 deletions(-) create mode 100644 xds/internal/xdsclient/bootstrap/template.go create mode 
100644 xds/internal/xdsclient/bootstrap/template_test.go diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 19ee01773e8c..e04ce00b2d29 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -22,6 +22,7 @@ package resolver import ( "errors" "fmt" + "strings" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" @@ -30,6 +31,7 @@ import ( iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const xdsScheme = "xds" @@ -60,7 +62,7 @@ type xdsResolverBuilder struct { // // The xds bootstrap process is performed (and a new xds client is built) every // time an xds resolver is built. -func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { +func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { r := &xdsResolver{ target: t, cc: cc, @@ -68,7 +70,14 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op updateCh: make(chan suWithError, 1), activeClusters: make(map[string]*clusterInfo), } - r.logger = prefixLogger((r)) + defer func() { + if retErr != nil { + if r.client != nil { + r.client.Close() + } + } + }() + r.logger = prefixLogger(r) r.logger.Infof("Creating resolver for target: %+v", t) newXDSClient := newXDSClient @@ -81,6 +90,10 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) } r.client = client + bootstrapConfig := client.BootstrapConfig() + if bootstrapConfig == nil { + return nil, errors.New("bootstrap configuration is empty") + } // If xds credentials were specified by the user, but bootstrap configs do // not 
contain any certificate provider configuration, it is better to fail @@ -94,14 +107,36 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op creds = opts.CredsBundle.TransportCredentials() } if xc, ok := creds.(interface{ UsesXDS() bool }); ok && xc.UsesXDS() { - bc := client.BootstrapConfig() - if len(bc.CertProviderConfigs) == 0 { + if len(bootstrapConfig.CertProviderConfigs) == 0 { return nil, errors.New("xds: xdsCreds specified but certificate_providers config missing in bootstrap file") } } + // Find the client listener template to use from the bootstrap config: + // - If authority is not set in the target, use the top level template + // - If authority is set, use the template from the authority map. + template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate + if authority := r.target.URL.Host; authority != "" { + a := bootstrapConfig.Authorities[authority] + if a == nil { + return nil, fmt.Errorf("xds: authority %q is not found in the bootstrap file", authority) + } + if a.ClientListenerResourceNameTemplate != "" { + // This check will never be false, because + // ClientListenerResourceNameTemplate is required to start with + // xdstp://, and has a default value (not an empty string) if unset. + template = a.ClientListenerResourceNameTemplate + } + } + endpoint := r.target.URL.Path + if endpoint == "" { + endpoint = r.target.URL.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + resourceName := bootstrap.PopulateResourceTemplate(template, endpoint) + // Register a watch on the xdsClient for the user's dial target. 
- cancelWatch := watchService(r.client, r.target.Endpoint, r.handleServiceUpdate, r.logger) + cancelWatch := watchService(r.client, resourceName, r.handleServiceUpdate, r.logger) r.logger.Infof("Watch started on resource name %v with xds-client %p", r.target.Endpoint, r.client) r.cancelWatch = func() { cancelWatch() diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index c05a7422904a..e366b9fb6265 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -21,6 +21,7 @@ package resolver import ( "context" "errors" + "net/url" "reflect" "strings" "testing" @@ -62,7 +63,7 @@ const ( defaultTestShortTimeout = 100 * time.Microsecond ) -var target = resolver.Target{Endpoint: targetStr} +var target = resolver.Target{Endpoint: targetStr, URL: url.URL{Scheme: "xds", Path: "/" + targetStr}} var routerFilter = xdsresource.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} var routerFilterList = []xdsresource.HTTPFilter{routerFilter} @@ -117,6 +118,7 @@ func (s) TestResolverBuilder(t *testing.T) { tests := []struct { name string xdsClientFunc func() (xdsclient.XDSClient, error) + target resolver.Target wantErr bool }{ { @@ -124,6 +126,7 @@ func (s) TestResolverBuilder(t *testing.T) { xdsClientFunc: func() (xdsclient.XDSClient, error) { return fakeclient.NewClient(), nil }, + target: target, wantErr: false, }, { @@ -131,6 +134,29 @@ func (s) TestResolverBuilder(t *testing.T) { xdsClientFunc: func() (xdsclient.XDSClient, error) { return nil, errors.New("newXDSClient-throws-error") }, + target: target, + wantErr: true, + }, + { + name: "authority not defined in bootstrap", + xdsClientFunc: func() (xdsclient.XDSClient, error) { + c := fakeclient.NewClient() + c.SetBootstrapConfig(&bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "%s", + Authorities: map[string]*bootstrap.Authority{ + "test-authority": { + ClientListenerResourceNameTemplate: 
"xdstp://test-authority/%s", + }, + }, + }) + return c, nil + }, + target: resolver.Target{ + URL: url.URL{ + Host: "non-existing-authority", + Path: "/" + targetStr, + }, + }, wantErr: true, }, } @@ -148,7 +174,7 @@ func (s) TestResolverBuilder(t *testing.T) { t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) } - r, err := builder.Build(target, newTestClientConn(), resolver.BuildOptions{}) + r, err := builder.Build(test.target, newTestClientConn(), resolver.BuildOptions{}) if (err != nil) != test.wantErr { t.Fatalf("builder.Build(%v) returned err: %v, wantErr: %v", target, err, test.wantErr) } @@ -168,13 +194,20 @@ func (s) TestResolverBuilder(t *testing.T) { func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { // Fake out the xdsClient creation process by providing a fake, which does // not have any certificate provider configuration. + fc := fakeclient.NewClient() + fc.SetBootstrapConfig(&bootstrap.Config{}) oldClientMaker := newXDSClient newXDSClient = func() (xdsclient.XDSClient, error) { - fc := fakeclient.NewClient() - fc.SetBootstrapConfig(&bootstrap.Config{}) return fc, nil } defer func() { newXDSClient = oldClientMaker }() + defer func() { + select { + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for close") + case <-fc.Closed.Done(): + } + }() builder := resolver.Get(xdsScheme) if builder == nil { @@ -196,29 +229,45 @@ func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { } type setupOpts struct { - xdsClientFunc func() (xdsclient.XDSClient, error) + bootstrapC *bootstrap.Config + target resolver.Target } -func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *testClientConn, func()) { +func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *fakeclient.Client, *testClientConn, func()) { t.Helper() + fc := fakeclient.NewClient() + if opts.bootstrapC != nil { + fc.SetBootstrapConfig(opts.bootstrapC) + } oldClientMaker := newXDSClient - newXDSClient = opts.xdsClientFunc + newXDSClient = 
func() (xdsclient.XDSClient, error) { + return fc, nil + } cancel := func() { + // Make sure the xDS client is closed, in all (successful or failed) + // cases. + select { + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for close") + case <-fc.Closed.Done(): + } newXDSClient = oldClientMaker } - builder := resolver.Get(xdsScheme) if builder == nil { t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) } tcc := newTestClientConn() - r, err := builder.Build(target, tcc, resolver.BuildOptions{}) + r, err := builder.Build(opts.target, tcc, resolver.BuildOptions{}) if err != nil { t.Fatalf("builder.Build(%v) returned err: %v", target, err) } - return r.(*xdsResolver), tcc, cancel + return r.(*xdsResolver), fc, tcc, func() { + r.Close() + cancel() + } } // waitForWatchListener waits for the WatchListener method to be called on the @@ -251,13 +300,97 @@ func waitForWatchRouteConfig(ctx context.Context, t *testing.T, xdsC *fakeclient } } +// TestXDSResolverResourceNameToWatch tests that the correct resource name is +// used to watch for the service. This covers cases with different bootstrap +// config, and different authority. 
+func (s) TestXDSResolverResourceNameToWatch(t *testing.T) { + tests := []struct { + name string + bc *bootstrap.Config + target resolver.Target + want string + }{ + { + name: "default %s old style", + bc: &bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "%s", + }, + target: resolver.Target{ + URL: url.URL{Path: "/" + targetStr}, + }, + want: targetStr, + }, + { + name: "old style no percent encoding", + bc: &bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "/path/to/%s", + }, + target: resolver.Target{ + URL: url.URL{Path: "/" + targetStr}, + }, + want: "/path/to/" + targetStr, + }, + { + name: "new style with %s", + bc: &bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "xdstp://authority.com/%s", + Authorities: nil, + }, + target: resolver.Target{ + URL: url.URL{Path: "/0.0.0.0:8080"}, + }, + want: "xdstp://authority.com/0.0.0.0:8080", + }, + { + name: "new style percent encoding", + bc: &bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "xdstp://authority.com/%s", + Authorities: nil, + }, + target: resolver.Target{ + URL: url.URL{Path: "/[::1]:8080"}, + }, + want: "xdstp://authority.com/%5B::1%5D:8080", + }, + { + name: "new style different authority", + bc: &bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "xdstp://authority.com/%s", + Authorities: map[string]*bootstrap.Authority{ + "test-authority": { + ClientListenerResourceNameTemplate: "xdstp://test-authority/%s", + }, + }, + }, + target: resolver.Target{ + URL: url.URL{ + Host: "test-authority", + Path: "/" + targetStr, + }, + }, + want: "xdstp://test-authority/" + targetStr, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + xdsR, xdsC, _, cancel := testSetup(t, setupOpts{ + bootstrapC: tt.bc, + target: tt.target, + }) + defer cancel() + defer xdsR.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + waitForWatchListener(ctx, t, xdsC, tt.want) + }) + } +} 
+ // TestXDSResolverWatchCallbackAfterClose tests the case where a service update // from the underlying xdsClient is received after the resolver is closed. func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -286,10 +419,7 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { // TestXDSResolverCloseClosesXDSClient tests that the XDS resolver's Close // method closes the XDS client. func (s) TestXDSResolverCloseClosesXDSClient(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, _, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, _, cancel := testSetup(t, setupOpts{target: target}) defer cancel() xdsR.Close() if !xdsC.Closed.HasFired() { @@ -300,10 +430,7 @@ func (s) TestXDSResolverCloseClosesXDSClient(t *testing.T) { // TestXDSResolverBadServiceUpdate tests the case the xdsClient returns a bad // service update. func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -326,10 +453,7 @@ func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { // TestXDSResolverGoodServiceUpdate tests the happy case where the resolver // gets a good service update from the xdsClient. 
func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -465,10 +589,7 @@ func (s) TestXDSResolverRequestHash(t *testing.T) { env.RingHashSupport = true defer func() { env.RingHashSupport = oldRH }() - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -525,10 +646,7 @@ func (s) TestXDSResolverRequestHash(t *testing.T) { // TestXDSResolverRemovedWithRPCs tests the case where a config selector sends // an empty update to the resolver after the resource is removed. func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer cancel() defer xdsR.Close() @@ -585,10 +703,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { // TestXDSResolverRemovedResource tests for proper behavior after a resource is // removed. 
func (s) TestXDSResolverRemovedResource(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer cancel() defer xdsR.Close() @@ -693,10 +808,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { } func (s) TestXDSResolverWRR(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -753,10 +865,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { } func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -846,10 +955,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { // TestXDSResolverDelayedOnCommitted tests that clusters remain in service // config if RPCs are in flight. func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -995,10 +1101,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { // TestXDSResolverUpdates tests the cases where the resolver gets a good update // after an error, and an error after the good update. 
func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -1049,10 +1152,7 @@ func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { // a ResourceNotFoundError. It should generate a service config picking // weighted_target, but no child balancers. func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -1095,10 +1195,7 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { // // This test case also makes sure the resolver doesn't panic. func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() @@ -1270,10 +1367,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { for i, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsclient.XDSClient, error) { return xdsC, nil }, - }) + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() defer cancel() diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 132fa413a7e3..871aa7288c63 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -306,17 +306,18 @@ func 
NewClient() *Client { func NewClientWithName(name string) *Client { return &Client{ name: name, - ldsWatchCh: testutils.NewChannel(), + ldsWatchCh: testutils.NewChannelWithSize(10), rdsWatchCh: testutils.NewChannelWithSize(10), cdsWatchCh: testutils.NewChannelWithSize(10), edsWatchCh: testutils.NewChannelWithSize(10), - ldsCancelCh: testutils.NewChannel(), + ldsCancelCh: testutils.NewChannelWithSize(10), rdsCancelCh: testutils.NewChannelWithSize(10), cdsCancelCh: testutils.NewChannelWithSize(10), edsCancelCh: testutils.NewChannelWithSize(10), loadReportCh: testutils.NewChannel(), lrsCancelCh: testutils.NewChannel(), loadStore: load.NewStore(), + bootstrapCfg: &bootstrap.Config{ClientDefaultListenerResourceNameTemplate: "%s"}, rdsCbs: make(map[string]func(xdsresource.RouteConfigUpdate, error)), cdsCbs: make(map[string]func(xdsresource.ClusterUpdate, error)), edsCbs: make(map[string]func(xdsresource.EndpointsUpdate, error)), diff --git a/xds/internal/xdsclient/bootstrap/template.go b/xds/internal/xdsclient/bootstrap/template.go new file mode 100644 index 000000000000..9b51fcc83972 --- /dev/null +++ b/xds/internal/xdsclient/bootstrap/template.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package bootstrap + +import ( + "net/url" + "strings" +) + +// PopulateResourceTemplate populates the given template using the target +// string. 
"%s", if exists in the template, will be replaced with target. +// +// If the template starts with "xdstp:", the replaced string will be %-encoded. +// But note that "/" is not percent encoded. +func PopulateResourceTemplate(template, target string) string { + if !strings.Contains(template, "%s") { + return template + } + if strings.HasPrefix(template, "xdstp:") { + target = percentEncode(target) + } + return strings.Replace(template, "%s", target, -1) +} + +// percentEncode percent encode t, except for "/". See the tests for examples. +func percentEncode(t string) string { + segs := strings.Split(t, "/") + for i := range segs { + segs[i] = url.PathEscape(segs[i]) + } + return strings.Join(segs, "/") +} diff --git a/xds/internal/xdsclient/bootstrap/template_test.go b/xds/internal/xdsclient/bootstrap/template_test.go new file mode 100644 index 000000000000..bc12eb42991f --- /dev/null +++ b/xds/internal/xdsclient/bootstrap/template_test.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package bootstrap + +import "testing" + +func Test_percentEncode(t *testing.T) { + tests := []struct { + name string + target string + want string + }{ + { + name: "normal name", + target: "server.example.com", + want: "server.example.com", + }, + { + name: "ipv4", + target: "0.0.0.0:8080", + want: "0.0.0.0:8080", + }, + { + name: "ipv6", + target: "[::1]:8080", + want: "%5B::1%5D:8080", // [ and ] are percent encoded. + }, + { + name: "/ should not be percent encoded", + target: "my/service/region", + want: "my/service/region", // "/"s are kept. + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := percentEncode(tt.target); got != tt.want { + t.Errorf("percentEncode() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPopulateResourceTemplate(t *testing.T) { + tests := []struct { + name string + template string + target string + want string + }{ + { + name: "no %s", + template: "/name/template", + target: "[::1]:8080", + want: "/name/template", + }, + { + name: "with %s, no xdstp: prefix, ipv6", + template: "/name/template/%s", + target: "[::1]:8080", + want: "/name/template/[::1]:8080", + }, + { + name: "with %s, with xdstp: prefix", + template: "xdstp://authority.com/%s", + target: "0.0.0.0:8080", + want: "xdstp://authority.com/0.0.0.0:8080", + }, + { + name: "with %s, with xdstp: prefix, and ipv6", + template: "xdstp://authority.com/%s", + target: "[::1]:8080", + want: "xdstp://authority.com/%5B::1%5D:8080", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := PopulateResourceTemplate(tt.template, tt.target); got != tt.want { + t.Errorf("PopulateResourceTemplate() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/xds/server.go b/xds/server.go index 28abaf84f5f8..0b47fd27ef41 100644 --- a/xds/server.go +++ b/xds/server.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "net" - "strings" "sync" "google.golang.org/grpc" @@ -42,6 +41,7 @@ import ( "google.golang.org/grpc/status" 
"google.golang.org/grpc/xds/internal/server" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -217,10 +217,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { if cfg.ServerListenerResourceNameTemplate == "" { return errors.New("missing server_listener_resource_name_template in the bootstrap configuration") } - name := cfg.ServerListenerResourceNameTemplate - if strings.Contains(cfg.ServerListenerResourceNameTemplate, "%s") { - name = strings.Replace(cfg.ServerListenerResourceNameTemplate, "%s", lis.Addr().String(), -1) - } + name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate, lis.Addr().String()) modeUpdateCh := buffer.NewUnbounded() go func() { From 82c28251681cdebcd81c60a3578e68941ba13902 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 8 Nov 2021 14:18:52 -0800 Subject: [PATCH 325/998] xds/federation: support new bootstrap fields and protect them by an env var (#4936) --- internal/xds/env/env.go | 4 + xds/internal/xdsclient/bootstrap/bootstrap.go | 16 ++ .../xdsclient/bootstrap/bootstrap_test.go | 214 ++++++++++++++++++ 3 files changed, 234 insertions(+) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 87d3c2433a4f..2c85e7804ba0 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -44,6 +44,7 @@ const ( aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -88,6 +89,9 @@ var ( // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
RBACSupport = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + // FederationSupport indicates whether federation support is enabled. + FederationSupport = strings.EqualFold(os.Getenv(federationEnv), "true") + // C2PResolverSupport indicates whether support for C2P resolver is enabled. // This can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 7f75525cc631..fd68367054ad 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -338,6 +338,22 @@ func NewConfigFromContents(data []byte) (*Config, error) { if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil { return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } + case "client_default_listener_resource_name_template": + if !env.FederationSupport { + logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) + continue + } + if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "authorities": + if !env.FederationSupport { + logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) + continue + } + if err := json.Unmarshal(v, &config.Authorities); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } default: logger.Warningf("Bootstrap content has unknown field: %s", k) } diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 9d6ead0ff5b5..6348be1324ae 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ 
b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -775,3 +775,217 @@ func TestNewConfigWithServerListenerResourceNameTemplate(t *testing.T) { }) } } + +func TestNewConfigWithFederation(t *testing.T) { + cancel := setupBootstrapOverride(map[string]string{ + "badClientListenerResourceNameTemplate": ` + { + "node": { "id": "ENVOY_NODE_ID" }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443" + }], + "client_default_listener_resource_name_template": 123456789 + }`, + "badClientListenerResourceNameTemplatePerAuthority": ` + { + "node": { "id": "ENVOY_NODE_ID" }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "authorities": { + "xds.td.com": { + "client_listener_resource_name_template": "some/template/%s", + "xds_servers": [{ + "server_uri": "td.com", + "channel_creds": [ { "type": "google_default" } ], + "server_features" : ["foo", "bar", "xds_v3"] + }] + } + } + }`, + "good": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "server_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/grpc/server?listening_address=%s", + "client_default_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + "authorities": { + "xds.td.com": { + "client_listener_resource_name_template": "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + "xds_servers": [{ + "server_uri": "td.com", + "channel_creds": [ { "type": "google_default" } ], + "server_features" : ["foo", "bar", "xds_v3"] + }] + } + } + }`, + // If client_default_listener_resource_name_template is not set, it + // defaults to "%s". 
+ "goodWithDefaultDefaultClientListenerTemplate": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }] + }`, + // If client_listener_resource_name_template in authority is not set, it + // defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + "goodWithDefaultClientListenerTemplatePerAuthority": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "client_default_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + "authorities": { + "xds.td.com": { } + } + }`, + // It's OK for an authority to not have servers. The top-level server + // will be used. 
+ "goodWithNoServerPerAuthority": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "client_default_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + "authorities": { + "xds.td.com": { + "client_listener_resource_name_template": "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s" + } + } + }`, + }) + defer cancel() + + tests := []struct { + name string + wantConfig *Config + wantErr bool + }{ + { + name: "badClientListenerResourceNameTemplate", + wantErr: true, + }, + { + name: "badClientListenerResourceNameTemplatePerAuthority", + wantErr: true, + }, + { + name: "good", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + TransportAPI: version.TransportV2, + NodeProto: v2NodeProto, + }, + ServerListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/grpc/server?listening_address=%s", + ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + Authorities: map[string]*Authority{ + "xds.td.com": { + ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + XDSServer: &ServerConfig{ + ServerURI: "td.com", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + TransportAPI: version.TransportV3, + NodeProto: v3NodeProto, + }, + }, + }, + }, + }, + { + name: "goodWithDefaultDefaultClientListenerTemplate", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + 
credsType: "google_default", + TransportAPI: version.TransportV2, + NodeProto: v2NodeProto, + }, + ClientDefaultListenerResourceNameTemplate: "%s", + }, + }, + { + name: "goodWithDefaultClientListenerTemplatePerAuthority", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + TransportAPI: version.TransportV2, + NodeProto: v2NodeProto, + }, + ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + Authorities: map[string]*Authority{ + "xds.td.com": { + ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + }, + }, + }, + }, + { + name: "goodWithNoServerPerAuthority", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + credsType: "google_default", + TransportAPI: version.TransportV2, + NodeProto: v2NodeProto, + }, + ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + Authorities: map[string]*Authority{ + "xds.td.com": { + ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + }, + }, + }, + }, + } + + oldFederationSupport := env.FederationSupport + env.FederationSupport = true + defer func() { env.FederationSupport = oldFederationSupport }() + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testNewConfigWithFileNameEnv(t, test.name, test.wantErr, test.wantConfig) + testNewConfigWithFileContentEnv(t, test.name, test.wantErr, test.wantConfig) + }) + } +} From bac0a7e47f70cf08cd80193ff26a55d954e1e102 Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Mon, 8 Nov 2021 14:57:42 -0800 Subject: [PATCH 326/998] transport: pass handshake info with attributes to custom dialers 
(#4938) --- internal/transport/http2_client.go | 11 +++++---- internal/transport/transport_test.go | 37 ++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 5 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 2521a7d7a408..c763b23aad04 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -201,6 +201,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { @@ -245,11 +251,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the credential handshaker. This makes it possible for - // address specific arbitrary data to reach the credential handshaker. - connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) rawConn := conn // Pull the deadline from the connectCtx, which will be used for // timeouts in the authentication protocol handshake. 
Can ignore the diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 4e561a73c4cb..e0a9536d8f9e 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -2187,6 +2187,43 @@ func (s) TestClientHandshakeInfo(t *testing.T) { } } +// TestClientHandshakeInfoDialer adds attributes to the resolver.Address passes to +// NewClientTransport and verifies that these attributes are received by a custom +// dialer. +func (s) TestClientHandshakeInfoDialer(t *testing.T) { + server := setUpServerOnly(t, 0, &ServerConfig{}, pingpong) + defer server.stop() + + const ( + testAttrKey = "foo" + testAttrVal = "bar" + ) + addr := resolver.Address{ + Addr: "localhost:" + server.port, + Attributes: attributes.New(testAttrKey, testAttrVal), + } + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) + defer cancel() + + var attr *attributes.Attributes + dialer := func(ctx context.Context, addr string) (net.Conn, error) { + ai := credentials.ClientHandshakeInfoFromContext(ctx) + attr = ai.Attributes + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + } + + tr, err := NewClientTransport(ctx, context.Background(), addr, ConnectOptions{Dialer: dialer}, func() {}, func(GoAwayReason) {}, func() {}) + if err != nil { + t.Fatalf("NewClientTransport(): %v", err) + } + defer tr.Close(fmt.Errorf("closed manually by test")) + + wantAttr := attributes.New(testAttrKey, testAttrVal) + if gotAttr := attr; !cmp.Equal(gotAttr, wantAttr, cmp.AllowUnexported(attributes.Attributes{})) { + t.Errorf("Received attributes %v in custom dialer, want %v", gotAttr, wantAttr) + } +} + func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { testStream := func() *Stream { return &Stream{ From 59e024e4c7452142fe67de2ff5fc26bbc025928f Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 9 Nov 2021 10:32:16 -0800 Subject: [PATCH 327/998] xds/client: move watchers from xdsclient to a separate struct (#4963) 
--- xds/csds/csds.go | 2 +- .../balancer/cdsbalancer/cdsbalancer.go | 4 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 4 +- .../clusterresolver/clusterresolver.go | 3 +- .../clusterresolver/clusterresolver_test.go | 8 +- xds/internal/resolver/watch_service.go | 2 +- xds/internal/resolver/xds_resolver.go | 3 +- xds/internal/resolver/xds_resolver_test.go | 6 +- xds/internal/server/listener_wrapper.go | 5 +- xds/internal/xdsclient/attributes.go | 8 +- xds/internal/xdsclient/callback.go | 281 +-------------- xds/internal/xdsclient/client.go | 135 ++------ xds/internal/xdsclient/client_test.go | 34 +- xds/internal/xdsclient/dump.go | 99 +----- xds/internal/xdsclient/dump_test.go | 66 ++-- xds/internal/xdsclient/pubsub/dump.go | 92 +++++ xds/internal/xdsclient/pubsub/pubsub.go | 186 ++++++++++ xds/internal/xdsclient/pubsub/update.go | 319 ++++++++++++++++++ xds/internal/xdsclient/pubsub/watch.go | 232 +++++++++++++ xds/internal/xdsclient/transport_helper.go | 39 +-- xds/internal/xdsclient/v2/ack_test.go | 119 ++++--- xds/internal/xdsclient/v2/cds_test.go | 5 +- xds/internal/xdsclient/v2/client.go | 24 +- xds/internal/xdsclient/v2/client_test.go | 48 +-- xds/internal/xdsclient/v2/eds_test.go | 5 +- xds/internal/xdsclient/v2/lds_test.go | 5 +- xds/internal/xdsclient/v2/rds_test.go | 6 +- xds/internal/xdsclient/v3/client.go | 24 +- xds/internal/xdsclient/watchers.go | 280 ++------------- .../xdsclient/watchers_cluster_test.go | 28 +- .../xdsclient/watchers_endpoints_test.go | 20 +- .../xdsclient/watchers_listener_test.go | 26 +- xds/internal/xdsclient/watchers_route_test.go | 18 +- .../xdsclient/{ => xdsresource}/errors.go | 2 +- xds/internal/xdsclient/xdsresource/type.go | 43 +++ xds/server_test.go | 2 +- 36 files changed, 1204 insertions(+), 979 deletions(-) create mode 100644 xds/internal/xdsclient/pubsub/dump.go create mode 100644 xds/internal/xdsclient/pubsub/pubsub.go create mode 100644 xds/internal/xdsclient/pubsub/update.go create mode 100644 
xds/internal/xdsclient/pubsub/watch.go rename xds/internal/xdsclient/{ => xdsresource}/errors.go (98%) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 5b9d1c467cb2..a54afaf2cb35 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -174,7 +174,7 @@ func nodeProtoToV3(n proto.Message) *v3corepb.Node { return node } -func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xdsclient.UpdateWithMD)) []*v3statuspb.ClientConfig_GenericXdsConfig { +func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xdsresource.UpdateWithMD)) []*v3statuspb.ClientConfig_GenericXdsConfig { _, dump := dumpF() ret := make([]*v3statuspb.ClientConfig_GenericXdsConfig, 0, len(dump)) for name, d := range dump { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 9c128dfb4639..5f898c87918f 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -431,11 +431,11 @@ func (b *cdsBalancer) run() { func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { // This is not necessary today, because xds client never sends connection // errors. - if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { + if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { b.clusterHandler.close() } if b.childLB != nil { - if xdsclient.ErrType(err) != xdsclient.ErrorTypeConnection { + if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { // Connection errors will be sent to the child balancers directly. // There's no need to forward them. 
b.childLB.ResolverError(err) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 7979f82e8f6e..267fa3b10cd2 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -472,7 +472,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { } // Push a resource-not-found-error this time around. - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") + resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, resourceErr) // Make sure that the watch is not cancelled. This error indicates that the // request cluster resource is not found. We should continue to watch it. @@ -557,7 +557,7 @@ func (s) TestResolverError(t *testing.T) { } // Push a resource-not-found-error this time around. - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") + resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") cdsB.ResolverError(resourceErr) // Make sure the registered watch is cancelled. if _, err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index 66a5aab305eb..d49014cfa433 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // Name is the name of the cluster_resolver balancer. 
@@ -244,7 +245,7 @@ func (b *clusterResolverBalancer) updateChildConfig() error { // In both cases, the sub-balancers will be receive the error. func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { b.logger.Warningf("Received error: %v", err) - if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { + if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { // This is an error from the parent ClientConn (can be the parent CDS // balancer), and is a resource-not-found error. This means the resource // (can be either LDS or CDS) was removed. Stop the EDS watch. diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 2cd692bbade9..808f3050e3e2 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -280,7 +280,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { t.Fatalf("EDS impl got unexpected update: %v", err) } - connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") + connectionErr := xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "connection error") xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, connectionErr) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -298,7 +298,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { t.Fatalf("want resolver error, got %v", err) } - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") + resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, resourceErr) // Even if error is resource not found, watch shouldn't be canceled, because // this is an EDS resource 
removed (and xds client actually never sends this @@ -369,7 +369,7 @@ func (s) TestErrorFromResolver(t *testing.T) { t.Fatalf("EDS impl got unexpected update: %v", err) } - connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") + connectionErr := xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "connection error") edsB.ResolverError(connectionErr) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -387,7 +387,7 @@ func (s) TestErrorFromResolver(t *testing.T) { t.Fatalf("want resolver error, got %v", err) } - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") + resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") edsB.ResolverError(resourceErr) if _, err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { t.Fatalf("want watch to be canceled, waitForCancel failed: %v", err) diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 4801fc40e43d..30f65727d08a 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -94,7 +94,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, // type we check is ResourceNotFound, which indicates the LDS resource // was removed, and besides sending the error to callback, we also // cancel the RDS watch. 
- if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound && w.rdsCancel != nil { + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound && w.rdsCancel != nil { w.rdsCancel() w.rdsName = "" w.rdsCancel = nil diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index e04ce00b2d29..41e81a31865c 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const xdsScheme = "xds" @@ -234,7 +235,7 @@ func (r *xdsResolver) run() { case update := <-r.updateCh: if update.err != nil { r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.target.Endpoint, r.client, update.err) - if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { // If error is resource-not-found, it means the LDS // resource was removed. 
Ultimately send an empty service // config, which picks pick-first, with no address, and diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index e366b9fb6265..a078b82c5eb6 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -688,7 +688,7 @@ func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { } // Delete the resource - suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") + suErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource removed error") xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if _, err = tcc.stateCh.Receive(ctx); err != nil { @@ -764,7 +764,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { // Delete the resource. The channel should receive a service config with the // original cluster but with an erroring config selector. - suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") + suErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource removed error") xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotState, err = tcc.stateCh.Receive(ctx); err != nil { @@ -1164,7 +1164,7 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
- suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") + suErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource removed error") xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != context.DeadlineExceeded { diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 045baf00f8c4..6ffb9763ad6e 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -36,7 +36,6 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -365,7 +364,7 @@ func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { } if update.err != nil { l.logger.Warningf("Received error for rds names specified in resource %q: %+v", l.name, update.err) - if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { l.switchMode(nil, connectivity.ServingModeNotServing, update.err) } // For errors which are anything other than "resource-not-found", we @@ -381,7 +380,7 @@ func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { if update.err != nil { l.logger.Warningf("Received error for resource %q: %+v", l.name, update.err) - if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { l.switchMode(nil, connectivity.ServingModeNotServing, update.err) } // For errors which are anything other than "resource-not-found", we diff --git 
a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index 52507bd83699..2f2fcf98ce91 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -38,10 +38,10 @@ type XDSClient interface { WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func()) ReportLoad(server string) (*load.Store, func()) - DumpLDS() (string, map[string]UpdateWithMD) - DumpRDS() (string, map[string]UpdateWithMD) - DumpCDS() (string, map[string]UpdateWithMD) - DumpEDS() (string, map[string]UpdateWithMD) + DumpLDS() (string, map[string]xdsresource.UpdateWithMD) + DumpRDS() (string, map[string]xdsresource.UpdateWithMD) + DumpCDS() (string, map[string]xdsresource.UpdateWithMD) + DumpEDS() (string, map[string]xdsresource.UpdateWithMD) BootstrapConfig() *bootstrap.Config Close() diff --git a/xds/internal/xdsclient/callback.go b/xds/internal/xdsclient/callback.go index 6643d1d4e824..1ad1659e12e9 100644 --- a/xds/internal/xdsclient/callback.go +++ b/xds/internal/xdsclient/callback.go @@ -19,129 +19,16 @@ package xdsclient import ( - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/proto" ) -type watcherInfoWithUpdate struct { - wi *watchInfo - update interface{} - err error -} - -// scheduleCallback should only be called by methods of watchInfo, which checks -// for watcher states and maintain consistency. -func (c *clientImpl) scheduleCallback(wi *watchInfo, update interface{}, err error) { - c.updateCh.Put(&watcherInfoWithUpdate{ - wi: wi, - update: update, - err: err, - }) -} - -func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { - c.mu.Lock() - // Use a closure to capture the callback and type assertion, to save one - // more switch case. - // - // The callback must be called without c.mu. Otherwise if the callback calls - // another watch() inline, it will cause a deadlock. 
This leaves a small - // window that a watcher's callback could be called after the watcher is - // canceled, and the user needs to take care of it. - var ccb func() - switch wiu.wi.rType { - case ListenerResource: - if s, ok := c.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.ldsCallback(wiu.update.(xdsresource.ListenerUpdate), wiu.err) } - } - case RouteConfigResource: - if s, ok := c.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.rdsCallback(wiu.update.(xdsresource.RouteConfigUpdate), wiu.err) } - } - case ClusterResource: - if s, ok := c.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.cdsCallback(wiu.update.(xdsresource.ClusterUpdate), wiu.err) } - } - case EndpointsResource: - if s, ok := c.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.edsCallback(wiu.update.(xdsresource.EndpointsUpdate), wiu.err) } - } - } - c.mu.Unlock() - - if ccb != nil { - ccb() - } -} - // NewListeners is called by the underlying xdsAPIClient when it receives an // xDS response. // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. func (c *clientImpl) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - c.ldsVersion = metadata.Version - if metadata.ErrState != nil { - c.ldsVersion = metadata.ErrState.Version - } - for name, uErr := range updates { - if s, ok := c.ldsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.ldsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.ldsMD[name] = mdCopy - for wi := range s { - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. 
Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := c.ldsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. - c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - c.ldsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - c.ldsMD[name] = mdCopy - } - } - // Resources not in the new update were removed by the server, so delete - // them. - for name := range c.ldsCache { - if _, ok := updates[name]; !ok { - // If resource exists in cache, but not in the new update, delete - // the resource from cache, and also send an resource not found - // error to indicate resource removed. - delete(c.ldsCache, name) - c.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} - for wi := range c.ldsWatchers[name] { - wi.resourceNotFound() - } - } - } - // When LDS resource is removed, we don't delete corresponding RDS cached - // data. The RDS watch will be canceled, and cache entry is removed when the - // last watch is canceled. + c.pubsub.NewListeners(updates, metadata) } // NewRouteConfigs is called by the underlying xdsAPIClient when it receives an @@ -150,51 +37,7 @@ func (c *clientImpl) NewListeners(updates map[string]xdsresource.ListenerUpdateE // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. 
func (c *clientImpl) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - // If no error received, the status is ACK. - c.rdsVersion = metadata.Version - if metadata.ErrState != nil { - c.rdsVersion = metadata.ErrState.Version - } - for name, uErr := range updates { - if s, ok := c.rdsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.rdsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.rdsMD[name] = mdCopy - for wi := range s { - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := c.rdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. - c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - c.rdsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - c.rdsMD[name] = mdCopy - } - } + c.pubsub.NewRouteConfigs(updates, metadata) } // NewClusters is called by the underlying xdsAPIClient when it receives an xDS @@ -203,69 +46,7 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]xdsresource.RouteConfigU // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. 
func (c *clientImpl) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - c.cdsVersion = metadata.Version - if metadata.ErrState != nil { - c.cdsVersion = metadata.ErrState.Version - } - for name, uErr := range updates { - if s, ok := c.cdsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.cdsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.cdsMD[name] = mdCopy - for wi := range s { - // Send the watcher the individual error, instead of the - // overall combined error from the metadata.ErrState. - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := c.cdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. - c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - c.cdsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - c.cdsMD[name] = mdCopy - } - } - // Resources not in the new update were removed by the server, so delete - // them. - for name := range c.cdsCache { - if _, ok := updates[name]; !ok { - // If resource exists in cache, but not in the new update, delete it - // from cache, and also send an resource not found error to indicate - // resource removed. 
- delete(c.cdsCache, name) - c.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} - for wi := range c.cdsWatchers[name] { - wi.resourceNotFound() - } - } - } - // When CDS resource is removed, we don't delete corresponding EDS cached - // data. The EDS watch will be canceled, and cache entry is removed when the - // last watch is canceled. + c.pubsub.NewClusters(updates, metadata) } // NewEndpoints is called by the underlying xdsAPIClient when it receives an @@ -274,63 +55,11 @@ func (c *clientImpl) NewClusters(updates map[string]xdsresource.ClusterUpdateErr // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. func (c *clientImpl) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - c.edsVersion = metadata.Version - if metadata.ErrState != nil { - c.edsVersion = metadata.ErrState.Version - } - for name, uErr := range updates { - if s, ok := c.edsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.edsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.edsMD[name] = mdCopy - for wi := range s { - // Send the watcher the individual error, instead of the - // overall combined error from the metadata.ErrState. - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := c.edsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. 
- c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - c.edsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - c.edsMD[name] = mdCopy - } - } + c.pubsub.NewEndpoints(updates, metadata) } // NewConnectionError is called by the underlying xdsAPIClient when it receives // a connection error. The error will be forwarded to all the resource watchers. func (c *clientImpl) NewConnectionError(err error) { - c.mu.Lock() - defer c.mu.Unlock() - - for _, s := range c.edsWatchers { - for wi := range s { - wi.newError(NewErrorf(ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) - } - } + c.pubsub.NewConnectionError(err) } diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 74dab87742a5..56441142cc5a 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -30,13 +30,13 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -103,11 +103,11 @@ type APIClientBuilder interface { // will still keep this interface for testing purposes. type APIClient interface { // AddWatch adds a watch for an xDS resource given its type and name. 
- AddWatch(ResourceType, string) + AddWatch(xdsresource.ResourceType, string) // RemoveWatch cancels an already registered watch for an xDS resource // given its type and name. - RemoveWatch(ResourceType, string) + RemoveWatch(xdsresource.ResourceType, string) // reportLoad starts an LRS stream to periodically report load using the // provided ClientConn, which represent a connection to the management @@ -157,35 +157,13 @@ var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, op // style of ccBalancerWrapper so that the Client type does not implement these // exported methods. type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config - cc *grpc.ClientConn // Connection to the management server. - apiClient APIClient - watchExpiryTimeout time.Duration + done *grpcsync.Event + config *bootstrap.Config + cc *grpc.ClientConn // Connection to the management server. + apiClient APIClient logger *grpclog.PrefixLogger - - updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate - // All the following maps are to keep the updates/metadata in a cache. - // TODO: move them to a separate struct/package, to cleanup the xds_client. - // And CSDS handler can be implemented directly by the cache. - mu sync.Mutex - ldsWatchers map[string]map[*watchInfo]bool - ldsVersion string // Only used in CSDS. - ldsCache map[string]xdsresource.ListenerUpdate - ldsMD map[string]xdsresource.UpdateMetadata - rdsWatchers map[string]map[*watchInfo]bool - rdsVersion string // Only used in CSDS. - rdsCache map[string]xdsresource.RouteConfigUpdate - rdsMD map[string]xdsresource.UpdateMetadata - cdsWatchers map[string]map[*watchInfo]bool - cdsVersion string // Only used in CSDS. - cdsCache map[string]xdsresource.ClusterUpdate - cdsMD map[string]xdsresource.UpdateMetadata - edsWatchers map[string]map[*watchInfo]bool - edsVersion string // Only used in CSDS. 
- edsCache map[string]xdsresource.EndpointsUpdate - edsMD map[string]xdsresource.UpdateMetadata + pubsub *pubsub.Pubsub // Changes to map lrsClients and the lrsClient inside the map need to be // protected by lrsMu. @@ -194,7 +172,7 @@ type clientImpl struct { } // newWithConfig returns a new xdsClient with the given config. -func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (*clientImpl, error) { +func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (_ *clientImpl, retErr error) { switch { case config.XDSServer == nil: return nil, errors.New("xds: no xds_server provided") @@ -215,35 +193,36 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( } c := &clientImpl{ - done: grpcsync.NewEvent(), - config: config, - watchExpiryTimeout: watchExpiryTimeout, - - updateCh: buffer.NewUnbounded(), - ldsWatchers: make(map[string]map[*watchInfo]bool), - ldsCache: make(map[string]xdsresource.ListenerUpdate), - ldsMD: make(map[string]xdsresource.UpdateMetadata), - rdsWatchers: make(map[string]map[*watchInfo]bool), - rdsCache: make(map[string]xdsresource.RouteConfigUpdate), - rdsMD: make(map[string]xdsresource.UpdateMetadata), - cdsWatchers: make(map[string]map[*watchInfo]bool), - cdsCache: make(map[string]xdsresource.ClusterUpdate), - cdsMD: make(map[string]xdsresource.UpdateMetadata), - edsWatchers: make(map[string]map[*watchInfo]bool), - edsCache: make(map[string]xdsresource.EndpointsUpdate), - edsMD: make(map[string]xdsresource.UpdateMetadata), - lrsClients: make(map[string]*lrsClient), + done: grpcsync.NewEvent(), + config: config, + lrsClients: make(map[string]*lrsClient), } + defer func() { + if retErr != nil { + if c.cc != nil { + c.cc.Close() + } + if c.pubsub != nil { + c.pubsub.Close() + } + if c.apiClient != nil { + c.apiClient.Close() + } + } + }() + cc, err := grpc.Dial(config.XDSServer.ServerURI, dopts...) 
if err != nil { // An error from a non-blocking dial indicates something serious. return nil, fmt.Errorf("xds: failed to dial balancer {%s}: %v", config.XDSServer.ServerURI, err) } c.cc = cc - c.logger = prefixLogger((c)) + c.logger = prefixLogger(c) c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) + c.pubsub = pubsub.New(watchExpiryTimeout, c.logger) + apiClient, err := newAPIClient(config.XDSServer.TransportAPI, cc, BuildOptions{ Parent: c, Validator: c.updateValidator, @@ -252,12 +231,10 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( Logger: c.logger, }) if err != nil { - cc.Close() return nil, err } c.apiClient = apiClient c.logger.Infof("Created") - go c.run() return c, nil } @@ -267,27 +244,6 @@ func (c *clientRefCounted) BootstrapConfig() *bootstrap.Config { return c.config } -// run is a goroutine for all the callbacks. -// -// Callback can be called in watch(), if an item is found in cache. Without this -// goroutine, the callback will be called inline, which might cause a deadlock -// in user's code. Callbacks also cannot be simple `go callback()` because the -// order matters. -func (c *clientImpl) run() { - for { - select { - case t := <-c.updateCh.Get(): - c.updateCh.Load() - if c.done.HasFired() { - return - } - c.callCallback(t.(*watcherInfoWithUpdate)) - case <-c.done.Done(): - return - } - } -} - // Close closes the gRPC connection to the management server. func (c *clientImpl) Close() { if c.done.HasFired() { @@ -298,6 +254,7 @@ func (c *clientImpl) Close() { // the client is closed? c.apiClient.Close() c.cc.Close() + c.pubsub.Close() c.logger.Infof("Shutdown") } @@ -342,35 +299,3 @@ func (c *clientImpl) updateValidator(u interface{}) error { } return nil } - -// ResourceType identifies resources in a transport protocol agnostic way. These -// will be used in transport version agnostic code, while the versioned API -// clients will map these to appropriate version URLs. 
-type ResourceType int - -// Version agnostic resource type constants. -const ( - UnknownResource ResourceType = iota - ListenerResource - HTTPConnManagerResource - RouteConfigResource - ClusterResource - EndpointsResource -) - -func (r ResourceType) String() string { - switch r { - case ListenerResource: - return "ListenerResource" - case HTTPConnManagerResource: - return "HTTPConnManagerResource" - case RouteConfigResource: - return "RouteConfigResource" - case ClusterResource: - return "ClusterResource" - case EndpointsResource: - return "EndpointsResource" - default: - return "UnknownResource" - } -} diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index a668ff1378f3..f20d6112a84c 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -81,8 +81,8 @@ func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstra type testAPIClient struct { done *grpcsync.Event - addWatches map[ResourceType]*testutils.Channel - removeWatches map[ResourceType]*testutils.Channel + addWatches map[xdsresource.ResourceType]*testutils.Channel + removeWatches map[xdsresource.ResourceType]*testutils.Channel } func overrideNewAPIClient() (*testutils.Channel, func()) { @@ -97,17 +97,17 @@ func overrideNewAPIClient() (*testutils.Channel, func()) { } func newTestAPIClient() *testAPIClient { - addWatches := map[ResourceType]*testutils.Channel{ - ListenerResource: testutils.NewChannel(), - RouteConfigResource: testutils.NewChannel(), - ClusterResource: testutils.NewChannel(), - EndpointsResource: testutils.NewChannel(), - } - removeWatches := map[ResourceType]*testutils.Channel{ - ListenerResource: testutils.NewChannel(), - RouteConfigResource: testutils.NewChannel(), - ClusterResource: testutils.NewChannel(), - EndpointsResource: testutils.NewChannel(), + addWatches := map[xdsresource.ResourceType]*testutils.Channel{ + xdsresource.ListenerResource: testutils.NewChannel(), + 
xdsresource.RouteConfigResource: testutils.NewChannel(), + xdsresource.ClusterResource: testutils.NewChannel(), + xdsresource.EndpointsResource: testutils.NewChannel(), + } + removeWatches := map[xdsresource.ResourceType]*testutils.Channel{ + xdsresource.ListenerResource: testutils.NewChannel(), + xdsresource.RouteConfigResource: testutils.NewChannel(), + xdsresource.ClusterResource: testutils.NewChannel(), + xdsresource.EndpointsResource: testutils.NewChannel(), } return &testAPIClient{ done: grpcsync.NewEvent(), @@ -116,11 +116,11 @@ func newTestAPIClient() *testAPIClient { } } -func (c *testAPIClient) AddWatch(resourceType ResourceType, resourceName string) { +func (c *testAPIClient) AddWatch(resourceType xdsresource.ResourceType, resourceName string) { c.addWatches[resourceType].Send(resourceName) } -func (c *testAPIClient) RemoveWatch(resourceType ResourceType, resourceName string) { +func (c *testAPIClient) RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) { c.removeWatches[resourceType].Send(resourceName) } @@ -158,12 +158,12 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { // Calls another watch inline, to ensure there's deadlock. 
client.WatchCluster("another-random-name", func(xdsresource.ClusterUpdate, error) {}) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); firstTime && err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); firstTime && err != nil { t.Fatalf("want new watch to start, got error %v", err) } firstTime = false }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } diff --git a/xds/internal/xdsclient/dump.go b/xds/internal/xdsclient/dump.go index dfe83c5b1755..0963749277da 100644 --- a/xds/internal/xdsclient/dump.go +++ b/xds/internal/xdsclient/dump.go @@ -19,108 +19,25 @@ package xdsclient import ( - anypb "github.com/golang/protobuf/ptypes/any" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// UpdateWithMD contains the raw message of the update and the metadata, -// including version, raw message, timestamp. -// -// This is to be used for config dump and CSDS, not directly by users (like -// resolvers/balancers). 
-type UpdateWithMD struct { - MD xdsresource.UpdateMetadata - Raw *anypb.Any -} - -func rawFromCache(s string, cache interface{}) *anypb.Any { - switch c := cache.(type) { - case map[string]xdsresource.ListenerUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - case map[string]xdsresource.RouteConfigUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - case map[string]xdsresource.ClusterUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - case map[string]xdsresource.EndpointsUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - default: - return nil - } -} - -func (c *clientImpl) dump(t ResourceType) (string, map[string]UpdateWithMD) { - c.mu.Lock() - defer c.mu.Unlock() - - var ( - version string - md map[string]xdsresource.UpdateMetadata - cache interface{} - ) - switch t { - case ListenerResource: - version = c.ldsVersion - md = c.ldsMD - cache = c.ldsCache - case RouteConfigResource: - version = c.rdsVersion - md = c.rdsMD - cache = c.rdsCache - case ClusterResource: - version = c.cdsVersion - md = c.cdsMD - cache = c.cdsCache - case EndpointsResource: - version = c.edsVersion - md = c.edsMD - cache = c.edsCache - default: - c.logger.Errorf("dumping resource of unknown type: %v", t) - return "", nil - } - - ret := make(map[string]UpdateWithMD, len(md)) - for s, md := range md { - ret[s] = UpdateWithMD{ - MD: md, - Raw: rawFromCache(s, cache), - } - } - return version, ret -} - // DumpLDS returns the status and contents of LDS. -func (c *clientImpl) DumpLDS() (string, map[string]UpdateWithMD) { - return c.dump(ListenerResource) +func (c *clientImpl) DumpLDS() (string, map[string]xdsresource.UpdateWithMD) { + return c.pubsub.Dump(xdsresource.ListenerResource) } // DumpRDS returns the status and contents of RDS. 
-func (c *clientImpl) DumpRDS() (string, map[string]UpdateWithMD) { - return c.dump(RouteConfigResource) +func (c *clientImpl) DumpRDS() (string, map[string]xdsresource.UpdateWithMD) { + return c.pubsub.Dump(xdsresource.RouteConfigResource) } // DumpCDS returns the status and contents of CDS. -func (c *clientImpl) DumpCDS() (string, map[string]UpdateWithMD) { - return c.dump(ClusterResource) +func (c *clientImpl) DumpCDS() (string, map[string]xdsresource.UpdateWithMD) { + return c.pubsub.Dump(xdsresource.ClusterResource) } // DumpEDS returns the status and contents of EDS. -func (c *clientImpl) DumpEDS() (string, map[string]UpdateWithMD) { - return c.dump(EndpointsResource) +func (c *clientImpl) DumpEDS() (string, map[string]xdsresource.UpdateWithMD) { + return c.pubsub.Dump(xdsresource.EndpointsResource) } diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index 2d0b6c17e0b2..0b6d22c7d89f 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -89,15 +89,15 @@ func (s) TestLDSConfigDump(t *testing.T) { updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. - if err := compareDump(client.DumpLDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpLDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } - wantRequested := make(map[string]xdsclient.UpdateWithMD) + wantRequested := make(map[string]xdsresource.UpdateWithMD) for _, n := range ldsTargets { cancel := client.WatchListener(n, func(update xdsresource.ListenerUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} + wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpLDS, "", wantRequested); err != nil { @@ -105,10 +105,10 @@ func (s) TestLDSConfigDump(t *testing.T) { } update0 := make(map[string]xdsresource.ListenerUpdateErrTuple) - want0 := make(map[string]xdsclient.UpdateWithMD) + want0 := make(map[string]xdsresource.UpdateWithMD) for n, r := range listenerRaws { update0[n] = xdsresource.ListenerUpdateErrTuple{Update: xdsresource.ListenerUpdate{Raw: r}} - want0[n] = xdsclient.UpdateWithMD{ + want0[n] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } @@ -137,10 +137,10 @@ func (s) TestLDSConfigDump(t *testing.T) { ) // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsclient.UpdateWithMD) + wantDump := make(map[string]xdsresource.UpdateWithMD) // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. - wantDump[ldsTargets[0]] = xdsclient.UpdateWithMD{ + wantDump[ldsTargets[0]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{ Status: xdsresource.ServiceStatusNACKed, Version: testVersion, @@ -152,7 +152,7 @@ func (s) TestLDSConfigDump(t *testing.T) { Raw: listenerRaws[ldsTargets[0]], } - wantDump[ldsTargets[1]] = xdsclient.UpdateWithMD{ + wantDump[ldsTargets[1]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: listenerRaws[ldsTargets[1]], } @@ -205,15 +205,15 @@ func (s) TestRDSConfigDump(t *testing.T) { updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. 
- if err := compareDump(client.DumpRDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpRDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } - wantRequested := make(map[string]xdsclient.UpdateWithMD) + wantRequested := make(map[string]xdsresource.UpdateWithMD) for _, n := range rdsTargets { cancel := client.WatchRouteConfig(n, func(update xdsresource.RouteConfigUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} + wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. if err := compareDump(client.DumpRDS, "", wantRequested); err != nil { @@ -221,10 +221,10 @@ func (s) TestRDSConfigDump(t *testing.T) { } update0 := make(map[string]xdsresource.RouteConfigUpdateErrTuple) - want0 := make(map[string]xdsclient.UpdateWithMD) + want0 := make(map[string]xdsresource.UpdateWithMD) for n, r := range routeRaws { update0[n] = xdsresource.RouteConfigUpdateErrTuple{Update: xdsresource.RouteConfigUpdate{Raw: r}} - want0[n] = xdsclient.UpdateWithMD{ + want0[n] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } @@ -253,10 +253,10 @@ func (s) TestRDSConfigDump(t *testing.T) { ) // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsclient.UpdateWithMD) + wantDump := make(map[string]xdsresource.UpdateWithMD) // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. 
- wantDump[rdsTargets[0]] = xdsclient.UpdateWithMD{ + wantDump[rdsTargets[0]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{ Status: xdsresource.ServiceStatusNACKed, Version: testVersion, @@ -267,7 +267,7 @@ func (s) TestRDSConfigDump(t *testing.T) { }, Raw: routeRaws[rdsTargets[0]], } - wantDump[rdsTargets[1]] = xdsclient.UpdateWithMD{ + wantDump[rdsTargets[1]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: routeRaws[rdsTargets[1]], } @@ -321,15 +321,15 @@ func (s) TestCDSConfigDump(t *testing.T) { updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. - if err := compareDump(client.DumpCDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpCDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } - wantRequested := make(map[string]xdsclient.UpdateWithMD) + wantRequested := make(map[string]xdsresource.UpdateWithMD) for _, n := range cdsTargets { cancel := client.WatchCluster(n, func(update xdsresource.ClusterUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} + wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
if err := compareDump(client.DumpCDS, "", wantRequested); err != nil { @@ -337,10 +337,10 @@ func (s) TestCDSConfigDump(t *testing.T) { } update0 := make(map[string]xdsresource.ClusterUpdateErrTuple) - want0 := make(map[string]xdsclient.UpdateWithMD) + want0 := make(map[string]xdsresource.UpdateWithMD) for n, r := range clusterRaws { update0[n] = xdsresource.ClusterUpdateErrTuple{Update: xdsresource.ClusterUpdate{Raw: r}} - want0[n] = xdsclient.UpdateWithMD{ + want0[n] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } @@ -369,10 +369,10 @@ func (s) TestCDSConfigDump(t *testing.T) { ) // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsclient.UpdateWithMD) + wantDump := make(map[string]xdsresource.UpdateWithMD) // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. - wantDump[cdsTargets[0]] = xdsclient.UpdateWithMD{ + wantDump[cdsTargets[0]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{ Status: xdsresource.ServiceStatusNACKed, Version: testVersion, @@ -383,7 +383,7 @@ func (s) TestCDSConfigDump(t *testing.T) { }, Raw: clusterRaws[cdsTargets[0]], } - wantDump[cdsTargets[1]] = xdsclient.UpdateWithMD{ + wantDump[cdsTargets[1]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: clusterRaws[cdsTargets[1]], } @@ -423,15 +423,15 @@ func (s) TestEDSConfigDump(t *testing.T) { updateHandler := client.(xdsclient.UpdateHandler) // Expected unknown. 
- if err := compareDump(client.DumpEDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpEDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } - wantRequested := make(map[string]xdsclient.UpdateWithMD) + wantRequested := make(map[string]xdsresource.UpdateWithMD) for _, n := range edsTargets { cancel := client.WatchEndpoints(n, func(update xdsresource.EndpointsUpdate, err error) {}) defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} + wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. if err := compareDump(client.DumpEDS, "", wantRequested); err != nil { @@ -439,10 +439,10 @@ func (s) TestEDSConfigDump(t *testing.T) { } update0 := make(map[string]xdsresource.EndpointsUpdateErrTuple) - want0 := make(map[string]xdsclient.UpdateWithMD) + want0 := make(map[string]xdsresource.UpdateWithMD) for n, r := range endpointRaws { update0[n] = xdsresource.EndpointsUpdateErrTuple{Update: xdsresource.EndpointsUpdate{Raw: r}} - want0[n] = xdsclient.UpdateWithMD{ + want0[n] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, Raw: r, } @@ -471,10 +471,10 @@ func (s) TestEDSConfigDump(t *testing.T) { ) // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsclient.UpdateWithMD) + wantDump := make(map[string]xdsresource.UpdateWithMD) // Though resource 0 was NACKed, the dump should show the previous ACKed raw // message, as well as the NACK error. 
- wantDump[edsTargets[0]] = xdsclient.UpdateWithMD{ + wantDump[edsTargets[0]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{ Status: xdsresource.ServiceStatusNACKed, Version: testVersion, @@ -485,7 +485,7 @@ func (s) TestEDSConfigDump(t *testing.T) { }, Raw: endpointRaws[edsTargets[0]], } - wantDump[edsTargets[1]] = xdsclient.UpdateWithMD{ + wantDump[edsTargets[1]] = xdsresource.UpdateWithMD{ MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: endpointRaws[edsTargets[1]], } @@ -494,7 +494,7 @@ func (s) TestEDSConfigDump(t *testing.T) { } } -func compareDump(dumpFunc func() (string, map[string]xdsclient.UpdateWithMD), wantVersion string, wantDump interface{}) error { +func compareDump(dumpFunc func() (string, map[string]xdsresource.UpdateWithMD), wantVersion string, wantDump interface{}) error { v, dump := dumpFunc() if v != wantVersion { return fmt.Errorf("Dump() returned version %q, want %q", v, wantVersion) diff --git a/xds/internal/xdsclient/pubsub/dump.go b/xds/internal/xdsclient/pubsub/dump.go new file mode 100644 index 000000000000..b9523ee76a27 --- /dev/null +++ b/xds/internal/xdsclient/pubsub/dump.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package pubsub + +import ( + anypb "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +func rawFromCache(s string, cache interface{}) *anypb.Any { + switch c := cache.(type) { + case map[string]xdsresource.ListenerUpdate: + if v, ok := c[s]; ok { + return v.Raw + } + return nil + case map[string]xdsresource.RouteConfigUpdate: + if v, ok := c[s]; ok { + return v.Raw + } + return nil + case map[string]xdsresource.ClusterUpdate: + if v, ok := c[s]; ok { + return v.Raw + } + return nil + case map[string]xdsresource.EndpointsUpdate: + if v, ok := c[s]; ok { + return v.Raw + } + return nil + default: + return nil + } +} + +// Dump dumps the resource for the given type. +func (pb *Pubsub) Dump(t xdsresource.ResourceType) (string, map[string]xdsresource.UpdateWithMD) { + pb.mu.Lock() + defer pb.mu.Unlock() + + var ( + version string + md map[string]xdsresource.UpdateMetadata + cache interface{} + ) + switch t { + case xdsresource.ListenerResource: + version = pb.ldsVersion + md = pb.ldsMD + cache = pb.ldsCache + case xdsresource.RouteConfigResource: + version = pb.rdsVersion + md = pb.rdsMD + cache = pb.rdsCache + case xdsresource.ClusterResource: + version = pb.cdsVersion + md = pb.cdsMD + cache = pb.cdsCache + case xdsresource.EndpointsResource: + version = pb.edsVersion + md = pb.edsMD + cache = pb.edsCache + default: + pb.logger.Errorf("dumping resource of unknown type: %v", t) + return "", nil + } + + ret := make(map[string]xdsresource.UpdateWithMD, len(md)) + for s, md := range md { + ret[s] = xdsresource.UpdateWithMD{ + MD: md, + Raw: rawFromCache(s, cache), + } + } + return version, ret +} diff --git a/xds/internal/xdsclient/pubsub/pubsub.go b/xds/internal/xdsclient/pubsub/pubsub.go new file mode 100644 index 000000000000..d876eca0b986 --- /dev/null +++ b/xds/internal/xdsclient/pubsub/pubsub.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package pubsub implements a utility type to maintain resource watchers and +// the updates. +// +// This package is designed to work with the xds resources. It could be made a +// general system that works with all types. +package pubsub + +import ( + "sync" + "time" + + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// Pubsub maintains resource watchers and resource updates. +// +// There can be multiple watchers for the same resource. An update to a resource +// triggers updates to all the existing watchers. Watchers can be canceled at +// any time. +type Pubsub struct { + done *grpcsync.Event + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration + + updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate + // All the following maps are to keep the updates/metadata in a cache. + mu sync.Mutex + ldsWatchers map[string]map[*watchInfo]bool + ldsVersion string // Only used in CSDS. + ldsCache map[string]xdsresource.ListenerUpdate + ldsMD map[string]xdsresource.UpdateMetadata + rdsWatchers map[string]map[*watchInfo]bool + rdsVersion string // Only used in CSDS. 
+ rdsCache map[string]xdsresource.RouteConfigUpdate + rdsMD map[string]xdsresource.UpdateMetadata + cdsWatchers map[string]map[*watchInfo]bool + cdsVersion string // Only used in CSDS. + cdsCache map[string]xdsresource.ClusterUpdate + cdsMD map[string]xdsresource.UpdateMetadata + edsWatchers map[string]map[*watchInfo]bool + edsVersion string // Only used in CSDS. + edsCache map[string]xdsresource.EndpointsUpdate + edsMD map[string]xdsresource.UpdateMetadata +} + +// New creates a new Pubsub. +func New(watchExpiryTimeout time.Duration, logger *grpclog.PrefixLogger) *Pubsub { + pb := &Pubsub{ + done: grpcsync.NewEvent(), + logger: logger, + watchExpiryTimeout: watchExpiryTimeout, + + updateCh: buffer.NewUnbounded(), + ldsWatchers: make(map[string]map[*watchInfo]bool), + ldsCache: make(map[string]xdsresource.ListenerUpdate), + ldsMD: make(map[string]xdsresource.UpdateMetadata), + rdsWatchers: make(map[string]map[*watchInfo]bool), + rdsCache: make(map[string]xdsresource.RouteConfigUpdate), + rdsMD: make(map[string]xdsresource.UpdateMetadata), + cdsWatchers: make(map[string]map[*watchInfo]bool), + cdsCache: make(map[string]xdsresource.ClusterUpdate), + cdsMD: make(map[string]xdsresource.UpdateMetadata), + edsWatchers: make(map[string]map[*watchInfo]bool), + edsCache: make(map[string]xdsresource.EndpointsUpdate), + edsMD: make(map[string]xdsresource.UpdateMetadata), + } + go pb.run() + return pb +} + +// WatchListener registers a watcher for the LDS resource. +// +// It also returns whether this is the first watch for this resource. +func (pb *Pubsub) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (first bool, cancel func() bool) { + wi := &watchInfo{ + c: pb, + rType: xdsresource.ListenerResource, + target: serviceName, + ldsCallback: cb, + } + + wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { + wi.timeout() + }) + return pb.watch(wi) +} + +// WatchRouteConfig register a watcher for the RDS resource. 
+// +// It also returns whether this is the first watch for this resource. +func (pb *Pubsub) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (first bool, cancel func() bool) { + wi := &watchInfo{ + c: pb, + rType: xdsresource.RouteConfigResource, + target: routeName, + rdsCallback: cb, + } + + wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { + wi.timeout() + }) + return pb.watch(wi) +} + +// WatchCluster register a watcher for the CDS resource. +// +// It also returns whether this is the first watch for this resource. +func (pb *Pubsub) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (first bool, cancel func() bool) { + wi := &watchInfo{ + c: pb, + rType: xdsresource.ClusterResource, + target: clusterName, + cdsCallback: cb, + } + + wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { + wi.timeout() + }) + return pb.watch(wi) +} + +// WatchEndpoints registers a watcher for the EDS resource. +// +// It also returns whether this is the first watch for this resource. +func (pb *Pubsub) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (first bool, cancel func() bool) { + wi := &watchInfo{ + c: pb, + rType: xdsresource.EndpointsResource, + target: clusterName, + edsCallback: cb, + } + + wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { + wi.timeout() + }) + return pb.watch(wi) +} + +// Close closes the pubsub. +func (pb *Pubsub) Close() { + if pb.done.HasFired() { + return + } + pb.done.Fire() +} + +// run is a goroutine for all the callbacks. +// +// Callback can be called in watch(), if an item is found in cache. Without this +// goroutine, the callback will be called inline, which might cause a deadlock +// in user's code. Callbacks also cannot be simple `go callback()` because the +// order matters. 
+func (pb *Pubsub) run() { + for { + select { + case t := <-pb.updateCh.Get(): + pb.updateCh.Load() + if pb.done.HasFired() { + return + } + pb.callCallback(t.(*watcherInfoWithUpdate)) + case <-pb.done.Done(): + return + } + } +} diff --git a/xds/internal/xdsclient/pubsub/update.go b/xds/internal/xdsclient/pubsub/update.go new file mode 100644 index 000000000000..9e7b398e1e3d --- /dev/null +++ b/xds/internal/xdsclient/pubsub/update.go @@ -0,0 +1,319 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pubsub + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/proto" +) + +type watcherInfoWithUpdate struct { + wi *watchInfo + update interface{} + err error +} + +// scheduleCallback should only be called by methods of watchInfo, which checks +// for watcher states and maintain consistency. +func (pb *Pubsub) scheduleCallback(wi *watchInfo, update interface{}, err error) { + pb.updateCh.Put(&watcherInfoWithUpdate{ + wi: wi, + update: update, + err: err, + }) +} + +func (pb *Pubsub) callCallback(wiu *watcherInfoWithUpdate) { + pb.mu.Lock() + // Use a closure to capture the callback and type assertion, to save one + // more switch case. + // + // The callback must be called without pb.mu. Otherwise if the callback calls + // another watch() inline, it will cause a deadlock. 
This leaves a small + // window that a watcher's callback could be called after the watcher is + // canceled, and the user needs to take care of it. + var ccb func() + switch wiu.wi.rType { + case xdsresource.ListenerResource: + if s, ok := pb.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] { + ccb = func() { wiu.wi.ldsCallback(wiu.update.(xdsresource.ListenerUpdate), wiu.err) } + } + case xdsresource.RouteConfigResource: + if s, ok := pb.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { + ccb = func() { wiu.wi.rdsCallback(wiu.update.(xdsresource.RouteConfigUpdate), wiu.err) } + } + case xdsresource.ClusterResource: + if s, ok := pb.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { + ccb = func() { wiu.wi.cdsCallback(wiu.update.(xdsresource.ClusterUpdate), wiu.err) } + } + case xdsresource.EndpointsResource: + if s, ok := pb.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] { + ccb = func() { wiu.wi.edsCallback(wiu.update.(xdsresource.EndpointsUpdate), wiu.err) } + } + } + pb.mu.Unlock() + + if ccb != nil { + ccb() + } +} + +// NewListeners is called when there's a new LDS update. +func (pb *Pubsub) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { + pb.mu.Lock() + defer pb.mu.Unlock() + + pb.ldsVersion = metadata.Version + if metadata.ErrState != nil { + pb.ldsVersion = metadata.ErrState.Version + } + for name, uErr := range updates { + if s, ok := pb.ldsWatchers[name]; ok { + if uErr.Err != nil { + // On error, keep previous version for each resource. But update + // status and error. + mdCopy := pb.ldsMD[name] + mdCopy.ErrState = metadata.ErrState + mdCopy.Status = metadata.Status + pb.ldsMD[name] = mdCopy + for wi := range s { + wi.newError(uErr.Err) + } + continue + } + // If we get here, it means that the update is a valid one. Notify + // watchers only if this is a first time update or it is different + // from the one currently cached. 
+ if cur, ok := pb.ldsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { + for wi := range s { + wi.newUpdate(uErr.Update) + } + } + // Sync cache. + pb.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + pb.ldsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = xdsresource.ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + pb.ldsMD[name] = mdCopy + } + } + // Resources not in the new update were removed by the server, so delete + // them. + for name := range pb.ldsCache { + if _, ok := updates[name]; !ok { + // If resource exists in cache, but not in the new update, delete + // the resource from cache, and also send an resource not found + // error to indicate resource removed. + delete(pb.ldsCache, name) + pb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for wi := range pb.ldsWatchers[name] { + wi.resourceNotFound() + } + } + } + // When LDS resource is removed, we don't delete corresponding RDS cached + // data. The RDS watch will be canceled, and cache entry is removed when the + // last watch is canceled. +} + +// NewRouteConfigs is called when there's a new RDS update. +func (pb *Pubsub) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { + pb.mu.Lock() + defer pb.mu.Unlock() + + // If no error received, the status is ACK. + pb.rdsVersion = metadata.Version + if metadata.ErrState != nil { + pb.rdsVersion = metadata.ErrState.Version + } + for name, uErr := range updates { + if s, ok := pb.rdsWatchers[name]; ok { + if uErr.Err != nil { + // On error, keep previous version for each resource. But update + // status and error. 
+ mdCopy := pb.rdsMD[name] + mdCopy.ErrState = metadata.ErrState + mdCopy.Status = metadata.Status + pb.rdsMD[name] = mdCopy + for wi := range s { + wi.newError(uErr.Err) + } + continue + } + // If we get here, it means that the update is a valid one. Notify + // watchers only if this is a first time update or it is different + // from the one currently cached. + if cur, ok := pb.rdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { + for wi := range s { + wi.newUpdate(uErr.Update) + } + } + // Sync cache. + pb.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + pb.rdsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = xdsresource.ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + pb.rdsMD[name] = mdCopy + } + } +} + +// NewClusters is called when there's a new CDS update. +func (pb *Pubsub) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { + pb.mu.Lock() + defer pb.mu.Unlock() + + pb.cdsVersion = metadata.Version + if metadata.ErrState != nil { + pb.cdsVersion = metadata.ErrState.Version + } + for name, uErr := range updates { + if s, ok := pb.cdsWatchers[name]; ok { + if uErr.Err != nil { + // On error, keep previous version for each resource. But update + // status and error. + mdCopy := pb.cdsMD[name] + mdCopy.ErrState = metadata.ErrState + mdCopy.Status = metadata.Status + pb.cdsMD[name] = mdCopy + for wi := range s { + // Send the watcher the individual error, instead of the + // overall combined error from the metadata.ErrState. + wi.newError(uErr.Err) + } + continue + } + // If we get here, it means that the update is a valid one. 
Notify
+			// watchers only if this is a first time update or it is different
+			// from the one currently cached.
+			if cur, ok := pb.cdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {
+				for wi := range s {
+					wi.newUpdate(uErr.Update)
+				}
+			}
+			// Sync cache.
+			pb.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
+			pb.cdsCache[name] = uErr.Update
+			// Set status to ACK, and clear error state. The metadata might be a
+			// NACK metadata because some other resources in the same response
+			// are invalid.
+			mdCopy := metadata
+			mdCopy.Status = xdsresource.ServiceStatusACKed
+			mdCopy.ErrState = nil
+			if metadata.ErrState != nil {
+				mdCopy.Version = metadata.ErrState.Version
+			}
+			pb.cdsMD[name] = mdCopy
+		}
+	}
+	// Resources not in the new update were removed by the server, so delete
+	// them.
+	for name := range pb.cdsCache {
+		if _, ok := updates[name]; !ok {
+			// If resource exists in cache, but not in the new update, delete it
+			// from cache, and also send a resource not found error to indicate
+			// resource removed.
+			delete(pb.cdsCache, name)
+			pb.cdsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
+			for wi := range pb.cdsWatchers[name] {
+				wi.resourceNotFound()
+			}
+		}
+	}
+	// When CDS resource is removed, we don't delete corresponding EDS cached
+	// data. The EDS watch will be canceled, and cache entry is removed when the
+	// last watch is canceled.
+}
+
+// NewEndpoints is called when there's a new EDS update.
+func (pb *Pubsub) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
+	pb.mu.Lock()
+	defer pb.mu.Unlock()
+
+	pb.edsVersion = metadata.Version
+	if metadata.ErrState != nil {
+		pb.edsVersion = metadata.ErrState.Version
+	}
+	for name, uErr := range updates {
+		if s, ok := pb.edsWatchers[name]; ok {
+			if uErr.Err != nil {
+				// On error, keep previous version for each resource.
But update
+				// status and error.
+				mdCopy := pb.edsMD[name]
+				mdCopy.ErrState = metadata.ErrState
+				mdCopy.Status = metadata.Status
+				pb.edsMD[name] = mdCopy
+				for wi := range s {
+					// Send the watcher the individual error, instead of the
+					// overall combined error from the metadata.ErrState.
+					wi.newError(uErr.Err)
+				}
+				continue
+			}
+			// If we get here, it means that the update is a valid one. Notify
+			// watchers only if this is a first time update or it is different
+			// from the one currently cached.
+			if cur, ok := pb.edsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {
+				for wi := range s {
+					wi.newUpdate(uErr.Update)
+				}
+			}
+			// Sync cache.
+			pb.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
+			pb.edsCache[name] = uErr.Update
+			// Set status to ACK, and clear error state. The metadata might be a
+			// NACK metadata because some other resources in the same response
+			// are invalid.
+			mdCopy := metadata
+			mdCopy.Status = xdsresource.ServiceStatusACKed
+			mdCopy.ErrState = nil
+			if metadata.ErrState != nil {
+				mdCopy.Version = metadata.ErrState.Version
+			}
+			pb.edsMD[name] = mdCopy
+		}
+	}
+}
+
+// NewConnectionError is called by the underlying xdsAPIClient when it receives
+// a connection error. The error will be forwarded to all the EDS resource watchers.
+func (pb *Pubsub) NewConnectionError(err error) {
+	pb.mu.Lock()
+	defer pb.mu.Unlock()
+
+	for _, s := range pb.edsWatchers {
+		for wi := range s {
+			wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err))
+		}
+	}
+}
diff --git a/xds/internal/xdsclient/pubsub/watch.go b/xds/internal/xdsclient/pubsub/watch.go
new file mode 100644
index 000000000000..0baa683175dd
--- /dev/null
+++ b/xds/internal/xdsclient/pubsub/watch.go
@@ -0,0 +1,232 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pubsub + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type watchInfoState int + +const ( + watchInfoStateStarted watchInfoState = iota + watchInfoStateRespReceived + watchInfoStateTimeout + watchInfoStateCanceled +) + +// watchInfo holds all the information from a watch() call. +type watchInfo struct { + c *Pubsub + rType xdsresource.ResourceType + target string + + ldsCallback func(xdsresource.ListenerUpdate, error) + rdsCallback func(xdsresource.RouteConfigUpdate, error) + cdsCallback func(xdsresource.ClusterUpdate, error) + edsCallback func(xdsresource.EndpointsUpdate, error) + + expiryTimer *time.Timer + + // mu protects state, and c.scheduleCallback(). + // - No callback should be scheduled after watchInfo is canceled. + // - No timeout error should be scheduled after watchInfo is resp received. 
+ mu sync.Mutex + state watchInfoState +} + +func (wi *watchInfo) newUpdate(update interface{}) { + wi.mu.Lock() + defer wi.mu.Unlock() + if wi.state == watchInfoStateCanceled { + return + } + wi.state = watchInfoStateRespReceived + wi.expiryTimer.Stop() + wi.c.scheduleCallback(wi, update, nil) +} + +func (wi *watchInfo) newError(err error) { + wi.mu.Lock() + defer wi.mu.Unlock() + if wi.state == watchInfoStateCanceled { + return + } + wi.state = watchInfoStateRespReceived + wi.expiryTimer.Stop() + wi.sendErrorLocked(err) +} + +func (wi *watchInfo) resourceNotFound() { + wi.mu.Lock() + defer wi.mu.Unlock() + if wi.state == watchInfoStateCanceled { + return + } + wi.state = watchInfoStateRespReceived + wi.expiryTimer.Stop() + wi.sendErrorLocked(xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "xds: %v target %s not found in received response", wi.rType, wi.target)) +} + +func (wi *watchInfo) timeout() { + wi.mu.Lock() + defer wi.mu.Unlock() + if wi.state == watchInfoStateCanceled || wi.state == watchInfoStateRespReceived { + return + } + wi.state = watchInfoStateTimeout + wi.sendErrorLocked(fmt.Errorf("xds: %v target %s not found, watcher timeout", wi.rType, wi.target)) +} + +// Caller must hold wi.mu. 
+func (wi *watchInfo) sendErrorLocked(err error) { + var ( + u interface{} + ) + switch wi.rType { + case xdsresource.ListenerResource: + u = xdsresource.ListenerUpdate{} + case xdsresource.RouteConfigResource: + u = xdsresource.RouteConfigUpdate{} + case xdsresource.ClusterResource: + u = xdsresource.ClusterUpdate{} + case xdsresource.EndpointsResource: + u = xdsresource.EndpointsUpdate{} + } + wi.c.scheduleCallback(wi, u, err) +} + +func (wi *watchInfo) cancel() { + wi.mu.Lock() + defer wi.mu.Unlock() + if wi.state == watchInfoStateCanceled { + return + } + wi.expiryTimer.Stop() + wi.state = watchInfoStateCanceled +} + +func (pb *Pubsub) watch(wi *watchInfo) (first bool, cancel func() bool) { + pb.mu.Lock() + defer pb.mu.Unlock() + pb.logger.Debugf("new watch for type %v, resource name %v", wi.rType, wi.target) + var ( + watchers map[string]map[*watchInfo]bool + mds map[string]xdsresource.UpdateMetadata + ) + switch wi.rType { + case xdsresource.ListenerResource: + watchers = pb.ldsWatchers + mds = pb.ldsMD + case xdsresource.RouteConfigResource: + watchers = pb.rdsWatchers + mds = pb.rdsMD + case xdsresource.ClusterResource: + watchers = pb.cdsWatchers + mds = pb.cdsMD + case xdsresource.EndpointsResource: + watchers = pb.edsWatchers + mds = pb.edsMD + default: + pb.logger.Errorf("unknown watch type: %v", wi.rType) + return false, nil + } + + var firstWatcher bool + resourceName := wi.target + s, ok := watchers[wi.target] + if !ok { + // If this is a new watcher, will ask lower level to send a new request + // with the resource name. + // + // If this (type+name) is already being watched, will not notify the + // underlying versioned apiClient. 
+		pb.logger.Debugf("first watch for type %v, resource name %v, will send a new xDS request", wi.rType, wi.target)
+		s = make(map[*watchInfo]bool)
+		watchers[resourceName] = s
+		mds[resourceName] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}
+		firstWatcher = true
+	}
+	// No matter what, add the new watcher to the set, so its callback will be
+	// called for new responses.
+	s[wi] = true
+
+	// If the resource is in cache, call the callback with the value.
+	switch wi.rType {
+	case xdsresource.ListenerResource:
+		if v, ok := pb.ldsCache[resourceName]; ok {
+			pb.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v))
+			wi.newUpdate(v)
+		}
+	case xdsresource.RouteConfigResource:
+		if v, ok := pb.rdsCache[resourceName]; ok {
+			pb.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v))
+			wi.newUpdate(v)
+		}
+	case xdsresource.ClusterResource:
+		if v, ok := pb.cdsCache[resourceName]; ok {
+			pb.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v))
+			wi.newUpdate(v)
+		}
+	case xdsresource.EndpointsResource:
+		if v, ok := pb.edsCache[resourceName]; ok {
+			pb.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v))
+			wi.newUpdate(v)
+		}
+	}
+
+	return firstWatcher, func() bool {
+		pb.logger.Debugf("watch for type %v, resource name %v canceled", wi.rType, wi.target)
+		wi.cancel()
+		pb.mu.Lock()
+		defer pb.mu.Unlock()
+		var lastWatcher bool
+		if s := watchers[resourceName]; s != nil {
+			// Remove this watcher, so its callback will not be called in the
+			// future.
+			delete(s, wi)
+			if len(s) == 0 {
+				pb.logger.Debugf("last watch for type %v, resource name %v canceled, will send a new xDS request", wi.rType, wi.target)
+				// If this was the last watcher, also tell xdsv2Client to stop
+				// watching this resource.
+ delete(watchers, resourceName) + delete(mds, resourceName) + lastWatcher = true + // Remove the resource from cache. When a watch for this + // resource is added later, it will trigger a xDS request with + // resource names, and client will receive new xDS responses. + switch wi.rType { + case xdsresource.ListenerResource: + delete(pb.ldsCache, resourceName) + case xdsresource.RouteConfigResource: + delete(pb.rdsCache, resourceName) + case xdsresource.ClusterResource: + delete(pb.cdsCache, resourceName) + case xdsresource.EndpointsResource: + delete(pb.edsCache, resourceName) + } + } + } + return lastWatcher + } +} diff --git a/xds/internal/xdsclient/transport_helper.go b/xds/internal/xdsclient/transport_helper.go index 4c56daaf011b..ba456eb390ea 100644 --- a/xds/internal/xdsclient/transport_helper.go +++ b/xds/internal/xdsclient/transport_helper.go @@ -25,6 +25,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc" "google.golang.org/grpc/internal/buffer" @@ -52,7 +53,7 @@ type VersionedClient interface { // SendRequest constructs and sends out a DiscoveryRequest message specific // to the underlying transport protocol version. - SendRequest(s grpc.ClientStream, resourceNames []string, rType ResourceType, version, nonce, errMsg string) error + SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error // RecvResponse uses the provided stream to receive a response specific to // the underlying transport protocol version. @@ -65,7 +66,7 @@ type VersionedClient interface { // If the provided protobuf message contains a resource type which is not // supported, implementations must return an error of type // ErrResourceTypeUnsupported. 
- HandleResponse(proto.Message) (ResourceType, string, string, error) + HandleResponse(proto.Message) (xdsresource.ResourceType, string, string, error) // NewLoadStatsStream returns a new LRS client stream specific to the underlying // transport protocol version. @@ -116,14 +117,14 @@ type TransportHelper struct { // messages. When the user of this client object cancels a watch call, // these are set to nil. All accesses to the map protected and any value // inside the map should be protected with the above mutex. - watchMap map[ResourceType]map[string]bool + watchMap map[xdsresource.ResourceType]map[string]bool // versionMap contains the version that was acked (the version in the ack // request that was sent on wire). The key is rType, the value is the // version string, becaues the versions for different resource types should // be independent. - versionMap map[ResourceType]string + versionMap map[xdsresource.ResourceType]string // nonceMap contains the nonce from the most recent received response. - nonceMap map[ResourceType]string + nonceMap map[xdsresource.ResourceType]string } // NewTransportHelper creates a new transport helper to be used by versioned @@ -138,9 +139,9 @@ func NewTransportHelper(vc VersionedClient, logger *grpclog.PrefixLogger, backof streamCh: make(chan grpc.ClientStream, 1), sendCh: buffer.NewUnbounded(), - watchMap: make(map[ResourceType]map[string]bool), - versionMap: make(map[ResourceType]string), - nonceMap: make(map[ResourceType]string), + watchMap: make(map[xdsresource.ResourceType]map[string]bool), + versionMap: make(map[xdsresource.ResourceType]string), + nonceMap: make(map[xdsresource.ResourceType]string), } go t.run(ctx) @@ -148,7 +149,7 @@ func NewTransportHelper(vc VersionedClient, logger *grpclog.PrefixLogger, backof } // AddWatch adds a watch for an xDS resource given its type and name. 
-func (t *TransportHelper) AddWatch(rType ResourceType, resourceName string) { +func (t *TransportHelper) AddWatch(rType xdsresource.ResourceType, resourceName string) { t.sendCh.Put(&watchAction{ rType: rType, remove: false, @@ -158,7 +159,7 @@ func (t *TransportHelper) AddWatch(rType ResourceType, resourceName string) { // RemoveWatch cancels an already registered watch for an xDS resource // given its type and name. -func (t *TransportHelper) RemoveWatch(rType ResourceType, resourceName string) { +func (t *TransportHelper) RemoveWatch(rType xdsresource.ResourceType, resourceName string) { t.sendCh.Put(&watchAction{ rType: rType, remove: true, @@ -250,7 +251,7 @@ func (t *TransportHelper) send(ctx context.Context) { var ( target []string - rType ResourceType + rType xdsresource.ResourceType version, nonce, errMsg string send bool ) @@ -292,8 +293,8 @@ func (t *TransportHelper) sendExisting(stream grpc.ClientStream) bool { defer t.mu.Unlock() // Reset the ack versions when the stream restarts. - t.versionMap = make(map[ResourceType]string) - t.nonceMap = make(map[ResourceType]string) + t.versionMap = make(map[xdsresource.ResourceType]string) + t.nonceMap = make(map[xdsresource.ResourceType]string) for rType, s := range t.watchMap { if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, "", "", ""); err != nil { @@ -351,7 +352,7 @@ func mapToSlice(m map[string]bool) []string { } type watchAction struct { - rType ResourceType + rType xdsresource.ResourceType remove bool // Whether this is to remove watch for the resource. resource string } @@ -359,7 +360,7 @@ type watchAction struct { // processWatchInfo pulls the fields needed by the request from a watchAction. // // It also updates the watch map. 
-func (t *TransportHelper) processWatchInfo(w *watchAction) (target []string, rType ResourceType, ver, nonce string) { +func (t *TransportHelper) processWatchInfo(w *watchAction) (target []string, rType xdsresource.ResourceType, ver, nonce string) { t.mu.Lock() defer t.mu.Unlock() @@ -390,7 +391,7 @@ func (t *TransportHelper) processWatchInfo(w *watchAction) (target []string, rTy } type ackAction struct { - rType ResourceType + rType xdsresource.ResourceType version string // NACK if version is an empty string. nonce string errMsg string // Empty unless it's a NACK. @@ -403,13 +404,13 @@ type ackAction struct { // processAckInfo pulls the fields needed by the ack request from a ackAction. // // If no active watch is found for this ack, it returns false for send. -func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType ResourceType, version, nonce string, send bool) { +func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType xdsresource.ResourceType, version, nonce string, send bool) { if ack.stream != stream { // If ACK's stream isn't the current sending stream, this means the ACK // was pushed to queue before the old stream broke, and a new stream has // been started since. Return immediately here so we don't update the // nonce for the new stream. - return nil, UnknownResource, "", "", false + return nil, xdsresource.UnknownResource, "", "", false } rType = ack.rType @@ -429,7 +430,7 @@ func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStrea // canceled while the ackAction is in queue), because there's no resource // name. And if we send a request with empty resource name list, the // server may treat it as a wild card and send us everything. 
- return nil, UnknownResource, "", "", false + return nil, xdsresource.UnknownResource, "", "", false } send = true target = mapToSlice(s) diff --git a/xds/internal/xdsclient/v2/ack_test.go b/xds/internal/xdsclient/v2/ack_test.go index 21191341306b..51716676dd44 100644 --- a/xds/internal/xdsclient/v2/ack_test.go +++ b/xds/internal/xdsclient/v2/ack_test.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/version" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -48,22 +47,22 @@ func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *client, cbLDS, cb cbCDS = testutils.NewChannel() cbEDS = testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { + f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { t.Logf("Received %v callback with {%+v}", rType, d) switch rType { - case xdsclient.ListenerResource: + case xdsresource.ListenerResource: if _, ok := d[goodLDSTarget1]; ok { cbLDS.Send(struct{}{}) } - case xdsclient.RouteConfigResource: + case xdsresource.RouteConfigResource: if _, ok := d[goodRouteName1]; ok { cbRDS.Send(struct{}{}) } - case xdsclient.ClusterResource: + case xdsresource.ClusterResource: if _, ok := d[goodClusterName1]; ok { cbCDS.Send(struct{}{}) } - case xdsclient.EndpointsResource: + case xdsresource.EndpointsResource: if _, ok := d[goodEDSName]; ok { cbEDS.Send(struct{}{}) } @@ -118,16 +117,16 @@ func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion * // startXDS calls watch to send the first request. It then sends a good response // and checks for ack. 
-func startXDS(ctx context.Context, t *testing.T, rType xdsclient.ResourceType, v2c *client, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) { +func startXDS(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, v2c *client, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) { nameToWatch := "" switch rType { - case xdsclient.ListenerResource: + case xdsresource.ListenerResource: nameToWatch = goodLDSTarget1 - case xdsclient.RouteConfigResource: + case xdsresource.RouteConfigResource: nameToWatch = goodRouteName1 - case xdsclient.ClusterResource: + case xdsresource.ClusterResource: nameToWatch = goodClusterName1 - case xdsclient.EndpointsResource: + case xdsresource.EndpointsResource: nameToWatch = goodEDSName } v2c.AddWatch(rType, nameToWatch) @@ -143,7 +142,7 @@ func startXDS(ctx context.Context, t *testing.T, rType xdsclient.ResourceType, v // // It also waits and checks that the ack request contains the given version, and // the generated nonce. -func sendGoodResp(ctx context.Context, t *testing.T, rType xdsclient.ResourceType, fakeServer *fakeserver.Server, ver int, goodResp *xdspb.DiscoveryResponse, wantReq *xdspb.DiscoveryRequest, callbackCh *testutils.Channel) (string, error) { +func sendGoodResp(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, fakeServer *fakeserver.Server, ver int, goodResp *xdspb.DiscoveryResponse, wantReq *xdspb.DiscoveryRequest, callbackCh *testutils.Channel) (string, error) { nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodResp, ver) t.Logf("Good %v response pushed to fakeServer...", rType) @@ -163,16 +162,16 @@ func sendGoodResp(ctx context.Context, t *testing.T, rType xdsclient.ResourceTyp // be nacked, so we expect a request with the previous version (version-1). // // But the nonce in request should be the new nonce. 
-func sendBadResp(ctx context.Context, t *testing.T, rType xdsclient.ResourceType, fakeServer *fakeserver.Server, ver int, wantReq *xdspb.DiscoveryRequest) error { +func sendBadResp(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, fakeServer *fakeserver.Server, ver int, wantReq *xdspb.DiscoveryRequest) error { var typeURL string switch rType { - case xdsclient.ListenerResource: + case xdsresource.ListenerResource: typeURL = version.V2ListenerURL - case xdsclient.RouteConfigResource: + case xdsresource.RouteConfigResource: typeURL = version.V2RouteConfigURL - case xdsclient.ClusterResource: + case xdsresource.ClusterResource: typeURL = version.V2ClusterURL - case xdsclient.EndpointsResource: + case xdsresource.EndpointsResource: typeURL = version.V2EndpointsURL } nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ @@ -209,59 +208,59 @@ func (s) TestV2ClientAck(t *testing.T) { defer cancel() // Start the watch, send a good response, and check for ack. 
- startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { + startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") + if _, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { t.Fatal(err) } versionLDS++ - startXDS(ctx, t, xdsclient.RouteConfigResource, v2c, fakeServer.XDSRequestChan, goodRDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { + startXDS(ctx, t, xdsresource.RouteConfigResource, v2c, fakeServer.XDSRequestChan, goodRDSRequest, "", "") + if _, err := sendGoodResp(ctx, t, xdsresource.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { t.Fatal(err) } versionRDS++ - startXDS(ctx, t, xdsclient.ClusterResource, v2c, fakeServer.XDSRequestChan, goodCDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { + startXDS(ctx, t, xdsresource.ClusterResource, v2c, fakeServer.XDSRequestChan, goodCDSRequest, "", "") + if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { t.Fatal(err) } versionCDS++ - startXDS(ctx, t, xdsclient.EndpointsResource, v2c, fakeServer.XDSRequestChan, goodEDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { + startXDS(ctx, t, xdsresource.EndpointsResource, v2c, fakeServer.XDSRequestChan, goodEDSRequest, "", "") + if _, err := sendGoodResp(ctx, t, 
xdsresource.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { t.Fatal(err) } versionEDS++ // Send a bad response, and check for nack. - if err := sendBadResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSRequest); err != nil { + if err := sendBadResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSRequest); err != nil { t.Fatal(err) } versionLDS++ - if err := sendBadResp(ctx, t, xdsclient.RouteConfigResource, fakeServer, versionRDS, goodRDSRequest); err != nil { + if err := sendBadResp(ctx, t, xdsresource.RouteConfigResource, fakeServer, versionRDS, goodRDSRequest); err != nil { t.Fatal(err) } versionRDS++ - if err := sendBadResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { + if err := sendBadResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { t.Fatal(err) } versionCDS++ - if err := sendBadResp(ctx, t, xdsclient.EndpointsResource, fakeServer, versionEDS, goodEDSRequest); err != nil { + if err := sendBadResp(ctx, t, xdsresource.EndpointsResource, fakeServer, versionEDS, goodEDSRequest); err != nil { t.Fatal(err) } versionEDS++ // send another good response, and check for ack, with the new version. 
- if _, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { + if _, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { t.Fatal(err) } versionLDS++ - if _, err := sendGoodResp(ctx, t, xdsclient.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { + if _, err := sendGoodResp(ctx, t, xdsresource.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { t.Fatal(err) } versionRDS++ - if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { + if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { t.Fatal(err) } versionCDS++ - if _, err := sendGoodResp(ctx, t, xdsclient.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { + if _, err := sendGoodResp(ctx, t, xdsresource.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { t.Fatal(err) } versionEDS++ @@ -282,7 +281,7 @@ func (s) TestV2ClientAckFirstIsNack(t *testing.T) { defer cancel() // Start the watch, send a good response, and check for ack. 
- startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") + startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{{}}, @@ -298,7 +297,7 @@ func (s) TestV2ClientAckFirstIsNack(t *testing.T) { t.Logf("Bad response nacked") versionLDS++ - sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) + sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) versionLDS++ } @@ -317,14 +316,14 @@ func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) { defer cancel() // Start the watch, send a good response, and check for ack. - startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - nonce, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) + startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") + nonce, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) if err != nil { t.Fatal(err) } // Start a new watch. The version in the new request should be the version // from the previous response, thus versionLDS before ++. - startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS), nonce) + startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS), nonce) versionLDS++ // This is an invalid response after the new watch. 
@@ -341,7 +340,7 @@ func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) { t.Logf("Bad response nacked") versionLDS++ - if _, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { + if _, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { t.Fatal(err) } versionLDS++ @@ -362,42 +361,42 @@ func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) { defer cancel() // Start a CDS watch. - v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) + v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, "", "", false); err != nil { t.Fatal(err) } - t.Logf("FakeServer received %v request...", xdsclient.ClusterResource) + t.Logf("FakeServer received %v request...", xdsresource.ClusterResource) // Send a good CDS response, this function waits for the ACK with the right // version. - nonce, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) + nonce, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) if err != nil { t.Fatal(err) } // Cancel the CDS watch, and start a new one. The new watch should have the // version from the response above. - v2c.RemoveWatch(xdsclient.ClusterResource, goodClusterName1) + v2c.RemoveWatch(xdsresource.ClusterResource, goodClusterName1) // Wait for a request with no resource names, because the only watch was // removed. 
emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL} if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) + t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) } - v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) + v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) // Wait for a request with correct resource names and version. if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) + t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) } versionCDS++ // Send a bad response with the next version. - if err := sendBadResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { + if err := sendBadResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { t.Fatal(err) } versionCDS++ // send another good response, and check for ack, with the new version. - if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { + if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { t.Fatal(err) } versionCDS++ @@ -420,25 +419,25 @@ func (s) TestV2ClientAckCancelResponseRace(t *testing.T) { defer cancel() // Start a CDS watch. 
- v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) + v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, "", "", false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) + t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) } - t.Logf("FakeServer received %v request...", xdsclient.ClusterResource) + t.Logf("FakeServer received %v request...", xdsresource.ClusterResource) // send a good response, and check for ack, with the new version. - nonce, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) + nonce, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) if err != nil { t.Fatal(err) } // Cancel the watch before the next response is sent. This mimics the case // watch is canceled while response is on wire. - v2c.RemoveWatch(xdsclient.ClusterResource, goodClusterName1) + v2c.RemoveWatch(xdsresource.ClusterResource, goodClusterName1) // Wait for a request with no resource names, because the only watch was // removed. emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL} if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) + t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) } versionCDS++ @@ -450,7 +449,7 @@ func (s) TestV2ClientAckCancelResponseRace(t *testing.T) { // Send a good response. 
nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodCDSResponse1, versionCDS) - t.Logf("Good %v response pushed to fakeServer...", xdsclient.ClusterResource) + t.Logf("Good %v response pushed to fakeServer...", xdsresource.ClusterResource) // Expect no ACK because watch was canceled. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -461,24 +460,24 @@ func (s) TestV2ClientAckCancelResponseRace(t *testing.T) { // Still expected an callback update, because response was good. if _, err := cbCDS.Receive(ctx); err != nil { - t.Fatalf("Timeout when expecting %v update", xdsclient.ClusterResource) + t.Fatalf("Timeout when expecting %v update", xdsresource.ClusterResource) } // Start a new watch. The new watch should have the nonce from the response // above, and version from the first good response. - v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) + v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS-1), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) + t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) } // Send a bad response with the next version. - if err := sendBadResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { + if err := sendBadResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { t.Fatal(err) } versionCDS++ // send another good response, and check for ack, with the new version. 
- if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { + if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { t.Fatal(err) } versionCDS++ diff --git a/xds/internal/xdsclient/v2/cds_test.go b/xds/internal/xdsclient/v2/cds_test.go index 1c368b5a5c3a..e44ee0cc48b9 100644 --- a/xds/internal/xdsclient/v2/cds_test.go +++ b/xds/internal/xdsclient/v2/cds_test.go @@ -28,7 +28,6 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/version" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -175,7 +174,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, &watchHandleTestcase{ - rType: xdsclient.ClusterResource, + rType: xdsresource.ClusterResource, resourceName: goodClusterName1, responseToHandle: test.cdsResponse, @@ -195,7 +194,7 @@ func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, + f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/client.go b/xds/internal/xdsclient/v2/client.go index 60e87761e852..12d0509dc524 100644 --- a/xds/internal/xdsclient/v2/client.go +++ b/xds/internal/xdsclient/v2/client.go @@ -43,11 +43,11 @@ func init() { } var ( - resourceTypeToURL = map[xdsclient.ResourceType]string{ - xdsclient.ListenerResource: version.V2ListenerURL, - xdsclient.RouteConfigResource: version.V2RouteConfigURL, - xdsclient.ClusterResource: 
version.V2ClusterURL, - xdsclient.EndpointsResource: version.V2EndpointsURL, + resourceTypeToURL = map[xdsresource.ResourceType]string{ + xdsresource.ListenerResource: version.V2ListenerURL, + xdsresource.RouteConfigResource: version.V2RouteConfigURL, + xdsresource.ClusterResource: version.V2ClusterURL, + xdsresource.EndpointsResource: version.V2EndpointsURL, } ) @@ -109,7 +109,7 @@ func (v2c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { // - If this is an ack, version will be the version from the response. // - If this is a nack, version will be the previous acked version (from // versionMap). If there was no ack before, it will be empty. -func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsclient.ResourceType, version, nonce, errMsg string) error { +func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error { stream, ok := s.(adsStream) if !ok { return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s) @@ -151,8 +151,8 @@ func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { return resp, nil } -func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, string, string, error) { - rType := xdsclient.UnknownResource +func (v2c *client) HandleResponse(r proto.Message) (xdsresource.ResourceType, string, string, error) { + rType := xdsresource.UnknownResource resp, ok := r.(*v2xdspb.DiscoveryResponse) if !ok { return rType, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) @@ -167,16 +167,16 @@ func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri switch { case xdsresource.IsListenerResource(url): err = v2c.handleLDSResponse(resp) - rType = xdsclient.ListenerResource + rType = xdsresource.ListenerResource case xdsresource.IsRouteConfigResource(url): err = v2c.handleRDSResponse(resp) - rType = xdsclient.RouteConfigResource + 
rType = xdsresource.RouteConfigResource case xdsresource.IsClusterResource(url): err = v2c.handleCDSResponse(resp) - rType = xdsclient.ClusterResource + rType = xdsresource.ClusterResource case xdsresource.IsEndpointsResource(url): err = v2c.handleEDSResponse(resp) - rType = xdsclient.EndpointsResource + rType = xdsresource.EndpointsResource default: return rType, "", "", xdsclient.ErrResourceTypeUnsupported{ ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/v2/client_test.go index f74e87fb370d..28edae57b0f9 100644 --- a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/v2/client_test.go @@ -288,7 +288,7 @@ var ( ) type watchHandleTestcase struct { - rType xdsclient.ResourceType + rType xdsresource.ResourceType resourceName string responseToHandle *xdspb.DiscoveryResponse @@ -299,7 +299,7 @@ type watchHandleTestcase struct { } type testUpdateReceiver struct { - f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) + f func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) } func (t *testUpdateReceiver) NewListeners(d map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { @@ -307,7 +307,7 @@ func (t *testUpdateReceiver) NewListeners(d map[string]xdsresource.ListenerUpdat for k, v := range d { dd[k] = v } - t.newUpdate(xdsclient.ListenerResource, dd, metadata) + t.newUpdate(xdsresource.ListenerResource, dd, metadata) } func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { @@ -315,7 +315,7 @@ func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsresource.RouteConfi for k, v := range d { dd[k] = v } - t.newUpdate(xdsclient.RouteConfigResource, dd, metadata) + t.newUpdate(xdsresource.RouteConfigResource, dd, metadata) } func (t 
*testUpdateReceiver) NewClusters(d map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { @@ -323,7 +323,7 @@ func (t *testUpdateReceiver) NewClusters(d map[string]xdsresource.ClusterUpdateE for k, v := range d { dd[k] = v } - t.newUpdate(xdsclient.ClusterResource, dd, metadata) + t.newUpdate(xdsresource.ClusterResource, dd, metadata) } func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { @@ -331,12 +331,12 @@ func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsresource.EndpointsUpda for k, v := range d { dd[k] = v } - t.newUpdate(xdsclient.EndpointsResource, dd, metadata) + t.newUpdate(xdsresource.EndpointsResource, dd, metadata) } func (t *testUpdateReceiver) NewConnectionError(error) {} -func (t *testUpdateReceiver) newUpdate(rType xdsclient.ResourceType, d map[string]interface{}, metadata xdsresource.UpdateMetadata) { +func (t *testUpdateReceiver) newUpdate(rType xdsresource.ResourceType, d map[string]interface{}, metadata xdsresource.UpdateMetadata) { t.f(rType, d, metadata) } @@ -360,28 +360,28 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { gotUpdateCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { + f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == test.rType { switch test.rType { - case xdsclient.ListenerResource: + case xdsresource.ListenerResource: dd := make(map[string]xdsresource.ListenerUpdateErrTuple) for n, u := range d { dd[n] = u.(xdsresource.ListenerUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsclient.RouteConfigResource: + case xdsresource.RouteConfigResource: dd := make(map[string]xdsresource.RouteConfigUpdateErrTuple) for n, u := range d { dd[n] = u.(xdsresource.RouteConfigUpdateErrTuple) } 
gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsclient.ClusterResource: + case xdsresource.ClusterResource: dd := make(map[string]xdsresource.ClusterUpdateErrTuple) for n, u := range d { dd[n] = u.(xdsresource.ClusterUpdateErrTuple) } gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsclient.EndpointsResource: + case xdsresource.EndpointsResource: dd := make(map[string]xdsresource.EndpointsUpdateErrTuple) for n, u := range d { dd[n] = u.(xdsresource.EndpointsUpdateErrTuple) @@ -416,13 +416,13 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { // request channel afterwards. var handleXDSResp func(response *xdspb.DiscoveryResponse) error switch test.rType { - case xdsclient.ListenerResource: + case xdsresource.ListenerResource: handleXDSResp = v2c.handleLDSResponse - case xdsclient.RouteConfigResource: + case xdsresource.RouteConfigResource: handleXDSResp = v2c.handleRDSResponse - case xdsclient.ClusterResource: + case xdsresource.ClusterResource: handleXDSResp = v2c.handleCDSResponse - case xdsclient.EndpointsResource: + case xdsresource.EndpointsResource: handleXDSResp = v2c.handleEDSResponse } if err := handleXDSResp(test.responseToHandle); (err != nil) != test.wantHandleErr { @@ -504,7 +504,7 @@ func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { callbackCh := make(chan struct{}) v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) { close(callbackCh) }, + f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) { close(callbackCh) }, }, cc, goodNodeProto, clientBackoff, nil) if err != nil { t.Fatal(err) @@ -512,7 +512,7 @@ func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { defer v2c.Close() t.Log("Started xds v2Client...") - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) + v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer 
cancel() if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { @@ -549,8 +549,8 @@ func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { callbackCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - if rType == xdsclient.ListenerResource { + f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { + if rType == xdsresource.ListenerResource { if u, ok := d[goodLDSTarget1]; ok { t.Logf("Received LDS callback with ldsUpdate {%+v}", u) callbackCh.Send(struct{}{}) @@ -564,7 +564,7 @@ func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { defer v2c.Close() t.Log("Started xds v2Client...") - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) + v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { @@ -621,8 +621,8 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { callbackCh := testutils.NewChannel() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - if rType == xdsclient.ListenerResource { + f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { + if rType == xdsresource.ListenerResource { if u, ok := d[goodLDSTarget1]; ok { t.Logf("Received LDS callback with ldsUpdate {%+v}", u) callbackCh.Send(u) @@ -638,7 +638,7 @@ func (s) TestV2ClientWatchWithoutStream(t *testing.T) { // This watch is started when the xds-ClientConn is in Transient Failure, // and no xds stream is created. - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) + v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) // The watcher should receive an update, with a timeout error in it. 
sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) diff --git a/xds/internal/xdsclient/v2/eds_test.go b/xds/internal/xdsclient/v2/eds_test.go index d0f355e337b7..9503c0397b91 100644 --- a/xds/internal/xdsclient/v2/eds_test.go +++ b/xds/internal/xdsclient/v2/eds_test.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/xds/internal" xtestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/version" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -165,7 +164,7 @@ func (s) TestEDSHandleResponse(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, &watchHandleTestcase{ - rType: xdsclient.EndpointsResource, + rType: xdsresource.EndpointsResource, resourceName: goodEDSName, responseToHandle: test.edsResponse, wantHandleErr: test.wantErr, @@ -184,7 +183,7 @@ func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, + f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/lds_test.go b/xds/internal/xdsclient/v2/lds_test.go index fdb2abeb5132..008b29f4db9d 100644 --- a/xds/internal/xdsclient/v2/lds_test.go +++ b/xds/internal/xdsclient/v2/lds_test.go @@ -24,7 +24,6 @@ import ( v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -163,7 +162,7 @@ func (s) TestLDSHandleResponse(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, 
&watchHandleTestcase{ - rType: xdsclient.ListenerResource, + rType: xdsresource.ListenerResource, resourceName: goodLDSTarget1, responseToHandle: test.ldsResponse, wantHandleErr: test.wantErr, @@ -182,7 +181,7 @@ func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, + f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/v2/rds_test.go index 79e51ab231ea..9ac3b3041ed8 100644 --- a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/v2/rds_test.go @@ -36,7 +36,7 @@ import ( // pre-requirement for RDS, and RDS handle would fail without an existing LDS // watch. func doLDS(ctx context.Context, t *testing.T, v2c xdsclient.APIClient, fakeServer *fakeserver.Server) { - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) + v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { t.Fatalf("Timeout waiting for LDS request: %v", err) } @@ -164,7 +164,7 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, &watchHandleTestcase{ - rType: xdsclient.RouteConfigResource, + rType: xdsresource.RouteConfigResource, resourceName: goodRouteName1, responseToHandle: test.rdsResponse, wantHandleErr: test.wantErr, @@ -183,7 +183,7 @@ func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { defer cleanup() v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, + f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, }, cc, goodNodeProto, 
func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/v3/client.go b/xds/internal/xdsclient/v3/client.go index 21d8809dd33b..622ae2a434f9 100644 --- a/xds/internal/xdsclient/v3/client.go +++ b/xds/internal/xdsclient/v3/client.go @@ -43,11 +43,11 @@ func init() { } var ( - resourceTypeToURL = map[xdsclient.ResourceType]string{ - xdsclient.ListenerResource: version.V3ListenerURL, - xdsclient.RouteConfigResource: version.V3RouteConfigURL, - xdsclient.ClusterResource: version.V3ClusterURL, - xdsclient.EndpointsResource: version.V3EndpointsURL, + resourceTypeToURL = map[xdsresource.ResourceType]string{ + xdsresource.ListenerResource: version.V3ListenerURL, + xdsresource.RouteConfigResource: version.V3RouteConfigURL, + xdsresource.ClusterResource: version.V3ClusterURL, + xdsresource.EndpointsResource: version.V3EndpointsURL, } ) @@ -109,7 +109,7 @@ func (v3c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { // - If this is an ack, version will be the version from the response. // - If this is a nack, version will be the previous acked version (from // versionMap). If there was no ack before, it will be empty. 
-func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsclient.ResourceType, version, nonce, errMsg string) error { +func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error { stream, ok := s.(adsStream) if !ok { return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s) @@ -151,8 +151,8 @@ func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { return resp, nil } -func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, string, string, error) { - rType := xdsclient.UnknownResource +func (v3c *client) HandleResponse(r proto.Message) (xdsresource.ResourceType, string, string, error) { + rType := xdsresource.UnknownResource resp, ok := r.(*v3discoverypb.DiscoveryResponse) if !ok { return rType, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) @@ -167,16 +167,16 @@ func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, stri switch { case xdsresource.IsListenerResource(url): err = v3c.handleLDSResponse(resp) - rType = xdsclient.ListenerResource + rType = xdsresource.ListenerResource case xdsresource.IsRouteConfigResource(url): err = v3c.handleRDSResponse(resp) - rType = xdsclient.RouteConfigResource + rType = xdsresource.RouteConfigResource case xdsresource.IsClusterResource(url): err = v3c.handleCDSResponse(resp) - rType = xdsclient.ClusterResource + rType = xdsresource.ClusterResource case xdsresource.IsEndpointsResource(url): err = v3c.handleEDSResponse(resp) - rType = xdsclient.EndpointsResource + rType = xdsresource.EndpointsResource default: return rType, "", "", xdsclient.ErrResourceTypeUnsupported{ ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), diff --git a/xds/internal/xdsclient/watchers.go b/xds/internal/xdsclient/watchers.go index 639a918627b8..bf9a07c061af 100644 --- 
a/xds/internal/xdsclient/watchers.go +++ b/xds/internal/xdsclient/watchers.go @@ -13,239 +13,29 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ package xdsclient import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -type watchInfoState int - -const ( - watchInfoStateStarted watchInfoState = iota - watchInfoStateRespReceived - watchInfoStateTimeout - watchInfoStateCanceled -) - -// watchInfo holds all the information from a watch() call. -type watchInfo struct { - c *clientImpl - rType ResourceType - target string - - ldsCallback func(xdsresource.ListenerUpdate, error) - rdsCallback func(xdsresource.RouteConfigUpdate, error) - cdsCallback func(xdsresource.ClusterUpdate, error) - edsCallback func(xdsresource.EndpointsUpdate, error) - - expiryTimer *time.Timer - - // mu protects state, and c.scheduleCallback(). - // - No callback should be scheduled after watchInfo is canceled. - // - No timeout error should be scheduled after watchInfo is resp received. 
- mu sync.Mutex - state watchInfoState -} - -func (wi *watchInfo) newUpdate(update interface{}) { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.c.scheduleCallback(wi, update, nil) -} - -func (wi *watchInfo) newError(err error) { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.sendErrorLocked(err) -} - -func (wi *watchInfo) resourceNotFound() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.sendErrorLocked(NewErrorf(ErrorTypeResourceNotFound, "xds: %v target %s not found in received response", wi.rType, wi.target)) -} - -func (wi *watchInfo) timeout() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled || wi.state == watchInfoStateRespReceived { - return - } - wi.state = watchInfoStateTimeout - wi.sendErrorLocked(fmt.Errorf("xds: %v target %s not found, watcher timeout", wi.rType, wi.target)) -} - -// Caller must hold wi.mu. 
-func (wi *watchInfo) sendErrorLocked(err error) { - var ( - u interface{} - ) - switch wi.rType { - case ListenerResource: - u = xdsresource.ListenerUpdate{} - case RouteConfigResource: - u = xdsresource.RouteConfigUpdate{} - case ClusterResource: - u = xdsresource.ClusterUpdate{} - case EndpointsResource: - u = xdsresource.EndpointsUpdate{} - } - wi.c.scheduleCallback(wi, u, err) -} - -func (wi *watchInfo) cancel() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.expiryTimer.Stop() - wi.state = watchInfoStateCanceled -} - -func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { - c.mu.Lock() - defer c.mu.Unlock() - c.logger.Debugf("new watch for type %v, resource name %v", wi.rType, wi.target) - var ( - watchers map[string]map[*watchInfo]bool - mds map[string]xdsresource.UpdateMetadata - ) - switch wi.rType { - case ListenerResource: - watchers = c.ldsWatchers - mds = c.ldsMD - case RouteConfigResource: - watchers = c.rdsWatchers - mds = c.rdsMD - case ClusterResource: - watchers = c.cdsWatchers - mds = c.cdsMD - case EndpointsResource: - watchers = c.edsWatchers - mds = c.edsMD - default: - c.logger.Errorf("unknown watch type: %v", wi.rType) - return nil - } - - resourceName := wi.target - s, ok := watchers[wi.target] - if !ok { - // If this is a new watcher, will ask lower level to send a new request - // with the resource name. - // - // If this (type+name) is already being watched, will not notify the - // underlying versioned apiClient. - c.logger.Debugf("first watch for type %v, resource name %v, will send a new xDS request", wi.rType, wi.target) - s = make(map[*watchInfo]bool) - watchers[resourceName] = s - mds[resourceName] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested} - c.apiClient.AddWatch(wi.rType, resourceName) - } - // No matter what, add the new watcher to the set, so it's callback will be - // call for new responses. 
- s[wi] = true - - // If the resource is in cache, call the callback with the value. - switch wi.rType { - case ListenerResource: - if v, ok := c.ldsCache[resourceName]; ok { - c.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - case RouteConfigResource: - if v, ok := c.rdsCache[resourceName]; ok { - c.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - case ClusterResource: - if v, ok := c.cdsCache[resourceName]; ok { - c.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - case EndpointsResource: - if v, ok := c.edsCache[resourceName]; ok { - c.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - } - - return func() { - c.logger.Debugf("watch for type %v, resource name %v canceled", wi.rType, wi.target) - wi.cancel() - c.mu.Lock() - defer c.mu.Unlock() - if s := watchers[resourceName]; s != nil { - // Remove this watcher, so it's callback will not be called in the - // future. - delete(s, wi) - if len(s) == 0 { - c.logger.Debugf("last watch for type %v, resource name %v canceled, will send a new xDS request", wi.rType, wi.target) - // If this was the last watcher, also tell xdsv2Client to stop - // watching this resource. - delete(watchers, resourceName) - delete(mds, resourceName) - c.apiClient.RemoveWatch(wi.rType, resourceName) - // Remove the resource from cache. When a watch for this - // resource is added later, it will trigger a xDS request with - // resource names, and client will receive new xDS responses. 
- switch wi.rType { - case ListenerResource: - delete(c.ldsCache, resourceName) - case RouteConfigResource: - delete(c.rdsCache, resourceName) - case ClusterResource: - delete(c.cdsCache, resourceName) - case EndpointsResource: - delete(c.edsCache, resourceName) - } - } - } - } -} - // WatchListener uses LDS to discover information about the provided listener. // // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: ListenerResource, - target: serviceName, - ldsCallback: cb, + first, cancelF := c.pubsub.WatchListener(serviceName, cb) + if first { + c.apiClient.AddWatch(xdsresource.ListenerResource, serviceName) + } + return func() { + if cancelF() { + c.apiClient.RemoveWatch(xdsresource.ListenerResource, serviceName) + } } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) } // WatchRouteConfig starts a listener watcher for the service.. @@ -254,17 +44,15 @@ func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.Liste // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: RouteConfigResource, - target: routeName, - rdsCallback: cb, + first, cancelF := c.pubsub.WatchRouteConfig(routeName, cb) + if first { + c.apiClient.AddWatch(xdsresource.RouteConfigResource, routeName) + } + return func() { + if cancelF() { + c.apiClient.RemoveWatch(xdsresource.RouteConfigResource, routeName) + } } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) } // WatchCluster uses CDS to discover information about the provided @@ -277,17 +65,15 @@ func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.Rout // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: ClusterResource, - target: clusterName, - cdsCallback: cb, + first, cancelF := c.pubsub.WatchCluster(clusterName, cb) + if first { + c.apiClient.AddWatch(xdsresource.ClusterResource, clusterName) + } + return func() { + if cancelF() { + c.apiClient.RemoveWatch(xdsresource.ClusterResource, clusterName) + } } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) } // WatchEndpoints uses EDS to discover endpoints in the provided clusterName. @@ -299,15 +85,13 @@ func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.Cluste // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: EndpointsResource, - target: clusterName, - edsCallback: cb, + first, cancelF := c.pubsub.WatchEndpoints(clusterName, cb) + if first { + c.apiClient.AddWatch(xdsresource.EndpointsResource, clusterName) + } + return func() { + if cancelF() { + c.apiClient.RemoveWatch(xdsresource.EndpointsResource, clusterName) + } } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) } diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index 7ddaf08637e4..729990ad95bb 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -56,7 +56,7 @@ func (s) TestClusterWatch(t *testing.T) { cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -122,7 +122,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. 
- if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -194,7 +194,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -205,7 +205,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -250,7 +250,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -269,7 +269,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() - if n, err := apiClient.addWatches[ClusterResource].Receive(sCtx); err != context.DeadlineExceeded { + if n, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(sCtx); 
err != context.DeadlineExceeded { t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) } @@ -311,7 +311,7 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -350,7 +350,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -398,7 +398,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { clusterUpdateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -407,7 +407,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error 
%v", err) } @@ -428,7 +428,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should get an error. - if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(xdsresource.ClusterUpdateErrTuple).Err) != ErrorTypeResourceNotFound { + if u, err := clusterUpdateCh1.Receive(ctx); err != nil || xdsresource.ErrType(u.(xdsresource.ClusterUpdateErrTuple).Err) != xdsresource.ErrorTypeResourceNotFound { t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -482,7 +482,7 @@ func (s) TestClusterWatchNACKError(t *testing.T) { clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -527,11 +527,11 @@ func (s) TestClusterWatchPartialValid(t *testing.T) { }) defer func() { cancelWatch() - if _, err := apiClient.removeWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.removeWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want watch to be canceled, got err: %v", err) } }() - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } updateChs[name] = clusterUpdateCh diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index 3db3c3efa755..eddc17ed1bd6 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -74,7 +74,7 @@ func 
(s) TestEndpointsWatch(t *testing.T) { cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -142,7 +142,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -214,7 +214,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. 
- if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -225,7 +225,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { client.WatchEndpoints(testCDSName+"2", func(update xdsresource.EndpointsUpdate, err error) { endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -270,7 +270,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -287,7 +287,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() - if n, err := apiClient.addWatches[EndpointsResource].Receive(sCtx); err != context.DeadlineExceeded { + if n, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(sCtx); err != context.DeadlineExceeded { t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) } @@ -329,7 +329,7 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) - if _, err := 
apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -368,7 +368,7 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -411,11 +411,11 @@ func (s) TestEndpointsWatchPartialValid(t *testing.T) { }) defer func() { cancelWatch() - if _, err := apiClient.removeWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.removeWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want watch to be canceled, got err: %v", err) } }() - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } updateChs[name] = endpointsUpdateCh diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index cd375639f8ee..3ced8b81f7a0 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -56,7 +56,7 @@ func (s) TestLDSWatch(t *testing.T) { cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -124,7 
+124,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -197,7 +197,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -208,7 +208,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -253,7 +253,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -270,7 +270,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { }) sCtx, sCancel := context.WithTimeout(ctx, 
defaultTestShortTimeout) defer sCancel() - if n, err := apiClient.addWatches[ListenerResource].Receive(sCtx); err != context.DeadlineExceeded { + if n, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(sCtx); err != context.DeadlineExceeded { t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) } @@ -315,7 +315,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { ldsUpdateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } // Another watch for a different name. @@ -323,7 +323,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -344,7 +344,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) // Watcher 1 should get an error. 
- if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(xdsresource.ListenerUpdateErrTuple).Err) != ErrorTypeResourceNotFound { + if u, err := ldsUpdateCh1.Receive(ctx); err != nil || xdsresource.ErrType(u.(xdsresource.ListenerUpdateErrTuple).Err) != xdsresource.ErrorTypeResourceNotFound { t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) } @@ -398,7 +398,7 @@ func (s) TestListenerWatchNACKError(t *testing.T) { ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -441,11 +441,11 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { }) defer func() { cancelWatch() - if _, err := apiClient.removeWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.removeWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want watch to be canceled, got err: %v", err) } }() - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } updateChs[name] = ldsUpdateCh @@ -493,7 +493,7 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } diff --git a/xds/internal/xdsclient/watchers_route_test.go 
b/xds/internal/xdsclient/watchers_route_test.go index 9e4e7e43611c..d01e03911eae 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -56,7 +56,7 @@ func (s) TestRDSWatch(t *testing.T) { cancelWatch := client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -131,7 +131,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -211,7 +211,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { if i == 0 { // A new watch is registered on the underlying API client only for // the first iteration because we are using the same resource name. 
- if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } } @@ -222,7 +222,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { client.WatchRouteConfig(testRDSName+"2", func(update xdsresource.RouteConfigUpdate, err error) { rdsUpdateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -281,7 +281,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -305,7 +305,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { }) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() - if n, err := apiClient.addWatches[RouteConfigResource].Receive(sCtx); err != context.DeadlineExceeded { + if n, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(sCtx); err != context.DeadlineExceeded { t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) } @@ -347,7 +347,7 @@ func (s) TestRouteWatchNACKError(t *testing.T) { rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) }) defer cancelWatch() - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := 
apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } @@ -390,11 +390,11 @@ func (s) TestRouteWatchPartialValid(t *testing.T) { }) defer func() { cancelWatch() - if _, err := apiClient.removeWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := apiClient.removeWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want watch to be canceled, got err: %v", err) } }() - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { + if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } updateChs[name] = rdsUpdateCh diff --git a/xds/internal/xdsclient/errors.go b/xds/internal/xdsclient/xdsresource/errors.go similarity index 98% rename from xds/internal/xdsclient/errors.go rename to xds/internal/xdsclient/xdsresource/errors.go index 4d6cdaaf9b4a..cfaf63b30396 100644 --- a/xds/internal/xdsclient/errors.go +++ b/xds/internal/xdsclient/xdsresource/errors.go @@ -16,7 +16,7 @@ * */ -package xdsclient +package xdsresource import "fmt" diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go index 3e01d77e4e02..bb7e9c1520e9 100644 --- a/xds/internal/xdsclient/xdsresource/type.go +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -21,6 +21,7 @@ import ( "time" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/protobuf/types/known/anypb" ) // UpdateValidatorFunc performs validations on update structs using @@ -105,3 +106,45 @@ type UpdateErrorMetadata struct { // Timestamp is when the NACKed response was received. Timestamp time.Time } + +// UpdateWithMD contains the raw message of the update and the metadata, +// including version, raw message, timestamp. +// +// This is to be used for config dump and CSDS, not directly by users (like +// resolvers/balancers). 
+type UpdateWithMD struct { + MD UpdateMetadata + Raw *anypb.Any +} + +// ResourceType identifies resources in a transport protocol agnostic way. These +// will be used in transport version agnostic code, while the versioned API +// clients will map these to appropriate version URLs. +type ResourceType int + +// Version agnostic resource type constants. +const ( + UnknownResource ResourceType = iota + ListenerResource + HTTPConnManagerResource + RouteConfigResource + ClusterResource + EndpointsResource +) + +func (r ResourceType) String() string { + switch r { + case ListenerResource: + return "ListenerResource" + case HTTPConnManagerResource: + return "HTTPConnManagerResource" + case RouteConfigResource: + return "RouteConfigResource" + case ClusterResource: + return "ClusterResource" + case EndpointsResource: + return "EndpointsResource" + default: + return "UnknownResource" + } +} diff --git a/xds/server_test.go b/xds/server_test.go index 492a2fa6d6ed..e307beee754d 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -430,7 +430,7 @@ func (s) TestServeSuccess(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. 
- client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "LDS resource not found")) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "LDS resource not found")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { From 14ebd917f2fc0a02cb71142de11ff1f3806b9f10 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Wed, 10 Nov 2021 03:18:21 +0800 Subject: [PATCH 328/998] lint: fix some unused parameter issues (#4956) --- authz/rbac_translator.go | 14 +++++-------- .../rls/internal/adaptive/adaptive_test.go | 2 +- balancer/rls/internal/client.go | 2 +- balancer/rls/internal/client_test.go | 11 ++++------ benchmark/benchmain/main.go | 4 ++-- benchmark/latency/latency_test.go | 2 +- binarylog/binarylog_end2end_test.go | 13 +++++------- internal/channelz/funcs.go | 20 +++++++++---------- internal/profiling/buffer/buffer.go | 2 +- internal/serviceconfig/serviceconfig_test.go | 6 +++--- internal/transport/flowcontrol.go | 4 +--- internal/transport/http2_client.go | 2 +- internal/transport/proxy.go | 4 ++-- internal/transport/proxy_test.go | 5 +---- internal/transport/transport_test.go | 8 ++++---- stats/stats_test.go | 8 ++++---- test/balancer_test.go | 6 ++---- .../cdsbalancer/cluster_handler_test.go | 16 +++++++-------- xds/internal/test/e2e/controlplane.go | 2 +- xds/internal/test/e2e/e2e.go | 14 ++++--------- xds/internal/test/e2e/e2e_test.go | 2 +- 21 files changed, 62 insertions(+), 85 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index 4a790b1a702c..ba49b0c5250f 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -154,21 +154,21 @@ func parsePrincipalNames(principalNames []string) []*v3rbacpb.Principal { return ps } -func parsePeer(source peer) 
(*v3rbacpb.Principal, error) { +func parsePeer(source peer) *v3rbacpb.Principal { if source.Principals == nil { return &v3rbacpb.Principal{ Identifier: &v3rbacpb.Principal_Any{ Any: true, }, - }, nil + } } if len(source.Principals) == 0 { return &v3rbacpb.Principal{ Identifier: &v3rbacpb.Principal_Authenticated_{ Authenticated: &v3rbacpb.Principal_Authenticated{}, - }}, nil + }} } - return principalOr(parsePrincipalNames(source.Principals)), nil + return principalOr(parsePrincipalNames(source.Principals)) } func parsePaths(paths []string) []*v3rbacpb.Permission { @@ -257,17 +257,13 @@ func parseRules(rules []rule, prefixName string) (map[string]*v3rbacpb.Policy, e if rule.Name == "" { return policies, fmt.Errorf(`%d: "name" is not present`, i) } - principal, err := parsePeer(rule.Source) - if err != nil { - return nil, fmt.Errorf("%d: %v", i, err) - } permission, err := parseRequest(rule.Request) if err != nil { return nil, fmt.Errorf("%d: %v", i, err) } policyName := prefixName + "_" + rule.Name policies[policyName] = &v3rbacpb.Policy{ - Principals: []*v3rbacpb.Principal{principal}, + Principals: []*v3rbacpb.Principal{parsePeer(rule.Source)}, Permissions: []*v3rbacpb.Permission{permission}, } } diff --git a/balancer/rls/internal/adaptive/adaptive_test.go b/balancer/rls/internal/adaptive/adaptive_test.go index 40a846a388a4..2205b533eec7 100644 --- a/balancer/rls/internal/adaptive/adaptive_test.go +++ b/balancer/rls/internal/adaptive/adaptive_test.go @@ -156,7 +156,7 @@ func TestShouldThrottleOptions(t *testing.T) { for _, test := range testcases { t.Run(test.desc, func(t *testing.T) { m.SetNanos(0) - th := newWithArgs(time.Duration(time.Nanosecond), 1, test.ratioForAccepts, test.requestsPadding) + th := newWithArgs(time.Nanosecond, 1, test.ratioForAccepts, test.requestsPadding) for i, response := range responses { if response != E { th.RegisterBackendResponse(response == T) diff --git a/balancer/rls/internal/client.go b/balancer/rls/internal/client.go index 
c233e6e35289..4e263adc2590 100644 --- a/balancer/rls/internal/client.go +++ b/balancer/rls/internal/client.go @@ -64,7 +64,7 @@ type lookupCallback func(targets []string, headerData string, err error) // lookup starts a RouteLookup RPC in a separate goroutine and returns the // results (and error, if any) in the provided callback. -func (c *rlsClient) lookup(path string, keyMap map[string]string, cb lookupCallback) { +func (c *rlsClient) lookup(keyMap map[string]string, cb lookupCallback) { go func() { ctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout) resp, err := c.stub.RouteLookup(ctx, &rlspb.RouteLookupRequest{ diff --git a/balancer/rls/internal/client_test.go b/balancer/rls/internal/client_test.go index f5289f7a1ac0..6c8af6a6ab82 100644 --- a/balancer/rls/internal/client_test.go +++ b/balancer/rls/internal/client_test.go @@ -71,7 +71,7 @@ func (s) TestLookupFailure(t *testing.T) { rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) errCh := testutils.NewChannel() - rlsClient.lookup("", nil, func(targets []string, headerData string, err error) { + rlsClient.lookup(nil, func(targets []string, headerData string, err error) { if err == nil { errCh.Send(errors.New("rlsClient.lookup() succeeded, should have failed")) return @@ -101,7 +101,7 @@ func (s) TestLookupDeadlineExceeded(t *testing.T) { rlsClient := newRLSClient(cc, defaultDialTarget, 100*time.Millisecond) errCh := testutils.NewChannel() - rlsClient.lookup("", nil, func(_ []string, _ string, err error) { + rlsClient.lookup(nil, func(_ []string, _ string, err error) { if st, ok := status.FromError(err); !ok || st.Code() != codes.DeadlineExceeded { errCh.Send(fmt.Errorf("rlsClient.lookup() returned error: %v, want %v", err, codes.DeadlineExceeded)) return @@ -121,10 +121,7 @@ func (s) TestLookupSuccess(t *testing.T) { server, cc, cleanup := setup(t) defer cleanup() - const ( - rlsReqPath = "/service/method" - wantHeaderData = "headerData" - ) + const wantHeaderData = 
"headerData" rlsReqKeyMap := map[string]string{ "k1": "v1", @@ -141,7 +138,7 @@ func (s) TestLookupSuccess(t *testing.T) { rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) errCh := testutils.NewChannel() - rlsClient.lookup(rlsReqPath, rlsReqKeyMap, func(targets []string, hd string, err error) { + rlsClient.lookup(rlsReqKeyMap, func(targets []string, hd string, err error) { if err != nil { errCh.Send(fmt.Errorf("rlsClient.Lookup() failed: %v", err)) return diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 55427035f41b..b3655820c1cf 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -206,7 +206,7 @@ func streamBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats runBenchmark(caller, start, stop, bf, s, workloadsStreaming) } -func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features, s *stats.Stats) { +func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features) { var sender rpcSendFunc var recver rpcRecvFunc var cleanup rpcCleanupFunc @@ -771,7 +771,7 @@ func main() { streamBenchmark(start, stop, bf, s) } if opts.rModes.unconstrained { - unconstrainedStreamBenchmark(start, ucStop, bf, s) + unconstrainedStreamBenchmark(start, ucStop, bf) } } after(opts, s.GetResults()) diff --git a/benchmark/latency/latency_test.go b/benchmark/latency/latency_test.go index 5d08b90b4fa2..787373ca30be 100644 --- a/benchmark/latency/latency_test.go +++ b/benchmark/latency/latency_test.go @@ -86,7 +86,7 @@ func (s) TestConn(t *testing.T) { wantSleeps(latency) // Connection creation delay. // 1 kbps = 128 Bps. Divides evenly by 1 second using nanos. 
- byteLatency := time.Duration(time.Second / 128) + byteLatency := time.Second / 128 write := func(b []byte) { n, err := c.Write(b) diff --git a/binarylog/binarylog_end2end_test.go b/binarylog/binarylog_end2end_test.go index 61eeb68edae8..adf2d1f76047 100644 --- a/binarylog/binarylog_end2end_test.go +++ b/binarylog/binarylog_end2end_test.go @@ -252,13 +252,10 @@ func (te *test) tearDown() { te.srv.Stop() } -type testConfig struct { -} - // newTest returns a new test using the provided testing.T and // environment. It is returned with default values. Tests should // modify it before calling its startServer and clientConn methods. -func newTest(t *testing.T, tc *testConfig) *test { +func newTest(t *testing.T) *test { te := &test{ t: t, } @@ -794,8 +791,8 @@ func (ed *expectedData) toServerLogEntries() []*pb.GrpcLogEntry { return ret } -func runRPCs(t *testing.T, tc *testConfig, cc *rpcConfig) *expectedData { - te := newTest(t, tc) +func runRPCs(t *testing.T, cc *rpcConfig) *expectedData { + te := newTest(t) te.startServer(&testServer{te: te}) defer te.tearDown() @@ -869,7 +866,7 @@ func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) { func testClientBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() - expect := runRPCs(t, &testConfig{}, c) + expect := runRPCs(t, c) want := expect.toClientLogEntries() var got []*pb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. @@ -969,7 +966,7 @@ func (s) TestClientBinaryLogCancel(t *testing.T) { func testServerBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() - expect := runRPCs(t, &testConfig{}, c) + expect := runRPCs(t, c) want := expect.toServerLogEntries() var got []*pb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. 
diff --git a/internal/channelz/funcs.go b/internal/channelz/funcs.go index 6d5760d95146..cd1807543eee 100644 --- a/internal/channelz/funcs.go +++ b/internal/channelz/funcs.go @@ -204,9 +204,9 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) + db.get().addChannel(id, cn, true, pid) } else { - db.get().addChannel(id, cn, false, pid, ref) + db.get().addChannel(id, cn, false, pid) } return id } @@ -228,7 +228,7 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 { pid: pid, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid, ref) + db.get().addSubChannel(id, sc, pid) return id } @@ -258,7 +258,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { } id := idGen.genID() ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) + db.get().addListenSocket(id, ls, pid) return id } @@ -273,11 +273,11 @@ func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { } id := idGen.genID() ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) + db.get().addNormalSocket(id, ns, pid) return id } -// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. 
func RemoveEntry(id int64) { db.get().removeEntry(id) @@ -333,7 +333,7 @@ func (c *channelMap) addServer(id int64, s *server) { c.mu.Unlock() } -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { c.mu.Lock() cn.cm = c cn.trace.cm = c @@ -346,7 +346,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in c.mu.Unlock() } -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { c.mu.Lock() sc.cm = c sc.trace.cm = c @@ -355,7 +355,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri c.mu.Unlock() } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls @@ -363,7 +363,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref c.mu.Unlock() } -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns diff --git a/internal/profiling/buffer/buffer.go b/internal/profiling/buffer/buffer.go index f4cd4201de11..8bf89c901458 100644 --- a/internal/profiling/buffer/buffer.go +++ b/internal/profiling/buffer/buffer.go @@ -244,7 +244,7 @@ func (cb *CircularBuffer) Drain() []interface{} { } var wg sync.WaitGroup - wg.Add(int(len(qs))) + wg.Add(len(qs)) for i := 0; i < len(qs); i++ { go func(qi int) { qs[qi].drainWait() diff --git a/internal/serviceconfig/serviceconfig_test.go b/internal/serviceconfig/serviceconfig_test.go index 770ee2efeb83..3a725685db01 100644 --- a/internal/serviceconfig/serviceconfig_test.go +++ 
b/internal/serviceconfig/serviceconfig_test.go @@ -148,7 +148,7 @@ func TestBalancerConfigMarshalJSON(t *testing.T) { Name: testBalancerBuilderName, Config: testBalancerConfig, }, - wantJSON: fmt.Sprintf(`[{"test-bb": {"check":true}}]`), + wantJSON: `[{"test-bb": {"check":true}}]`, }, { name: "OK config is nil", @@ -156,7 +156,7 @@ func TestBalancerConfigMarshalJSON(t *testing.T) { Name: testBalancerBuilderNotParserName, Config: nil, // nil should be marshalled to an empty config "{}". }, - wantJSON: fmt.Sprintf(`[{"test-bb-not-parser": {}}]`), + wantJSON: `[{"test-bb-not-parser": {}}]`, }, } for _, tt := range tests { @@ -172,7 +172,7 @@ func TestBalancerConfigMarshalJSON(t *testing.T) { var bc BalancerConfig if err := bc.UnmarshalJSON(b); err != nil { - t.Errorf("failed to mnmarshal: %v", err) + t.Errorf("failed to unmarshal: %v", err) } if !cmp.Equal(bc, tt.bc) { t.Errorf("diff: %v", cmp.Diff(bc, tt.bc)) diff --git a/internal/transport/flowcontrol.go b/internal/transport/flowcontrol.go index f262edd8ecda..97198c515889 100644 --- a/internal/transport/flowcontrol.go +++ b/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index c763b23aad04..3a77a782979d 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1557,7 +1557,7 @@ func minTime(a, b time.Duration) time.Duration { return b } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. 
func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. diff --git a/internal/transport/proxy.go b/internal/transport/proxy.go index a662bf39a6c8..415961987870 100644 --- a/internal/transport/proxy.go +++ b/internal/transport/proxy.go @@ -37,7 +37,7 @@ var ( httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // connection. func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { newAddr := addr - proxyURL, err := mapAddress(ctx, addr) + proxyURL, err := mapAddress(addr) if err != nil { return nil, err } diff --git a/internal/transport/proxy_test.go b/internal/transport/proxy_test.go index 404354a19dbb..8abee1e7b383 100644 --- a/internal/transport/proxy_test.go +++ b/internal/transport/proxy_test.go @@ -211,11 +211,8 @@ func (s) TestMapAddressEnv(t *testing.T) { } defer overwrite(hpfe)() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // envTestAddr should be handled by ProxyFromEnvironment. 
- got, err := mapAddress(ctx, envTestAddr) + got, err := mapAddress(envTestAddr) if err != nil { t.Error(err) } diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index e0a9536d8f9e..ec864afb6e96 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -194,12 +194,12 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { } } -func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStreamEncodingRequiredStatus(s *Stream) { // raw newline is not accepted by http2 framer so it must be encoded. h.t.WriteStatus(s, encodingTestStatus) } -func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStreamInvalidHeaderField(s *Stream) { headerFields := []hpack.HeaderField{} headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField}) h.t.controlBuf.put(&headerFrame{ @@ -356,13 +356,13 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT }) case encodingRequiredStatus: go transport.HandleStreams(func(s *Stream) { - go h.handleStreamEncodingRequiredStatus(t, s) + go h.handleStreamEncodingRequiredStatus(s) }, func(ctx context.Context, method string) context.Context { return ctx }) case invalidHeaderField: go transport.HandleStreams(func(s *Stream) { - go h.handleStreamInvalidHeaderField(t, s) + go h.handleStreamInvalidHeaderField(s) }, func(ctx context.Context, method string) context.Context { return ctx }) diff --git a/stats/stats_test.go b/stats/stats_test.go index dfc6edfc3d36..234919e86c8d 100644 --- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -753,7 +753,7 @@ func checkEnd(t *testing.T, d *gotData, e *expectedData) { } } -func checkConnBegin(t *testing.T, d *gotData, e *expectedData) { +func checkConnBegin(t *testing.T, d *gotData) { var ( ok bool st 
*stats.ConnBegin @@ -767,7 +767,7 @@ func checkConnBegin(t *testing.T, d *gotData, e *expectedData) { st.IsClient() // TODO remove this. } -func checkConnEnd(t *testing.T, d *gotData, e *expectedData) { +func checkConnEnd(t *testing.T, d *gotData) { var ( ok bool st *stats.ConnEnd @@ -815,9 +815,9 @@ func checkConnStats(t *testing.T, got []*gotData) { t.Fatalf("got %v stats, want even positive number", len(got)) } // The first conn stats must be a ConnBegin. - checkConnBegin(t, got[0], nil) + checkConnBegin(t, got[0]) // The last conn stats must be a ConnEnd. - checkConnEnd(t, got[len(got)-1], nil) + checkConnEnd(t, got[len(got)-1]) } func checkServerStats(t *testing.T, got []*gotData, expect *expectedData, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { diff --git a/test/balancer_test.go b/test/balancer_test.go index e2fa4cf31d0c..47332db7975e 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -299,12 +299,10 @@ func init() { } func (s) TestDoneLoads(t *testing.T) { - for _, e := range listTestEnv() { - testDoneLoads(t, e) - } + testDoneLoads(t) } -func testDoneLoads(t *testing.T, e env) { +func testDoneLoads(t *testing.T) { b := &testBalancer{} balancer.Register(b) diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index 4a00fe7d542a..b58f06d0eb27 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -36,7 +36,7 @@ const ( // setupTests creates a clusterHandler with a fake xds client for control over // xds client. 
-func setupTests(t *testing.T) (*clusterHandler, *fakeclient.Client) { +func setupTests() (*clusterHandler, *fakeclient.Client) { xdsC := fakeclient.NewClient() ch := newClusterHandler(&cdsBalancer{xdsClient: xdsC}) return ch, xdsC @@ -83,7 +83,7 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ch, fakeClient := setupTests(t) + ch, fakeClient := setupTests() // When you first update the root cluster, it should hit the code // path which will start a cluster node for that root. Updating the // root cluster logically represents a ping from a ClientConn. @@ -170,7 +170,7 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ch, fakeClient := setupTests(t) + ch, fakeClient := setupTests() ch.updateRootCluster(test.clusterName) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -220,7 +220,7 @@ func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { // the children, and at the end there should be a successful clusterUpdate // written to the update buffer to send back to CDS. func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { - ch, fakeClient := setupTests(t) + ch, fakeClient := setupTests() ch.updateRootCluster(aggregateClusterService) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -342,7 +342,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { // This initial code is the same as the test for the aggregate success case, // except without validations. This will get this test to the point where it // can change one of the children. 
- ch, fakeClient := setupTests(t) + ch, fakeClient := setupTests() ch.updateRootCluster(aggregateClusterService) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -449,7 +449,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) { // This initial code is the same as the test for the aggregate success case, // except without validations. This will get this test to the point where it // can update the root cluster to one of type EDS. - ch, fakeClient := setupTests(t) + ch, fakeClient := setupTests() ch.updateRootCluster(aggregateClusterService) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -528,7 +528,7 @@ func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) { // TestHandleRespInvokedWithError tests that when handleResp is invoked with an // error, that the error is successfully written to the update buffer. func (s) TestHandleRespInvokedWithError(t *testing.T) { - ch, fakeClient := setupTests(t) + ch, fakeClient := setupTests() ch.updateRootCluster(edsService) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -555,7 +555,7 @@ func (s) TestHandleRespInvokedWithError(t *testing.T) { func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { // Getting the test to the point where there's a root cluster which is a eds // leaf. 
- ch, fakeClient := setupTests(t) + ch, fakeClient := setupTests() ch.updateRootCluster(edsService2) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go index 247991b83d33..b663cb31f7e3 100644 --- a/xds/internal/test/e2e/controlplane.go +++ b/xds/internal/test/e2e/controlplane.go @@ -31,7 +31,7 @@ type controlPlane struct { bootstrapContent string } -func newControlPlane(testName string) (*controlPlane, error) { +func newControlPlane() (*controlPlane, error) { // Spin up an xDS management server on a local port. server, err := e2e.StartManagementServer() if err != nil { diff --git a/xds/internal/test/e2e/e2e.go b/xds/internal/test/e2e/e2e.go index ade6339bf534..3c388fbf0d93 100644 --- a/xds/internal/test/e2e/e2e.go +++ b/xds/internal/test/e2e/e2e.go @@ -32,12 +32,12 @@ import ( testpb "google.golang.org/grpc/interop/grpc_testing" ) -func cmd(path string, logger io.Writer, args []string, env []string) (*exec.Cmd, error) { +func cmd(path string, logger io.Writer, args []string, env []string) *exec.Cmd { cmd := exec.Command(path, args...) cmd.Env = append(os.Environ(), env...) cmd.Stdout = logger cmd.Stderr = logger - return cmd, nil + return cmd } const ( @@ -53,7 +53,7 @@ type client struct { // newClient create a client with the given target and bootstrap content. func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ...string) (*client, error) { - cmd, err := cmd( + cmd := cmd( binaryPath, logger, append([]string{ @@ -68,9 +68,6 @@ func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ... "GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted. 
}, ) - if err != nil { - return nil, fmt.Errorf("failed to run client cmd: %v", err) - } cmd.Start() cc, err := grpc.Dial(fmt.Sprintf("localhost:%d", clientStatsPort), grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.WaitForReady(true))) @@ -150,7 +147,7 @@ func newServers(hostnamePrefix, binaryPath, bootstrap string, logger io.Writer, }() for i := 0; i < count; i++ { port := serverPort + i - cmd, err := cmd( + cmd := cmd( binaryPath, logger, []string{ @@ -163,9 +160,6 @@ func newServers(hostnamePrefix, binaryPath, bootstrap string, logger io.Writer, "GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted., }, ) - if err != nil { - return nil, fmt.Errorf("failed to run server cmd: %v", err) - } cmd.Start() ret = append(ret, &server{cmd: cmd, port: port}) } diff --git a/xds/internal/test/e2e/e2e_test.go b/xds/internal/test/e2e/e2e_test.go index 6984566db2e7..ca547a522f24 100644 --- a/xds/internal/test/e2e/e2e_test.go +++ b/xds/internal/test/e2e/e2e_test.go @@ -58,7 +58,7 @@ func setup(t *testing.T, opts testOpts) (*controlPlane, *client, []*server) { backendCount = opts.backendCount } - cp, err := newControlPlane(opts.testName) + cp, err := newControlPlane() if err != nil { t.Fatalf("failed to start control-plane: %v", err) } From 714ba8d517a907d98a96c4bf7bc2fdbad4ae780b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 9 Nov 2021 11:59:10 -0800 Subject: [PATCH 329/998] xds: move balancergroup and weightedtarget our of xds directory (#4966) --- .../weightedtarget/logging.go | 0 .../weightedaggregator/aggregator.go | 0 .../weightedtarget/weightedtarget.go | 11 +- .../weightedtarget/weightedtarget_config.go | 0 .../weightedtarget_config_test.go | 40 +- .../weightedtarget/weightedtarget_test.go | 1220 +++++++++++++++++ .../balancergroup/balancergroup.go | 0 internal/balancergroup/balancergroup_test.go | 511 +++++++ .../balancergroup/balancerstateaggregator.go | 0 .../testutils/balancer.go | 13 - {xds/internal => 
internal}/testutils/wrr.go | 0 xds/internal/balancer/balancer.go | 2 +- .../balancergroup/balancergroup_test.go | 925 ------------- .../cdsbalancer/cdsbalancer_security_test.go | 7 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 9 +- .../balancer/clusterimpl/balancer_test.go | 2 +- .../balancer/clusterimpl/config_test.go | 2 +- .../balancer/clustermanager/clustermanager.go | 2 +- .../clustermanager/clustermanager_test.go | 7 +- .../balancer/clustermanager/config_test.go | 2 +- .../balancer/clusterresolver/configbuilder.go | 2 +- .../clusterresolver/configbuilder_test.go | 2 +- .../balancer/clusterresolver/eds_impl_test.go | 41 +- .../balancer/clusterresolver/priority_test.go | 51 +- .../balancer/clusterresolver/testutil_test.go | 2 +- xds/internal/balancer/priority/balancer.go | 2 +- .../balancer/priority/balancer_test.go | 4 +- .../priority/ignore_resolve_now_test.go | 5 +- xds/internal/balancer/ringhash/picker_test.go | 2 +- .../balancer/ringhash/ringhash_test.go | 2 +- .../weightedtarget/weightedtarget_test.go | 326 ----- xds/internal/resolver/xds_resolver_test.go | 7 +- xds/internal/testutils/balancer_test.go | 21 +- xds/internal/testutils/testutils.go | 19 + 34 files changed, 1867 insertions(+), 1372 deletions(-) rename {xds/internal/balancer => balancer}/weightedtarget/logging.go (100%) rename {xds/internal/balancer => balancer}/weightedtarget/weightedaggregator/aggregator.go (100%) rename {xds/internal/balancer => balancer}/weightedtarget/weightedtarget.go (91%) rename {xds/internal/balancer => balancer}/weightedtarget/weightedtarget_config.go (100%) rename {xds/internal/balancer => balancer}/weightedtarget/weightedtarget_config_test.go (61%) create mode 100644 balancer/weightedtarget/weightedtarget_test.go rename {xds/internal/balancer => internal}/balancergroup/balancergroup.go (100%) create mode 100644 internal/balancergroup/balancergroup_test.go rename {xds/internal/balancer => internal}/balancergroup/balancerstateaggregator.go (100%) rename 
{xds/internal => internal}/testutils/balancer.go (96%) rename {xds/internal => internal}/testutils/wrr.go (100%) delete mode 100644 xds/internal/balancer/balancergroup/balancergroup_test.go delete mode 100644 xds/internal/balancer/weightedtarget/weightedtarget_test.go create mode 100644 xds/internal/testutils/testutils.go diff --git a/xds/internal/balancer/weightedtarget/logging.go b/balancer/weightedtarget/logging.go similarity index 100% rename from xds/internal/balancer/weightedtarget/logging.go rename to balancer/weightedtarget/logging.go diff --git a/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go similarity index 100% rename from xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go rename to balancer/weightedtarget/weightedaggregator/aggregator.go diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/balancer/weightedtarget/weightedtarget.go similarity index 91% rename from xds/internal/balancer/weightedtarget/weightedtarget.go rename to balancer/weightedtarget/weightedtarget.go index d036c9b5bd5b..b6fa532b5120 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/balancer/weightedtarget/weightedtarget.go @@ -17,6 +17,8 @@ */ // Package weightedtarget implements the weighted_target balancer. +// +// All APIs in this package are experimental. 
package weightedtarget import ( @@ -24,14 +26,14 @@ import ( "fmt" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" + "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" ) // Name is the name of the weighted_target balancer. @@ -69,11 +71,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err type weightedTargetBalancer struct { logger *grpclog.PrefixLogger - // TODO: Make this package not dependent on any xds specific code. - // BalancerGroup uses xdsinternal.LocalityID as the key in the map of child - // policies that it maintains and reports load using LRS. Once these two - // dependencies are removed from the balancerGroup, this package will not - // have any dependencies on xds code. 
bg *balancergroup.BalancerGroup stateAggregator *weightedaggregator.Aggregator diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config.go b/balancer/weightedtarget/weightedtarget_config.go similarity index 100% rename from xds/internal/balancer/weightedtarget/weightedtarget_config.go rename to balancer/weightedtarget/weightedtarget_config.go diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go b/balancer/weightedtarget/weightedtarget_config_test.go similarity index 61% rename from xds/internal/balancer/weightedtarget/weightedtarget_config_test.go rename to balancer/weightedtarget/weightedtarget_config_test.go index c239a3ae5a4e..25bbee836abe 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go +++ b/balancer/weightedtarget/weightedtarget_config_test.go @@ -23,34 +23,38 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/grpclb" + "google.golang.org/grpc/balancer/roundrobin" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/priority" ) const ( testJSONConfig = `{ "targets": { - "cluster_1" : { - "weight":75, - "childPolicy":[{"priority_experimental":{"priorities": ["child-1"], "children": {"child-1": {"config": [{"round_robin":{}}]}}}}] + "cluster_1": { + "weight": 75, + "childPolicy": [{ + "grpclb": { + "childPolicy": [{"pick_first":{}}], + "targetName": "foo-service" + } + }] }, - "cluster_2" : { - "weight":25, - "childPolicy":[{"priority_experimental":{"priorities": ["child-2"], "children": {"child-2": {"config": [{"round_robin":{}}]}}}}] + "cluster_2": { + "weight": 25, + "childPolicy": [{"round_robin": ""}] } } }` ) var ( - testConfigParser = balancer.Get(priority.Name).(balancer.ConfigParser) - testConfigJSON1 = `{"priorities": ["child-1"], "children": {"child-1": {"config": [{"round_robin":{}}]}}}` - testConfig1, _ = 
testConfigParser.ParseConfig([]byte(testConfigJSON1)) - testConfigJSON2 = `{"priorities": ["child-2"], "children": {"child-2": {"config": [{"round_robin":{}}]}}}` - testConfig2, _ = testConfigParser.ParseConfig([]byte(testConfigJSON2)) + grpclbConfigParser = balancer.Get("grpclb").(balancer.ConfigParser) + grpclbConfigJSON = `{"childPolicy": [{"pick_first":{}}], "targetName": "foo-service"}` + grpclbConfig, _ = grpclbConfigParser.ParseConfig([]byte(grpclbConfigJSON)) ) -func Test_parseConfig(t *testing.T) { +func (s) TestParseConfig(t *testing.T) { tests := []struct { name string js string @@ -71,15 +75,14 @@ func Test_parseConfig(t *testing.T) { "cluster_1": { Weight: 75, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: priority.Name, - Config: testConfig1, + Name: "grpclb", + Config: grpclbConfig, }, }, "cluster_2": { Weight: 25, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: priority.Name, - Config: testConfig2, + Name: roundrobin.Name, }, }, }, @@ -91,8 +94,7 @@ func Test_parseConfig(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := parseConfig([]byte(tt.js)) if (err != nil) != tt.wantErr { - t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) - return + t.Fatalf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) } if !cmp.Equal(got, tt.want) { t.Errorf("parseConfig() got unexpected result, diff: %v", cmp.Diff(got, tt.want)) diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go new file mode 100644 index 000000000000..fa63b3f25822 --- /dev/null +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -0,0 +1,1220 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedtarget + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type testConfigBalancerBuilder struct { + balancer.Builder +} + +func newTestConfigBalancerBuilder() *testConfigBalancerBuilder { + return &testConfigBalancerBuilder{ + Builder: balancer.Get(roundrobin.Name), + } +} + +func (t *testConfigBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + rr := t.Builder.Build(cc, opts) + return &testConfigBalancer{ + Balancer: rr, + } +} + +const testConfigBalancerName = "test_config_balancer" + +func (t *testConfigBalancerBuilder) Name() string { + return testConfigBalancerName +} + +type stringBalancerConfig struct { + serviceconfig.LoadBalancingConfig + configStr string +} + +func (t *testConfigBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg string + if err := json.Unmarshal(c, &cfg); err != nil { + 
return nil, fmt.Errorf("failed to unmarshal config in %q: %v", testConfigBalancerName, err) + } + return stringBalancerConfig{configStr: cfg}, nil +} + +// testConfigBalancer is a roundrobin balancer, but it takes the balancer config +// string and adds it as an address attribute to the backend addresses. +type testConfigBalancer struct { + balancer.Balancer +} + +// configKey is the type used as the key to store balancer config in the +// Attributes field of resolver.Address. +type configKey struct{} + +func setConfigKey(addr resolver.Address, config string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(configKey{}, config) + return addr +} + +func getConfigKey(attr *attributes.Attributes) (string, bool) { + v := attr.Value(configKey{}) + name, ok := v.(string) + return name, ok +} + +func (b *testConfigBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + c, ok := s.BalancerConfig.(stringBalancerConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type %T", s.BalancerConfig) + } + + addrsWithAttr := make([]resolver.Address, len(s.ResolverState.Addresses)) + for i, addr := range s.ResolverState.Addresses { + addrsWithAttr[i] = setConfigKey(addr, c.configStr) + } + s.BalancerConfig = nil + s.ResolverState.Addresses = addrsWithAttr + return b.Balancer.UpdateClientConnState(s) +} + +func (b *testConfigBalancer) Close() { + b.Balancer.Close() +} + +var ( + wtbBuilder balancer.Builder + wtbParser balancer.ConfigParser + testBackendAddrStrs []string +) + +const testBackendAddrsCount = 12 + +func init() { + balancer.Register(newTestConfigBalancerBuilder()) + for i := 0; i < testBackendAddrsCount; i++ { + testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) + } + wtbBuilder = balancer.Get(Name) + wtbParser = wtbBuilder.(balancer.ConfigParser) + + balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond + NewRandomWRR = testutils.NewTestWRR +} + +// TestWeightedTarget 
covers the cases that a sub-balancer is added and a +// sub-balancer is removed. It verifies that the addresses and balancer configs +// are forwarded to the right sub-balancer. This test is intended to test the +// glue code in weighted_target. +func (s) TestWeightedTarget(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: round_robin". + config1, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr1) + + // Send subconn state change. + sc1 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test pick with one backend. + for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + + // Remove cluster_1, and add "cluster_2: test_config_balancer". The + // test_config_balancer adds an address attribute whose value is set to the + // config that is passed to it. 
+ config2, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and one address with hierarchy path "cluster_2". + addr2 := resolver.Address{Addr: testBackendAddrStrs[2], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_2"})}}, + BalancerConfig: config2, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Expect a new subConn from the test_config_balancer which has an address + // attribute set to the config that was passed to it. + verifyAddressInNewSubConn(t, cc, setConfigKey(addr2, "cluster_2")) + + // The subconn for cluster_1 should be removed. + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + + sc2 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p = <-cc.NewPickerCh + + // Test pick with one backend. + for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) + } + } + + // Replace child policy of "cluster_1" to "round_robin". 
+ config3, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_2": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_2"]. + addr3 := resolver.Address{Addr: testBackendAddrStrs[3], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr3, []string{"cluster_2"})}}, + BalancerConfig: config3, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr3) + + // The subconn from the test_config_balancer should be removed. + scRemoved = <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + + // Send subconn state change. + sc3 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p = <-cc.NewPickerCh + + // Test pick with one backend. + for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3) + } + } +} + +func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { + return func() balancer.SubConn { + scst, _ := p.Pick(balancer.PickInfo{}) + return scst.SubConn + } +} + +// TestWeightedTarget_OneSubBalancer_AddRemoveBackend tests the case where we +// have a weighted target balancer will one sub-balancer, and we add and remove +// backends from the subBalancer. 
+func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: round_robin". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr1) + + // Expect one SubConn, and move it to READY. + sc1 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test pick with one backend. + for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + + // Send two addresses. 
+ addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr2) + + // Expect one new SubConn, and move it to READY. + sc2 := <-cc.NewSubConnCh + // Update the SubConn to become READY. + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p = <-cc.NewPickerCh + + // Test round robin pick. + want := []balancer.SubConn{sc1, sc2} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Remove the first address. + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Expect one SubConn to be removed. + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + p = <-cc.NewPickerCh + + // Test pick with only the second SubConn. 
+ for i := 0; i < 5; i++ { + gotSC, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSC.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSC, sc2) + } + } +} + +// TestWeightedTarget_TwoSubBalancers_OneBackend tests the case where we have a +// weighted target balancer with two sub-balancers, each with one backend. +func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one address for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 2) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + }) + + // We expect a single subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_2"][0].sc + + // Send state changes for both SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. + want := []balancer.SubConn{sc1, sc2} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// TestWeightedTarget_TwoSubBalancers_MoreBackends tests the case where we have +// a weighted target balancer with two sub-balancers, each with more than one +// backend. +func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: round_robin, cluster_2: round_robin". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with two backends for each cluster. 
+ addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 4) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1, addr2}, + "cluster_2": {addr3, addr4}, + }) + + // We expect two subConns on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_1"][1].sc + sc3 := scs["cluster_2"][0].sc + sc4 := scs["cluster_2"][1].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. RPCs should be sent equally to all + // backends. + want := []balancer.SubConn{sc1, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Turn sc2's connection down, should be RR between balancers. + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc1, sc1, sc3, sc4} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Remove subConn corresponding to addr3. 
+ if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc3, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc1, sc4} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Turn sc1's connection down. + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc4} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Turn last connection to connecting. + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + p = <-cc.NewPickerCh + for i := 0; i < 5; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) + } + } + + // Turn all connections down. 
+ wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + p = <-cc.NewPickerCh + for i := 0; i < 5; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { + t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) + } + } +} + +// TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends tests the +// case where we have a weighted target balancer with two sub-balancers of +// differing weights. +func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with two subBalancers, one with twice the weight of the other. + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 2, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with two backends for each cluster. 
+ addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 4) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1, addr2}, + "cluster_2": {addr3, addr4}, + }) + + // We expect two subConns on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_1"][1].sc + sc3 := scs["cluster_2"][0].sc + sc4 := scs["cluster_2"][1].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. Twice the number of RPCs should be + // sent to cluster_1 when compared to cluster_2. + want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// TestWeightedTarget_ThreeSubBalancers_RemoveBalancer tests the case where we +// have a weighted target balancer with three sub-balancers and we remove one of +// the subBalancers. +func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with two subBalancers, one with twice the weight of the other. 
+ config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + }, + "cluster_3": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_3"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one backend for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + hierarchy.Set(addr3, []string{"cluster_3"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 3) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + "cluster_3": {addr3}, + }) + + // We expect one subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_2"][0].sc + sc3 := scs["cluster_3"][0].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + want := []balancer.SubConn{sc1, sc2, sc3} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Remove the second balancer, while the others two are ready. + config, err = wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_3": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_3"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_3"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Removing a subBalancer causes the weighted target LB policy to push a new + // picker which ensures that the removed subBalancer is not picked for RPCs. 
+ p = <-cc.NewPickerCh + + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scRemoved) + } + want = []balancer.SubConn{sc1, sc3} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Move balancer 3 into transient failure. + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + <-cc.NewPickerCh + + // Remove the first balancer, while the third is transient failure. + config, err = wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_3": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_3"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr3, []string{"cluster_3"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Removing a subBalancer causes the weighted target LB policy to push a new + // picker which ensures that the removed subBalancer is not picked for RPCs. + p = <-cc.NewPickerCh + + scRemoved = <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + for i := 0; i < 5; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { + t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) + } + } +} + +// TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends tests the case +// where we have a weighted target balancer with two sub-balancers, and we +// change the weight of these subBalancers. 
+func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with two subBalancers, one with twice the weight of the other. + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 2, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with two backends for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 4) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1, addr2}, + "cluster_2": {addr3, addr4}, + }) + + // We expect two subConns on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_1"][1].sc + sc3 := scs["cluster_2"][0].sc + sc4 := scs["cluster_2"][1].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. Twice the number of RPCs should be + // sent to cluster_1 when compared to cluster_2. + want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Change the weight of cluster_1. 
+ config, err = wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 3, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Weight change causes a new picker to be pushed to the channel. + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// TestWeightedTarget_InitOneSubBalancerTransientFailure tests that at init +// time, with two sub-balancers, if one sub-balancer reports transient_failure, +// the picks won't fail with transient_failure, and should instead wait for the +// other sub-balancer. +func (s) TestWeightedTarget_InitOneSubBalancerTransientFailure(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one address for each cluster. 
+ addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 2) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + }) + + // We expect a single subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + _ = scs["cluster_2"][0].sc + + // Set one subconn to TransientFailure, this will trigger one sub-balancer + // to report transient failure. + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + p := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + r, err := p.Pick(balancer.PickInfo{}) + if err != balancer.ErrNoSubConnAvailable { + t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrNoSubConnAvailable, r, err) + } + } +} + +// Test that with two sub-balancers, both in transient_failure, if one turns +// connecting, the overall state stays in transient_failure, and all picks +// return transient failure error. +func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". 
+ config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one address for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 2) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + }) + + // We expect a single subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_2"][0].sc + + // Set both subconn to TransientFailure, this will put both sub-balancers in + // transient failure. + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + p := <-cc.NewPickerCh + + for i := 0; i < 5; i++ { + r, err := p.Pick(balancer.PickInfo{}) + if err != balancer.ErrTransientFailure { + t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) + } + } + + // Set one subconn to Connecting, it shouldn't change the overall state. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + select { + case <-time.After(100 * time.Millisecond): + case <-cc.NewPickerCh: + t.Fatal("received new picker from the LB policy when expecting none") + } + + for i := 0; i < 5; i++ { + r, err := p.Pick(balancer.PickInfo{}) + if err != balancer.ErrTransientFailure { + t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) + } + } +} + +// Verify that a SubConn is created with the expected address and hierarchy +// path cleared. +func verifyAddressInNewSubConn(t *testing.T, cc *testutils.TestClientConn, addr resolver.Address) { + t.Helper() + + gotAddr := <-cc.NewSubConnAddrsCh + wantAddr := []resolver.Address{hierarchy.Set(addr, []string{})} + if diff := cmp.Diff(gotAddr, wantAddr, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { + t.Fatalf("got unexpected new subconn addrs: %v", diff) + } +} + +// subConnWithAddr wraps a subConn and the address for which it was created. +type subConnWithAddr struct { + sc balancer.SubConn + addr resolver.Address +} + +// waitForNewSubConns waits for `num` number of subConns to be created. This is +// expected to be used from tests using the "test_config_balancer" LB policy, +// which adds an address attribute with value set to the balancer config. +// +// Returned value is a map from subBalancer (identified by its config) to +// subConns created by it. 
+func waitForNewSubConns(t *testing.T, cc *testutils.TestClientConn, num int) map[string][]subConnWithAddr { + t.Helper() + + scs := make(map[string][]subConnWithAddr) + for i := 0; i < num; i++ { + addrs := <-cc.NewSubConnAddrsCh + if len(addrs) != 1 { + t.Fatalf("received subConns with %d addresses, want 1", len(addrs)) + } + cfg, ok := getConfigKey(addrs[0].Attributes) + if !ok { + t.Fatalf("received subConn address %v contains no attribute for balancer config", addrs[0]) + } + sc := <-cc.NewSubConnCh + scWithAddr := subConnWithAddr{sc: sc, addr: addrs[0]} + scs[cfg] = append(scs[cfg], scWithAddr) + } + return scs +} + +func verifySubConnAddrs(t *testing.T, scs map[string][]subConnWithAddr, wantSubConnAddrs map[string][]resolver.Address) { + t.Helper() + + if len(scs) != len(wantSubConnAddrs) { + t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs) + } + for cfg, scsWithAddr := range scs { + if len(scsWithAddr) != len(wantSubConnAddrs[cfg]) { + t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs) + } + wantAddrs := wantSubConnAddrs[cfg] + for i, scWithAddr := range scsWithAddr { + if diff := cmp.Diff(wantAddrs[i].Addr, scWithAddr.addr.Addr); diff != "" { + t.Fatalf("got unexpected new subconn addrs: %v", diff) + } + } + } +} + +const initIdleBalancerName = "test-init-Idle-balancer" + +var errTestInitIdle = fmt.Errorf("init Idle balancer error 0") + +func init() { + stub.Register(initIdleBalancerName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { + bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + err := fmt.Errorf("wrong picker error") + if state.ConnectivityState == connectivity.Idle { + err = errTestInitIdle + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: 
&testutils.TestConstPicker{Err: err}, + }) + }, + }) +} + +// TestInitialIdle covers the case that if the child reports Idle, the overall +// state will be Idle. +func (s) TestInitialIdle(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test-init-Idle-balancer": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addrs[0], []string{"cds:cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that a subconn is created with the address, and the hierarchy path + // in the address is cleared. + for range addrs { + sc := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + } + + if state := <-cc.NewStateCh; state != connectivity.Idle { + t.Fatalf("Received aggregated state: %v, want Idle", state) + } +} diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go similarity index 100% rename from xds/internal/balancer/balancergroup/balancergroup.go rename to internal/balancergroup/balancergroup.go diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go new file mode 100644 index 000000000000..ef11e402ec2e --- /dev/null +++ b/internal/balancergroup/balancergroup_test.go @@ -0,0 +1,511 @@ +/* + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package balancergroup + +import ( + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" +) + +var ( + rrBuilder = balancer.Get(roundrobin.Name) + testBalancerIDs = []string{"b1", "b2", "b3"} + testBackendAddrs []resolver.Address +) + +const testBackendAddrsCount = 12 + +func init() { + for i := 0; i < testBackendAddrsCount; i++ { + testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)}) + } + + // Disable caching for all tests. It will be re-enabled in caching specific + // tests. + DefaultSubBalancerCloseTimeout = time.Millisecond +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { + return func() balancer.SubConn { + scst, _ := p.Pick(balancer.PickInfo{}) + return scst.SubConn + } +} + +// Create a new balancer group, add balancer and backends, but not start. 
+// - b1, weight 2, backends [0,1] +// - b2, weight 1, backends [2,3] +// Start the balancer group and check behavior. +// +// Close the balancer group, call add/remove/change weight/change address. +// - b2, weight 3, backends [0,3] +// - b3, weight 1, backends [1,2] +// Start the balancer group again and check for behavior. +func (s) TestBalancerGroup_start_close(t *testing.T) { + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + + // Add two balancers to group and send two resolved addresses to both + // balancers. + gator.Add(testBalancerIDs[0], 2) + bg.Add(testBalancerIDs[0], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + gator.Add(testBalancerIDs[1], 1) + bg.Add(testBalancerIDs[1], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) + + bg.Start() + + m1 := make(map[resolver.Address]balancer.SubConn) + for i := 0; i < 4; i++ { + addrs := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + m1[addrs[0]] = sc + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + + // Test roundrobin on the last picker. 
+ p1 := <-cc.NewPickerCh + want := []balancer.SubConn{ + m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], + m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], + m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], + } + if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + gator.Stop() + bg.Close() + for i := 0; i < 4; i++ { + bg.UpdateSubConnState(<-cc.RemoveSubConnCh, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + } + + // Add b3, weight 1, backends [1,2]. + gator.Add(testBalancerIDs[2], 1) + bg.Add(testBalancerIDs[2], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}}) + + // Remove b1. + gator.Remove(testBalancerIDs[0]) + bg.Remove(testBalancerIDs[0]) + + // Update b2 to weight 3, backends [0,3]. + gator.UpdateWeight(testBalancerIDs[1], 3) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])}}) + + gator.Start() + bg.Start() + + m2 := make(map[resolver.Address]balancer.SubConn) + for i := 0; i < 4; i++ { + addrs := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + m2[addrs[0]] = sc + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + + // Test roundrobin on the last picker. 
+	p2 := <-cc.NewPickerCh
+	want = []balancer.SubConn{
+		m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]],
+		m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]],
+		m2[testBackendAddrs[1]], m2[testBackendAddrs[2]],
+	}
+	if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil {
+		t.Fatalf("want %v, got %v", want, err)
+	}
+}
+
+// Test that balancer group start() doesn't deadlock if the balancer calls back
+// into balancer group inline when it gets an update.
+//
+// The potential deadlock can happen if we
+// - hold a lock and send updates to balancer (e.g. update resolved addresses)
+// - the balancer calls back (NewSubConn or update picker) in line
+// The callback will try to hold the same lock again, which will cause a
+// deadlock.
+//
+// This test starts the balancer group with a test balancer, which updates the picker
+// whenever it gets an address update. It's expected that start() doesn't block
+// because of deadlock.
+func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { + const balancerName = "stub-TestBalancerGroup_start_close_deadlock" + stub.Register(balancerName, stub.BalancerFuncs{}) + builder := balancer.Get(balancerName) + + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + + gator.Add(testBalancerIDs[0], 2) + bg.Add(testBalancerIDs[0], builder) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + gator.Add(testBalancerIDs[1], 1) + bg.Add(testBalancerIDs[1], builder) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) + + bg.Start() +} + +func replaceDefaultSubBalancerCloseTimeout(n time.Duration) func() { + old := DefaultSubBalancerCloseTimeout + DefaultSubBalancerCloseTimeout = n + return func() { DefaultSubBalancerCloseTimeout = old } +} + +// initBalancerGroupForCachingTest creates a balancer group, and initialize it +// to be ready for caching tests. +// +// Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer +// is removed later, so the balancer group returned has one sub-balancer in its +// own map, and one sub-balancer in cache. +func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.TestClientConn, map[resolver.Address]balancer.SubConn) { + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + + // Add two balancers to group and send two resolved addresses to both + // balancers. 
+	gator.Add(testBalancerIDs[0], 2)
+	bg.Add(testBalancerIDs[0], rrBuilder)
+	bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
+	gator.Add(testBalancerIDs[1], 1)
+	bg.Add(testBalancerIDs[1], rrBuilder)
+	bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}})
+
+	bg.Start()
+
+	m1 := make(map[resolver.Address]balancer.SubConn)
+	for i := 0; i < 4; i++ {
+		addrs := <-cc.NewSubConnAddrsCh
+		sc := <-cc.NewSubConnCh
+		m1[addrs[0]] = sc
+		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
+		bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready})
+	}
+
+	// Test roundrobin on the last picker.
+	p1 := <-cc.NewPickerCh
+	want := []balancer.SubConn{
+		m1[testBackendAddrs[0]], m1[testBackendAddrs[0]],
+		m1[testBackendAddrs[1]], m1[testBackendAddrs[1]],
+		m1[testBackendAddrs[2]], m1[testBackendAddrs[3]],
+	}
+	if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil {
+		t.Fatalf("want %v, got %v", want, err)
+	}
+
+	gator.Remove(testBalancerIDs[1])
+	bg.Remove(testBalancerIDs[1])
+	gator.BuildAndUpdate()
+	// Don't wait for SubConns to be removed after close, because they are only
+	// removed after close timeout.
+	for i := 0; i < 10; i++ {
+		select {
+		case <-cc.RemoveSubConnCh:
+			t.Fatalf("Got request to remove subconn, want no remove subconn (because subconns were still in cache)")
+		default:
+		}
+		time.Sleep(time.Millisecond)
+	}
+	// Test roundrobin on the picker with only sub-balancer0.
+	p2 := <-cc.NewPickerCh
+	want = []balancer.SubConn{
+		m1[testBackendAddrs[0]], m1[testBackendAddrs[1]],
+	}
+	if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil {
+		t.Fatalf("want %v, got %v", want, err)
+	}
+
+	return gator, bg, cc, m1
+}
+
+// Test that if a sub-balancer is removed, and re-added within close timeout,
+// the subConns won't be re-created.
+func (s) TestBalancerGroup_locality_caching(t *testing.T) {
+	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
+	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)
+
+	// Turn down subconn for addr2, shouldn't get picker update because
+	// sub-balancer1 was removed.
+	bg.UpdateSubConnState(addrToSC[testBackendAddrs[2]], balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
+	for i := 0; i < 10; i++ {
+		select {
+		case <-cc.NewPickerCh:
+			t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)")
+		default:
+		}
+		time.Sleep(time.Millisecond)
+	}
+
+	// Sleep, but sleep less than close timeout.
+	time.Sleep(time.Millisecond * 100)
+
+	// Re-add sub-balancer-1, because subconns were in cache, no new subconns
+	// should be created. But a new picker will still be generated, with subconn
+	// states up to date.
+	gator.Add(testBalancerIDs[1], 1)
+	bg.Add(testBalancerIDs[1], rrBuilder)
+
+	p3 := <-cc.NewPickerCh
+	want := []balancer.SubConn{
+		addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]],
+		addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]],
+		// addr2 is down, b2 only has addr3 in READY state.
+		addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]],
+	}
+	if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil {
+		t.Fatalf("want %v, got %v", want, err)
+	}
+
+	for i := 0; i < 10; i++ {
+		select {
+		case <-cc.NewSubConnAddrsCh:
+			t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)")
+		default:
+		}
+		time.Sleep(time.Millisecond * 10)
+	}
+}
+
+// Sub-balancers are put in cache when they are removed. If balancer group is
+// closed within close timeout, all subconns should still be removed
+// immediately.
+func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) {
+	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
+	_, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)
+
+	bg.Close()
+	// The balancer group is closed. The subconns should be removed immediately.
+	removeTimeout := time.After(time.Millisecond * 500)
+	scToRemove := map[balancer.SubConn]int{
+		addrToSC[testBackendAddrs[0]]: 1,
+		addrToSC[testBackendAddrs[1]]: 1,
+		addrToSC[testBackendAddrs[2]]: 1,
+		addrToSC[testBackendAddrs[3]]: 1,
+	}
+	for i := 0; i < len(scToRemove); i++ {
+		select {
+		case sc := <-cc.RemoveSubConnCh:
+			c := scToRemove[sc]
+			if c == 0 {
+				t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c)
+			}
+			scToRemove[sc] = c - 1
+		case <-removeTimeout:
+			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
+		}
+	}
+}
+
+// Sub-balancers in cache will be closed if not re-added within timeout, and
+// subConns will be removed.
+func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) {
+	defer replaceDefaultSubBalancerCloseTimeout(time.Second)()
+	_, _, cc, addrToSC := initBalancerGroupForCachingTest(t)
+
+	// The sub-balancer is not re-added within timeout. The subconns should be
+	// removed.
+	removeTimeout := time.After(DefaultSubBalancerCloseTimeout)
+	scToRemove := map[balancer.SubConn]int{
+		addrToSC[testBackendAddrs[2]]: 1,
+		addrToSC[testBackendAddrs[3]]: 1,
+	}
+	for i := 0; i < len(scToRemove); i++ {
+		select {
+		case sc := <-cc.RemoveSubConnCh:
+			c := scToRemove[sc]
+			if c == 0 {
+				t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c)
+			}
+			scToRemove[sc] = c - 1
+		case <-removeTimeout:
+			t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed")
+		}
+	}
+}
+
+// Wrap the rr builder, so it behaves the same, but has a different pointer.
+type noopBalancerBuilderWrapper struct {
+	balancer.Builder
+}
+
+// After removing a sub-balancer, re-add with same ID, but different balancer
+// builder. Old subconns should be removed, and new subconns should be created.
+func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) {
+	defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)()
+	gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t)
+
+	// Re-add sub-balancer-1, but with a different balancer builder. The
+	// sub-balancer was still in cache, but can't be reused. This should cause
+	// old sub-balancer's subconns to be removed immediately, and new subconns
+	// to be created.
+	gator.Add(testBalancerIDs[1], 1)
+	bg.Add(testBalancerIDs[1], &noopBalancerBuilderWrapper{rrBuilder})
+
+	// The cached sub-balancer should be closed, and the subconns should be
+	// removed immediately.
+ removeTimeout := time.After(time.Millisecond * 500) + scToRemove := map[balancer.SubConn]int{ + addrToSC[testBackendAddrs[2]]: 1, + addrToSC[testBackendAddrs[3]]: 1, + } + for i := 0; i < len(scToRemove); i++ { + select { + case sc := <-cc.RemoveSubConnCh: + c := scToRemove[sc] + if c == 0 { + t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) + } + scToRemove[sc] = c - 1 + case <-removeTimeout: + t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") + } + } + + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}}) + + newSCTimeout := time.After(time.Millisecond * 500) + scToAdd := map[resolver.Address]int{ + testBackendAddrs[4]: 1, + testBackendAddrs[5]: 1, + } + for i := 0; i < len(scToAdd); i++ { + select { + case addr := <-cc.NewSubConnAddrsCh: + c := scToAdd[addr[0]] + if c == 0 { + t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c) + } + scToAdd[addr[0]] = c - 1 + sc := <-cc.NewSubConnCh + addrToSC[addr[0]] = sc + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + case <-newSCTimeout: + t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be newed") + } + } + + // Test roundrobin on the new picker. + p3 := <-cc.NewPickerCh + want := []balancer.SubConn{ + addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], + addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], + addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]], + } + if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// After removing a sub-balancer, it will be kept in cache. Make sure that this +// sub-balancer's Close is called when the balancer group is closed. 
+func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) { + const balancerName = "stub-TestBalancerGroup_check_close" + closed := make(chan struct{}) + stub.Register(balancerName, stub.BalancerFuncs{Close: func(_ *stub.BalancerData) { + close(closed) + }}) + builder := balancer.Get(balancerName) + + defer replaceDefaultSubBalancerCloseTimeout(time.Second)() + gator, bg, _, _ := initBalancerGroupForCachingTest(t) + + // Add balancer, and remove + gator.Add(testBalancerIDs[2], 1) + bg.Add(testBalancerIDs[2], builder) + gator.Remove(testBalancerIDs[2]) + bg.Remove(testBalancerIDs[2]) + + // Immediately close balancergroup, before the cache timeout. + bg.Close() + + // Make sure the removed child balancer is closed eventually. + select { + case <-closed: + case <-time.After(time.Second * 2): + t.Fatalf("timeout waiting for the child balancer in cache to be closed") + } +} + +// TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed +// to the balancergroup at creation time is passed to child policies. +func (s) TestBalancerGroupBuildOptions(t *testing.T) { + const ( + balancerName = "stubBalancer-TestBalancerGroupBuildOptions" + parent = int64(1234) + userAgent = "ua" + defaultTestTimeout = 1 * time.Second + ) + + // Setup the stub balancer such that we can read the build options passed to + // it in the UpdateClientConnState method. + bOpts := balancer.BuildOptions{ + DialCreds: insecure.NewCredentials(), + ChannelzParentID: parent, + CustomUserAgent: userAgent, + } + stub.Register(balancerName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + if !cmp.Equal(bd.BuildOptions, bOpts) { + return fmt.Errorf("buildOptions in child balancer: %v, want %v", bd, bOpts) + } + return nil + }, + }) + cc := testutils.NewTestClientConn(t) + bg := New(cc, bOpts, nil, nil) + bg.Start() + + // Add the stub balancer build above as a child policy. 
+ balancerBuilder := balancer.Get(balancerName) + bg.Add(testBalancerIDs[0], balancerBuilder) + + // Send an empty clientConn state change. This should trigger the + // verification of the buildOptions being passed to the child policy. + if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{}); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/balancer/balancergroup/balancerstateaggregator.go b/internal/balancergroup/balancerstateaggregator.go similarity index 100% rename from xds/internal/balancer/balancergroup/balancerstateaggregator.go rename to internal/balancergroup/balancerstateaggregator.go diff --git a/xds/internal/testutils/balancer.go b/internal/testutils/balancer.go similarity index 96% rename from xds/internal/testutils/balancer.go rename to internal/testutils/balancer.go index ff74da71cc95..ff43fb7340c5 100644 --- a/xds/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -16,7 +16,6 @@ * */ -// Package testutils provides utility types, for use in xds tests. package testutils import ( @@ -244,18 +243,6 @@ func IsRoundRobin(want []balancer.SubConn, f func() balancer.SubConn) error { return nil } -// testClosure is a test util for TestIsRoundRobin. -type testClosure struct { - r []balancer.SubConn - i int -} - -func (tc *testClosure) next() balancer.SubConn { - ret := tc.r[tc.i] - tc.i = (tc.i + 1) % len(tc.r) - return ret -} - // ErrTestConstPicker is error returned by test const picker. 
var ErrTestConstPicker = fmt.Errorf("const picker error") diff --git a/xds/internal/testutils/wrr.go b/internal/testutils/wrr.go similarity index 100% rename from xds/internal/testutils/wrr.go rename to internal/testutils/wrr.go diff --git a/xds/internal/balancer/balancer.go b/xds/internal/balancer/balancer.go index 86656736a61b..8d81aced2dd5 100644 --- a/xds/internal/balancer/balancer.go +++ b/xds/internal/balancer/balancer.go @@ -20,10 +20,10 @@ package balancer import ( + _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer ) diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go deleted file mode 100644 index 60c74a16b6ba..000000000000 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ /dev/null @@ -1,925 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// All tests in this file are combination of balancer group and -// weighted_balancerstate_aggregator, aka weighted_target tests. The difference -// is weighted_target tests cannot add sub-balancers to balancer group directly, -// they instead uses balancer config to control sub-balancers. Even though not -// very suited, the tests still cover all the functionality. -// -// TODO: the tests should be moved to weighted_target, and balancer group's -// tests should use a mock balancerstate_aggregator. - -package balancergroup - -import ( - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" - "google.golang.org/grpc/xds/internal/testutils" -) - -var ( - rrBuilder = balancer.Get(roundrobin.Name) - pfBuilder = balancer.Get(grpc.PickFirstBalancerName) - testBalancerIDs = []string{"b1", "b2", "b3"} - testBackendAddrs []resolver.Address -) - -const testBackendAddrsCount = 12 - -func init() { - for i := 0; i < testBackendAddrsCount; i++ { - testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)}) - } - - // Disable caching for all tests. It will be re-enabled in caching specific - // tests. 
- DefaultSubBalancerCloseTimeout = time.Millisecond -} - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - -func newTestBalancerGroup(t *testing.T) (*testutils.TestClientConn, *weightedaggregator.Aggregator, *BalancerGroup) { - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil) - bg.Start() - return cc, gator, bg -} - -// 1 balancer, 1 backend -> 2 backends -> 1 backend. -func (s) TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add one balancer to group. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - // Send one resolved address. - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - - // Send subconn state change. - sc1 := <-cc.NewSubConnCh - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with one backend. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } - - // Send two addresses. - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - // Expect one new subconn, send state update. 
- sc2 := <-cc.NewSubConnCh - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin pick. - p2 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove the first address. - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:2]}}) - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - bg.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - // Test pick with only the second subconn. - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSC, _ := p3.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSC.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSC, sc2) - } - } -} - -// 2 balancers, each with 1 backend. -func (s) TestBalancerGroup_TwoRR_OneBackend(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc2 := <-cc.NewSubConnCh - - // Send state changes for both subconns. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// 2 balancers, each with more than 1 backends. -func (s) TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - - // Send state changes for both subconns. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn sc2's connection down, should be RR between balancers. - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - p2 := <-cc.NewPickerCh - // Expect two sc1's in the result, because balancer1 will be picked twice, - // but there's only one sc in it. - want = []balancer.SubConn{sc1, sc1, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove sc3's addresses. 
- bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[3:4]}}) - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc3, scToRemove) - } - bg.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn sc1's connection down. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - p4 := <-cc.NewPickerCh - want = []balancer.SubConn{sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn last connection to connecting. - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - p5 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p5.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } - } - - // Turn all connections down. - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - p6 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p6.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } - } -} - -// 2 balancers with different weights. -func (s) TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add two balancers to group and send two resolved addresses to both - // balancers. 
- gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - - // Send state changes for both subconns. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// totally 3 balancers, add/remove balancer. -func (s) TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add three balancers to group and send one resolved address to both - // balancers. 
- gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:2]}}) - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[2], 1) - bg.Add(testBalancerIDs[2], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:2]}}) - sc3 := <-cc.NewSubConnCh - - // Send state changes for both subconns. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove the second balancer, while the others two are ready. 
- gator.Remove(testBalancerIDs[1]) - bg.Remove(testBalancerIDs[1]) - gator.BuildAndUpdate() - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove) - } - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // move balancer 3 into transient failure. - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - // Remove the first balancer, while the third is transient failure. - gator.Remove(testBalancerIDs[0]) - bg.Remove(testBalancerIDs[0]) - gator.BuildAndUpdate() - scToRemove = <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } - } -} - -// 2 balancers, change balancer weight. -func (s) TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add two balancers to group and send two resolved addresses to both - // balancers. 
- gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - - // Send state changes for both subconns. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - gator.UpdateWeight(testBalancerIDs[0], 3) - gator.BuildAndUpdate() - - // Test roundrobin with new weight. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// Create a new balancer group, add balancer and backends, but not start. 
-// - b1, weight 2, backends [0,1] -// - b2, weight 1, backends [2,3] -// Start the balancer group and check behavior. -// -// Close the balancer group, call add/remove/change weight/change address. -// - b2, weight 3, backends [0,3] -// - b3, weight 1, backends [1,2] -// Start the balancer group again and check for behavior. -func (s) TestBalancerGroup_start_close(t *testing.T) { - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil) - - // Add two balancers to group and send two resolved addresses to both - // balancers. - gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - - bg.Start() - - m1 := make(map[resolver.Address]balancer.SubConn) - for i := 0; i < 4; i++ { - addrs := <-cc.NewSubConnAddrsCh - sc := <-cc.NewSubConnCh - m1[addrs[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - // Test roundrobin on the last picker. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], - m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], - m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - gator.Stop() - bg.Close() - for i := 0; i < 4; i++ { - bg.UpdateSubConnState(<-cc.RemoveSubConnCh, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - } - - // Add b3, weight 1, backends [1,2]. - gator.Add(testBalancerIDs[2], 1) - bg.Add(testBalancerIDs[2], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}}) - - // Remove b1. - gator.Remove(testBalancerIDs[0]) - bg.Remove(testBalancerIDs[0]) - - // Update b2 to weight 3, backends [0,3]. - gator.UpdateWeight(testBalancerIDs[1], 3) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])}}) - - gator.Start() - bg.Start() - - m2 := make(map[resolver.Address]balancer.SubConn) - for i := 0; i < 4; i++ { - addrs := <-cc.NewSubConnAddrsCh - sc := <-cc.NewSubConnCh - m2[addrs[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - // Test roundrobin on the last picker. 
- p2 := <-cc.NewPickerCh - want = []balancer.SubConn{ - m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], - m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], - m2[testBackendAddrs[1]], m2[testBackendAddrs[2]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// Test that balancer group start() doesn't deadlock if the balancer calls back -// into balancer group inline when it gets an update. -// -// The potential deadlock can happen if we -// - hold a lock and send updates to balancer (e.g. update resolved addresses) -// - the balancer calls back (NewSubConn or update picker) in line -// The callback will try to hold hte same lock again, which will cause a -// deadlock. -// -// This test starts the balancer group with a test balancer, will updates picker -// whenever it gets an address update. It's expected that start() doesn't block -// because of deadlock. 
-func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { - const balancerName = "stub-TestBalancerGroup_start_close_deadlock" - stub.Register(balancerName, stub.BalancerFuncs{}) - builder := balancer.Get(balancerName) - - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil) - - gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], builder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], builder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - - bg.Start() -} - -// Test that at init time, with two sub-balancers, if one sub-balancer reports -// transient_failure, the picks won't fail with transient_failure, and should -// instead wait for the other sub-balancer. -func (s) TestBalancerGroup_InitOneSubBalancerTransientFailure(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - <-cc.NewSubConnCh - - // Set one subconn to TransientFailure, this will trigger one sub-balancer - // to report transient failure. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - r, err := p1.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrNoSubConnAvailable, r, err) - } - } -} - -// Test that with two sub-balancers, both in transient_failure, if one turns -// connecting, the overall state stays in transient_failure, and all picks -// return transient failure error. -func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], pfBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], pfBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc2 := <-cc.NewSubConnCh - - // Set both subconn to TransientFailure, this will put both sub-balancers in - // transient failure. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - r, err := p1.Pick(balancer.PickInfo{}) - if err != balancer.ErrTransientFailure { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) - } - } - - // Set one subconn to Connecting, it shouldn't change the overall state. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - r, err := p2.Pick(balancer.PickInfo{}) - if err != balancer.ErrTransientFailure { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) - } - } -} - -func replaceDefaultSubBalancerCloseTimeout(n time.Duration) func() { - old := DefaultSubBalancerCloseTimeout - DefaultSubBalancerCloseTimeout = n - return func() { DefaultSubBalancerCloseTimeout = old } -} - -// initBalancerGroupForCachingTest creates a balancer group, and initialize it -// to be ready for caching tests. -// -// Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer -// is removed later, so the balancer group returned has one sub-balancer in its -// own map, and one sub-balancer in cache. -func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.TestClientConn, map[resolver.Address]balancer.SubConn) { - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil) - - // Add two balancers to group and send two resolved addresses to both - // balancers. 
- gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - - bg.Start() - - m1 := make(map[resolver.Address]balancer.SubConn) - for i := 0; i < 4; i++ { - addrs := <-cc.NewSubConnAddrsCh - sc := <-cc.NewSubConnCh - m1[addrs[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], - m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], - m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - gator.Remove(testBalancerIDs[1]) - bg.Remove(testBalancerIDs[1]) - gator.BuildAndUpdate() - // Don't wait for SubConns to be removed after close, because they are only - // removed after close timeout. - for i := 0; i < 10; i++ { - select { - case <-cc.RemoveSubConnCh: - t.Fatalf("Got request to remove subconn, want no remove subconn (because subconns were still in cache)") - default: - } - time.Sleep(time.Millisecond) - } - // Test roundrobin on the with only sub-balancer0. 
- p2 := <-cc.NewPickerCh - want = []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - return gator, bg, cc, m1 -} - -// Test that if a sub-balancer is removed, and re-added within close timeout, -// the subConns won't be re-created. -func (s) TestBalancerGroup_locality_caching(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() - gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) - - // Turn down subconn for addr2, shouldn't get picker update because - // sub-balancer1 was removed. - bg.UpdateSubConnState(addrToSC[testBackendAddrs[2]], balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - for i := 0; i < 10; i++ { - select { - case <-cc.NewPickerCh: - t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)") - default: - } - time.Sleep(time.Millisecond) - } - - // Sleep, but sleep less then close timeout. - time.Sleep(time.Millisecond * 100) - - // Re-add sub-balancer-1, because subconns were in cache, no new subconns - // should be created. But a new picker will still be generated, with subconn - // states update to date. - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - - p3 := <-cc.NewPickerCh - want := []balancer.SubConn{ - addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], - addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], - // addr2 is down, b2 only has addr3 in READY state. 
- addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - for i := 0; i < 10; i++ { - select { - case <-cc.NewSubConnAddrsCh: - t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)") - default: - } - time.Sleep(time.Millisecond * 10) - } -} - -// Sub-balancers are put in cache when they are removed. If balancer group is -// closed within close timeout, all subconns should still be rmeoved -// immediately. -func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() - _, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) - - bg.Close() - // The balancer group is closed. The subconns should be removed immediately. - removeTimeout := time.After(time.Millisecond * 500) - scToRemove := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[0]]: 1, - addrToSC[testBackendAddrs[1]]: 1, - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, - } - for i := 0; i < len(scToRemove); i++ { - select { - case sc := <-cc.RemoveSubConnCh: - c := scToRemove[sc] - if c == 0 { - t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) - } - scToRemove[sc] = c - 1 - case <-removeTimeout: - t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") - } - } -} - -// Sub-balancers in cache will be closed if not re-added within timeout, and -// subConns will be removed. -func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(time.Second)() - _, _, cc, addrToSC := initBalancerGroupForCachingTest(t) - - // The sub-balancer is not re-added within timeout. The subconns should be - // removed. 
- removeTimeout := time.After(DefaultSubBalancerCloseTimeout) - scToRemove := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, - } - for i := 0; i < len(scToRemove); i++ { - select { - case sc := <-cc.RemoveSubConnCh: - c := scToRemove[sc] - if c == 0 { - t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) - } - scToRemove[sc] = c - 1 - case <-removeTimeout: - t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") - } - } -} - -// Wrap the rr builder, so it behaves the same, but has a different pointer. -type noopBalancerBuilderWrapper struct { - balancer.Builder -} - -// After removing a sub-balancer, re-add with same ID, but different balancer -// builder. Old subconns should be removed, and new subconns should be created. -func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() - gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) - - // Re-add sub-balancer-1, but with a different balancer builder. The - // sub-balancer was still in cache, but cann't be reused. This should cause - // old sub-balancer's subconns to be removed immediately, and new subconns - // to be created. - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], &noopBalancerBuilderWrapper{rrBuilder}) - - // The cached sub-balancer should be closed, and the subconns should be - // removed immediately. 
- removeTimeout := time.After(time.Millisecond * 500) - scToRemove := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, - } - for i := 0; i < len(scToRemove); i++ { - select { - case sc := <-cc.RemoveSubConnCh: - c := scToRemove[sc] - if c == 0 { - t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) - } - scToRemove[sc] = c - 1 - case <-removeTimeout: - t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") - } - } - - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}}) - - newSCTimeout := time.After(time.Millisecond * 500) - scToAdd := map[resolver.Address]int{ - testBackendAddrs[4]: 1, - testBackendAddrs[5]: 1, - } - for i := 0; i < len(scToAdd); i++ { - select { - case addr := <-cc.NewSubConnAddrsCh: - c := scToAdd[addr[0]] - if c == 0 { - t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c) - } - scToAdd[addr[0]] = c - 1 - sc := <-cc.NewSubConnCh - addrToSC[addr[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - case <-newSCTimeout: - t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be newed") - } - } - - // Test roundrobin on the new picker. - p3 := <-cc.NewPickerCh - want := []balancer.SubConn{ - addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], - addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], - addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// After removing a sub-balancer, it will be kept in cache. Make sure that this -// sub-balancer's Close is called when the balancer group is closed. 
-func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) { - const balancerName = "stub-TestBalancerGroup_check_close" - closed := make(chan struct{}) - stub.Register(balancerName, stub.BalancerFuncs{Close: func(_ *stub.BalancerData) { - close(closed) - }}) - builder := balancer.Get(balancerName) - - defer replaceDefaultSubBalancerCloseTimeout(time.Second)() - gator, bg, _, _ := initBalancerGroupForCachingTest(t) - - // Add balancer, and remove - gator.Add(testBalancerIDs[2], 1) - bg.Add(testBalancerIDs[2], builder) - gator.Remove(testBalancerIDs[2]) - bg.Remove(testBalancerIDs[2]) - - // Immediately close balancergroup, before the cache timeout. - bg.Close() - - // Make sure the removed child balancer is closed eventually. - select { - case <-closed: - case <-time.After(time.Second * 2): - t.Fatalf("timeout waiting for the child balancer in cache to be closed") - } -} - -// TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed -// to the balancergroup at creation time is passed to child policies. -func (s) TestBalancerGroupBuildOptions(t *testing.T) { - const ( - balancerName = "stubBalancer-TestBalancerGroupBuildOptions" - parent = int64(1234) - userAgent = "ua" - defaultTestTimeout = 1 * time.Second - ) - - // Setup the stub balancer such that we can read the build options passed to - // it in the UpdateClientConnState method. - bOpts := balancer.BuildOptions{ - DialCreds: insecure.NewCredentials(), - ChannelzParentID: parent, - CustomUserAgent: userAgent, - } - stub.Register(balancerName, stub.BalancerFuncs{ - UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { - if !cmp.Equal(bd.BuildOptions, bOpts) { - return fmt.Errorf("buildOptions in child balancer: %v, want %v", bd, bOpts) - } - return nil - }, - }) - cc := testutils.NewTestClientConn(t) - bg := New(cc, bOpts, nil, nil) - bg.Start() - - // Add the stub balancer build above as a child policy. 
- balancerBuilder := balancer.Get(balancerName) - bg.Add(testBalancerIDs[0], balancerBuilder) - - // Send an empty clientConn state change. This should trigger the - // verification of the buildOptions being passed to the child policy. - if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{}); err != nil { - t.Fatal(err) - } -} diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 778d711f2190..cd93dd0ecd84 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -34,7 +34,6 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -129,7 +128,7 @@ func (p *fakeProvider) Close() { // setupWithXDSCreds performs all the setup steps required for tests which use // xDSCredentials. -func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { +func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *testutils.TestClientConn, func()) { t.Helper() xdsC := fakeclient.NewClient() builder := balancer.Get(cdsName) @@ -145,7 +144,7 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS } // Create a new CDS balancer and pass it a fake balancer.ClientConn which we // can use to inspect the different calls made by the balancer. 
- tcc := xdstestutils.NewTestClientConn(t) + tcc := testutils.NewTestClientConn(t) cdsB := builder.Build(tcc, balancer.BuildOptions{DialCreds: creds}) // Override the creation of the EDS balancer to return a fake EDS balancer @@ -184,7 +183,7 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS // passed to the EDS balancer, and verifies that the CDS balancer forwards the // call appropriately to its parent balancer.ClientConn with or without // attributes bases on the value of wantFallback. -func makeNewSubConn(ctx context.Context, edsCC balancer.ClientConn, parentCC *xdstestutils.TestClientConn, wantFallback bool) (balancer.SubConn, error) { +func makeNewSubConn(ctx context.Context, edsCC balancer.ClientConn, parentCC *testutils.TestClientConn, wantFallback bool) (balancer.SubConn, error) { dummyAddr := "foo-address" addrs := []resolver.Address{{Addr: dummyAddr}} sc, err := edsCC.NewSubConn(addrs, balancer.NewSubConnOptions{}) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 267fa3b10cd2..242f7fa64992 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -36,7 +36,6 @@ import ( "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" "google.golang.org/grpc/xds/internal/balancer/ringhash" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -225,14 +224,14 @@ func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *inter // setup creates a cdsBalancer and an edsBalancer (and overrides the // newChildBalancer function to return it), and also returns a cleanup function. 
-func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { +func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *testutils.TestClientConn, func()) { t.Helper() xdsC := fakeclient.NewClient() builder := balancer.Get(cdsName) if builder == nil { t.Fatalf("balancer.Get(%q) returned nil", cdsName) } - tcc := xdstestutils.NewTestClientConn(t) + tcc := testutils.NewTestClientConn(t) cdsB := builder.Build(tcc, balancer.BuildOptions{}) edsB := newTestEDSBalancer() @@ -250,7 +249,7 @@ func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *x // setupWithWatch does everything that setup does, and also pushes a ClientConn // update to the cdsBalancer and waits for a CDS watch call to be registered. -func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { +func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *testutils.TestClientConn, func()) { t.Helper() xdsC, cdsB, edsB, tcc, cancel := setup(t) @@ -692,7 +691,7 @@ func (s) TestClose(t *testing.T) { // Make sure that the UpdateSubConnState() method on the CDS balancer does // not forward the update to the EDS balancer. 
- cdsB.UpdateSubConnState(&xdstestutils.TestSubConn{}, balancer.SubConnState{}) + cdsB.UpdateSubConnState(&testutils.TestSubConn{}, balancer.SubConnState{}) sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if err := edsB.waitForSubConnUpdate(sCtx, subConnWithState{}); err != context.DeadlineExceeded { diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 65ec17348f46..5abf37fcbf16 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -36,9 +36,9 @@ import ( "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" xdsinternal "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/load" diff --git a/xds/internal/balancer/clusterimpl/config_test.go b/xds/internal/balancer/clusterimpl/config_test.go index ccb0c5e74d90..88bed5c182c0 100644 --- a/xds/internal/balancer/clusterimpl/config_test.go +++ b/xds/internal/balancer/clusterimpl/config_test.go @@ -24,8 +24,8 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/roundrobin" + _ "google.golang.org/grpc/balancer/weightedtarget" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" ) const ( diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index 188b39d467af..8d71200d8c61 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ 
b/xds/internal/balancer/clustermanager/clustermanager.go @@ -25,12 +25,12 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancergroup" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" ) const balancerName = "xds_cluster_manager_experimental" diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index 191a5d56b692..503ed58fd09e 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -31,13 +31,12 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/hierarchy" - itestutils "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/testutils" ) type s struct { @@ -524,7 +523,7 @@ func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { // Setup the stub balancer such that we can read the build options passed to // it in the UpdateClientConnState method. 
- ccsCh := itestutils.NewChannel() + ccsCh := testutils.NewChannel() bOpts := balancer.BuildOptions{ DialCreds: insecure.NewCredentials(), ChannelzParentID: parent, diff --git a/xds/internal/balancer/clustermanager/config_test.go b/xds/internal/balancer/clustermanager/config_test.go index 3328ba1d300f..23c25755ee30 100644 --- a/xds/internal/balancer/clustermanager/config_test.go +++ b/xds/internal/balancer/clustermanager/config_test.go @@ -23,9 +23,9 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/weightedtarget" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" ) const ( diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 741744ee3fc1..1c0c8d0d7018 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" @@ -32,7 +33,6 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index c2b68b946f06..b56f20fa41bf 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ 
b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" @@ -37,7 +38,6 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index feb96cfa56b5..7f2bfa8a75d1 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -26,14 +26,15 @@ import ( corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancergroup" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget" - "google.golang.org/grpc/xds/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -101,7 +102,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { defer cleanup() // One locality with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -115,7 +116,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { } // The same locality, add one more backend. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) @@ -129,7 +130,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { } // The same locality, delete first backend. - clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) @@ -145,7 +146,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { } // The same locality, replace backend. - clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab4 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab4.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab4.Build()), nil) @@ -164,7 +165,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { } // The same locality, different drop rate, dropping 50%. 
- clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], map[string]uint32{"test-drop": 50}) + clab5 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], map[string]uint32{"test-drop": 50}) clab5.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab5.Build()), nil) @@ -186,7 +187,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { } // The same locality, remove drops. - clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab6 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab6.Build()), nil) @@ -207,7 +208,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { defer cleanup() // Two localities, each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh @@ -229,7 +230,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { } // Add another locality, with one backend. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) @@ -245,7 +246,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { } // Remove first locality. 
- clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab3.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) @@ -262,7 +263,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { } // Add a backend to the last locality. - clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab4 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab4.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab4.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab4.Build()), nil) @@ -281,7 +282,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { } // Change weight of the locality[1]. - clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab5 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab5.AddLocality(testSubZones[1], 2, 0, testEndpointAddrs[1:2], nil) clab5.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab5.Build()), nil) @@ -296,7 +297,7 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { } // Change weight of the locality[1] to 0, it should never be picked. 
- clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab6 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[1], 0, 0, testEndpointAddrs[1:2], nil) clab6.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab6.Build()), nil) @@ -328,8 +329,8 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { defer cleanup() // Two localities, each 3 backend, one Healthy, one Unhealthy, one Unknown. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:6], &testutils.AddLocalityOptions{ + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:6], &xdstestutils.AddLocalityOptions{ Health: []corepb.HealthStatus{ corepb.HealthStatus_HEALTHY, corepb.HealthStatus_UNHEALTHY, @@ -339,7 +340,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { corepb.HealthStatus_DEGRADED, }, }) - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[6:12], &testutils.AddLocalityOptions{ + clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[6:12], &xdstestutils.AddLocalityOptions{ Health: []corepb.HealthStatus{ corepb.HealthStatus_HEALTHY, corepb.HealthStatus_UNHEALTHY, @@ -413,7 +414,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { } // One locality with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -472,7 +473,7 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { } // One locality with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) sc1 := <-cc.NewSubConnCh diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 8438a373d9d9..9f1accd1f2d3 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -26,9 +26,10 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" ) // When a high priority is ready, adding/removing lower locality doesn't cause @@ -40,7 +41,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { defer cleanup() // Two localities, with priorities [0, 1], each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -61,7 +62,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { } // Add p2, it shouldn't cause any updates. 
- clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) @@ -78,7 +79,7 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { } // Remove p2, no updates. - clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) @@ -103,7 +104,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { defer cleanup() // Two localities, with priorities [0, 1], each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -139,7 +140,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { } // Add p2, it shouldn't cause any updates. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) @@ -171,7 +172,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { } // Remove 2, use 1. 
- clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) @@ -196,7 +197,7 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -222,7 +223,7 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { } // Add p2, it should create a new SubConn. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) @@ -249,7 +250,7 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0,1,2], each with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) @@ -340,7 +341,7 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -383,7 +384,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. - clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab0 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab0.Build()), nil) @@ -430,7 +431,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { } // Add two localities, with two priorities, with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) @@ -480,7 +481,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with different priorities, each with one backend. - clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab0 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab0.Build()), nil) @@ -498,7 +499,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { } // Remove all priorities. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) // p0 subconn should be removed. scToRemove := <-cc.RemoveSubConnCh @@ -515,7 +516,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { } // Re-add two localities, with previous priorities, but different backends. 
- clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[3:4], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) @@ -544,7 +545,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { } // Remove p1 from EDS, to fallback to p0. - clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) @@ -587,7 +588,7 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0, 1], each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -607,7 +608,7 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { } // Remove addresses from priority 0, should use p1. 
- clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, nil, nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) @@ -638,7 +639,7 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // Two localities, with priorities [0, 1], each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) @@ -658,8 +659,8 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { } // Set priority 0 endpoints to all unhealthy, should use p1. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], &testutils.AddLocalityOptions{ + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], &xdstestutils.AddLocalityOptions{ Health: []corepb.HealthStatus{corepb.HealthStatus_UNHEALTHY}, }) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) @@ -699,11 +700,11 @@ func (s) TestEDSPriority_FirstPriorityRemoved(t *testing.T) { _, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() // One localities, with priorities [0], each with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) // Remove the only localities. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -751,7 +752,7 @@ func (s) TestFallbackToDNS(t *testing.T) { } // One locality with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) diff --git a/xds/internal/balancer/clusterresolver/testutil_test.go b/xds/internal/balancer/clusterresolver/testutil_test.go index 999621a7b3e4..9c51db49f921 100644 --- a/xds/internal/balancer/clusterresolver/testutil_test.go +++ b/xds/internal/balancer/clusterresolver/testutil_test.go @@ -28,8 +28,8 @@ import ( endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" typepb "github.com/envoyproxy/go-control-plane/envoy/type" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 39053dbc1bfe..98fd0672af42 100644 --- a/xds/internal/balancer/priority/balancer.go +++ 
b/xds/internal/balancer/priority/balancer.go @@ -30,6 +30,7 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -37,7 +38,6 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" ) // Name is the name of the priority balancer. diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 29b712c0f55d..e8963898727c 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -29,12 +29,12 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/testutils" ) type s struct { diff --git a/xds/internal/balancer/priority/ignore_resolve_now_test.go b/xds/internal/balancer/priority/ignore_resolve_now_test.go index b7cecd6c1ff2..29b719d9e129 100644 --- a/xds/internal/balancer/priority/ignore_resolve_now_test.go +++ b/xds/internal/balancer/priority/ignore_resolve_now_test.go @@ -25,14 +25,13 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" - grpctestutils "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/testutils" ) const 
resolveNowBalancerName = "test-resolve-now-balancer" -var resolveNowBalancerCCCh = grpctestutils.NewChannel() +var resolveNowBalancerCCCh = testutils.NewChannel() type resolveNowBalancerBuilder struct { balancer.Builder diff --git a/xds/internal/balancer/ringhash/picker_test.go b/xds/internal/balancer/ringhash/picker_test.go index c88698ebbdfe..2619ea7002c6 100644 --- a/xds/internal/balancer/ringhash/picker_test.go +++ b/xds/internal/balancer/ringhash/picker_test.go @@ -26,7 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/internal/testutils" ) func newTestRing(cStats []connectivity.State) *ring { diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go index fb85367e4a41..015424cdafed 100644 --- a/xds/internal/balancer/ringhash/ringhash_test.go +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -30,8 +30,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/testutils" ) var ( diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_test.go deleted file mode 100644 index b0e4df895885..000000000000 --- a/xds/internal/balancer/weightedtarget/weightedtarget_test.go +++ /dev/null @@ -1,326 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package weightedtarget - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/attributes" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/internal/hierarchy" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/testutils" -) - -type testConfigBalancerBuilder struct { - balancer.Builder -} - -func newTestConfigBalancerBuilder() *testConfigBalancerBuilder { - return &testConfigBalancerBuilder{ - Builder: balancer.Get(roundrobin.Name), - } -} - -func (t *testConfigBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - rr := t.Builder.Build(cc, opts) - return &testConfigBalancer{ - Balancer: rr, - } -} - -const testConfigBalancerName = "test_config_balancer" - -func (t *testConfigBalancerBuilder) Name() string { - return testConfigBalancerName -} - -type stringBalancerConfig struct { - serviceconfig.LoadBalancingConfig - s string -} - -func (t *testConfigBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - // Return string without quotes. 
- return stringBalancerConfig{s: string(c[1 : len(c)-1])}, nil -} - -// testConfigBalancer is a roundrobin balancer, but it takes the balancer config -// string and append it to the backend addresses. -type testConfigBalancer struct { - balancer.Balancer -} - -func (b *testConfigBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - c, ok := s.BalancerConfig.(stringBalancerConfig) - if !ok { - return fmt.Errorf("unexpected balancer config with type %T", s.BalancerConfig) - } - oneMoreAddr := resolver.Address{Addr: c.s} - s.BalancerConfig = nil - s.ResolverState.Addresses = append(s.ResolverState.Addresses, oneMoreAddr) - return b.Balancer.UpdateClientConnState(s) -} - -func (b *testConfigBalancer) Close() { - b.Balancer.Close() -} - -var ( - wtbBuilder balancer.Builder - wtbParser balancer.ConfigParser - testBackendAddrStrs []string -) - -const testBackendAddrsCount = 12 - -func init() { - balancer.Register(newTestConfigBalancerBuilder()) - for i := 0; i < testBackendAddrsCount; i++ { - testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) - } - wtbBuilder = balancer.Get(Name) - wtbParser = wtbBuilder.(balancer.ConfigParser) - - balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond -} - -// TestWeightedTarget covers the cases that a sub-balancer is added and a -// sub-balancer is removed. It verifies that the addresses and balancer configs -// are forwarded to the right sub-balancer. -// -// This test is intended to test the glue code in weighted_target. Most of the -// functionality tests are covered by the balancer group tests. -func TestWeightedTarget(t *testing.T) { - cc := testutils.NewTestClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) - - // Start with "cluster_1: round_robin". 
- config1, err := wtbParser.ParseConfig([]byte(`{"targets":{"cluster_1":{"weight":1,"childPolicy":[{"round_robin":""}]}}}`)) - if err != nil { - t.Fatalf("failed to parse balancer config: %v", err) - } - - // Send the config, and an address with hierarchy path ["cluster_1"]. - wantAddr1 := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil} - if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddr1, []string{"cluster_1"}), - }}, - BalancerConfig: config1, - }); err != nil { - t.Fatalf("failed to update ClientConn state: %v", err) - } - - // Verify that a subconn is created with the address, and the hierarchy path - // in the address is cleared. - addr1 := <-cc.NewSubConnAddrsCh - if want := []resolver.Address{ - hierarchy.Set(wantAddr1, []string{}), - }; !cmp.Equal(addr1, want, cmp.AllowUnexported(attributes.Attributes{})) { - t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr1, want, cmp.AllowUnexported(attributes.Attributes{}))) - } - - // Send subconn state change. - sc1 := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with one backend. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } - - // Remove cluster_1, and add "cluster_2: test_config_balancer". 
- wantAddr3Str := testBackendAddrStrs[2] - config2, err := wtbParser.ParseConfig([]byte( - fmt.Sprintf(`{"targets":{"cluster_2":{"weight":1,"childPolicy":[{%q:%q}]}}}`, testConfigBalancerName, wantAddr3Str), - )) - if err != nil { - t.Fatalf("failed to parse balancer config: %v", err) - } - - // Send the config, and one address with hierarchy path "cluster_2". - wantAddr2 := resolver.Address{Addr: testBackendAddrStrs[1], Attributes: nil} - if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddr2, []string{"cluster_2"}), - }}, - BalancerConfig: config2, - }); err != nil { - t.Fatalf("failed to update ClientConn state: %v", err) - } - - // Expect the address sent in the address list. The hierarchy path should be - // cleared. - addr2 := <-cc.NewSubConnAddrsCh - if want := []resolver.Address{ - hierarchy.Set(wantAddr2, []string{}), - }; !cmp.Equal(addr2, want, cmp.AllowUnexported(attributes.Attributes{})) { - t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr2, want, cmp.AllowUnexported(attributes.Attributes{}))) - } - // Expect the other address sent as balancer config. This address doesn't - // have hierarchy path. - wantAddr3 := resolver.Address{Addr: wantAddr3Str, Attributes: nil} - addr3 := <-cc.NewSubConnAddrsCh - if want := []resolver.Address{wantAddr3}; !cmp.Equal(addr3, want, cmp.AllowUnexported(attributes.Attributes{})) { - t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr3, want, cmp.AllowUnexported(attributes.Attributes{}))) - } - - // The subconn for cluster_1 should be removed. 
- scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - wtb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - sc2 := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - sc3 := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin pick with backends in cluster_2. - p2 := <-cc.NewPickerCh - want := []balancer.SubConn{sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Replace child policy of "cluster_1" to "round_robin". - config3, err := wtbParser.ParseConfig([]byte(`{"targets":{"cluster_2":{"weight":1,"childPolicy":[{"round_robin":""}]}}}`)) - if err != nil { - t.Fatalf("failed to parse balancer config: %v", err) - } - - // Send the config, and an address with hierarchy path ["cluster_1"]. - wantAddr4 := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil} - if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddr4, []string{"cluster_2"}), - }}, - BalancerConfig: config3, - }); err != nil { - t.Fatalf("failed to update ClientConn state: %v", err) - } - - // Verify that a subconn is created with the address, and the hierarchy path - // in the address is cleared. 
- addr4 := <-cc.NewSubConnAddrsCh - if want := []resolver.Address{ - hierarchy.Set(wantAddr4, []string{}), - }; !cmp.Equal(addr4, want, cmp.AllowUnexported(attributes.Attributes{})) { - t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr4, want, cmp.AllowUnexported(attributes.Attributes{}))) - } - - // Send subconn state change. - sc4 := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with one backend. - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p3.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc4, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc4) - } - } -} - -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - -const initIdleBalancerName = "test-init-Idle-balancer" - -var errTestInitIdle = fmt.Errorf("init Idle balancer error 0") - -func init() { - stub.Register(initIdleBalancerName, stub.BalancerFuncs{ - UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { - bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) - return nil - }, - UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { - err := fmt.Errorf("wrong picker error") - if state.ConnectivityState == connectivity.Idle { - err = errTestInitIdle - } - bd.ClientConn.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &testutils.TestConstPicker{Err: err}, - }) - }, - }) -} - -// TestInitialIdle covers the case that if the child reports Idle, the overall -// state will be Idle. 
-func TestInitialIdle(t *testing.T) { - cc := testutils.NewTestClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) - - // Start with "cluster_1: round_robin". - config1, err := wtbParser.ParseConfig([]byte(`{"targets":{"cluster_1":{"weight":1,"childPolicy":[{"test-init-Idle-balancer":""}]}}}`)) - if err != nil { - t.Fatalf("failed to parse balancer config: %v", err) - } - - // Send the config, and an address with hierarchy path ["cluster_1"]. - wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, - } - if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), - }}, - BalancerConfig: config1, - }); err != nil { - t.Fatalf("failed to update ClientConn state: %v", err) - } - - // Verify that a subconn is created with the address, and the hierarchy path - // in the address is cleared. - for range wantAddrs { - sc := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Idle}) - } - - if state1 := <-cc.NewStateCh; state1 != connectivity.Idle { - t.Fatalf("Received aggregated state: %v, want Idle", state1) - } -} diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index a078b82c5eb6..5ab40b712ba1 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -48,7 +48,6 @@ import ( "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -819,7 +818,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { waitForWatchRouteConfig(ctx, t, xdsC, 
routeStr) defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) - newWRR = xdstestutils.NewTestWRR + newWRR = testutils.NewTestWRR // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. @@ -876,7 +875,7 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { waitForWatchRouteConfig(ctx, t, xdsC, routeStr) defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) - newWRR = xdstestutils.NewTestWRR + newWRR = testutils.NewTestWRR // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. @@ -1384,7 +1383,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { } defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) - newWRR = xdstestutils.NewTestWRR + newWRR = testutils.NewTestWRR // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
diff --git a/xds/internal/testutils/balancer_test.go b/xds/internal/testutils/balancer_test.go index 4891eb9cdadf..b5f7f665396c 100644 --- a/xds/internal/testutils/balancer_test.go +++ b/xds/internal/testutils/balancer_test.go @@ -22,13 +22,14 @@ import ( "testing" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/testutils" ) func TestIsRoundRobin(t *testing.T) { var ( - sc1 = TestSubConns[0] - sc2 = TestSubConns[1] - sc3 = TestSubConns[2] + sc1 = testutils.TestSubConns[0] + sc2 = testutils.TestSubConns[1] + sc3 = testutils.TestSubConns[2] ) testCases := []struct { @@ -125,10 +126,22 @@ func TestIsRoundRobin(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - err := IsRoundRobin(tC.want, (&testClosure{r: tC.got}).next) + err := testutils.IsRoundRobin(tC.want, (&testClosure{r: tC.got}).next) if err == nil != tC.pass { t.Errorf("want pass %v, want %v, got err %v", tC.pass, tC.want, err) } }) } } + +// testClosure is a test util for TestIsRoundRobin. +type testClosure struct { + r []balancer.SubConn + i int +} + +func (tc *testClosure) next() balancer.SubConn { + ret := tc.r[tc.i] + tc.i = (tc.i + 1) % len(tc.r) + return ret +} diff --git a/xds/internal/testutils/testutils.go b/xds/internal/testutils/testutils.go new file mode 100644 index 000000000000..a4c56f6438a0 --- /dev/null +++ b/xds/internal/testutils/testutils.go @@ -0,0 +1,19 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package testutils provides utility types, for use in xds tests. +package testutils From c25a52b76917819833e06c1181536c449a506631 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 9 Nov 2021 13:06:38 -0800 Subject: [PATCH 330/998] config: remove retry disable via environment variable (#4922) --- dialoptions.go | 2 -- internal/envconfig/envconfig.go | 6 ---- internal/xds/env/env.go | 4 --- test/retry_test.go | 11 -------- .../test/xds_client_integration_test.go | 6 ---- .../xdsclient/xdsresource/unmarshal_rds.go | 2 +- .../xdsresource/unmarshal_rds_test.go | 28 ++++++------------- 7 files changed, 10 insertions(+), 49 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 280df92e74ba..22d626f183f2 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -576,7 +575,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index 9f25a67fc6bd..6f0272543110 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -22,20 +22,14 @@ package envconfig import ( "os" "strings" - - xdsenv "google.golang.org/grpc/internal/xds/env" ) const ( prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( - // Retry is enabled unless explicitly disabled via "GRPC_GO_RETRY=off" or - // 
if XDS retry support is explicitly disabled. - Retry = !strings.EqualFold(os.Getenv(retryStr), "off") && xdsenv.RetrySupport // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go index 2c85e7804ba0..75bdc6efb19f 100644 --- a/internal/xds/env/env.go +++ b/internal/xds/env/env.go @@ -42,7 +42,6 @@ const ( ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" @@ -81,9 +80,6 @@ var ( // "true". AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") - // RetrySupport indicates whether xDS retry is enabled. - RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") - // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
diff --git a/test/retry_test.go b/test/retry_test.go index 7f068d79f44d..1bd866add606 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -33,7 +33,6 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" @@ -41,14 +40,7 @@ import ( testpb "google.golang.org/grpc/test/grpc_testing" ) -func enableRetry() func() { - old := envconfig.Retry - envconfig.Retry = true - return func() { envconfig.Retry = old } -} - func (s) TestRetryUnary(t *testing.T) { - defer enableRetry()() i := -1 ss := &stubserver.StubServer{ EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { @@ -116,7 +108,6 @@ func (s) TestRetryUnary(t *testing.T) { } func (s) TestRetryThrottling(t *testing.T) { - defer enableRetry()() i := -1 ss := &stubserver.StubServer{ EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { @@ -192,7 +183,6 @@ func (s) TestRetryThrottling(t *testing.T) { } func (s) TestRetryStreaming(t *testing.T) { - defer enableRetry()() req := func(b byte) *testpb.StreamingOutputCallRequest { return &testpb.StreamingOutputCallRequest{Payload: &testpb.Payload{Body: []byte{b}}} } @@ -510,7 +500,6 @@ func (*retryStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) con func (*retryStatsHandler) HandleConn(context.Context, stats.ConnStats) {} func (s) TestRetryStats(t *testing.T) { - defer enableRetry()() lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen. 
Err: %v", err) diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index e26e3e08f4c9..6a9a8c9688f0 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -32,7 +32,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/testutils/e2e" @@ -105,11 +104,6 @@ func (s) TestClientSideXDS(t *testing.T) { } func (s) TestClientSideRetry(t *testing.T) { - if !env.RetrySupport { - // Skip this test if retry is not enabled. - return - } - ctr := 0 errs := []codes.Code{codes.ResourceExhausted} ss := &stubserver.StubServer{ diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 0642500f303b..bcaf1315b9d1 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -109,7 +109,7 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l } func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { - if !env.RetrySupport || rp == nil { + if rp == nil { return nil, nil } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 38a7e99a9ede..955e08d4b377 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -99,10 +99,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { } } goodUpdateWithRetryPolicy = func(vhrc *RetryConfig, rrc *RetryConfig) RouteConfigUpdate { - if !env.RetrySupport { - vhrc = nil - rrc = nil - } return RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{ldsTarget}, @@ 
-116,13 +112,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { }}, } } - defaultRetryBackoff = RetryBackoff{BaseInterval: 25 * time.Millisecond, MaxInterval: 250 * time.Millisecond} - goodUpdateIfRetryDisabled = func() RouteConfigUpdate { - if env.RetrySupport { - return RouteConfigUpdate{} - } - return goodUpdateWithRetryPolicy(nil, nil) - } + defaultRetryBackoff = RetryBackoff{BaseInterval: 25 * time.Millisecond, MaxInterval: 250 * time.Millisecond} ) tests := []struct { @@ -554,26 +544,26 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { { name: "bad-retry-policy-0-retries", rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", NumRetries: &wrapperspb.UInt32Value{Value: 0}}, nil), - wantUpdate: goodUpdateIfRetryDisabled(), - wantError: env.RetrySupport, + wantUpdate: RouteConfigUpdate{}, + wantError: true, }, { name: "bad-retry-policy-0-base-interval", rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{BaseInterval: durationpb.New(0)}}, nil), - wantUpdate: goodUpdateIfRetryDisabled(), - wantError: env.RetrySupport, + wantUpdate: RouteConfigUpdate{}, + wantError: true, }, { name: "bad-retry-policy-negative-max-interval", rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{MaxInterval: durationpb.New(-time.Second)}}, nil), - wantUpdate: goodUpdateIfRetryDisabled(), - wantError: env.RetrySupport, + wantUpdate: RouteConfigUpdate{}, + wantError: true, }, { name: "bad-retry-policy-negative-max-interval-no-known-retry-on", rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "something", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{MaxInterval: durationpb.New(-time.Second)}}, nil), - wantUpdate: goodUpdateIfRetryDisabled(), - wantError: env.RetrySupport, + wantUpdate: RouteConfigUpdate{}, + wantError: true, }, } for _, 
test := range tests { From dd767416a6b9d4c7c49c17f94e13b94857cc08c0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 9 Nov 2021 15:42:07 -0800 Subject: [PATCH 331/998] grpc: implement WithInsecure() using the insecure package (#4718) --- clientconn.go | 37 ++++++----- clientconn_test.go | 43 ++++++++++--- credentials/credentials.go | 10 +++ credentials/insecure/insecure.go | 3 + dialoptions.go | 14 ++-- test/creds_test.go | 3 +- test/insecure_creds_test.go | 106 ++++++++++++++++--------------- 7 files changed, 132 insertions(+), 84 deletions(-) diff --git a/clientconn.go b/clientconn.go index 5a9e7d754fe2..972ff1a65baa 100644 --- a/clientconn.go +++ b/clientconn.go @@ -83,13 +83,13 @@ var ( // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. 
- errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -177,17 +177,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.csMgr.channelzID = cc.channelzID } - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { for _, cd := range cc.dopts.copts.PerRPCCredentials { if cd.RequireTransportSecurity() { return nil, errTransportCredentialsMissing diff --git a/clientconn_test.go b/clientconn_test.go index d276c7b5f2ff..ef6a68081ff7 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -490,29 +490,52 @@ func (s) TestDialContextFailFast(t *testing.T) { } // securePerRPCCredentials always requires transport security. 
-type securePerRPCCredentials struct{} - -func (c securePerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return nil, nil +type securePerRPCCredentials struct { + credentials.PerRPCCredentials } func (c securePerRPCCredentials) RequireTransportSecurity() bool { return true } +type fakeBundleCreds struct { + credentials.Bundle + transportCreds credentials.TransportCredentials +} + +func (b *fakeBundleCreds) TransportCredentials() credentials.TransportCredentials { + return b.transportCreds +} + func (s) TestCredentialsMisuse(t *testing.T) { - tlsCreds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") + // Use of no transport creds and no creds bundle must fail. + if _, err := Dial("passthrough:///Non-Existent.Server:80"); err != errNoTransportSecurity { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errNoTransportSecurity) + } + + // Use of both transport creds and creds bundle must fail. 
+ creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") if err != nil { t.Fatalf("Failed to create authenticator %v", err) } - // Two conflicting credential configurations - if _, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(tlsCreds), WithBlock(), WithInsecure()); err != errCredentialsConflict { - t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errCredentialsConflict) + dopts := []DialOption{ + WithTransportCredentials(creds), + WithCredentialsBundle(&fakeBundleCreds{transportCreds: creds}), + } + if _, err := Dial("passthrough:///Non-Existent.Server:80", dopts...); err != errTransportCredsAndBundle { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredsAndBundle) } - // security info on insecure connection - if _, err := Dial("passthrough:///Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithBlock(), WithInsecure()); err != errTransportCredentialsMissing { + + // Use of perRPC creds requiring transport security over an insecure + // transport must fail. + if _, err := Dial("passthrough:///Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithInsecure()); err != errTransportCredentialsMissing { t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredentialsMissing) } + + // Use of a creds bundle with nil transport credentials must fail. + if _, err := Dial("passthrough:///Non-Existent.Server:80", WithCredentialsBundle(&fakeBundleCreds{})); err != errNoTransportCredsInBundle { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredsAndBundle) + } } func (s) TestWithBackoffConfigDefault(t *testing.T) { diff --git a/credentials/credentials.go b/credentials/credentials.go index a671107584f5..96ff1877e754 100644 --- a/credentials/credentials.go +++ b/credentials/credentials.go @@ -178,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. 
type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // diff --git a/credentials/insecure/insecure.go b/credentials/insecure/insecure.go index c4fa27c920da..22a8f996a68b 100644 --- a/credentials/insecure/insecure.go +++ b/credentials/insecure/insecure.go @@ -33,6 +33,9 @@ import ( ) // NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. func NewCredentials() credentials.TransportCredentials { return insecureTC{} } diff --git a/dialoptions.go b/dialoptions.go index 22d626f183f2..063f1e903c01 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/transport" @@ -49,7 +50,6 @@ type dialOptions struct { bs internalbackoff.Strategy block bool returnLastError bool - insecure bool timeout time.Duration scChan <-chan ServiceConfig authority string @@ -298,11 +298,17 @@ func WithReturnConnectionError() DialOption { } // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. 
Note that transport security is required unless WithInsecure is -// set. +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use insecure.NewCredentials() instead. +// Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() }) } diff --git a/test/creds_test.go b/test/creds_test.go index 6b3fc2a46076..d886220d8a46 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" @@ -52,7 +53,7 @@ type testCredsBundle struct { func (c *testCredsBundle) TransportCredentials() credentials.TransportCredentials { if c.mode == bundlePerRPCOnly { - return nil + return insecure.NewCredentials() } creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") diff --git a/test/insecure_creds_test.go b/test/insecure_creds_test.go index 791cf650887e..9c925e4757c7 100644 --- a/test/insecure_creds_test.go +++ b/test/insecure_creds_test.go @@ -144,64 +144,66 @@ func (s) TestInsecureCreds(t *testing.T) { } } -func (s) TestInsecureCredsWithPerRPCCredentials(t *testing.T) { - tests := []struct { - desc string - perRPCCredsViaDialOptions bool - perRPCCredsViaCallOptions bool - }{ - { - desc: "send PerRPCCredentials via DialOptions", - perRPCCredsViaDialOptions: true, - perRPCCredsViaCallOptions: false, - }, - { - desc: "send PerRPCCredentials via CallOptions", - 
perRPCCredsViaDialOptions: false, - perRPCCredsViaCallOptions: true, +func (s) TestInsecureCreds_WithPerRPCCredentials_AsCallOption(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil }, } - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil - }, - } - s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) - defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) + defer s.Stop() + testpb.RegisterTestServiceServer(s, ss) - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("net.Listen(tcp, localhost:0) failed: %v", err) - } - go s.Serve(lis) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("net.Listen(tcp, localhost:0) failed: %v", err) + } + go s.Serve(lis) - addr := lis.Addr().String() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() + addr := lis.Addr().String() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() - dopts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} - if test.perRPCCredsViaDialOptions { - dopts = append(dopts, grpc.WithPerRPCCredentials(testLegacyPerRPCCredentials{})) - } - copts := []grpc.CallOption{} - if test.perRPCCredsViaCallOptions { - copts = append(copts, grpc.PerRPCCredentials(testLegacyPerRPCCredentials{})) - } - cc, err := grpc.Dial(addr, dopts...) 
- if err != nil { - t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) - } - defer cc.Close() + dopts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} + copts := []grpc.CallOption{grpc.PerRPCCredentials(testLegacyPerRPCCredentials{})} + cc, err := grpc.Dial(addr, dopts...) + if err != nil { + t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) + } + defer cc.Close() - const wantErr = "transport: cannot send secure credentials on an insecure connection" - c := testpb.NewTestServiceClient(cc) - if _, err = c.EmptyCall(ctx, &testpb.Empty{}, copts...); err == nil || !strings.Contains(err.Error(), wantErr) { - t.Fatalf("InsecureCredsWithPerRPCCredentials/send_PerRPCCredentials_via_CallOptions = %v; want %s", err, wantErr) - } - }) + const wantErr = "transport: cannot send secure credentials on an insecure connection" + c := testpb.NewTestServiceClient(cc) + if _, err = c.EmptyCall(ctx, &testpb.Empty{}, copts...); err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("insecure credentials with per-RPC credentials requiring transport security returned error: %v; want %s", err, wantErr) + } +} + +func (s) TestInsecureCreds_WithPerRPCCredentials_AsDialOption(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(_ context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) + defer s.Stop() + testpb.RegisterTestServiceServer(s, ss) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("net.Listen(tcp, localhost:0) failed: %v", err) + } + go s.Serve(lis) + + addr := lis.Addr().String() + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithPerRPCCredentials(testLegacyPerRPCCredentials{}), + } + const wantErr = "the credentials require transport level security" + if _, err := grpc.Dial(addr, dopts...); err == nil || !strings.Contains(err.Error(), wantErr) { + 
t.Fatalf("grpc.Dial(%q) returned err %v, want: %v", addr, err, wantErr) } } From d57363ab5dbb341b471091f7d312c0e0e0c0dfd3 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 9 Nov 2021 19:03:39 -0500 Subject: [PATCH 332/998] xds: Add Cluster Specifier Plugin to xdsclient (#4967) * xds: Add Cluster Specifier Plugin to xdsclient --- .../clusterspecifier/cluster_specifier.go | 67 +++++++++ .../xdsclient/xdsresource/type_rds.go | 4 + .../xdsclient/xdsresource/unmarshal_rds.go | 93 ++++++++++--- .../xdsresource/unmarshal_rds_test.go | 127 +++++++++++++++++- 4 files changed, 273 insertions(+), 18 deletions(-) create mode 100644 xds/internal/clusterspecifier/cluster_specifier.go diff --git a/xds/internal/clusterspecifier/cluster_specifier.go b/xds/internal/clusterspecifier/cluster_specifier.go new file mode 100644 index 000000000000..54776f20cf0b --- /dev/null +++ b/xds/internal/clusterspecifier/cluster_specifier.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterspecifier contains the ClusterSpecifier interface and a registry for +// storing and retrieving their implementations. +package clusterspecifier + +import ( + "github.com/golang/protobuf/proto" +) + +// BalancerConfig is the Go Native JSON representation of a balancer +// configuration. 
+type BalancerConfig []map[string]interface{} + +// ClusterSpecifier defines the parsing functionality of a Cluster Specifier. +type ClusterSpecifier interface { + // TypeURLs are the proto message types supported by this + // ClusterSpecifierPlugin. A ClusterSpecifierPlugin will be registered by + // each of its supported message types. + TypeURLs() []string + // ParseClusterSpecifierConfig parses the provided configuration + // proto.Message from the top level RDS configuration. The resulting + // BalancerConfig will be used as configuration for a child LB Policy of the + // Cluster Manager LB Policy. + ParseClusterSpecifierConfig(proto.Message) (BalancerConfig, error) +} + +var ( + // m is a map from scheme to filter. + m = make(map[string]ClusterSpecifier) +) + +// Register registers the ClusterSpecifierPlugin to the ClusterSpecifier map. +// cs.TypeURLs() will be used as the types for this ClusterSpecifierPlugin. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple cluster specifier +// plugins are registered with the same type URL, the one registered last will +// take effect. +func Register(cs ClusterSpecifier) { + for _, u := range cs.TypeURLs() { + m[u] = cs + } +} + +// Get returns the ClusterSpecifier registered with typeURL. +// +// If no cluster specifier is registered with typeURL, nil will be returned. 
+func Get(typeURL string) ClusterSpecifier { + return m[typeURL] +} diff --git a/xds/internal/xdsclient/xdsresource/type_rds.go b/xds/internal/xdsclient/xdsresource/type_rds.go index 3c4d971cd245..47de59e434cc 100644 --- a/xds/internal/xdsclient/xdsresource/type_rds.go +++ b/xds/internal/xdsclient/xdsresource/type_rds.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/protobuf/types/known/anypb" ) @@ -31,6 +32,9 @@ import ( // of interest to the registered RDS watcher. type RouteConfigUpdate struct { VirtualHosts []*VirtualHost + // ClusterSpecifierPlugins are the LB Configurations for any + // ClusterSpecifierPlugins referenced by the Route Table. + ClusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig // Raw is the resource from the xds response. Raw *anypb.Any } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index bcaf1315b9d1..9388f9a8d2e3 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/version" "google.golang.org/protobuf/types/known/anypb" ) @@ -82,11 +83,22 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s // we are looking for. 
func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) + csps, err := processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid %v", err) + } + // cspNames represents all the cluster specifiers referenced by Route + // Actions - any cluster specifiers not referenced by a Route Action can be + // ignored and not emitted by the xdsclient. + var cspNames = make(map[string]bool) for _, vh := range rc.GetVirtualHosts() { - routes, err := routesProtoToSlice(vh.Routes, logger, v2) + routes, cspNs, err := routesProtoToSlice(vh.Routes, csps, logger, v2) if err != nil { return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) } + for n := range cspNs { + cspNames[n] = true + } rc, err := generateRetryConfig(vh.GetRetryPolicy()) if err != nil { return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) @@ -105,7 +117,44 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l } vhs = append(vhs, vhOut) } - return RouteConfigUpdate{VirtualHosts: vhs}, nil + + // "For any entry in the RouteConfiguration.cluster_specifier_plugins not + // referenced by an enclosed RouteAction's cluster_specifier_plugin, the xDS + // client should not provide it to its consumers." 
- RLS in xDS Design + for name := range csps { + if !cspNames[name] { + delete(csps, name) + } + } + + return RouteConfigUpdate{VirtualHosts: vhs, ClusterSpecifierPlugins: csps}, nil +} + +func processClusterSpecifierPlugins(csps []*v3routepb.ClusterSpecifierPlugin) (map[string]clusterspecifier.BalancerConfig, error) { + cspCfgs := make(map[string]clusterspecifier.BalancerConfig) + // "The xDS client will inspect all elements of the + // cluster_specifier_plugins field looking up a plugin based on the + // extension.typed_config of each." - RLS in xDS design + for _, csp := range csps { + cs := clusterspecifier.Get(csp.GetExtension().GetTypedConfig().GetTypeUrl()) + if cs == nil { + // "If no plugin is registered for it, the resource will be NACKed." + // - RLS in xDS design + return nil, fmt.Errorf("cluster specifier %q of type %q was not found", csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + lbCfg, err := cs.ParseClusterSpecifierConfig(csp.GetExtension().GetTypedConfig()) + if err != nil { + // "If a plugin is found, the value of the typed_config field will + // be passed to it's conversion method, and if an error is + // encountered, the resource will be NACKED." - RLS in xDS design + return nil, fmt.Errorf("error: %q parsing config %q for cluster specifier %q of type %q", err, csp.GetExtension().GetTypedConfig(), csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + // "If all cluster specifiers are valid, the xDS client will store the + // configurations in a map keyed by the name of the extension instance." 
- + // RLS in xDS Design + cspCfgs[csp.GetExtension().GetName()] = lbCfg + } + return cspCfgs, nil } func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { @@ -162,12 +211,13 @@ func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { return cfg, nil } -func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { +func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, map[string]bool, error) { var routesRet []*Route + var cspNames = make(map[string]bool) for _, r := range routes { match := r.GetMatch() if match == nil { - return nil, fmt.Errorf("route %+v doesn't have a match", r) + return nil, nil, fmt.Errorf("route %+v doesn't have a match", r) } if len(match.GetQueryParameters()) != 0 { @@ -178,7 +228,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, pathSp := match.GetPathSpecifier() if pathSp == nil { - return nil, fmt.Errorf("route %+v doesn't have a path specifier", r) + return nil, nil, fmt.Errorf("route %+v doesn't have a path specifier", r) } var route Route @@ -191,11 +241,11 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, regex := pt.SafeRegex.GetRegex() re, err := regexp.Compile(regex) if err != nil { - return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) } route.Regex = re default: - return nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) + return nil, nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) } if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { @@ -211,7 +261,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, regex := ht.SafeRegexMatch.GetRegex() re, err := regexp.Compile(regex) if err 
!= nil { - return nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) } header.RegexMatch = re case *v3routepb.HeaderMatcher_RangeMatch: @@ -226,7 +276,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, case *v3routepb.HeaderMatcher_SuffixMatch: header.SuffixMatch = &ht.SuffixMatch default: - return nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) + return nil, nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) } header.Name = h.GetName() invert := h.GetInvertMatch() @@ -256,7 +306,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, if env.RingHashSupport { hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) if err != nil { - return nil, err + return nil, nil, err } route.HashPolicies = hp } @@ -276,7 +326,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, if !v2 { cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) if err != nil { - return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) } wc.HTTPFilterConfigOverride = cfgs } @@ -290,15 +340,24 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, wantTotalWeight = tw.GetValue() } if totalWeight != wantTotalWeight { - return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) + return nil, nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) } if totalWeight == 0 { - return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + 
return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) } case *v3routepb.RouteAction_ClusterHeader: continue + case *v3routepb.RouteAction_ClusterSpecifierPlugin: + if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { + // "When processing RouteActions, if any action includes a + // cluster_specifier_plugin value that is not in + // RouteConfiguration.cluster_specifier_plugins, the + // resource will be NACKed." - RLS in xDS design + return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) + } + cspNames[a.ClusterSpecifierPlugin] = true default: - return nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) + return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) } msd := action.GetMaxStreamDuration() @@ -315,7 +374,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, var err error route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) if err != nil { - return nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) } route.RouteAction = RouteActionRoute @@ -330,13 +389,13 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, if !v2 { cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) if err != nil { - return nil, fmt.Errorf("route %+v: %v", r, err) + return nil, nil, fmt.Errorf("route %+v: %v", r, err) } route.HTTPFilterConfigOverride = cfgs } routesRet = append(routesRet, &route) } - return routesRet, nil + return routesRet, cspNames, nil } func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go 
b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 955e08d4b377..54181f06877a 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -18,16 +18,19 @@ package xdsresource import ( + "errors" "fmt" "regexp" "testing" "time" + "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" "google.golang.org/protobuf/types/known/durationpb" @@ -67,6 +70,31 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { }}, } } + goodRouteConfigWithClusterSpecifierPlugins = func(csps []*v3routepb.ClusterSpecifierPlugin, cspReferences []string) *v3routepb.RouteConfiguration { + var rs []*v3routepb.Route + + for i, cspReference := range cspReferences { + rs = append(rs, &v3routepb.Route{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: fmt.Sprint(i + 1)}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: cspReference}, + }, + }, + }) + } + + rc := &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: rs, + }}, + ClusterSpecifierPlugins: csps, + } + + return rc + } goodUpdateWithFilterConfigs = func(cfgs map[string]httpfilter.FilterConfig) RouteConfigUpdate { return RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ @@ -80,6 +108,26 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { }}, } } + goodUpdateWithClusterSpecifierPluginA = RouteConfigUpdate{ + VirtualHosts: 
[]*VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("1"), + RouteAction: RouteActionRoute, + }}, + }}, + ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{ + "cspA": nil, + }, + } + clusterSpecifierPlugin = func(name string, config *anypb.Any) *v3routepb.ClusterSpecifierPlugin { + return &v3routepb.ClusterSpecifierPlugin{ + Extension: &v3corepb.TypedExtensionConfig{ + Name: name, + TypedConfig: config, + }, + } + } goodRouteConfigWithRetryPolicy = func(vhrp *v3routepb.RetryPolicy, rrp *v3routepb.RetryPolicy) *v3routepb.RouteConfiguration { return &v3routepb.RouteConfiguration{ Name: routeName, @@ -565,6 +613,42 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { wantUpdate: RouteConfigUpdate{}, wantError: true, }, + { + name: "cluster-specifier-declared-which-not-registered", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist), + }, []string{"cspA"}), + wantError: true, + }, + { + name: "error-in-cluster-specifier-plugin-conversion-method", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", errorClusterSpecifierConfig), + }, []string{"cspA"}), + wantError: true, + }, + { + name: "route-action-that-references-undeclared-cluster-specifier-plugin", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), + }, []string{"cspA", "cspB"}), + wantError: true, + }, + { + name: "emitted-cluster-specifier-plugins", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), + }, []string{"cspA"}), + wantUpdate: goodUpdateWithClusterSpecifierPluginA, + }, + { + name: "deleted-cluster-specifier-plugins-not-referenced", + rc: 
goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), + clusterSpecifierPlugin("cspB", mockClusterSpecifierConfig), + }, []string{"cspA"}), + wantUpdate: goodUpdateWithClusterSpecifierPluginA, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -580,6 +664,47 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { } } +var configOfClusterSpecifierDoesntExist = &anypb.Any{ + TypeUrl: "does.not.exist", + Value: []byte{1, 2, 3}, +} + +var mockClusterSpecifierConfig = &anypb.Any{ + TypeUrl: "mock.cluster.specifier.plugin", + Value: []byte{1, 2, 3}, +} + +var errorClusterSpecifierConfig = &anypb.Any{ + TypeUrl: "error.cluster.specifier.plugin", + Value: []byte{1, 2, 3}, +} + +func init() { + clusterspecifier.Register(mockClusterSpecifierPlugin{}) + clusterspecifier.Register(errorClusterSpecifierPlugin{}) +} + +type mockClusterSpecifierPlugin struct { +} + +func (mockClusterSpecifierPlugin) TypeURLs() []string { + return []string{"mock.cluster.specifier.plugin"} +} + +func (mockClusterSpecifierPlugin) ParseClusterSpecifierConfig(proto.Message) (clusterspecifier.BalancerConfig, error) { + return nil, nil +} + +type errorClusterSpecifierPlugin struct{} + +func (errorClusterSpecifierPlugin) TypeURLs() []string { + return []string{"error.cluster.specifier.plugin"} +} + +func (errorClusterSpecifierPlugin) ParseClusterSpecifierConfig(proto.Message) (clusterspecifier.BalancerConfig, error) { + return nil, errors.New("error from cluster specifier conversion function") +} + func (s) TestUnmarshalRouteConfig(t *testing.T) { const ( ldsTarget = "lds.target.good:1111" @@ -1458,7 +1583,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { defer func() { env.RingHashSupport = oldRingHashSupport }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := routesProtoToSlice(tt.routes, nil, false) + got, _, err := 
routesProtoToSlice(tt.routes, nil, nil, false) if (err != nil) != tt.wantErr { t.Fatalf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) } From 6603e730b5ace5965d4179873c19ed3742fb4459 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 9 Nov 2021 19:21:32 -0500 Subject: [PATCH 333/998] xds: Added validation and construction of RBAC matcher engine to ParseConfig (#4964) * xds: Added validation and construction of RBAC matcher engine to ParseConfig --- internal/xds/rbac/rbac_engine.go | 6 +- xds/internal/httpfilter/rbac/rbac.go | 98 +++++++++---------- .../xdsresource/unmarshal_lds_test.go | 72 ++++++++++++++ .../xdsresource/unmarshal_rds_test.go | 21 ++++ 4 files changed, 145 insertions(+), 52 deletions(-) diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index ecb8512ac51a..66c7bf10bd0f 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -108,13 +108,13 @@ type engine struct { // newEngine creates an RBAC Engine based on the contents of policy. Returns a // non-nil error if the policy is invalid. 
func newEngine(config *v3rbacpb.RBAC) (*engine, error) { - a := *config.Action.Enum() + a := config.GetAction() if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY { return nil, fmt.Errorf("unsupported action %s", config.Action) } - policies := make(map[string]*policyMatcher, len(config.Policies)) - for name, policy := range config.Policies { + policies := make(map[string]*policyMatcher, len(config.GetPolicies())) + for name, policy := range config.GetPolicies() { matcher, err := newPolicyMatcher(policy) if err != nil { return nil, err diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index e92e2e64421b..9211d7714bc3 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -64,7 +64,7 @@ type builder struct { type config struct { httpfilter.FilterConfig - config *rpb.RBAC + chainEngine *rbac.ChainEngine } func (builder) TypeURLs() []string { @@ -90,23 +90,57 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { // "It is also a validation failure if Permission or Principal has a // header matcher for a grpc- prefixed header name or :scheme." 
- A41 for _, principal := range policy.Principals { - if principal.GetHeader() != nil { - name := principal.GetHeader().GetName() - if name == ":scheme" || strings.HasPrefix(name, "grpc-") { - return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name) - } + name := principal.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name) + } + } + for _, permission := range policy.Permissions { + name := permission.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name) + } + } + } + + // "Envoy aliases :authority and Host in its header map implementation, so + // they should be treated equivalent for the RBAC matchers; there must be no + // behavior change depending on which of the two header names is used in the + // RBAC policy." - A41. Loop through config's principals and policies, change + // any header matcher with value "host" to :authority", as that is what + // grpc-go shifts both headers to in transport layer. 
+ for _, policy := range rbacCfg.GetRules().GetPolicies() { + for _, principal := range policy.Principals { + if principal.GetHeader().GetName() == "host" { + principal.GetHeader().Name = ":authority" } } for _, permission := range policy.Permissions { - if permission.GetHeader() != nil { - name := permission.GetHeader().GetName() - if name == ":scheme" || strings.HasPrefix(name, "grpc-") { - return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name) - } + if permission.GetHeader().GetName() == "host" { + permission.GetHeader().Name = ":authority" } } } - return config{config: rbacCfg}, nil + + // Two cases where this HTTP Filter is a no op: + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configurated." - A41 + if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { + return config{}, nil + } + + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}) + if err != nil { + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configurated." - A41 + if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { + return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) + } + } + + return config{chainEngine: ce}, nil } func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { @@ -166,49 +200,15 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http } } - icfg := c.config + // RBAC HTTP Filter is a no op from one of these two cases: // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. 
- if icfg.Rules == nil { - return nil, nil - } - // "At this time, if the RBAC.action is Action.LOG then the policy will be // completely ignored, as if RBAC was not configurated." - A41 - if icfg.Rules.Action == v3rbacpb.RBAC_LOG { + if c.chainEngine == nil { return nil, nil } - - // "Envoy aliases :authority and Host in its header map implementation, so - // they should be treated equivalent for the RBAC matchers; there must be no - // behavior change depending on which of the two header names is used in the - // RBAC policy." - A41. Loop through config's principals and policies, change - // any header matcher with value "host" to :authority", as that is what - // grpc-go shifts both headers to in transport layer. - for _, policy := range icfg.Rules.GetPolicies() { - for _, principal := range policy.Principals { - if principal.GetHeader() != nil { - name := principal.GetHeader().GetName() - if name == "host" { - principal.GetHeader().Name = ":authority" - } - } - } - for _, permission := range policy.Permissions { - if permission.GetHeader() != nil { - name := permission.GetHeader().GetName() - if name == "host" { - permission.GetHeader().Name = ":authority" - } - } - } - } - - ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{icfg.Rules}) - if err != nil { - return nil, fmt.Errorf("error constructing matching engine: %v", err) - } - return &interceptor{chainEngine: ce}, nil + return &interceptor{chainEngine: c.chainEngine}, nil } type interceptor struct { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index 138a8928a684..503a634f1f9a 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" _ 
"google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/version" @@ -42,9 +43,12 @@ import ( v2httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" v2listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" anypb "github.com/golang/protobuf/ptypes/any" spb "github.com/golang/protobuf/ptypes/struct" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" @@ -990,6 +994,60 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }) } + v3LisWithBadRBACConfiguration := func(rbacCfg *v3rbacpb.RBAC) *anypb.Any { + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("rbac", rbacCfg), e2e.RouterHTTPFilter}, + }), + }, + }, + 
}, + }, + }, + }) + } + badRBACCfgRegex := &v3rbacpb.RBAC{ + Rules: &rpb.RBAC{ + Action: rpb.RBAC_ALLOW, + Policies: map[string]*rpb.Policy{ + "bad-regex-value": { + Permissions: []*rpb.Permission{ + {Rule: &rpb.Permission_Any{Any: true}}, + }, + Principals: []*rpb.Principal{ + {Identifier: &rpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "["}}}}}, + }, + }, + }, + }, + } + badRBACCfgDestIP := &v3rbacpb.RBAC{ + Rules: &rpb.RBAC{ + Action: rpb.RBAC_ALLOW, + Policies: map[string]*rpb.Policy{ + "certain-destination-ip": { + Permissions: []*rpb.Permission{ + {Rule: &rpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*rpb.Principal{ + {Identifier: &rpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + } tests := []struct { name string @@ -1445,6 +1503,20 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { wantMD: errMD, wantErr: "original_ip_detection_extensions must be empty", }, + { + name: "rbac-with-invalid-regex", + resources: []*anypb.Any{v3LisWithBadRBACConfiguration(badRBACCfgRegex)}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, + wantMD: errMD, + wantErr: "error parsing config for filter", + }, + { + name: "rbac-with-invalid-destination-ip-matcher", + resources: []*anypb.Any{v3LisWithBadRBACConfiguration(badRBACCfgDestIP)}, + wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, + wantMD: errMD, + wantErr: "error parsing config for filter", + }, { name: "unsupported validation context in transport socket", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 
54181f06877a..b48940927ba4 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -38,7 +38,9 @@ import ( v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" anypb "github.com/golang/protobuf/ptypes/any" @@ -571,6 +573,25 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("unknown.custom.filter")}), wantUpdate: goodUpdateWithFilterConfigs(nil), }, + { + name: "good-route-config-with-bad-rbac-http-filter-configuration", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"rbac": testutils.MarshalAny(&v3rbacpb.RBACPerRoute{Rbac: &v3rbacpb.RBAC{ + Rules: &rpb.RBAC{ + Action: rpb.RBAC_ALLOW, + Policies: map[string]*rpb.Policy{ + "certain-destination-ip": { + Permissions: []*rpb.Permission{ + {Rule: &rpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*rpb.Principal{ + {Identifier: &rpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }})}), + wantError: true, + }, { name: "good-route-config-with-retry-policy", rc: goodRouteConfigWithRetryPolicy( From 52d9416739a60150e6ecfeea9737e4afbeabd8d9 Mon Sep 17 
00:00:00 2001 From: Menghan Li Date: Wed, 10 Nov 2021 15:03:10 -0800 Subject: [PATCH 334/998] xds/client: move transport_helper from xdsclient to a separate struct (#4968) --- xds/csds/csds.go | 4 +- xds/googledirectpath/googlec2p.go | 2 +- xds/googledirectpath/googlec2p_test.go | 2 +- .../clusterresolver/clusterresolver_test.go | 2 +- xds/internal/httpfilter/fault/fault_test.go | 6 +- xds/internal/resolver/xds_resolver.go | 15 +- xds/internal/testutils/fakeserver/server.go | 14 - xds/internal/xdsclient/bootstrap/bootstrap.go | 2 +- .../xdsclient/bootstrap/bootstrap_test.go | 2 +- xds/internal/xdsclient/client.go | 197 ++------------ xds/internal/xdsclient/client_test.go | 39 +-- xds/internal/xdsclient/controller.go | 38 +++ .../xdsclient/controller/controller.go | 168 ++++++++++++ .../xdsclient/controller/loadreport.go | 144 ++++++++++ .../transport.go} | 210 +++++---------- .../ack_test.go => controller/v2_ack_test.go} | 33 ++- .../cds_test.go => controller/v2_cds_test.go} | 39 +-- .../xdsclient/controller/v2_client_test.go | 212 +++++++++++++++ .../eds_test.go => controller/v2_eds_test.go} | 14 +- .../lds_test.go => controller/v2_lds_test.go} | 12 +- .../rds_test.go => controller/v2_rds_test.go} | 15 +- .../v2_testutils_test.go} | 246 ++---------------- .../{ => controller/version}/v2/client.go | 120 ++------- .../{ => controller/version}/v2/loadreport.go | 0 .../{ => controller/version}/v3/client.go | 118 ++------- .../{ => controller/version}/v3/loadreport.go | 0 .../xdsclient/controller/version/version.go | 123 +++++++++ xds/internal/xdsclient/dump_test.go | 9 +- xds/internal/xdsclient/loadreport.go | 110 +------- xds/internal/xdsclient/loadreport_test.go | 11 +- xds/internal/xdsclient/pubsub/interface.go | 39 +++ xds/internal/xdsclient/watchers.go | 16 +- .../xdsclient/watchers_cluster_test.go | 36 +-- .../xdsclient/watchers_endpoints_test.go | 28 +- .../xdsclient/watchers_listener_test.go | 32 +-- xds/internal/xdsclient/watchers_route_test.go | 24 +- 
xds/internal/xdsclient/xdsclient_test.go | 5 +- .../xdsclient/xdsresource/filter_chain.go | 2 +- .../xdsresource/filter_chain_test.go | 2 +- xds/internal/xdsclient/xdsresource/type.go | 2 +- .../xdsclient/xdsresource/unmarshal_cds.go | 2 +- .../xdsresource/unmarshal_cds_test.go | 2 +- .../xdsresource/unmarshal_eds_test.go | 2 +- .../xdsclient/xdsresource/unmarshal_lds.go | 2 +- .../xdsresource/unmarshal_lds_test.go | 2 +- .../xdsclient/xdsresource/unmarshal_rds.go | 2 +- .../xdsresource/unmarshal_rds_test.go | 2 +- .../xdsresource}/version/version.go | 0 xds/xds.go | 16 +- 49 files changed, 1051 insertions(+), 1072 deletions(-) create mode 100644 xds/internal/xdsclient/controller.go create mode 100644 xds/internal/xdsclient/controller/controller.go create mode 100644 xds/internal/xdsclient/controller/loadreport.go rename xds/internal/xdsclient/{transport_helper.go => controller/transport.go} (60%) rename xds/internal/xdsclient/{v2/ack_test.go => controller/v2_ack_test.go} (94%) rename xds/internal/xdsclient/{v2/cds_test.go => controller/v2_cds_test.go} (84%) create mode 100644 xds/internal/xdsclient/controller/v2_client_test.go rename xds/internal/xdsclient/{v2/eds_test.go => controller/v2_eds_test.go} (93%) rename xds/internal/xdsclient/{v2/lds_test.go => controller/v2_lds_test.go} (94%) rename xds/internal/xdsclient/{v2/rds_test.go => controller/v2_rds_test.go} (93%) rename xds/internal/xdsclient/{v2/client_test.go => controller/v2_testutils_test.go} (63%) rename xds/internal/xdsclient/{ => controller/version}/v2/client.go (51%) rename xds/internal/xdsclient/{ => controller/version}/v2/loadreport.go (100%) rename xds/internal/xdsclient/{ => controller/version}/v3/client.go (52%) rename xds/internal/xdsclient/{ => controller/version}/v3/loadreport.go (100%) create mode 100644 xds/internal/xdsclient/controller/version/version.go create mode 100644 xds/internal/xdsclient/pubsub/interface.go rename xds/internal/{ => xdsclient/xdsresource}/version/version.go (100%) 
diff --git a/xds/csds/csds.go b/xds/csds/csds.go index a54afaf2cb35..f1e67f1ba63b 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -40,8 +40,8 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/timestamppb" - _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register v2 xds_client. - _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register v3 xds_client. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register v2 xds_client. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register v3 xds_client. ) var ( diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 4f753e49c71a..726ecec43187 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -39,9 +39,9 @@ import ( "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. 
- "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/structpb" ) diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index c8162317cc30..4b0ab5395503 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -29,9 +29,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" ) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 808f3050e3e2..035188851a13 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -35,7 +35,7 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // V2 client registration. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // V2 client registration. 
) const ( diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 1bad1f92d65d..6ee5e654c7dc 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -52,9 +52,9 @@ import ( tpb "github.com/envoyproxy/go-control-plane/envoy/type/v3" testpb "google.golang.org/grpc/test/grpc_testing" - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. - _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register the v3 xDS API client. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. ) const defaultTestTimeout = 10 * time.Second diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 41e81a31865c..2192051ae2f6 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -73,9 +73,7 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op } defer func() { if retErr != nil { - if r.client != nil { - r.client.Close() - } + r.Close() } }() r.logger = prefixLogger(r) @@ -304,8 +302,15 @@ func (*xdsResolver) ResolveNow(o resolver.ResolveNowOptions) {} // Close closes the resolver, and also closes the underlying xdsClient. func (r *xdsResolver) Close() { - r.cancelWatch() - r.client.Close() + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. 
+ if r.cancelWatch != nil { + r.cancelWatch() + } + if r.client != nil { + r.client.Close() + } r.closed.Fire() r.logger.Infof("Shutdown") } diff --git a/xds/internal/testutils/fakeserver/server.go b/xds/internal/testutils/fakeserver/server.go index d37c1c3ef0e2..94412171003e 100644 --- a/xds/internal/testutils/fakeserver/server.go +++ b/xds/internal/testutils/fakeserver/server.go @@ -20,7 +20,6 @@ package fakeserver import ( - "context" "fmt" "io" "net" @@ -29,7 +28,6 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" @@ -134,18 +132,6 @@ func StartServer() (*Server, func(), error) { return s, func() { server.Stop() }, nil } -// XDSClientConn returns a grpc.ClientConn connected to the fakeServer. -func (xdsS *Server) XDSClientConn() (*grpc.ClientConn, func(), error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultDialTimeout) - defer cancel() - - cc, err := grpc.DialContext(ctx, xdsS.Address, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) - if err != nil { - return nil, nil, fmt.Errorf("grpc.DialContext(%s) failed: %v", xdsS.Address, err) - } - return cc, func() { cc.Close() }, nil -} - type xdsServer struct { reqChan *testutils.Channel respChan chan *Response diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index fd68367054ad..8123d94de5a3 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) const ( diff --git 
a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 6348be1324ae..dd1536845704 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) var ( diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 56441142cc5a..13e8265b65c6 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -21,134 +21,16 @@ package xdsclient import ( - "context" - "errors" "fmt" - "sync" "time" - "github.com/golang/protobuf/proto" - "google.golang.org/grpc" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -var ( - m = make(map[version.TransportAPI]APIClientBuilder) -) - -// RegisterAPIClientBuilder registers a client builder for xDS transport protocol -// version specified by b.Version(). -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple builders are -// registered for the same version, the one registered last will take effect. -func RegisterAPIClientBuilder(b APIClientBuilder) { - m[b.Version()] = b -} - -// getAPIClientBuilder returns the client builder registered for the provided -// xDS transport API version. 
-func getAPIClientBuilder(version version.TransportAPI) APIClientBuilder { - if b, ok := m[version]; ok { - return b - } - return nil -} - -// BuildOptions contains options to be passed to client builders. -type BuildOptions struct { - // Parent is a top-level xDS client which has the intelligence to take - // appropriate action based on xDS responses received from the management - // server. - Parent UpdateHandler - // Validator performs post unmarshal validation checks. - Validator xdsresource.UpdateValidatorFunc - // NodeProto contains the Node proto to be used in xDS requests. The actual - // type depends on the transport protocol version used. - NodeProto proto.Message - // Backoff returns the amount of time to backoff before retrying broken - // streams. - Backoff func(int) time.Duration - // Logger provides enhanced logging capabilities. - Logger *grpclog.PrefixLogger -} - -// APIClientBuilder creates an xDS client for a specific xDS transport protocol -// version. -type APIClientBuilder interface { - // Build builds a transport protocol specific implementation of the xDS - // client based on the provided clientConn to the management server and the - // provided options. - Build(*grpc.ClientConn, BuildOptions) (APIClient, error) - // Version returns the xDS transport protocol version used by clients build - // using this builder. - Version() version.TransportAPI -} - -// APIClient represents the functionality provided by transport protocol -// version specific implementations of the xDS client. -// -// TODO: unexport this interface and all the methods after the PR to make -// xdsClient sharable by clients. AddWatch and RemoveWatch are exported for -// v2/v3 to override because they need to keep track of LDS name for RDS to use. -// After the share xdsClient change, that's no longer necessary. After that, we -// will still keep this interface for testing purposes. 
-type APIClient interface { - // AddWatch adds a watch for an xDS resource given its type and name. - AddWatch(xdsresource.ResourceType, string) - - // RemoveWatch cancels an already registered watch for an xDS resource - // given its type and name. - RemoveWatch(xdsresource.ResourceType, string) - - // reportLoad starts an LRS stream to periodically report load using the - // provided ClientConn, which represent a connection to the management - // server. - reportLoad(ctx context.Context, cc *grpc.ClientConn, opts loadReportingOptions) - - // Close cleans up resources allocated by the API client. - Close() -} - -// loadReportingOptions contains configuration knobs for reporting load data. -type loadReportingOptions struct { - loadStore *load.Store -} - -// UpdateHandler receives and processes (by taking appropriate actions) xDS -// resource updates from an APIClient for a specific version. -type UpdateHandler interface { - // NewListeners handles updates to xDS listener resources. - NewListeners(map[string]xdsresource.ListenerUpdateErrTuple, xdsresource.UpdateMetadata) - // NewRouteConfigs handles updates to xDS RouteConfiguration resources. - NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple, xdsresource.UpdateMetadata) - // NewClusters handles updates to xDS Cluster resources. - NewClusters(map[string]xdsresource.ClusterUpdateErrTuple, xdsresource.UpdateMetadata) - // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely - // referred to as Endpoints) resources. - NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple, xdsresource.UpdateMetadata) - // NewConnectionError handles connection errors from the xDS stream. The - // error will be reported to all the resource watchers. - NewConnectionError(err error) -} - -// Function to be overridden in tests. 
-var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) { - cb := getAPIClientBuilder(apiVersion) - if cb == nil { - return nil, fmt.Errorf("no client builder for xDS API version: %v", apiVersion) - } - return cb.Build(cc, opts) -} - // clientImpl is the real implementation of the xds client. The exported Client // is a wrapper of this struct with a ref count. // @@ -157,83 +39,39 @@ var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, op // style of ccBalancerWrapper so that the Client type does not implement these // exported methods. type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config - cc *grpc.ClientConn // Connection to the management server. - apiClient APIClient + done *grpcsync.Event + config *bootstrap.Config + + controller controllerInterface logger *grpclog.PrefixLogger pubsub *pubsub.Pubsub - - // Changes to map lrsClients and the lrsClient inside the map need to be - // protected by lrsMu. - lrsMu sync.Mutex - lrsClients map[string]*lrsClient } // newWithConfig returns a new xdsClient with the given config. 
func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (_ *clientImpl, retErr error) { - switch { - case config.XDSServer == nil: - return nil, errors.New("xds: no xds_server provided") - case config.XDSServer.ServerURI == "": - return nil, errors.New("xds: no xds_server name provided in options") - case config.XDSServer.Creds == nil: - return nil, errors.New("xds: no credentials provided in options") - case config.XDSServer.NodeProto == nil: - return nil, errors.New("xds: no node_proto provided in options") - } - - dopts := []grpc.DialOption{ - config.XDSServer.Creds, - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 5 * time.Minute, - Timeout: 20 * time.Second, - }), - } - c := &clientImpl{ - done: grpcsync.NewEvent(), - config: config, - lrsClients: make(map[string]*lrsClient), + done: grpcsync.NewEvent(), + config: config, } defer func() { if retErr != nil { - if c.cc != nil { - c.cc.Close() - } - if c.pubsub != nil { - c.pubsub.Close() - } - if c.apiClient != nil { - c.apiClient.Close() - } + c.Close() } }() - cc, err := grpc.Dial(config.XDSServer.ServerURI, dopts...) - if err != nil { - // An error from a non-blocking dial indicates something serious. 
- return nil, fmt.Errorf("xds: failed to dial balancer {%s}: %v", config.XDSServer.ServerURI, err) - } - c.cc = cc c.logger = prefixLogger(c) c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) c.pubsub = pubsub.New(watchExpiryTimeout, c.logger) - apiClient, err := newAPIClient(config.XDSServer.TransportAPI, cc, BuildOptions{ - Parent: c, - Validator: c.updateValidator, - NodeProto: config.XDSServer.NodeProto, - Backoff: backoff.DefaultExponential.Backoff, - Logger: c.logger, - }) + controller, err := newController(config.XDSServer, c.pubsub, c.updateValidator, c.logger) if err != nil { - return nil, err + return nil, fmt.Errorf("xds: failed to connect to the control plane: %v", err) } - c.apiClient = apiClient + c.controller = controller + c.logger.Infof("Created") return c, nil } @@ -252,9 +90,16 @@ func (c *clientImpl) Close() { c.done.Fire() // TODO: Should we invoke the registered callbacks here with an error that // the client is closed? - c.apiClient.Close() - c.cc.Close() - c.pubsub.Close() + + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. 
+ if c.controller != nil { + c.controller.Close() + } + if c.pubsub != nil { + c.pubsub.Close() + } c.logger.Infof("Shutdown") } diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index f20d6112a84c..cd2b98950a65 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -26,6 +26,9 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" @@ -35,7 +38,6 @@ import ( "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" ) @@ -79,24 +81,24 @@ func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstra }, watchExpiryTimeout } -type testAPIClient struct { +type testController struct { done *grpcsync.Event addWatches map[xdsresource.ResourceType]*testutils.Channel removeWatches map[xdsresource.ResourceType]*testutils.Channel } -func overrideNewAPIClient() (*testutils.Channel, func()) { - origNewAPIClient := newAPIClient +func overrideNewController() (*testutils.Channel, func()) { + origNewController := newController ch := testutils.NewChannel() - newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) { - ret := newTestAPIClient() + newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) { + ret := newTestController() ch.Send(ret) return ret, nil 
} - return ch, func() { newAPIClient = origNewAPIClient } + return ch, func() { newController = origNewController } } -func newTestAPIClient() *testAPIClient { +func newTestController() *testController { addWatches := map[xdsresource.ResourceType]*testutils.Channel{ xdsresource.ListenerResource: testutils.NewChannel(), xdsresource.RouteConfigResource: testutils.NewChannel(), @@ -109,32 +111,33 @@ func newTestAPIClient() *testAPIClient { xdsresource.ClusterResource: testutils.NewChannel(), xdsresource.EndpointsResource: testutils.NewChannel(), } - return &testAPIClient{ + return &testController{ done: grpcsync.NewEvent(), addWatches: addWatches, removeWatches: removeWatches, } } -func (c *testAPIClient) AddWatch(resourceType xdsresource.ResourceType, resourceName string) { +func (c *testController) AddWatch(resourceType xdsresource.ResourceType, resourceName string) { c.addWatches[resourceType].Send(resourceName) } -func (c *testAPIClient) RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) { +func (c *testController) RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) { c.removeWatches[resourceType].Send(resourceName) } -func (c *testAPIClient) reportLoad(context.Context, *grpc.ClientConn, loadReportingOptions) { +func (c *testController) ReportLoad(server string) (*load.Store, func()) { + panic("ReportLoad is not implemented") } -func (c *testAPIClient) Close() { +func (c *testController) Close() { c.done.Fire() } // TestWatchCallAnotherWatch covers the case where watch() is called inline by a // callback. It makes sure it doesn't cause a deadlock. 
func (s) TestWatchCallAnotherWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -149,7 +152,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) clusterUpdateCh := testutils.NewChannel() firstTime := true @@ -269,7 +272,7 @@ func (s) TestClientNewSingleton(t *testing.T) { } defer func() { bootstrapNewConfig = oldBootstrapNewConfig }() - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() // The first New(). Should create a Client and a new APIClient. @@ -284,7 +287,7 @@ func (s) TestClientNewSingleton(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) // Call New() again. They should all return the same client implementation, // and should not create new API client. @@ -343,7 +346,7 @@ func (s) TestClientNewSingleton(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient2 := c2.(*testAPIClient) + apiClient2 := c2.(*testController) // The client wrapper with ref count should be the same. if client2 != client { diff --git a/xds/internal/xdsclient/controller.go b/xds/internal/xdsclient/controller.go new file mode 100644 index 000000000000..431a14498e1f --- /dev/null +++ b/xds/internal/xdsclient/controller.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/controller" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type controllerInterface interface { + AddWatch(resourceType xdsresource.ResourceType, resourceName string) + RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) + ReportLoad(server string) (*load.Store, func()) + Close() +} + +var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) { + return controller.New(config, pubsub, validator, logger) +} diff --git a/xds/internal/xdsclient/controller/controller.go b/xds/internal/xdsclient/controller/controller.go new file mode 100644 index 000000000000..09283d7423ff --- /dev/null +++ b/xds/internal/xdsclient/controller/controller.go @@ -0,0 +1,168 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package controller contains implementation to connect to the control plane. +// Including starting the ClientConn, starting the xDS stream, and +// sending/receiving messages. +// +// All the messages are parsed by the resource package (e.g. +// UnmarshalListener()) and sent to the Pubsub watchers. +package controller + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/controller/version" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// Controller manages the connection and stream to the control plane. +// +// It keeps track of what resources are being watched, and send new requests +// when new watches are added. +// +// It takes a pubsub (as an interface) as input. When a response is received, +// it's parsed, and the updates are sent to the pubsub. +type Controller struct { + config *bootstrap.ServerConfig + updateHandler pubsub.UpdateHandler + updateValidator xdsresource.UpdateValidatorFunc + logger *grpclog.PrefixLogger + + cc *grpc.ClientConn // Connection to the management server. 
+	vClient          version.VersionedClient
+	stopRunGoroutine context.CancelFunc
+
+	backoff  func(int) time.Duration
+	streamCh chan grpc.ClientStream
+	sendCh   *buffer.Unbounded
+
+	mu sync.Mutex
+	// Message specific watch infos, protected by the above mutex. These are
+	// written to, after successfully reading from the update channel, and are
+	// read from when recovering from a broken stream to resend the xDS
+	// messages. When the user of this client object cancels a watch call,
+	// these are set to nil. All accesses to the map protected and any value
+	// inside the map should be protected with the above mutex.
+	watchMap map[xdsresource.ResourceType]map[string]bool
+	// versionMap contains the version that was acked (the version in the ack
+	// request that was sent on wire). The key is rType, the value is the
+	// version string, because the versions for different resource types should
+	// be independent.
+	versionMap map[xdsresource.ResourceType]string
+	// nonceMap contains the nonce from the most recent received response.
+	nonceMap map[xdsresource.ResourceType]string
+
+	// Changes to map lrsClients and the lrsClient inside the map need to be
+	// protected by lrsMu.
+	//
+	// TODO: after LRS refactoring, each controller should only manage the LRS
+	// stream to its server. LRS streams to other servers should be managed by
+	// other controllers.
+	lrsMu      sync.Mutex
+	lrsClients map[string]*lrsClient
+}
+
+// New creates a new controller.
+func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (_ *Controller, retErr error) { + switch { + case config == nil: + return nil, errors.New("xds: no xds_server provided") + case config.ServerURI == "": + return nil, errors.New("xds: no xds_server name provided in options") + case config.Creds == nil: + return nil, errors.New("xds: no credentials provided in options") + case config.NodeProto == nil: + return nil, errors.New("xds: no node_proto provided in options") + } + + dopts := []grpc.DialOption{ + config.Creds, + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }), + } + + ret := &Controller{ + config: config, + updateValidator: validator, + updateHandler: updateHandler, + + backoff: backoff.DefaultExponential.Backoff, // TODO: should this be configurable? + streamCh: make(chan grpc.ClientStream, 1), + sendCh: buffer.NewUnbounded(), + watchMap: make(map[xdsresource.ResourceType]map[string]bool), + versionMap: make(map[xdsresource.ResourceType]string), + nonceMap: make(map[xdsresource.ResourceType]string), + + lrsClients: make(map[string]*lrsClient), + } + + defer func() { + if retErr != nil { + ret.Close() + } + }() + + cc, err := grpc.Dial(config.ServerURI, dopts...) + if err != nil { + // An error from a non-blocking dial indicates something serious. 
+ return nil, fmt.Errorf("xds: failed to dial control plane {%s}: %v", config.ServerURI, err) + } + ret.cc = cc + + builder := version.GetAPIClientBuilder(config.TransportAPI) + if builder == nil { + return nil, fmt.Errorf("no client builder for xDS API version: %v", config.TransportAPI) + } + apiClient, err := builder(version.BuildOptions{NodeProto: config.NodeProto, Logger: logger}) + if err != nil { + return nil, err + } + ret.vClient = apiClient + + ctx, cancel := context.WithCancel(context.Background()) + ret.stopRunGoroutine = cancel + go ret.run(ctx) + + return ret, nil +} + +// Close closes the controller. +func (t *Controller) Close() { + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. + if t.stopRunGoroutine != nil { + t.stopRunGoroutine() + } + if t.cc != nil { + t.cc.Close() + } +} diff --git a/xds/internal/xdsclient/controller/loadreport.go b/xds/internal/xdsclient/controller/loadreport.go new file mode 100644 index 000000000000..f8cfd017e415 --- /dev/null +++ b/xds/internal/xdsclient/controller/loadreport.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package controller
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/xds/internal/xdsclient/controller/version"
+	"google.golang.org/grpc/xds/internal/xdsclient/load"
+)
+
+// ReportLoad starts a load reporting stream to the given server. If the server
+// is not an empty string, and is different from the management server, a new
+// ClientConn will be created.
+//
+// The same options used for creating the Client will be used (including
+// NodeProto, and dial options if necessary).
+//
+// It returns a Store for the user to report loads, a function to cancel the
+// load reporting stream.
+//
+// TODO: LRS refactor; maybe a new controller should be created for a separate
+// server, so that the same stream can be shared by different reporters to the
+// same server, even if they originate from different Controllers.
+func (c *Controller) ReportLoad(server string) (*load.Store, func()) {
+	c.lrsMu.Lock()
+	defer c.lrsMu.Unlock()
+
+	// If there's already a client to this server, use it. Otherwise, create
+	// one.
+	lrsC, ok := c.lrsClients[server]
+	if !ok {
+		lrsC = newLRSClient(c, server)
+		c.lrsClients[server] = lrsC
+	}
+
+	store := lrsC.ref()
+	return store, func() {
+		// This is a callback, need to hold lrsMu.
+		c.lrsMu.Lock()
+		defer c.lrsMu.Unlock()
+		if lrsC.unRef() {
+			// Delete the lrsClient from map if this is the last reference.
+			delete(c.lrsClients, server)
+		}
+	}
+}
+
+// lrsClient maps to one lrsServer. It contains:
+// - a ClientConn to this server (only if it's different from the management
+//   server)
+// - a load.Store that contains loads only for this server
+type lrsClient struct {
+	parent *Controller
+	server string
+
+	cc           *grpc.ClientConn // nil if the server is same as the management server
+	refCount     int
+	cancelStream func()
+	loadStore    *load.Store
+}
+
+// newLRSClient creates a new LRS stream to the server.
+func newLRSClient(parent *Controller, server string) *lrsClient { + return &lrsClient{ + parent: parent, + server: server, + refCount: 0, + } +} + +// ref increments the refCount. If this is the first ref, it starts the LRS stream. +// +// Not thread-safe, caller needs to synchronize. +func (lrsC *lrsClient) ref() *load.Store { + lrsC.refCount++ + if lrsC.refCount == 1 { + lrsC.startStream() + } + return lrsC.loadStore +} + +// unRef decrements the refCount, and closes the stream if refCount reaches 0 +// (and close the cc if cc is not xDS cc). It returns whether refCount reached 0 +// after this call. +// +// Not thread-safe, caller needs to synchronize. +func (lrsC *lrsClient) unRef() (closed bool) { + lrsC.refCount-- + if lrsC.refCount != 0 { + return false + } + lrsC.parent.logger.Infof("Stopping load report to server: %s", lrsC.server) + lrsC.cancelStream() + if lrsC.cc != nil { + lrsC.cc.Close() + } + return true +} + +// startStream starts the LRS stream to the server. If server is not the same +// management server from the parent, it also creates a ClientConn. +func (lrsC *lrsClient) startStream() { + var cc *grpc.ClientConn + + lrsC.parent.logger.Infof("Starting load report to server: %s", lrsC.server) + if lrsC.server == "" || lrsC.server == lrsC.parent.config.ServerURI { + // Reuse the xDS client if server is the same. + cc = lrsC.parent.cc + } else { + lrsC.parent.logger.Infof("LRS server is different from management server, starting a new ClientConn") + ccNew, err := grpc.Dial(lrsC.server, lrsC.parent.config.Creds) + if err != nil { + // An error from a non-blocking dial indicates something serious. + lrsC.parent.logger.Infof("xds: failed to dial load report server {%s}: %v", lrsC.server, err) + return + } + cc = ccNew + lrsC.cc = ccNew + } + + var ctx context.Context + ctx, lrsC.cancelStream = context.WithCancel(context.Background()) + + // Create the store and stream. 
+ lrsC.loadStore = load.NewStore() + go lrsC.parent.reportLoad(ctx, cc, version.LoadReportingOptions{LoadStore: lrsC.loadStore}) +} diff --git a/xds/internal/xdsclient/transport_helper.go b/xds/internal/xdsclient/controller/transport.go similarity index 60% rename from xds/internal/xdsclient/transport_helper.go rename to xds/internal/xdsclient/controller/transport.go index ba456eb390ea..b7746ed883c3 100644 --- a/xds/internal/xdsclient/transport_helper.go +++ b/xds/internal/xdsclient/controller/transport.go @@ -16,140 +16,23 @@ * */ -package xdsclient +package controller import ( "context" - "sync" + "fmt" "time" "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" + xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - - "google.golang.org/grpc" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" ) -// ErrResourceTypeUnsupported is an error used to indicate an unsupported xDS -// resource type. The wrapped ErrStr contains the details. -type ErrResourceTypeUnsupported struct { - ErrStr string -} - -// Error helps implements the error interface. -func (e ErrResourceTypeUnsupported) Error() string { - return e.ErrStr -} - -// VersionedClient is the interface to be provided by the transport protocol -// specific client implementations. This mainly deals with the actual sending -// and receiving of messages. -type VersionedClient interface { - // NewStream returns a new xDS client stream specific to the underlying - // transport protocol version. - NewStream(ctx context.Context) (grpc.ClientStream, error) - - // SendRequest constructs and sends out a DiscoveryRequest message specific - // to the underlying transport protocol version. 
- SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error - - // RecvResponse uses the provided stream to receive a response specific to - // the underlying transport protocol version. - RecvResponse(s grpc.ClientStream) (proto.Message, error) - - // HandleResponse parses and validates the received response and notifies - // the top-level client which in turn notifies the registered watchers. - // - // Return values are: resourceType, version, nonce, error. - // If the provided protobuf message contains a resource type which is not - // supported, implementations must return an error of type - // ErrResourceTypeUnsupported. - HandleResponse(proto.Message) (xdsresource.ResourceType, string, string, error) - - // NewLoadStatsStream returns a new LRS client stream specific to the underlying - // transport protocol version. - NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) - - // SendFirstLoadStatsRequest constructs and sends the first request on the - // LRS stream. - SendFirstLoadStatsRequest(s grpc.ClientStream) error - - // HandleLoadStatsResponse receives the first response from the server which - // contains the load reporting interval and the clusters for which the - // server asks the client to report load for. - // - // If the response sets SendAllClusters to true, the returned clusters is - // nil. - HandleLoadStatsResponse(s grpc.ClientStream) (clusters []string, _ time.Duration, _ error) - - // SendLoadStatsRequest will be invoked at regular intervals to send load - // report with load data reported since the last time this method was - // invoked. - SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error -} - -// TransportHelper contains all xDS transport protocol related functionality -// which is common across different versioned client implementations. 
-// -// TransportHelper takes care of sending and receiving xDS requests and -// responses on an ADS stream. It also takes care of ACK/NACK handling. It -// delegates to the actual versioned client implementations wherever -// appropriate. -// -// Implements the APIClient interface which makes it possible for versioned -// client implementations to embed this type, and thereby satisfy the interface -// requirements. -type TransportHelper struct { - cancelCtx context.CancelFunc - - vClient VersionedClient - logger *grpclog.PrefixLogger - backoff func(int) time.Duration - streamCh chan grpc.ClientStream - sendCh *buffer.Unbounded - - mu sync.Mutex - // Message specific watch infos, protected by the above mutex. These are - // written to, after successfully reading from the update channel, and are - // read from when recovering from a broken stream to resend the xDS - // messages. When the user of this client object cancels a watch call, - // these are set to nil. All accesses to the map protected and any value - // inside the map should be protected with the above mutex. - watchMap map[xdsresource.ResourceType]map[string]bool - // versionMap contains the version that was acked (the version in the ack - // request that was sent on wire). The key is rType, the value is the - // version string, becaues the versions for different resource types should - // be independent. - versionMap map[xdsresource.ResourceType]string - // nonceMap contains the nonce from the most recent received response. - nonceMap map[xdsresource.ResourceType]string -} - -// NewTransportHelper creates a new transport helper to be used by versioned -// client implementations. 
-func NewTransportHelper(vc VersionedClient, logger *grpclog.PrefixLogger, backoff func(int) time.Duration) *TransportHelper { - ctx, cancelCtx := context.WithCancel(context.Background()) - t := &TransportHelper{ - cancelCtx: cancelCtx, - vClient: vc, - logger: logger, - backoff: backoff, - - streamCh: make(chan grpc.ClientStream, 1), - sendCh: buffer.NewUnbounded(), - watchMap: make(map[xdsresource.ResourceType]map[string]bool), - versionMap: make(map[xdsresource.ResourceType]string), - nonceMap: make(map[xdsresource.ResourceType]string), - } - - go t.run(ctx) - return t -} - // AddWatch adds a watch for an xDS resource given its type and name. -func (t *TransportHelper) AddWatch(rType xdsresource.ResourceType, resourceName string) { +func (t *Controller) AddWatch(rType xdsresource.ResourceType, resourceName string) { t.sendCh.Put(&watchAction{ rType: rType, remove: false, @@ -159,7 +42,7 @@ func (t *TransportHelper) AddWatch(rType xdsresource.ResourceType, resourceName // RemoveWatch cancels an already registered watch for an xDS resource // given its type and name. -func (t *TransportHelper) RemoveWatch(rType xdsresource.ResourceType, resourceName string) { +func (t *Controller) RemoveWatch(rType xdsresource.ResourceType, resourceName string) { t.sendCh.Put(&watchAction{ rType: rType, remove: true, @@ -167,15 +50,10 @@ func (t *TransportHelper) RemoveWatch(rType xdsresource.ResourceType, resourceNa }) } -// Close closes the transport helper. -func (t *TransportHelper) Close() { - t.cancelCtx() -} - // run starts an ADS stream (and backs off exponentially, if the previous // stream failed without receiving a single reply) and runs the sender and // receiver routines to send and receive data from the stream respectively. 
-func (t *TransportHelper) run(ctx context.Context) { +func (t *Controller) run(ctx context.Context) { go t.send(ctx) // TODO: start a goroutine monitoring ClientConn's connectivity state, and // report error (and log) when stats is transient failure. @@ -201,7 +79,7 @@ func (t *TransportHelper) run(ctx context.Context) { } retries++ - stream, err := t.vClient.NewStream(ctx) + stream, err := t.vClient.NewStream(ctx, t.cc) if err != nil { t.logger.Warningf("xds: ADS stream creation failed: %v", err) continue @@ -235,7 +113,7 @@ func (t *TransportHelper) run(ctx context.Context) { // Note that this goroutine doesn't do anything to the old stream when there's a // new one. In fact, there should be only one stream in progress, and new one // should only be created when the old one fails (recv returns an error). -func (t *TransportHelper) send(ctx context.Context) { +func (t *Controller) send(ctx context.Context) { var stream grpc.ClientStream for { select { @@ -288,7 +166,7 @@ func (t *TransportHelper) send(ctx context.Context) { // that here because the stream has just started and Send() usually returns // quickly (once it pushes the message onto the transport layer) and is only // ever blocked if we don't have enough flow control quota. -func (t *TransportHelper) sendExisting(stream grpc.ClientStream) bool { +func (t *Controller) sendExisting(stream grpc.ClientStream) bool { t.mu.Lock() defer t.mu.Unlock() @@ -308,16 +186,19 @@ func (t *TransportHelper) sendExisting(stream grpc.ClientStream) bool { // recv receives xDS responses on the provided ADS stream and branches out to // message specific handlers. 
-func (t *TransportHelper) recv(stream grpc.ClientStream) bool { +func (t *Controller) recv(stream grpc.ClientStream) bool { success := false for { resp, err := t.vClient.RecvResponse(stream) if err != nil { + t.updateHandler.NewConnectionError(err) t.logger.Warningf("ADS stream is closed with error: %v", err) return success } - rType, version, nonce, err := t.vClient.HandleResponse(resp) - if e, ok := err.(ErrResourceTypeUnsupported); ok { + + rType, version, nonce, err := t.handleResponse(resp) + + if e, ok := err.(xdsresourceversion.ErrResourceTypeUnsupported); ok { t.logger.Warningf("%s", e.ErrStr) continue } @@ -343,6 +224,43 @@ func (t *TransportHelper) recv(stream grpc.ClientStream) bool { } } +func (t *Controller) handleResponse(resp proto.Message) (xdsresource.ResourceType, string, string, error) { + rType, resource, version, nonce, err := t.vClient.ParseResponse(resp) + if err != nil { + return rType, version, nonce, err + } + opts := &xdsresource.UnmarshalOptions{ + Version: version, + Resources: resource, + Logger: t.logger, + UpdateValidator: t.updateValidator, + } + var md xdsresource.UpdateMetadata + switch rType { + case xdsresource.ListenerResource: + var update map[string]xdsresource.ListenerUpdateErrTuple + update, md, err = xdsresource.UnmarshalListener(opts) + t.updateHandler.NewListeners(update, md) + case xdsresource.RouteConfigResource: + var update map[string]xdsresource.RouteConfigUpdateErrTuple + update, md, err = xdsresource.UnmarshalRouteConfig(opts) + t.updateHandler.NewRouteConfigs(update, md) + case xdsresource.ClusterResource: + var update map[string]xdsresource.ClusterUpdateErrTuple + update, md, err = xdsresource.UnmarshalCluster(opts) + t.updateHandler.NewClusters(update, md) + case xdsresource.EndpointsResource: + var update map[string]xdsresource.EndpointsUpdateErrTuple + update, md, err = xdsresource.UnmarshalEndpoints(opts) + t.updateHandler.NewEndpoints(update, md) + default: + return rType, "", "", 
xdsresourceversion.ErrResourceTypeUnsupported{ + ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", rType), + } + } + return rType, version, nonce, err +} + func mapToSlice(m map[string]bool) []string { ret := make([]string, 0, len(m)) for i := range m { @@ -360,7 +278,7 @@ type watchAction struct { // processWatchInfo pulls the fields needed by the request from a watchAction. // // It also updates the watch map. -func (t *TransportHelper) processWatchInfo(w *watchAction) (target []string, rType xdsresource.ResourceType, ver, nonce string) { +func (t *Controller) processWatchInfo(w *watchAction) (target []string, rType xdsresource.ResourceType, ver, nonce string) { t.mu.Lock() defer t.mu.Unlock() @@ -404,7 +322,7 @@ type ackAction struct { // processAckInfo pulls the fields needed by the ack request from a ackAction. // // If no active watch is found for this ack, it returns false for send. -func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType xdsresource.ResourceType, version, nonce string, send bool) { +func (t *Controller) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType xdsresource.ResourceType, version, nonce string, send bool) { if ack.stream != stream { // If ACK's stream isn't the current sending stream, this means the ACK // was pushed to queue before the old stream broke, and a new stream has @@ -450,7 +368,7 @@ func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStrea // reportLoad starts an LRS stream to report load data to the management server. // It blocks until the context is cancelled. 
-func (t *TransportHelper) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts loadReportingOptions) { +func (t *Controller) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts controllerversion.LoadReportingOptions) { retries := 0 for { if ctx.Err() != nil { @@ -472,28 +390,28 @@ func (t *TransportHelper) reportLoad(ctx context.Context, cc *grpc.ClientConn, o retries++ stream, err := t.vClient.NewLoadStatsStream(ctx, cc) if err != nil { - logger.Warningf("lrs: failed to create stream: %v", err) + t.logger.Warningf("lrs: failed to create stream: %v", err) continue } - logger.Infof("lrs: created LRS stream") + t.logger.Infof("lrs: created LRS stream") if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil { - logger.Warningf("lrs: failed to send first request: %v", err) + t.logger.Warningf("lrs: failed to send first request: %v", err) continue } clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream) if err != nil { - logger.Warning(err) + t.logger.Warningf("%v", err) continue } retries = 0 - t.sendLoads(ctx, stream, opts.loadStore, clusters, interval) + t.sendLoads(ctx, stream, opts.LoadStore, clusters, interval) } } -func (t *TransportHelper) sendLoads(ctx context.Context, stream grpc.ClientStream, store *load.Store, clusterNames []string, interval time.Duration) { +func (t *Controller) sendLoads(ctx context.Context, stream grpc.ClientStream, store *load.Store, clusterNames []string, interval time.Duration) { tick := time.NewTicker(interval) defer tick.Stop() for { @@ -503,7 +421,7 @@ func (t *TransportHelper) sendLoads(ctx context.Context, stream grpc.ClientStrea return } if err := t.vClient.SendLoadStatsRequest(stream, store.Stats(clusterNames)); err != nil { - logger.Warning(err) + t.logger.Warningf("%v", err) return } } diff --git a/xds/internal/xdsclient/v2/ack_test.go b/xds/internal/xdsclient/controller/v2_ack_test.go similarity index 94% rename from xds/internal/xdsclient/v2/ack_test.go rename to 
xds/internal/xdsclient/controller/v2_ack_test.go index 51716676dd44..6680de7911b2 100644 --- a/xds/internal/xdsclient/v2/ack_test.go +++ b/xds/internal/xdsclient/controller/v2_ack_test.go @@ -15,7 +15,7 @@ * limitations under the License. */ -package v2 +package controller import ( "context" @@ -28,12 +28,11 @@ import ( "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) const ( @@ -41,12 +40,12 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond ) -func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *client, cbLDS, cbRDS, cbCDS, cbEDS *testutils.Channel, cleanup func()) { +func startXDSV2Client(t *testing.T, controlPlaneAddr string) (v2c *Controller, cbLDS, cbRDS, cbCDS, cbEDS *testutils.Channel, cleanup func()) { cbLDS = testutils.NewChannel() cbRDS = testutils.NewChannel() cbCDS = testutils.NewChannel() cbEDS = testutils.NewChannel() - v2c, err := newV2Client(&testUpdateReceiver{ + v2c, err := newTestController(&testUpdateReceiver{ f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { t.Logf("Received %v callback with {%+v}", rType, d) switch rType { @@ -68,7 +67,7 @@ func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *client, cbLDS, cb } } }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) + }, controlPlaneAddr, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) } @@ -117,7 +116,7 @@ func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion * // startXDS calls watch to send the 
first request. It then sends a good response // and checks for ack. -func startXDS(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, v2c *client, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) { +func startXDS(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, v2c *Controller, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) { nameToWatch := "" switch rType { case xdsresource.ListenerResource: @@ -198,10 +197,10 @@ func (s) TestV2ClientAck(t *testing.T) { versionEDS = 4000 ) - fakeServer, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2cCleanup := startXDSV2Client(t, cc) + v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2cCleanup := startXDSV2Client(t, fakeServer.Address) defer v2cCleanup() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -271,10 +270,10 @@ func (s) TestV2ClientAck(t *testing.T) { func (s) TestV2ClientAckFirstIsNack(t *testing.T) { var versionLDS = 1000 - fakeServer, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, cc) + v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) defer v2cCleanup() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -306,10 +305,10 @@ func (s) TestV2ClientAckFirstIsNack(t *testing.T) { func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) { var versionLDS = 1000 - fakeServer, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, cc) + v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) defer v2cCleanup() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -351,10 +350,10 @@ func (s) 
TestV2ClientAckNackAfterNewWatch(t *testing.T) { func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) { var versionCDS = 3000 - fakeServer, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, cc) + v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) defer v2cCleanup() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -409,10 +408,10 @@ func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) { func (s) TestV2ClientAckCancelResponseRace(t *testing.T) { var versionCDS = 3000 - fakeServer, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, cc) + v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) defer v2cCleanup() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/xds/internal/xdsclient/v2/cds_test.go b/xds/internal/xdsclient/controller/v2_cds_test.go similarity index 84% rename from xds/internal/xdsclient/v2/cds_test.go rename to xds/internal/xdsclient/controller/v2_cds_test.go index e44ee0cc48b9..20485dc1c280 100644 --- a/xds/internal/xdsclient/v2/cds_test.go +++ b/xds/internal/xdsclient/controller/v2_cds_test.go @@ -16,7 +16,7 @@ * */ -package v2 +package controller import ( "testing" @@ -27,8 +27,8 @@ import ( anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) const ( @@ -119,31 +119,6 @@ func (s) TestCDSHandleResponse(t *testing.T) { }, wantUpdateErr: false, }, - // Response does not contain Cluster proto. 
- { - name: "no-cluster-proto-in-response", - cdsResponse: badResourceTypeInLDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response contains no clusters. - { - name: "no-cluster", - cdsResponse: &xdspb.DiscoveryResponse{}, - wantErr: false, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, // Response contains one good cluster we are not interested in. { name: "one-uninteresting-cluster", @@ -190,22 +165,22 @@ func (s) TestCDSHandleResponse(t *testing.T) { // TestCDSHandleResponseWithoutWatch tests the case where the v2Client receives // a CDS response without a registered watcher. func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) { - _, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, err := newV2Client(&testUpdateReceiver{ + v2c, err := newTestController(&testUpdateReceiver{ f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) + }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) } defer v2c.Close() - if v2c.handleCDSResponse(badResourceTypeInLDSResponse) == nil { + if _, _, _, err := v2c.handleResponse(badResourceTypeInLDSResponse); err == nil { t.Fatal("v2c.handleCDSResponse() succeeded, should have failed") } - if v2c.handleCDSResponse(goodCDSResponse1) != nil { + if _, _, _, err := v2c.handleResponse(goodCDSResponse1); err != nil { t.Fatal("v2c.handleCDSResponse() succeeded, should have failed") } } diff --git a/xds/internal/xdsclient/controller/v2_client_test.go b/xds/internal/xdsclient/controller/v2_client_test.go new file mode 100644 index 
000000000000..942f18649034 --- /dev/null +++ b/xds/internal/xdsclient/controller/v2_client_test.go @@ -0,0 +1,212 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controller + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/xds/internal/testutils/fakeserver" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client. +) + +// TestV2ClientBackoffAfterRecvError verifies if the v2Client backs off when it +// encounters a Recv error while receiving an LDS response. +func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { + fakeServer, cleanup := startServer(t) + defer cleanup() + + // Override the v2Client backoff function with this, so that we can verify + // that a backoff actually was triggered. 
+ boCh := make(chan int, 1) + clientBackoff := func(v int) time.Duration { + boCh <- v + return 0 + } + + callbackCh := make(chan struct{}) + v2c, err := newTestController(&testUpdateReceiver{ + f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) { close(callbackCh) }, + }, fakeServer.Address, goodNodeProto, clientBackoff, nil) + if err != nil { + t.Fatal(err) + } + defer v2c.Close() + t.Log("Started xds v2Client...") + + v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { + t.Fatalf("Timeout expired when expecting an LDS request") + } + t.Log("FakeServer received request...") + + fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} + t.Log("Bad LDS response pushed to fakeServer...") + + timer := time.NewTimer(defaultTestTimeout) + select { + case <-timer.C: + t.Fatal("Timeout when expecting LDS update") + case <-boCh: + timer.Stop() + t.Log("v2Client backed off before retrying...") + case <-callbackCh: + t.Fatal("Received unexpected LDS callback") + } + + if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { + t.Fatalf("Timeout expired when expecting an LDS request") + } + t.Log("FakeServer received request after backoff...") +} + +// TestV2ClientRetriesAfterBrokenStream verifies the case where a stream +// encountered a Recv() error, and is expected to send out xDS requests for +// registered watchers once it comes back up again. 
+func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { + fakeServer, cleanup := startServer(t) + defer cleanup() + + callbackCh := testutils.NewChannel() + v2c, err := newTestController(&testUpdateReceiver{ + f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { + if rType == xdsresource.ListenerResource { + if u, ok := d[goodLDSTarget1]; ok { + t.Logf("Received LDS callback with ldsUpdate {%+v}", u) + callbackCh.Send(struct{}{}) + } + } + }, + }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) + if err != nil { + t.Fatal(err) + } + defer v2c.Close() + t.Log("Started xds v2Client...") + + v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { + t.Fatalf("Timeout expired when expecting an LDS request") + } + t.Log("FakeServer received request...") + + fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} + t.Log("Good LDS response pushed to fakeServer...") + + if _, err := callbackCh.Receive(ctx); err != nil { + t.Fatal("Timeout when expecting LDS update") + } + + // Read the ack, so the next request is sent after stream re-creation. 
+ if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { + t.Fatalf("Timeout expired when expecting an LDS ACK") + } + + fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} + t.Log("Bad LDS response pushed to fakeServer...") + + val, err := fakeServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout expired when expecting LDS update") + } + gotRequest := val.(*fakeserver.Request) + if !proto.Equal(gotRequest.Req, goodLDSRequest) { + t.Fatalf("gotRequest: %+v, wantRequest: %+v", gotRequest.Req, goodLDSRequest) + } +} + +// TestV2ClientWatchWithoutStream verifies the case where a watch is started +// when the xds stream is not created. The watcher should not receive any update +// (because there won't be any xds response, and timeout is done at a upper +// level). And when the stream is re-created, the watcher should get future +// updates. +func (s) TestV2ClientWatchWithoutStream(t *testing.T) { + fakeServer, sCleanup, err := fakeserver.StartServer() + if err != nil { + t.Fatalf("Failed to start fake xDS server: %v", err) + } + defer sCleanup() + + const scheme = "xds-client-test-whatever" + rb := manual.NewBuilderWithScheme(scheme) + rb.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "no.such.server"}}}) + resolver.Register(rb) + defer resolver.UnregisterForTesting(scheme) + + callbackCh := testutils.NewChannel() + v2c, err := newTestController(&testUpdateReceiver{ + f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { + if rType == xdsresource.ListenerResource { + if u, ok := d[goodLDSTarget1]; ok { + t.Logf("Received LDS callback with ldsUpdate {%+v}", u) + callbackCh.Send(u) + } + } + }, + }, scheme+":///whatever", goodNodeProto, func(int) time.Duration { return 0 }, nil) + if err != nil { + t.Fatal(err) + } + defer v2c.Close() + t.Log("Started xds v2Client...") + + // This watch is started when the xds-ClientConn is in Transient Failure, + // 
and no xds stream is created. + v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) + + // The watcher should receive an update, with a timeout error in it. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if v, err := callbackCh.Receive(sCtx); err == nil { + t.Fatalf("Expect an timeout error from watcher, got %v", v) + } + + // Send the real server address to the ClientConn, the stream should be + // created, and the previous watch should be sent. + rb.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: fakeServer.Address}}, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { + t.Fatalf("Timeout expired when expecting an LDS request") + } + t.Log("FakeServer received request...") + + fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} + t.Log("Good LDS response pushed to fakeServer...") + + if v, err := callbackCh.Receive(ctx); err != nil { + t.Fatal("Timeout when expecting LDS update") + } else if _, ok := v.(xdsresource.ListenerUpdateErrTuple); !ok { + t.Fatalf("Expect an LDS update from watcher, got %v", v) + } +} diff --git a/xds/internal/xdsclient/v2/eds_test.go b/xds/internal/xdsclient/controller/v2_eds_test.go similarity index 93% rename from xds/internal/xdsclient/v2/eds_test.go rename to xds/internal/xdsclient/controller/v2_eds_test.go index 9503c0397b91..aaa84f9c3d63 100644 --- a/xds/internal/xdsclient/v2/eds_test.go +++ b/xds/internal/xdsclient/controller/v2_eds_test.go @@ -16,7 +16,7 @@ * */ -package v2 +package controller import ( "testing" @@ -28,8 +28,8 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" xtestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) var ( @@ -179,22 +179,22 @@ func (s) TestEDSHandleResponse(t *testing.T) { // TestEDSHandleResponseWithoutWatch tests the case where the v2Client // receives an EDS response without a registered EDS watcher. func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) { - _, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, err := newV2Client(&testUpdateReceiver{ + v2c, err := newTestController(&testUpdateReceiver{ f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) + }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) } defer v2c.Close() - if v2c.handleEDSResponse(badResourceTypeInEDSResponse) == nil { + if _, _, _, err := v2c.handleResponse(badResourceTypeInEDSResponse); err == nil { t.Fatal("v2c.handleEDSResponse() succeeded, should have failed") } - if v2c.handleEDSResponse(goodEDSResponse1) != nil { + if _, _, _, err := v2c.handleResponse(goodEDSResponse1); err != nil { t.Fatal("v2c.handleEDSResponse() succeeded, should have failed") } } diff --git a/xds/internal/xdsclient/v2/lds_test.go b/xds/internal/xdsclient/controller/v2_lds_test.go similarity index 94% rename from xds/internal/xdsclient/v2/lds_test.go rename to xds/internal/xdsclient/controller/v2_lds_test.go index 008b29f4db9d..56b292988b03 100644 --- a/xds/internal/xdsclient/v2/lds_test.go +++ b/xds/internal/xdsclient/controller/v2_lds_test.go @@ -16,7 +16,7 @@ * */ -package v2 +package controller import ( "testing" @@ -177,22 +177,22 @@ func (s) TestLDSHandleResponse(t *testing.T) { // TestLDSHandleResponseWithoutWatch tests the case where the client receives // an LDS response without a registered watcher. 
func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) { - _, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, err := newV2Client(&testUpdateReceiver{ + v2c, err := newTestController(&testUpdateReceiver{ f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) + }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) } defer v2c.Close() - if v2c.handleLDSResponse(badResourceTypeInLDSResponse) == nil { + if _, _, _, err := v2c.handleResponse(badResourceTypeInLDSResponse); err == nil { t.Fatal("v2c.handleLDSResponse() succeeded, should have failed") } - if v2c.handleLDSResponse(goodLDSResponse1) != nil { + if _, _, _, err := v2c.handleResponse(goodLDSResponse1); err != nil { t.Fatal("v2c.handleLDSResponse() succeeded, should have failed") } } diff --git a/xds/internal/xdsclient/v2/rds_test.go b/xds/internal/xdsclient/controller/v2_rds_test.go similarity index 93% rename from xds/internal/xdsclient/v2/rds_test.go rename to xds/internal/xdsclient/controller/v2_rds_test.go index 9ac3b3041ed8..0b3dbfc8cfaf 100644 --- a/xds/internal/xdsclient/v2/rds_test.go +++ b/xds/internal/xdsclient/controller/v2_rds_test.go @@ -16,7 +16,7 @@ * */ -package v2 +package controller import ( "context" @@ -26,7 +26,6 @@ import ( xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -35,7 +34,7 @@ import ( // This is called by RDS tests to start LDS first, because LDS is a // pre-requirement for RDS, and RDS handle would fail without an existing LDS // watch. 
-func doLDS(ctx context.Context, t *testing.T, v2c xdsclient.APIClient, fakeServer *fakeserver.Server) { +func doLDS(ctx context.Context, t *testing.T, v2c *Controller, fakeServer *fakeserver.Server) { v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { t.Fatalf("Timeout waiting for LDS request: %v", err) @@ -179,12 +178,12 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { // TestRDSHandleResponseWithoutRDSWatch tests the case where the v2Client // receives an RDS response without a registered RDS watcher. func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { - fakeServer, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() - v2c, err := newV2Client(&testUpdateReceiver{ + v2c, err := newTestController(&testUpdateReceiver{ f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) + }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) } @@ -194,11 +193,11 @@ func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { defer cancel() doLDS(ctx, t, v2c, fakeServer) - if v2c.handleRDSResponse(badResourceTypeInRDSResponse) == nil { + if _, _, _, err := v2c.handleResponse(badResourceTypeInRDSResponse); err == nil { t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") } - if v2c.handleRDSResponse(goodRDSResponse1) != nil { + if _, _, _, err := v2c.handleResponse(goodRDSResponse1); err != nil { t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") } } diff --git a/xds/internal/xdsclient/v2/client_test.go b/xds/internal/xdsclient/controller/v2_testutils_test.go similarity index 63% rename from xds/internal/xdsclient/v2/client_test.go rename to xds/internal/xdsclient/controller/v2_testutils_test.go index 28edae57b0f9..dfd195827d46 100644 --- 
a/xds/internal/xdsclient/v2/client_test.go +++ b/xds/internal/xdsclient/controller/v2_testutils_test.go @@ -16,11 +16,10 @@ * */ -package v2 +package controller import ( "context" - "errors" "testing" "time" @@ -32,12 +31,11 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" @@ -349,7 +347,7 @@ func (t *testUpdateReceiver) newUpdate(rType xdsresource.ResourceType, d map[str func testWatchHandle(t *testing.T, test *watchHandleTestcase) { t.Helper() - fakeServer, cc, cleanup := startServerAndGetCC(t) + fakeServer, cleanup := startServer(t) defer cleanup() type updateErr struct { @@ -359,7 +357,7 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { } gotUpdateCh := testutils.NewChannel() - v2c, err := newV2Client(&testUpdateReceiver{ + v2c, err := newTestController(&testUpdateReceiver{ f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { if rType == test.rType { switch test.rType { @@ -390,7 +388,7 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { } } }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) + }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) if err != nil { t.Fatal(err) } @@ -414,18 +412,7 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) 
{ // // Also note that this won't trigger ACK, so there's no need to clear the // request channel afterwards. - var handleXDSResp func(response *xdspb.DiscoveryResponse) error - switch test.rType { - case xdsresource.ListenerResource: - handleXDSResp = v2c.handleLDSResponse - case xdsresource.RouteConfigResource: - handleXDSResp = v2c.handleRDSResponse - case xdsresource.ClusterResource: - handleXDSResp = v2c.handleCDSResponse - case xdsresource.EndpointsResource: - handleXDSResp = v2c.handleEDSResponse - } - if err := handleXDSResp(test.responseToHandle); (err != nil) != test.wantHandleErr { + if _, _, _, err := v2c.handleResponse(test.responseToHandle); (err != nil) != test.wantHandleErr { t.Fatalf("v2c.handleRDSResponse() returned err: %v, wantErr: %v", err, test.wantHandleErr) } @@ -454,220 +441,31 @@ func testWatchHandle(t *testing.T, test *watchHandleTestcase) { } } -// startServerAndGetCC starts a fake XDS server and also returns a ClientConn +// startServer starts a fake XDS server and also returns a ClientConn // connected to it. 
-func startServerAndGetCC(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) { +func startServer(t *testing.T) (*fakeserver.Server, func()) { t.Helper() - fs, sCleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } - - cc, ccCleanup, err := fs.XDSClientConn() - if err != nil { - sCleanup() - t.Fatalf("Failed to get a clientConn to the fake xDS server: %v", err) - } - return fs, cc, func() { - sCleanup() - ccCleanup() - } + return fs, sCleanup } -func newV2Client(p xdsclient.UpdateHandler, cc *grpc.ClientConn, n *basepb.Node, b func(int) time.Duration, l *grpclog.PrefixLogger) (*client, error) { - c, err := newClient(cc, xdsclient.BuildOptions{ - Parent: p, - NodeProto: n, - Backoff: b, - Logger: l, - }) +func newTestController(p pubsub.UpdateHandler, controlPlanAddr string, n *basepb.Node, b func(int) time.Duration, l *grpclog.PrefixLogger) (*Controller, error) { + c, err := New(&bootstrap.ServerConfig{ + ServerURI: controlPlanAddr, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV2, + NodeProto: n, + }, p, nil, l) if err != nil { return nil, err } - return c.(*client), nil -} - -// TestV2ClientBackoffAfterRecvError verifies if the v2Client backs off when it -// encounters a Recv error while receiving an LDS response. -func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - // Override the v2Client backoff function with this, so that we can verify - // that a backoff actually was triggered. 
- boCh := make(chan int, 1) - clientBackoff := func(v int) time.Duration { - boCh <- v - return 0 - } - - callbackCh := make(chan struct{}) - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) { close(callbackCh) }, - }, cc, goodNodeProto, clientBackoff, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - t.Log("Started xds v2Client...") - - v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} - t.Log("Bad LDS response pushed to fakeServer...") - - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Timeout when expecting LDS update") - case <-boCh: - timer.Stop() - t.Log("v2Client backed off before retrying...") - case <-callbackCh: - t.Fatal("Received unexpected LDS callback") - } - - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request after backoff...") -} - -// TestV2ClientRetriesAfterBrokenStream verifies the case where a stream -// encountered a Recv() error, and is expected to send out xDS requests for -// registered watchers once it comes back up again. 
-func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - callbackCh := testutils.NewChannel() - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - if rType == xdsresource.ListenerResource { - if u, ok := d[goodLDSTarget1]; ok { - t.Logf("Received LDS callback with ldsUpdate {%+v}", u) - callbackCh.Send(struct{}{}) - } - } - }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - t.Log("Started xds v2Client...") - - v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} - t.Log("Good LDS response pushed to fakeServer...") - - if _, err := callbackCh.Receive(ctx); err != nil { - t.Fatal("Timeout when expecting LDS update") - } - - // Read the ack, so the next request is sent after stream re-creation. 
- if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS ACK") - } - - fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} - t.Log("Bad LDS response pushed to fakeServer...") - - val, err := fakeServer.XDSRequestChan.Receive(ctx) - if err != nil { - t.Fatalf("Timeout expired when expecting LDS update") - } - gotRequest := val.(*fakeserver.Request) - if !proto.Equal(gotRequest.Req, goodLDSRequest) { - t.Fatalf("gotRequest: %+v, wantRequest: %+v", gotRequest.Req, goodLDSRequest) - } -} - -// TestV2ClientWatchWithoutStream verifies the case where a watch is started -// when the xds stream is not created. The watcher should not receive any update -// (because there won't be any xds response, and timeout is done at a upper -// level). And when the stream is re-created, the watcher should get future -// updates. -func (s) TestV2ClientWatchWithoutStream(t *testing.T) { - fakeServer, sCleanup, err := fakeserver.StartServer() - if err != nil { - t.Fatalf("Failed to start fake xDS server: %v", err) - } - defer sCleanup() - - const scheme = "xds-client-test-whatever" - rb := manual.NewBuilderWithScheme(scheme) - rb.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "no.such.server"}}}) - - cc, err := grpc.Dial(scheme+":///whatever", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(rb)) - if err != nil { - t.Fatalf("Failed to dial ClientConn: %v", err) - } - defer cc.Close() - - callbackCh := testutils.NewChannel() - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - if rType == xdsresource.ListenerResource { - if u, ok := d[goodLDSTarget1]; ok { - t.Logf("Received LDS callback with ldsUpdate {%+v}", u) - callbackCh.Send(u) - } - } - }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer 
v2c.Close() - t.Log("Started xds v2Client...") - - // This watch is started when the xds-ClientConn is in Transient Failure, - // and no xds stream is created. - v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) - - // The watcher should receive an update, with a timeout error in it. - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if v, err := callbackCh.Receive(sCtx); err == nil { - t.Fatalf("Expect an timeout error from watcher, got %v", v) - } - - // Send the real server address to the ClientConn, the stream should be - // created, and the previous watch should be sent. - rb.UpdateState(resolver.State{ - Addresses: []resolver.Address{{Addr: fakeServer.Address}}, - }) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} - t.Log("Good LDS response pushed to fakeServer...") - - if v, err := callbackCh.Receive(ctx); err != nil { - t.Fatal("Timeout when expecting LDS update") - } else if _, ok := v.(xdsresource.ListenerUpdateErrTuple); !ok { - t.Fatalf("Expect an LDS update from watcher, got %v", v) - } + // This direct setting backoff seems a bit hacky, but should be OK for the + // tests. Or we need to make it configurable in New(). 
+ c.backoff = b + return c, nil } func newStringP(s string) *string { diff --git a/xds/internal/xdsclient/v2/client.go b/xds/internal/xdsclient/controller/version/v2/client.go similarity index 51% rename from xds/internal/xdsclient/v2/client.go rename to xds/internal/xdsclient/controller/version/v2/client.go index 12d0509dc524..ae3ae559e5dd 100644 --- a/xds/internal/xdsclient/v2/client.go +++ b/xds/internal/xdsclient/controller/version/v2/client.go @@ -28,9 +28,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/grpc/xds/internal/xdsclient" + controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -39,42 +40,24 @@ import ( ) func init() { - xdsclient.RegisterAPIClientBuilder(clientBuilder{}) + controllerversion.RegisterAPIClientBuilder(xdsresourceversion.TransportV2, newClient) } var ( resourceTypeToURL = map[xdsresource.ResourceType]string{ - xdsresource.ListenerResource: version.V2ListenerURL, - xdsresource.RouteConfigResource: version.V2RouteConfigURL, - xdsresource.ClusterResource: version.V2ClusterURL, - xdsresource.EndpointsResource: version.V2EndpointsURL, + xdsresource.ListenerResource: xdsresourceversion.V2ListenerURL, + xdsresource.RouteConfigResource: xdsresourceversion.V2RouteConfigURL, + xdsresource.ClusterResource: xdsresourceversion.V2ClusterURL, + xdsresource.EndpointsResource: xdsresourceversion.V2EndpointsURL, } ) -type clientBuilder struct{} - -func (clientBuilder) Build(cc *grpc.ClientConn, opts xdsclient.BuildOptions) 
(xdsclient.APIClient, error) { - return newClient(cc, opts) -} - -func (clientBuilder) Version() version.TransportAPI { - return version.TransportV2 -} - -func newClient(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) { +func newClient(opts controllerversion.BuildOptions) (controllerversion.VersionedClient, error) { nodeProto, ok := opts.NodeProto.(*v2corepb.Node) if !ok { return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, (*v2corepb.Node)(nil)) } - v2c := &client{ - cc: cc, - parent: opts.Parent, - nodeProto: nodeProto, - logger: opts.Logger, - updateValidator: opts.Validator, - } - v2c.ctx, v2c.cancelCtx = context.WithCancel(context.Background()) - v2c.TransportHelper = xdsclient.NewTransportHelper(v2c, opts.Logger, opts.Backoff) + v2c := &client{nodeProto: nodeProto, logger: opts.Logger} return v2c, nil } @@ -84,24 +67,15 @@ type adsStream v2adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesCli // single ADS stream on which the different types of xDS requests and responses // are multiplexed. type client struct { - *xdsclient.TransportHelper - - ctx context.Context - cancelCtx context.CancelFunc - parent xdsclient.UpdateHandler + nodeProto *v2corepb.Node logger *grpclog.PrefixLogger - - // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. 
- cc *grpc.ClientConn - nodeProto *v2corepb.Node - updateValidator xdsresource.UpdateValidatorFunc } -func (v2c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { - return v2adsgrpc.NewAggregatedDiscoveryServiceClient(v2c.cc).StreamAggregatedResources(v2c.ctx, grpc.WaitForReady(true)) +func (v2c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { + return v2adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true)) } -// sendRequest sends out a DiscoveryRequest for the given resourceNames, of type +// SendRequest sends out a DiscoveryRequest for the given resourceNames, of type // rType, on the provided stream. // // version is the ack version to be sent with the request @@ -143,7 +117,6 @@ func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { resp, err := stream.Recv() if err != nil { - v2c.parent.NewConnectionError(err) return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) @@ -151,11 +124,11 @@ func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { return resp, nil } -func (v2c *client) HandleResponse(r proto.Message) (xdsresource.ResourceType, string, string, error) { +func (v2c *client) ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) { rType := xdsresource.UnknownResource resp, ok := r.(*v2xdspb.DiscoveryResponse) if !ok { - return rType, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) + return rType, nil, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) } // Note that the xDS transport protocol is versioned independently of @@ -166,74 +139,17 @@ func (v2c *client) HandleResponse(r proto.Message) (xdsresource.ResourceType, st url := resp.GetTypeUrl() switch { case xdsresource.IsListenerResource(url): - err = v2c.handleLDSResponse(resp) rType = 
xdsresource.ListenerResource case xdsresource.IsRouteConfigResource(url): - err = v2c.handleRDSResponse(resp) rType = xdsresource.RouteConfigResource case xdsresource.IsClusterResource(url): - err = v2c.handleCDSResponse(resp) rType = xdsresource.ClusterResource case xdsresource.IsEndpointsResource(url): - err = v2c.handleEDSResponse(resp) rType = xdsresource.EndpointsResource default: - return rType, "", "", xdsclient.ErrResourceTypeUnsupported{ + return rType, nil, "", "", controllerversion.ErrResourceTypeUnsupported{ ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), } } - return rType, resp.GetVersionInfo(), resp.GetNonce(), err -} - -// handleLDSResponse processes an LDS response received from the management -// server. On receipt of a good response, it also invokes the registered watcher -// callback. -func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalListener(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v2c.logger, - UpdateValidator: v2c.updateValidator, - }) - v2c.parent.NewListeners(update, md) - return err -} - -// handleRDSResponse processes an RDS response received from the management -// server. On receipt of a good response, it caches validated resources and also -// invokes the registered watcher callback. -func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalRouteConfig(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v2c.logger, - UpdateValidator: v2c.updateValidator, - }) - v2c.parent.NewRouteConfigs(update, md) - return err -} - -// handleCDSResponse processes an CDS response received from the management -// server. On receipt of a good response, it also invokes the registered watcher -// callback. 
-func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalCluster(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v2c.logger, - UpdateValidator: v2c.updateValidator, - }) - v2c.parent.NewClusters(update, md) - return err -} - -func (v2c *client) handleEDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalEndpoints(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v2c.logger, - UpdateValidator: v2c.updateValidator, - }) - v2c.parent.NewEndpoints(update, md) - return err + return rType, resp.GetResources(), resp.GetVersionInfo(), resp.GetNonce(), err } diff --git a/xds/internal/xdsclient/v2/loadreport.go b/xds/internal/xdsclient/controller/version/v2/loadreport.go similarity index 100% rename from xds/internal/xdsclient/v2/loadreport.go rename to xds/internal/xdsclient/controller/version/v2/loadreport.go diff --git a/xds/internal/xdsclient/v3/client.go b/xds/internal/xdsclient/controller/version/v3/client.go similarity index 52% rename from xds/internal/xdsclient/v3/client.go rename to xds/internal/xdsclient/controller/version/v3/client.go index 622ae2a434f9..1c7f11ad2527 100644 --- a/xds/internal/xdsclient/v3/client.go +++ b/xds/internal/xdsclient/controller/version/v3/client.go @@ -29,9 +29,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/grpc/xds/internal/xdsclient" + controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" v3corepb 
"github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" @@ -39,42 +40,26 @@ import ( ) func init() { - xdsclient.RegisterAPIClientBuilder(clientBuilder{}) + controllerversion.RegisterAPIClientBuilder(xdsresourceversion.TransportV3, newClient) } var ( resourceTypeToURL = map[xdsresource.ResourceType]string{ - xdsresource.ListenerResource: version.V3ListenerURL, - xdsresource.RouteConfigResource: version.V3RouteConfigURL, - xdsresource.ClusterResource: version.V3ClusterURL, - xdsresource.EndpointsResource: version.V3EndpointsURL, + xdsresource.ListenerResource: xdsresourceversion.V3ListenerURL, + xdsresource.RouteConfigResource: xdsresourceversion.V3RouteConfigURL, + xdsresource.ClusterResource: xdsresourceversion.V3ClusterURL, + xdsresource.EndpointsResource: xdsresourceversion.V3EndpointsURL, } ) -type clientBuilder struct{} - -func (clientBuilder) Build(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) { - return newClient(cc, opts) -} - -func (clientBuilder) Version() version.TransportAPI { - return version.TransportV3 -} - -func newClient(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) { +func newClient(opts controllerversion.BuildOptions) (controllerversion.VersionedClient, error) { nodeProto, ok := opts.NodeProto.(*v3corepb.Node) if !ok { return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, v3corepb.Node{}) } v3c := &client{ - cc: cc, - parent: opts.Parent, - nodeProto: nodeProto, - logger: opts.Logger, - updateValidator: opts.Validator, + nodeProto: nodeProto, logger: opts.Logger, } - v3c.ctx, v3c.cancelCtx = context.WithCancel(context.Background()) - v3c.TransportHelper = xdsclient.NewTransportHelper(v3c, opts.Logger, opts.Backoff) return v3c, nil } @@ -84,24 +69,15 @@ type adsStream v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesCli // 
single ADS stream on which the different types of xDS requests and responses // are multiplexed. type client struct { - *xdsclient.TransportHelper - - ctx context.Context - cancelCtx context.CancelFunc - parent xdsclient.UpdateHandler + nodeProto *v3corepb.Node logger *grpclog.PrefixLogger - - // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. - cc *grpc.ClientConn - nodeProto *v3corepb.Node - updateValidator xdsresource.UpdateValidatorFunc } -func (v3c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(v3c.cc).StreamAggregatedResources(v3c.ctx, grpc.WaitForReady(true)) +func (v3c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true)) } -// sendRequest sends out a DiscoveryRequest for the given resourceNames, of type +// SendRequest sends out a DiscoveryRequest for the given resourceNames, of type // rType, on the provided stream. 
// // version is the ack version to be sent with the request @@ -143,7 +119,6 @@ func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { resp, err := stream.Recv() if err != nil { - v3c.parent.NewConnectionError(err) return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v3c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) @@ -151,11 +126,11 @@ func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { return resp, nil } -func (v3c *client) HandleResponse(r proto.Message) (xdsresource.ResourceType, string, string, error) { +func (v3c *client) ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) { rType := xdsresource.UnknownResource resp, ok := r.(*v3discoverypb.DiscoveryResponse) if !ok { - return rType, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) + return rType, nil, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) } // Note that the xDS transport protocol is versioned independently of @@ -166,74 +141,17 @@ func (v3c *client) HandleResponse(r proto.Message) (xdsresource.ResourceType, st url := resp.GetTypeUrl() switch { case xdsresource.IsListenerResource(url): - err = v3c.handleLDSResponse(resp) rType = xdsresource.ListenerResource case xdsresource.IsRouteConfigResource(url): - err = v3c.handleRDSResponse(resp) rType = xdsresource.RouteConfigResource case xdsresource.IsClusterResource(url): - err = v3c.handleCDSResponse(resp) rType = xdsresource.ClusterResource case xdsresource.IsEndpointsResource(url): - err = v3c.handleEDSResponse(resp) rType = xdsresource.EndpointsResource default: - return rType, "", "", xdsclient.ErrResourceTypeUnsupported{ + return rType, nil, "", "", controllerversion.ErrResourceTypeUnsupported{ ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), } } - return rType, resp.GetVersionInfo(), resp.GetNonce(), err -} - -// handleLDSResponse processes an LDS response 
received from the management -// server. On receipt of a good response, it also invokes the registered watcher -// callback. -func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalListener(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v3c.logger, - UpdateValidator: v3c.updateValidator, - }) - v3c.parent.NewListeners(update, md) - return err -} - -// handleRDSResponse processes an RDS response received from the management -// server. On receipt of a good response, it caches validated resources and also -// invokes the registered watcher callback. -func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalRouteConfig(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v3c.logger, - UpdateValidator: v3c.updateValidator, - }) - v3c.parent.NewRouteConfigs(update, md) - return err -} - -// handleCDSResponse processes an CDS response received from the management -// server. On receipt of a good response, it also invokes the registered watcher -// callback. 
-func (v3c *client) handleCDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalCluster(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v3c.logger, - UpdateValidator: v3c.updateValidator, - }) - v3c.parent.NewClusters(update, md) - return err -} - -func (v3c *client) handleEDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsresource.UnmarshalEndpoints(&xdsresource.UnmarshalOptions{ - Version: resp.GetVersionInfo(), - Resources: resp.GetResources(), - Logger: v3c.logger, - UpdateValidator: v3c.updateValidator, - }) - v3c.parent.NewEndpoints(update, md) - return err + return rType, resp.GetResources(), resp.GetVersionInfo(), resp.GetNonce(), err } diff --git a/xds/internal/xdsclient/v3/loadreport.go b/xds/internal/xdsclient/controller/version/v3/loadreport.go similarity index 100% rename from xds/internal/xdsclient/v3/loadreport.go rename to xds/internal/xdsclient/controller/version/v3/loadreport.go diff --git a/xds/internal/xdsclient/controller/version/version.go b/xds/internal/xdsclient/controller/version/version.go new file mode 100644 index 000000000000..f79a21e294fa --- /dev/null +++ b/xds/internal/xdsclient/controller/version/version.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package version defines APIs to deal with different versions of xDS. +package version + +import ( + "context" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" +) + +var ( + m = make(map[version.TransportAPI]func(opts BuildOptions) (VersionedClient, error)) +) + +// RegisterAPIClientBuilder registers a client builder for xDS transport protocol +// version specified by b.Version(). +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple builders are +// registered for the same version, the one registered last will take effect. +func RegisterAPIClientBuilder(v version.TransportAPI, f func(opts BuildOptions) (VersionedClient, error)) { + m[v] = f +} + +// GetAPIClientBuilder returns the client builder registered for the provided +// xDS transport API version. +func GetAPIClientBuilder(version version.TransportAPI) func(opts BuildOptions) (VersionedClient, error) { + if f, ok := m[version]; ok { + return f + } + return nil +} + +// BuildOptions contains options to be passed to client builders. +type BuildOptions struct { + // NodeProto contains the Node proto to be used in xDS requests. The actual + // type depends on the transport protocol version used. + NodeProto proto.Message + // // Backoff returns the amount of time to backoff before retrying broken + // // streams. + // Backoff func(int) time.Duration + // Logger provides enhanced logging capabilities. + Logger *grpclog.PrefixLogger +} + +// LoadReportingOptions contains configuration knobs for reporting load data. 
+type LoadReportingOptions struct { + LoadStore *load.Store +} + +// ErrResourceTypeUnsupported is an error used to indicate an unsupported xDS +// resource type. The wrapped ErrStr contains the details. +type ErrResourceTypeUnsupported struct { + ErrStr string +} + +// Error helps implements the error interface. +func (e ErrResourceTypeUnsupported) Error() string { + return e.ErrStr +} + +// VersionedClient is the interface to version specific operations of the +// client. +// +// It mainly deals with the type assertion from proto.Message to the real v2/v3 +// types, and grpc.Stream to the versioned stream types. +type VersionedClient interface { + // NewStream returns a new xDS client stream specific to the underlying + // transport protocol version. + NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) + // SendRequest constructs and sends out a DiscoveryRequest message specific + // to the underlying transport protocol version. + SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error + // RecvResponse uses the provided stream to receive a response specific to + // the underlying transport protocol version. + RecvResponse(s grpc.ClientStream) (proto.Message, error) + // ParseResponse type asserts message to the versioned response, and + // retrieves the fields. + ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) + + // The following are LRS methods. + + // NewLoadStatsStream returns a new LRS client stream specific to the + // underlying transport protocol version. + NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) + // SendFirstLoadStatsRequest constructs and sends the first request on the + // LRS stream. 
+ SendFirstLoadStatsRequest(s grpc.ClientStream) error + // HandleLoadStatsResponse receives the first response from the server which + // contains the load reporting interval and the clusters for which the + // server asks the client to report load for. + // + // If the response sets SendAllClusters to true, the returned clusters is + // nil. + HandleLoadStatsResponse(s grpc.ClientStream) (clusters []string, _ time.Duration, _ error) + // SendLoadStatsRequest will be invoked at regular intervals to send load + // report with load data reported since the last time this method was + // invoked. + SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error +} diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index 0b6d22c7d89f..ccc98898a9cd 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -30,6 +30,7 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" @@ -86,7 +87,7 @@ func (s) TestLDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(xdsclient.UpdateHandler) + updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. if err := compareDump(client.DumpLDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { @@ -202,7 +203,7 @@ func (s) TestRDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(xdsclient.UpdateHandler) + updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. 
if err := compareDump(client.DumpRDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { @@ -318,7 +319,7 @@ func (s) TestCDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(xdsclient.UpdateHandler) + updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. if err := compareDump(client.DumpCDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { @@ -420,7 +421,7 @@ func (s) TestEDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(xdsclient.UpdateHandler) + updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. if err := compareDump(client.DumpEDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { diff --git a/xds/internal/xdsclient/loadreport.go b/xds/internal/xdsclient/loadreport.go index 4df9a5c0c3a4..21400c1321b8 100644 --- a/xds/internal/xdsclient/loadreport.go +++ b/xds/internal/xdsclient/loadreport.go @@ -17,12 +17,7 @@ package xdsclient -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal/xdsclient/load" -) +import "google.golang.org/grpc/xds/internal/xdsclient/load" // ReportLoad starts an load reporting stream to the given server. If the server // is not an empty string, and is different from the management server, a new @@ -34,106 +29,5 @@ import ( // It returns a Store for the user to report loads, a function to cancel the // load reporting stream. func (c *clientImpl) ReportLoad(server string) (*load.Store, func()) { - c.lrsMu.Lock() - defer c.lrsMu.Unlock() - - // If there's already a client to this server, use it. Otherwise, create - // one. - lrsC, ok := c.lrsClients[server] - if !ok { - lrsC = newLRSClient(c, server) - c.lrsClients[server] = lrsC - } - - store := lrsC.ref() - return store, func() { - // This is a callback, need to hold lrsMu. 
- c.lrsMu.Lock() - defer c.lrsMu.Unlock() - if lrsC.unRef() { - // Delete the lrsClient from map if this is the last reference. - delete(c.lrsClients, server) - } - } -} - -// lrsClient maps to one lrsServer. It contains: -// - a ClientConn to this server (only if it's different from the management -// server) -// - a load.Store that contains loads only for this server -type lrsClient struct { - parent *clientImpl - server string - - cc *grpc.ClientConn // nil if the server is same as the management server - refCount int - cancelStream func() - loadStore *load.Store -} - -// newLRSClient creates a new LRS stream to the server. -func newLRSClient(parent *clientImpl, server string) *lrsClient { - return &lrsClient{ - parent: parent, - server: server, - refCount: 0, - } -} - -// ref increments the refCount. If this is the first ref, it starts the LRS stream. -// -// Not thread-safe, caller needs to synchronize. -func (lrsC *lrsClient) ref() *load.Store { - lrsC.refCount++ - if lrsC.refCount == 1 { - lrsC.startStream() - } - return lrsC.loadStore -} - -// unRef decrements the refCount, and closes the stream if refCount reaches 0 -// (and close the cc if cc is not xDS cc). It returns whether refCount reached 0 -// after this call. -// -// Not thread-safe, caller needs to synchronize. -func (lrsC *lrsClient) unRef() (closed bool) { - lrsC.refCount-- - if lrsC.refCount != 0 { - return false - } - lrsC.parent.logger.Infof("Stopping load report to server: %s", lrsC.server) - lrsC.cancelStream() - if lrsC.cc != nil { - lrsC.cc.Close() - } - return true -} - -// startStream starts the LRS stream to the server. If server is not the same -// management server from the parent, it also creates a ClientConn. -func (lrsC *lrsClient) startStream() { - var cc *grpc.ClientConn - - lrsC.parent.logger.Infof("Starting load report to server: %s", lrsC.server) - if lrsC.server == "" || lrsC.server == lrsC.parent.config.XDSServer.ServerURI { - // Reuse the xDS client if server is the same. 
- cc = lrsC.parent.cc - } else { - lrsC.parent.logger.Infof("LRS server is different from management server, starting a new ClientConn") - ccNew, err := grpc.Dial(lrsC.server, lrsC.parent.config.XDSServer.Creds) - if err != nil { - // An error from a non-blocking dial indicates something serious. - lrsC.parent.logger.Infof("xds: failed to dial load report server {%s}: %v", lrsC.server, err) - return - } - cc = ccNew - lrsC.cc = ccNew - } - - var ctx context.Context - ctx, lrsC.cancelStream = context.WithCancel(context.Background()) - - // Create the store and stream. - lrsC.loadStore = load.NewStore() - go lrsC.parent.apiClient.reportLoad(ctx, cc, loadReportingOptions{loadStore: lrsC.loadStore}) + return c.controller.ReportLoad(server) } diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index db31de6cf78b..8b19f80287cc 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -16,7 +16,7 @@ * */ -package xdsclient_test +package xdsclient import ( "context" @@ -33,17 +33,14 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" - _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 xDS API client. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client. ) const ( - defaultTestTimeout = 5 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. 
defaultClientWatchExpiryTimeout = 15 * time.Second ) @@ -54,7 +51,7 @@ func (s) TestLRSClient(t *testing.T) { } defer sCleanup() - xdsC, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + xdsC, err := NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: fs.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), diff --git a/xds/internal/xdsclient/pubsub/interface.go b/xds/internal/xdsclient/pubsub/interface.go new file mode 100644 index 000000000000..334ec101e29d --- /dev/null +++ b/xds/internal/xdsclient/pubsub/interface.go @@ -0,0 +1,39 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pubsub + +import "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + +// UpdateHandler receives and processes (by taking appropriate actions) xDS +// resource updates from an APIClient for a specific version. +// +// It's a subset of the APIs of a *Pubsub. +type UpdateHandler interface { + // NewListeners handles updates to xDS listener resources. + NewListeners(map[string]xdsresource.ListenerUpdateErrTuple, xdsresource.UpdateMetadata) + // NewRouteConfigs handles updates to xDS RouteConfiguration resources. + NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple, xdsresource.UpdateMetadata) + // NewClusters handles updates to xDS Cluster resources. 
+ NewClusters(map[string]xdsresource.ClusterUpdateErrTuple, xdsresource.UpdateMetadata) + // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely + // referred to as Endpoints) resources. + NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple, xdsresource.UpdateMetadata) + // NewConnectionError handles connection errors from the xDS stream. The + // error will be reported to all the resource watchers. + NewConnectionError(err error) +} diff --git a/xds/internal/xdsclient/watchers.go b/xds/internal/xdsclient/watchers.go index bf9a07c061af..fe59dbbd6f68 100644 --- a/xds/internal/xdsclient/watchers.go +++ b/xds/internal/xdsclient/watchers.go @@ -29,11 +29,11 @@ import ( func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { first, cancelF := c.pubsub.WatchListener(serviceName, cb) if first { - c.apiClient.AddWatch(xdsresource.ListenerResource, serviceName) + c.controller.AddWatch(xdsresource.ListenerResource, serviceName) } return func() { if cancelF() { - c.apiClient.RemoveWatch(xdsresource.ListenerResource, serviceName) + c.controller.RemoveWatch(xdsresource.ListenerResource, serviceName) } } } @@ -46,11 +46,11 @@ func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.Liste func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { first, cancelF := c.pubsub.WatchRouteConfig(routeName, cb) if first { - c.apiClient.AddWatch(xdsresource.RouteConfigResource, routeName) + c.controller.AddWatch(xdsresource.RouteConfigResource, routeName) } return func() { if cancelF() { - c.apiClient.RemoveWatch(xdsresource.RouteConfigResource, routeName) + c.controller.RemoveWatch(xdsresource.RouteConfigResource, routeName) } } } @@ -67,11 +67,11 @@ func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.Rout func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, 
error)) (cancel func()) { first, cancelF := c.pubsub.WatchCluster(clusterName, cb) if first { - c.apiClient.AddWatch(xdsresource.ClusterResource, clusterName) + c.controller.AddWatch(xdsresource.ClusterResource, clusterName) } return func() { if cancelF() { - c.apiClient.RemoveWatch(xdsresource.ClusterResource, clusterName) + c.controller.RemoveWatch(xdsresource.ClusterResource, clusterName) } } } @@ -87,11 +87,11 @@ func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.Cluste func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { first, cancelF := c.pubsub.WatchEndpoints(clusterName, cb) if first { - c.apiClient.AddWatch(xdsresource.EndpointsResource, clusterName) + c.controller.AddWatch(xdsresource.EndpointsResource, clusterName) } return func() { if cancelF() { - c.apiClient.RemoveWatch(xdsresource.EndpointsResource, clusterName) + c.controller.RemoveWatch(xdsresource.EndpointsResource, clusterName) } } } diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index 729990ad95bb..98be38869bc6 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -35,7 +35,7 @@ import ( // - an update for another resource name // - an update is received after cancel() func (s) TestClusterWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -50,7 +50,7 @@ func (s) TestClusterWatch(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) clusterUpdateCh := testutils.NewChannel() cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { @@ -92,7 +92,7 @@ func (s) 
TestClusterWatch(t *testing.T) { // TestClusterTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -107,7 +107,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) var clusterUpdateChs []*testutils.Channel var cancelLastWatch func() @@ -164,7 +164,7 @@ func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { // TestClusterThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -179,7 +179,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) // Two watches for the same name. var clusterUpdateChs []*testutils.Channel @@ -229,7 +229,7 @@ func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { // TestClusterWatchAfterCache covers the case where watch is called after the update // is in cache. 
func (s) TestClusterWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -244,7 +244,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) clusterUpdateCh := testutils.NewChannel() client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { @@ -290,7 +290,7 @@ func (s) TestClusterWatchAfterCache(t *testing.T) { // an CDS response for the request that it sends out. We want the watch callback // to be invoked with an error once the watchExpiryTimer fires. func (s) TestClusterWatchExpiryTimer(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, true)) @@ -305,7 +305,7 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) clusterUpdateCh := testutils.NewChannel() client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { @@ -329,7 +329,7 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { // an CDS response for the request that it sends out. We want no error even // after expiry timeout. 
func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, true)) @@ -344,7 +344,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) clusterUpdateCh := testutils.NewChannel() client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { @@ -377,7 +377,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { // - one more update without the removed resource // - the callback (above) shouldn't receive any update func (s) TestClusterResourceRemoved(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -392,7 +392,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) clusterUpdateCh1 := testutils.NewChannel() client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { @@ -460,7 +460,7 @@ func (s) TestClusterResourceRemoved(t *testing.T) { // TestClusterWatchNACKError covers the case that an update is NACK'ed, and the // watcher should also receive the error. 
func (s) TestClusterWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -475,7 +475,7 @@ func (s) TestClusterWatchNACKError(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) clusterUpdateCh := testutils.NewChannel() cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { @@ -500,7 +500,7 @@ func (s) TestClusterWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. func (s) TestClusterWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -515,7 +515,7 @@ func (s) TestClusterWatchPartialValid(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) const badResourceName = "bad-resource" updateChs := make(map[string]*testutils.Channel) diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index eddc17ed1bd6..4ae59d2f1e92 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -53,7 +53,7 @@ var ( // - an update for another resource name (which doesn't trigger callback) // - an update is received after cancel() func (s) TestEndpointsWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -68,7 +68,7 @@ 
func (s) TestEndpointsWatch(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) endpointsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { @@ -110,7 +110,7 @@ func (s) TestEndpointsWatch(t *testing.T) { // TestEndpointsTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -125,7 +125,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) const count = 2 var ( @@ -184,7 +184,7 @@ func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { // TestEndpointsThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -199,7 +199,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) // Two watches for the same name. 
var endpointsUpdateChs []*testutils.Channel @@ -249,7 +249,7 @@ func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { // TestEndpointsWatchAfterCache covers the case where watch is called after the update // is in cache. func (s) TestEndpointsWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -264,7 +264,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) endpointsUpdateCh := testutils.NewChannel() client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { @@ -308,7 +308,7 @@ func (s) TestEndpointsWatchAfterCache(t *testing.T) { // an CDS response for the request that it sends out. We want the watch callback // to be invoked with an error once the watchExpiryTimer fires. func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, true)) @@ -323,7 +323,7 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) endpointsUpdateCh := testutils.NewChannel() client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { @@ -346,7 +346,7 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { // TestEndpointsWatchNACKError covers the case that an update is NACK'ed, and // the watcher should also receive the error. 
func (s) TestEndpointsWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -361,7 +361,7 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) endpointsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { @@ -384,7 +384,7 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. func (s) TestEndpointsWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -399,7 +399,7 @@ func (s) TestEndpointsWatchPartialValid(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) const badResourceName = "bad-resource" updateChs := make(map[string]*testutils.Channel) diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index 3ced8b81f7a0..7446975a1511 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -35,7 +35,7 @@ import ( // - an update for another resource name // - an update is received after cancel() func (s) TestLDSWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -50,7 +50,7 @@ func (s) TestLDSWatch(t 
*testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) ldsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { @@ -91,7 +91,7 @@ func (s) TestLDSWatch(t *testing.T) { // TestLDSTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -106,7 +106,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) const count = 2 var ( @@ -166,7 +166,7 @@ func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { // TestLDSThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -181,7 +181,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) var ldsUpdateChs []*testutils.Channel const count = 2 @@ -232,7 +232,7 @@ func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { // TestLDSWatchAfterCache covers the case where watch is called after the update // is in cache. 
func (s) TestLDSWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -247,7 +247,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) ldsUpdateCh := testutils.NewChannel() client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { @@ -294,7 +294,7 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { // - one more update without the removed resource // - the callback (above) shouldn't receive any update func (s) TestLDSResourceRemoved(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -309,7 +309,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) ldsUpdateCh1 := testutils.NewChannel() client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { @@ -376,7 +376,7 @@ func (s) TestLDSResourceRemoved(t *testing.T) { // TestListenerWatchNACKError covers the case that an update is NACK'ed, and the // watcher should also receive the error. 
func (s) TestListenerWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -391,7 +391,7 @@ func (s) TestListenerWatchNACKError(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) ldsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { @@ -414,7 +414,7 @@ func (s) TestListenerWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. func (s) TestListenerWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -429,7 +429,7 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) const badResourceName = "bad-resource" updateChs := make(map[string]*testutils.Channel) @@ -472,7 +472,7 @@ func (s) TestListenerWatchPartialValid(t *testing.T) { // TestListenerWatch_RedundantUpdateSupression tests scenarios where an update // with an unmodified resource is suppressed, and modified resource is not. 
func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -487,7 +487,7 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) ldsUpdateCh := testutils.NewChannel() client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index d01e03911eae..ea7b06ae1fd9 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -35,7 +35,7 @@ import ( // - an update for another resource name (which doesn't trigger callback) // - an update is received after cancel() func (s) TestRDSWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -50,7 +50,7 @@ func (s) TestRDSWatch(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) rdsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { @@ -99,7 +99,7 @@ func (s) TestRDSWatch(t *testing.T) { // TestRDSTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. 
func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -114,7 +114,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) const count = 2 var ( @@ -181,7 +181,7 @@ func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { // TestRDSThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -196,7 +196,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) // Two watches for the same name. var rdsUpdateChs []*testutils.Channel @@ -260,7 +260,7 @@ func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { // TestRDSWatchAfterCache covers the case where watch is called after the update // is in cache. 
func (s) TestRDSWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -275,7 +275,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) rdsUpdateCh := testutils.NewChannel() client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { @@ -325,7 +325,7 @@ func (s) TestRDSWatchAfterCache(t *testing.T) { // TestRouteWatchNACKError covers the case that an update is NACK'ed, and the // watcher should also receive the error. func (s) TestRouteWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -340,7 +340,7 @@ func (s) TestRouteWatchNACKError(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) rdsUpdateCh := testutils.NewChannel() cancelWatch := client.WatchRouteConfig(testCDSName, func(update xdsresource.RouteConfigUpdate, err error) { @@ -363,7 +363,7 @@ func (s) TestRouteWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. 
func (s) TestRouteWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() + apiClientCh, cleanup := overrideNewController() defer cleanup() client, err := newWithConfig(clientOpts(testXDSServer, false)) @@ -378,7 +378,7 @@ func (s) TestRouteWatchPartialValid(t *testing.T) { if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } - apiClient := c.(*testAPIClient) + apiClient := c.(*testController) const badResourceName = "bad-resource" updateChs := make(map[string]*testutils.Channel) diff --git a/xds/internal/xdsclient/xdsclient_test.go b/xds/internal/xdsclient/xdsclient_test.go index 23d6d6f54183..92423a59f296 100644 --- a/xds/internal/xdsclient/xdsclient_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -26,10 +26,11 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/version" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 API client. + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 API client. 
) type s struct { diff --git a/xds/internal/xdsclient/xdsresource/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go index 10c779229622..88dc81305b26 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) const ( diff --git a/xds/internal/xdsclient/xdsresource/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go index dc1ea75778bf..daa3ad46cbfb 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain_test.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go @@ -43,7 +43,7 @@ import ( "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) const ( diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go index bb7e9c1520e9..c64f7c609c62 100644 --- a/xds/internal/xdsclient/xdsresource/type.go +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -20,7 +20,7 @@ package xdsresource import ( "time" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index a1c6c3ea7a62..01ebe7135326 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc/internal/pretty" 
"google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/internal/xds/matcher" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 3a56965bdc4e..16874e3c1789 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -36,7 +36,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/internal/xds/matcher" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/wrapperspb" ) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index 770dbf4c5253..324d7d250f69 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -32,7 +32,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) func (s) TestEDSParseRespProto(t *testing.T) { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index 3a1d0f63156f..f9663d05bee3 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/version" + 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index 503a634f1f9a..101735c25cab 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -32,7 +32,7 @@ import ( _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/durationpb" v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 9388f9a8d2e3..3e33a0d61bf6 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/clusterspecifier" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index b48940927ba4..b06317c373fe 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/version" + 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/durationpb" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" diff --git a/xds/internal/version/version.go b/xds/internal/xdsclient/xdsresource/version/version.go similarity index 100% rename from xds/internal/version/version.go rename to xds/internal/xdsclient/xdsresource/version/version.go diff --git a/xds/xds.go b/xds/xds.go index 27547b56d226..818af0367ad0 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -36,14 +36,14 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" - _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. - _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. - _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. - xdsresolver "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. - _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 xDS API client. - _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register the v3 xDS API client. + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. + xdsresolver "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. 
+ _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. ) func init() { From 6a896a3e30c243a34fb3d08ab89c17b3e8791763 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 10 Nov 2021 16:48:05 -0800 Subject: [PATCH 335/998] pickfirst: check b.sc before calling Connect (#4971) --- pickfirst.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pickfirst.go b/pickfirst.go index f194d14a0816..5168b62b078a 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -125,7 +125,7 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.state == connectivity.Idle { + if b.sc != nil && b.state == connectivity.Idle { b.sc.Connect() } } From d61c7ae86d6a6a0d3fa46484e350cf9ee726025d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 10 Nov 2021 17:05:08 -0800 Subject: [PATCH 336/998] internal: merge xds/envconfig into env package (#4923) --- internal/{xds/env/env.go => envconfig/xds.go} | 56 +++++++++---------- internal/xds/bootstrap.go | 8 +-- xds/googledirectpath/googlec2p.go | 11 ++-- xds/googledirectpath/googlec2p_test.go | 13 +++-- xds/internal/httpfilter/rbac/rbac.go | 4 +- xds/internal/resolver/serviceconfig.go | 4 +- xds/internal/resolver/xds_resolver_test.go | 8 +-- xds/internal/server/listener_wrapper.go | 6 +- xds/internal/server/listener_wrapper_test.go | 8 +-- xds/internal/test/xds_client_affinity_test.go | 15 ++--- .../test/xds_server_integration_test.go | 30 +++++----- xds/internal/xdsclient/bootstrap/bootstrap.go | 18 +++--- .../xdsclient/bootstrap/bootstrap_test.go | 51 ++++++++--------- .../xdsclient/xdsresource/filter_chain.go | 4 +- .../xdsresource/filter_chain_test.go | 44 +++++++-------- .../xdsclient/xdsresource/unmarshal_cds.go | 10 ++-- .../xdsresource/unmarshal_cds_test.go | 45 +++++++-------- .../xdsresource/unmarshal_lds_test.go | 8 +-- 
.../xdsclient/xdsresource/unmarshal_rds.go | 4 +- .../xdsresource/unmarshal_rds_test.go | 14 ++--- xds/server.go | 6 +- 21 files changed, 186 insertions(+), 181 deletions(-) rename internal/{xds/env/env.go => envconfig/xds.go} (56%) diff --git a/internal/xds/env/env.go b/internal/envconfig/xds.go similarity index 56% rename from internal/xds/env/env.go rename to internal/envconfig/xds.go index 75bdc6efb19f..13bbeb197229 100644 --- a/internal/xds/env/env.go +++ b/internal/envconfig/xds.go @@ -16,9 +16,7 @@ * */ -// Package env acts a single source of definition for all environment variables -// related to the xDS implementation in gRPC. -package env +package envconfig import ( "os" @@ -26,18 +24,18 @@ import ( ) const ( - // BootstrapFileNameEnv is the env variable to set bootstrap file name. + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in // variable BootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // BootstrapFileContentEnv is the env variable to set bootstrapp file + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file // content. Do not use this and read from env directly. Its value is read // and kept in variable BootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" @@ -50,48 +48,48 @@ const ( ) var ( - // BootstrapFileName holds the name of the file which contains xDS bootstrap - // configuration. 
Users can specify the location of the bootstrap file by - // setting the environment variable "GRPC_XDS_BOOTSTRAP". + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". // // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileName = os.Getenv(BootstrapFileNameEnv) - // BootstrapFileContent holds the content of the xDS bootstrap - // configuration. Users can specify the bootstrap config by - // setting the environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". // // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) - // RingHashSupport indicates whether ring hash support is enabled, which can - // be disabled by setting the environment variable + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - RingHashSupport = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") - // ClientSideSecuritySupport is used to control processing of security + XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + // XDSClientSideSecurity is used to control processing of security // configuration on the client-side. // // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. 
- ClientSideSecuritySupport = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") - // AggregateAndDNSSupportEnv indicates whether processing of aggregated - // cluster and DNS cluster is enabled, which can be enabled by setting the + XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + // XDSAggregateAndDNS indicates whether processing of aggregated cluster + // and DNS cluster is enabled, which can be enabled by setting the // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") - // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled, + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - RBACSupport = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - // FederationSupport indicates whether federation support is enabled. - FederationSupport = strings.EqualFold(os.Getenv(federationEnv), "true") + // XDSFederation indicates whether federation support is enabled. + XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") - // C2PResolverSupport indicates whether support for C2P resolver is enabled. + // C2PResolver indicates whether support for C2P resolver is enabled. // This can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". - C2PResolverSupport = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") + C2PResolver = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) diff --git a/internal/xds/bootstrap.go b/internal/xds/bootstrap.go index 1d74ab46a114..eeb709c45072 100644 --- a/internal/xds/bootstrap.go +++ b/internal/xds/bootstrap.go @@ -27,7 +27,7 @@ import ( "os" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/internal/envconfig" ) var logger = grpclog.Component("internal/xds") @@ -79,11 +79,11 @@ func SetupBootstrapFile(opts BootstrapOptions) (func(), error) { } logger.Infof("Created bootstrap file at %q with contents: %s\n", f.Name(), bootstrapContents) - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = f.Name() + origBootstrapFileName := envconfig.XDSBootstrapFileName + envconfig.XDSBootstrapFileName = f.Name() return func() { os.Remove(f.Name()) - env.BootstrapFileName = origBootstrapFileName + envconfig.XDSBootstrapFileName = origBootstrapFileName }, nil } diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 726ecec43187..8841f72173b3 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -29,20 +29,21 @@ import ( "fmt" "time" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "google.golang.org/grpc" "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/googlecloud" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. 
"google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) const ( @@ -74,7 +75,7 @@ var ( ) func init() { - if env.C2PResolverSupport { + if envconfig.C2PResolver { resolver.Register(c2pResolverBuilder{}) } } @@ -98,7 +99,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts go func() { zoneCh <- getZone(httpReqTimeout) }() go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }() - balancerName := env.C2PResolverTestOnlyTrafficDirectorURI + balancerName := envconfig.C2PResolverTestOnlyTrafficDirectorURI if balancerName == "" { balancerName = tdURL } @@ -176,5 +177,5 @@ func newNode(zone string, ipv6Capable bool) *v3corepb.Node { // direct path is enabled if this client is running on GCE, and the normal xDS // is not used (bootstrap env vars are not set). 
func runDirectPath() bool { - return env.BootstrapFileName == "" && env.BootstrapFileContent == "" && onGCE() + return envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" && onGCE() } diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 4b0ab5395503..7f7c08ba28b7 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -23,17 +23,18 @@ import ( "testing" "time" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) type emptyResolver struct { @@ -90,7 +91,7 @@ func TestBuildWithBootstrapEnvSet(t *testing.T) { defer replaceResolvers()() builder := resolver.Get(c2pScheme) - for i, envP := range []*string{&env.BootstrapFileName, &env.BootstrapFileContent} { + for i, envP := range []*string{&envconfig.XDSBootstrapFileName, &envconfig.XDSBootstrapFileContent} { t.Run(strconv.Itoa(i), func(t *testing.T) { // Set bootstrap config env var. 
oldEnv := *envP @@ -166,10 +167,10 @@ func TestBuildXDS(t *testing.T) { defer func() { getIPv6Capable = oldGetIPv6Capability }() if tt.tdURI != "" { - oldURI := env.C2PResolverTestOnlyTrafficDirectorURI - env.C2PResolverTestOnlyTrafficDirectorURI = tt.tdURI + oldURI := envconfig.C2PResolverTestOnlyTrafficDirectorURI + envconfig.C2PResolverTestOnlyTrafficDirectorURI = tt.tdURI defer func() { - env.C2PResolverTestOnlyTrafficDirectorURI = oldURI + envconfig.C2PResolverTestOnlyTrafficDirectorURI = oldURI }() } diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index 9211d7714bc3..bb85dc80d460 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -27,8 +27,8 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/internal/xds/rbac" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/protobuf/types/known/anypb" @@ -38,7 +38,7 @@ import ( ) func init() { - if env.RBACSupport { + if envconfig.XDSRBAC { httpfilter.Register(builder{}) } } diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index c418bc5d758c..772873092107 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -29,11 +29,11 @@ import ( xxhash "github.com/cespare/xxhash/v2" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/wrr" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/clustermanager" @@ -174,7 
+174,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name) // Request Hashes are only applicable for a Ring Hash LB. - if env.RingHashSupport { + if envconfig.XDSRingHash { lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies)) } diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 5ab40b712ba1..df4f47803713 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -33,12 +33,12 @@ import ( "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpctest" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/wrr" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -584,9 +584,9 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { // with a HashPolicy specifying to generate a hash. The configSelector generated should // successfully generate a Hash. 
func (s) TestXDSResolverRequestHash(t *testing.T) { - oldRH := env.RingHashSupport - env.RingHashSupport = true - defer func() { env.RingHashSupport = oldRH }() + oldRH := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRH }() xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) defer xdsR.Close() diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 6ffb9763ad6e..c90f9672ea32 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -33,9 +33,9 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -273,7 +273,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } - if !env.RBACSupport { + if !envconfig.XDSRBAC { return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil } var rc xdsresource.RouteConfigUpdate @@ -414,7 +414,7 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { // Server's state to ServingModeNotServing. That prevents new connections // from being accepted, whereas here we simply want the clients to reconnect // to get the updated configuration. 
- if env.RBACSupport { + if envconfig.XDSRBAC { if l.drainCallback != nil { l.drainCallback(l.Listener.Addr()) } diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 1dba999008a5..85ac93c2ed44 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -32,9 +32,9 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/testutils/fakeclient" @@ -326,10 +326,10 @@ func (s) TestNewListenerWrapper(t *testing.T) { // the update from the rds handler should it move the server to // ServingModeServing. 
func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() _, readyCh, xdsC, _, cleanup := newListenerWrapper(t) defer cleanup() diff --git a/xds/internal/test/xds_client_affinity_test.go b/xds/internal/test/xds_client_affinity_test.go index e9ddfe157b12..55d98459251a 100644 --- a/xds/internal/test/xds_client_affinity_test.go +++ b/xds/internal/test/xds_client_affinity_test.go @@ -26,14 +26,15 @@ import ( "fmt" "testing" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/internal/testutils/e2e" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/xds/env" testpb "google.golang.org/grpc/test/grpc_testing" - "google.golang.org/grpc/xds/internal/testutils/e2e" ) const hashHeaderName = "session_id" @@ -86,9 +87,9 @@ func ringhashCluster(clusterName, edsServiceName string) *v3clusterpb.Cluster { // behavior in ring_hash policy. 
func (s) TestClientSideAffinitySanityCheck(t *testing.T) { defer func() func() { - old := env.RingHashSupport - env.RingHashSupport = true - return func() { env.RingHashSupport = old } + old := envconfig.XDSRingHash + envconfig.XDSRingHash = true + return func() { envconfig.XDSRingHash = old } }()() managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 6641a678db3c..c2afebfd2253 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/status" "google.golang.org/grpc/xds" "google.golang.org/grpc/xds/internal/httpfilter/rbac" @@ -374,10 +374,10 @@ func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { // (NonForwardingAction), and the RPC's matching those routes should proceed as // normal. func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) defer cleanup1() @@ -722,10 +722,10 @@ func serverListenerWithRBACHTTPFilters(host string, port uint32, rbacCfg *rpb.RB // as normal and certain RPC's are denied by the RBAC HTTP Filter which gets // called by hooked xds interceptors. 
func (s) TestRBACHTTPFilter(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() rbac.RegisterForTesting() defer rbac.UnregisterForTesting() @@ -970,7 +970,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { // Toggle the RBAC Env variable off, this should disable RBAC and allow any RPC"s through (will not go through // routing or processed by HTTP Filters and thus will never get denied by RBAC). - env.RBACSupport = false + envconfig.XDSRBAC = false if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { t.Fatalf("EmptyCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } @@ -978,7 +978,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { t.Fatalf("UnaryCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } // Toggle RBAC back on for next iterations. - env.RBACSupport = true + envconfig.XDSRBAC = true }() }) } @@ -1103,10 +1103,10 @@ func serverListenerWithBadRouteConfiguration(host string, port uint32) *v3listen func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { // Turn RBAC support on. - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) @@ -1160,10 +1160,10 @@ func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { // Turn RBAC support off. 
- oldRBAC := env.RBACSupport - env.RBACSupport = false + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = false defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 8123d94de5a3..c8c6740bcce0 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -27,8 +27,6 @@ import ( "io/ioutil" "strings" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" "google.golang.org/grpc" @@ -36,9 +34,12 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) const ( @@ -231,8 +232,8 @@ type xdsServer struct { } func bootstrapConfigFromEnvVariable() ([]byte, error) { - fName := env.BootstrapFileName - fContent := env.BootstrapFileContent + fName := envconfig.XDSBootstrapFileName + fContent := envconfig.XDSBootstrapFileContent // Bootstrap file name has higher priority than bootstrap content. 
if fName != "" { @@ -250,7 +251,8 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { return []byte(fContent), nil } - return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", env.BootstrapFileNameEnv, env.BootstrapFileContentEnv) + return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", + envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) } // NewConfig returns a new instance of Config initialized by reading the @@ -339,7 +341,7 @@ func NewConfigFromContents(data []byte) (*Config, error) { return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } case "client_default_listener_resource_name_template": - if !env.FederationSupport { + if !envconfig.XDSFederation { logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) continue } @@ -347,7 +349,7 @@ func NewConfigFromContents(data []byte) (*Config, error) { return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } case "authorities": - if !env.FederationSupport { + if !envconfig.XDSFederation { logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) continue } diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index dd1536845704..edb9d298d023 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -25,10 +25,7 @@ import ( "os" "testing" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/golang/protobuf/proto" - structpb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ 
-37,8 +34,12 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + structpb "github.com/golang/protobuf/ptypes/struct" ) var ( @@ -272,9 +273,9 @@ func setupBootstrapOverride(bootstrapFileMap map[string]string) func() { // code that reads file with the given fileName. func testNewConfigWithFileNameEnv(t *testing.T, fileName string, wantError bool, wantConfig *Config) { t.Helper() - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = fileName - defer func() { env.BootstrapFileName = origBootstrapFileName }() + origBootstrapFileName := envconfig.XDSBootstrapFileName + envconfig.XDSBootstrapFileName = fileName + defer func() { envconfig.XDSBootstrapFileName = origBootstrapFileName }() c, err := NewConfig() if (err != nil) != wantError { @@ -296,9 +297,9 @@ func testNewConfigWithFileContentEnv(t *testing.T, fileName string, wantError bo if err != nil { t.Skip(err) } - origBootstrapContent := env.BootstrapFileContent - env.BootstrapFileContent = string(b) - defer func() { env.BootstrapFileContent = origBootstrapContent }() + origBootstrapContent := envconfig.XDSBootstrapFileContent + envconfig.XDSBootstrapFileContent = string(b) + defer func() { envconfig.XDSBootstrapFileContent = origBootstrapContent }() c, err := NewConfig() if (err != nil) != wantError { @@ -463,13 +464,13 @@ func TestNewConfigBootstrapEnvPriority(t *testing.T) { goodFileContent2 := v3BootstrapFileMap[goodFileName2] goodConfig2 := nonNilCredsConfigV3 - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = "" - defer func() { 
env.BootstrapFileName = origBootstrapFileName }() + origBootstrapFileName := envconfig.XDSBootstrapFileName + envconfig.XDSBootstrapFileName = "" + defer func() { envconfig.XDSBootstrapFileName = origBootstrapFileName }() - origBootstrapContent := env.BootstrapFileContent - env.BootstrapFileContent = "" - defer func() { env.BootstrapFileContent = origBootstrapContent }() + origBootstrapContent := envconfig.XDSBootstrapFileContent + envconfig.XDSBootstrapFileContent = "" + defer func() { envconfig.XDSBootstrapFileContent = origBootstrapContent }() // When both env variables are empty, NewConfig should fail. if _, err := NewConfig(); err == nil { @@ -477,21 +478,21 @@ func TestNewConfigBootstrapEnvPriority(t *testing.T) { } // When one of them is set, it should be used. - env.BootstrapFileName = goodFileName1 - env.BootstrapFileContent = "" + envconfig.XDSBootstrapFileName = goodFileName1 + envconfig.XDSBootstrapFileContent = "" if c, err := NewConfig(); err != nil || c.compare(goodConfig1) != nil { t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) } - env.BootstrapFileName = "" - env.BootstrapFileContent = goodFileContent2 + envconfig.XDSBootstrapFileName = "" + envconfig.XDSBootstrapFileContent = goodFileContent2 if c, err := NewConfig(); err != nil || c.compare(goodConfig2) != nil { t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) } // Set both, file name should be read. 
- env.BootstrapFileName = goodFileName1 - env.BootstrapFileContent = goodFileContent2 + envconfig.XDSBootstrapFileName = goodFileName1 + envconfig.XDSBootstrapFileContent = goodFileContent2 if c, err := NewConfig(); err != nil || c.compare(goodConfig1) != nil { t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) } @@ -978,9 +979,9 @@ func TestNewConfigWithFederation(t *testing.T) { }, } - oldFederationSupport := env.FederationSupport - env.FederationSupport = true - defer func() { env.FederationSupport = oldFederationSupport }() + oldFederationSupport := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldFederationSupport }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/xds/internal/xdsclient/xdsresource/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go index 88dc81305b26..70f75caa26a0 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -27,9 +27,9 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) @@ -632,7 +632,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) // TODO: Implement terminal filter logic, as per A36. 
filterChain.HTTPFilters = filters seenHCM = true - if !env.RBACSupport { + if !envconfig.XDSRBAC { continue } switch hcm.RouteSpecifier.(type) { diff --git a/xds/internal/xdsclient/xdsresource/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go index daa3ad46cbfb..26078be82da8 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain_test.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go @@ -37,9 +37,9 @@ import ( "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/wrapperspb" + "google.golang.org/grpc/internal/envconfig" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils/e2e" @@ -519,10 +519,10 @@ func (s) TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { // TestNewFilterChainImpl_Success_RouteUpdate tests the construction of the // filter chain with valid HTTP Filters present. func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() tests := []struct { name string @@ -759,10 +759,10 @@ func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { // TestNewFilterChainImpl_Failure_BadRouteUpdate verifies cases where the Route // Update in the filter chain are invalid. 
func (s) TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() tests := []struct { name string @@ -971,10 +971,10 @@ func (s) TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_HTTPFilters tests the construction of the // filter chain with valid HTTP Filters present. func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() tests := []struct { name string @@ -1294,10 +1294,10 @@ func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { // TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the // security configuration in the filter chain contains valid data. func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() tests := []struct { desc string @@ -1526,10 +1526,10 @@ func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { // success at config validation time and the filter chains which contains // unsupported match fields will be skipped at lookup time. 
func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() unspecifiedEntry := &destPrefixEntry{ srcTypeArr: [3]*sourcePrefixes{ @@ -1696,10 +1696,10 @@ func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { // TestNewFilterChainImpl_Success_AllCombinations verifies different // combinations of the supported match criteria. func (s) TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() tests := []struct { desc string @@ -2347,10 +2347,10 @@ func (s) TestLookup_Failures(t *testing.T) { } func (s) TestLookup_Successes(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() lisWithDefaultChain := &v3listenerpb.Listener{ FilterChains: []*v3listenerpb.FilterChain{ diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 01ebe7135326..5b34c1ae6e1e 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -28,9 +28,9 @@ import ( v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/xds/env" 
"google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" @@ -85,7 +85,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu case v3clusterpb.Cluster_ROUND_ROBIN: lbPolicy = nil // The default is round_robin, and there's no config to set. case v3clusterpb.Cluster_RING_HASH: - if !env.RingHashSupport { + if !envconfig.XDSRingHash { return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } rhc := cluster.GetRingHashLbConfig() @@ -118,7 +118,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // Process security configuration received from the control plane iff the // corresponding environment variable is set. var sc *SecurityConfig - if env.ClientSideSecuritySupport { + if envconfig.XDSClientSideSecurity { var err error if sc, err = securityConfigFromCluster(cluster); err != nil { return ClusterUpdate{}, err @@ -143,7 +143,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() return ret, nil case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: - if !env.AggregateAndDNSSupportEnv { + if !envconfig.XDSAggregateAndDNS { return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) } ret.ClusterType = ClusterTypeLogicalDNS @@ -154,7 +154,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu ret.DNSHostName = dnsHN return ret, nil case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": - if !env.AggregateAndDNSSupportEnv { + if !envconfig.XDSAggregateAndDNS { return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), 
cluster) } clusters := &v3aggregateclusterpb.ClusterConfig{} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 16874e3c1789..dd2f72e0fada 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -22,6 +22,14 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" + v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" @@ -31,13 +39,6 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" anypb "github.com/golang/protobuf/ptypes/any" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/internal/xds/matcher" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/types/known/wrapperspb" ) const ( @@ -193,12 +194,12 @@ func (s) TestValidateCluster_Failure(t *testing.T) { }, } - oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv - env.AggregateAndDNSSupportEnv = true - defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }() - oldRingHashSupport := env.RingHashSupport - 
env.RingHashSupport = true - defer func() { env.RingHashSupport = oldRingHashSupport }() + oldAggregateAndDNSSupportEnv := envconfig.XDSAggregateAndDNS + envconfig.XDSAggregateAndDNS = true + defer func() { envconfig.XDSAggregateAndDNS = oldAggregateAndDNSSupportEnv }() + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRingHashSupport }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { if update, err := validateClusterAndConstructClusterUpdate(test.cluster); err == nil { @@ -413,12 +414,12 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, } - oldAggregateAndDNSSupportEnv := env.AggregateAndDNSSupportEnv - env.AggregateAndDNSSupportEnv = true - defer func() { env.AggregateAndDNSSupportEnv = oldAggregateAndDNSSupportEnv }() - oldRingHashSupport := env.RingHashSupport - env.RingHashSupport = true - defer func() { env.RingHashSupport = oldRingHashSupport }() + oldAggregateAndDNSSupportEnv := envconfig.XDSAggregateAndDNS + envconfig.XDSAggregateAndDNS = true + defer func() { envconfig.XDSAggregateAndDNS = oldAggregateAndDNSSupportEnv }() + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRingHashSupport }() for _, test := range tests { t.Run(test.name, func(t *testing.T) { update, err := validateClusterAndConstructClusterUpdate(test.cluster) @@ -434,9 +435,9 @@ func (s) TestValidateCluster_Success(t *testing.T) { func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { // Turn off the env var protection for client-side security. 
- origClientSideSecurityEnvVar := env.ClientSideSecuritySupport - env.ClientSideSecuritySupport = false - defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() + origClientSideSecurityEnvVar := envconfig.XDSClientSideSecurity + envconfig.XDSClientSideSecurity = false + defer func() { envconfig.XDSClientSideSecurity = origClientSideSecurityEnvVar }() cluster := &v3clusterpb.Cluster{ Name: clusterName, diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index 101735c25cab..d95e2e7a19ab 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -26,8 +26,8 @@ import ( "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/httpfilter" _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" _ "google.golang.org/grpc/xds/internal/httpfilter/router" @@ -702,10 +702,10 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { } func (s) TestUnmarshalListener_ServerSide(t *testing.T) { - oldRBAC := env.RBACSupport - env.RBACSupport = true + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true defer func() { - env.RBACSupport = oldRBAC + envconfig.XDSRBAC = oldRBAC }() const ( v3LDSTarget = "grpc/server?xds.resource.listening_address=0.0.0.0:9999" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 3e33a0d61bf6..03080f7eebba 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -27,9 +27,9 @@ import ( v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" 
"github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" @@ -303,7 +303,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif action := r.GetRoute() // Hash Policies are only applicable for a Ring Hash LB. - if env.RingHashSupport { + if envconfig.XDSRingHash { hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) if err != nil { return nil, nil, err diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index b06317c373fe..a3fe94c0778b 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -28,8 +28,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -1599,9 +1599,9 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { return fmt.Sprint(fc) }), } - oldRingHashSupport := env.RingHashSupport - env.RingHashSupport = true - defer func() { env.RingHashSupport = oldRingHashSupport }() + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRingHashSupport }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, _, err := routesProtoToSlice(tt.routes, 
nil, nil, false) @@ -1712,9 +1712,9 @@ func (s) TestHashPoliciesProtoToSlice(t *testing.T) { }, } - oldRingHashSupport := env.RingHashSupport - env.RingHashSupport = true - defer func() { env.RingHashSupport = oldRingHashSupport }() + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRingHashSupport }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := hashPoliciesProtoToSlice(tt.hashPolicies, nil) diff --git a/xds/server.go b/xds/server.go index 0b47fd27ef41..098017ec26a6 100644 --- a/xds/server.go +++ b/xds/server.go @@ -32,11 +32,11 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/server" @@ -380,7 +380,7 @@ func routeAndProcess(ctx context.Context) error { // xdsUnaryInterceptor is the unary interceptor added to the gRPC server to // perform any xDS specific functionality on unary RPCs. func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - if env.RBACSupport { + if envconfig.XDSRBAC { if err := routeAndProcess(ctx); err != nil { return nil, err } @@ -391,7 +391,7 @@ func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServ // xdsStreamInterceptor is the stream interceptor added to the gRPC server to // perform any xDS specific functionality on streaming RPCs. 
func xdsStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if env.RBACSupport { + if envconfig.XDSRBAC { if err := routeAndProcess(ss.Context()); err != nil { return err } From ea068ed2e625c00e507633693fd29f8212df68f3 Mon Sep 17 00:00:00 2001 From: Uddeshya Singh Date: Thu, 11 Nov 2021 23:22:54 +0530 Subject: [PATCH 337/998] rpc_util: Change error message to indicate size after decompression (#4918) --- rpc_util.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/rpc_util.go b/rpc_util.go index 87987a2e652f..5d407b004b0e 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -712,13 +712,11 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - } else { - size = len(d) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) + } } return d, nil } From 82d8af8bf09fb889fbe8d5cf8101916e09c5ef74 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 11 Nov 2021 10:59:18 -0800 Subject: [PATCH 338/998] balancer: add Authority field to balancer.BuildOptions (#4969) --- balancer/balancer.go | 29 ++++++---- clientconn.go | 1 + test/balancer_test.go | 125 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 144 insertions(+), 11 deletions(-) diff --git a/balancer/balancer.go b/balancer/balancer.go index 178de0898aa4..bcc6f5451c90 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -174,25 +174,32 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. 
Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. ChannelzParentID int64 // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. CustomUserAgent string - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. 
Target resolver.Target } diff --git a/clientconn.go b/clientconn.go index 972ff1a65baa..97b793e05e27 100644 --- a/clientconn.go +++ b/clientconn.go @@ -285,6 +285,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, diff --git a/test/balancer_test.go b/test/balancer_test.go index 47332db7975e..5d5c85896d30 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/grpcutil" @@ -821,3 +822,127 @@ func (s) TestWaitForReady(t *testing.T) { t.Fatal(err.Error()) } } + +// authorityOverrideTransportCreds returns the configured authority value in its +// Info() method. +type authorityOverrideTransportCreds struct { + credentials.TransportCredentials + authorityOverride string +} + +func (ao *authorityOverrideTransportCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (ao *authorityOverrideTransportCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{ServerName: ao.authorityOverride} +} +func (ao *authorityOverrideTransportCreds) Clone() credentials.TransportCredentials { + return &authorityOverrideTransportCreds{authorityOverride: ao.authorityOverride} +} + +// TestAuthorityInBuildOptions tests that the Authority field in +// balancer.BuildOptions is setup correctly from gRPC. 
+func (s) TestAuthorityInBuildOptions(t *testing.T) { + const dialTarget = "test.server" + + tests := []struct { + name string + dopts []grpc.DialOption + wantAuthority string + }{ + { + name: "authority from dial target", + dopts: []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}, + wantAuthority: dialTarget, + }, + { + name: "authority from dial option", + dopts: []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithAuthority("authority-override"), + }, + wantAuthority: "authority-override", + }, + { + name: "authority from transport creds", + dopts: []grpc.DialOption{grpc.WithTransportCredentials(&authorityOverrideTransportCreds{authorityOverride: "authority-override-from-transport-creds"})}, + wantAuthority: "authority-override-from-transport-creds", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + authorityCh := make(chan string, 1) + bf := stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + select { + case authorityCh <- bd.BuildOptions.Authority: + default: + } + + addrs := ccs.ResolverState.Addresses + if len(addrs) == 0 { + return nil + } + + // Only use the first address. 
+ sc, err := bd.ClientConn.NewSubConn([]resolver.Address{addrs[0]}, balancer.NewSubConnOptions{}) + if err != nil { + return err + } + sc.Connect() + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: state.ConnectivityState, Picker: &aiPicker{result: balancer.PickResult{SubConn: sc}, err: state.ConnectionError}}) + }, + } + balancerName := "stub-balancer-" + test.name + stub.Register(balancerName, bf) + t.Logf("Registered balancer %s...", balancerName) + + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + s := grpc.NewServer() + testpb.RegisterTestServiceServer(s, &testServer{}) + go s.Serve(lis) + defer s.Stop() + t.Logf("Started gRPC server at %s...", lis.Addr().String()) + + r := manual.NewBuilderWithScheme("whatever") + t.Logf("Registered manual resolver with scheme %s...", r.Scheme()) + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) + + dopts := append([]grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, balancerName)), + }, test.dopts...) + cc, err := grpc.Dial(r.Scheme()+":///"+dialTarget, dopts...) 
+ if err != nil { + t.Fatal(err) + } + defer cc.Close() + tc := testpb.NewTestServiceClient(cc) + t.Log("Created a ClientConn...") + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, ", err) + } + t.Log("Made an RPC which succeeded...") + + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for Authority in balancer.BuildOptions") + case gotAuthority := <-authorityCh: + if gotAuthority != test.wantAuthority { + t.Fatalf("Authority in balancer.BuildOptions is %s, want %s", gotAuthority, test.wantAuthority) + } + } + }) + } +} From 6e79bc8afe8cf979fea7aa6e539e23cd400393bb Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 12 Nov 2021 15:50:27 -0500 Subject: [PATCH 339/998] xdsclient: add Cluster Specifier Name to Route (#4972) * xdsclient: add Cluster Specifier Name to Route --- .../xdsclient/controller/v2_rds_test.go | 8 +-- .../xdsclient/xdsresource/filter_chain.go | 6 +- .../xdsresource/filter_chain_test.go | 2 +- .../xdsclient/xdsresource/type_rds.go | 18 +++-- .../xdsresource/unmarshal_lds_test.go | 4 +- .../xdsclient/xdsresource/unmarshal_rds.go | 9 +-- .../xdsresource/unmarshal_rds_test.go | 67 ++++++++++--------- xds/server.go | 2 +- 8 files changed, 62 insertions(+), 54 deletions(-) diff --git a/xds/internal/xdsclient/controller/v2_rds_test.go b/xds/internal/xdsclient/controller/v2_rds_test.go index 0b3dbfc8cfaf..476df71094bf 100644 --- a/xds/internal/xdsclient/controller/v2_rds_test.go +++ b/xds/internal/xdsclient/controller/v2_rds_test.go @@ -111,14 +111,14 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: 
xdsresource.RouteActionRoute}}, + ActionType: xdsresource.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName2: {Weight: 1}}, - RouteAction: xdsresource.RouteActionRoute}}, + ActionType: xdsresource.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig2, @@ -142,13 +142,13 @@ func (s) TestRDSHandleResponseWithRouting(t *testing.T) { Routes: []*xdsresource.Route{{ Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: xdsresource.RouteActionRoute}}, + ActionType: xdsresource.RouteActionRoute}}, }, { Domains: []string{goodLDSTarget1}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName1: {Weight: 1}}, - RouteAction: xdsresource.RouteActionRoute}}, + ActionType: xdsresource.RouteActionRoute}}, }, }, Raw: marshaledGoodRouteConfig1, diff --git a/xds/internal/xdsclient/xdsresource/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go index 70f75caa26a0..78b2a56e8939 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -86,8 +86,8 @@ type VirtualHostWithInterceptors struct { type RouteWithInterceptors struct { // M is the matcher used to match to this route. M *CompositeMatcher - // RouteAction is the type of routing action to initiate once matched to. - RouteAction RouteAction + // ActionType is the type of routing action to initiate once matched to. + ActionType RouteActionType // Interceptors are interceptors instantiated for this route. These will be // constructed from a combination of the top level configuration and any // HTTP Filter overrides present in Virtual Host or Route. 
@@ -112,7 +112,7 @@ func (f *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostW rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) for i, r := range virtualHost.Routes { var err error - rs[i].RouteAction = r.RouteAction + rs[i].ActionType = r.ActionType rs[i].M, err = RouteToMatcher(r) if err != nil { return VirtualHostWithInterceptors{}, fmt.Errorf("matcher construction: %v", err) diff --git a/xds/internal/xdsclient/xdsresource/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go index 26078be82da8..71e537f29b74 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain_test.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go @@ -66,7 +66,7 @@ var ( inlineRouteConfig = &RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{"lds.target.good:3333"}, - Routes: []*Route{{Prefix: newStringP("/"), RouteAction: RouteActionNonForwardingAction}}, + Routes: []*Route{{Prefix: newStringP("/"), ActionType: RouteActionNonForwardingAction}}, }}} emptyValidNetworkFilters = []*v3listenerpb.Filter{ { diff --git a/xds/internal/xdsclient/xdsresource/type_rds.go b/xds/internal/xdsclient/xdsresource/type_rds.go index 47de59e434cc..decffd4ae767 100644 --- a/xds/internal/xdsclient/xdsresource/type_rds.go +++ b/xds/internal/xdsclient/xdsresource/type_rds.go @@ -95,14 +95,14 @@ type HashPolicy struct { RegexSubstitution string } -// RouteAction is the action of the route from a received RDS response. -type RouteAction int +// RouteActionType is the action of the route from a received RDS response. +type RouteActionType int const ( // RouteActionUnsupported are routing types currently unsupported by grpc. // According to A36, "A Route with an inappropriate action causes RPCs // matching that route to fail." - RouteActionUnsupported RouteAction = iota + RouteActionUnsupported RouteActionType = iota // RouteActionRoute is the expected route type on the client side. 
Route // represents routing a request to some upstream cluster. On the client // side, if an RPC matches to a route that is not RouteActionRoute, the RPC @@ -129,7 +129,6 @@ type Route struct { HashPolicies []*HashPolicy // If the matchers above indicate a match, the below configuration is used. - WeightedClusters map[string]WeightedCluster // If MaxStreamDuration is nil, it indicates neither of the route action's // max_stream_duration fields (grpc_timeout_header_max nor // max_stream_duration) were set. In this case, the ListenerUpdate's @@ -143,10 +142,17 @@ type Route struct { HTTPFilterConfigOverride map[string]httpfilter.FilterConfig RetryConfig *RetryConfig - RouteAction RouteAction + ActionType RouteActionType + + // Only one of the following fields (WeightedClusters or + // ClusterSpecifierPlugin) will be set for a route. + WeightedClusters map[string]WeightedCluster + // ClusterSpecifierPlugin is the name of the Cluster Specifier Plugin that + // this Route is linked to, if specified by xDS. + ClusterSpecifierPlugin string } -// WeightedCluster contains settings for an xds RouteAction.WeightedCluster. +// WeightedCluster contains settings for an xds ActionType.WeightedCluster. type WeightedCluster struct { // Weight is the relative weight of the cluster. It will never be zero. 
Weight uint32 diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index d95e2e7a19ab..a5c53886ecbd 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -633,7 +633,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { InlineRouteConfig: &RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{v3LDSTarget}, - Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, RouteAction: RouteActionRoute}}, + Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, ActionType: RouteActionRoute}}, }}}, MaxStreamDuration: time.Second, Raw: v3LisWithInlineRoute, @@ -730,7 +730,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { inlineRouteConfig = &RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{"lds.target.good:3333"}, - Routes: []*Route{{Prefix: newStringP("/"), RouteAction: RouteActionNonForwardingAction}}, + Routes: []*Route{{Prefix: newStringP("/"), ActionType: RouteActionNonForwardingAction}}, }}} emptyValidNetworkFilters = []*v3listenerpb.Filter{ { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 03080f7eebba..a6fbf08d4502 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -119,7 +119,7 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l } // "For any entry in the RouteConfiguration.cluster_specifier_plugins not - // referenced by an enclosed RouteAction's cluster_specifier_plugin, the xDS + // referenced by an enclosed ActionType's cluster_specifier_plugin, the xDS // client should not provide it to its consumers." 
- RLS in xDS Design for name := range csps { if !cspNames[name] { @@ -356,6 +356,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) } cspNames[a.ClusterSpecifierPlugin] = true + route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin default: return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) } @@ -377,13 +378,13 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) } - route.RouteAction = RouteActionRoute + route.ActionType = RouteActionRoute case *v3routepb.Route_NonForwardingAction: // Expected to be used on server side. - route.RouteAction = RouteActionNonForwardingAction + route.ActionType = RouteActionNonForwardingAction default: - route.RouteAction = RouteActionUnsupported + route.ActionType = RouteActionUnsupported } if !v2 { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index a3fe94c0778b..72e5c4149024 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -104,7 +104,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Routes: []*Route{{ Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, HTTPFilterConfigOverride: cfgs, }}, @@ -114,8 +114,9 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { VirtualHosts: []*VirtualHost{{ Domains: []string{ldsTarget}, Routes: []*Route{{ - Prefix: newStringP("1"), - RouteAction: RouteActionRoute, + Prefix: newStringP("1"), + ActionType: RouteActionRoute, + 
ClusterSpecifierPlugin: "cspA", }}, }}, ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{ @@ -155,7 +156,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Routes: []*Route{{ Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, RetryConfig: rrc, }}, RetryConfig: vhrc, @@ -263,7 +264,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Routes: []*Route{{Prefix: newStringP("/"), CaseInsensitive: true, WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, }, @@ -307,13 +308,13 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, }, @@ -345,7 +346,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, }, @@ -422,7 +423,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { "b": {Weight: 3}, "c": {Weight: 5}, }, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, }, }, @@ -457,7 +458,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, 
MaxStreamDuration: newDurationP(time.Second), - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, }, }, @@ -492,7 +493,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, MaxStreamDuration: newDurationP(time.Second), - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, }, }, @@ -527,7 +528,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, MaxStreamDuration: newDurationP(0), - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, }, }, @@ -860,13 +861,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, Raw: v2RouteConfig, @@ -887,13 +888,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, Raw: v3RouteConfig, @@ -914,13 +915,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*Route{{Prefix: newStringP(""), 
WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, Raw: v3RouteConfig, @@ -931,13 +932,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, Raw: v2RouteConfig, @@ -969,13 +970,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, Raw: v3RouteConfig, @@ -986,13 +987,13 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Domains: []string{uninterestingDomain}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - RouteAction: RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, { Domains: []string{ldsTarget}, Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - RouteAction: 
RouteActionRoute}}, + ActionType: RouteActionRoute}}, }, }, Raw: v2RouteConfig, @@ -1059,7 +1060,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { CaseInsensitive: true, WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60, HTTPFilterConfigOverride: cfgs}}, HTTPFilterConfigOverride: cfgs, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }} } ) @@ -1099,7 +1100,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { Prefix: newStringP("/"), CaseInsensitive: true, WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, }, { @@ -1147,7 +1148,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, Fraction: newUInt32P(10000), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, wantErr: false, }, @@ -1193,7 +1194,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }, Fraction: newUInt32P(10000), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, wantErr: false, }, @@ -1228,7 +1229,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { wantRoutes: []*Route{{ Prefix: newStringP("/a/"), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, wantErr: false, }, @@ -1410,7 +1411,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { wantRoutes: []*Route{{ Prefix: newStringP("/a/"), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, wantErr: false, }, @@ -1436,7 +1437,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { wantRoutes: []*Route{{ Prefix: newStringP("/a/"), WeightedClusters: map[string]WeightedCluster{"A": {Weight: 20}, "B": {Weight: 
30}}, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, wantErr: false, }, @@ -1492,7 +1493,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { HashPolicies: []*HashPolicy{ {HashPolicyType: HashPolicyTypeChannelID}, }, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, wantErr: false, }, @@ -1551,7 +1552,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {HashPolicyType: HashPolicyTypeHeader, HeaderName: ":path"}, }, - RouteAction: RouteActionRoute, + ActionType: RouteActionRoute, }}, wantErr: false, }, diff --git a/xds/server.go b/xds/server.go index 098017ec26a6..7d2c404ace9d 100644 --- a/xds/server.go +++ b/xds/server.go @@ -359,7 +359,7 @@ func routeAndProcess(ctx context.Context) error { if r.M.Match(rpcInfo) { // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes // RPCs matching that route to fail with UNAVAILABLE." - A36 - if r.RouteAction != xdsresource.RouteActionNonForwardingAction { + if r.ActionType != xdsresource.RouteActionNonForwardingAction { return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") } rwi = &r From cf8b64e2c5bf11e00856a29794e434460eb67b90 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 12 Nov 2021 12:58:35 -0800 Subject: [PATCH 340/998] internal: add log when service config is disabled (#4973) --- clientconn.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clientconn.go b/clientconn.go index 97b793e05e27..28f09dc87073 100644 --- a/clientconn.go +++ b/clientconn.go @@ -633,7 +633,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var ret error - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + 
cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? From b2317c762757d277fea21e68c714c6b423675570 Mon Sep 17 00:00:00 2001 From: sanjaypujare Date: Mon, 15 Nov 2021 09:45:00 -0800 Subject: [PATCH 341/998] test/kokoro: rename xds_k8s to psm-security as part of tech-debt cleanup and name clarity (#4979) --- test/kokoro/{xds_k8s.cfg => psm-security.cfg} | 2 +- test/kokoro/{xds_k8s.sh => psm-security.sh} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename test/kokoro/{xds_k8s.cfg => psm-security.cfg} (83%) rename test/kokoro/{xds_k8s.sh => psm-security.sh} (100%) diff --git a/test/kokoro/xds_k8s.cfg b/test/kokoro/psm-security.cfg similarity index 83% rename from test/kokoro/xds_k8s.cfg rename to test/kokoro/psm-security.cfg index 4b6bcb58d75d..5faa6b50458a 100644 --- a/test/kokoro/xds_k8s.cfg +++ b/test/kokoro/psm-security.cfg @@ -1,7 +1,7 @@ # Config file for internal CI # Location of the continuous shell script in repository. 
-build_file: "grpc-go/test/kokoro/xds_k8s.sh" +build_file: "grpc-go/test/kokoro/psm-security.sh" timeout_mins: 180 action { diff --git a/test/kokoro/xds_k8s.sh b/test/kokoro/psm-security.sh similarity index 100% rename from test/kokoro/xds_k8s.sh rename to test/kokoro/psm-security.sh From bdf8336f2a89b10221d99bf18286a9cf5053cf5b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 16 Nov 2021 10:45:02 -0800 Subject: [PATCH 342/998] xds/client: cleanup Dump to remove unnecessary version field (#4978) --- xds/csds/csds.go | 4 +-- xds/internal/xdsclient/attributes.go | 8 ++--- xds/internal/xdsclient/dump.go | 8 ++--- xds/internal/xdsclient/dump_test.go | 41 ++++++++++++------------- xds/internal/xdsclient/pubsub/dump.go | 15 +++------ xds/internal/xdsclient/pubsub/pubsub.go | 4 --- xds/internal/xdsclient/pubsub/update.go | 16 ---------- 7 files changed, 34 insertions(+), 62 deletions(-) diff --git a/xds/csds/csds.go b/xds/csds/csds.go index f1e67f1ba63b..0d71f8f8577a 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -174,8 +174,8 @@ func nodeProtoToV3(n proto.Message) *v3corepb.Node { return node } -func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xdsresource.UpdateWithMD)) []*v3statuspb.ClientConfig_GenericXdsConfig { - _, dump := dumpF() +func dumpToGenericXdsConfig(typeURL string, dumpF func() map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig { + dump := dumpF() ret := make([]*v3statuspb.ClientConfig_GenericXdsConfig, 0, len(dump)) for name, d := range dump { config := &v3statuspb.ClientConfig_GenericXdsConfig{ diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index 2f2fcf98ce91..64f87f296591 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -38,10 +38,10 @@ type XDSClient interface { WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func()) ReportLoad(server string) 
(*load.Store, func()) - DumpLDS() (string, map[string]xdsresource.UpdateWithMD) - DumpRDS() (string, map[string]xdsresource.UpdateWithMD) - DumpCDS() (string, map[string]xdsresource.UpdateWithMD) - DumpEDS() (string, map[string]xdsresource.UpdateWithMD) + DumpLDS() map[string]xdsresource.UpdateWithMD + DumpRDS() map[string]xdsresource.UpdateWithMD + DumpCDS() map[string]xdsresource.UpdateWithMD + DumpEDS() map[string]xdsresource.UpdateWithMD BootstrapConfig() *bootstrap.Config Close() diff --git a/xds/internal/xdsclient/dump.go b/xds/internal/xdsclient/dump.go index 0963749277da..61c054d25bc9 100644 --- a/xds/internal/xdsclient/dump.go +++ b/xds/internal/xdsclient/dump.go @@ -23,21 +23,21 @@ import ( ) // DumpLDS returns the status and contents of LDS. -func (c *clientImpl) DumpLDS() (string, map[string]xdsresource.UpdateWithMD) { +func (c *clientImpl) DumpLDS() map[string]xdsresource.UpdateWithMD { return c.pubsub.Dump(xdsresource.ListenerResource) } // DumpRDS returns the status and contents of RDS. -func (c *clientImpl) DumpRDS() (string, map[string]xdsresource.UpdateWithMD) { +func (c *clientImpl) DumpRDS() map[string]xdsresource.UpdateWithMD { return c.pubsub.Dump(xdsresource.RouteConfigResource) } // DumpCDS returns the status and contents of CDS. -func (c *clientImpl) DumpCDS() (string, map[string]xdsresource.UpdateWithMD) { +func (c *clientImpl) DumpCDS() map[string]xdsresource.UpdateWithMD { return c.pubsub.Dump(xdsresource.ClusterResource) } // DumpEDS returns the status and contents of EDS. 
-func (c *clientImpl) DumpEDS() (string, map[string]xdsresource.UpdateWithMD) { +func (c *clientImpl) DumpEDS() map[string]xdsresource.UpdateWithMD { return c.pubsub.Dump(xdsresource.EndpointsResource) } diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index ccc98898a9cd..41fbeb69b7c9 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -30,6 +30,7 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" @@ -40,7 +41,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -90,7 +90,7 @@ func (s) TestLDSConfigDump(t *testing.T) { updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. - if err := compareDump(client.DumpLDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpLDS, map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } @@ -101,7 +101,7 @@ func (s) TestLDSConfigDump(t *testing.T) { wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. 
- if err := compareDump(client.DumpLDS, "", wantRequested); err != nil { + if err := compareDump(client.DumpLDS, wantRequested); err != nil { t.Fatalf(err.Error()) } @@ -117,7 +117,7 @@ func (s) TestLDSConfigDump(t *testing.T) { updateHandler.NewListeners(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. - if err := compareDump(client.DumpLDS, testVersion, want0); err != nil { + if err := compareDump(client.DumpLDS, want0); err != nil { t.Fatalf(err.Error()) } @@ -157,7 +157,7 @@ func (s) TestLDSConfigDump(t *testing.T) { MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: listenerRaws[ldsTargets[1]], } - if err := compareDump(client.DumpLDS, nackVersion, wantDump); err != nil { + if err := compareDump(client.DumpLDS, wantDump); err != nil { t.Fatalf(err.Error()) } } @@ -206,7 +206,7 @@ func (s) TestRDSConfigDump(t *testing.T) { updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. - if err := compareDump(client.DumpRDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpRDS, map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } @@ -217,7 +217,7 @@ func (s) TestRDSConfigDump(t *testing.T) { wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. - if err := compareDump(client.DumpRDS, "", wantRequested); err != nil { + if err := compareDump(client.DumpRDS, wantRequested); err != nil { t.Fatalf(err.Error()) } @@ -233,7 +233,7 @@ func (s) TestRDSConfigDump(t *testing.T) { updateHandler.NewRouteConfigs(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. 
- if err := compareDump(client.DumpRDS, testVersion, want0); err != nil { + if err := compareDump(client.DumpRDS, want0); err != nil { t.Fatalf(err.Error()) } @@ -272,7 +272,7 @@ func (s) TestRDSConfigDump(t *testing.T) { MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: routeRaws[rdsTargets[1]], } - if err := compareDump(client.DumpRDS, nackVersion, wantDump); err != nil { + if err := compareDump(client.DumpRDS, wantDump); err != nil { t.Fatalf(err.Error()) } } @@ -322,7 +322,7 @@ func (s) TestCDSConfigDump(t *testing.T) { updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. - if err := compareDump(client.DumpCDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpCDS, map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } @@ -333,7 +333,7 @@ func (s) TestCDSConfigDump(t *testing.T) { wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. - if err := compareDump(client.DumpCDS, "", wantRequested); err != nil { + if err := compareDump(client.DumpCDS, wantRequested); err != nil { t.Fatalf(err.Error()) } @@ -349,7 +349,7 @@ func (s) TestCDSConfigDump(t *testing.T) { updateHandler.NewClusters(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. 
- if err := compareDump(client.DumpCDS, testVersion, want0); err != nil { + if err := compareDump(client.DumpCDS, want0); err != nil { t.Fatalf(err.Error()) } @@ -388,7 +388,7 @@ func (s) TestCDSConfigDump(t *testing.T) { MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: clusterRaws[cdsTargets[1]], } - if err := compareDump(client.DumpCDS, nackVersion, wantDump); err != nil { + if err := compareDump(client.DumpCDS, wantDump); err != nil { t.Fatalf(err.Error()) } } @@ -424,7 +424,7 @@ func (s) TestEDSConfigDump(t *testing.T) { updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. - if err := compareDump(client.DumpEDS, "", map[string]xdsresource.UpdateWithMD{}); err != nil { + if err := compareDump(client.DumpEDS, map[string]xdsresource.UpdateWithMD{}); err != nil { t.Fatalf(err.Error()) } @@ -435,7 +435,7 @@ func (s) TestEDSConfigDump(t *testing.T) { wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} } // Expected requested. - if err := compareDump(client.DumpEDS, "", wantRequested); err != nil { + if err := compareDump(client.DumpEDS, wantRequested); err != nil { t.Fatalf(err.Error()) } @@ -451,7 +451,7 @@ func (s) TestEDSConfigDump(t *testing.T) { updateHandler.NewEndpoints(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. 
- if err := compareDump(client.DumpEDS, testVersion, want0); err != nil { + if err := compareDump(client.DumpEDS, want0); err != nil { t.Fatalf(err.Error()) } @@ -490,16 +490,13 @@ func (s) TestEDSConfigDump(t *testing.T) { MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, Raw: endpointRaws[edsTargets[1]], } - if err := compareDump(client.DumpEDS, nackVersion, wantDump); err != nil { + if err := compareDump(client.DumpEDS, wantDump); err != nil { t.Fatalf(err.Error()) } } -func compareDump(dumpFunc func() (string, map[string]xdsresource.UpdateWithMD), wantVersion string, wantDump interface{}) error { - v, dump := dumpFunc() - if v != wantVersion { - return fmt.Errorf("Dump() returned version %q, want %q", v, wantVersion) - } +func compareDump(dumpFunc func() map[string]xdsresource.UpdateWithMD, wantDump interface{}) error { + dump := dumpFunc() cmpOpts := cmp.Options{ cmpopts.EquateEmpty(), cmp.Comparer(func(a, b time.Time) bool { return true }), diff --git a/xds/internal/xdsclient/pubsub/dump.go b/xds/internal/xdsclient/pubsub/dump.go index b9523ee76a27..2ff19a901616 100644 --- a/xds/internal/xdsclient/pubsub/dump.go +++ b/xds/internal/xdsclient/pubsub/dump.go @@ -50,35 +50,30 @@ func rawFromCache(s string, cache interface{}) *anypb.Any { } // Dump dumps the resource for the given type. 
-func (pb *Pubsub) Dump(t xdsresource.ResourceType) (string, map[string]xdsresource.UpdateWithMD) { +func (pb *Pubsub) Dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD { pb.mu.Lock() defer pb.mu.Unlock() var ( - version string - md map[string]xdsresource.UpdateMetadata - cache interface{} + md map[string]xdsresource.UpdateMetadata + cache interface{} ) switch t { case xdsresource.ListenerResource: - version = pb.ldsVersion md = pb.ldsMD cache = pb.ldsCache case xdsresource.RouteConfigResource: - version = pb.rdsVersion md = pb.rdsMD cache = pb.rdsCache case xdsresource.ClusterResource: - version = pb.cdsVersion md = pb.cdsMD cache = pb.cdsCache case xdsresource.EndpointsResource: - version = pb.edsVersion md = pb.edsMD cache = pb.edsCache default: pb.logger.Errorf("dumping resource of unknown type: %v", t) - return "", nil + return nil } ret := make(map[string]xdsresource.UpdateWithMD, len(md)) @@ -88,5 +83,5 @@ func (pb *Pubsub) Dump(t xdsresource.ResourceType) (string, map[string]xdsresour Raw: rawFromCache(s, cache), } } - return version, ret + return ret } diff --git a/xds/internal/xdsclient/pubsub/pubsub.go b/xds/internal/xdsclient/pubsub/pubsub.go index d876eca0b986..a843fd5f191f 100644 --- a/xds/internal/xdsclient/pubsub/pubsub.go +++ b/xds/internal/xdsclient/pubsub/pubsub.go @@ -46,19 +46,15 @@ type Pubsub struct { // All the following maps are to keep the updates/metadata in a cache. mu sync.Mutex ldsWatchers map[string]map[*watchInfo]bool - ldsVersion string // Only used in CSDS. ldsCache map[string]xdsresource.ListenerUpdate ldsMD map[string]xdsresource.UpdateMetadata rdsWatchers map[string]map[*watchInfo]bool - rdsVersion string // Only used in CSDS. rdsCache map[string]xdsresource.RouteConfigUpdate rdsMD map[string]xdsresource.UpdateMetadata cdsWatchers map[string]map[*watchInfo]bool - cdsVersion string // Only used in CSDS. 
cdsCache map[string]xdsresource.ClusterUpdate cdsMD map[string]xdsresource.UpdateMetadata edsWatchers map[string]map[*watchInfo]bool - edsVersion string // Only used in CSDS. edsCache map[string]xdsresource.EndpointsUpdate edsMD map[string]xdsresource.UpdateMetadata } diff --git a/xds/internal/xdsclient/pubsub/update.go b/xds/internal/xdsclient/pubsub/update.go index 9e7b398e1e3d..ab8c94ccf3f7 100644 --- a/xds/internal/xdsclient/pubsub/update.go +++ b/xds/internal/xdsclient/pubsub/update.go @@ -79,10 +79,6 @@ func (pb *Pubsub) NewListeners(updates map[string]xdsresource.ListenerUpdateErrT pb.mu.Lock() defer pb.mu.Unlock() - pb.ldsVersion = metadata.Version - if metadata.ErrState != nil { - pb.ldsVersion = metadata.ErrState.Version - } for name, uErr := range updates { if s, ok := pb.ldsWatchers[name]; ok { if uErr.Err != nil { @@ -145,10 +141,6 @@ func (pb *Pubsub) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpda defer pb.mu.Unlock() // If no error received, the status is ACK. 
- pb.rdsVersion = metadata.Version - if metadata.ErrState != nil { - pb.rdsVersion = metadata.ErrState.Version - } for name, uErr := range updates { if s, ok := pb.rdsWatchers[name]; ok { if uErr.Err != nil { @@ -193,10 +185,6 @@ func (pb *Pubsub) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTup pb.mu.Lock() defer pb.mu.Unlock() - pb.cdsVersion = metadata.Version - if metadata.ErrState != nil { - pb.cdsVersion = metadata.ErrState.Version - } for name, uErr := range updates { if s, ok := pb.cdsWatchers[name]; ok { if uErr.Err != nil { @@ -260,10 +248,6 @@ func (pb *Pubsub) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErr pb.mu.Lock() defer pb.mu.Unlock() - pb.edsVersion = metadata.Version - if metadata.ErrState != nil { - pb.edsVersion = metadata.ErrState.Version - } for name, uErr := range updates { if s, ok := pb.edsWatchers[name]; ok { if uErr.Err != nil { From 23becb71f7f8957cd8e60f4a0c8bec7eb5c424c9 Mon Sep 17 00:00:00 2001 From: Yuan Tang Date: Tue, 16 Nov 2021 14:49:29 -0500 Subject: [PATCH 343/998] examples: Fix server port in route_guide example to work with client (#4975) --- examples/route_guide/server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/route_guide/server/server.go b/examples/route_guide/server/server.go index dd804406afcd..7c09e533ee95 100644 --- a/examples/route_guide/server/server.go +++ b/examples/route_guide/server/server.go @@ -50,7 +50,7 @@ var ( certFile = flag.String("cert_file", "", "The TLS cert file") keyFile = flag.String("key_file", "", "The TLS key file") jsonDBFile = flag.String("json_db_file", "", "A json file containing a list of features") - port = flag.Int("port", 10000, "The server port") + port = flag.Int("port", 50051, "The server port") ) type routeGuideServer struct { From 295d7e66becc3af3b81d2116bf3706954731a01d Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 17 Nov 2021 12:06:04 -0800 Subject: [PATCH 344/998] internal: move leakcheck to t.Cleanup 
(#4989) --- internal/grpctest/grpctest.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/internal/grpctest/grpctest.go b/internal/grpctest/grpctest.go index 53d1c24f4da5..a4b49d5f9a5e 100644 --- a/internal/grpctest/grpctest.go +++ b/internal/grpctest/grpctest.go @@ -97,9 +97,13 @@ func RunSubTests(t *testing.T, x interface{}) { } tfunc := getTestFunc(t, xv, methodName) t.Run(strings.TrimPrefix(methodName, "Test"), func(t *testing.T) { + // Run leakcheck in t.Cleanup() to guarantee it is run even if tfunc + // or setup uses t.Fatal(). + // + // Note that a defer would run before t.Cleanup, so if a goroutine + // is closed by a test's t.Cleanup, a deferred leakcheck would fail. + t.Cleanup(func() { teardown(t) }) setup(t) - // defer teardown to guarantee it is run even if tfunc uses t.Fatal() - defer teardown(t) tfunc(t) }) } From f45e61797429f3d78ba0ae6f06527e2f98653f89 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 18 Nov 2021 16:51:41 -0800 Subject: [PATCH 345/998] rls: move pb.gos to grpc/internal/proto/grpc_lookup_v1 (#4993) --- balancer/rls/internal/client.go | 2 +- balancer/rls/internal/client_test.go | 2 +- balancer/rls/internal/config.go | 3 +-- balancer/rls/internal/keys/builder.go | 2 +- balancer/rls/internal/keys/builder_test.go | 2 +- balancer/rls/internal/picker_test.go | 2 +- balancer/rls/internal/testutils/fakeserver/fakeserver.go | 4 ++-- .../rls/internal => internal}/proto/grpc_lookup_v1/rls.pb.go | 0 .../proto/grpc_lookup_v1/rls_config.pb.go | 0 .../internal => internal}/proto/grpc_lookup_v1/rls_grpc.pb.go | 0 regenerate.sh | 4 ++-- 11 files changed, 10 insertions(+), 11 deletions(-) rename {balancer/rls/internal => internal}/proto/grpc_lookup_v1/rls.pb.go (100%) rename {balancer/rls/internal => internal}/proto/grpc_lookup_v1/rls_config.pb.go (100%) rename {balancer/rls/internal => internal}/proto/grpc_lookup_v1/rls_grpc.pb.go (100%) diff --git a/balancer/rls/internal/client.go 
b/balancer/rls/internal/client.go index 4e263adc2590..af22bda188cd 100644 --- a/balancer/rls/internal/client.go +++ b/balancer/rls/internal/client.go @@ -23,7 +23,7 @@ import ( "time" "google.golang.org/grpc" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" ) // For gRPC services using RLS, the value of target_type in the diff --git a/balancer/rls/internal/client_test.go b/balancer/rls/internal/client_test.go index 6c8af6a6ab82..9a805c77ca32 100644 --- a/balancer/rls/internal/client_test.go +++ b/balancer/rls/internal/client_test.go @@ -28,9 +28,9 @@ import ( "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/balancer/rls/internal/testutils/fakeserver" "google.golang.org/grpc/codes" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" ) diff --git a/balancer/rls/internal/config.go b/balancer/rls/internal/config.go index b27a2970f083..e3f261d4026c 100644 --- a/balancer/rls/internal/config.go +++ b/balancer/rls/internal/config.go @@ -28,10 +28,9 @@ import ( "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/ptypes" durationpb "github.com/golang/protobuf/ptypes/duration" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/keys" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) diff --git a/balancer/rls/internal/keys/builder.go b/balancer/rls/internal/keys/builder.go index 24767b405f06..55cf7478c887 100644 --- a/balancer/rls/internal/keys/builder.go +++ 
b/balancer/rls/internal/keys/builder.go @@ -25,7 +25,7 @@ import ( "sort" "strings" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) diff --git a/balancer/rls/internal/keys/builder_test.go b/balancer/rls/internal/keys/builder_test.go index a5cad29e0c93..d7b74b3c0dd4 100644 --- a/balancer/rls/internal/keys/builder_test.go +++ b/balancer/rls/internal/keys/builder_test.go @@ -24,7 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) diff --git a/balancer/rls/internal/picker_test.go b/balancer/rls/internal/picker_test.go index 762eb5fd80e9..f115be98a399 100644 --- a/balancer/rls/internal/picker_test.go +++ b/balancer/rls/internal/picker_test.go @@ -31,8 +31,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/cache" "google.golang.org/grpc/balancer/rls/internal/keys" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/internal/grpcrand" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/metadata" ) diff --git a/balancer/rls/internal/testutils/fakeserver/fakeserver.go b/balancer/rls/internal/testutils/fakeserver/fakeserver.go index 479e3036468f..8554ffbf78bf 100644 --- a/balancer/rls/internal/testutils/fakeserver/fakeserver.go +++ b/balancer/rls/internal/testutils/fakeserver/fakeserver.go @@ -28,8 +28,8 @@ import ( "time" "google.golang.org/grpc" - rlsgrpc "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb 
"google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/internal/testutils" ) diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go b/internal/proto/grpc_lookup_v1/rls.pb.go similarity index 100% rename from balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go rename to internal/proto/grpc_lookup_v1/rls.pb.go diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go similarity index 100% rename from balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go rename to internal/proto/grpc_lookup_v1/rls_config.pb.go diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go similarity index 100% rename from balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go rename to internal/proto/grpc_lookup_v1/rls_grpc.pb.go diff --git a/regenerate.sh b/regenerate.sh index dfd3226a1d96..a0a71aae9681 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -102,8 +102,8 @@ done # The go_package option in grpc/lookup/v1/rls.proto doesn't match the # current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 # grpc_testingv3/testv3.pb.go is not re-generated because it was # intentionally generated by an older version of protoc-gen-go. From d542bfcee46d733f7bf8a5e870994379863da2d2 Mon Sep 17 00:00:00 2001 From: "Mark S. 
Lewis" Date: Fri, 19 Nov 2021 18:12:24 +0000 Subject: [PATCH 346/998] status: support wrapped errors in FromContextError (#4977) --- status/status.go | 19 ++++++++++--------- status/status_test.go | 2 ++ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/status/status.go b/status/status.go index af2cffe985c0..6d163b6e3842 100644 --- a/status/status.go +++ b/status/status.go @@ -29,6 +29,7 @@ package status import ( "context" + "errors" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -117,18 +118,18 @@ func Code(err error) codes.Code { return codes.Unknown } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/status/status_test.go b/status/status_test.go index 839a3c390ede..420fb6b8102c 100644 --- a/status/status_test.go +++ b/status/status_test.go @@ -364,6 +364,8 @@ func (s) TestFromContextError(t *testing.T) { {in: context.DeadlineExceeded, want: New(codes.DeadlineExceeded, context.DeadlineExceeded.Error())}, {in: context.Canceled, want: New(codes.Canceled, context.Canceled.Error())}, {in: errors.New("other"), want: New(codes.Unknown, "other")}, + {in: fmt.Errorf("wrapped: %w", context.DeadlineExceeded), want: New(codes.DeadlineExceeded, 
"wrapped: "+context.DeadlineExceeded.Error())}, + {in: fmt.Errorf("wrapped: %w", context.Canceled), want: New(codes.Canceled, "wrapped: "+context.Canceled.Error())}, } for _, tc := range testCases { got := FromContextError(tc.in) From 6f8796bc00be24b00b6cf0c88a5fe2555b716486 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 29 Nov 2021 14:27:43 -0800 Subject: [PATCH 347/998] rls: double import rls protos (#5003) Use `rlspb` for messages and `rlsgrpc` for services --- balancer/rls/internal/client.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/balancer/rls/internal/client.go b/balancer/rls/internal/client.go index af22bda188cd..b0c858e032e5 100644 --- a/balancer/rls/internal/client.go +++ b/balancer/rls/internal/client.go @@ -23,6 +23,7 @@ import ( "time" "google.golang.org/grpc" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" ) @@ -43,7 +44,7 @@ const grpcTargetType = "grpc" // throttling and asks this client to make an RPC call only after checking with // the throttler. type rlsClient struct { - stub rlspb.RouteLookupServiceClient + stub rlsgrpc.RouteLookupServiceClient // origDialTarget is the original dial target of the user and sent in each // RouteLookup RPC made to the RLS server. origDialTarget string @@ -54,7 +55,7 @@ type rlsClient struct { func newRLSClient(cc *grpc.ClientConn, dialTarget string, rpcTimeout time.Duration) *rlsClient { return &rlsClient{ - stub: rlspb.NewRouteLookupServiceClient(cc), + stub: rlsgrpc.NewRouteLookupServiceClient(cc), origDialTarget: dialTarget, rpcTimeout: rpcTimeout, } From 58beff180d7a0627144f39d1bdd4286c6cb9ee0a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 29 Nov 2021 14:28:18 -0800 Subject: [PATCH 348/998] balancergroup: add method to exitIdle a sub-balancer (#4994) This is required for the RLS LB policy. 
At pick time, if the RLS picker finds one of its child policies in IDLE, it needs to be able to ask it to exit idle. --- internal/balancergroup/balancergroup.go | 10 ++++++++ internal/balancergroup/balancergroup_test.go | 26 ++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index d516f215323b..9776158dd986 100644 --- a/internal/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -506,3 +506,13 @@ func (bg *BalancerGroup) ExitIdle() { } bg.outgoingMu.Unlock() } + +// ExitIdleOne instructs the sub-balancer `id` to exit IDLE state, if +// appropriate and possible. +func (bg *BalancerGroup) ExitIdleOne(id string) { + bg.outgoingMu.Lock() + if config := bg.idToBalancerConfig[id]; config != nil { + config.exitIdle() + } + bg.outgoingMu.Unlock() +} diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index ef11e402ec2e..4942f8a7da87 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -509,3 +509,29 @@ func (s) TestBalancerGroupBuildOptions(t *testing.T) { t.Fatal(err) } } + +func (s) TestBalancerExitIdleOne(t *testing.T) { + const balancerName = "stub-balancer-test-balancergroup-exit-idle-one" + exitIdleCh := make(chan struct{}, 1) + stub.Register(balancerName, stub.BalancerFuncs{ + ExitIdle: func(*stub.BalancerData) { + exitIdleCh <- struct{}{} + }, + }) + cc := testutils.NewTestClientConn(t) + bg := New(cc, balancer.BuildOptions{}, nil, nil) + bg.Start() + defer bg.Close() + + // Add the stub balancer build above as a child policy. + builder := balancer.Get(balancerName) + bg.Add(testBalancerIDs[0], builder) + + // Call ExitIdle on the child policy. 
+ bg.ExitIdleOne(testBalancerIDs[0]) + select { + case <-time.After(time.Second): + t.Fatal("Timeout when waiting for ExitIdle to be invoked on child policy") + case <-exitIdleCh: + } +} From 872a6f12e327557a34e6ac4415f5c919fe986e58 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 30 Nov 2021 15:26:31 -0800 Subject: [PATCH 349/998] xds/c2p: fix default client resource template, and xds-client target scheme (#5010) --- xds/googledirectpath/googlec2p.go | 3 ++- xds/googledirectpath/googlec2p_test.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 8841f72173b3..7f3769df8bb8 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -49,7 +49,7 @@ import ( const ( c2pScheme = "google-c2p" - tdURL = "directpath-pa.googleapis.com" + tdURL = "dns:///directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" @@ -110,6 +110,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts TransportAPI: version.TransportV3, NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), }, + ClientDefaultListenerResourceNameTemplate: "%s", } // Create singleton xds client with this config. 
The xds client will be diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 7f7c08ba28b7..00f8aa0b21be 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -217,6 +217,7 @@ func TestBuildXDS(t *testing.T) { TransportAPI: version.TransportV3, NodeProto: wantNode, }, + ClientDefaultListenerResourceNameTemplate: "%s", } if tt.tdURI != "" { wantConfig.XDSServer.ServerURI = tt.tdURI From c2bccd0b1594416da57a74d15f09f8eb0a3d727b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 30 Nov 2021 15:31:14 -0800 Subject: [PATCH 350/998] xds/kokoro: install go 1.17, and retry go build (#5015) --- test/kokoro/xds.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index f9cb7dab7332..63dad66d2bbd 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -13,7 +13,13 @@ shopt -s extglob branch="${branch//[[:space:]]}" branch="${branch##remotes/origin/}" shopt -u extglob -go build +# Install a version of Go supported by gRPC for the new features, e.g. +# errors.Is() +curl --retry 3 -O -L https://go.dev/dl/go1.17.3.linux-amd64.tar.gz +sudo tar -C /usr/local -xf go1.17.3.linux-amd64.tar.gz +sudo ln -s /usr/local/go/bin/go /usr/bin/go +# Retry go build on errors (e.g. 
go get connection errors), for at most 3 times +for i in 1 2 3; do go build && break || sleep 5; done popd git clone -b "${branch}" --single-branch --depth=1 https://github.com/grpc/grpc.git From 46935b96506deef74e73192e70cc44e3795b1452 Mon Sep 17 00:00:00 2001 From: Shihao Xia Date: Wed, 1 Dec 2021 18:10:13 -0500 Subject: [PATCH 351/998] fix possible nil before casting (#5017) --- .../balancer/clustermanager/clustermanager_test.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index 503ed58fd09e..5b3a2403e1a7 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -559,9 +559,12 @@ func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if v, err := ccsCh.Receive(ctx); err != nil { - err2 := v.(error) - t.Fatal(err2) + v, err := ccsCh.Receive(ctx) + if err != nil { + t.Fatalf("timed out waiting for UpdateClientConnState result: %v", err) + } + if v != nil { + t.Fatal(v) } } From f3bbd12084380987ae0c9fcccdd7c4d405294a68 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 2 Dec 2021 13:54:15 -0800 Subject: [PATCH 352/998] xds/bootstrap_config: add a string function to server config (#5031) --- xds/internal/xdsclient/bootstrap/bootstrap.go | 26 ++++++++++++++++--- .../xdsclient/bootstrap/bootstrap_test.go | 22 ++++++++-------- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index c8c6740bcce0..ecec170774c7 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -71,8 +71,8 @@ type ServerConfig struct { // Creds contains the credentials to be used 
while talking to the xDS // server, as a grpc.DialOption. Creds grpc.DialOption - // credsType is the type of the creds. It will be used to dedup servers. - credsType string + // CredsType is the type of the creds. It will be used to dedup servers. + CredsType string // TransportAPI indicates the API version of xDS transport protocol to use. // This describes the xDS gRPC endpoint and version of // DiscoveryRequest/Response used on the wire. @@ -86,6 +86,26 @@ type ServerConfig struct { NodeProto proto.Message } +// String returns the string representation of the ServerConfig. +// +// This string representation will be used as map keys in federation +// (`map[ServerConfig]authority`), so that the xDS ClientConn and stream will be +// shared by authorities with different names but the same server config. +// +// It covers (almost) all the fields so the string can represent the config +// content. It doesn't cover NodeProto because NodeProto isn't used by +// federation. +func (sc *ServerConfig) String() string { + var ver string + switch sc.TransportAPI { + case version.TransportV3: + ver = "xDSv3" + case version.TransportV2: + ver = "xDSv2" + } + return strings.Join([]string{sc.ServerURI, sc.CredsType, ver}, "-") +} + // UnmarshalJSON takes the json data (a list of servers) and unmarshals the // first one in the list. func (sc *ServerConfig) UnmarshalJSON(data []byte) error { @@ -100,7 +120,7 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { sc.ServerURI = xs.ServerURI for _, cc := range xs.ChannelCreds { // We stop at the first credential type that we support. 
- sc.credsType = cc.Type + sc.CredsType = cc.Type if cc.Type == credsGoogleDefault { sc.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials()) break diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index edb9d298d023..6b6933e97bf2 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -212,7 +212,7 @@ var ( XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - credsType: "insecure", + CredsType: "insecure", NodeProto: v2NodeProto, }, ClientDefaultListenerResourceNameTemplate: "%s", @@ -221,7 +221,7 @@ var ( XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", NodeProto: v2NodeProto, }, ClientDefaultListenerResourceNameTemplate: "%s", @@ -230,7 +230,7 @@ var ( XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", TransportAPI: version.TransportV3, NodeProto: v3NodeProto, }, @@ -395,7 +395,7 @@ func TestNewConfigV2ProtoSuccess(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - credsType: "insecure", + CredsType: "insecure", NodeProto: &v2corepb.Node{ BuildVersion: gRPCVersion, UserAgentName: gRPCUserAgentName, @@ -665,7 +665,7 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", TransportAPI: 
version.TransportV3, NodeProto: v3NodeProto, }, @@ -759,7 +759,7 @@ func TestNewConfigWithServerListenerResourceNameTemplate(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", TransportAPI: version.TransportV2, NodeProto: v2NodeProto, }, @@ -908,7 +908,7 @@ func TestNewConfigWithFederation(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", TransportAPI: version.TransportV2, NodeProto: v2NodeProto, }, @@ -920,7 +920,7 @@ func TestNewConfigWithFederation(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "td.com", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", TransportAPI: version.TransportV3, NodeProto: v3NodeProto, }, @@ -934,7 +934,7 @@ func TestNewConfigWithFederation(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", TransportAPI: version.TransportV2, NodeProto: v2NodeProto, }, @@ -947,7 +947,7 @@ func TestNewConfigWithFederation(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + CredsType: "google_default", TransportAPI: version.TransportV2, NodeProto: v2NodeProto, }, @@ -965,7 +965,7 @@ func TestNewConfigWithFederation(t *testing.T) { XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - credsType: "google_default", + 
CredsType: "google_default", TransportAPI: version.TransportV2, NodeProto: v2NodeProto, }, From 512e89474bfbd567806e026e0afdec468b1ee1ea Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 3 Dec 2021 15:25:06 -0800 Subject: [PATCH 353/998] rls: support extra_keys and constant_keys (#4995) * rls: support extra_keys and constant_keys * review comments * use the constant_keys map from the proto --- balancer/rls/internal/keys/builder.go | 121 +++++---- balancer/rls/internal/keys/builder_test.go | 276 +++++++++++++++------ balancer/rls/internal/picker.go | 14 +- 3 files changed, 280 insertions(+), 131 deletions(-) diff --git a/balancer/rls/internal/keys/builder.go b/balancer/rls/internal/keys/builder.go index 55cf7478c887..fbf45c668d7f 100644 --- a/balancer/rls/internal/keys/builder.go +++ b/balancer/rls/internal/keys/builder.go @@ -29,25 +29,11 @@ import ( "google.golang.org/grpc/metadata" ) -// BuilderMap provides a mapping from a request path to the key builder to be -// used for that path. -// The BuilderMap is constructed by parsing the RouteLookupConfig received by -// the RLS balancer as part of its ServiceConfig, and is used by the picker in -// the data path to build the RLS keys to be used for a given request. +// BuilderMap maps from request path to the key builder for that path. type BuilderMap map[string]builder // MakeBuilderMap parses the provided RouteLookupConfig proto and returns a map // from paths to key builders. 
-// -// The following conditions are validated, and an error is returned if any of -// them is not met: -// grpc_keybuilders field -// * must have at least one entry -// * must not have two entries with the same Name -// * must not have any entry with a Name with the service field unset or empty -// * must not have any entries without a Name -// * must not have a headers entry that has required_match set -// * must not have two headers entries with the same key within one entry func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { kbs := cfg.GetGrpcKeybuilders() if len(kbs) == 0 { @@ -56,21 +42,46 @@ func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { bm := make(map[string]builder) for _, kb := range kbs { + // Extract keys from `headers`, `constant_keys` and `extra_keys` fields + // and populate appropriate values in the builder struct. Also ensure + // that keys are not repeated. var matchers []matcher seenKeys := make(map[string]bool) + constantKeys := kb.GetConstantKeys() + for k := range kb.GetConstantKeys() { + seenKeys[k] = true + } for _, h := range kb.GetHeaders() { if h.GetRequiredMatch() { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set {%+v}", kbs) } key := h.GetKey() if seenKeys[key] { - return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Key field in headers {%+v}", kbs) + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q across headers, constant_keys and extra_keys {%+v}", key, kbs) } seenKeys[key] = true matchers = append(matchers, matcher{key: h.GetKey(), names: h.GetNames()}) } - b := builder{matchers: matchers} + if seenKeys[kb.GetExtraKeys().GetHost()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetHost(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetService()] { + return nil, 
fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetService(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetMethod()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetMethod(), kbs) + } + b := builder{ + headerKeys: matchers, + constantKeys: constantKeys, + hostKey: kb.GetExtraKeys().GetHost(), + serviceKey: kb.GetExtraKeys().GetService(), + methodKey: kb.GetExtraKeys().GetMethod(), + } + // Store the builder created above in the BuilderMap based on the value + // of the `Names` field, which wraps incoming request's service and + // method. Also, ensure that there are no repeated `Names` field. names := kb.GetNames() if len(names) == 0 { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig does not contain any Name {%+v}", kbs) @@ -108,16 +119,31 @@ type KeyMap struct { // RLSKey builds the RLS keys to be used for the given request, identified by // the request path and the request headers stored in metadata. -func (bm BuilderMap) RLSKey(md metadata.MD, path string) KeyMap { +func (bm BuilderMap) RLSKey(md metadata.MD, host, path string) KeyMap { + i := strings.LastIndex(path, "/") + service, method := path[:i+1], path[i+1:] b, ok := bm[path] if !ok { - i := strings.LastIndex(path, "/") - b, ok = bm[path[:i+1]] + b, ok = bm[service] if !ok { return KeyMap{} } } - return b.keys(md) + + kvMap := b.buildHeaderKeys(md) + if b.hostKey != "" { + kvMap[b.hostKey] = host + } + if b.serviceKey != "" { + kvMap[b.serviceKey] = service + } + if b.methodKey != "" { + kvMap[b.methodKey] = method + } + for k, v := range b.constantKeys { + kvMap[k] = v + } + return KeyMap{Map: kvMap, Str: mapToString(kvMap)} } // Equal reports whether bm and am represent equivalent BuilderMaps. 
@@ -141,26 +167,19 @@ func (bm BuilderMap) Equal(am BuilderMap) bool { return true } -// builder provides the actual functionality of building RLS keys. These are -// stored in the BuilderMap. -// While processing a pick, the picker looks in the BuilderMap for the -// appropriate builder to be used for the given RPC. For each of the matchers -// in the found builder, we iterate over the list of request headers (available -// as metadata in the context). Once a header matches one of the names in the -// matcher, we set the value of the header in the keyMap (with the key being -// the one found in the matcher) and move on to the next matcher. If no -// KeyBuilder was found in the map, or no header match was found, an empty -// keyMap is returned. +// builder provides the actual functionality of building RLS keys. type builder struct { - matchers []matcher + headerKeys []matcher + constantKeys map[string]string + // The following keys mirror corresponding fields in `extra_keys`. + hostKey string + serviceKey string + methodKey string } // Equal reports whether b and a represent equivalent key builders. func (b builder) Equal(a builder) bool { - if (b.matchers == nil) != (a.matchers == nil) { - return false - } - if len(b.matchers) != len(a.matchers) { + if len(b.headerKeys) != len(a.headerKeys) { return false } // Protobuf serialization maintains the order of repeated fields. Matchers @@ -168,13 +187,23 @@ func (b builder) Equal(a builder) bool { // order changes, it means that the order in the protobuf changed. We report // this case as not being equal even though the builders could possible be // functionally equal. 
- for i, bMatcher := range b.matchers { - aMatcher := a.matchers[i] + for i, bMatcher := range b.headerKeys { + aMatcher := a.headerKeys[i] if !bMatcher.Equal(aMatcher) { return false } } - return true + + if len(b.constantKeys) != len(a.constantKeys) { + return false + } + for k, v := range b.constantKeys { + if a.constantKeys[k] != v { + return false + } + } + + return b.hostKey == a.hostKey && b.serviceKey == a.serviceKey && b.methodKey == a.methodKey } // matcher helps extract a key from request headers based on a given name. @@ -185,14 +214,11 @@ type matcher struct { names []string } -// Equal reports if m and are are equivalent matchers. +// Equal reports if m and are are equivalent headerKeys. func (m matcher) Equal(a matcher) bool { if m.key != a.key { return false } - if (m.names == nil) != (a.names == nil) { - return false - } if len(m.names) != len(a.names) { return false } @@ -204,9 +230,12 @@ func (m matcher) Equal(a matcher) bool { return true } -func (b builder) keys(md metadata.MD) KeyMap { +func (b builder) buildHeaderKeys(md metadata.MD) map[string]string { kvMap := make(map[string]string) - for _, m := range b.matchers { + if len(md) == 0 { + return kvMap + } + for _, m := range b.headerKeys { for _, name := range m.names { if vals := md.Get(name); vals != nil { kvMap[m.key] = strings.Join(vals, ",") @@ -214,7 +243,7 @@ func (b builder) keys(md metadata.MD) KeyMap { } } } - return KeyMap{Map: kvMap, Str: mapToString(kvMap)} + return kvMap } func mapToString(kv map[string]string) string { diff --git a/balancer/rls/internal/keys/builder_test.go b/balancer/rls/internal/keys/builder_test.go index d7b74b3c0dd4..64ace65bd9a0 100644 --- a/balancer/rls/internal/keys/builder_test.go +++ b/balancer/rls/internal/keys/builder_test.go @@ -37,6 +37,15 @@ var ( {Key: "k1", Names: []string{"n1"}}, {Key: "k2", Names: []string{"n1"}}, }, + ExtraKeys: &rlspb.GrpcKeyBuilder_ExtraKeys{ + Host: "host", + Service: "service", + Method: "method", + }, + ConstantKeys: 
map[string]string{ + "const-key-1": "const-val-1", + "const-key-2": "const-val-2", + }, } goodKeyBuilder2 = &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{ @@ -50,13 +59,21 @@ var ( ) func TestMakeBuilderMap(t *testing.T) { - wantBuilderMap1 := map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}, {key: "k2", names: []string{"n1"}}}}, + gFooBuilder := builder{ + headerKeys: []matcher{{key: "k1", names: []string{"n1"}}, {key: "k2", names: []string{"n1"}}}, + constantKeys: map[string]string{ + "const-key-1": "const-val-1", + "const-key-2": "const-val-2", + }, + hostKey: "host", + serviceKey: "service", + methodKey: "method", } + wantBuilderMap1 := map[string]builder{"/gFoo/": gFooBuilder} wantBuilderMap2 := map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}, {key: "k2", names: []string{"n1"}}}}, - "/gBar/method1": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, - "/gFoobar/": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, + "/gFoo/": gFooBuilder, + "/gBar/method1": {headerKeys: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, + "/gFoobar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, } tests := []struct { @@ -91,33 +108,6 @@ func TestMakeBuilderMap(t *testing.T) { } func TestMakeBuilderMapErrors(t *testing.T) { - emptyServiceKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{ - {Service: "bFoo", Method: "method1"}, - {Service: "bBar"}, - {Method: "method1"}, - }, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, - } - requiredMatchKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "bFoo", Method: "method1"}}, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}, RequiredMatch: true}}, - } - repeatedHeadersKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{ - {Service: "gBar", Method: "method1"}, - 
{Service: "gFoobar"}, - }, - Headers: []*rlspb.NameMatcher{ - {Key: "k1", Names: []string{"n1", "n2"}}, - {Key: "k1", Names: []string{"n1", "n2"}}, - }, - } - methodNameWithSlashKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1/foo"}}, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, - } - tests := []struct { desc string cfg *rlspb.RouteLookupConfig @@ -138,7 +128,17 @@ func TestMakeBuilderMapErrors(t *testing.T) { { desc: "GrpcKeyBuilder with empty Service field", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{emptyServiceKeyBuilder, goodKeyBuilder1}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "bFoo", Method: "method1"}, + {Service: "bBar"}, + {Method: "method1"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + }, + goodKeyBuilder1, + }, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains a Name field with no Service", }, @@ -152,21 +152,96 @@ func TestMakeBuilderMapErrors(t *testing.T) { { desc: "GrpcKeyBuilder with requiredMatch field set", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{requiredMatchKeyBuilder, goodKeyBuilder1}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "bFoo", Method: "method1"}}, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}, RequiredMatch: true}}, + }, + goodKeyBuilder1, + }, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set", }, { desc: "GrpcKeyBuilder two headers with same key", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{repeatedHeadersKeyBuilder, goodKeyBuilder1}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{ 
+ {Key: "k1", Names: []string{"n1", "n2"}}, + {Key: "k1", Names: []string{"n1", "n2"}}, + }, + }, + goodKeyBuilder1, + }, + }, + wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"k1\" across headers, constant_keys and extra_keys", + }, + { + desc: "GrpcKeyBuilder repeated keys across headers and constant_keys", + cfg: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + ConstantKeys: map[string]string{"k1": "v1"}, + }, + goodKeyBuilder1, + }, + }, + wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"k1\" across headers, constant_keys and extra_keys", + }, + { + desc: "GrpcKeyBuilder repeated keys across headers and extra_keys", + cfg: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + ExtraKeys: &rlspb.GrpcKeyBuilder_ExtraKeys{Method: "k1"}, + }, + goodKeyBuilder1, + }, + }, + wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"k1\" in extra_keys from constant_keys or headers", + }, + { + desc: "GrpcKeyBuilder repeated keys across constant_keys and extra_keys", + cfg: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + ConstantKeys: map[string]string{"host": "v1"}, + ExtraKeys: &rlspb.GrpcKeyBuilder_ExtraKeys{Host: "host"}, + }, + goodKeyBuilder1, + }, }, - wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Key field in headers", + 
wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"host\" in extra_keys from constant_keys or headers", }, { desc: "GrpcKeyBuilder with slash in method name", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{methodNameWithSlashKeyBuilder}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1/foo"}}, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + }, + }, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains a method with a slash", }, @@ -257,11 +332,22 @@ func TestRLSKey(t *testing.T) { wantKM: KeyMap{Map: map[string]string{"k1": "v1"}, Str: "k1=v1"}, }, { - // Multiple matchers find hits in the provided request headers. - desc: "multipleMatchers", - path: "/gFoo/method1", - md: metadata.Pairs("n2", "v2", "n1", "v1"), - wantKM: KeyMap{Map: map[string]string{"k1": "v1", "k2": "v1"}, Str: "k1=v1,k2=v1"}, + // Multiple headerKeys find hits in the provided request headers. + desc: "multipleMatchers", + path: "/gFoo/method1", + md: metadata.Pairs("n2", "v2", "n1", "v1"), + wantKM: KeyMap{ + Map: map[string]string{ + "const-key-1": "const-val-1", + "const-key-2": "const-val-2", + "host": "dummy-host", + "service": "/gFoo/", + "method": "method1", + "k1": "v1", + "k2": "v1", + }, + Str: "const-key-1=const-val-1,const-key-2=const-val-2,host=dummy-host,k1=v1,k2=v1,method=method1,service=/gFoo/", + }, }, { // A match is found for a header which is specified multiple times. 
@@ -275,7 +361,7 @@ func TestRLSKey(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if gotKM := bm.RLSKey(test.md, test.path); !cmp.Equal(gotKM, test.wantKM) { + if gotKM := bm.RLSKey(test.md, "dummy-host", test.path); !cmp.Equal(gotKM, test.wantKM) { t.Errorf("RLSKey(%+v, %s) = %+v, want %+v", test.md, test.path, gotKM, test.wantKM) } }) @@ -351,57 +437,57 @@ func TestBuilderMapEqual(t *testing.T) { { desc: "nil and non-nil builder maps", a: nil, - b: map[string]builder{"/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}}, + b: map[string]builder{"/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}}, wantEqual: false, }, { desc: "empty and non-empty builder maps", a: make(map[string]builder), - b: map[string]builder{"/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}}, + b: map[string]builder{"/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}}, wantEqual: false, }, { desc: "different number of map keys", a: map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "different map keys", a: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "equal keys different values", a: map[string]builder{ - "/gBar/": 
{matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, }, b: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "good match", a: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: true, }, @@ -425,44 +511,80 @@ func TestBuilderEqual(t *testing.T) { }{ { desc: "nil builders", - a: builder{matchers: nil}, - b: builder{matchers: nil}, + a: builder{headerKeys: nil}, + b: builder{headerKeys: nil}, wantEqual: true, }, { desc: "empty builders", - a: builder{matchers: []matcher{}}, - b: builder{matchers: []matcher{}}, + a: builder{headerKeys: []matcher{}}, + b: builder{headerKeys: []matcher{}}, wantEqual: true, }, { - desc: "nil and non-nil builders", - a: builder{matchers: nil}, - b: builder{matchers: []matcher{}}, + desc: "empty and non-empty builders", + a: builder{headerKeys: []matcher{}}, + b: builder{headerKeys: []matcher{{key: "foo"}}}, wantEqual: false, }, { - desc: "empty and 
non-empty builders", - a: builder{matchers: []matcher{}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "different number of headerKeys", + a: builder{headerKeys: []matcher{{key: "foo"}, {key: "bar"}}}, + b: builder{headerKeys: []matcher{{key: "foo"}}}, wantEqual: false, }, { - desc: "different number of matchers", - a: builder{matchers: []matcher{{key: "foo"}, {key: "bar"}}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "equal number but differing headerKeys", + a: builder{headerKeys: []matcher{{key: "bar"}}}, + b: builder{headerKeys: []matcher{{key: "foo"}}}, wantEqual: false, }, { - desc: "equal number but differing matchers", - a: builder{matchers: []matcher{{key: "bar"}}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "different number of constantKeys", + a: builder{constantKeys: map[string]string{"k1": "v1"}}, + b: builder{constantKeys: map[string]string{"k1": "v1", "k2": "v2"}}, wantEqual: false, }, { - desc: "good match", - a: builder{matchers: []matcher{{key: "foo"}}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "equal number but differing constantKeys", + a: builder{constantKeys: map[string]string{"k1": "v1"}}, + b: builder{constantKeys: map[string]string{"k2": "v2"}}, + wantEqual: false, + }, + { + desc: "different hostKey", + a: builder{hostKey: "host1"}, + b: builder{hostKey: "host2"}, + wantEqual: false, + }, + { + desc: "different serviceKey", + a: builder{hostKey: "service1"}, + b: builder{hostKey: "service2"}, + wantEqual: false, + }, + { + desc: "different methodKey", + a: builder{hostKey: "method1"}, + b: builder{hostKey: "method2"}, + wantEqual: false, + }, + { + desc: "equal", + a: builder{ + headerKeys: []matcher{{key: "foo"}}, + constantKeys: map[string]string{"k1": "v1"}, + hostKey: "host", + serviceKey: "/service/", + methodKey: "method", + }, + b: builder{ + headerKeys: []matcher{{key: "foo"}}, + constantKeys: map[string]string{"k1": "v1"}, + hostKey: "host", + serviceKey: "/service/", + 
methodKey: "method", + }, wantEqual: true, }, } diff --git a/balancer/rls/internal/picker.go b/balancer/rls/internal/picker.go index 738449446558..37e58759e256 100644 --- a/balancer/rls/internal/picker.go +++ b/balancer/rls/internal/picker.go @@ -41,6 +41,9 @@ type rlsPicker struct { // The keyBuilder map used to generate RLS keys for the RPC. This is built // by the LB policy based on the received ServiceConfig. kbm keys.BuilderMap + // Endpoint from the user's original dial target. Used to set the `host_key` + // field in `extra_keys`. + origEndpoint string // The following hooks are setup by the LB policy to enable the rlsPicker to // access state stored in the policy. This approach has the following @@ -76,14 +79,9 @@ type rlsPicker struct { // Pick makes the routing decision for every outbound RPC. func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - // For every incoming request, we first build the RLS keys using the - // keyBuilder we received from the LB policy. If no metadata is present in - // the context, we end up using an empty key. - km := keys.KeyMap{} - md, ok := metadata.FromOutgoingContext(info.Ctx) - if ok { - km = p.kbm.RLSKey(md, info.FullMethodName) - } + // Build the request's keys using the key builders from LB config. 
+ md, _ := metadata.FromOutgoingContext(info.Ctx) + km := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) // We use the LB policy hook to read the data cache and the pending request // map (whether or not an entry exists) for the RPC path and the generated From 3786ae1778f5bfcc49da81cbd925b7175ec81839 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 6 Dec 2021 13:41:14 -0500 Subject: [PATCH 354/998] xds/resolver: Add support for cluster specifier plugins (#4987) * xds/resolver: Add support for cluster specifier plugins --- .../resolver/cluster_specifier_plugin_test.go | 368 ++++++++++++++++++ xds/internal/resolver/serviceconfig.go | 60 ++- xds/internal/resolver/watch_service.go | 11 +- xds/internal/resolver/xds_resolver.go | 1 - xds/internal/resolver/xds_resolver_test.go | 32 +- 5 files changed, 433 insertions(+), 39 deletions(-) create mode 100644 xds/internal/resolver/cluster_specifier_plugin_test.go diff --git a/xds/internal/resolver/cluster_specifier_plugin_test.go b/xds/internal/resolver/cluster_specifier_plugin_test.go new file mode 100644 index 000000000000..d432ad3c489d --- /dev/null +++ b/xds/internal/resolver/cluster_specifier_plugin_test.go @@ -0,0 +1,368 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package resolver + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clustermanager" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +func init() { + balancer.Register(cspB{}) +} + +type cspB struct{} + +func (cspB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return nil +} + +func (cspB) Name() string { + return "csp_experimental" +} + +type cspConfig struct { + ArbitraryField string `json:"arbitrary_field"` +} + +// TestXDSResolverClusterSpecifierPlugin tests that cluster specifier plugins +// produce the correct service config, and that the config selector routes to a +// cluster specifier plugin supported by this service config (i.e. prefixed with +// a cluster specifier plugin prefix). +func (s) TestXDSResolverClusterSpecifierPlugin(t *testing.T) { + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) + defer xdsR.Close() + defer cancel() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + waitForWatchListener(ctx, t, xdsC, targetStr) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, + }, + }, + // Top level csp config here - the value of cspA should get directly + // placed as a child policy of xds cluster manager. 
+ ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anything"}}}}, + }, nil) + + gotState, err := tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for UpdateState to be called: %v", err) + } + rState := gotState.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + } + wantJSON := `{"loadBalancingConfig":[{ + "xds_cluster_manager_experimental":{ + "children":{ + "cluster_specifier_plugin:cspA":{ + "childPolicy":[{"csp_experimental":{"arbitrary_field":"anything"}}] + } + } + }}]}` + + wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Errorf("ClientConn.UpdateState received different service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + } + + cs := iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("received nil config selector") + } + + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + if err != nil { + t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + } + + cluster := clustermanager.GetPickedClusterForTesting(res.Context) + clusterWant := clusterSpecifierPluginPrefix + "cspA" + if cluster != clusterWant { + t.Fatalf("cluster: %+v, want: %+v", cluster, clusterWant) + } +} + +// TestXDSResolverClusterSpecifierPluginConfigUpdate tests that cluster +// specifier plugins produce the correct service config, and that on an update +// to the CSP Configuration, the new config is accounted for in the output +// service config. 
+func (s) TestXDSResolverClusterSpecifierPluginConfigUpdate(t *testing.T) { + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) + defer xdsR.Close() + defer cancel() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + waitForWatchListener(ctx, t, xdsC, targetStr) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, + }, + }, + // Top level csp config here - the value of cspA should get directly + // placed as a child policy of xds cluster manager. + ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anything"}}}}, + }, nil) + + gotState, err := tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for UpdateState to be called: %v", err) + } + rState := gotState.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + } + wantJSON := `{"loadBalancingConfig":[{ + "xds_cluster_manager_experimental":{ + "children":{ + "cluster_specifier_plugin:cspA":{ + "childPolicy":[{"csp_experimental":{"arbitrary_field":"anything"}}] + } + } + }}]}` + + wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Errorf("ClientConn.UpdateState received different service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, 
wantSCParsed.Config)) + } + + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, + }, + }, + // Top level csp config here - the value of cspA should get directly + // placed as a child policy of xds cluster manager. + ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "changed"}}}}, + }, nil) + + gotState, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for UpdateState to be called: %v", err) + } + rState = gotState.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + } + wantJSON = `{"loadBalancingConfig":[{ + "xds_cluster_manager_experimental":{ + "children":{ + "cluster_specifier_plugin:cspA":{ + "childPolicy":[{"csp_experimental":{"arbitrary_field":"changed"}}] + } + } + }}]}` + + wantSCParsed = internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Errorf("ClientConn.UpdateState received different service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + } +} + +// TestXDSResolverDelayedOnCommittedCSP tests that cluster specifier plugins and +// their corresponding configurations remain in service config if RPCs are in +// flight. 
+func (s) TestXDSResolverDelayedOnCommittedCSP(t *testing.T) { + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) + defer xdsR.Close() + defer cancel() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + waitForWatchListener(ctx, t, xdsC, targetStr) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, + }, + }, + // Top level csp config here - the value of cspA should get directly + // placed as a child policy of xds cluster manager. + ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingA"}}}}, + }, nil) + + gotState, err := tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for UpdateState to be called: %v", err) + } + rState := gotState.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + } + wantJSON := `{"loadBalancingConfig":[{ + "xds_cluster_manager_experimental":{ + "children":{ + "cluster_specifier_plugin:cspA":{ + "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingA"}}] + } + } + }}]}` + + wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Errorf("ClientConn.UpdateState received different service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + } + + 
cs := iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("received nil config selector") + } + + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + if err != nil { + t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + } + + cluster := clustermanager.GetPickedClusterForTesting(res.Context) + clusterWant := clusterSpecifierPluginPrefix + "cspA" + if cluster != clusterWant { + t.Fatalf("cluster: %+v, want: %+v", cluster, clusterWant) + } + // delay res.OnCommitted() + + // Perform TWO updates to ensure the old config selector does not hold a reference to cspA + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspB"}}, + }, + }, + // Top level csp config here - the value of cspB should get directly + // placed as a child policy of xds cluster manager. + ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspB": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingB"}}}}, + }, nil) + tcc.stateCh.Receive(ctx) // Ignore the first update. + + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspB"}}, + }, + }, + // Top level csp config here - the value of cspB should get directly + // placed as a child policy of xds cluster manager. 
+ ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspB": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingB"}}}}, + }, nil) + + gotState, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for UpdateState to be called: %v", err) + } + rState = gotState.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + } + wantJSON2 := `{"loadBalancingConfig":[{ + "xds_cluster_manager_experimental":{ + "children":{ + "cluster_specifier_plugin:cspA":{ + "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingA"}}] + }, + "cluster_specifier_plugin:cspB":{ + "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingB"}}] + } + } + }}]}` + + wantSCParsed2 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON2) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) { + t.Errorf("ClientConn.UpdateState received different service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed2.Config)) + } + + // Invoke OnCommitted; should lead to a service config update that deletes + // cspA. + res.OnCommitted() + + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspB"}}, + }, + }, + // Top level csp config here - the value of cspB should get directly + // placed as a child policy of xds cluster manager. 
+ ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspB": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingB"}}}}, + }, nil) + gotState, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for UpdateState to be called: %v", err) + } + rState = gotState.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + } + wantJSON3 := `{"loadBalancingConfig":[{ + "xds_cluster_manager_experimental":{ + "children":{ + "cluster_specifier_plugin:cspB":{ + "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingB"}}] + } + } + }}]}` + + wantSCParsed3 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON3) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) { + t.Errorf("ClientConn.UpdateState received different service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed3.Config)) + } +} diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 772873092107..fd75af210457 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -44,8 +44,10 @@ import ( ) const ( - cdsName = "cds_experimental" - xdsClusterManagerName = "xds_cluster_manager_experimental" + cdsName = "cds_experimental" + xdsClusterManagerName = "xds_cluster_manager_experimental" + clusterPrefix = "cluster:" + clusterSpecifierPluginPrefix = "cluster_specifier_plugin:" ) type serviceConfig struct { @@ -86,10 +88,8 @@ func (r *xdsResolver) pruneActiveClusters() { func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { // Generate children (all entries in activeClusters). 
children := make(map[string]xdsChildConfig) - for cluster := range activeClusters { - children[cluster] = xdsChildConfig{ - ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}), - } + for cluster, ci := range activeClusters { + children[cluster] = ci.cfg } sc := serviceConfig{ @@ -158,10 +158,12 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP if rt == nil || rt.clusters == nil { return nil, errNoMatchedRouteFound } + cluster, ok := rt.clusters.Next().(*routeCluster) if !ok { return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster) } + // Add a ref to the selected cluster, as this RPC needs this cluster until // it is committed. ref := &cs.clusters[cluster.name].refCount @@ -353,21 +355,25 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro for i, rt := range su.virtualHost.Routes { clusters := newWRR() - for cluster, wc := range rt.WeightedClusters { + if rt.ClusterSpecifierPlugin != "" { + clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin clusters.Add(&routeCluster{ - name: cluster, - httpFilterConfigOverride: wc.HTTPFilterConfigOverride, - }, int64(wc.Weight)) - - // Initialize entries in cs.clusters map, creating entries in - // r.activeClusters as necessary. Set to zero as they will be - // incremented by incRefs. 
- ci := r.activeClusters[cluster] - if ci == nil { - ci = &clusterInfo{refCount: 0} - r.activeClusters[cluster] = ci + name: clusterName, + }, 1) + cs.initializeCluster(clusterName, xdsChildConfig{ + ChildPolicy: balancerConfig(su.clusterSpecifierPlugins[rt.ClusterSpecifierPlugin]), + }) + } else { + for cluster, wc := range rt.WeightedClusters { + clusterName := clusterPrefix + cluster + clusters.Add(&routeCluster{ + name: clusterName, + httpFilterConfigOverride: wc.HTTPFilterConfigOverride, + }, int64(wc.Weight)) + cs.initializeCluster(clusterName, xdsChildConfig{ + ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}), + }) } - cs.clusters[cluster] = ci } cs.routes[i].clusters = clusters @@ -397,9 +403,25 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro return cs, nil } +// initializeCluster initializes entries in cs.clusters map, creating entries in +// r.activeClusters as necessary. Any created entries will have a ref count set +// to zero as their ref count will be incremented by incRefs. +func (cs *configSelector) initializeCluster(clusterName string, cfg xdsChildConfig) { + ci := cs.r.activeClusters[clusterName] + if ci == nil { + ci = &clusterInfo{refCount: 0} + cs.r.activeClusters[clusterName] = ci + } + cs.clusters[clusterName] = ci + cs.clusters[clusterName].cfg = cfg +} + type clusterInfo struct { // number of references to this cluster; accessed atomically refCount int32 + // cfg is the child configuration for this cluster, containing either the + // csp config or the cds cluster config. 
+ cfg xdsChildConfig } type interceptorList struct { diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 30f65727d08a..3db9be1cac07 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -35,6 +36,9 @@ import ( type serviceUpdate struct { // virtualHost contains routes and other configuration to route RPCs. virtualHost *xdsresource.VirtualHost + // clusterSpecifierPlugins contains the configurations for any cluster + // specifier plugins emitted by the xdsclient. + clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig // ldsConfig contains configuration that applies to all routes. ldsConfig ldsConfig } @@ -120,7 +124,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, } // Handle the inline RDS update as if it's from an RDS watch. - w.updateVirtualHostsFromRDS(*update.InlineRouteConfig) + w.applyRouteConfigUpdate(*update.InlineRouteConfig) return } @@ -151,7 +155,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp) } -func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.RouteConfigUpdate) { +func (w *serviceUpdateWatcher) applyRouteConfigUpdate(update xdsresource.RouteConfigUpdate) { matchVh := xdsresource.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) if matchVh == nil { // No matching virtual host found. 
@@ -160,6 +164,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsresource.Rout } w.lastUpdate.virtualHost = matchVh + w.lastUpdate.clusterSpecifierPlugins = update.ClusterSpecifierPlugins w.serviceCb(w.lastUpdate, nil) } @@ -179,7 +184,7 @@ func (w *serviceUpdateWatcher) handleRDSResp(update xdsresource.RouteConfigUpdat w.serviceCb(serviceUpdate{}, err) return } - w.updateVirtualHostsFromRDS(update) + w.applyRouteConfigUpdate(update) } func (w *serviceUpdateWatcher) close() { diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 2192051ae2f6..6788090e29c0 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -205,7 +205,6 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { return true } - // Produce the service config. sc, err := serviceConfigJSON(r.activeClusters) if err != nil { // JSON marshal error; should never happen. diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index df4f47803713..c5fa3b8f7493 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -473,12 +473,12 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantJSON: `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ - "test-cluster-1":{ + "cluster:test-cluster-1":{ "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] } } }}]}`, - wantClusters: map[string]bool{"test-cluster-1": true}, + wantClusters: map[string]bool{"cluster:test-cluster-1": true}, }, { routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ @@ -491,18 +491,18 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantJSON: `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ - "test-cluster-1":{ + "cluster:test-cluster-1":{ 
"childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] }, - "cluster_1":{ + "cluster:cluster_1":{ "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] }, - "cluster_2":{ + "cluster:cluster_2":{ "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] } } }}]}`, - wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true}, + wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, { routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ @@ -515,15 +515,15 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { wantJSON: `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ - "cluster_1":{ + "cluster:cluster_1":{ "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] }, - "cluster_2":{ + "cluster:cluster_2":{ "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] } } }}]}`, - wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true}, + wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, } { // Invoke the watchAPI callback with a good service update and wait for the @@ -725,7 +725,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { wantJSON := `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ - "test-cluster-1":{ + "cluster:test-cluster-1":{ "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] } } @@ -857,7 +857,7 @@ func (s) TestXDSResolverWRR(t *testing.T) { picks[clustermanager.GetPickedClusterForTesting(res.Context)]++ res.OnCommitted() } - want := map[string]int{"A": 10, "B": 20} + want := map[string]int{"cluster:A": 10, "cluster:B": 20} if !reflect.DeepEqual(picks, want) { t.Errorf("picked clusters = %v; want %v", picks, want) } @@ -987,7 +987,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { wantJSON := `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ - 
"test-cluster-1":{ + "cluster:test-cluster-1":{ "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] } } @@ -1009,7 +1009,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) } cluster := clustermanager.GetPickedClusterForTesting(res.Context) - if cluster != "test-cluster-1" { + if cluster != "cluster:test-cluster-1" { t.Fatalf("") } // delay res.OnCommitted() @@ -1046,10 +1046,10 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { wantJSON2 := `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ - "test-cluster-1":{ + "cluster:test-cluster-1":{ "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] }, - "NEW":{ + "cluster:NEW":{ "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] } } @@ -1084,7 +1084,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { wantJSON3 := `{"loadBalancingConfig":[{ "xds_cluster_manager_experimental":{ "children":{ - "NEW":{ + "cluster:NEW":{ "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] } } From ac4edd2a03b9124d2ceda2a7c205396b31200351 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 6 Dec 2021 12:56:02 -0800 Subject: [PATCH 355/998] Change version to 1.44.0-dev (#5041) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index e0d75bb7d94f..9e0723bf73a3 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.43.0-dev" +const Version = "1.44.0-dev" From f5dc086d1367d58e576f7dd44da508846a7792a5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 7 Dec 2021 10:04:31 -0800 Subject: [PATCH 356/998] internal/balancergroup: eliminate race in exitIdle (#5012) incomingMu needs to be taken before accessing scToSubBalancer map as part of exitIdle --- internal/balancergroup/balancergroup.go | 33 +++++++++++++++++-------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index 9776158dd986..3142503a0ad5 100644 --- a/internal/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -101,20 +101,18 @@ func (sbc *subBalancerWrapper) startBalancer() { } } -func (sbc *subBalancerWrapper) exitIdle() { +// exitIdle invokes the sub-balancer's ExitIdle method. Returns a boolean +// indicating whether or not the operation was completed. +func (sbc *subBalancerWrapper) exitIdle() (complete bool) { b := sbc.balancer if b == nil { - return + return true } if ei, ok := b.(balancer.ExitIdler); ok { ei.ExitIdle() - return - } - for sc, b := range sbc.group.scToSubBalancer { - if b == sbc { - sc.Connect() - } + return true } + return false } func (sbc *subBalancerWrapper) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { @@ -383,6 +381,17 @@ func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) { bg.incomingMu.Unlock() } +// connect attempts to connect to all subConns belonging to sb. +func (bg *BalancerGroup) connect(sb *subBalancerWrapper) { + bg.incomingMu.Lock() + for sc, b := range bg.scToSubBalancer { + if b == sb { + sc.Connect() + } + } + bg.incomingMu.Unlock() +} + // Following are actions from the parent grpc.ClientConn, forward to sub-balancers. // UpdateSubConnState handles the state for the subconn. 
It finds the @@ -502,7 +511,9 @@ func (bg *BalancerGroup) Close() { func (bg *BalancerGroup) ExitIdle() { bg.outgoingMu.Lock() for _, config := range bg.idToBalancerConfig { - config.exitIdle() + if !config.exitIdle() { + bg.connect(config) + } } bg.outgoingMu.Unlock() } @@ -512,7 +523,9 @@ func (bg *BalancerGroup) ExitIdle() { func (bg *BalancerGroup) ExitIdleOne(id string) { bg.outgoingMu.Lock() if config := bg.idToBalancerConfig[id]; config != nil { - config.exitIdle() + if !config.exitIdle() { + bg.connect(config) + } } bg.outgoingMu.Unlock() } From a722e6aabac2f21084a8bfb03949c03b8a43e591 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Tue, 7 Dec 2021 10:51:24 -0800 Subject: [PATCH 357/998] xds/c2p: replace C2P resolver env var with experimental scheme suffix (#5044) --- internal/envconfig/xds.go | 5 ----- xds/googledirectpath/googlec2p.go | 6 ++---- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 13bbeb197229..93522d716d1e 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -43,7 +43,6 @@ const ( rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" - c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) @@ -86,10 +85,6 @@ var ( // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") - // C2PResolver indicates whether support for C2P resolver is enabled. - // This can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". - C2PResolver = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 7f3769df8bb8..d759d25c8519 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -47,7 +47,7 @@ import ( ) const ( - c2pScheme = "google-c2p" + c2pScheme = "google-c2p-experimental" tdURL = "dns:///directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second @@ -75,9 +75,7 @@ var ( ) func init() { - if envconfig.C2PResolver { - resolver.Register(c2pResolverBuilder{}) - } + resolver.Register(c2pResolverBuilder{}) } type c2pResolverBuilder struct{} From ccc060cb4396856703b8d31a426fccaa42ad320d Mon Sep 17 00:00:00 2001 From: Evan Jones Date: Tue, 7 Dec 2021 16:58:17 -0500 Subject: [PATCH 358/998] grpclog.DepthLoggerV2: Correct comment: formats like fmt.Println (#5038) --- grpclog/loggerv2.go | 8 ++++---- internal/grpclog/grpclog.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/grpclog/loggerv2.go b/grpclog/loggerv2.go index 34098bb8eb59..7c1f66409034 100644 --- a/grpclog/loggerv2.go +++ b/grpclog/loggerv2.go @@ -248,12 +248,12 @@ func (g *loggerT) V(l int) bool { // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. 
Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/internal/grpclog/grpclog.go b/internal/grpclog/grpclog.go index e6f975cbf6a8..30a3b4258fc0 100644 --- a/internal/grpclog/grpclog.go +++ b/internal/grpclog/grpclog.go @@ -115,12 +115,12 @@ type LoggerV2 interface { // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
FatalDepth(depth int, args ...interface{}) } From 1ec7a893331ed45377b882d6d9f9b9f221eae3cd Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 8 Dec 2021 10:04:20 -0800 Subject: [PATCH 359/998] xds/client: send NewStream errors to the watchers (#5032) --- xds/internal/xdsclient/controller/transport.go | 1 + 1 file changed, 1 insertion(+) diff --git a/xds/internal/xdsclient/controller/transport.go b/xds/internal/xdsclient/controller/transport.go index b7746ed883c3..0b982b0d7057 100644 --- a/xds/internal/xdsclient/controller/transport.go +++ b/xds/internal/xdsclient/controller/transport.go @@ -81,6 +81,7 @@ func (t *Controller) run(ctx context.Context) { retries++ stream, err := t.vClient.NewStream(ctx, t.cc) if err != nil { + t.updateHandler.NewConnectionError(err) t.logger.Warningf("xds: ADS stream creation failed: %v", err) continue } From 40916aa021698425b1685741a48315a4c675bc92 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 8 Dec 2021 10:05:59 -0800 Subject: [PATCH 360/998] transport: better error message when per-RPC creds fail (#5033) --- internal/transport/http2_client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 3a77a782979d..f0c72d337105 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -588,7 +588,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. 
From 62f73ecd84b14e5092e0ec0ea956a3be35b9faac Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 8 Dec 2021 10:53:49 -0800 Subject: [PATCH 361/998] xds/client: move xdsclient.New tests to controller.New (#5037) --- .../xdsclient/controller/controller_test.go | 102 ++++++++++++++++++ xds/internal/xdsclient/xdsclient_test.go | 87 --------------- 2 files changed, 102 insertions(+), 87 deletions(-) create mode 100644 xds/internal/xdsclient/controller/controller_test.go diff --git a/xds/internal/xdsclient/controller/controller_test.go b/xds/internal/xdsclient/controller/controller_test.go new file mode 100644 index 000000000000..b2d9c225d8e0 --- /dev/null +++ b/xds/internal/xdsclient/controller/controller_test.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package controller + +import ( + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" +) + +const testXDSServer = "xds-server" + +// TestNew covers that New() returns an error if the input *ServerConfig +// contains invalid content. 
+func (s) TestNew(t *testing.T) { + tests := []struct { + name string + config *bootstrap.ServerConfig + wantErr bool + }{ + { + name: "empty-opts", + config: &bootstrap.ServerConfig{}, + wantErr: true, + }, + { + name: "empty-balancer-name", + config: &bootstrap.ServerConfig{ + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: testutils.EmptyNodeProtoV2, + }, + wantErr: true, + }, + { + name: "empty-dial-creds", + config: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + NodeProto: testutils.EmptyNodeProtoV2, + }, + wantErr: true, + }, + { + name: "empty-node-proto", + config: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + }, + wantErr: true, + }, + { + name: "node-proto-version-mismatch", + config: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV2, + NodeProto: testutils.EmptyNodeProtoV3, + }, + wantErr: true, + }, + { + name: "happy-case", + config: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithInsecure(), + NodeProto: testutils.EmptyNodeProtoV2, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := New(test.config, nil, nil, nil) // Only testing the config, other inputs are left as nil. 
+ defer func() { + if c != nil { + c.Close() + } + }() + if (err != nil) != test.wantErr { + t.Fatalf("New(%+v) = %v, wantErr: %v", test.config, err, test.wantErr) + } + }) + } +} diff --git a/xds/internal/xdsclient/xdsclient_test.go b/xds/internal/xdsclient/xdsclient_test.go index 92423a59f296..68ecd88acc4c 100644 --- a/xds/internal/xdsclient/xdsclient_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -20,16 +20,8 @@ package xdsclient_test import ( "testing" - "time" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 API client. ) @@ -42,82 +34,3 @@ func Test(t *testing.T) { } const testXDSServer = "xds-server" - -func (s) TestNew(t *testing.T) { - tests := []struct { - name string - config *bootstrap.Config - wantErr bool - }{ - { - name: "empty-opts", - config: &bootstrap.Config{}, - wantErr: true, - }, - { - name: "empty-balancer-name", - config: &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV2, - }, - }, - wantErr: true, - }, - { - name: "empty-dial-creds", - config: &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - NodeProto: testutils.EmptyNodeProtoV2, - }, - }, - wantErr: true, - }, - { - name: "empty-node-proto", - config: &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, - }, - wantErr: true, - }, - { - name: "node-proto-version-mismatch", - config: &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - 
ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV2, - NodeProto: testutils.EmptyNodeProtoV3, - }, - }, - wantErr: true, - }, - // TODO(easwars): Add cases for v3 API client. - { - name: "happy-case", - config: &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithInsecure(), - NodeProto: testutils.EmptyNodeProtoV2, - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c, err := xdsclient.NewWithConfigForTesting(test.config, 15*time.Second) - if (err != nil) != test.wantErr { - t.Fatalf("New(%+v) = %v, wantErr: %v", test.config, err, test.wantErr) - } - if c != nil { - c.Close() - } - }) - } -} From d35aff3c836a77e714eb1c86c8aab963560f043c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 8 Dec 2021 12:07:10 -0800 Subject: [PATCH 362/998] xds/federation: resource name parsing (#4991) --- xds/internal/xdsclient/xdsresource/name.go | 130 ++++++++++++++++++ .../xdsclient/xdsresource/name_test.go | 104 ++++++++++++++ 2 files changed, 234 insertions(+) create mode 100644 xds/internal/xdsclient/xdsresource/name.go create mode 100644 xds/internal/xdsclient/xdsresource/name_test.go diff --git a/xds/internal/xdsclient/xdsresource/name.go b/xds/internal/xdsclient/xdsresource/name.go new file mode 100644 index 000000000000..076e3d617a44 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/name.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "net/url" + "sort" + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// Name contains the parsed component of an xDS resource name. +// +// An xDS resource name is in the format of +// xdstp://[{authority}]/{resource type}/{id/*}?{context parameters}{#processing directive,*} +// +// See +// https://github.com/cncf/xds/blob/main/proposals/TP1-xds-transport-next.md#uri-based-xds-resource-names +// for details, and examples. +type Name struct { + Scheme string + Authority string + Type string + ID string + + ContextParams map[string]string + + processingDirective string +} + +// ParseName splits the name and returns a struct representation of the Name. +// +// If the name isn't a valid new-style xDS name, field ID is set to the input. +// Note that this is not an error, because we still support the old-style +// resource names (those not starting with "xdstp:"). +// +// The caller can tell if the parsing is successful by checking the returned +// Scheme. +func ParseName(name string) *Name { + if !envconfig.XDSFederation { + // Return "" scheme to use the default authority for the server. + return &Name{ID: name} + } + if !strings.Contains(name, "://") { + // Only the long form URL, with ://, is valid. + return &Name{ID: name} + } + parsed, err := url.Parse(name) + if err != nil { + return &Name{ID: name} + } + + ret := &Name{ + Scheme: parsed.Scheme, + Authority: parsed.Host, + } + split := strings.SplitN(parsed.Path, "/", 3) + if len(split) < 3 { + // Path is in the format of "/type/id". There must be at least 3 + // segments after splitting. + return &Name{ID: name} + } + ret.Type = split[1] + ret.ID = split[2] + if len(parsed.Query()) != 0 { + ret.ContextParams = make(map[string]string) + for k, vs := range parsed.Query() { + if len(vs) > 0 { + // We only keep one value of each key. 
Behavior for multiple values + // is undefined. + ret.ContextParams[k] = vs[0] + } + } + } + // TODO: processing directive (the part comes after "#" in the URL, stored + // in parsed.RawFragment) is kept but not processed. Add support for that + // when it's needed. + ret.processingDirective = parsed.RawFragment + return ret +} + +// String returns a canonicalized string of name. The context parameters are +// sorted by the keys. +func (n *Name) String() string { + if n.Scheme == "" { + return n.ID + } + + // Sort and build query. + keys := make([]string, 0, len(n.ContextParams)) + for k := range n.ContextParams { + keys = append(keys, k) + } + sort.Strings(keys) + var pairs []string + for _, k := range keys { + pairs = append(pairs, strings.Join([]string{k, n.ContextParams[k]}, "=")) + } + rawQuery := strings.Join(pairs, "&") + + path := n.Type + if n.ID != "" { + path = path + "/" + n.ID + } + + tempURL := &url.URL{ + Scheme: n.Scheme, + Host: n.Authority, + Path: path, + RawQuery: rawQuery, + RawFragment: n.processingDirective, + } + return tempURL.String() +} diff --git a/xds/internal/xdsclient/xdsresource/name_test.go b/xds/internal/xdsclient/xdsresource/name_test.go new file mode 100644 index 000000000000..8ef9d894840c --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/name_test.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "reflect" + "testing" + + "google.golang.org/grpc/internal/envconfig" +) + +func TestParseName(t *testing.T) { + tests := []struct { + name string + env bool // Whether federation env is set to true. + in string + want *Name + }{ + { + name: "env off", + env: false, + in: "xdstp://auth/type/id", + want: &Name{ID: "xdstp://auth/type/id"}, + }, + { + name: "old style name", + env: true, + in: "test-resource", + want: &Name{ID: "test-resource"}, + }, + { + name: "invalid not url", + env: true, + in: "a:/b/c", + want: &Name{ID: "a:/b/c"}, + }, + { + name: "invalid no resource type", + env: true, + in: "xdstp://auth/id", + want: &Name{ID: "xdstp://auth/id"}, + }, + { + name: "valid no ctx params", + env: true, + in: "xdstp://auth/type/id", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id"}, + }, + { + name: "valid with ctx params", + env: true, + in: "xdstp://auth/type/id?a=1&b=2", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: map[string]string{"a": "1", "b": "2"}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.env { + defer func() func() { + oldEnv := envconfig.XDSFederation + envconfig.XDSFederation = true + return func() { envconfig.XDSFederation = oldEnv } + }()() + } + if got := ParseName(tt.in); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseName() = %#v, want %#v", got, tt.want) + } + }) + } +} + +// TestNameStringCtxParamsOrder covers the case that if two names differ only in +// context parameter __order__, the parsed name.String() has the same value. 
+func TestNameStringCtxParamsOrder(t *testing.T) { + oldEnv := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldEnv }() + + const ( + a = "xdstp://auth/type/id?a=1&b=2" + b = "xdstp://auth/type/id?b=2&a=1" + ) + aParsed := ParseName(a).String() + bParsed := ParseName(b).String() + + if aParsed != bParsed { + t.Fatalf("aParsed.String() = %q, bParsed.String() = %q, want them to be the same", aParsed, bParsed) + } +} From bd7076973b45b81e37a45eb761efb789e2001618 Mon Sep 17 00:00:00 2001 From: Huang Chong Date: Thu, 9 Dec 2021 05:18:56 +0800 Subject: [PATCH 363/998] test: cleanup roundrobin_test (#5005) --- balancer/roundrobin/roundrobin_test.go | 58 ++++++++++++++++++-------- 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/balancer/roundrobin/roundrobin_test.go b/balancer/roundrobin/roundrobin_test.go index eb25055ff78c..574625642bef 100644 --- a/balancer/roundrobin/roundrobin_test.go +++ b/balancer/roundrobin/roundrobin_test.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/metadata" @@ -136,8 +137,10 @@ func (s) TestOneBackend(t *testing.T) { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -146,7 +149,7 @@ func (s) TestOneBackend(t *testing.T) { // The first RPC should fail because there's no 
address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } @@ -167,7 +170,10 @@ func (s) TestBackendsRoundRobin(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -176,7 +182,7 @@ func (s) TestBackendsRoundRobin(t *testing.T) { // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } @@ -224,7 +230,10 @@ func (s) TestAddressesRemoved(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -233,7 +242,7 @@ func (s) TestAddressesRemoved(t *testing.T) { // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } @@ -255,7 +264,7 @@ func (s) TestAddressesRemoved(t *testing.T) { } const msgWant = "produced zero addresses" - if _, err := testc.EmptyCall(ctx2, &testpb.Empty{}); err == nil || !strings.Contains(status.Convert(err).Message(), msgWant) { + if _, err := testc.EmptyCall(ctx2, &testpb.Empty{}); !strings.Contains(status.Convert(err).Message(), msgWant) { t.Fatalf("EmptyCall() = _, %v, want _, Contains(Message(), %q)", err, msgWant) } } @@ -269,7 +278,10 @@ func (s) TestCloseWithPendingRPC(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -301,7 +313,10 @@ func (s) TestNewAddressWhileBlocking(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -310,7 +325,7 @@ func (s) TestNewAddressWhileBlocking(t *testing.T) { // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } @@ -348,7 +363,10 @@ func (s) TestOneServerDown(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -357,7 +375,7 @@ func (s) TestOneServerDown(t *testing.T) { // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } @@ -444,7 +462,10 @@ func (s) TestAllServersDown(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -453,7 +474,7 @@ func (s) TestAllServersDown(t *testing.T) { // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } @@ -514,7 +535,10 @@ func (s) TestUpdateAddressAttributes(t *testing.T) { } defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -527,7 +551,7 @@ func (s) TestUpdateAddressAttributes(t *testing.T) { // The first RPC should fail because there's no address. 
ctxShort, cancel2 := context.WithTimeout(ctx, time.Millisecond) defer cancel2() - if _, err := testc.EmptyCall(ctxShort, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { + if _, err := testc.EmptyCall(ctxShort, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } From fd4e3bdc3ac7e4b553e3859424d4666af35919a6 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 9 Dec 2021 03:28:58 -0500 Subject: [PATCH 364/998] xds: Added env var for RLS in xDS (#5050) * xds: Added env var for RLS in xDS --- internal/envconfig/xds.go | 7 ++++ .../xdsclient/xdsresource/unmarshal_rds.go | 13 +++++-- .../xdsresource/unmarshal_rds_test.go | 38 +++++++++++++++++-- 3 files changed, 52 insertions(+), 6 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 93522d716d1e..9bad03cec64f 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -42,6 +42,7 @@ const ( aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) @@ -85,6 +86,12 @@ var ( // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index a6fbf08d4502..f43b18292f0c 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -83,9 +83,13 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s // we are looking for. func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) - csps, err := processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("received route is invalid %v", err) + csps := make(map[string]clusterspecifier.BalancerConfig) + if envconfig.XDSRLS { + var err error + csps, err = processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid %v", err) + } } // cspNames represents all the cluster specifiers referenced by Route // Actions - any cluster specifiers not referenced by a Route Action can be @@ -348,6 +352,9 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif case *v3routepb.RouteAction_ClusterHeader: continue case *v3routepb.RouteAction_ClusterSpecifierPlugin: + if !envconfig.XDSRLS { + return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) + } if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { // "When processing RouteActions, if any action includes a // cluster_specifier_plugin value that is not in diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 72e5c4149024..a14d321b8eee 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go 
+++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -110,6 +110,12 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { }}, } } + goodUpdate = RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: nil, + }}, + } goodUpdateWithClusterSpecifierPluginA = RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{ldsTarget}, @@ -166,11 +172,17 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { defaultRetryBackoff = RetryBackoff{BaseInterval: 25 * time.Millisecond, MaxInterval: 250 * time.Millisecond} ) + oldRLS := envconfig.XDSRLS + defer func() { + envconfig.XDSRLS = oldRLS + }() + tests := []struct { name string rc *v3routepb.RouteConfiguration wantUpdate RouteConfigUpdate wantError bool + rlsEnabled bool }{ { name: "default-route-match-field-is-nil", @@ -640,21 +652,24 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist), }, []string{"cspA"}), - wantError: true, + wantError: true, + rlsEnabled: true, }, { name: "error-in-cluster-specifier-plugin-conversion-method", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ clusterSpecifierPlugin("cspA", errorClusterSpecifierConfig), }, []string{"cspA"}), - wantError: true, + wantError: true, + rlsEnabled: true, }, { name: "route-action-that-references-undeclared-cluster-specifier-plugin", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), }, []string{"cspA", "cspB"}), - wantError: true, + wantError: true, + rlsEnabled: true, }, { name: "emitted-cluster-specifier-plugins", @@ -662,6 +677,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), 
}, []string{"cspA"}), wantUpdate: goodUpdateWithClusterSpecifierPluginA, + rlsEnabled: true, }, { name: "deleted-cluster-specifier-plugins-not-referenced", @@ -670,10 +686,26 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { clusterSpecifierPlugin("cspB", mockClusterSpecifierConfig), }, []string{"cspA"}), wantUpdate: goodUpdateWithClusterSpecifierPluginA, + rlsEnabled: true, + }, + { + name: "ignore-error-in-cluster-specifier-plugin", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist), + }, []string{}), + wantUpdate: goodUpdate, + }, + { + name: "cluster-specifier-plugin-referenced-env-var-off", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), + }, []string{"cspA"}), + wantError: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + envconfig.XDSRLS = test.rlsEnabled gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, nil, false) if (gotError != nil) != test.wantError || !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty(), From 5d90b32d9dec9e82dd2f5181d3748c57e68e97ac Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Thu, 9 Dec 2021 15:37:33 -0800 Subject: [PATCH 365/998] authz: fix regex expression match (#5035) * Fixes regex expression matching. * Adds tests * Updates FulMatchWithRegex and regex string for presence match. 
* Add tests for FullMatchWithRegex * Update regex to allow whitespace characters --- authz/rbac_translator.go | 8 ++++--- authz/rbac_translator_test.go | 2 +- authz/sdk_end2end_test.go | 40 ++++++++++++++++++++++++++++----- internal/grpcutil/regex.go | 11 +++++---- internal/grpcutil/regex_test.go | 12 ++++++++++ 5 files changed, 59 insertions(+), 14 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index ba49b0c5250f..010fc89a6e22 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -94,7 +94,8 @@ func getStringMatcher(value string) *v3matcherpb.StringMatcher { switch { case value == "*": return &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{}, + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{ + SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, } case strings.HasSuffix(value, "*"): prefix := strings.TrimSuffix(value, "*") @@ -117,8 +118,9 @@ func getHeaderMatcher(key, value string) *v3routepb.HeaderMatcher { switch { case value == "*": return &v3routepb.HeaderMatcher{ - Name: key, - HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{}, + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{ + SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: ".+"}}, } case strings.HasSuffix(value, "*"): prefix := strings.TrimSuffix(value, "*") diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index 9b88362ea90b..e22ab62ce26b 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -127,7 +127,7 @@ func TestTranslatePolicy(t *testing.T) { Ids: []*v3rbacpb.Principal{ {Identifier: &v3rbacpb.Principal_Authenticated_{ Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{}, + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, }}, }}, }, diff --git a/authz/sdk_end2end_test.go 
b/authz/sdk_end2end_test.go index 79fa379bceac..839faaa76081 100644 --- a/authz/sdk_end2end_test.go +++ b/authz/sdk_end2end_test.go @@ -95,8 +95,7 @@ var sdkTests = map[string]struct { "request": { "paths": [ - "/grpc.testing.TestService/UnaryCall", - "/grpc.testing.TestService/StreamingInputCall" + "/grpc.testing.TestService/*" ], "headers": [ @@ -122,11 +121,11 @@ var sdkTests = map[string]struct { "allow_rules": [ { - "name": "allow_TestServiceCalls", + "name": "allow_all", "request": { "paths": [ - "/grpc.testing.TestService/*" + "*" ] } } @@ -134,11 +133,11 @@ var sdkTests = map[string]struct { "deny_rules": [ { - "name": "deny_TestServiceCalls", + "name": "deny_all", "request": { "paths": [ - "/grpc.testing.TestService/*" + "*" ] } } @@ -300,6 +299,35 @@ var sdkTests = map[string]struct { }`, wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), }, + "DeniesRPCRequestNoMatchInAllowFailsPresenceMatch": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/*" + ], + "headers": + [ + { + "key": "key-abc", + "values": + [ + "*" + ] + } + ] + } + } + ] + }`, + md: metadata.Pairs("key-abc", ""), + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, } func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { diff --git a/internal/grpcutil/regex.go b/internal/grpcutil/regex.go index 2810a8ba2fdf..7a092b2b8041 100644 --- a/internal/grpcutil/regex.go +++ b/internal/grpcutil/regex.go @@ -20,9 +20,12 @@ package grpcutil import "regexp" -// FullMatchWithRegex returns whether the full string matches the regex provided. -func FullMatchWithRegex(re *regexp.Regexp, string string) bool { +// FullMatchWithRegex returns whether the full text matches the regex provided. 
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } re.Longest() - rem := re.FindString(string) - return len(rem) == len(string) + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/internal/grpcutil/regex_test.go b/internal/grpcutil/regex_test.go index 1b2299858daa..4c12804fed5f 100644 --- a/internal/grpcutil/regex_test.go +++ b/internal/grpcutil/regex_test.go @@ -48,6 +48,18 @@ func TestFullMatchWithRegex(t *testing.T) { string: "ab", want: true, }, + { + name: "match all", + regexStr: ".*", + string: "", + want: true, + }, + { + name: "matches non-empty strings", + regexStr: ".+", + string: "", + want: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From c18e2a2883cb46be5417a44dc8885734d26c1ebc Mon Sep 17 00:00:00 2001 From: Shang Jian Ding Date: Mon, 13 Dec 2021 15:56:04 -0600 Subject: [PATCH 366/998] cmd/protoc-gen-go-grpc: update google.golang.org/protobuf to v1.27.1 (#5053) --- cmd/protoc-gen-go-grpc/go.mod | 2 +- cmd/protoc-gen-go-grpc/go.sum | 22 ++++++---------------- regenerate.sh | 18 +++++++++++++++--- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/cmd/protoc-gen-go-grpc/go.mod b/cmd/protoc-gen-go-grpc/go.mod index d0cfd8ebf56f..e0f9440a65f7 100644 --- a/cmd/protoc-gen-go-grpc/go.mod +++ b/cmd/protoc-gen-go-grpc/go.mod @@ -2,4 +2,4 @@ module google.golang.org/grpc/cmd/protoc-gen-go-grpc go 1.9 -require google.golang.org/protobuf v1.23.0 +require google.golang.org/protobuf v1.27.1 diff --git a/cmd/protoc-gen-go-grpc/go.sum b/cmd/protoc-gen-go-grpc/go.sum index 92baf2631b73..03b1917b5a42 100644 --- a/cmd/protoc-gen-go-grpc/go.sum +++ b/cmd/protoc-gen-go-grpc/go.sum @@ -1,18 +1,8 @@ -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf 
v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= diff --git a/regenerate.sh b/regenerate.sh index a0a71aae9681..58c802f8aec7 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -76,7 +76,21 @@ SOURCES=( # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ +Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -85,7 +99,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ 
-I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -96,7 +109,6 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done From a32d2778f37bfc7260b4a9130f9ae0767a70d3d3 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 13 Dec 2021 13:56:30 -0800 Subject: [PATCH 367/998] xds/client: send connection errors to all watchers (#5054) --- xds/internal/xdsclient/pubsub/update.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/xds/internal/xdsclient/pubsub/update.go b/xds/internal/xdsclient/pubsub/update.go index ab8c94ccf3f7..371405b67972 100644 --- a/xds/internal/xdsclient/pubsub/update.go +++ b/xds/internal/xdsclient/pubsub/update.go @@ -295,6 +295,21 @@ func (pb *Pubsub) NewConnectionError(err error) { pb.mu.Lock() defer pb.mu.Unlock() + for _, s := range pb.ldsWatchers { + for wi := range s { + wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + } + } + for _, s := range pb.rdsWatchers { + for wi := range s { + wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + } + } + for _, s := range pb.cdsWatchers { + for wi := range s { + wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + } + } for _, s := range pb.edsWatchers { for wi := range s { wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) From 7c8a9321b9def6609378ab7c51f58bc2d83cb224 Mon Sep 17 00:00:00 2001 From: Shihao Xia Date: Tue, 14 Dec 2021 18:44:54 -0500 Subject: [PATCH 368/998] testing: fix goroutine leak in TestClientUpdatesParamsAfterGoAway (#5024) --- clientconn_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/clientconn_test.go b/clientconn_test.go index 
ef6a68081ff7..7d6a40adb831 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -658,7 +659,8 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { t.Fatalf("Failed to listen. Err: %v", err) } defer lis.Close() - connected := make(chan struct{}) + connected := grpcsync.NewEvent() + defer connected.Fire() go func() { conn, err := lis.Accept() if err != nil { @@ -680,7 +682,7 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { t.Errorf("error writing settings: %v", err) return } - <-connected + <-connected.Done() if err := f.WriteGoAway(0, http2.ErrCodeEnhanceYourCalm, []byte("too_many_pings")); err != nil { t.Errorf("error writing GOAWAY: %v", err) return @@ -698,7 +700,7 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() - close(connected) + connected.Fire() for { time.Sleep(10 * time.Millisecond) cc.mu.RLock() From 50f82701b5b5be3e55d0bfa59a1fa6d123d78e20 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 15 Dec 2021 09:37:05 -0800 Subject: [PATCH 369/998] rls: control channel implementation (#5046) --- balancer/rls/internal/balancer.go | 157 +---- balancer/rls/internal/balancer_test.go | 238 ------- balancer/rls/internal/builder.go | 14 +- balancer/rls/internal/client.go | 80 --- balancer/rls/internal/client_test.go | 178 ----- balancer/rls/internal/config_test.go | 4 +- balancer/rls/internal/control_channel.go | 206 ++++++ balancer/rls/internal/control_channel_test.go | 469 +++++++++++++ balancer/rls/internal/helpers_test.go | 327 ++++++++++ balancer/rls/internal/picker.go | 147 ----- balancer/rls/internal/picker_test.go | 615 
------------------ balancer/rls/internal/test/e2e/e2e.go | 20 + .../rls/internal/test/e2e/rls_child_policy.go | 131 ++++ .../rls/internal/test/e2e/rls_fakeserver.go | 110 ++++ .../rls/internal/test/e2e/rls_lb_config.go | 100 +++ internal/stubserver/stubserver.go | 13 + internal/testutils/restartable_listener.go | 7 +- 17 files changed, 1389 insertions(+), 1427 deletions(-) delete mode 100644 balancer/rls/internal/balancer_test.go delete mode 100644 balancer/rls/internal/client.go delete mode 100644 balancer/rls/internal/client_test.go create mode 100644 balancer/rls/internal/control_channel.go create mode 100644 balancer/rls/internal/control_channel_test.go create mode 100644 balancer/rls/internal/helpers_test.go delete mode 100644 balancer/rls/internal/picker.go delete mode 100644 balancer/rls/internal/picker_test.go create mode 100644 balancer/rls/internal/test/e2e/e2e.go create mode 100644 balancer/rls/internal/test/e2e/rls_child_policy.go create mode 100644 balancer/rls/internal/test/e2e/rls_fakeserver.go create mode 100644 balancer/rls/internal/test/e2e/rls_lb_config.go diff --git a/balancer/rls/internal/balancer.go b/balancer/rls/internal/balancer.go index b23783bf9da4..e5985eeee354 100644 --- a/balancer/rls/internal/balancer.go +++ b/balancer/rls/internal/balancer.go @@ -19,183 +19,36 @@ package rls import ( - "sync" - - "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcsync" ) var ( _ balancer.Balancer = (*rlsBalancer)(nil) - // For overriding in tests. - newRLSClientFunc = newRLSClient - logger = grpclog.Component("rls") + logger = grpclog.Component("rls") ) // rlsBalancer implements the RLS LB policy. -type rlsBalancer struct { - done *grpcsync.Event - cc balancer.ClientConn - opts balancer.BuildOptions - - // Mutex protects all the state maintained by the LB policy. - // TODO(easwars): Once we add the cache, we will also have another lock for - // the cache alone. 
- mu sync.Mutex - lbCfg *lbConfig // Most recently received service config. - rlsCC *grpc.ClientConn // ClientConn to the RLS server. - rlsC *rlsClient // RLS client wrapper. - - ccUpdateCh chan *balancer.ClientConnState -} - -// run is a long running goroutine which handles all the updates that the -// balancer wishes to handle. The appropriate updateHandler will push the update -// on to a channel that this goroutine will select on, thereby the handling of -// the update will happen asynchronously. -func (lb *rlsBalancer) run() { - for { - // TODO(easwars): Handle other updates like subConn state changes, RLS - // responses from the server etc. - select { - case u := <-lb.ccUpdateCh: - lb.handleClientConnUpdate(u) - case <-lb.done.Done(): - return - } - } -} - -// handleClientConnUpdate handles updates to the service config. -// If the RLS server name or the RLS RPC timeout changes, it updates the control -// channel accordingly. -// TODO(easwars): Handle updates to other fields in the service config. -func (lb *rlsBalancer) handleClientConnUpdate(ccs *balancer.ClientConnState) { - logger.Infof("rls: service config: %+v", ccs.BalancerConfig) - lb.mu.Lock() - defer lb.mu.Unlock() - - if lb.done.HasFired() { - logger.Warning("rls: received service config after balancer close") - return - } - - newCfg := ccs.BalancerConfig.(*lbConfig) - if lb.lbCfg.Equal(newCfg) { - logger.Info("rls: new service config matches existing config") - return - } - - lb.updateControlChannel(newCfg) - lb.lbCfg = newCfg -} +type rlsBalancer struct{} -// UpdateClientConnState pushes the received ClientConnState update on the -// update channel which will be processed asynchronously by the run goroutine. -// Implements balancer.Balancer interface. 
func (lb *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - select { - case lb.ccUpdateCh <- &ccs: - case <-lb.done.Done(): - } + logger.Fatal("rls: UpdateClientConnState is not yet unimplemented") return nil } -// ResolverErr implements balancer.Balancer interface. func (lb *rlsBalancer) ResolverError(error) { - // ResolverError is called by gRPC when the name resolver reports an error. - // TODO(easwars): How do we handle this? logger.Fatal("rls: ResolverError is not yet unimplemented") } -// UpdateSubConnState implements balancer.Balancer interface. func (lb *rlsBalancer) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) { logger.Fatal("rls: UpdateSubConnState is not yet implemented") } -// Cleans up the resources allocated by the LB policy including the clientConn -// to the RLS server. -// Implements balancer.Balancer. func (lb *rlsBalancer) Close() { - lb.mu.Lock() - defer lb.mu.Unlock() - - lb.done.Fire() - if lb.rlsCC != nil { - lb.rlsCC.Close() - } + logger.Fatal("rls: Close is not yet implemented") } func (lb *rlsBalancer) ExitIdle() { - // TODO: are we 100% sure this should be a nop? -} - -// updateControlChannel updates the RLS client if required. -// Caller must hold lb.mu. -func (lb *rlsBalancer) updateControlChannel(newCfg *lbConfig) { - oldCfg := lb.lbCfg - if newCfg.lookupService == oldCfg.lookupService && newCfg.lookupServiceTimeout == oldCfg.lookupServiceTimeout { - return - } - - // Use RPC timeout from new config, if different from existing one. - timeout := oldCfg.lookupServiceTimeout - if timeout != newCfg.lookupServiceTimeout { - timeout = newCfg.lookupServiceTimeout - } - - if newCfg.lookupService == oldCfg.lookupService { - // This is the case where only the timeout has changed. We will continue - // to use the existing clientConn. but will create a new rlsClient with - // the new timeout. 
- lb.rlsC = newRLSClientFunc(lb.rlsCC, lb.opts.Target.Endpoint, timeout) - return - } - - // This is the case where the RLS server name has changed. We need to create - // a new clientConn and close the old one. - var dopts []grpc.DialOption - if dialer := lb.opts.Dialer; dialer != nil { - dopts = append(dopts, grpc.WithContextDialer(dialer)) - } - dopts = append(dopts, dialCreds(lb.opts)) - - cc, err := grpc.Dial(newCfg.lookupService, dopts...) - if err != nil { - logger.Errorf("rls: dialRLS(%s, %v): %v", newCfg.lookupService, lb.opts, err) - // An error from a non-blocking dial indicates something serious. We - // should continue to use the old control channel if one exists, and - // return so that the rest of the config updates can be processes. - return - } - if lb.rlsCC != nil { - lb.rlsCC.Close() - } - lb.rlsCC = cc - lb.rlsC = newRLSClientFunc(cc, lb.opts.Target.Endpoint, timeout) -} - -func dialCreds(opts balancer.BuildOptions) grpc.DialOption { - // The control channel should use the same authority as that of the parent - // channel. This ensures that the identify of the RLS server and that of the - // backend is the same, so if the RLS config is injected by an attacker, it - // cannot cause leakage of private information contained in headers set by - // the application. 
- server := opts.Target.Authority - switch { - case opts.DialCreds != nil: - if err := opts.DialCreds.OverrideServerName(server); err != nil { - logger.Warningf("rls: OverrideServerName(%s) = (%v), using Insecure", server, err) - return grpc.WithInsecure() - } - return grpc.WithTransportCredentials(opts.DialCreds) - case opts.CredsBundle != nil: - return grpc.WithTransportCredentials(opts.CredsBundle.TransportCredentials()) - default: - logger.Warning("rls: no credentials available, using Insecure") - return grpc.WithInsecure() - } + logger.Fatal("rls: ExitIdle is not yet implemented") } diff --git a/balancer/rls/internal/balancer_test.go b/balancer/rls/internal/balancer_test.go deleted file mode 100644 index 2378a86fff10..000000000000 --- a/balancer/rls/internal/balancer_test.go +++ /dev/null @@ -1,238 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package rls - -import ( - "context" - "net" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/testutils/fakeserver" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/testdata" -) - -const defaultTestTimeout = 1 * time.Second - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -type listenerWrapper struct { - net.Listener - connCh *testutils.Channel -} - -// Accept waits for and returns the next connection to the listener. -func (l *listenerWrapper) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err - } - l.connCh.Send(c) - return c, nil -} - -func setupwithListener(t *testing.T, opts ...grpc.ServerOption) (*fakeserver.Server, *listenerWrapper, func()) { - t.Helper() - - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("net.Listen(tcp, localhost:0): %v", err) - } - lw := &listenerWrapper{ - Listener: l, - connCh: testutils.NewChannel(), - } - - server, cleanup, err := fakeserver.Start(lw, opts...) - if err != nil { - t.Fatalf("fakeserver.Start(): %v", err) - } - t.Logf("Fake RLS server started at %s ...", server.Address) - - return server, lw, cleanup -} - -type testBalancerCC struct { - balancer.ClientConn -} - -// TestUpdateControlChannelFirstConfig tests the scenario where the LB policy -// receives its first service config and verifies that a control channel to the -// RLS server specified in the serviceConfig is established. 
-func (s) TestUpdateControlChannelFirstConfig(t *testing.T) { - server, lis, cleanup := setupwithListener(t) - defer cleanup() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{}) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. -} - -// TestUpdateControlChannelSwitch tests the scenario where a control channel -// exists and the LB policy receives a new serviceConfig with a different RLS -// server name. Verifies that the new control channel is created and the old one -// is closed (the leakchecker takes care of this). 
-func (s) TestUpdateControlChannelSwitch(t *testing.T) { - server1, lis1, cleanup1 := setupwithListener(t) - defer cleanup1() - - server2, lis2, cleanup2 := setupwithListener(t) - defer cleanup2() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{}) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server1.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis1.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - lbCfg = &lbConfig{lookupService: server2.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - if _, err := lis2.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. -} - -// TestUpdateControlChannelTimeout tests the scenario where the LB policy -// receives a service config update with a different lookupServiceTimeout, but -// the lookupService itself remains unchanged. It verifies that the LB policy -// does not create a new control channel in this case. 
-func (s) TestUpdateControlChannelTimeout(t *testing.T) { - server, lis, cleanup := setupwithListener(t) - defer cleanup() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{}) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server.Address, lookupServiceTimeout: 1 * time.Second} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - lbCfg = &lbConfig{lookupService: server.Address, lookupServiceTimeout: 2 * time.Second} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - if _, err := lis.connCh.Receive(ctx); err != context.DeadlineExceeded { - t.Fatal("LB policy created new control channel when only lookupServiceTimeout changed") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. -} - -// TestUpdateControlChannelWithCreds tests the scenario where the control -// channel is to established with credentials from the parent channel. 
-func (s) TestUpdateControlChannelWithCreds(t *testing.T) { - sCreds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) - if err != nil { - t.Fatalf("credentials.NewServerTLSFromFile(server1.pem, server1.key) = %v", err) - } - cCreds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "") - if err != nil { - t.Fatalf("credentials.NewClientTLSFromFile(ca.pem) = %v", err) - } - - server, lis, cleanup := setupwithListener(t, grpc.Creds(sCreds)) - defer cleanup() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{ - DialCreds: cCreds, - }) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. 
-} diff --git a/balancer/rls/internal/builder.go b/balancer/rls/internal/builder.go index 7c29caef4047..9707b08420df 100644 --- a/balancer/rls/internal/builder.go +++ b/balancer/rls/internal/builder.go @@ -21,10 +21,9 @@ package rls import ( "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/grpcsync" ) -const rlsBalancerName = "rls" +const rlsBalancerName = "rls_experimental" func init() { balancer.Register(&rlsBB{}) @@ -41,13 +40,6 @@ func (*rlsBB) Name() string { } func (*rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - lb := &rlsBalancer{ - done: grpcsync.NewEvent(), - cc: cc, - opts: opts, - lbCfg: &lbConfig{}, - ccUpdateCh: make(chan *balancer.ClientConnState), - } - go lb.run() - return lb + // TODO(easwars): Fix this once the LB policy implementation is pulled in. + return &rlsBalancer{} } diff --git a/balancer/rls/internal/client.go b/balancer/rls/internal/client.go deleted file mode 100644 index b0c858e032e5..000000000000 --- a/balancer/rls/internal/client.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package rls - -import ( - "context" - "time" - - "google.golang.org/grpc" - rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" -) - -// For gRPC services using RLS, the value of target_type in the -// RouteLookupServiceRequest will be set to this. -const grpcTargetType = "grpc" - -// rlsClient is a simple wrapper around a RouteLookupService client which -// provides non-blocking semantics on top of a blocking unary RPC call. -// -// The RLS LB policy creates a new rlsClient object with the following values: -// * a grpc.ClientConn to the RLS server using appropriate credentials from the -// parent channel -// * dialTarget corresponding to the original user dial target, e.g. -// "firestore.googleapis.com". -// -// The RLS LB policy uses an adaptive throttler to perform client side -// throttling and asks this client to make an RPC call only after checking with -// the throttler. -type rlsClient struct { - stub rlsgrpc.RouteLookupServiceClient - // origDialTarget is the original dial target of the user and sent in each - // RouteLookup RPC made to the RLS server. - origDialTarget string - // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB - // policy receives this value in its service config. - rpcTimeout time.Duration -} - -func newRLSClient(cc *grpc.ClientConn, dialTarget string, rpcTimeout time.Duration) *rlsClient { - return &rlsClient{ - stub: rlsgrpc.NewRouteLookupServiceClient(cc), - origDialTarget: dialTarget, - rpcTimeout: rpcTimeout, - } -} - -type lookupCallback func(targets []string, headerData string, err error) - -// lookup starts a RouteLookup RPC in a separate goroutine and returns the -// results (and error, if any) in the provided callback. 
-func (c *rlsClient) lookup(keyMap map[string]string, cb lookupCallback) { - go func() { - ctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout) - resp, err := c.stub.RouteLookup(ctx, &rlspb.RouteLookupRequest{ - // TODO(easwars): Use extra_keys field to populate host, service and - // method keys. - TargetType: grpcTargetType, - KeyMap: keyMap, - }) - cb(resp.GetTargets(), resp.GetHeaderData(), err) - cancel() - }() -} diff --git a/balancer/rls/internal/client_test.go b/balancer/rls/internal/client_test.go deleted file mode 100644 index 9a805c77ca32..000000000000 --- a/balancer/rls/internal/client_test.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package rls - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" - "google.golang.org/grpc/balancer/rls/internal/testutils/fakeserver" - "google.golang.org/grpc/codes" - rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/status" -) - -const ( - defaultDialTarget = "dummy" - defaultRPCTimeout = 5 * time.Second -) - -func setup(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) { - t.Helper() - - server, sCleanup, err := fakeserver.Start(nil) - if err != nil { - t.Fatalf("Failed to start fake RLS server: %v", err) - } - - cc, cCleanup, err := server.ClientConn() - if err != nil { - sCleanup() - t.Fatalf("Failed to get a ClientConn to the RLS server: %v", err) - } - - return server, cc, func() { - sCleanup() - cCleanup() - } -} - -// TestLookupFailure verifies the case where the RLS server returns an error. -func (s) TestLookupFailure(t *testing.T) { - server, cc, cleanup := setup(t) - defer cleanup() - - // We setup the fake server to return an error. 
- server.ResponseChan <- fakeserver.Response{Err: errors.New("rls failure")} - - rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) - - errCh := testutils.NewChannel() - rlsClient.lookup(nil, func(targets []string, headerData string, err error) { - if err == nil { - errCh.Send(errors.New("rlsClient.lookup() succeeded, should have failed")) - return - } - if len(targets) != 0 || headerData != "" { - errCh.Send(fmt.Errorf("rlsClient.lookup() = (%v, %s), want (nil, \"\")", targets, headerData)) - return - } - errCh.Send(nil) - }) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if e, err := errCh.Receive(ctx); err != nil || e != nil { - t.Fatalf("lookup error: %v, error receiving from channel: %v", e, err) - } -} - -// TestLookupDeadlineExceeded tests the case where the RPC deadline associated -// with the lookup expires. -func (s) TestLookupDeadlineExceeded(t *testing.T) { - _, cc, cleanup := setup(t) - defer cleanup() - - // Give the Lookup RPC a small deadline, but don't setup the fake server to - // return anything. So the Lookup call will block and eventually expire. - rlsClient := newRLSClient(cc, defaultDialTarget, 100*time.Millisecond) - - errCh := testutils.NewChannel() - rlsClient.lookup(nil, func(_ []string, _ string, err error) { - if st, ok := status.FromError(err); !ok || st.Code() != codes.DeadlineExceeded { - errCh.Send(fmt.Errorf("rlsClient.lookup() returned error: %v, want %v", err, codes.DeadlineExceeded)) - return - } - errCh.Send(nil) - }) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if e, err := errCh.Receive(ctx); err != nil || e != nil { - t.Fatalf("lookup error: %v, error receiving from channel: %v", e, err) - } -} - -// TestLookupSuccess verifies the successful Lookup API case. 
-func (s) TestLookupSuccess(t *testing.T) { - server, cc, cleanup := setup(t) - defer cleanup() - - const wantHeaderData = "headerData" - - rlsReqKeyMap := map[string]string{ - "k1": "v1", - "k2": "v2", - } - wantLookupRequest := &rlspb.RouteLookupRequest{ - // TODO(easwars): Use extra_keys field to populate host, service and - // method keys. - TargetType: "grpc", - KeyMap: rlsReqKeyMap, - } - wantRespTargets := []string{"us_east_1.firestore.googleapis.com"} - - rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) - - errCh := testutils.NewChannel() - rlsClient.lookup(rlsReqKeyMap, func(targets []string, hd string, err error) { - if err != nil { - errCh.Send(fmt.Errorf("rlsClient.Lookup() failed: %v", err)) - return - } - if !cmp.Equal(targets, wantRespTargets) || hd != wantHeaderData { - errCh.Send(fmt.Errorf("rlsClient.lookup() = (%v, %s), want (%v, %s)", targets, hd, wantRespTargets, wantHeaderData)) - return - } - errCh.Send(nil) - }) - - // Make sure that the fake server received the expected RouteLookupRequest - // proto. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - req, err := server.RequestChan.Receive(ctx) - if err != nil { - t.Fatalf("Timed out wile waiting for a RouteLookupRequest") - } - gotLookupRequest := req.(*rlspb.RouteLookupRequest) - if diff := cmp.Diff(wantLookupRequest, gotLookupRequest, cmp.Comparer(proto.Equal)); diff != "" { - t.Fatalf("RouteLookupRequest diff (-want, +got):\n%s", diff) - } - - // We setup the fake server to return this response when it receives a - // request. 
- server.ResponseChan <- fakeserver.Response{ - Resp: &rlspb.RouteLookupResponse{ - Targets: wantRespTargets, - HeaderData: wantHeaderData, - }, - } - - if e, err := errCh.Receive(ctx); err != nil || e != nil { - t.Fatalf("lookup error: %v, error receiving from channel: %v", e, err) - } -} diff --git a/balancer/rls/internal/config_test.go b/balancer/rls/internal/config_test.go index 41d330c604e2..84da9fe426f2 100644 --- a/balancer/rls/internal/config_test.go +++ b/balancer/rls/internal/config_test.go @@ -61,7 +61,7 @@ func testEqual(a, b *lbConfig) bool { childPolicyConfigEqual(a.childPolicyConfig, b.childPolicyConfig) } -func TestParseConfig(t *testing.T) { +func (s) TestParseConfig(t *testing.T) { childPolicyTargetFieldVal, _ := json.Marshal(dummyChildPolicyTarget) tests := []struct { desc string @@ -158,7 +158,7 @@ func TestParseConfig(t *testing.T) { } } -func TestParseConfigErrors(t *testing.T) { +func (s) TestParseConfigErrors(t *testing.T) { tests := []struct { desc string input []byte diff --git a/balancer/rls/internal/control_channel.go b/balancer/rls/internal/control_channel.go new file mode 100644 index 000000000000..dc8446313e7d --- /dev/null +++ b/balancer/rls/internal/control_channel.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/adaptive" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" +) + +var newAdaptiveThrottler = func() adaptiveThrottler { return adaptive.New() } + +type adaptiveThrottler interface { + ShouldThrottle() bool + RegisterBackendResponse(throttled bool) +} + +// controlChannel is a wrapper around the gRPC channel to the RLS server +// specified in the service config. +type controlChannel struct { + // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB + // policy receives this value in its service config. + rpcTimeout time.Duration + // backToReadyCh is the channel on which an update is pushed when the + // connectivity state changes from READY --> TRANSIENT_FAILURE --> READY. + backToReadyCh chan struct{} + // throttler in an adaptive throttling implementation used to avoid + // hammering the RLS service while it is overloaded or down. + throttler adaptiveThrottler + + cc *grpc.ClientConn + client rlsgrpc.RouteLookupServiceClient + logger *internalgrpclog.PrefixLogger +} + +func newControlChannel(rlsServerName string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyCh chan struct{}) (*controlChannel, error) { + ctrlCh := &controlChannel{ + rpcTimeout: rpcTimeout, + backToReadyCh: backToReadyCh, + throttler: newAdaptiveThrottler(), + } + ctrlCh.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-control-channel %p] ", ctrlCh)) + + dopts, err := ctrlCh.dialOpts(bOpts) + if err != nil { + return nil, err + } + ctrlCh.cc, err = grpc.Dial(rlsServerName, dopts...) 
+ if err != nil { + return nil, err + } + ctrlCh.client = rlsgrpc.NewRouteLookupServiceClient(ctrlCh.cc) + ctrlCh.logger.Infof("Control channel created to RLS server at: %v", rlsServerName) + + go ctrlCh.monitorConnectivityState() + return ctrlCh, nil +} + +// dialOpts constructs the dial options for the control plane channel. +func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions) ([]grpc.DialOption, error) { + // The control plane channel will use the same authority as the parent + // channel for server authorization. This ensures that the identity of the + // RLS server and the identity of the backends is the same, so if the RLS + // config is injected by an attacker, it cannot cause leakage of private + // information contained in headers set by the application. + dopts := []grpc.DialOption{grpc.WithAuthority(bOpts.Authority)} + if bOpts.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(bOpts.Dialer)) + } + + // The control channel will use the channel credentials from the parent + // channel, including any call creds associated with the channel creds. + var credsOpt grpc.DialOption + switch { + case bOpts.DialCreds != nil: + credsOpt = grpc.WithTransportCredentials(bOpts.DialCreds.Clone()) + case bOpts.CredsBundle != nil: + // The "fallback" mode in google default credentials (which is the only + // type of credentials we expect to be used with RLS) uses TLS/ALTS + // creds for transport and uses the same call creds as that on the + // parent bundle. 
+ bundle, err := bOpts.CredsBundle.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + return nil, err + } + credsOpt = grpc.WithCredentialsBundle(bundle) + default: + cc.logger.Warningf("no credentials available, using Insecure") + credsOpt = grpc.WithInsecure() + } + return append(dopts, credsOpt), nil +} + +func (cc *controlChannel) monitorConnectivityState() { + cc.logger.Infof("Starting connectivity state monitoring goroutine") + // Since we use two mechanisms to deal with RLS server being down: + // - adaptive throttling for the channel as a whole + // - exponential backoff on a per-request basis + // we need a way to avoid double-penalizing requests by counting failures + // toward both mechanisms when the RLS server is unreachable. + // + // To accomplish this, we monitor the state of the control plane channel. If + // the state has been TRANSIENT_FAILURE since the last time it was in state + // READY, and it then transitions into state READY, we push on a channel + // which is being read by the LB policy. + // + // The LB the policy will iterate through the cache to reset the backoff + // timeouts in all cache entries. Specifically, this means that it will + // reset the backoff state and cancel the pending backoff timer. Note that + // when cancelling the backoff timer, just like when the backoff timer fires + // normally, a new picker is returned to the channel, to force it to + // re-process any wait-for-ready RPCs that may still be queued if we failed + // them while we were in backoff. However, we should optimize this case by + // returning only one new picker, regardless of how many backoff timers are + // cancelled. + + // Using the background context is fine here since we check for the ClientConn + // entering SHUTDOWN and return early in that case. + ctx := context.Background() + + first := true + for { + // Wait for the control channel to become READY. 
+ for s := cc.cc.GetState(); s != connectivity.Ready; s = cc.cc.GetState() { + if s == connectivity.Shutdown { + return + } + cc.cc.WaitForStateChange(ctx, s) + } + cc.logger.Infof("Connectivity state is READY") + + if !first { + cc.logger.Infof("Control channel back to READY") + cc.backToReadyCh <- struct{}{} + } + first = false + + // Wait for the control channel to move out of READY. + cc.cc.WaitForStateChange(ctx, connectivity.Ready) + if cc.cc.GetState() == connectivity.Shutdown { + return + } + cc.logger.Infof("Connectivity state is %s", cc.cc.GetState()) + } +} + +func (cc *controlChannel) close() { + cc.logger.Infof("Closing control channel") + cc.cc.Close() +} + +type lookupCallback func(targets []string, headerData string, err error) + +// lookup starts a RouteLookup RPC in a separate goroutine and returns the +// results (and error, if any) in the provided callback. +// +// The returned boolean indicates whether the request was throttled by the +// client-side adaptive throttling algorithm in which case the provided callback +// will not be invoked. 
+func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string, cb lookupCallback) (throttled bool) { + if cc.throttler.ShouldThrottle() { + cc.logger.Infof("RLS request throttled by client-side adaptive throttling") + return true + } + go func() { + req := &rlspb.RouteLookupRequest{ + TargetType: "grpc", + KeyMap: reqKeys, + Reason: reason, + StaleHeaderData: staleHeaders, + } + cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) + + ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) + defer cancel() + resp, err := cc.client.RouteLookup(ctx, req) + cb(resp.GetTargets(), resp.GetHeaderData(), err) + }() + return false +} diff --git a/balancer/rls/internal/control_channel_test.go b/balancer/rls/internal/control_channel_test.go new file mode 100644 index 000000000000..953f6531428c --- /dev/null +++ b/balancer/rls/internal/control_channel_test.go @@ -0,0 +1,469 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/test/e2e" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/testdata" + "google.golang.org/protobuf/proto" +) + +// TestControlChannelThrottled tests the case where the adaptive throttler +// indicates that the control channel needs to be throttled. +func (s) TestControlChannelThrottled(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Create a control channel to the fake RLS server. + ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, balancer.BuildOptions{}, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the attempt to be throttled. + ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, nil) + + select { + case <-rlsReqCh: + t.Fatal("RouteLookup RPC invoked when control channel is throtlled") + case <-time.After(defaultTestShortTimeout): + } +} + +// TestLookupFailure tests the case where the RLS server responds with an error. +func (s) TestLookupFailure(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Setup the RLS server to respond with errors. 
+ rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Err: errors.New("rls failure")} + }) + + // Create a control channel to the fake RLS server. + ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, balancer.BuildOptions{}, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the callback to be invoked with an error. + errCh := make(chan error, 1) + ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(_ []string, _ string, err error) { + if err == nil { + errCh <- errors.New("rlsClient.lookup() succeeded, should have failed") + return + } + errCh <- nil + }) + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// TestLookupDeadlineExceeded tests the case where the RLS server does not +// respond within the configured rpc timeout. +func (s) TestLookupDeadlineExceeded(t *testing.T) { + // A unary interceptor which sleeps for long enough to cause lookup RPCs to + // exceed their deadline. + rlsReqCh := make(chan struct{}, 1) + interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + rlsReqCh <- struct{}{} + time.Sleep(2 * defaultTestShortTimeout) + return handler(ctx, req) + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Create a control channel with a small deadline. 
+ ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestShortTimeout, balancer.BuildOptions{}, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the callback to be invoked with an error. + errCh := make(chan error) + ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(_ []string, _ string, err error) { + if st, ok := status.FromError(err); !ok || st.Code() != codes.DeadlineExceeded { + errCh <- fmt.Errorf("rlsClient.lookup() returned error: %v, want %v", err, codes.DeadlineExceeded) + return + } + errCh <- nil + }) + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// testCredsBundle wraps a test call creds and real transport creds. +type testCredsBundle struct { + transportCreds credentials.TransportCredentials + callCreds credentials.PerRPCCredentials +} + +func (f *testCredsBundle) TransportCredentials() credentials.TransportCredentials { + return f.transportCreds +} + +func (f *testCredsBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return f.callCreds +} + +func (f *testCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) { + if mode != internal.CredsBundleModeFallback { + return nil, fmt.Errorf("unsupported mode: %v", mode) + } + return &testCredsBundle{ + transportCreds: f.transportCreds, + callCreds: f.callCreds, + }, nil +} + +var ( + // Call creds sent by the testPerRPCCredentials on the client, and verified + // by an interceptor on the server. 
+	perRPCCredsData = map[string]string{
+		"test-key":     "test-value",
+		"test-key-bin": string([]byte{1, 2, 3}),
+	}
+)
+
+type testPerRPCCredentials struct {
+	callCreds map[string]string
+}
+
+func (f *testPerRPCCredentials) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {
+	return f.callCreds, nil
+}
+
+func (f *testPerRPCCredentials) RequireTransportSecurity() bool {
+	return true
+}
+
+// Unary server interceptor which validates if the RPC contains call credentials
+// which match `perRPCCredsData`.
+func callCredsValidatingServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return nil, status.Error(codes.PermissionDenied, "didn't find metadata in context")
+	}
+	for k, want := range perRPCCredsData {
+		got, ok := md[k]
+		if !ok {
+			return ctx, status.Errorf(codes.PermissionDenied, "didn't find call creds key %v in context", k)
+		}
+		if got[0] != want {
+			return ctx, status.Errorf(codes.PermissionDenied, "for key %v, got value %v, want %v", k, got, want)
+		}
+	}
+	return handler(ctx, req)
+}
+
+// makeTLSCreds is a test helper which creates a TLS based transport credentials
+// from files specified in the arguments.
+func makeTLSCreds(t *testing.T, certPath, keyPath, rootsPath string) credentials.TransportCredentials { + cert, err := tls.LoadX509KeyPair(testdata.Path(certPath), testdata.Path(keyPath)) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(%q, %q) failed: %v", certPath, keyPath, err) + } + b, err := ioutil.ReadFile(testdata.Path(rootsPath)) + if err != nil { + t.Fatalf("ioutil.ReadFile(%q) failed: %v", rootsPath, err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + t.Fatal("failed to append certificates") + } + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + }) +} + +const ( + wantHeaderData = "headerData" + staleHeaderData = "staleHeaderData" +) + +var ( + keyMap = map[string]string{ + "k1": "v1", + "k2": "v2", + } + wantTargets = []string{"us_east_1.firestore.googleapis.com"} + lookupRequest = &rlspb.RouteLookupRequest{ + TargetType: "grpc", + KeyMap: keyMap, + Reason: rlspb.RouteLookupRequest_REASON_MISS, + StaleHeaderData: staleHeaderData, + } + lookupResponse = &e2e.RouteLookupResponse{ + Resp: &rlspb.RouteLookupResponse{ + Targets: wantTargets, + HeaderData: wantHeaderData, + }, + } +) + +func testControlChannelCredsSuccess(t *testing.T, sopts []grpc.ServerOption, bopts balancer.BuildOptions) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := setupFakeRLSServer(t, nil, sopts...) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Setup the RLS server to respond with a valid response. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return lookupResponse + }) + + // Verify that the request received by the RLS matches the expected one. 
+ rlsServer.SetRequestCallback(func(got *rlspb.RouteLookupRequest) { + if diff := cmp.Diff(lookupRequest, got, cmp.Comparer(proto.Equal)); diff != "" { + t.Errorf("RouteLookupRequest diff (-want, +got):\n%s", diff) + } + }) + + // Create a control channel to the fake server. + ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, bopts, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect a successful callback invocation. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + errCh := make(chan error, 1) + ctrlCh.lookup(keyMap, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(targets []string, headerData string, err error) { + if err != nil { + errCh <- fmt.Errorf("rlsClient.lookup() failed with err: %v", err) + return + } + if !cmp.Equal(targets, wantTargets) || headerData != wantHeaderData { + errCh <- fmt.Errorf("rlsClient.lookup() = (%v, %s), want (%v, %s)", targets, headerData, wantTargets, wantHeaderData) + return + } + errCh <- nil + }) + + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// TestControlChannelCredsSuccess tests creation of the control channel with +// different credentials, which are expected to succeed. 
+func (s) TestControlChannelCredsSuccess(t *testing.T) {
+	serverCreds := makeTLSCreds(t, "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem")
+	clientCreds := makeTLSCreds(t, "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem")
+
+	tests := []struct {
+		name  string
+		sopts []grpc.ServerOption
+		bopts balancer.BuildOptions
+	}{
+		{
+			name:  "insecure",
+			sopts: nil,
+			bopts: balancer.BuildOptions{},
+		},
+		{
+			name:  "transport creds only",
+			sopts: []grpc.ServerOption{grpc.Creds(serverCreds)},
+			bopts: balancer.BuildOptions{
+				DialCreds: clientCreds,
+				Authority: "x.test.example.com",
+			},
+		},
+		{
+			name: "creds bundle",
+			sopts: []grpc.ServerOption{
+				grpc.Creds(serverCreds),
+				grpc.UnaryInterceptor(callCredsValidatingServerInterceptor),
+			},
+			bopts: balancer.BuildOptions{
+				CredsBundle: &testCredsBundle{
+					transportCreds: clientCreds,
+					callCreds:      &testPerRPCCredentials{callCreds: perRPCCredsData},
+				},
+				Authority: "x.test.example.com",
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			testControlChannelCredsSuccess(t, test.sopts, test.bopts)
+		})
+	}
+}
+
+func testControlChannelCredsFailure(t *testing.T, sopts []grpc.ServerOption, bopts balancer.BuildOptions, wantCode codes.Code, wantErr string) {
+	// Start a fake RouteLookupService server.
+	//
+	// Start an RLS server and set the throttler to never throttle requests. The
+	// creds failures happen before the RPC handler on the server is invoked.
+	// So, there is no need to set up the request and responses on the fake server.
+	rlsServer, _ := setupFakeRLSServer(t, nil, sopts...)
+	overrideAdaptiveThrottler(t, neverThrottlingThrottler())
+
+	// Create the control channel to the fake server.
+ ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, bopts, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the callback to be invoked with an error. + errCh := make(chan error) + ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(_ []string, _ string, err error) { + if st, ok := status.FromError(err); !ok || st.Code() != wantCode || !strings.Contains(st.String(), wantErr) { + errCh <- fmt.Errorf("rlsClient.lookup() returned error: %v, wantCode: %v, wantErr: %s", err, wantCode, wantErr) + return + } + errCh <- nil + }) + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// TestControlChannelCredsFailure tests creation of the control channel with +// different credentials, which are expected to fail. 
+func (s) TestControlChannelCredsFailure(t *testing.T) { + serverCreds := makeTLSCreds(t, "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + clientCreds := makeTLSCreds(t, "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + + tests := []struct { + name string + sopts []grpc.ServerOption + bopts balancer.BuildOptions + wantCode codes.Code + wantErr string + }{ + { + name: "transport creds authority mismatch", + sopts: []grpc.ServerOption{grpc.Creds(serverCreds)}, + bopts: balancer.BuildOptions{ + DialCreds: clientCreds, + Authority: "authority-mismatch", + }, + wantCode: codes.Unavailable, + wantErr: "transport: authentication handshake failed: x509: certificate is valid for *.test.example.com, not authority-mismatch", + }, + { + name: "transport creds handshake failure", + sopts: nil, // server expects insecure connection + bopts: balancer.BuildOptions{ + DialCreds: clientCreds, + Authority: "x.test.example.com", + }, + wantCode: codes.Unavailable, + wantErr: "transport: authentication handshake failed: tls: first record does not look like a TLS handshake", + }, + { + name: "call creds mismatch", + sopts: []grpc.ServerOption{ + grpc.Creds(serverCreds), + grpc.UnaryInterceptor(callCredsValidatingServerInterceptor), // server expects call creds + }, + bopts: balancer.BuildOptions{ + CredsBundle: &testCredsBundle{ + transportCreds: clientCreds, + callCreds: &testPerRPCCredentials{}, // sends no call creds + }, + Authority: "x.test.example.com", + }, + wantCode: codes.PermissionDenied, + wantErr: "didn't find call creds", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testControlChannelCredsFailure(t, test.sopts, test.bopts, test.wantCode, test.wantErr) + }) + } +} + +type unsupportedCredsBundle struct { + credentials.Bundle +} + +func (*unsupportedCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) { + return nil, fmt.Errorf("unsupported mode: %v", mode) +} + +// 
TestNewControlChannelUnsupportedCredsBundle tests the case where the control +// channel is configured with a bundle which does not support the mode we use. +func (s) TestNewControlChannelUnsupportedCredsBundle(t *testing.T) { + rlsServer, _ := setupFakeRLSServer(t, nil) + + // Create the control channel to the fake server. + ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, balancer.BuildOptions{CredsBundle: &unsupportedCredsBundle{}}, nil) + if err == nil { + ctrlCh.close() + t.Fatal("newControlChannel succeeded when expected to fail") + } +} diff --git a/balancer/rls/internal/helpers_test.go b/balancer/rls/internal/helpers_test.go new file mode 100644 index 000000000000..bb5478a3fa57 --- /dev/null +++ b/balancer/rls/internal/helpers_test.go @@ -0,0 +1,327 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "context" + "net" + "strings" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/rls/internal/test/e2e" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpctest" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/protobuf/types/known/durationpb" +) + +// TODO(easwars): Remove this once all RLS code is merged. +//lint:file-ignore U1000 Ignore all unused code, not all code is merged yet. + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 100 * time.Millisecond +) + +func init() { + balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// connWrapper wraps a net.Conn and pushes on a channel when closed. +type connWrapper struct { + net.Conn + closeCh *testutils.Channel +} + +func (cw *connWrapper) Close() error { + err := cw.Conn.Close() + cw.closeCh.Replace(nil) + return err +} + +// listenerWrapper wraps a net.Listener and the returned net.Conn. +// +// It pushes on a channel whenever it accepts a new connection. 
+type listenerWrapper struct { + net.Listener + newConnCh *testutils.Channel +} + +func (l *listenerWrapper) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + closeCh := testutils.NewChannel() + conn := &connWrapper{Conn: c, closeCh: closeCh} + l.newConnCh.Send(conn) + return conn, nil +} + +func newListenerWrapper(t *testing.T, lis net.Listener) *listenerWrapper { + if lis == nil { + var err error + lis, err = testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + } + + return &listenerWrapper{ + Listener: lis, + newConnCh: testutils.NewChannel(), + } +} + +// fakeBackoffStrategy is a fake implementation of the backoff.Strategy +// interface, for tests to inject the backoff duration. +type fakeBackoffStrategy struct { + backoff time.Duration +} + +func (f *fakeBackoffStrategy) Backoff(retries int) time.Duration { + return f.backoff +} + +// fakeThrottler is a fake implementation of the adaptiveThrottler interface. +type fakeThrottler struct { + throttleFunc func() bool +} + +func (f *fakeThrottler) ShouldThrottle() bool { return f.throttleFunc() } +func (f *fakeThrottler) RegisterBackendResponse(bool) {} + +// alwaysThrottlingThrottler returns a fake throttler which always throttles. +func alwaysThrottlingThrottler() *fakeThrottler { + return &fakeThrottler{throttleFunc: func() bool { return true }} +} + +// neverThrottlingThrottler returns a fake throttler which never throttles. +func neverThrottlingThrottler() *fakeThrottler { + return &fakeThrottler{throttleFunc: func() bool { return false }} +} + +// oneTimeAllowingThrottler returns a fake throttler which does not throttle the +// first request, but throttles everything that comes after. This is useful for +// tests which need to set up a valid cache entry before testing other cases. 
+func oneTimeAllowingThrottler() *fakeThrottler {
+	var once sync.Once
+	return &fakeThrottler{
+		throttleFunc: func() bool {
+			throttle := true
+			once.Do(func() { throttle = false })
+			return throttle
+		},
+	}
+}
+
+func overrideAdaptiveThrottler(t *testing.T, f *fakeThrottler) {
+	origAdaptiveThrottler := newAdaptiveThrottler
+	newAdaptiveThrottler = func() adaptiveThrottler { return f }
+	t.Cleanup(func() { newAdaptiveThrottler = origAdaptiveThrottler })
+}
+
+// setupFakeRLSServer starts and returns a fake RouteLookupService server
+// listening on the given listener or on a random local port. Also returns a
+// channel for tests to get notified whenever the RouteLookup RPC is invoked on
+// the fake server.
+//
+// This function sets up the fake server to respond with an empty response for
+// the RouteLookup RPCs. Tests can override this by calling the
+// SetResponseCallback() method on the returned fake server.
+func setupFakeRLSServer(t *testing.T, lis net.Listener, opts ...grpc.ServerOption) (*e2e.FakeRouteLookupServer, chan struct{}) {
+	s, cancel := e2e.StartFakeRouteLookupServer(t, lis, opts...)
+	t.Logf("Started fake RLS server at %q", s.Address)
+
+	ch := make(chan struct{}, 1)
+	s.SetRequestCallback(func(request *rlspb.RouteLookupRequest) {
+		select {
+		case ch <- struct{}{}:
+		default:
+		}
+	})
+	t.Cleanup(cancel)
+	return s, ch
+}
+
+// buildBasicRLSConfig constructs a basic service config for the RLS LB policy
+// with header matching rules. This expects the passed child policy name to
+// have been registered by the caller.
+func buildBasicRLSConfig(childPolicyName, rlsServerAddress string) *e2e.RLSConfig { + return &e2e.RLSConfig{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}, + Headers: []*rlspb.NameMatcher{ + {Key: "k1", Names: []string{"n1"}}, + {Key: "k2", Names: []string{"n2"}}, + }, + }, + }, + LookupService: rlsServerAddress, + LookupServiceTimeout: durationpb.New(defaultTestTimeout), + CacheSizeBytes: 1024, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + ChildPolicyConfigTargetFieldName: e2e.RLSChildPolicyTargetNameField, + } +} + +// buildBasicRLSConfigWithChildPolicy constructs a very basic service config for +// the RLS LB policy. It also registers a test LB policy which is capable of +// being a child of the RLS LB policy. +func buildBasicRLSConfigWithChildPolicy(t *testing.T, childPolicyName, rlsServerAddress string) *e2e.RLSConfig { + childPolicyName = "test-child-policy" + childPolicyName + e2e.RegisterRLSChildPolicy(childPolicyName, nil) + t.Logf("Registered child policy with name %q", childPolicyName) + + return &e2e.RLSConfig{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{{Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}}}, + LookupService: rlsServerAddress, + LookupServiceTimeout: durationpb.New(defaultTestTimeout), + CacheSizeBytes: 1024, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + ChildPolicyConfigTargetFieldName: e2e.RLSChildPolicyTargetNameField, + } +} + +// startBackend starts a backend implementing the TestService on a local port. +// It returns a channel for tests to get notified whenever an RPC is invoked on +// the backend. This allows tests to ensure that RPCs reach expected backends. +// Also returns the address of the backend. 
+func startBackend(t *testing.T, sopts ...grpc.ServerOption) (rpcCh chan struct{}, address string) { + t.Helper() + + rpcCh = make(chan struct{}, 1) + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + select { + case rpcCh <- struct{}{}: + default: + } + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(sopts...); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + return rpcCh, backend.Address +} + +// startManualResolverWithConfig registers and returns a manual resolver which +// pushes the RLS LB policy's service config on the channel. +func startManualResolverWithConfig(t *testing.T, rlsConfig *e2e.RLSConfig) *manual.Resolver { + t.Helper() + + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + + sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + r := manual.NewBuilderWithScheme("rls-e2e") + r.InitialState(resolver.State{ServiceConfig: sc}) + t.Cleanup(r.Close) + return r +} + +// makeTestRPCAndExpectItToReachBackend is a test helper function which makes +// the EmptyCall RPC on the given ClientConn and verifies that it reaches a +// backend. The latter is accomplished by listening on the provided channel +// which gets pushed to whenever the backend in question gets an RPC. 
+func makeTestRPCAndExpectItToReachBackend(ctx context.Context, t *testing.T, cc *grpc.ClientConn, ch chan struct{}) { + t.Helper() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall() failed with error: %v", err) + } + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for backend to receive RPC") + case <-ch: + } +} + +// makeTestRPCAndVerifyError is a test helper function which makes the EmptyCall +// RPC on the given ClientConn and verifies that the RPC fails with the given +// status code and error. +func makeTestRPCAndVerifyError(ctx context.Context, t *testing.T, cc *grpc.ClientConn, wantCode codes.Code, wantErr error) { + t.Helper() + + client := testgrpc.NewTestServiceClient(cc) + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if err == nil { + t.Fatal("TestService/EmptyCall() succeeded when expected to fail") + } + if code := status.Code(err); code != wantCode { + t.Fatalf("TestService/EmptyCall() returned code: %v, want: %v", code, wantCode) + } + if wantErr != nil && !strings.Contains(err.Error(), wantErr.Error()) { + t.Fatalf("TestService/EmptyCall() returned err: %v, want: %v", err, wantErr) + } +} + +// verifyRLSRequest is a test helper which listens on a channel to see if an RLS +// request was received by the fake RLS server. Based on whether the test +// expects a request to be sent out or not, it uses a different timeout. 
+func verifyRLSRequest(t *testing.T, ch chan struct{}, wantRequest bool) { + t.Helper() + + if wantRequest { + select { + case <-time.After(defaultTestTimeout): + t.Fatalf("Timeout when waiting for an RLS request to be sent out") + case <-ch: + } + } else { + select { + case <-time.After(defaultTestShortTimeout): + case <-ch: + t.Fatalf("RLS request sent out when not expecting one") + } + } +} diff --git a/balancer/rls/internal/picker.go b/balancer/rls/internal/picker.go deleted file mode 100644 index 37e58759e256..000000000000 --- a/balancer/rls/internal/picker.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package rls - -import ( - "errors" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/cache" - "google.golang.org/grpc/balancer/rls/internal/keys" - "google.golang.org/grpc/metadata" -) - -var errRLSThrottled = errors.New("RLS call throttled at client side") - -// RLS rlsPicker selects the subConn to be used for a particular RPC. It does -// not manage subConns directly and usually deletegates to pickers provided by -// child policies. -// -// The RLS LB policy creates a new rlsPicker object whenever its ServiceConfig -// is updated and provides a bunch of hooks for the rlsPicker to get the latest -// state that it can used to make its decision. 
-type rlsPicker struct { - // The keyBuilder map used to generate RLS keys for the RPC. This is built - // by the LB policy based on the received ServiceConfig. - kbm keys.BuilderMap - // Endpoint from the user's original dial target. Used to set the `host_key` - // field in `extra_keys`. - origEndpoint string - - // The following hooks are setup by the LB policy to enable the rlsPicker to - // access state stored in the policy. This approach has the following - // advantages: - // 1. The rlsPicker is loosely coupled with the LB policy in the sense that - // updates happening on the LB policy like the receipt of an RLS - // response, or an update to the default rlsPicker etc are not explicitly - // pushed to the rlsPicker, but are readily available to the rlsPicker - // when it invokes these hooks. And the LB policy takes care of - // synchronizing access to these shared state. - // 2. It makes unit testing the rlsPicker easy since any number of these - // hooks could be overridden. - - // readCache is used to read from the data cache and the pending request - // map in an atomic fashion. The first return parameter is the entry in the - // data cache, and the second indicates whether an entry for the same key - // is present in the pending cache. - readCache func(cache.Key) (*cache.Entry, bool) - // shouldThrottle decides if the current RPC should be throttled at the - // client side. It uses an adaptive throttling algorithm. - shouldThrottle func() bool - // startRLS kicks off an RLS request in the background for the provided RPC - // path and keyMap. An entry in the pending request map is created before - // sending out the request and an entry in the data cache is created or - // updated upon receipt of a response. See implementation in the LB policy - // for details. 
- startRLS func(string, keys.KeyMap) - // defaultPick enables the rlsPicker to delegate the pick decision to the - // rlsPicker returned by the child LB policy pointing to the default target - // specified in the service config. - defaultPick func(balancer.PickInfo) (balancer.PickResult, error) -} - -// Pick makes the routing decision for every outbound RPC. -func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - // Build the request's keys using the key builders from LB config. - md, _ := metadata.FromOutgoingContext(info.Ctx) - km := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) - - // We use the LB policy hook to read the data cache and the pending request - // map (whether or not an entry exists) for the RPC path and the generated - // RLS keys. We will end up kicking off an RLS request only if there is no - // pending request for the current RPC path and keys, and either we didn't - // find an entry in the data cache or the entry was stale and it wasn't in - // backoff. - startRequest := false - now := time.Now() - entry, pending := p.readCache(cache.Key{Path: info.FullMethodName, KeyMap: km.Str}) - if entry == nil { - startRequest = true - } else { - entry.Mu.Lock() - defer entry.Mu.Unlock() - if entry.StaleTime.Before(now) && entry.BackoffTime.Before(now) { - // This is the proactive cache refresh. - startRequest = true - } - } - - if startRequest && !pending { - if p.shouldThrottle() { - // The entry doesn't exist or has expired and the new RLS request - // has been throttled. Treat it as an error and delegate to default - // pick, if one exists, or fail the pick. - if entry == nil || entry.ExpiryTime.Before(now) { - if p.defaultPick != nil { - return p.defaultPick(info) - } - return balancer.PickResult{}, errRLSThrottled - } - // The proactive refresh has been throttled. Nothing to worry, just - // keep using the existing entry. 
- } else { - p.startRLS(info.FullMethodName, km) - } - } - - if entry != nil { - if entry.ExpiryTime.After(now) { - // This is the jolly good case where we have found a valid entry in - // the data cache. We delegate to the LB policy associated with - // this cache entry. - return entry.ChildPicker.Pick(info) - } else if entry.BackoffTime.After(now) { - // The entry has expired, but is in backoff. We delegate to the - // default pick, if one exists, or return the error from the last - // failed RLS request for this entry. - if p.defaultPick != nil { - return p.defaultPick(info) - } - return balancer.PickResult{}, entry.CallStatus - } - } - - // We get here only in the following cases: - // * No data cache entry or expired entry, RLS request sent out - // * No valid data cache entry and Pending cache entry exists - // We need to queue to pick which will be handled once the RLS response is - // received. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} diff --git a/balancer/rls/internal/picker_test.go b/balancer/rls/internal/picker_test.go deleted file mode 100644 index f115be98a399..000000000000 --- a/balancer/rls/internal/picker_test.go +++ /dev/null @@ -1,615 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package rls - -import ( - "context" - "errors" - "fmt" - "math" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/cache" - "google.golang.org/grpc/balancer/rls/internal/keys" - "google.golang.org/grpc/internal/grpcrand" - rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/metadata" -) - -const defaultTestMaxAge = 5 * time.Second - -// initKeyBuilderMap initializes a keyBuilderMap of the form: -// { -// "gFoo": "k1=n1", -// "gBar/method1": "k2=n21,n22" -// "gFoobar": "k3=n3", -// } -func initKeyBuilderMap() (keys.BuilderMap, error) { - kb1 := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gFoo"}}, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1"}}}, - } - kb2 := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1"}}, - Headers: []*rlspb.NameMatcher{{Key: "k2", Names: []string{"n21", "n22"}}}, - } - kb3 := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gFoobar"}}, - Headers: []*rlspb.NameMatcher{{Key: "k3", Names: []string{"n3"}}}, - } - return keys.MakeBuilderMap(&rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{kb1, kb2, kb3}, - }) -} - -// fakeSubConn embeds the balancer.SubConn interface and contains an id which -// helps verify that the expected subConn was returned by the rlsPicker. -type fakeSubConn struct { - balancer.SubConn - id int -} - -// fakePicker sends a PickResult with a fakeSubConn with the configured id. -type fakePicker struct { - id int -} - -func (p *fakePicker) Pick(_ balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{SubConn: &fakeSubConn{id: p.id}}, nil -} - -// newFakePicker returns a fakePicker configured with a random ID. 
The subConns -// returned by this picker are of type fakefakeSubConn, and contain the same -// random ID, which tests can use to verify. -func newFakePicker() *fakePicker { - return &fakePicker{id: grpcrand.Intn(math.MaxInt32)} -} - -func verifySubConn(sc balancer.SubConn, wantID int) error { - fsc, ok := sc.(*fakeSubConn) - if !ok { - return fmt.Errorf("Pick() returned a SubConn of type %T, want %T", sc, &fakeSubConn{}) - } - if fsc.id != wantID { - return fmt.Errorf("Pick() returned SubConn %d, want %d", fsc.id, wantID) - } - return nil -} - -// TestPickKeyBuilder verifies the different possible scenarios for forming an -// RLS key for an incoming RPC. -func TestPickKeyBuilder(t *testing.T) { - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - - tests := []struct { - desc string - rpcPath string - md metadata.MD - wantKey cache.Key - }{ - { - desc: "non existent service in keyBuilder map", - rpcPath: "/gNonExistentService/method", - md: metadata.New(map[string]string{"n1": "v1", "n3": "v3"}), - wantKey: cache.Key{Path: "/gNonExistentService/method", KeyMap: ""}, - }, - { - desc: "no metadata in incoming context", - rpcPath: "/gFoo/method", - md: metadata.MD{}, - wantKey: cache.Key{Path: "/gFoo/method", KeyMap: ""}, - }, - { - desc: "keyBuilderMatch", - rpcPath: "/gFoo/method", - md: metadata.New(map[string]string{"n1": "v1", "n3": "v3"}), - wantKey: cache.Key{Path: "/gFoo/method", KeyMap: "k1=v1"}, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - randID := grpcrand.Intn(math.MaxInt32) - p := rlsPicker{ - kbm: kbm, - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, test.wantKey) { - t.Fatalf("rlsPicker using cacheKey %v, want %v", key, test.wantKey) - } - - now := time.Now() - return &cache.Entry{ - ExpiryTime: now.Add(defaultTestMaxAge), - StaleTime: now.Add(defaultTestMaxAge), - // Cache entry is configured with a child policy whose - // 
rlsPicker always returns an empty PickResult and nil - // error. - ChildPicker: &fakePicker{id: randID}, - }, false - }, - // The other hooks are not set here because they are not expected to be - // invoked for these cases and if they get invoked, they will panic. - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: test.rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), test.md), - }) - if err != nil { - t.Fatalf("Pick() failed with error: %v", err) - } - sc, ok := gotResult.SubConn.(*fakeSubConn) - if !ok { - t.Fatalf("Pick() returned a SubConn of type %T, want %T", gotResult.SubConn, &fakeSubConn{}) - } - if sc.id != randID { - t.Fatalf("Pick() returned SubConn %d, want %d", sc.id, randID) - } - }) - } -} - -// TestPick_DataCacheMiss_PendingCacheMiss verifies different Pick scenarios -// where the entry is neither found in the data cache nor in the pending cache. -func TestPick_DataCacheMiss_PendingCacheMiss(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - - tests := []struct { - desc string - // Whether or not a default target is configured. - defaultPickExists bool - // Whether or not the RLS request should be throttled. - throttle bool - // Whether or not the test is expected to make a new RLS request. - wantRLSRequest bool - // Expected error returned by the rlsPicker under test. 
- wantErr error - }{ - { - desc: "rls request throttled with default pick", - defaultPickExists: true, - throttle: true, - }, - { - desc: "rls request throttled without default pick", - throttle: true, - wantErr: errRLSThrottled, - }, - { - desc: "rls request not throttled", - wantRLSRequest: true, - wantErr: balancer.ErrNoSubConnAvailable, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - defaultPicker := newFakePicker() - - p := rlsPicker{ - kbm: kbm, - // Cache lookup fails, no pending entry. - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - return nil, false - }, - shouldThrottle: func() bool { return test.throttle }, - startRLS: func(path string, km keys.KeyMap) { - if !test.wantRLSRequest { - rlsCh.Send(errors.New("RLS request attempted when none was expected")) - return - } - if path != rpcPath { - rlsCh.Send(fmt.Errorf("RLS request initiated for rpcPath %s, want %s", path, rpcPath)) - return - } - if km.Str != wantKeyMapStr { - rlsCh.Send(fmt.Errorf("RLS request initiated with keys %v, want %v", km.Str, wantKeyMapStr)) - return - } - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = defaultPicker.Pick - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }) - if err != test.wantErr { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, test.wantErr) - } - // If the test specified that a new RLS request should be made, - // verify it. 
- if test.wantRLSRequest { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if rlsErr, err := rlsCh.Receive(ctx); err != nil || rlsErr != nil { - t.Fatalf("startRLS() = %v, error receiving from channel: %v", rlsErr, err) - } - } - if test.wantErr != nil { - return - } - - // We get here only for cases where we expect the pick to be - // delegated to the default picker. - if err := verifySubConn(gotResult.SubConn, defaultPicker.id); err != nil { - t.Fatal(err) - } - }) - } -} - -// TestPick_DataCacheMiss_PendingCacheMiss verifies different Pick scenarios -// where the entry is not found in the data cache, but there is a entry in the -// pending cache. For all of these scenarios, no new RLS request will be sent. -func TestPick_DataCacheMiss_PendingCacheHit(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - - tests := []struct { - desc string - defaultPickExists bool - }{ - { - desc: "default pick exists", - defaultPickExists: true, - }, - { - desc: "default pick does not exists", - }, - } - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - p := rlsPicker{ - kbm: kbm, - // Cache lookup fails, pending entry exists. - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - return nil, true - }, - // Never throttle. We do not expect an RLS request to be sent out anyways. 
- shouldThrottle: func() bool { return false }, - startRLS: func(_ string, _ keys.KeyMap) { - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = func(info balancer.PickInfo) (balancer.PickResult, error) { - // We do not expect the default picker to be invoked at all. - // So, if we get here, the test will fail, because it - // expects the pick to be queued. - return balancer.PickResult{}, nil - } - } - - if _, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, balancer.ErrNoSubConnAvailable) - } - - // Make sure that no RLS request was sent out. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := rlsCh.Receive(ctx); err != context.DeadlineExceeded { - t.Fatalf("RLS request sent out when pending entry exists") - } - }) - } -} - -// TestPick_DataCacheHit_PendingCacheMiss verifies different Pick scenarios -// where the entry is found in the data cache, and there is no entry in the -// pending cache. This includes cases where the entry in the data cache is -// stale, expired or in backoff. -func TestPick_DataCacheHit_PendingCacheMiss(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - rlsLastErr := errors.New("last RLS request failed") - - tests := []struct { - desc string - // The cache entry, as returned by the overridden readCache hook. - cacheEntry *cache.Entry - // Whether or not a default target is configured. - defaultPickExists bool - // Whether or not the RLS request should be throttled. 
- throttle bool - // Whether or not the test is expected to make a new RLS request. - wantRLSRequest bool - // Whether or not the rlsPicker should delegate to the child picker. - wantChildPick bool - // Whether or not the rlsPicker should delegate to the default picker. - wantDefaultPick bool - // Expected error returned by the rlsPicker under test. - wantErr error - }{ - { - desc: "valid entry", - cacheEntry: &cache.Entry{ - ExpiryTime: time.Now().Add(defaultTestMaxAge), - StaleTime: time.Now().Add(defaultTestMaxAge), - }, - wantChildPick: true, - }, - { - desc: "entryStale_requestThrottled", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - throttle: true, - wantChildPick: true, - }, - { - desc: "entryStale_requestNotThrottled", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - wantRLSRequest: true, - wantChildPick: true, - }, - { - desc: "entryExpired_requestThrottled_defaultPickExists", - cacheEntry: &cache.Entry{}, - throttle: true, - defaultPickExists: true, - wantDefaultPick: true, - }, - { - desc: "entryExpired_requestThrottled_defaultPickNotExists", - cacheEntry: &cache.Entry{}, - throttle: true, - wantErr: errRLSThrottled, - }, - { - desc: "entryExpired_requestNotThrottled", - cacheEntry: &cache.Entry{}, - wantRLSRequest: true, - wantErr: balancer.ErrNoSubConnAvailable, - }, - { - desc: "entryExpired_backoffNotExpired_defaultPickExists", - cacheEntry: &cache.Entry{ - BackoffTime: time.Now().Add(defaultTestMaxAge), - CallStatus: rlsLastErr, - }, - defaultPickExists: true, - }, - { - desc: "entryExpired_backoffNotExpired_defaultPickNotExists", - cacheEntry: &cache.Entry{ - BackoffTime: time.Now().Add(defaultTestMaxAge), - CallStatus: rlsLastErr, - }, - wantErr: rlsLastErr, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - childPicker := newFakePicker() - defaultPicker := newFakePicker() - - p := rlsPicker{ - kbm: kbm, - readCache: 
func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - test.cacheEntry.ChildPicker = childPicker - return test.cacheEntry, false - }, - shouldThrottle: func() bool { return test.throttle }, - startRLS: func(path string, km keys.KeyMap) { - if !test.wantRLSRequest { - rlsCh.Send(errors.New("RLS request attempted when none was expected")) - return - } - if path != rpcPath { - rlsCh.Send(fmt.Errorf("RLS request initiated for rpcPath %s, want %s", path, rpcPath)) - return - } - if km.Str != wantKeyMapStr { - rlsCh.Send(fmt.Errorf("RLS request initiated with keys %v, want %v", km.Str, wantKeyMapStr)) - return - } - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = defaultPicker.Pick - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }) - if err != test.wantErr { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, test.wantErr) - } - // If the test specified that a new RLS request should be made, - // verify it. - if test.wantRLSRequest { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if rlsErr, err := rlsCh.Receive(ctx); err != nil || rlsErr != nil { - t.Fatalf("startRLS() = %v, error receiving from channel: %v", rlsErr, err) - } - } - if test.wantErr != nil { - return - } - - // We get here only for cases where we expect the pick to be - // delegated to the child picker or the default picker. - if test.wantChildPick { - if err := verifySubConn(gotResult.SubConn, childPicker.id); err != nil { - t.Fatal(err) - } - - } - if test.wantDefaultPick { - if err := verifySubConn(gotResult.SubConn, defaultPicker.id); err != nil { - t.Fatal(err) - } - } - }) - } -} - -// TestPick_DataCacheHit_PendingCacheHit verifies different Pick scenarios where -// the entry is found both in the data cache and in the pending cache. 
This -// mostly verifies cases where the entry is stale, but there is already a -// pending RLS request, so no new request should be sent out. -func TestPick_DataCacheHit_PendingCacheHit(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - - tests := []struct { - desc string - // The cache entry, as returned by the overridden readCache hook. - cacheEntry *cache.Entry - // Whether or not a default target is configured. - defaultPickExists bool - // Expected error returned by the rlsPicker under test. - wantErr error - }{ - { - desc: "stale entry", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - }, - { - desc: "stale entry with default picker", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - defaultPickExists: true, - }, - { - desc: "entryExpired_defaultPickExists", - cacheEntry: &cache.Entry{}, - defaultPickExists: true, - wantErr: balancer.ErrNoSubConnAvailable, - }, - { - desc: "entryExpired_defaultPickNotExists", - cacheEntry: &cache.Entry{}, - wantErr: balancer.ErrNoSubConnAvailable, - }, - } - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - childPicker := newFakePicker() - - p := rlsPicker{ - kbm: kbm, - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - test.cacheEntry.ChildPicker = childPicker - return test.cacheEntry, true - }, - // Never throttle. We do not expect an RLS request to be sent out anyways. 
- shouldThrottle: func() bool { return false }, - startRLS: func(path string, km keys.KeyMap) { - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = func(info balancer.PickInfo) (balancer.PickResult, error) { - // We do not expect the default picker to be invoked at all. - // So, if we get here, we return an error. - return balancer.PickResult{}, errors.New("default picker invoked when expecting a child pick") - } - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }) - if err != test.wantErr { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, test.wantErr) - } - // Make sure that no RLS request was sent out. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := rlsCh.Receive(ctx); err != context.DeadlineExceeded { - t.Fatalf("RLS request sent out when pending entry exists") - } - if test.wantErr != nil { - return - } - - // We get here only for cases where we expect the pick to be - // delegated to the child picker. - if err := verifySubConn(gotResult.SubConn, childPicker.id); err != nil { - t.Fatal(err) - } - }) - } -} diff --git a/balancer/rls/internal/test/e2e/e2e.go b/balancer/rls/internal/test/e2e/e2e.go new file mode 100644 index 000000000000..7b8a8bbde138 --- /dev/null +++ b/balancer/rls/internal/test/e2e/e2e.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package e2e contains utilities for end-to-end RouteLookupService tests. +package e2e diff --git a/balancer/rls/internal/test/e2e/rls_child_policy.go b/balancer/rls/internal/test/e2e/rls_child_policy.go new file mode 100644 index 000000000000..5a6e3e69175a --- /dev/null +++ b/balancer/rls/internal/test/e2e/rls_child_policy.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const ( + // RLSChildPolicyTargetNameField is a top-level field name to add to the child + // policy's config, whose value is set to the target for the child policy. + RLSChildPolicyTargetNameField = "Backend" + // RLSChildPolicyBadTarget is a value which is considered a bad target by the + // child policy. This is useful to test bad child policy configuration. + RLSChildPolicyBadTarget = "bad-target" +) + +// ErrParseConfigBadTarget is the error returned from ParseConfig when the +// backend field is set to RLSChildPolicyBadTarget. 
+var ErrParseConfigBadTarget = errors.New("backend field set to RLSChildPolicyBadTarget") + +// BalancerFuncs is a set of callbacks which get invoked when the corresponding +// method on the child policy is invoked. +type BalancerFuncs struct { + UpdateClientConnState func(cfg *RLSChildPolicyConfig) error + Close func() +} + +// RegisterRLSChildPolicy registers a balancer builder with the given name, to +// be used as a child policy for the RLS LB policy. +// +// The child policy uses a pickfirst balancer under the hood to send all traffic +// to the single backend specified by the `RLSChildPolicyTargetNameField` field +// in its configuration which looks like: {"Backend": "Backend-address"}. +func RegisterRLSChildPolicy(name string, bf *BalancerFuncs) { + balancer.Register(bb{name: name, bf: bf}) +} + +type bb struct { + name string + bf *BalancerFuncs +} + +func (bb bb) Name() string { return bb.name } + +func (bb bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + pf := balancer.Get(grpc.PickFirstBalancerName) + b := &bal{ + Balancer: pf.Build(cc, opts), + bf: bb.bf, + done: grpcsync.NewEvent(), + } + go b.run() + return b +} + +func (bb bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &RLSChildPolicyConfig{} + if err := json.Unmarshal(c, cfg); err != nil { + return nil, err + } + if cfg.Backend == RLSChildPolicyBadTarget { + return nil, ErrParseConfigBadTarget + } + return cfg, nil +} + +type bal struct { + balancer.Balancer + bf *BalancerFuncs + done *grpcsync.Event +} + +// RLSChildPolicyConfig is the LB config for the test child policy. +type RLSChildPolicyConfig struct { + serviceconfig.LoadBalancingConfig + Backend string // The target for which this child policy was created. + Random string // A random field to test child policy config changes. 
+} + +func (b *bal) UpdateClientConnState(c balancer.ClientConnState) error { + cfg, ok := c.BalancerConfig.(*RLSChildPolicyConfig) + if !ok { + return fmt.Errorf("received balancer config of type %T, want %T", c.BalancerConfig, &RLSChildPolicyConfig{}) + } + if b.bf != nil && b.bf.UpdateClientConnState != nil { + b.bf.UpdateClientConnState(cfg) + } + return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{{Addr: cfg.Backend}}}, + }) +} + +func (b *bal) Close() { + b.Balancer.Close() + if b.bf != nil && b.bf.Close != nil { + b.bf.Close() + } + b.done.Fire() +} + +// run is a dummy goroutine to make sure that child policies are closed at the +// end of tests. If they are not closed, these goroutines will be picked up by +// the leakcheker and tests will fail. +func (b *bal) run() { + <-b.done.Done() +} diff --git a/balancer/rls/internal/test/e2e/rls_fakeserver.go b/balancer/rls/internal/test/e2e/rls_fakeserver.go new file mode 100644 index 000000000000..521985412822 --- /dev/null +++ b/balancer/rls/internal/test/e2e/rls_fakeserver.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package e2e + +import ( + "context" + "net" + "sync" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/status" +) + +// RouteLookupResponse wraps an RLS response and the associated error to be sent +// to a client when the RouteLookup RPC is invoked. +type RouteLookupResponse struct { + Resp *rlspb.RouteLookupResponse + Err error +} + +// FakeRouteLookupServer is a fake implementation of the RouteLookupService. +// +// It is safe for concurrent use. +type FakeRouteLookupServer struct { + rlsgrpc.UnimplementedRouteLookupServiceServer + Address string + + mu sync.Mutex + respCb func(context.Context, *rlspb.RouteLookupRequest) *RouteLookupResponse + reqCb func(*rlspb.RouteLookupRequest) +} + +// StartFakeRouteLookupServer starts a fake RLS server listening for requests on +// lis. If lis is nil, it creates a new listener on a random local port. The +// returned cancel function should be invoked by the caller upon completion of +// the test. +func StartFakeRouteLookupServer(t *testing.T, lis net.Listener, opts ...grpc.ServerOption) (*FakeRouteLookupServer, func()) { + t.Helper() + + if lis == nil { + var err error + lis, err = testutils.LocalTCPListener() + if err != nil { + t.Fatalf("net.Listen() failed: %v", err) + } + } + + s := &FakeRouteLookupServer{Address: lis.Addr().String()} + server := grpc.NewServer(opts...) + rlsgrpc.RegisterRouteLookupServiceServer(server, s) + go server.Serve(lis) + return s, func() { server.Stop() } +} + +// RouteLookup implements the RouteLookupService. 
+func (s *FakeRouteLookupServer) RouteLookup(ctx context.Context, req *rlspb.RouteLookupRequest) (*rlspb.RouteLookupResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.reqCb != nil { + s.reqCb(req) + } + if err := ctx.Err(); err != nil { + return nil, status.Error(codes.DeadlineExceeded, err.Error()) + } + if s.respCb == nil { + return &rlspb.RouteLookupResponse{}, nil + } + resp := s.respCb(ctx, req) + return resp.Resp, resp.Err +} + +// SetResponseCallback sets a callback to be invoked on every RLS request. If +// this callback is set, the response returned by the fake server depends on the +// value returned by the callback. If this callback is not set, the fake server +// responds with an empty response. +func (s *FakeRouteLookupServer) SetResponseCallback(f func(context.Context, *rlspb.RouteLookupRequest) *RouteLookupResponse) { + s.mu.Lock() + s.respCb = f + s.mu.Unlock() +} + +// SetRequestCallback sets a callback to be invoked on every RLS request. The +// callback is given the incoming request, and tests can use this to verify that +// the request matches its expectations. +func (s *FakeRouteLookupServer) SetRequestCallback(f func(*rlspb.RouteLookupRequest)) { + s.mu.Lock() + s.reqCb = f + s.mu.Unlock() +} diff --git a/balancer/rls/internal/test/e2e/rls_lb_config.go b/balancer/rls/internal/test/e2e/rls_lb_config.go new file mode 100644 index 000000000000..2aec642c77e0 --- /dev/null +++ b/balancer/rls/internal/test/e2e/rls_lb_config.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" + + "google.golang.org/protobuf/encoding/protojson" +) + +// RLSConfig is a utility type to build service config for the RLS LB policy. +type RLSConfig struct { + RouteLookupConfig *rlspb.RouteLookupConfig + ChildPolicy *internalserviceconfig.BalancerConfig + ChildPolicyConfigTargetFieldName string +} + +// ServiceConfigJSON generates service config with a load balancing config +// corresponding to the RLS LB policy. +func (c *RLSConfig) ServiceConfigJSON() (string, error) { + m := protojson.MarshalOptions{ + Multiline: true, + Indent: " ", + UseProtoNames: true, + } + routeLookupCfg, err := m.Marshal(c.RouteLookupConfig) + if err != nil { + return "", err + } + childPolicy, err := c.ChildPolicy.MarshalJSON() + if err != nil { + return "", err + } + + return fmt.Sprintf(` +{ + "loadBalancingConfig": [ + { + "rls_experimental": { + "routeLookupConfig": %s, + "childPolicy": %s, + "childPolicyConfigTargetFieldName": %q + } + } + ] +}`, string(routeLookupCfg), string(childPolicy), c.ChildPolicyConfigTargetFieldName), nil +} + +// LoadBalancingConfig generates load balancing config which can used as part of +// a ClientConnState update to the RLS LB policy. 
+func (c *RLSConfig) LoadBalancingConfig() (serviceconfig.LoadBalancingConfig, error) { + m := protojson.MarshalOptions{ + Multiline: true, + Indent: " ", + UseProtoNames: true, + } + routeLookupCfg, err := m.Marshal(c.RouteLookupConfig) + if err != nil { + return nil, err + } + childPolicy, err := c.ChildPolicy.MarshalJSON() + if err != nil { + return nil, err + } + lbConfigJSON := fmt.Sprintf(` +{ + "routeLookupConfig": %s, + "childPolicy": %s, + "childPolicyConfigTargetFieldName": %q +}`, string(routeLookupCfg), string(childPolicy), c.ChildPolicyConfigTargetFieldName) + + builder := balancer.Get("rls_experimental") + if builder == nil { + return nil, errors.New("balancer builder not found for RLS LB policy") + } + parser := builder.(balancer.ConfigParser) + return parser.ParseConfig([]byte(lbConfigJSON)) +} diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index c97010dfe9a6..f3ed23aa32a4 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -80,6 +80,14 @@ func (ss *StubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallSer // Start starts the server and creates a client connected to it. func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error { + if err := ss.StartServer(sopts...); err != nil { + return err + } + return ss.StartClient(dopts...) +} + +// StartServer only starts the server. It does not create a client to it. +func (ss *StubServer) StartServer(sopts ...grpc.ServerOption) error { if ss.Network == "" { ss.Network = "tcp" } @@ -102,7 +110,12 @@ func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) go s.Serve(lis) ss.cleanups = append(ss.cleanups, s.Stop) ss.S = s + return nil +} +// StartClient creates a client connected to this service that the test may use. +// The newly created client will be available in the Client field of StubServer. 
+func (ss *StubServer) StartClient(dopts ...grpc.DialOption) error { opts := append([]grpc.DialOption{grpc.WithInsecure()}, dopts...) if ss.R != nil { ss.Target = ss.R.Scheme() + ":///" + ss.Address diff --git a/internal/testutils/restartable_listener.go b/internal/testutils/restartable_listener.go index 1f5019391911..efe4019a08c2 100644 --- a/internal/testutils/restartable_listener.go +++ b/internal/testutils/restartable_listener.go @@ -83,12 +83,11 @@ func (l *RestartableListener) Addr() net.Addr { func (l *RestartableListener) Stop() { l.mu.Lock() l.stopped = true - tmp := l.conns - l.conns = nil - l.mu.Unlock() - for _, conn := range tmp { + for _, conn := range l.conns { conn.Close() } + l.conns = nil + l.mu.Unlock() } // Restart gets a previously stopped listener to start accepting connections. From 029b8227351f5b89c63deab4eeeac8b5cbf17767 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 15 Dec 2021 17:58:03 -0500 Subject: [PATCH 370/998] xds: Add RLS Cluster Specifier Plugin (#5004) * xds: Add RLS Cluster Specifier Plugin --- balancer/rls/rls.go | 25 +++ xds/internal/clusterspecifier/rls/rls.go | 104 +++++++++++ xds/internal/clusterspecifier/rls/rls_test.go | 168 ++++++++++++++++++ 3 files changed, 297 insertions(+) create mode 100644 balancer/rls/rls.go create mode 100644 xds/internal/clusterspecifier/rls/rls.go create mode 100644 xds/internal/clusterspecifier/rls/rls_test.go diff --git a/balancer/rls/rls.go b/balancer/rls/rls.go new file mode 100644 index 000000000000..473c3c1a8133 --- /dev/null +++ b/balancer/rls/rls.go @@ -0,0 +1,25 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls imports to init the rls lb policy for testing purposes. +package rls + +import ( + // Blank import to init the rls lb policy for external use. + _ "google.golang.org/grpc/balancer/rls/internal" +) diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go new file mode 100644 index 000000000000..98b0d566395a --- /dev/null +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS cluster specifier plugin. 
+package rls + +import ( + "encoding/json" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" + + // Blank import to init the RLS LB policy. + _ "google.golang.org/grpc/balancer/rls" +) + +const rlsBalancerName = "rls_experimental" + +func init() { + if envconfig.XDSRLS { + clusterspecifier.Register(rls{}) + } +} + +type rls struct{} + +func (rls) TypeURLs() []string { + return []string{"type.googleapis.com/grpc.lookup.v1.RouteLookupClusterSpecifier"} +} + +// lbConfigJSON is the RLS LB Policies configuration in JSON format. +// RouteLookupConfig will be a raw JSON string from the passed in proto +// configuration, and the other fields will be hardcoded. 
+type lbConfigJSON struct { + RouteLookupConfig json.RawMessage `json:"routeLookupConfig"` + ChildPolicy []map[string]json.RawMessage `json:"childPolicy"` + ChildPolicyConfigTargetFieldName string `json:"childPolicyConfigTargetFieldName"` +} + +func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rls_csp: nil configuration message provided") + } + any, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) + } + rlcs := new(grpc_lookup_v1.RouteLookupClusterSpecifier) + + if err := ptypes.UnmarshalAny(any, rlcs); err != nil { + return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) + } + rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig()) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling route lookup config: %v: %v", rlcs.GetRouteLookupConfig(), err) + } + lbCfgJSON := &lbConfigJSON{ + RouteLookupConfig: rlcJSON, // "JSON form of RouteLookupClusterSpecifier.config" - RLS in xDS Design Doc + ChildPolicy: []map[string]json.RawMessage{ + { + "cds_experimental": json.RawMessage("{}"), + }, + }, + ChildPolicyConfigTargetFieldName: "cluster", + } + + rawJSON, err := json.Marshal(lbCfgJSON) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling load balancing config %v: %v", lbCfgJSON, err) + } + + rlsBB := balancer.Get(rlsBalancerName) + if rlsBB == nil { + return nil, fmt.Errorf("RLS LB policy not registered") + } + _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON) + if err != nil { + return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing %v", err) + } + + return clusterspecifier.BalancerConfig{{rlsBalancerName: lbCfgJSON}}, nil +} diff --git a/xds/internal/clusterspecifier/rls/rls_test.go b/xds/internal/clusterspecifier/rls/rls_test.go new file mode 100644 index 000000000000..69bf165d8c91 --- /dev/null +++ 
b/xds/internal/clusterspecifier/rls/rls_test.go @@ -0,0 +1,168 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "encoding/json" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + _ "google.golang.org/grpc/balancer/rls" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/internal/testutils" + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/types/known/durationpb" +) + +func init() { + clusterspecifier.Register(rls{}) +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestParseClusterSpecifierConfig tests the parsing functionality of the RLS +// Cluster Specifier Plugin. 
+func (s) TestParseClusterSpecifierConfig(t *testing.T) { + tests := []struct { + name string + rlcs proto.Message + wantConfig clusterspecifier.BalancerConfig + wantErr bool + }{ + { + name: "invalid-rls-cluster-specifier", + rlcs: rlsClusterSpecifierConfigError, + wantErr: true, + }, + { + name: "valid-rls-cluster-specifier", + rlcs: rlsClusterSpecifierConfigWithoutTransformations, + wantConfig: configWithoutTransformationsWant, + }, + } + for _, test := range tests { + cs := clusterspecifier.Get("type.googleapis.com/grpc.lookup.v1.RouteLookupClusterSpecifier") + if cs == nil { + t.Fatal("Error getting cluster specifier") + } + lbCfg, err := cs.ParseClusterSpecifierConfig(test.rlcs) + + if (err != nil) != test.wantErr { + t.Fatalf("ParseClusterSpecifierConfig(%+v) returned err: %v, wantErr: %v", test.rlcs, err, test.wantErr) + } + if test.wantErr { // Successfully received an error. + return + } + // Marshal and then unmarshal into interface{} to get rid of + // nondeterministic protojson Marshaling. + lbCfgJSON, err := json.Marshal(lbCfg) + if err != nil { + t.Fatalf("json.Marshal(%+v) returned err %v", lbCfg, err) + } + var got interface{} + err = json.Unmarshal(lbCfgJSON, got) + if err != nil { + t.Fatalf("json.Unmarshal(%+v) returned err %v", lbCfgJSON, err) + } + wantCfgJSON, err := json.Marshal(test.wantConfig) + if err != nil { + t.Fatalf("json.Marshal(%+v) returned err %v", test.wantConfig, err) + } + var want interface{} + err = json.Unmarshal(wantCfgJSON, want) + if err != nil { + t.Fatalf("json.Unmarshal(%+v) returned err %v", lbCfgJSON, err) + } + if diff := cmp.Diff(want, got, cmpopts.EquateEmpty()); diff != "" { + t.Fatalf("ParseClusterSpecifierConfig(%+v) returned expected, diff (-want +got) %v", test.rlcs, diff) + } + } +} + +// This will error because the required match field is set in grpc key builder. 
+var rlsClusterSpecifierConfigError = testutils.MarshalAny(&grpc_lookup_v1.RouteLookupClusterSpecifier{ + RouteLookupConfig: &grpc_lookup_v1.RouteLookupConfig{ + GrpcKeybuilders: []*grpc_lookup_v1.GrpcKeyBuilder{ + { + Names: []*grpc_lookup_v1.GrpcKeyBuilder_Name{ + { + Service: "service", + Method: "method", + }, + }, + Headers: []*grpc_lookup_v1.NameMatcher{ + { + Key: "k1", + RequiredMatch: true, + Names: []string{"v1"}, + }, + }, + }, + }, + }, +}) + +// Corresponds to the rls unit test case in +// balancer/rls/internal/config_test.go. +var rlsClusterSpecifierConfigWithoutTransformations = testutils.MarshalAny(&grpc_lookup_v1.RouteLookupClusterSpecifier{ + RouteLookupConfig: &grpc_lookup_v1.RouteLookupConfig{ + GrpcKeybuilders: []*grpc_lookup_v1.GrpcKeyBuilder{ + { + Names: []*grpc_lookup_v1.GrpcKeyBuilder_Name{ + { + Service: "service", + Method: "method", + }, + }, + Headers: []*grpc_lookup_v1.NameMatcher{ + { + Key: "k1", + Names: []string{"v1"}, + }, + }, + }, + }, + LookupService: "target", + LookupServiceTimeout: &durationpb.Duration{Seconds: 100}, + MaxAge: &durationpb.Duration{Seconds: 60}, + StaleAge: &durationpb.Duration{Seconds: 50}, + CacheSizeBytes: 1000, + DefaultTarget: "passthrough:///default", + }, +}) + +var configWithoutTransformationsWant = clusterspecifier.BalancerConfig{{"rls_experimental": &lbConfigJSON{ + RouteLookupConfig: []byte(`{"grpcKeybuilders":[{"names":[{"service":"service","method":"method"}],"headers":[{"key":"k1","names":["v1"]}]}],"lookupService":"target","lookupServiceTimeout":"100s","maxAge":"60s","staleAge":"50s","cacheSizeBytes":"1000","defaultTarget":"passthrough:///default"}`), + ChildPolicy: []map[string]json.RawMessage{ + { + "cds_experimental": []byte(`{}`), + }, + }, + ChildPolicyConfigTargetFieldName: "cluster", +}}} From ce152f209be66f88c2d4d75be5829347733e5270 Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Wed, 15 Dec 2021 15:19:36 -0800 Subject: [PATCH 
371/998] xds: Add xds dependency to the fallback test client (#5062) --- interop/grpclb_fallback/client_linux.go | 1 + 1 file changed, 1 insertion(+) diff --git a/interop/grpclb_fallback/client_linux.go b/interop/grpclb_fallback/client_linux.go index c9b25a894b30..b5a16bf22bf1 100644 --- a/interop/grpclb_fallback/client_linux.go +++ b/interop/grpclb_fallback/client_linux.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/google" + _ "google.golang.org/grpc/xds/googledirectpath" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" From 51835dc0912b7a0b894bc0fdbfce167aea88f4cb Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Thu, 16 Dec 2021 14:39:38 -0800 Subject: [PATCH 372/998] xds: Remove WithBlock option from the fallback test client (#5066) --- interop/grpclb_fallback/client_linux.go | 1 - 1 file changed, 1 deletion(-) diff --git a/interop/grpclb_fallback/client_linux.go b/interop/grpclb_fallback/client_linux.go index b5a16bf22bf1..152bb01a7b5f 100644 --- a/interop/grpclb_fallback/client_linux.go +++ b/interop/grpclb_fallback/client_linux.go @@ -99,7 +99,6 @@ func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { func createTestConn() *grpc.ClientConn { opts := []grpc.DialOption{ grpc.WithContextDialer(dialTCPUserTimeout), - grpc.WithBlock(), } switch *customCredentialsType { case "tls": From c285fc70e095eccc98d79b9a133e1e328141aefd Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 20 Dec 2021 15:40:38 -0800 Subject: [PATCH 373/998] cmd/protoc-gen-go-grpc: update version for release (#5070) --- balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go | 2 +- channelz/grpc_channelz_v1/channelz_grpc.pb.go | 2 +- cmd/protoc-gen-go-grpc/main.go | 2 +- credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go | 2 +- 
examples/features/proto/echo/echo_grpc.pb.go | 2 +- examples/helloworld/helloworld/helloworld_grpc.pb.go | 2 +- examples/route_guide/routeguide/route_guide_grpc.pb.go | 2 +- health/grpc_health_v1/health_grpc.pb.go | 2 +- internal/proto/grpc_lookup_v1/rls_grpc.pb.go | 2 +- interop/grpc_testing/benchmark_service_grpc.pb.go | 2 +- interop/grpc_testing/report_qps_scenario_service_grpc.pb.go | 2 +- interop/grpc_testing/test_grpc.pb.go | 2 +- interop/grpc_testing/worker_service_grpc.pb.go | 2 +- profiling/proto/service_grpc.pb.go | 2 +- reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go | 2 +- reflection/grpc_testing/test_grpc.pb.go | 2 +- stress/grpc_testing/metrics_grpc.pb.go | 2 +- test/grpc_testing/test_grpc.pb.go | 2 +- 18 files changed, 18 insertions(+), 18 deletions(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 50cc9da4a907..cb4b3c203c51 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/channelz/grpc_channelz_v1/channelz_grpc.pb.go b/channelz/grpc_channelz_v1/channelz_grpc.pb.go index ee425c219940..d8803d011d89 100644 --- a/channelz/grpc_channelz_v1/channelz_grpc.pb.go +++ b/channelz/grpc_channelz_v1/channelz_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/channelz/v1/channelz.proto diff --git a/cmd/protoc-gen-go-grpc/main.go b/cmd/protoc-gen-go-grpc/main.go index 7f104da7d068..58cde2eb6df2 100644 --- a/cmd/protoc-gen-go-grpc/main.go +++ b/cmd/protoc-gen-go-grpc/main.go @@ -38,7 +38,7 @@ import ( "google.golang.org/protobuf/types/pluginpb" ) -const version = "1.1.0" +const version = "1.2.0" var requireUnimplemented *bool diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index a02c4582815d..fd55176b9b69 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/gcp/handshaker.proto diff --git a/examples/features/proto/echo/echo_grpc.pb.go b/examples/features/proto/echo/echo_grpc.pb.go index e1d24b1e8309..cf1ffe708118 100644 --- a/examples/features/proto/echo/echo_grpc.pb.go +++ b/examples/features/proto/echo/echo_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: examples/features/proto/echo/echo.proto diff --git a/examples/helloworld/helloworld/helloworld_grpc.pb.go b/examples/helloworld/helloworld/helloworld_grpc.pb.go index ae27dfa3cfee..b1423484d51b 100644 --- a/examples/helloworld/helloworld/helloworld_grpc.pb.go +++ b/examples/helloworld/helloworld/helloworld_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: examples/helloworld/helloworld/helloworld.proto diff --git a/examples/route_guide/routeguide/route_guide_grpc.pb.go b/examples/route_guide/routeguide/route_guide_grpc.pb.go index efa7c28ce6f5..32f7910a33dc 100644 --- a/examples/route_guide/routeguide/route_guide_grpc.pb.go +++ b/examples/route_guide/routeguide/route_guide_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: examples/route_guide/routeguide/route_guide.proto diff --git a/health/grpc_health_v1/health_grpc.pb.go b/health/grpc_health_v1/health_grpc.pb.go index bdc3ae284e7a..69f525d1baeb 100644 --- a/health/grpc_health_v1/health_grpc.pb.go +++ b/health/grpc_health_v1/health_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/health/v1/health.proto diff --git a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 39d79e13343e..3afbf8930f9d 100644 --- a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/lookup/v1/rls.proto diff --git a/interop/grpc_testing/benchmark_service_grpc.pb.go b/interop/grpc_testing/benchmark_service_grpc.pb.go index f4e4436e97e8..0e6f21e90286 100644 --- a/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/testing/benchmark_service.proto diff --git a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index 4bf3fce68ab1..f327326b0cae 100644 --- a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/testing/report_qps_scenario_service.proto diff --git a/interop/grpc_testing/test_grpc.pb.go b/interop/grpc_testing/test_grpc.pb.go index 137a1e98ce6e..9a39fdd95a9a 100644 --- a/interop/grpc_testing/test_grpc.pb.go +++ b/interop/grpc_testing/test_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/testing/test.proto diff --git a/interop/grpc_testing/worker_service_grpc.pb.go b/interop/grpc_testing/worker_service_grpc.pb.go index a97366df09a2..0c4e10a4072c 100644 --- a/interop/grpc_testing/worker_service_grpc.pb.go +++ b/interop/grpc_testing/worker_service_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/testing/worker_service.proto diff --git a/profiling/proto/service_grpc.pb.go b/profiling/proto/service_grpc.pb.go index a0656149bda1..218c6123c15f 100644 --- a/profiling/proto/service_grpc.pb.go +++ b/profiling/proto/service_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: profiling/proto/service.proto diff --git a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 7d05c14ebd89..4e6a6b1a857b 100644 --- a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: reflection/grpc_reflection_v1alpha/reflection.proto diff --git a/reflection/grpc_testing/test_grpc.pb.go b/reflection/grpc_testing/test_grpc.pb.go index 235b5d82484b..c9a461280088 100644 --- a/reflection/grpc_testing/test_grpc.pb.go +++ b/reflection/grpc_testing/test_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: reflection/grpc_testing/test.proto diff --git a/stress/grpc_testing/metrics_grpc.pb.go b/stress/grpc_testing/metrics_grpc.pb.go index 0730fad49a46..29fa088882de 100644 --- a/stress/grpc_testing/metrics_grpc.pb.go +++ b/stress/grpc_testing/metrics_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: stress/grpc_testing/metrics.proto diff --git a/test/grpc_testing/test_grpc.pb.go b/test/grpc_testing/test_grpc.pb.go index 76b3935620ca..b328eaa3b582 100644 --- a/test/grpc_testing/test_grpc.pb.go +++ b/test/grpc_testing/test_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: test/grpc_testing/test.proto From 956c5948555c16ea78d5b2afe1fef0d2699478a5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 21 Dec 2021 13:32:22 -0800 Subject: [PATCH 374/998] rls: minor config processing changes (#5061) --- balancer/rls/internal/builder.go | 6 +- balancer/rls/internal/config.go | 133 +++++++++++++-------------- balancer/rls/internal/config_test.go | 56 +++++------ 3 files changed, 91 insertions(+), 104 deletions(-) diff --git a/balancer/rls/internal/builder.go b/balancer/rls/internal/builder.go index 9707b08420df..d293ada31208 100644 --- a/balancer/rls/internal/builder.go +++ b/balancer/rls/internal/builder.go @@ -26,7 +26,7 @@ import ( const rlsBalancerName = "rls_experimental" func init() { - balancer.Register(&rlsBB{}) + balancer.Register(rlsBB{}) } // rlsBB helps build RLS load balancers and parse the service config to be @@ -35,11 +35,11 @@ type rlsBB struct{} // Name returns the name of the RLS LB policy and helps implement the // balancer.Balancer interface. -func (*rlsBB) Name() string { +func (rlsBB) Name() string { return rlsBalancerName } -func (*rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { // TODO(easwars): Fix this once the LB policy implementation is pulled in. 
return &rlsBalancer{} } diff --git a/balancer/rls/internal/config.go b/balancer/rls/internal/config.go index e3f261d4026c..e9e94cd2c449 100644 --- a/balancer/rls/internal/config.go +++ b/balancer/rls/internal/config.go @@ -25,42 +25,40 @@ import ( "net/url" "time" - "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/ptypes" durationpb "github.com/golang/protobuf/ptypes/duration" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/internal/pretty" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/protobuf/encoding/protojson" ) const ( - // This is max duration that we are willing to cache RLS responses. If the - // service config doesn't specify a value for max_age or if it specified a - // value greater that this, we will use this value instead. + // Default max_age if not specified (or greater than this value) in the + // service config. maxMaxAge = 5 * time.Minute - // If lookup_service_timeout is not specified in the service config, we use - // a default of 10 seconds. + // Upper limit for cache_size since we don't fully trust the service config. + maxCacheSize = 5 * 1024 * 1024 * 8 // 5MB in bytes + // Default lookup_service_timeout if not specified in the service config. defaultLookupServiceTimeout = 10 * time.Second - // This is set to the targetNameField in the child policy config during + // Default value for targetNameField in the child policy config during // service config validation. dummyChildPolicyTarget = "target_name_to_be_filled_in_later" ) -// lbConfig contains the parsed and validated contents of the -// loadBalancingConfig section of the service config. The RLS LB policy will -// use this to directly access config data instead of ploughing through proto -// fields. 
+// lbConfig is the internal representation of the RLS LB policy's config. type lbConfig struct { serviceconfig.LoadBalancingConfig + cacheSizeBytes int64 // Keep this field 64-bit aligned. kbMap keys.BuilderMap lookupService string lookupServiceTimeout time.Duration maxAge time.Duration staleAge time.Duration - cacheSizeBytes int64 defaultTarget string childPolicyName string @@ -88,7 +86,6 @@ func childPolicyConfigEqual(a, b map[string]json.RawMessage) bool { if len(b) != len(a) { return false } - for k, jsonA := range a { jsonB, ok := b[k] if !ok { @@ -109,37 +106,50 @@ type lbConfigJSON struct { ChildPolicyConfigTargetFieldName string } -// ParseConfig parses and validates the JSON representation of the service -// config and returns the loadBalancingConfig to be used by the RLS LB policy. -// -// Helps implement the balancer.ConfigParser interface. -// * childPolicy field: -// - must find a valid child policy with a valid config (the child policy must -// be able to parse the provided config successfully when we pass it a dummy -// target name in the target_field provided by the -// childPolicyConfigTargetFieldName field) -// * childPolicyConfigTargetFieldName field: +// When parsing a config update, the following validations are performed: +// - routeLookupConfig: +// - grpc_keybuilders field: +// - must have at least one entry +// - must not have two entries with the same `Name` +// - within each entry: +// - must have at least one `Name` +// - must not have a `Name` with the `service` field unset or empty +// - within each `headers` entry: +// - must not have `required_match` set +// - must not have `key` unset or empty +// - across all `headers`, `constant_keys` and `extra_keys` fields: +// - must not have the same `key` specified twice +// - no `key` must be the empty string +// - `lookup_service` field must be set and and must parse as a target URI +// - if `max_age` > 5m, it should be set to 5 minutes +// - if `stale_age` > `max_age`, ignore it +// - if 
`stale_age` is set, then `max_age` must also be set +// - ignore `valid_targets` field +// - `cache_size_bytes` field must have a value greater than 0, and if its +// value is greater than 5M, we cap it at 5M +// - childPolicy: +// - must find a valid child policy with a valid config +// - childPolicyConfigTargetFieldName: // - must be set and non-empty -func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) cfgJSON := &lbConfigJSON{} if err := json.Unmarshal(c, cfgJSON); err != nil { - return nil, fmt.Errorf("rls: json unmarshal failed for service config {%+v}: %v", string(c), err) + return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) } - // Unmarshal and validate contents of the RLS proto. - m := jsonpb.Unmarshaler{AllowUnknownFields: true} + m := protojson.UnmarshalOptions{DiscardUnknown: true} rlsProto := &rlspb.RouteLookupConfig{} - if err := m.Unmarshal(bytes.NewReader(cfgJSON.RouteLookupConfig), rlsProto); err != nil { - return nil, fmt.Errorf("rls: bad RouteLookupConfig proto {%+v}: %v", string(cfgJSON.RouteLookupConfig), err) + if err := m.Unmarshal(cfgJSON.RouteLookupConfig, rlsProto); err != nil { + return nil, fmt.Errorf("rls: bad RouteLookupConfig proto %+v: %v", string(cfgJSON.RouteLookupConfig), err) } lbCfg, err := parseRLSProto(rlsProto) if err != nil { return nil, err } - // Unmarshal and validate child policy configs. 
if cfgJSON.ChildPolicyConfigTargetFieldName == "" { - return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config {%+v}", string(c)) + return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config %+v", string(c)) } name, config, err := parseChildPolicyConfigs(cfgJSON.ChildPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) if err != nil { @@ -151,87 +161,74 @@ func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, return lbCfg, nil } -// parseRLSProto fetches relevant information from the RouteLookupConfig proto -// and validates the values in the process. -// -// The following validation checks are performed: -// ** grpc_keybuilders field: -// - must have at least one entry -// - must not have two entries with the same Name -// - must not have any entry with a Name with the service field unset or -// empty -// - must not have any entries without a Name -// - must not have a headers entry that has required_match set -// - must not have two headers entries with the same key within one entry -// ** lookup_service field: -// - must be set and non-empty and must parse as a target URI -// ** max_age field: -// - if not specified or is greater than maxMaxAge, it will be reset to -// maxMaxAge -// ** stale_age field: -// - if the value is greater than or equal to max_age, it is ignored -// - if set, then max_age must also be set -// ** valid_targets field: -// - will be ignored -// ** cache_size_bytes field: -// - must be greater than zero -// - TODO(easwars): Define a minimum value for this field, to be used when -// left unspecified func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { + // Validations specified on the `grpc_keybuilders` field are performed here. kbMap, err := keys.MakeBuilderMap(rlsProto) if err != nil { return nil, err } + // `lookup_service` field must be set and and must parse as a target URI. 
lookupService := rlsProto.GetLookupService() if lookupService == "" { - return nil, fmt.Errorf("rls: empty lookup_service in route lookup config {%+v}", rlsProto) + return nil, fmt.Errorf("rls: empty lookup_service in route lookup config %+v", rlsProto) } parsedTarget, err := url.Parse(lookupService) if err != nil { - // If the first attempt failed because of a missing scheme, try again - // with the default scheme. + // url.Parse() fails if scheme is missing. Retry with default scheme. parsedTarget, err = url.Parse(resolver.GetDefaultScheme() + ":///" + lookupService) if err != nil { - return nil, fmt.Errorf("rls: invalid target URI in lookup_service {%s}", lookupService) + return nil, fmt.Errorf("rls: invalid target URI in lookup_service %s", lookupService) } } if parsedTarget.Scheme == "" { parsedTarget.Scheme = resolver.GetDefaultScheme() } if resolver.Get(parsedTarget.Scheme) == nil { - return nil, fmt.Errorf("rls: unregistered scheme in lookup_service {%s}", lookupService) + return nil, fmt.Errorf("rls: unregistered scheme in lookup_service %s", lookupService) } lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) if err != nil { - return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in route lookup config {%+v}: %v", rlsProto, err) + return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in route lookup config %+v: %v", rlsProto, err) } if lookupServiceTimeout == 0 { lookupServiceTimeout = defaultLookupServiceTimeout } + + // Validations performed here: + // - if `max_age` > 5m, it should be set to 5 minutes + // - if `stale_age` > `max_age`, ignore it + // - if `stale_age` is set, then `max_age` must also be set maxAge, err := convertDuration(rlsProto.GetMaxAge()) if err != nil { - return nil, fmt.Errorf("rls: failed to parse max_age in route lookup config {%+v}: %v", rlsProto, err) + return nil, fmt.Errorf("rls: failed to parse max_age in route lookup config %+v: %v", rlsProto, err) } staleAge, err := 
convertDuration(rlsProto.GetStaleAge()) if err != nil { - return nil, fmt.Errorf("rls: failed to parse staleAge in route lookup config {%+v}: %v", rlsProto, err) + return nil, fmt.Errorf("rls: failed to parse staleAge in route lookup config %+v: %v", rlsProto, err) } if staleAge != 0 && maxAge == 0 { - return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in route lookup config {%+v}", rlsProto) + return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in route lookup config %+v", rlsProto) } if staleAge >= maxAge { - logger.Info("rls: stale_age {%v} is greater than max_age {%v}, ignoring it", staleAge, maxAge) + logger.Infof("rls: stale_age %v is not less than max_age %v, ignoring it", staleAge, maxAge) staleAge = 0 } if maxAge == 0 || maxAge > maxMaxAge { logger.Infof("rls: max_age in route lookup config is %v, using %v", maxAge, maxMaxAge) maxAge = maxMaxAge } + + // `cache_size_bytes` field must have a value greater than 0, and if its + // value is greater than 5M, we cap it at 5M cacheSizeBytes := rlsProto.GetCacheSizeBytes() if cacheSizeBytes <= 0 { - return nil, fmt.Errorf("rls: cache_size_bytes must be greater than 0 in route lookup config {%+v}", rlsProto) + return nil, fmt.Errorf("rls: cache_size_bytes must be set to a non-zero value: %+v", rlsProto) + } + if cacheSizeBytes > maxCacheSize { + logger.Info("rls: cache_size_bytes %v is too large, setting it to: %v", cacheSizeBytes, maxCacheSize) + cacheSizeBytes = maxCacheSize } return &lbConfig{ kbMap: kbMap, diff --git a/balancer/rls/internal/config_test.go b/balancer/rls/internal/config_test.go index 84da9fe426f2..733666c28e6e 100644 --- a/balancer/rls/internal/config_test.go +++ b/balancer/rls/internal/config_test.go @@ -25,25 +25,10 @@ import ( "testing" "time" - "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/grpclb" // grpclb for config parsing. _ "google.golang.org/grpc/internal/resolver/passthrough" // passthrough resolver. 
) -const balancerWithoutConfigParserName = "dummy_balancer" - -type dummyBB struct { - balancer.Builder -} - -func (*dummyBB) Name() string { - return balancerWithoutConfigParserName -} - -func init() { - balancer.Register(&dummyBB{}) -} - // testEqual reports whether the lbCfgs a and b are equal. This is to be used // only from tests. This ignores the keyBuilderMap field because its internals // are not exported, and hence not possible to specify in the want section of @@ -61,6 +46,7 @@ func testEqual(a, b *lbConfig) bool { childPolicyConfigEqual(a.childPolicyConfig, b.childPolicyConfig) } +// TestParseConfig verifies successful config parsing scenarios. func (s) TestParseConfig(t *testing.T) { childPolicyTargetFieldVal, _ := json.Marshal(dummyChildPolicyTarget) tests := []struct { @@ -68,14 +54,15 @@ func (s) TestParseConfig(t *testing.T) { input []byte wantCfg *lbConfig }{ - // This input validates a few cases: - // - A top-level unknown field should not fail. - // - An unknown field in routeLookupConfig proto should not fail. - // - lookupServiceTimeout is set to its default value, since it is not specified in the input. - // - maxAge is set to maxMaxAge since the value is too large in the input. - // - staleAge is ignore because it is higher than maxAge in the input. { - desc: "with transformations", + // This input validates a few cases: + // - A top-level unknown field should not fail. + // - An unknown field in routeLookupConfig proto should not fail. + // - lookupServiceTimeout is set to its default value, since it is not specified in the input. + // - maxAge is set to maxMaxAge since the value is too large in the input. + // - staleAge is ignore because it is higher than maxAge in the input. 
+ // - cacheSizeBytes is greater than the hard upper limit of 5MB + desc: "with transformations 1", input: []byte(`{ "top-level-unknown-field": "unknown-value", "routeLookupConfig": { @@ -87,7 +74,7 @@ func (s) TestParseConfig(t *testing.T) { "lookupService": ":///target", "maxAge" : "500s", "staleAge": "600s", - "cacheSizeBytes": 1000, + "cacheSizeBytes": 100000000, "defaultTarget": "passthrough:///default" }, "childPolicy": [ @@ -102,7 +89,7 @@ func (s) TestParseConfig(t *testing.T) { lookupServiceTimeout: 10 * time.Second, // This is the default value. maxAge: 5 * time.Minute, // This is max maxAge. staleAge: time.Duration(0), // StaleAge is ignore because it was higher than maxAge. - cacheSizeBytes: 1000, + cacheSizeBytes: maxCacheSize, defaultTarget: "passthrough:///default", childPolicyName: "grpclb", childPolicyTargetField: "service_name", @@ -147,7 +134,7 @@ func (s) TestParseConfig(t *testing.T) { }, } - builder := &rlsBB{} + builder := rlsBB{} for _, test := range tests { t.Run(test.desc, func(t *testing.T) { lbCfg, err := builder.ParseConfig(test.input) @@ -158,6 +145,7 @@ func (s) TestParseConfig(t *testing.T) { } } +// TestParseConfigErrors verifies config parsing failure scenarios. 
func (s) TestParseConfigErrors(t *testing.T) { tests := []struct { desc string @@ -223,7 +211,7 @@ func (s) TestParseConfigErrors(t *testing.T) { "lookupServiceTimeout" : "315576000001s" } }`), - wantErr: "bad Duration: time: invalid duration", + wantErr: "google.protobuf.Duration value out of range", }, { desc: "invalid max age", @@ -238,7 +226,7 @@ func (s) TestParseConfigErrors(t *testing.T) { "maxAge" : "315576000001s" } }`), - wantErr: "bad Duration: time: invalid duration", + wantErr: "google.protobuf.Duration value out of range", }, { desc: "invalid stale age", @@ -254,7 +242,7 @@ func (s) TestParseConfigErrors(t *testing.T) { "staleAge" : "315576000001s" } }`), - wantErr: "bad Duration: time: invalid duration", + wantErr: "google.protobuf.Duration value out of range", }, { desc: "invalid max age stale age combo", @@ -272,7 +260,7 @@ func (s) TestParseConfigErrors(t *testing.T) { wantErr: "rls: stale_age is set, but max_age is not in route lookup config", }, { - desc: "invalid cache size", + desc: "cache_size_bytes field is not set", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ @@ -282,10 +270,12 @@ func (s) TestParseConfigErrors(t *testing.T) { "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", - "staleAge" : "25s" - } + "staleAge" : "25s", + "defaultTarget": "passthrough:///default" + }, + "childPolicyConfigTargetFieldName": "service_name" }`), - wantErr: "rls: cache_size_bytes must be greater than 0 in route lookup config", + wantErr: "rls: cache_size_bytes must be set to a non-zero value", }, { desc: "no child policy", @@ -403,7 +393,7 @@ func (s) TestParseConfigErrors(t *testing.T) { }, } - builder := &rlsBB{} + builder := rlsBB{} for _, test := range tests { t.Run(test.desc, func(t *testing.T) { lbCfg, err := builder.ParseConfig(test.input) From 7506755eb1d5a4f5a9b8848ea62470ae5296b13b Mon Sep 17 00:00:00 2001 From: Shihao Xia Date: Wed, 22 Dec 2021 13:56:46 -0500 Subject: [PATCH 375/998] 
internal/resolver: fix possible goroutine leak in TestSafeConfigSelector (#5058) --- internal/resolver/config_selector_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/resolver/config_selector_test.go b/internal/resolver/config_selector_test.go index e1dae8bde27c..7a8a5dbd693f 100644 --- a/internal/resolver/config_selector_test.go +++ b/internal/resolver/config_selector_test.go @@ -112,7 +112,7 @@ func (s) TestSafeConfigSelector(t *testing.T) { cs1Done := false // set when cs2 is first called for dl := time.Now().Add(150 * time.Millisecond); !time.Now().After(dl); { - gotConfigChan := make(chan *RPCConfig) + gotConfigChan := make(chan *RPCConfig, 1) go func() { cfg, _ := scs.SelectConfig(testRPCInfo) gotConfigChan <- cfg From 8ae11f46222d4e470b7ecac5df2947da0bfbf01c Mon Sep 17 00:00:00 2001 From: Shitian Ni Date: Thu, 23 Dec 2021 04:34:33 +0900 Subject: [PATCH 376/998] grpc: minor improvement on WithInsecure() document (#5068) --- dialoptions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dialoptions.go b/dialoptions.go index 063f1e903c01..fe2f560d4240 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -304,7 +304,7 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use insecure.NewCredentials() instead. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. // Will be supported throughout 1.x. 
func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { From ec7cf6c9774e5dc58e56afa35ab8bd8d0d5a5407 Mon Sep 17 00:00:00 2001 From: Shihao Xia Date: Wed, 22 Dec 2021 15:41:46 -0500 Subject: [PATCH 377/998] grpclb: fix possible nil before conversion in TestDropRequest (#5022) --- balancer/grpclb/grpclb_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 1807edb9cb71..5f90a3827ee3 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -656,7 +656,7 @@ func (s) TestDropRequest(t *testing.T) { for i := 0; i < 3; i++ { var p peer.Peer if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want { t.Errorf("got peer: %v, want peer port: %v", p.Addr, want) @@ -667,7 +667,7 @@ func (s) TestDropRequest(t *testing.T) { } if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want { t.Errorf("got peer: %v, want peer port: %v", p.Addr, want) From b3d19efee6ceac2ccaee1b16e6ef249d461eae44 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 22 Dec 2021 13:50:56 -0800 Subject: [PATCH 378/998] rls: move the data cache implementation into the rls package (#5060) --- balancer/rls/internal/cache.go | 404 ++++++++++++++++++++++ balancer/rls/internal/cache/cache.go | 244 ------------- balancer/rls/internal/cache/cache_test.go | 262 -------------- balancer/rls/internal/cache_test.go | 276 +++++++++++++++ balancer/rls/internal/child_policy.go | 112 ++++++ 5 files changed, 
792 insertions(+), 506 deletions(-) create mode 100644 balancer/rls/internal/cache.go delete mode 100644 balancer/rls/internal/cache/cache.go delete mode 100644 balancer/rls/internal/cache/cache_test.go create mode 100644 balancer/rls/internal/cache_test.go create mode 100644 balancer/rls/internal/child_policy.go diff --git a/balancer/rls/internal/cache.go b/balancer/rls/internal/cache.go new file mode 100644 index 000000000000..527b9b278a1f --- /dev/null +++ b/balancer/rls/internal/cache.go @@ -0,0 +1,404 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "container/list" + "time" + + "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" +) + +// TODO(easwars): Remove this once all RLS code is merged. +//lint:file-ignore U1000 Ignore all unused code, not all code is merged yet. + +// cacheKey represents the key used to uniquely identify an entry in the data +// cache and in the pending requests map. +type cacheKey struct { + // path is the full path of the incoming RPC request. + path string + // keys is a stringified version of the RLS request key map built using the + // RLS keyBuilder. Since maps are not a type which is comparable in Go, it + // cannot be part of the key for another map (entries in the data cache and + // pending requests map are stored in maps). 
+ keys string +} + +// cacheEntry wraps all the data to be stored in a data cache entry. +type cacheEntry struct { + // childPolicyWrappers contains the list of child policy wrappers + // corresponding to the targets returned by the RLS server for this entry. + childPolicyWrappers []*childPolicyWrapper + // headerData is received in the RLS response and is to be sent in the + // X-Google-RLS-Data header for matching RPCs. + headerData string + // expiryTime is the absolute time at which this cache entry entry stops + // being valid. When an RLS request succeeds, this is set to the current + // time plus the max_age field from the LB policy config. + expiryTime time.Time + // staleTime is the absolute time after which this cache entry will be + // proactively refreshed if an incoming RPC matches this entry. When an RLS + // request succeeds, this is set to the current time plus the stale_age from + // the LB policy config. + staleTime time.Time + // earliestEvictTime is the absolute time before which this entry should not + // be evicted from the cache. When a cache entry is created, this is set to + // the current time plus a default value of 5 seconds. This is required to + // make sure that a new entry added to the cache is not evicted before the + // RLS response arrives (usually when the cache is too small). + earliestEvictTime time.Time + + // status stores the RPC status of the previous RLS request for this + // entry. Picks for entries with a non-nil value for this field are failed + // with the error stored here. + status error + // backoffState contains all backoff related state. When an RLS request + // succeeds, backoffState is reset. This state moves between the data cache + // and the pending requests map. + backoffState *backoffState + // backoffTime is the absolute time at which the backoff period for this + // entry ends. When an RLS request fails, this is set to the current time + // plus the backoff value returned by the backoffState. 
The backoff timer is + // also setup with this value. No new RLS requests are sent out for this + // entry until the backoff period ends. + // + // Set to zero time instant upon a successful RLS response. + backoffTime time.Time + // backoffExpiryTime is the absolute time at which an entry which has gone + // through backoff stops being valid. When an RLS request fails, this is + // set to the current time plus twice the backoff time. The cache expiry + // timer will only delete entries for which both expiryTime and + // backoffExpiryTime are in the past. + // + // Set to zero time instant upon a successful RLS response. + backoffExpiryTime time.Time + + // size stores the size of this cache entry. Used to enforce the cache size + // specified in the LB policy configuration. + size int64 + // onEvict is the callback to be invoked when this cache entry is evicted. + onEvict func() +} + +// backoffState wraps all backoff related state associated with a cache entry. +type backoffState struct { + // retries keeps track of the number of RLS failures, to be able to + // determine the amount of time to backoff before the next attempt. + retries int + // bs is the exponential backoff implementation which returns the amount of + // time to backoff, given the number of retries. + bs backoff.Strategy + // timer fires when the backoff period ends and incoming requests after this + // will trigger a new RLS request. + timer *time.Timer +} + +// lru is a cache implementation with a least recently used eviction policy. +// Internally it uses a doubly linked list, with the least recently used element +// at the front of the list and the most recently used element at the back of +// the list. The value stored in this cache will be of type `cacheKey`. +// +// It is not safe for concurrent access. +type lru struct { + ll *list.List + + // A map from the value stored in the lru to its underlying list element is + // maintained to have a clean API. 
Without this, a subset of the lru's API + // would accept/return cacheKey while another subset would accept/return + // list elements. + m map[cacheKey]*list.Element +} + +// newLRU creates a new cache with a least recently used eviction policy. +func newLRU() *lru { + return &lru{ + ll: list.New(), + m: make(map[cacheKey]*list.Element), + } +} + +func (l *lru) addEntry(key cacheKey) { + e := l.ll.PushBack(key) + l.m[key] = e +} + +func (l *lru) makeRecent(key cacheKey) { + e := l.m[key] + l.ll.MoveToBack(e) +} + +func (l *lru) removeEntry(key cacheKey) { + e := l.m[key] + l.ll.Remove(e) + delete(l.m, key) +} + +func (l *lru) getLeastRecentlyUsed() cacheKey { + e := l.ll.Front() + if e == nil { + return cacheKey{} + } + return e.Value.(cacheKey) +} + +// iterateAndRun traverses the lru in least-recently-used order and calls the +// provided function for every element. +// +// Callers may delete the cache entry associated with the cacheKey passed into +// f, but they may not perform any other operation which reorders the elements +// in the lru. +func (l *lru) iterateAndRun(f func(cacheKey)) { + var next *list.Element + for e := l.ll.Front(); e != nil; e = next { + next = e.Next() + f(e.Value.(cacheKey)) + } +} + +// dataCache contains a cache of RLS data used by the LB policy to make routing +// decisions. +// +// The dataCache will be keyed by the request's path and keys, represented by +// the `cacheKey` type. It will maintain the cache keys in an `lru` and the +// cache data, represented by the `cacheEntry` type, in a native map. +// +// It is not safe for concurrent access. +type dataCache struct { + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. 
+ entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event +} + +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger) *dataCache { + return &dataCache{ + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), + } +} + +// resize changes the maximum allowed size of the data cache. +// +// The return value indicates if an entry with a valid backoff timer was +// evicted. This is important to the RLS LB policy which would send a new picker +// on the channel to re-process any RPCs queued as a result of this backoff +// timer. +func (dc *dataCache) resize(size int64) (backoffCancelled bool) { + if dc.shutdown.HasFired() { + return false + } + + backoffCancelled = false + for dc.currentSize > size { + key := dc.keys.getLeastRecentlyUsed() + entry, ok := dc.entries[key] + if !ok { + // This should never happen. + dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to resize it", key) + break + } + + // When we encounter a cache entry whose minimum expiration time is in + // the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases + // where the cache is too small, when we receive an RLS Response, we + // keep the resulting cache entry around long enough for the pending + // incoming requests to be re-processed through the new Picker. If we + // didn't do this, then we'd risk throwing away each RLS response as we + // receive it, in which case we would fail to actually route any of our + // incoming requests. + if entry.earliestEvictTime.After(time.Now()) { + dc.logger.Warningf("cachekey %+v is too recent to be evicted. Stopping cache resizing for now", key) + break + } + + // Stop the backoff timer before evicting the entry. 
+ if entry.backoffState != nil && entry.backoffState.timer != nil { + if entry.backoffState.timer.Stop() { + entry.backoffState.timer = nil + backoffCancelled = true + } + } + dc.deleteAndcleanup(key, entry) + } + dc.maxSize = size + return backoffCancelled +} + +// evictExpiredEntries sweeps through the cache and deletes expired entries. An +// expired entry is one for which both the `expiryTime` and `backoffExpiryTime` +// fields are in the past. +// +// The return value indicates if any expired entries were evicted. +// +// The LB policy invokes this method periodically to purge expired entries. +func (dc *dataCache) evictExpiredEntries() (evicted bool) { + if dc.shutdown.HasFired() { + return false + } + + evicted = false + dc.keys.iterateAndRun(func(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + // This should never happen. + dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to perform periodic cleanup of expired entries", key) + return + } + + // Only evict entries for which both the data expiration time and + // backoff expiration time fields are in the past. + now := time.Now() + if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { + return + } + evicted = true + dc.deleteAndcleanup(key, entry) + }) + return evicted +} + +// resetBackoffState sweeps through the cache and for entries with a backoff +// state, the backoff timer is cancelled and the backoff state is reset. The +// return value indicates if any entries were mutated in this fashion. +// +// The LB policy invokes this method when the control channel moves from READY +// to TRANSIENT_FAILURE back to READY. See `monitorConnectivityState` method on +// the `controlChannel` type for more details. 
+func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) (backoffReset bool) { + if dc.shutdown.HasFired() { + return false + } + + backoffReset = false + dc.keys.iterateAndRun(func(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + // This should never happen. + dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to perform periodic cleanup of expired entries", key) + return + } + + if entry.backoffState == nil { + return + } + if entry.backoffState.timer != nil { + entry.backoffState.timer.Stop() + entry.backoffState.timer = nil + } + entry.backoffState = &backoffState{bs: newBackoffState.bs} + entry.backoffTime = time.Time{} + entry.backoffExpiryTime = time.Time{} + backoffReset = true + }) + return backoffReset +} + +// addEntry adds a cache entry for the given key. +// +// Return value backoffCancelled indicates if a cache entry with a valid backoff +// timer was evicted to make space for the current entry. This is important to +// the RLS LB policy which would send a new picker on the channel to re-process +// any RPCs queued as a result of this backoff timer. +// +// Return value ok indicates if entry was successfully added to the cache. +func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled bool, ok bool) { + if dc.shutdown.HasFired() { + return false, false + } + + // Handle the extremely unlikely case that a single entry is bigger than the + // size of the cache. + if entry.size > dc.maxSize { + return false, false + } + dc.entries[key] = entry + dc.currentSize += entry.size + dc.keys.addEntry(key) + // If the new entry makes the cache go over its configured size, remove some + // old entries. + if dc.currentSize > dc.maxSize { + backoffCancelled = dc.resize(dc.maxSize) + } + return backoffCancelled, true +} + +// updateEntrySize updates the size of a cache entry and the current size of the +// data cache. An entry's size can change upon receipt of an RLS response. 
+func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { + dc.currentSize -= entry.size + entry.size = newSize + dc.currentSize += entry.size +} + +func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { + if dc.shutdown.HasFired() { + return nil + } + + entry, ok := dc.entries[key] + if !ok { + return nil + } + dc.keys.makeRecent(key) + return entry +} + +func (dc *dataCache) removeEntryForTesting(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + return + } + dc.deleteAndcleanup(key, entry) +} + +// deleteAndCleanup performs actions required at the time of deleting an entry +// from the data cache. +// - the entry is removed from the map of entries +// - current size of the data cache is update +// - the key is removed from the LRU +// - onEvict is invoked in a separate goroutine +func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { + delete(dc.entries, key) + dc.currentSize -= entry.size + dc.keys.removeEntry(key) + if entry.onEvict != nil { + go entry.onEvict() + } +} + +func (dc *dataCache) stop() { + dc.keys.iterateAndRun(func(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + // This should never happen. + dc.logger.Errorf("cacheKey %+v not found in the cache while shutting down", key) + return + } + dc.deleteAndcleanup(key, entry) + }) + dc.shutdown.Fire() +} diff --git a/balancer/rls/internal/cache/cache.go b/balancer/rls/internal/cache/cache.go deleted file mode 100644 index b975c3078fdb..000000000000 --- a/balancer/rls/internal/cache/cache.go +++ /dev/null @@ -1,244 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package cache provides an LRU cache implementation to be used by the RLS LB -// policy to cache RLS response data. -package cache - -import ( - "container/list" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" -) - -var logger = grpclog.Component("rls") - -// Key represents the cache key used to uniquely identify a cache entry. -type Key struct { - // Path is the full path of the incoming RPC request. - Path string - // KeyMap is a stringified version of the RLS request keys built using the - // RLS keyBuilder. Since map is not a Type which is comparable in Go, it - // cannot be part of the key for another map (the LRU cache is implemented - // using a native map type). - KeyMap string -} - -// Entry wraps all the data to be stored in a cache entry. -type Entry struct { - // Mu synchronizes access to this particular cache entry. The LB policy - // will also hold another mutex to synchronize access to the cache as a - // whole. To avoid holding the top-level mutex for the whole duration for - // which one particular cache entry is acted upon, we use this entry mutex. - Mu sync.Mutex - // ExpiryTime is the absolute time at which the data cached as part of this - // entry stops being valid. When an RLS request succeeds, this is set to - // the current time plus the max_age field from the LB policy config. An - // entry with this field in the past is not used to process picks. 
- ExpiryTime time.Time - // BackoffExpiryTime is the absolute time at which an entry which has gone - // through backoff stops being valid. When an RLS request fails, this is - // set to the current time plus twice the backoff time. The cache expiry - // timer will only delete entries for which both ExpiryTime and - // BackoffExpiryTime are in the past. - BackoffExpiryTime time.Time - // StaleTime is the absolute time after which this entry will be - // proactively refreshed if we receive a request for it. When an RLS - // request succeeds, this is set to the current time plus the stale_age - // from the LB policy config. - StaleTime time.Time - // BackoffTime is the absolute time at which the backoff period for this - // entry ends. The backoff timer is setup with this value. No new RLS - // requests are sent out for this entry until the backoff period ends. - BackoffTime time.Time - // EarliestEvictTime is the absolute time before which this entry should - // not be evicted from the cache. This is set to a default value of 5 - // seconds when the entry is created. This is required to make sure that a - // new entry added to the cache is not evicted before the RLS response - // arrives (usually when the cache is too small). - EarliestEvictTime time.Time - // CallStatus stores the RPC status of the previous RLS request for this - // entry. Picks for entries with a non-nil value for this field are failed - // with the error stored here. - CallStatus error - // Backoff contains all backoff related state. When an RLS request - // succeeds, backoff state is reset. - Backoff BackoffState - // HeaderData is received in an RLS response and is to be sent in the - // X-Google-RLS-Data header for matching RPCs. - HeaderData string - // ChildPicker is a very thin wrapper around the child policy wrapper. 
- // The type is declared as a Picker interface since the users of - // the cache only care about the picker provided by the child policy, and - // this makes it easy for testing. - ChildPicker balancer.Picker - - // size stores the size of this cache entry. Uses only a subset of the - // fields. See `entrySize` for this is computed. - size int64 - // key contains the cache key corresponding to this entry. This is required - // from methods like `removeElement` which only have a pointer to the - // list.Element which contains a reference to the cache.Entry. But these - // methods need the cache.Key to be able to remove the entry from the - // underlying map. - key Key -} - -// BackoffState wraps all backoff related state associated with a cache entry. -type BackoffState struct { - // Retries keeps track of the number of RLS failures, to be able to - // determine the amount of time to backoff before the next attempt. - Retries int - // Backoff is an exponential backoff implementation which returns the - // amount of time to backoff, given the number of retries. - Backoff backoff.Strategy - // Timer fires when the backoff period ends and incoming requests after - // this will trigger a new RLS request. - Timer *time.Timer - // Callback provided by the LB policy to be notified when the backoff timer - // expires. This will trigger a new picker to be returned to the - // ClientConn, to force queued up RPCs to be retried. - Callback func() -} - -// LRU is a cache with a least recently used eviction policy. It is not safe -// for concurrent access. -type LRU struct { - maxSize int64 - usedSize int64 - onEvicted func(Key, *Entry) - - ll *list.List - cache map[Key]*list.Element -} - -// NewLRU creates a cache.LRU with a size limit of maxSize and the provided -// eviction callback. -// -// Currently, only the cache.Key and the HeaderData field from cache.Entry -// count towards the size of the cache (other overhead per cache entry is not -// counted). 
The cache could temporarily exceed the configured maxSize because -// we want the entries to spend a configured minimum amount of time in the -// cache before they are LRU evicted (so that all the work performed in sending -// an RLS request and caching the response is not a total waste). -// -// The provided onEvited callback must not attempt to re-add the entry inline -// and the RLS LB policy does not have a need to do that. -// -// The cache package trusts the RLS policy (its only user) to supply a default -// minimum non-zero maxSize, in the event that the ServiceConfig does not -// provide a value for it. -func NewLRU(maxSize int64, onEvicted func(Key, *Entry)) *LRU { - return &LRU{ - maxSize: maxSize, - onEvicted: onEvicted, - ll: list.New(), - cache: make(map[Key]*list.Element), - } -} - -// Resize sets the size limit of the LRU to newMaxSize and removes older -// entries, if required, to comply with the new limit. -func (lru *LRU) Resize(newMaxSize int64) { - lru.maxSize = newMaxSize - lru.removeToFit(0) -} - -// TODO(easwars): If required, make this function more sophisticated. -func entrySize(key Key, value *Entry) int64 { - return int64(len(key.Path) + len(key.KeyMap) + len(value.HeaderData)) -} - -// removeToFit removes older entries from the cache to make room for a new -// entry of size newSize. -func (lru *LRU) removeToFit(newSize int64) { - now := time.Now() - for lru.usedSize+newSize > lru.maxSize { - elem := lru.ll.Back() - if elem == nil { - // This is a corner case where the cache is empty, but the new entry - // to be added is bigger than maxSize. - logger.Info("rls: newly added cache entry exceeds cache maxSize") - return - } - - entry := elem.Value.(*Entry) - if t := entry.EarliestEvictTime; !t.IsZero() && t.Before(now) { - // When the oldest entry is too new (it hasn't even spent a default - // minimum amount of time in the cache), we abort and allow the - // cache to grow bigger than the configured maxSize. 
- logger.Info("rls: LRU eviction finds oldest entry to be too new. Allowing cache to exceed maxSize momentarily") - return - } - lru.removeElement(elem) - } -} - -// Add adds a new entry to the cache. -func (lru *LRU) Add(key Key, value *Entry) { - size := entrySize(key, value) - elem, ok := lru.cache[key] - if !ok { - lru.removeToFit(size) - lru.usedSize += size - value.size = size - value.key = key - elem := lru.ll.PushFront(value) - lru.cache[key] = elem - return - } - - existing := elem.Value.(*Entry) - sizeDiff := size - existing.size - lru.removeToFit(sizeDiff) - value.size = size - elem.Value = value - lru.ll.MoveToFront(elem) - lru.usedSize += sizeDiff -} - -// Remove removes a cache entry wth key key, if one exists. -func (lru *LRU) Remove(key Key) { - if elem, ok := lru.cache[key]; ok { - lru.removeElement(elem) - } -} - -func (lru *LRU) removeElement(e *list.Element) { - entry := e.Value.(*Entry) - lru.ll.Remove(e) - delete(lru.cache, entry.key) - lru.usedSize -= entry.size - if lru.onEvicted != nil { - lru.onEvicted(entry.key, entry) - } -} - -// Get returns a cache entry with key key. -func (lru *LRU) Get(key Key) *Entry { - elem, ok := lru.cache[key] - if !ok { - return nil - } - lru.ll.MoveToFront(elem) - return elem.Value.(*Entry) -} diff --git a/balancer/rls/internal/cache/cache_test.go b/balancer/rls/internal/cache/cache_test.go deleted file mode 100644 index 7c480b64621e..000000000000 --- a/balancer/rls/internal/cache/cache_test.go +++ /dev/null @@ -1,262 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package cache - -import ( - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" -) - -const ( - defaultTestCacheSize = 5 - defaultTestCacheMaxSize = 1000000 - defaultTestTimeout = 1 * time.Second -) - -// TestGet verifies the Add and Get methods of cache.LRU. -func TestGet(t *testing.T) { - key1 := Key{Path: "/service1/method1", KeyMap: "k1=v1,k2=v2"} - key2 := Key{Path: "/service2/method2", KeyMap: "k1=v1,k2=v2"} - val1 := Entry{HeaderData: "h1=v1"} - val2 := Entry{HeaderData: "h2=v2"} - - tests := []struct { - desc string - keysToAdd []Key - valsToAdd []*Entry - keyToGet Key - wantEntry *Entry - }{ - { - desc: "Empty cache", - keyToGet: Key{}, - }, - { - desc: "Single entry miss", - keysToAdd: []Key{key1}, - valsToAdd: []*Entry{&val1}, - keyToGet: Key{}, - }, - { - desc: "Single entry hit", - keysToAdd: []Key{key1}, - valsToAdd: []*Entry{&val1}, - keyToGet: key1, - wantEntry: &val1, - }, - { - desc: "Multi entry miss", - keysToAdd: []Key{key1, key2}, - valsToAdd: []*Entry{&val1, &val2}, - keyToGet: Key{}, - }, - { - desc: "Multi entry hit", - keysToAdd: []Key{key1, key2}, - valsToAdd: []*Entry{&val1, &val2}, - keyToGet: key1, - wantEntry: &val1, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - lru := NewLRU(defaultTestCacheMaxSize, nil) - for i, key := range test.keysToAdd { - lru.Add(key, test.valsToAdd[i]) - } - opts := []cmp.Option{ - cmpopts.IgnoreInterfaces(struct{ sync.Locker }{}), - 
cmpopts.IgnoreUnexported(Entry{}), - } - if gotEntry := lru.Get(test.keyToGet); !cmp.Equal(gotEntry, test.wantEntry, opts...) { - t.Errorf("lru.Get(%+v) = %+v, want %+v", test.keyToGet, gotEntry, test.wantEntry) - } - }) - } -} - -// TestRemove verifies the Add and Remove methods of cache.LRU. -func TestRemove(t *testing.T) { - keys := []Key{ - {Path: "/service1/method1", KeyMap: "k1=v1,k2=v2"}, - {Path: "/service2/method2", KeyMap: "k1=v1,k2=v2"}, - {Path: "/service3/method3", KeyMap: "k1=v1,k2=v2"}, - } - - lru := NewLRU(defaultTestCacheMaxSize, nil) - for _, k := range keys { - lru.Add(k, &Entry{}) - } - for _, k := range keys { - lru.Remove(k) - if entry := lru.Get(k); entry != nil { - t.Fatalf("lru.Get(%+v) after a call to lru.Remove succeeds, should have failed", k) - } - } -} - -// TestExceedingSizeCausesEviction verifies the case where adding a new entry -// to the cache leads to eviction of old entries to make space for the new one. -func TestExceedingSizeCausesEviction(t *testing.T) { - evictCh := make(chan Key, defaultTestCacheSize) - onEvicted := func(k Key, _ *Entry) { - t.Logf("evicted key {%+v} from cache", k) - evictCh <- k - } - - keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} - keysCausingEviction := []Key{{Path: "f"}, {Path: "g"}, {Path: "h"}, {Path: "i"}, {Path: "j"}} - - lru := NewLRU(defaultTestCacheSize, onEvicted) - for _, key := range keysToFill { - lru.Add(key, &Entry{}) - } - - for i, key := range keysCausingEviction { - lru.Add(key, &Entry{}) - - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Test timeout waiting for eviction") - case k := <-evictCh: - timer.Stop() - if !cmp.Equal(k, keysToFill[i]) { - t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) - } - } - } -} - -// TestAddCausesMultipleEvictions verifies the case where adding one new entry -// causes the eviction of multiple old entries to make space for the new one. 
-func TestAddCausesMultipleEvictions(t *testing.T) { - evictCh := make(chan Key, defaultTestCacheSize) - onEvicted := func(k Key, _ *Entry) { - evictCh <- k - } - - keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} - keyCausingEviction := Key{Path: "abcde"} - - lru := NewLRU(defaultTestCacheSize, onEvicted) - for _, key := range keysToFill { - lru.Add(key, &Entry{}) - } - - lru.Add(keyCausingEviction, &Entry{}) - - for i := range keysToFill { - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Test timeout waiting for eviction") - case k := <-evictCh: - timer.Stop() - if !cmp.Equal(k, keysToFill[i]) { - t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) - } - } - } -} - -// TestModifyCausesMultipleEvictions verifies the case where mofiying an -// existing entry to increase its size leads to the eviction of older entries -// to make space for the new one. -func TestModifyCausesMultipleEvictions(t *testing.T) { - evictCh := make(chan Key, defaultTestCacheSize) - onEvicted := func(k Key, _ *Entry) { - evictCh <- k - } - - keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} - lru := NewLRU(defaultTestCacheSize, onEvicted) - for _, key := range keysToFill { - lru.Add(key, &Entry{}) - } - - lru.Add(keysToFill[len(keysToFill)-1], &Entry{HeaderData: "xxxx"}) - for i := range keysToFill[:len(keysToFill)-1] { - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Test timeout waiting for eviction") - case k := <-evictCh: - timer.Stop() - if !cmp.Equal(k, keysToFill[i]) { - t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) - } - } - } -} - -func TestLRUResize(t *testing.T) { - tests := []struct { - desc string - maxSize int64 - keysToFill []Key - newMaxSize int64 - wantEvictedKeys []Key - }{ - { - desc: "resize causes multiple evictions", - maxSize: 5, - keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: 
"e"}}, - newMaxSize: 3, - wantEvictedKeys: []Key{{Path: "a"}, {Path: "b"}}, - }, - { - desc: "resize causes no evictions", - maxSize: 50, - keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}}, - newMaxSize: 10, - wantEvictedKeys: []Key{}, - }, - { - desc: "resize to higher value", - maxSize: 5, - keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}}, - newMaxSize: 10, - wantEvictedKeys: []Key{}, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - var evictedKeys []Key - onEvicted := func(k Key, _ *Entry) { - evictedKeys = append(evictedKeys, k) - } - - lru := NewLRU(test.maxSize, onEvicted) - for _, key := range test.keysToFill { - lru.Add(key, &Entry{}) - } - lru.Resize(test.newMaxSize) - if !cmp.Equal(evictedKeys, test.wantEvictedKeys, cmpopts.EquateEmpty()) { - t.Fatalf("lru.Resize evicted keys {%v}, should have evicted {%v}", evictedKeys, test.wantEvictedKeys) - } - }) - } -} diff --git a/balancer/rls/internal/cache_test.go b/balancer/rls/internal/cache_test.go new file mode 100644 index 000000000000..cb9b060b59ae --- /dev/null +++ b/balancer/rls/internal/cache_test.go @@ -0,0 +1,276 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/backoff" +) + +var ( + cacheKeys = []cacheKey{ + {path: "0", keys: "a"}, + {path: "1", keys: "b"}, + {path: "2", keys: "c"}, + {path: "3", keys: "d"}, + {path: "4", keys: "e"}, + } + + longDuration = 10 * time.Minute + shortDuration = 1 * time.Millisecond + cacheEntries []*cacheEntry +) + +func initCacheEntries() { + // All entries have a dummy size of 1 to simplify resize operations. + cacheEntries = []*cacheEntry{ + { + // Entry is valid and minimum expiry time has not expired. + expiryTime: time.Now().Add(longDuration), + earliestEvictTime: time.Now().Add(longDuration), + size: 1, + }, + { + // Entry is valid and is in backoff. + expiryTime: time.Now().Add(longDuration), + backoffTime: time.Now().Add(longDuration), + backoffState: &backoffState{timer: time.NewTimer(longDuration)}, + size: 1, + }, + { + // Entry is valid, and not in backoff. + expiryTime: time.Now().Add(longDuration), + size: 1, + }, + { + // Entry is invalid. + expiryTime: time.Time{}.Add(shortDuration), + size: 1, + }, + { + // Entry is invalid valid and backoff has expired. + expiryTime: time.Time{}.Add(shortDuration), + backoffExpiryTime: time.Time{}.Add(shortDuration), + size: 1, + }, + } +} + +func (s) TestLRU_BasicOperations(t *testing.T) { + initCacheEntries() + // Create an LRU and add some entries to it. + lru := newLRU() + for _, k := range cacheKeys { + lru.addEntry(k) + } + + // Get the least recent entry. This should be the first entry we added. + if got, want := lru.getLeastRecentlyUsed(), cacheKeys[0]; got != want { + t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want) + } + + // Iterate through the slice of keys we added earlier, making them the most + // recent entry, one at a time. The least recent entry at that point should + // be the next entry from our slice of keys. 
+ for i, k := range cacheKeys { + lru.makeRecent(k) + + lruIndex := (i + 1) % len(cacheKeys) + if got, want := lru.getLeastRecentlyUsed(), cacheKeys[lruIndex]; got != want { + t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want) + } + } + + // Iterate through the slice of keys we added earlier, removing them one at + // a time The least recent entry at that point should be the next entry from + // our slice of keys, except for the last one because the lru will be empty. + for i, k := range cacheKeys { + lru.removeEntry(k) + + var want cacheKey + if i < len(cacheKeys)-1 { + want = cacheKeys[i+1] + } + if got := lru.getLeastRecentlyUsed(); got != want { + t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want) + } + } +} + +func (s) TestLRU_IterateAndRun(t *testing.T) { + initCacheEntries() + // Create an LRU and add some entries to it. + lru := newLRU() + for _, k := range cacheKeys { + lru.addEntry(k) + } + + // Iterate through the lru to make sure that entries are returned in the + // least recently used order. + var gotKeys []cacheKey + lru.iterateAndRun(func(key cacheKey) { + gotKeys = append(gotKeys, key) + }) + if !cmp.Equal(gotKeys, cacheKeys, cmp.AllowUnexported(cacheKey{})) { + t.Fatalf("lru.iterateAndRun returned %v, want %v", gotKeys, cacheKeys) + } + + // Make sure that removing entries from the lru while iterating through it + // is a safe operation. + lru.iterateAndRun(func(key cacheKey) { + lru.removeEntry(key) + }) + + // Check the lru internals to make sure we freed up all the memory. 
+ if len := lru.ll.Len(); len != 0 { + t.Fatalf("Number of entries in the lru's underlying list is %d, want 0", len) + } + if len := len(lru.m); len != 0 { + t.Fatalf("Number of entries in the lru's underlying map is %d, want 0", len) + } +} + +func (s) TestDataCache_BasicOperations(t *testing.T) { + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + for i, k := range cacheKeys { + entry := dc.getEntry(k) + if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) { + t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", k, entry, cacheEntries[i]) + } + } +} + +func (s) TestDataCache_AddForcesResize(t *testing.T) { + initCacheEntries() + dc := newDataCache(1, nil) + + // The first entry in cacheEntries has a minimum expiry time in the future. + // This entry would stop the resize operation since we do not evict entries + // whose minimum expiration time is in the future. So, we do not use that + // entry in this test. The entry being added has a running backoff timer. + evicted, ok := dc.addEntry(cacheKeys[1], cacheEntries[1]) + if evicted || !ok { + t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", evicted, ok) + } + + // Add another entry leading to the eviction of the above entry which has a + // running backoff timer. The first return value is expected to be true. + backoffCancelled, ok := dc.addEntry(cacheKeys[2], cacheEntries[2]) + if !backoffCancelled || !ok { + t.Fatalf("dataCache.addEntry() returned (%v, %v) want (true, true)", backoffCancelled, ok) + } + + // Add another entry leading to the eviction of the above entry which does not + // have a running backoff timer. This should evict the above entry, but the + // first return value is expected to be false. 
+ backoffCancelled, ok = dc.addEntry(cacheKeys[3], cacheEntries[3]) + if backoffCancelled || !ok { + t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", backoffCancelled, ok) + } +} + +func (s) TestDataCache_Resize(t *testing.T) { + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + + // The first cache entry (with a key of cacheKeys[0]) that we added has an + // earliestEvictTime in the future. As part of the resize operation, we + // traverse the cache in least recently used order, and this will be first + // entry that we will encounter. And since the earliestEvictTime is in the + // future, the resize operation will stop, leaving the cache bigger than + // what was asked for. + if dc.resize(1) { + t.Fatalf("dataCache.resize() returned true, want false") + } + if dc.currentSize != 5 { + t.Fatalf("dataCache.size is %d, want 5", dc.currentSize) + } + + // Remove the entry with earliestEvictTime in the future and retry the + // resize operation. + dc.removeEntryForTesting(cacheKeys[0]) + if !dc.resize(1) { + t.Fatalf("dataCache.resize() returned false, want true") + } + if dc.currentSize != 1 { + t.Fatalf("dataCache.size is %d, want 1", dc.currentSize) + } +} + +func (s) TestDataCache_EvictExpiredEntries(t *testing.T) { + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + + // The last two entries in the cacheEntries list have expired, and will be + // evicted. The first three should still remain in the cache. 
+ if !dc.evictExpiredEntries() { + t.Fatal("dataCache.evictExpiredEntries() returned false, want true") + } + if dc.currentSize != 3 { + t.Fatalf("dataCache.size is %d, want 3", dc.currentSize) + } + for i := 0; i < 3; i++ { + entry := dc.getEntry(cacheKeys[i]) + if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) { + t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", cacheKeys[i], entry, cacheEntries[i]) + } + } +} + +func (s) TestDataCache_ResetBackoffState(t *testing.T) { + type fakeBackoff struct { + backoff.Strategy + } + + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + + newBackoffState := &backoffState{bs: &fakeBackoff{}} + if updatePicker := dc.resetBackoffState(newBackoffState); !updatePicker { + t.Fatal("dataCache.resetBackoffState() returned updatePicker is false, want true") + } + + // Make sure that the entry with no backoff state was not touched. + if entry := dc.getEntry(cacheKeys[0]); cmp.Equal(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})) { + t.Fatal("dataCache.resetBackoffState() touched entries without a valid backoffState") + } + + // Make sure that the entry with a valid backoff state was reset. + entry := dc.getEntry(cacheKeys[1]) + if diff := cmp.Diff(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})); diff != "" { + t.Fatalf("unexpected diff in backoffState for cache entry after dataCache.resetBackoffState(): %s", diff) + } +} diff --git a/balancer/rls/internal/child_policy.go b/balancer/rls/internal/child_policy.go new file mode 100644 index 000000000000..2e25be6438e3 --- /dev/null +++ b/balancer/rls/internal/child_policy.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "fmt" + "sync/atomic" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +// TODO(easwars): Remove this once all RLS code is merged. +//lint:file-ignore U1000 Ignore all unused code, not all code is merged yet. + +// childPolicyWrapper is a reference counted wrapper around a child policy. +// +// The LB policy maintains a map of these wrappers keyed by the target returned +// by RLS. When a target is seen for the first time, a child policy wrapper is +// created for it and the wrapper is added to the child policy map. Each entry +// in the data cache holds references to the corresponding child policy +// wrappers. The LB policy also holds a reference to the child policy wrapper +// for the default target specified in the LB Policy Configuration +// +// When a cache entry is evicted, it releases references to the child policy +// wrappers that it contains. When all references have been released, the +// wrapper is removed from the child policy map and is destroyed. +// +// The child policy wrapper also caches the connectivity state and most recent +// picker from the child policy. Once the child policy wrapper reports +// TRANSIENT_FAILURE, it will continue reporting that state until it goes READY; +// transitions from TRANSIENT_FAILURE to CONNECTING are ignored. 
+// +// Whenever a child policy wrapper changes its connectivity state, the LB policy +// returns a new picker to the channel, since the channel may need to re-process +// the picks for queued RPCs. +// +// It is not safe for concurrent access. +type childPolicyWrapper struct { + logger *internalgrpclog.PrefixLogger + target string // RLS target corresponding to this child policy. + refCnt int // Reference count. + + // Balancer state reported by the child policy. The RLS LB policy maintains + // these child policies in a BalancerGroup. The state reported by the child + // policy is pushed to the state aggregator (which is also implemented by the + // RLS LB policy) and cached here. See handleChildPolicyStateUpdate() for + // details on how the state aggregation is performed. + // + // While this field is written to by the LB policy, it is read by the picker + // at Pick time. Making this an atomic to enable the picker to read this value + // without a mutex. + state unsafe.Pointer // *balancer.State +} + +// newChildPolicyWrapper creates a child policy wrapper for the given target, +// and is initialized with one reference and starts off in CONNECTING state. +func newChildPolicyWrapper(target string) *childPolicyWrapper { + c := &childPolicyWrapper{ + target: target, + refCnt: 1, + state: unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }), + } + c.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-child-policy-wrapper %s %p] ", c.target, c)) + c.logger.Infof("Created") + return c +} + +// acquireRef increments the reference count on the child policy wrapper. +func (c *childPolicyWrapper) acquireRef() { + c.refCnt++ +} + +// releaseRef decrements the reference count on the child policy wrapper. The +// return value indicates whether the released reference was the last one. 
+func (c *childPolicyWrapper) releaseRef() bool { + c.refCnt-- + return c.refCnt == 0 +} + +// lamify causes the child policy wrapper to return a picker which will always +// fail requests. This is used when the wrapper runs into errors when trying to +// build and parse the child policy configuration. +func (c *childPolicyWrapper) lamify(err error) { + c.logger.Warningf("Entering lame mode: %v", err) + atomic.StorePointer(&c.state, unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + })) +} From db9fdf706d400bfc4d54665e1f06e863ed407f45 Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Wed, 22 Dec 2021 23:28:13 +0100 Subject: [PATCH 379/998] insecure: remove experimental notice (#5069) --- credentials/insecure/insecure.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/credentials/insecure/insecure.go b/credentials/insecure/insecure.go index 22a8f996a68b..4fbed12565fd 100644 --- a/credentials/insecure/insecure.go +++ b/credentials/insecure/insecure.go @@ -18,11 +18,6 @@ // Package insecure provides an implementation of the // credentials.TransportCredentials interface which disables transport security. -// -// Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. 
package insecure import ( From 78df8ec077fdd65d752a70ba2527a6dac339aa8b Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Wed, 22 Dec 2021 15:54:08 -0800 Subject: [PATCH 380/998] test/kokoro: Use xds-test-server-5 as the GCE interop server (#5071) --- test/kokoro/xds.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index 63dad66d2bbd..0c12f0088b58 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -36,7 +36,7 @@ GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ --test_case="all,circuit_breaking,timeout,fault_injection,csds" \ --project_id=grpc-testing \ --project_num=830293263384 \ - --source_image=projects/grpc-testing/global/images/xds-test-server-4 \ + --source_image=projects/grpc-testing/global/images/xds-test-server-5 \ --path_to_server_binary=/java_server/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-server \ --gcp_suffix=$(date '+%s') \ --verbose \ From 4d58dd98db75e180ebf67cce127db5068ff7ab3e Mon Sep 17 00:00:00 2001 From: Steve Greene Date: Thu, 23 Dec 2021 13:56:58 -0500 Subject: [PATCH 381/998] dialoptions.go: Fix WithBlock godoc (#5073) --- dialoptions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dialoptions.go b/dialoptions.go index fe2f560d4240..c4bf09f9e940 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -272,7 +272,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. 
func WithBlock() DialOption { From 344b93a285883f2da713622d5064ad4b4512e63e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 23 Dec 2021 14:02:05 -0800 Subject: [PATCH 382/998] testdata: use SHA256 as signing algorithm in testdata certs (#5074) --- testdata/x509/client1_cert.pem | 56 ++++++++-------- testdata/x509/client1_key.pem | 98 +++++++++++++-------------- testdata/x509/client2_cert.pem | 54 +++++++-------- testdata/x509/client2_key.pem | 98 +++++++++++++-------------- testdata/x509/client_ca_cert.pem | 65 +++++++++--------- testdata/x509/client_ca_key.pem | 100 ++++++++++++++-------------- testdata/x509/multiple_uri_cert.pem | 56 ++++++++-------- testdata/x509/multiple_uri_key.pem | 100 ++++++++++++++-------------- testdata/x509/server1_cert.pem | 56 ++++++++-------- testdata/x509/server1_key.pem | 98 +++++++++++++-------------- testdata/x509/server2_cert.pem | 56 ++++++++-------- testdata/x509/server2_key.pem | 98 +++++++++++++-------------- testdata/x509/server_ca_cert.pem | 65 +++++++++--------- testdata/x509/server_ca_key.pem | 100 ++++++++++++++-------------- testdata/x509/spiffe_cert.pem | 56 ++++++++-------- testdata/x509/spiffe_key.pem | 100 ++++++++++++++-------------- 16 files changed, 629 insertions(+), 627 deletions(-) diff --git a/testdata/x509/client1_cert.pem b/testdata/x509/client1_cert.pem index 714136918f30..6f82cc3be84f 100644 --- a/testdata/x509/client1_cert.pem +++ b/testdata/x509/client1_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIwMDgwNDAyMDAwMFoXDTMwMDgwMjAyMDAw -MFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4NDI1MVoXDTMxMTIyMTE4NDI1 +MVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL 
BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAK3fSafgFyHediP0fonPcc/pH010l2jqryUNsEfr -PhxR//ccr7sBvbcInwvj3NJ9XqF4V4ws9h/QbPMLXg1FBcC/LpYjo6VZoNjuJLt2 -DTG2gGcTEL+4G2w/4ztrrmunLxa53P3URIgMgMYhCTIXK2enVbpy637X8WhPYOrq -w+NXnDaTwT8uLGfMVEAKNvXzf8Ras8OHjgTZJEpkgXVjREhUhOPszrBsyYKnI/f2 -QSDnvgSJbrkBLFRqluT/ciqccryBWy0qJOStVhha1I2tId+dvJsTgQa/NLBASbsU -LkIIUV375K0raINYeg/kA6MK6YDwcCtrVbQa8fu7drxxBiY3tSoDLVn1FYz7iTJ3 -PvtpwsGAqTEsSW3k7l2MTz3iuqcAgL8tI1CpyacwNPfy7j67mH3akY95sh2nmTVj -rsW2uuFSC/cc00bH+IMVZnztE7+fpgZvU63BVnf9d9TMgDe8kwMwbq7dFi9irr12 -8Szpbdnt028dgsrjpbOgPpMYJehRK2Q0I7+99cLeJa1V5ySeFhf+uhNpW9RDi/qp -TJGAG+rE3qAbVVoD9GrOispNZW7Hby4/q8pkNoafXmilqIf6mOri/88AYOMXbH4X -i8mJgIeN2AjJmEGVPBPM25ZjN+ZurWqfdSasXuiIJmJzw9ExcIEjzAjoMl9CNVVy -c77lAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFEdlWQy5/06l3GTu -rqJTuMgy4JuKMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD -AjANBgkqhkiG9w0BAQUFAAOCAgEAAmzrEprlWBxCQDzFZy5pZVIa1FniD+23qXlV -n0Fhhr0eF2udYR2tfzf0VM9WcBHHoRzX5fwNkGmIWiXAISgamMl4sHHZn6Ig0i9h -k9/fI4bYtrCiOqjYRG6VA8OZSD98bD+NtQEPQneO5F5buL0by4FUugu6Ls0Ovpk0 -yhb2pgKFhbFbMC6ev1AK9IpJZgz2q9/rjkJedGjnu35ze+94tw5Fe1FIIkg24ZQk -C4e4DzSpRz5s51LS+dS5hDGuvglWn7SrwGuGujz8iMQdAJa3WSP5WmjbuUFaD8pH -6afrjAhMZoWgxNubLkypkFUW/3W5JwTLnj5wPhPpBtHX6NQ30/FgN89j7+0Zp064 -i4Ur1ykhHgbdUb/EB28sXs+/CkmfmFx44M68yhoJ5euUzRF5gGmxSgRn3+RVsw2E -ju0YQBVvH8JjAt0XCi9SY+vCpe+EG0uV4HxEO6DDdSslsMkuiAeM7pvZTM3FbZyt -BXpWs/L71OF37ouUbt1TD+C1fsCUovjGi4AE0KXeO1rv4u2mTGfxtOOUFKt2dFDa -E1sjyJm1+WjDgIqNjbubM6zpvNtix0xaOXqg7MAt4OJnKAeRQELgJhe6Rt2ROKGq -Hoy8uIjcA26/lwclj2h7fwiKznlxqfDxVsiwmCTdJJb76w69UQvyIRY3tlJr3c3+ -O4VSONQ= +AQEBBQADggIPADCCAgoCggIBALUoje/J3uPOJ0dapY2s7mGLVPhYRaHyRnJE2/TY +zFOB0IisAF3R7BIDufQrHhk3fh0JazCw95TDD9rxsKEVs6Z50lmDkrg/bjlsniE/ +n+M1JacaLQW7xfh2L+Ei4jvMr101nAsimd6IxFU9m3+2SFbhPBG/GWWJ2ZKqQblz +DVMpNg9FYNmMe45vLevOhdPQBE4cVoAPhI9Je+P4Koslebhor0koUeQVeYdBbCq3 +3dQJPAHjBST6mD9mJI4yVrE3Xso3LO85WROUPhRYQyXhrgU15W6g9qTpMTfkriUe 
+FYLCtAPU9LBodyvjYLuwoEoyRVsA6Zh/vABteD8Afl552fV9KwN2fRVbTDAxQCp7 +P8gE3/rD1RKv7KBNJ/LrwMu7g4VO+tzYDxWee+eXPQ6M/zRWAb3E0v3UNHsF1ZBl +rlFhEiRShHrXDEKMQwCTSrRjwYajUpZ/Hq2USDgkLepKmTmCaoBfWHPyZwblqSTn +A4DNOh5N23eJyrLnJOPYjzZqEPfX5hDTjFRdVTQxtmYlJ1muwtlNyuwZDImhjO6G +54pPj/bV6gy1+YpIQBemPoXtqqmcRiEVWSV5zAizwRaWf85tqpxb1Tjuj2OpD9le +oO4JX0HLjhyQBoKspNohu2I4+s7ex/w92bf76cTpYTbMJqIp37YZmfPVztHVaMl4 +W0xRAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFMRdhhib+RS6IJpQ +zFsaKH1BNbyZMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAHyQwLSo/UdSoZCKcdeb0DCkeABRMUPIysroj +gQJCe3cAKOT+TxL6Qnp6jzM2/vt2T/lzlRp7RAB1jH3J4wy5re4cbCn1sbII3Rog +Nm4PKcw6hfUET44t1Gk9DsCjgvxEIirFBWVpxfn+YDI916iH1fkNURaMP+yxpQBL +3K4bmxanBiyBUHC8cyChLMD2NwXjOAA4pZFk0ohpmK0YUk4ra3Z3Q30DCH6NZ1ZP +aOMDHrCXU6MLlmPk8yiOnotgjqiYEgi3Bzxd/OHpR41Xo8k6g3UrN2GEQFs17ibQ +CQasxodOar5Vezu6ZKCYk5TaY4lugT34w+qxi8tVF54WY2jtWY5PUmU6ZT2Dw5cn +CQzlPUdEebOc1hltTvsD049/2lZmGlMXk0dykxy51jYAYznf2rb3cnC1vu1Wgi3w +J28xXBYD8AvME9jaJ6g3L+KR+AFCSLqpUsTxvu9zKf6pLrVtOCl+9G69uOK/wono +yMGNeel8rkzwzzr1LNrhmcKHqipkq83vqxIUT/mbpBUKO1ZXVG/TWKS6bpBTc4Pn +hBCIvGOSyoKuEiXnFr6fqLhLskUNcCNl7iOfA9h/MhS5ZufJXhhXu3Wbo/KC/mNh +y+fr1S9AyA+EJaYtJRKAOeewGvXYb881UNXWGCQU1aVNJnujRKFyhd07sEjxsad9 +Bn/aYes= -----END CERTIFICATE----- diff --git a/testdata/x509/client1_key.pem b/testdata/x509/client1_key.pem index b7a3930254fd..6cd652c55435 100644 --- a/testdata/x509/client1_key.pem +++ b/testdata/x509/client1_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEArd9Jp+AXId52I/R+ic9xz+kfTXSXaOqvJQ2wR+s+HFH/9xyv -uwG9twifC+Pc0n1eoXhXjCz2H9Bs8wteDUUFwL8uliOjpVmg2O4ku3YNMbaAZxMQ -v7gbbD/jO2uua6cvFrnc/dREiAyAxiEJMhcrZ6dVunLrftfxaE9g6urD41ecNpPB -Py4sZ8xUQAo29fN/xFqzw4eOBNkkSmSBdWNESFSE4+zOsGzJgqcj9/ZBIOe+BIlu -uQEsVGqW5P9yKpxyvIFbLSok5K1WGFrUja0h3528mxOBBr80sEBJuxQuQghRXfvk -rStog1h6D+QDowrpgPBwK2tVtBrx+7t2vHEGJje1KgMtWfUVjPuJMnc++2nCwYCp -MSxJbeTuXYxPPeK6pwCAvy0jUKnJpzA09/LuPruYfdqRj3myHaeZNWOuxba64VIL 
-9xzTRsf4gxVmfO0Tv5+mBm9TrcFWd/131MyAN7yTAzBurt0WL2KuvXbxLOlt2e3T -bx2CyuOls6A+kxgl6FErZDQjv731wt4lrVXnJJ4WF/66E2lb1EOL+qlMkYAb6sTe -oBtVWgP0as6Kyk1lbsdvLj+rymQ2hp9eaKWoh/qY6uL/zwBg4xdsfheLyYmAh43Y -CMmYQZU8E8zblmM35m6tap91Jqxe6IgmYnPD0TFwgSPMCOgyX0I1VXJzvuUCAwEA -AQKCAgEAidNL4aUC8TgU0h+HBtrHzxVuWMmpE+OkfmzBZeEV1QEzM8Erk8OnjSVq -XdR8QOZcUwa/7z/cwg9Hrck+/qnOC6IA3cbWe8X2eL8dovPLNbMDSbGVP0RDiKWE -DKApHPDjpNIkWZkf0fCHS4b4cRpor7u3exqJjnzCwfraSp1aNiZGkATD1L9XN9iC -mFkAhCpHB3EWulIDw9gUqlvNOy46/FLzHHGkzbkOa2DuZCpyKhFJUPNYL5K8fxYX -EuNirmBhmwe3LLARmqvEaX3mq3+oMEgrL4pgZua+b1AmogM3P+S0CxoXhSW5rRQ/ -fcUzFNUbj7gIUoK85w3M780ELBAz3F0j9cy1/DcidV0T8SAzKVrpiJvvK59XYzzn -3J4JFmAsZ0PYgkPhZyPY6hNysRFapPwJyNC+I1NVRpSNHifMsYNEX5dV4M6Qtmv4 -7QmtvUubpJ+vo75W0DNzQ8Ar4BaBVZ6YzKTW58/Ob9Y1o6knUJv/lElE9RLyJBrn -PgtFMPDjf2FzYaA45+zVtQBDk3rljLatS6WZxWg+qh+4RPQjS6sKzNB7U728oiZj -1PRMbeUGKAZDb6FWTZ5nlvai3Z1VDwmLdBBSACnUWLOhXqmnkWY0q9d3kSGnMih4 -Au1A2sCFhhoowoyEkbbmlvORDSo6jfqdYKxP2rUQV1DJBPepo6kCggEBANQzH//s -CTcB9fMSIpiEUmcFBBineA+uMXDbmNRzeJMoFwSxt7TXQpVqfMOXTaa4aDuCSCNX -VLIf10aP4Myx9A0i+t4A9IAn9Ps+wCu5Yb43XmiEc2PC/AZYuviYfP/rIptTS0o8 -z8zAc1cLdDYBww76DcKdagAQABZQaqPARlGEHAvqmr5fjR0oWfcGeEvzqdv7WbGf -9nyuAWl1ldMmILysW0GRDudFhp5rit6A3uCq7LB5Qb14dGrek5k+y8lnzjp30r0O -9QxUuxZVuvh4ujiDnQI5tVWbhD/jgIUF91Nm/Vw0bZMdcp0iA9r8EGmFaHNi0by7 -rMw/6Pqcxd75qP8CggEBANHC5PZLyZEmt6C/c1ohTIZtf2g5ghXSHARfwMvRTVJ5 -4HksZp/FQSe3080l4yACXDpfxJ6pm5WNFNhy4Xi09wkEIWk+bSOqBgk+DvItgqkY -em3q1EUUdhzIB3OXqWRcpgmc78hLiD33GkCTM9BR6W2Q/5TY7o5ULOjkiDKiVL+r -+juFlXQtUTOak0Mwu1RRDQE6z96N5Ffg2rHxjNu1HxQK7OsSfc/lrwOyqnXaB7kR -7CThI4xpSmtyMq4prxehM1YhKk2rJmT4hW+M636uyxZCBg1Aoqqnoxv0sQTHH6k/ -RU1+ZU38RYLzid2qNBom86RS1fWX60H3CH4EX3AVFBsCggEAFMOv9O4W9MAHXjK/ -GeeQ3K3b+cGheP9VrTJ/4QIvoU7B+d6eGF8cD9zsuoL6wT64TGJyRqsMCaYd/bSk -jcM4G3T50XGMe2HtkgxQ57ZrPx7R6S5U0EVLPh++pAbf7HcI2uQqsOgEeYe3gaQI -SiSf/r4vTIT00269Y3GZDc8J0n439F6Pp+NXvqutKgQDD4OXcoRFAaGikA7C6pvr -/k5z06KWB3N3XuApzSS+4QkBRkDTim1DJpQ76B1BmjRP4rR6tLP29jMZfYxpBkV7 
-V0cRCeivG4GkIe1m4o2TjPDJg+rHDhe/RS8TgRbMA8i4nmrEjs3zsiE3RoFWffeL -UUdi5wKCAQB86+rb26rBbSNy8lHKXYZrkI6ODaGxSR4yZKw3NgEsmzTaNV0wzZLO -CqZyyJuJFp7CjQJV04C7AfhmJ5SsBGoSzojvWqQ41ysdGf5gsEXeWpufFnkwYs0s -utvlNW9GO/8OPo525LTQ4naZ+pCjAgVYoT/073SzAuJ0GJYcQZzjQZKXHCkztUFk -0CvfmggWYOaz0si1LB/PTjQwQUC4IBfQIemS3cJbq9gdBayK3zw2NbxDAmnfV11g -u/P+0QhbtD8Ujk/ZTZJiE7e0BWLCYWrFaLCd995ob8mt/n3l8IikjO/DBQFj/leP -c2apwpGg+Y2kUUjnKICNGofONOB5qbP9AoIBAQCSKpGUVqnsb0PSqjrhr8B1VFvS -4MZfe0ds6/GrB02D7owHPhPaSJsXhBXVri/ECSx2WripMujbZ3tZH4IPub848PYv -9668O1RxKRkyoknyUn5TO58dhYbp3VO7P7EqfVfqEezyQ8bDfVGxrIbMA+kXJosi -T052e3yNin6Q1r+R3cWCg0dHBGDCCkpKdD861LkYjfyipw+u8c4O+CefTHvd8XV8 -EXGn+NBBAPG42bBsJMa+P/1k9qJbflbUfQy/lPGxMspVD8xwWWeEOJEFTgGmoLWE -cNtabvDCEiQ6+DjBBE2Cl656MjX9uv0Dn830so/PLr6FWK0JSy9sGIRTrPC/ +MIIJKgIBAAKCAgEAtSiN78ne484nR1qljazuYYtU+FhFofJGckTb9NjMU4HQiKwA +XdHsEgO59CseGTd+HQlrMLD3lMMP2vGwoRWzpnnSWYOSuD9uOWyeIT+f4zUlpxot +BbvF+HYv4SLiO8yvXTWcCyKZ3ojEVT2bf7ZIVuE8Eb8ZZYnZkqpBuXMNUyk2D0Vg +2Yx7jm8t686F09AEThxWgA+Ej0l74/gqiyV5uGivSShR5BV5h0FsKrfd1Ak8AeMF +JPqYP2YkjjJWsTdeyjcs7zlZE5Q+FFhDJeGuBTXlbqD2pOkxN+SuJR4VgsK0A9T0 +sGh3K+Ngu7CgSjJFWwDpmH+8AG14PwB+XnnZ9X0rA3Z9FVtMMDFAKns/yATf+sPV +Eq/soE0n8uvAy7uDhU763NgPFZ5755c9Doz/NFYBvcTS/dQ0ewXVkGWuUWESJFKE +etcMQoxDAJNKtGPBhqNSln8erZRIOCQt6kqZOYJqgF9Yc/JnBuWpJOcDgM06Hk3b +d4nKsuck49iPNmoQ99fmENOMVF1VNDG2ZiUnWa7C2U3K7BkMiaGM7obnik+P9tXq +DLX5ikhAF6Y+he2qqZxGIRVZJXnMCLPBFpZ/zm2qnFvVOO6PY6kP2V6g7glfQcuO +HJAGgqyk2iG7Yjj6zt7H/D3Zt/vpxOlhNswmoinfthmZ89XO0dVoyXhbTFECAwEA +AQKCAgEAjtzrijWVy+sQuMm4k1DUMSKzIKJkT4GDoqvBFoc+I4DVVmLmaxaYZ+B+ +bhruwo4rq3R5Ds4QgUWPJGfDllVJ9rhNdYA4XYrQPwL0dV36ljCcf/o5lTLuvbFe +stpStTwG86fKZlGkLIWI53wNPBshUzqOp6QfwB6E8Y/JAxnDYVi3pDVfWlDaQ4pU +GYklqtN6AauBX75dGK6nwDE+Q7uLES2lRjlA03FIBK1IQyv7CTM7GnXQ4cep9x1z +KJx0F4+F9kyq6AE+yRz4FA1C7wXZuYw2YhcYSxcHVH/IAceGyTcIxZjUWqYXjQnk +iD+TONAKN+kxTq01MtUhpfWasqC/i+6QU1eqf5YWpd6GsRKyrGgO02NND/SM6Z3V ++S9og4QAjdUyc8dkN+udd1K1CeYNFbmhrYpF2aS9k/PjDP3L137hDW6Cy+thIjZP 
+u9OB6ba2yUrbQDlmkCbh0vX+77HKAbT5bj8h9r7MqzNsPsgkaKS8gZ79T/Whr/ft +Xiu+eo/u1jtjwUjNMKGxQ9XiU2UU7QccthHHLcYaiv4eySHXA75h+Sho9cD1Vvs/ +ms1/nbCSuU9TSK0UK/V8YjeDA0eVGtDCX3weIW2ECQ80SoT7uf+fhjaLkvOadb7f +1O9DvYVYZvblxUm8ajOh+/n9VyB/I9R9Q8GdGiauXy16uXLZMdECggEBAPEx+4aR +XfuXmnEoPk3GpvyrRlrMy02aklrATKcxeAYo2uiIcuQv65E3AmE1GHpoxmAKjLty +fuUfGdT7f4uGeF6p+IEkW4jQm56UFbCdun9kduEaN9FRylTBqUKWIY2rtRS6nHZ8 +bAkL/6Uv3g9NWx95rV7HnAfC2n6AIvc8LRfQVVqSvjPbsEPvJAT2353D0Rb7vC2M +1hKeBrSNBiy57EKnrMDOhNpBvSBU0Zc+YsBRNAimKyBz7dt35H+THkFaEk9vGtG0 +QkDvngPzSX99Ojwk2mo9jGrh7LHErWih5C73IfvYUh3kyEwbZ5y25i9Z0F37boIG +jHSVvcPp+9x9PNUCggEBAMBHLyhBUAQVZFXtWysr0BjO34XffgkSt1XQa8cVxif7 +glWauUZtjfC7PT/qgY0mx2dI2bDcKiQQCBlVavP1RLRwj3rZv23eit7z13UgHSa6 +3dnsgpO2Zux6qoV48lO4xbuFqZtW+MP+9jthKwr95r8lmZ4cmGQwXXcqNsR7skFt +30Uhcyn+MTfyLwcqt8g9i98rrJmbPAuIME/Sz9DLIi6UxQLI6MeEn92AzECNDp18 +CypOL+sDrLw/7HNHNoSblgm628BHpBgT2qaOYnawRr0gni7MHXOAbDopKYDAtLuU +ZMFjlILdfiSDouhvKtMlZG9arTB0TasdAQJGPz53H40CggEBAJ4JDvJsOzVHb2Vn +ZfNWD0INA0spVqhheDXIPDFsg2UdzdmA1i7XizUZ4xBIVuKV1i1FnFKRwb1ktGtN +4pNMJ4B3RCFx7hvl+6FbDB8uKe2gqRfzMtGPEtCYF8xOTGvkLwEHCM/F1I/U8cuN +YqWKHQOxmTw58+1N6hXq5X4zSqSI1/RBpCiccJEClwo9q+VWUaEKjpEV74pBSslw +gbQ6mihOby3h40CSxFXz3WSI9vFmA38LScS40Qf1NZ21iqRtXQP5G4x93M9pcZLL +DMRhDBAuYYItE91QbONJqAmf0cBII1c9tQhrSCY96pTPbmFmKtX5kb3Whp85Ih7F +KEafNIUCggEBALMnoIDZmjyz0fFeX3wyLotu9kY+n6jEj56dvE6bsy694grxR4Cf +w4lybPeJAX0LjPBnqK5p9bn0VheEx0rYVVPrLUVCbmNo3+wtN6wiaAcWRnAvNtt7 +MRtWkFwc/W2U1GiNeiMLPm8guT1KpFhxiva/igsQic2QYwYNh0o8FzNvtIEtUajm +9+Uw+zCqVON2tUUT5JabVa9JDfrSamAZZZgRdh/KI1sD8BDrWWUsCVojoiOhBnTr +z5730ND4oYudjIc0XF0kY3krxqc6M/Ry+vZt1fW0qhxcpHrsr4cQB1ZgRiELL+1f +g5FyNfBs5HIofRRkYMqtE1FEjRQZcAQ76mECggEAaOUtM9BZuV9gEwmG4hmFfeXq +vJOMvlsDkRRbLuDQ1B8Vw3v7lt1+K+KfBt96MoQe08MyXM7sIMB+hn+zakNaM2W6 +UzTnAPQQAo+wELqj6U3DrV7zw7I1hZTA9G7qxMAQBEmk3u2q4/zWDAcyAx3D9JVj +L3G14pYf0drFLChnknVTPRaF0Q5upLYzCPLMa9w0FLKy6fkfdWdpzyjvW7+JEeFY +koA98hrottqJB2CcqehQDSCUHKKbd5U15y1NV1BQloaPJLwpPAVTkBszQSHanltN 
+l9POJBJlfQ1eWL88wHdKiLbtOg6PTfAmfghIRxakjHvxBgFO1/xG6Lxm7QwUDQ== -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/client2_cert.pem b/testdata/x509/client2_cert.pem index 6777622f891b..2cffdeafb590 100644 --- a/testdata/x509/client2_cert.pem +++ b/testdata/x509/client2_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIwMDgwNDAyMDAwMloXDTMwMDgwMjAyMDAw +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4NDI1MloXDTMxMTIyMTE4NDI1 MlowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MjCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAOBsqW26eD9t1JvEsQH2PjcstaknRoeNi+yqxQ4w -DIWrngdeL9/achgzCx4lCbcdTv0QatGg3manPEXem5qsmR9dKj+EKPXnV2qI6PSI -fv1AwcLma0Ph2F/zMASNP5wkwgv6MaIyYx3n+F4iBGQToUaj1l9XS5E30w7k2VxN -KR7zDOGSKifavuGP2nVT8NKgXUjsh3X9F72ZIPZwvaPYkbmikOshDr8TchSgof6+ -9ng+sSmYt/Vm0yWspjJfk2qJldeIXgGRVCwOl0qBsziEk0HJSpAjjy/u9GcTGz3A -qRQ7wPmoKU5MwnNQKZGE73JRra6zk64YiqWdkg7x2WuE1Dp661bvP9iwC4EgUqXL -ZEQkISsDpT8RkWqp2G5crvyrk/cf8I8TbsPi9Q6Eg3dRkqCN8H1mkZT1assOp+G3 -2F7jOvagZfLik3xoSbvpD+u2vMRe30uPKZBNhEZv2PU2YaSEXu+a5qT328uFK254 -rLFi1DZU0eXlj9Y///nMo5kUoq3z4WcL1rnDRSJk2JZJ6Ln5SXN4lbNuvn7dFjKA -VoQa4texrCSf8jtRKzexhBi28n6LAorJT57E/mo0ZvfL6aJb7cUjbhQZZmC5Kqoa -lMaiEnoPxhMqG4m+n6bfYGLqfZlsDiTVzcgEd+RxGTlhaUIg65ZMGK5982PvV3vr -AeyRAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFHoqiLSZ1N8EEtPo -vmr1u4X0tKyKMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD -AjANBgkqhkiG9w0BAQUFAAOCAgEAZ+NQzQk5L/55+58WYQk81SyFWXLjj3RVO3fB -jUgIaxd87IrVeLKrnfoa9mMaS2Qf3SfEMhovRy6Jb2jfxbG0wLQnhx1bqNtaNLAr -2pGG/Yu+4ZzN4iIloP9dn98tYFkHOLOLfIwNEh4Yg6IB9eg+qcbDg5JlqGkGRSsu -IOS9XD2CY9zQ3KLGlVCWZ1EfW8u+du1GIUMx0DEwEYZ/zYnyTa2bCdBD7aetmbKZ -yjSQ0Ole1W3z1Q3uF8CQjZ2dr/wQ3nmxj5Km9PN7/Q9iHn7RyeypWxt5utzSG5Bf 
-egL0ER8kmYeKHZeagdRbKWPRyUjEligndLzh8Vi75hGFBDAx/pB0aVf81HEStKKw -WCuL0PKpKIoIqNE8aJ6jTo0OEL+Z+6uam0vSnuVqHkeigbNsmefyR82TmiJYDahM -3CBp6Q5gfw4WKIY/0JuJnN4Ym+zIgv2kKRVHGK3SHhiaCUGt2BydN9MxSjl1/B+v -U7kYVj73MJZHSl96w1mnXXFOevxb7SOP23QmTKfqmU0NakfRMcHcjnG6M5mlnIDg -DjpSJd1TLoCS1SfIyc+Fibd4grsRucnuo0iHuFqV8TZ4hi0qKKE8UG8En5KNiQDl -exFgZo6FGQa1mJxQiSfhL3VoyeZ/b3QRG+mNVDmgHsZSTjfMppfmyBJRT4HIlsHS -dWeIeN4= +AQEBBQADggIPADCCAgoCggIBALb7KLFguOBiEHR8FBI/0AFs2X3s7fN5ZCOkTf4p +s9LwAcBWm5/zUqzvZCSui+4sr3qN1b1D+Xbc4xH9+WcxfbeoA2w4d2FKKJ0qaShD +Mu4XTQfj9B7g5GZ+FUeP9rScqgJ+WFeOM6QoAgRrFAS0AMP21TpDue4AVKmD1trX +z3f1DaRBtcUa4zlk2J0GBQDPyPB4worxhZ0IW+OLz2RIl8AWJBDFKMgscxEx239t +GTOY9H6hPI7Py2koknj7LBNc84lf2PVFw5OytQYglmtkFQqntyVxtETAOL4pFOjj +Zw6MAnQBGLS6nhiXG1LkZuvWJn1T5ewhci4qNVpv/8LtrPc2Fv7jb86I5XJvdOGv +hYC6IwS9Psg9oCYaIzyandoSj8SEwXaQuD98ROBUs1raasLedg3d4xNeZCRRmnzE +me+IpHm/wS4hTMxQJHYVewNB68fl6FoyRAqoXNy5yi8uMJKbjqb9E7niQCQRO9vQ +eX0TrB23uoUXbdTz95uMiw8yy/xz11/h59TxN9cOiqDf74tymgH0jTwO8eg/bzKU +zTXxTANcfGDvS1vR+yaZDxNbZ/3A4+NzNF0M/Z7AHgEzUcx4yu2shrXXr3dhNpzb +crk9yoCnEAiP1i54euqaOOrp6O34cRCCBE18j+oEEIIfYdMRXorCPxHRzdHKlJ5K +OfRZAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFCIxr8jA3srbonwd +2AoxV5teDkuzMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAT4hUMxXeg6cqFyyg8TeStBI3fWtmVKahlsba +Nh1KlZe5ZVTwWKh6ULn2zvSqy0t28wpER8Ky2a/yBxsssvPKGQBUgUUmUOSy0Zzk +ICU+pDJrVtZ02vOPlrx23SpnE3EFs3yXMGO5B0RGScHG62YjHyBDPJH3Gun4CB2W +PpthtYIX2FN5g15T3r6UZy62w7SjUEu5z5Ke7IOiAcnXNOXtozv4J9v3bt2Kg7WB +YS80r6b6cyOhO57jobdRBcdsWWogBXBn+ciaL8z3gLNWPs6YooA/6/95Vq0uV8t3 +xfq0XH1dbcdZOnalSwNEyOgLKxQ/yggOd9ridk9e5cGBBIfw1v1N1qDkOWjcdEoR +qjAjR4pgUa+d92/HNLYG8SqVGqACjUUQM6tigw6tHUbeqpk1iI7vT8Cl6Er6bEYE +tMTWEcWAI7GsqJXl2SJPMsObjBg34aZJnU+xxedMDF+OYZXzYeYk2De7uhXUi3iu +46alockzYqOdN6vE99Y7757C1X3N62PnEUhZN0Ri9D7i1yjQ9t0CCucam6hcqcEH +fcDIsXTQz0l97iztkPhcLd3kzAg2pXopwuHkhd3Ih5So5/m1V0rjHVVtrbSkN8/u +JlHy0/tNsJ1OaJKRqd665M9IhaRrc9KP0lzHoA1ZpUsRKo/Be8gNUaw9EP1CMDny +kKizg2s= 
-----END CERTIFICATE----- diff --git a/testdata/x509/client2_key.pem b/testdata/x509/client2_key.pem index dc4cca65a312..a9563fbf1427 100644 --- a/testdata/x509/client2_key.pem +++ b/testdata/x509/client2_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEA4Gypbbp4P23Um8SxAfY+Nyy1qSdGh42L7KrFDjAMhaueB14v -39pyGDMLHiUJtx1O/RBq0aDeZqc8Rd6bmqyZH10qP4Qo9edXaojo9Ih+/UDBwuZr -Q+HYX/MwBI0/nCTCC/oxojJjHef4XiIEZBOhRqPWX1dLkTfTDuTZXE0pHvMM4ZIq -J9q+4Y/adVPw0qBdSOyHdf0XvZkg9nC9o9iRuaKQ6yEOvxNyFKCh/r72eD6xKZi3 -9WbTJaymMl+TaomV14heAZFULA6XSoGzOISTQclKkCOPL+70ZxMbPcCpFDvA+agp -TkzCc1ApkYTvclGtrrOTrhiKpZ2SDvHZa4TUOnrrVu8/2LALgSBSpctkRCQhKwOl -PxGRaqnYblyu/KuT9x/wjxNuw+L1DoSDd1GSoI3wfWaRlPVqyw6n4bfYXuM69qBl -8uKTfGhJu+kP67a8xF7fS48pkE2ERm/Y9TZhpIRe75rmpPfby4UrbnissWLUNlTR -5eWP1j//+cyjmRSirfPhZwvWucNFImTYlknouflJc3iVs26+ft0WMoBWhBri17Gs -JJ/yO1ErN7GEGLbyfosCislPnsT+ajRm98vpolvtxSNuFBlmYLkqqhqUxqISeg/G -Eyobib6fpt9gYup9mWwOJNXNyAR35HEZOWFpQiDrlkwYrn3zY+9Xe+sB7JECAwEA -AQKCAgA4kiuDRWXaV00olsQnwnKcZeDE6umUcdG7rrBNiz8c0s3a/ZsDyoTIJNXA -m4V/axvmHqVOgkaNicpfsmV279sJVOq5aA8LLW2TpT9TpLSeEhzFjF+tlNh+F0cb -Xp+SNJHVgxPP1vO1LiwlTl3c/DXDILmA/vhFetTxBC7mXWzoKEwu8DFAKpvDMAfZ -W3dxIItjPnxG+a1qVZdBh9nF22mgaaIuIv8cm0I+gN9U374xQVxXJ+/3JBxFeufJ -+t2mFVh4JB/ONVwKXwMz/M24iXK1OpBZFR2a75kcAmzzfAUi3I0gYYtH+YFqn+Ja -lC/nmT82sn2ffQA2DyoqKjysJad5PWHByyepPGA6mkrAwaxn8YFsd0Yu14LaWCfO -5jKQzMvDhuAavAkaeT8EJnQdOeztXHYGV7S8rDQOgXM58W8e9+SchceJzkl1MYKf -99xXveelRaTaGOWBK1E6xPQP7iKJTeh1/Xjk0ylEnWPG5VvjcbNFwleDAnhyDTwB -OqcW2L3IV208MmDEmLuSBAFjHg8u5+/hLnsv+qozAX4yWhITZL67uBufVjKbhTi9 -viFUJ8/yGP9kIrJosQ4iDZgZv1juQLEhAw/W1eIV0gCxy/ZFfxAJXgKThZJWgSAI -FTNf3mKZOiUpuG5+Pe5fFtDa1/vmvQaE5y2lzh8ztLtFboaboQKCAQEA+a4nh2bD -WR6UC/3xQ22/Uwvntw91P18L+HyzNtgKCKKKVpwjWdaK/o9jdnRajK4s/hYKcIND -szaSjnD1vXWezw61aXZgOBai+xGdMWJFbTIRFfFcJqvFwN4cOmURX2NzLn7JPCp2 -y8HUdP0u55n0Ax9/qSkh4Eysxcy9+RMAcJ7LIsqSSlsSY9tQ78QS8ymJeePdf8xl -Ha3rlaGLpoLt/8gfYLjMfpyfUnuWrwRK79aBBKbkG7sdi5Cahnw1ZN1vxdOjpKcu 
-5/NhJ5OZxU9OSm91uzSkQFfsLe2t1JLnjuvcPASlhMIskhpGof8qCrjct1e7sYeo -UpyVknF7InNGMwKCAQEA5hrbpi7Nny2H4Uu7Z6aIUf3dIuPLl6f9VDkIqRon1HXx -4+1gQWhEwclB18FzFVDv4h4YAGv4upGHYo8DNl8GYrcIpZQ2dxz3QfTH5jl+tKmF -FfHIRKuBJVgXw+nVrE8HzF1M1UTCwCb8SnDg1dV8U5OfJy01LOEnp1sNW64T/pDy -unCnY2+k/ncqGmeWUKL4mbKN8GmfzIGMhwi8yiM3Cdbmk0kETDK/NIwgl+YLX6dt -lHe2g5OVoDgVatC8ViVmoQVmuuPASP1K4TPUAtRi0A0BYqPB2O/vFZ1f+yD1sJM7 -kILtz91DPB5v+7txwjD5S558TC1l8L9JCH12R7BWKwKCAQEAz4Z5RImdhM1tsCn6 -BlmJ1LToe7dVdL7DbF35d3RJorO22BYfK+Su0rbLrQE44gVDUE1xj+MKukJ5vfsV -xculm+RV1LqXbwchoB0b0pgjrIcYvGxIc7wCOjRisgafUfGPIu4uxNtmsiUBOdvW -yJmlv5LGwQt3JL+WOzHaFNQ+YV0a6mgE/9iCiI0Z0K/gMEwuACntSPPSd8C/Nzd2 -o4ff2eG0cugm0HXN1vjyXbXrsz1PL1an8oSsIfym83D50ERdSsiGE60Bx7j637JG -9UDdifDqohc3DmQF4obTHQSdgqV4AEq8aIQcF7PPUYaMoyzUB2/cicp/lWqgx3+b -IR8/EQKCAQBtZA9P7agrKEYUwSASooTkFb/vOkQrkN1KEOMhISIWSwv3w32jGqK1 -TaxTmc/QLm4cHRpj+PCCIXUvUbXBP2OVwlYGAXPzJH4XiPsPY/3sfTqbuBnxK2d2 -DW8e4CeIhvm6GhDQwqOjHeWKrib1AUzdnqxmv4MsFs33Lb4n+5Xdy6LZJ30sNINH -xfbqHpzDMPbmepAn3s7tNhlMiMbXge5Eazmqg2fbobRsksFb9S0rCDl7/31xB9R2 -GrNz2E/w1E759ctkxalACcpzTWRZBAcFyWkDL76UF1yd9fcPOBgVHamPhe7whsvT -5NRv5CisnQOnA20r+dkgno9lzd9RLW+JAoIBADJ0vUL2nJZkM6reh4+bDAoRDP3s -U6JNPAmkMvWsiMckm+WKUtUo84VDBSIKX897z5sZ1AfkWS8P9MqyiDbPiJCuuIkq -h9OJIHVEQ8NfmD/sl/3TE+ig0OzIbZUL3sssL1Iadkkn9hNnYIY1nt5QsKsWJ1m7 -u2+6DHTkj0TAM6SGt41TvRQyLS/fGomqmAkqYNuN3jdEGF5cFJoeyhOh/EoMP3RC -LabPAhwUZzIH+JO93Ws5nuKOTPnryDQOM4Ug09aPLaJW5GRmfKVie1iDV6sp7KBI -7OqHcuieCyxXHrFRESmxkMj87DaQ5mTo/q8qoZ1nOZ58vohAjbPvIaQ+vL8= +MIIJKQIBAAKCAgEAtvsosWC44GIQdHwUEj/QAWzZfezt83lkI6RN/imz0vABwFab +n/NSrO9kJK6L7iyveo3VvUP5dtzjEf35ZzF9t6gDbDh3YUoonSppKEMy7hdNB+P0 +HuDkZn4VR4/2tJyqAn5YV44zpCgCBGsUBLQAw/bVOkO57gBUqYPW2tfPd/UNpEG1 +xRrjOWTYnQYFAM/I8HjCivGFnQhb44vPZEiXwBYkEMUoyCxzETHbf20ZM5j0fqE8 +js/LaSiSePssE1zziV/Y9UXDk7K1BiCWa2QVCqe3JXG0RMA4vikU6ONnDowCdAEY +tLqeGJcbUuRm69YmfVPl7CFyLio1Wm//wu2s9zYW/uNvzojlcm904a+FgLojBL0+ +yD2gJhojPJqd2hKPxITBdpC4P3xE4FSzWtpqwt52Dd3jE15kJFGafMSZ74ikeb/B 
+LiFMzFAkdhV7A0Hrx+XoWjJECqhc3LnKLy4wkpuOpv0TueJAJBE729B5fROsHbe6 +hRdt1PP3m4yLDzLL/HPXX+Hn1PE31w6KoN/vi3KaAfSNPA7x6D9vMpTNNfFMA1x8 +YO9LW9H7JpkPE1tn/cDj43M0XQz9nsAeATNRzHjK7ayGtdevd2E2nNtyuT3KgKcQ +CI/WLnh66po46uno7fhxEIIETXyP6gQQgh9h0xFeisI/EdHN0cqUnko59FkCAwEA +AQKCAgEAtP73H42nEfyufiqFyA9q9x3ufMsyDFYVIdRSeYhSoeJaOSDyS2NqcjlR ++57UN0HoSfemZtKoHlUcHx3z54li65m72P55x7iNN/lNj0/5Pt25ioaHYUvfYSpy +bhkPVVRqLpE/XUwB9OzGIgyw/n33C+BKxplbfvrAw/TvQAWc6PFzDvkYjeGsxYbl +ZV0g8c6W2pb5CGsjWVN9YTVYbcAIqy67egMr9eVR5L5GemM2PH2dyuw+dJ1CfcBu +MlFxJa4aD9bJSsQ5Uw3AVlFBuPSEg8emN9mjESZ6ek80qbDWreL8QjcbcxntbDF8 +C6B11e48oFeu5MWopdWGdPC4Mt7a6Pjjy/ESGHcKqiDPP0VdcEgKpmowXI2CtXfz +k9bbIAoveMgFThX4eb/d5DzYXID9MkSd3GdZXMa2Z2xqX3/S4dujWKda0VlN61vd +3sX0Xd6Wno91vobjFx3tqhqumKpZ/1MjNDqzB6v0lRzxaiFPT5/h6hTIzGKslzvJ +H9bTUyoocXo6Xuskw5FHcM3VFriJljfFOi09eqVvldSvaBosYc6MIRuw8zuGuSon +q9ZBIYDgdnfuXhMh2cohEUOHoi692FgGsC651rl/bgHncx3q7IS995vz8NzmZF2V +dpN9q4v6nwBuePQs23s2MAEF5REyeOR/eA7gWbtGASnZMfmyk6kCggEBAOMGYgAZ +JMr3dY0bZsp2hdS2quHau2mUIEvV4FMvu/FLLiFq0JMHNe3vlmZrTLR/JW9TfK00 +ymPXEc/v01raJDeG2Y7I0086v65tDmdHyEE30MLEYNww2XsPqZIoacJNEvr/Jmki +O/nSaUszxJ4ygOGA6u7oJi1YJ+l2Oe8kQ6UMxoDHSDO0Y2Fjhg+TKCF0+a0Y9ddP +Q8k0B3MOXUcUuGk5ZtnXXJbDBq2Fvpf/pGsCp9twESyu/nbGSGKAClH3hfxXNU/C +jBUbX9Jyxgw5ZqWqRIt7O5NBliav3MClKKKVBYWQiju0SjV4GC2s7kF3lfohFq6R +ltGgn0pxaXsLqbMCggEBAM5Vubx62pQ9O36FPCp4yLCzMeGuuTA0Wq8LoC1OtBvq +OtHjKZdmx+Pe4W224iyceK1hEYNd18Byv1w/FSJPR5U9W+jk/GYTQ1WlMosAeGYG +fNvuLCJUDxO1cDimRIuCiQeAeYiAgCmyfmsdaUEiwMYsI22ITlSeKavIULiZDvc6 +JUQfDgfsmmD9ZtxVhwyGuLgqnHEOxXv4Cti511/iYbwM1NMB1tvmuDpjZAwpQMpl +/Fq4N/cNCe91/sAF/a3VXMZlxXey1kGmXLlCPFdzGGMGelI0v8cbB+dJ11NnZDC7 +EZPknj4jiEHVkN7/jl+WVk4zhfr8l85xh5Q+nP8/C8MCggEAPNUkA3S5WC2w8Qur +oorZ16LO7VAoMeVANjHsNz4uNTz48nllxFAFUmmFupH77s23ITqUyPDBXrlti3Nv +BgQ3+i0HNOx5Oty6KioM1v30Gg2zwczPS5FHZWNQA9sSY781W85s43UJ7ypDjqQj +hmRwBnz99uB8AmCB6VwFsB/ehGaE9lLv9PLcQmdhr+C1uylWEd0DWxthRZPMfzcV +JYvW0lNQTQUZSUifDHYvGRmmXApNIk7IO1n006zUDpjSqx4RaAmSPnoaATnhlkms 
+6e+joraaQWnXD+FeM6WiGHjpB4+4+A5ADDmGPQeeKvcQrLg3ltuw8TwP1sIcjN0Q +76izYwKCAQEAlYaQPCN3pTeelrhs+oZfQZYKjvb0oxc9pF6zbEH9ycD7cUDC0kIc +l2jcSore6t9VoKeYbm+iO4esX2gjo6J6SI+XvHW85ygMgtNdhlgH6D/JWgQGnbX2 +2xyAP71WLReiv/n9mMsulYkRjgRZU2eg9bvkzKqbwTyBDEj1HmFk9AqCGRS8MUfo +NGNOmFuuq4gx8tyGVHQU7xq4mYhLqOPAWeuei29oyiEv3rhKN3npxwMTVpbrj7A2 +Q/9pZrSwurnFKs1zxaOnGxo5VdPHMMRqptB58nrhg6N2HcloLrvdYmcefOOPPY64 +XqUrAD+IaILk9nTmIhXM2UFytB6P3XVNywKCAQBsOZZ7Bk3LEZHpqqlqOy7U9jjI +39tir93AEIf46i2Rn0YgynuTpsh458E4LEH88ZXJCDdfOtFPTEqa0wnm1DHhpai2 +qyiNeXWFpmhbsEgLR4RfiASVyl1W4febZT+JpcVkYtkMwro6u6Owy8L34SO+rPCb +X2IyPqQ1+lj/9ZvXU9AGaFgZNQ9bui3sK3ifvNYLGbPTBM939hNdOoI5mW44eEHQ +ZDBKjiMNnkWlNnFJk2DEyGVIQak7pVgSygX+RkMAP/OjDPNO3DyS81WXZKuxOlda +rWV0liu6hYAAz4Bq881oXzTviG24BgUjNJVq0qYtIzrsbW7fDYirkn5ap/7k -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/client_ca_cert.pem b/testdata/x509/client_ca_cert.pem index ad6fea8ecfeb..026a4e478412 100644 --- a/testdata/x509/client_ca_cert.pem +++ b/testdata/x509/client_ca_cert.pem @@ -1,34 +1,35 @@ -----BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJAKa2/29Hc+P6MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD -MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMDA4MDQwMTU5NTdaFw0zMDA4 -MDIwMTU5NTdaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD -U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL9LRGKPAszSvRzSKwgP6niaPy0w -wbSILjrp60WVHB9jjOSIvgCaTev9Tz/+zCaxCqM/hIrBNXI+ITZuzNUBx3+rz4Ns -VdYVhEsilc5gjl/dqsvD/FJdRKHKDrSzvKznwEs7KpGX1AdYoWBYZ8jNaQcDdopU -VhZdE/196akrTRejZQhnjNaaCXKCjrubfeFGpZ4hTsDHLjzuTYkiZ7m5q0Kdiri0 -9gKNdp6b5edyLuuMimEviEsZbYritZbwP1kwGiOMSQi2tzBGUcIANugqxMhSUrgy -JQ45Eew8mLnNqEOgk3nuWf4m0LPzTlJ/R70TmLIVyJrZ51GcLYmTZ/czsfkhXaPT -sTuBRgqFhJNb2ukjq8XPJH7O0wOhbUKT7MCRXSlFttUCIZ8aOmufv5mYLuaGx0sd -8uJEEMZHKDeMZOZNsyTZNaged77Onf+AoUkSH25aTdjU+bpUn/0CO2aJDqwp04Rq -7qOrtGQ76miNnw4Fe/eHJuUoqp8VH4dUmFO3vZ24N+kSzF5LDwEbgyybQN/cot0i 
-rjm8iqcimwS+BISEm7UvIeK0AEzXmxNC1mXEwvY0lkIci6TpH2Fy7OGaCu5MTru0 -XrOORWqxMLo65bTQ0ciUSxw8DartL4xobOW2UY+EUO6Da8yhVRbO59cC8dBbA9J4 -fH60efPhziFt4aKvAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUaGCg8RtquvpSIbS9Va1yqdqyXuYwgYAGA1UdIwR5MHeAFGhgoPEbarr6UiG0 -vVWtcqnasl7moVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV -BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC -CQCmtv9vR3Pj+jAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAEmD -XMO4s+WP86pM35pFMbrz3qsjiB6Cw3tqSoUaw0eIi88uWk3ismREV/24wY4j+10+ -5NACcPLykZLh0XpQjHMIZu4FEZSsQP0ExnHluaS/XaFf8hIy/qLFcm5x6wZ08AeU -M+daf9BmCSrjuW7u2bMxIrRLcnLMQG1kX3t3aEQLl/GA62g6Ll3MlHBGDILdvdNA -jIscctNhnrCPLBc+ykifa5NIBhz1PWU1RTr9JyNJwLaO2To9LJcpZKda2LJJ6xYQ -/lzPBg0aJgw9rOOgdenhb4ijQ5nMWZqCDZZFiKej3e6pj+M9E4a6OlelHiRPZT7j -q0bSoDDNTCviGlap/LDCBTvzyU/c8hgJ2XSUMfOL5RTXQTmqF7eQEMepmNl+J9HT -FYv80eOtk3O6rnIVHJ25zjLcLTD8iDzH3eX61bhMphI65jr4ltC6fGetXn9xINX4 -lpuxpMg5sRIYLl6lUdBcp1pMdsjEWUdiPcAxhjYqthb9MeSgmAG0cEJ+EbgGbiJA -m2DpQ8HkQjd5gc2mCs1X5HKiFWr3ERTeQwzBwUZmNaupfgbDWpKi8xrz91r3tLVN -eFjyd2z+0VtM82KP8D34ZVqssjp3jS8N9H1h3NoPqZPtFN3DjXfFV7BsfrcGR9CN -mwNfZlxB487I+gXYIwAG2Tp1UYNQ1JDDfkF39Uu5 +MIIGAjCCA+qgAwIBAgIUZzkKhtgm6Y3RaksChHMIJFKV+U4wDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4 +NDI1MFoXDTMxMTIyMTE4NDI1MFowUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB +MQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xp +ZW50X2NhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1Rk7zsuwXn8r +KMHk+gmvaftFmlY+NHs1mKJPzyCGFnablnJtHU4hDpSvvNitoQ0OcurOo9V9ALlA +U2uw/1q6Yhg1Am4cXwSWHG0/GwCQAdPTVb7W1MiAd/IB5bx9xrwfjrpGLjVLS3/y +nOKP+kl1bf6WAcLEPClvH+kSG8xMwvg58ot7ipWQcWBTSuZLaz89d2yfxpvtwrvS +YDemY6f8Tkxil+kDjb2Jo/zdRDz8eIEOs1PcdztrdWWeQaYJVX6aEOHCfdVNOHw3 +jNQKyVREUgXjr/pkwo9fTnZjQdBUhZIo7NuPPG25t5qZK3dUDuLcVRQ5Vt0/45pZ +/HkZDCkxmSynZWz2gPClOHVPOG8Eqi0Mbd3XxQSsd1Go667oFotLvTuynbYhdh4s 
+xAJWXbFV26HgDXI5wXueXrs1n0stUlbD6KahfeoYBu+idX7gB4RftqhqlbIazu3y +hj22k8cMQEPkLhzmUwRt64juLA0+FRG0Hfr8vdZD+f91Qbv86Qw3c1/lckQIOlyI +MerljNbCbHJm9KOZGf1zizwvMVtVzuVtr6RY+Loov4gzhJ5kNSk/YDMQC42c2Yhz +Lr5y9EGe/cL8QXdKfjKNeJjCbzxTTFiVBq5XRKUgjz6ga+F7KGO7ayMBrexZ7+ap +z7ydlUYS+xp43hqdisAGmUMJdDVlHCMCAwEAAaOB0zCB0DAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTq92tDG5TfVvTqbu1bA593K6aAwjCBjQYDVR0jBIGFMIGC +gBTq92tDG5TfVvTqbu1bA593K6aAwqFUpFIwUDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRl +c3QtY2xpZW50X2NhghRnOQqG2CbpjdFqSwKEcwgkUpX5TjAOBgNVHQ8BAf8EBAMC +AgQwDQYJKoZIhvcNAQELBQADggIBAMHOXRUUq5vf9G2NvnAR1lb0fTKx4/6B9rhU +Nli9uIoWGQyMu8icEMistUp4AdHWdhutKX9NS0Fe3e5ef6qIYCng0gVBE3fTHJd4 +V8MhGtyaK0K/gpTrJdClwK/litRIEjCFwNYEK8vtuqNjR82d8IuFjnbinb+IGCH0 +sLRGvvZch+dwM5N9BVRq20M2FZhyI+fWZmt1ZiBwnfy3xM+enD2I+/LOUFoxAmGS +m2vnS+ULhq7fLaK6vgyUIGqRDQMxYEql9QGzRIspV9vVhRuOCmowlJbgCv++eOUG +FvjlAPlQRGJ+ShpXO5n2pEkdjIJOrLf4kyviLDHffIl5I80fRWzv7GJ1HP+Bb9qO +LZGaiO3SelPhvJGTSV5uSZpgkFsBbgdbbGI60W2QQIHEwG0HdjnNk17+TmVEUoCj +rWK/Kzw5py1Egtibju4CiJ8uIKeew+2pfdnnyHoCVwCfdACc4dwRpet6fQvkRcru +5PR5MzZqUI2+bjg/hJrHj7SVpxpjcr3OZdh05T+heCVuPp+9mHBmcxbeA8rkMZAq +vILLwgwEriSbKy9Y1GLs2oaPNaWEpN9Q6kZPUwtwlzjHG3OOtldeXPpMVpg6Sb0y +3NnRfvfV/g2gm68S21j6qhGM2aeQCdCu5insqnR8GS5/stmuyCNnlst24JBneE0i +louEQ0EV -----END CERTIFICATE----- diff --git a/testdata/x509/client_ca_key.pem b/testdata/x509/client_ca_key.pem index 3fcfdb7074e1..750e20bb0f2b 100644 --- a/testdata/x509/client_ca_key.pem +++ b/testdata/x509/client_ca_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC/S0RijwLM0r0c -0isID+p4mj8tMMG0iC466etFlRwfY4zkiL4Amk3r/U8//swmsQqjP4SKwTVyPiE2 -bszVAcd/q8+DbFXWFYRLIpXOYI5f3arLw/xSXUShyg60s7ys58BLOyqRl9QHWKFg -WGfIzWkHA3aKVFYWXRP9fempK00Xo2UIZ4zWmglygo67m33hRqWeIU7Axy487k2J -Ime5uatCnYq4tPYCjXaem+Xnci7rjIphL4hLGW2K4rWW8D9ZMBojjEkItrcwRlHC -ADboKsTIUlK4MiUOORHsPJi5zahDoJN57ln+JtCz805Sf0e9E5iyFcia2edRnC2J 
-k2f3M7H5IV2j07E7gUYKhYSTW9rpI6vFzyR+ztMDoW1Ck+zAkV0pRbbVAiGfGjpr -n7+ZmC7mhsdLHfLiRBDGRyg3jGTmTbMk2TWoHne+zp3/gKFJEh9uWk3Y1Pm6VJ/9 -AjtmiQ6sKdOEau6jq7RkO+pojZ8OBXv3hyblKKqfFR+HVJhTt72duDfpEsxeSw8B -G4Msm0Df3KLdIq45vIqnIpsEvgSEhJu1LyHitABM15sTQtZlxML2NJZCHIuk6R9h -cuzhmgruTE67tF6zjkVqsTC6OuW00NHIlEscPA2q7S+MaGzltlGPhFDug2vMoVUW -zufXAvHQWwPSeHx+tHnz4c4hbeGirwIDAQABAoICAQC9otcLQazL8kpprOvd1TFj -F75zhTcySiJSYxzKYTR85YqB8BEztcRzoy2SSnyGCtJ53Xj+uOTL+U2hkZvbuiTU -qzVPmvFJBxGcDpAmBFCANtafpA2adT2Zih6kAt6TJjfaHLBpnvMhyTpJsbpJNWDe -BA/auBqTlvg/PziJbRTCz0dUWpsjD5c3/reSwmW7EvcSWQCiWZK78p3IyeO8GZTu -uBESZMrQ4v5p5DC5Ddf3yN5R0/YwROf0XCUamdajCu2Ouf6Y9dGKuNtKED5eUC++ -SuYYFhXoEKl04OmioH8jc6dfo+tw6XfSPOwzGly60xd3y+KPqF8J52K5VPkm9geC -NEttAEKEpwLX4cAsxzQ09WaL0fq+XSpwWZYuAJI4F8zPadckbzittkAFGnwH6t5N -ydaYoAcGxz9x97qbu2iS9SiN1cWQ+OSMF+o3o02WcLNcIBOVIKivV1FuLgQEPfXw -bi9egAOUI5TUvoVO8mG3Drk5+Ii6PPxEaCKfp6x0xXA+t8JrmOCsEoYRiPhCc65B -gHZC1+mgngYUs6PYmkPgTgBfYAe2wYpn7uaCEo06tNfe0kPqLzr2uMEKZNY1IfoM -5RMxic9qKac3Qp2Lf/XG/90L/wO+kVpv/HSWh8JAZXezYD9f+EhrDuYae8KlsKXE -Z+XGmMdgIarHLGnXoAqpeQKCAQEA3002LSywsvGM1KZz5tQoIyaor2kD8tW8vyS8 -7TlozM1TI58ALtDyV/LCrvS5jJEIbsdlrrOeBhQOS3RPjSQQdEQKfSly1TF9mhE2 -vDLznxFOQNpdkkzGwfLxI/5mMbeHN6960XAcfVD5QTDdpKPY+74uQU3HzQCx6Net -+UK3aT6CeIvgWn0xNnR5Fk2EnQHKUduqm0sRj5c2S8qUO6HxD/VPNRCT7G+faex9 -tP2VIHxwF4iH1WOmwQWxTLpy9wR7UYYxpFBvQN6gglHuMeY6tublTnvfhpMdZ5NU -Zd1Trzrh4w4sXRWStHkphJK9aQzHEclZq5ktvdJtFd/GGZfsrQKCAQEA2040LYoT -JcassmJs9UzgJeVkJIB61wmBc/qvqwKqy28niLubSYNaETO6cQzPrnlZjk6LKa+M -HumrlA3DjobbkmA1YI/OAhIHjGMEtaxsOTUz0rMR5RDOvRc3hXo2qKsXfDQGUtr+ -1DrCmnI/iVm8+F8HtV9tEHzrGEaCmeMLHQWCxveNoGDnZRCZds52ApoFxiLnVq3N -+ocQEsWwdOg+8ZdyfF7RqzW2e22WoCkTJYApGutDfu1eXHXlOeBrBNPiHMzK3pbA -n6+oqcxB23NRttNeUkge3UezjfQfuGqR7CLi0yF2L236MGBOGuXo4bGaUMgEz277 -ZBT7YfWhZpn8SwKCAQADqM5Ee0ECDbdTHM81bzChMtb82Om5pwsKzt1Rvekbwhmk -scxc+AugqVfLajNIPHA48IeYD1V9oAKD9gn/tCGY5iyN1IoPOFpolfOhrewUJUJ1 -CZ8S8LMpJoQRJPAjzHAo13VZzU6KNzN+gACB3DWIGpvDcjTeBS7lM/Oj7BX5YY7d 
-zt0EXpzZ2ZrKZMbRk9/u63ymQtqs0buQDmfTelnq+wgrRHRIIaQpJjkBKE6zU5a6 -rAAd3R40d5VqPnv31Fj5Awv5N2A7XeqfeBxBMRaxPKNxX9JP8EVBF0cAzFm8u2hM -QkUz2VCoKHwnsgfsmssAXZ5ck4wOWk5zV1F1xemZAoIBAAR+esVAIhpREvLo33C7 -bZB5Pe8djubfM/7rcTQg7t0SXw4HQixke7EEjVqJt6vMotAuvd1R0p5DjZeQHKTM -EK3UOOPMrp0OP4dZ9BvA98rIU1KLBt/Z01K+qg2bLomQT//klQiXokc5GQnPM4we -AahZUjAeT37aAHtT3pNGutCSb1aidg2GTtecWni7zGFLRLkFuBXno+PxZpvr3yzW -IYwT3W29B7Dpfd7TpRWNIe5PzQfXMF/mf1uHsvXXqnnD2ctbSwD6t+HN2Lf6DpNv -ron/lNw8zB0evgg3q3q8/FaJdHp9Ig3gxBK/tnoIohgV6qKjJq4ViSNI5sngHbmb -iDcCggEBAJ+Wg1Y3UnPShQjAAUyOeqfdLnb0h6ocz5Flog4I49023ro102xav/Rr -O6NzaH8nBHt4OKYWPgwa1ANZ1ujXfnqU531OlB7p8vllDcECSR9qnSE0vMO8hvbU -flREfjy2inQ9kVwCqLbYHh2XEYZ7sEwQ7p0dz1v9G1ytBslwyeC3h2aMIg4utT/k -73y0T5Nq0e6Mas5w0ZBemzKNHoKw7N05g2rrELL4hRfkGMrEIsSaANPDRM+4cI1k -a3CAv0mex+5XeBskUCtvU+xrCH6isDovDhCT/CSAjuEatezby6tLk8PeaH0uEaxr -MhPlrQvyfY9eITe9uSQtiTQRg+Z4U5E= +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDVGTvOy7Befyso +weT6Ca9p+0WaVj40ezWYok/PIIYWdpuWcm0dTiEOlK+82K2hDQ5y6s6j1X0AuUBT +a7D/WrpiGDUCbhxfBJYcbT8bAJAB09NVvtbUyIB38gHlvH3GvB+OukYuNUtLf/Kc +4o/6SXVt/pYBwsQ8KW8f6RIbzEzC+Dnyi3uKlZBxYFNK5ktrPz13bJ/Gm+3Cu9Jg +N6Zjp/xOTGKX6QONvYmj/N1EPPx4gQ6zU9x3O2t1ZZ5BpglVfpoQ4cJ91U04fDeM +1ArJVERSBeOv+mTCj19OdmNB0FSFkijs2488bbm3mpkrd1QO4txVFDlW3T/jmln8 +eRkMKTGZLKdlbPaA8KU4dU84bwSqLQxt3dfFBKx3UajrrugWi0u9O7KdtiF2HizE +AlZdsVXboeANcjnBe55euzWfSy1SVsPopqF96hgG76J1fuAHhF+2qGqVshrO7fKG +PbaTxwxAQ+QuHOZTBG3riO4sDT4VEbQd+vy91kP5/3VBu/zpDDdzX+VyRAg6XIgx +6uWM1sJscmb0o5kZ/XOLPC8xW1XO5W2vpFj4uii/iDOEnmQ1KT9gMxALjZzZiHMu +vnL0QZ79wvxBd0p+Mo14mMJvPFNMWJUGrldEpSCPPqBr4XsoY7trIwGt7Fnv5qnP +vJ2VRhL7GnjeGp2KwAaZQwl0NWUcIwIDAQABAoICAQCgH7bmG/4p84qdtJx3GaH6 +k/noD9fsHYzXZVds/zZiWLtuoArHk3aZezZWQ8asFqB9z1x4lSm5ynnAdVJpfmZA +4Ymrisu8xjh5ocliY9jR1radXqoU95g5CNtOIoWsOJ3J5MRpYlhyofDO3Btt6ZbY +kQ1sw0orHsNGih62Tpx7gIQicZbiOqJv3v6XcFbJfpqUS0X/uhk9U16wOADKL2cR ++qm3Fjs6XWq4k4A8D0tyzR8btu8ZlMeZTkNNdxLacCgaeVlorke5IvWm14pHYA96 
+Rryg9hiSbaMi1SieQonQWFRyLkUCFj0P7pYbqC28hdEkCO9RCy0/vDLT2LbugWGn +JBdPIQqRYggGnEdRocvflx6f2Xdid9I4zrI2XWnorbypqVIdmhVivCCWK8PNqKE2 +YcRg8TRQHvyOXoR55Sodrxp6KycUc65nduGe5jsyjA9hlQ0Jfxhr4gv1LuytnVCx +Z2q2PFF/cznrSLlU8uBT9Lb2gGQXRyI/rxp6g6zwnTKLvMXsQqBrt2hzlE2vkdiz +I8EcLp99IT4CwSJAyGFdR/2ZmXg2Hy20GiGc5RisMIsXvj2gVt26XHrbb+LnYHMq +0+5d5QoMnTMZC+JczoDiw64vQlzJGcM1VWFDOMn9g7UALgofCQv9/nZrWLjw5hIB +FCli4JhtwjNUP5Gz2sqx4QKCAQEA/zY1Op8i0j2xaxeaysZg6midIKyvS0a+E+CJ +qfNE0qmwCEmG/T67+IvKIwXqfBGBrBntg3De4rTDxhTVL4S4Ue89WYzB/sf9J52e +6HEpBCujRJcdb8ouxSfDkpkMYXsVsVTIjckbQl731cm4qk7L8DS1GuE0oZs9I3kx +iQSzJ1+GzRotnzO6a11NU5n5N7NM+97x9z/BHFmd3fUOlUkYdpr67PVBNKRaj46k +Ifs0Og7cZNh2JCzhVOOYrf/x9DybjCJnPHLVuNMqYOHTNI/LgpFytM26+Lnmu1X3 +mcohVacygr55oZaCC0dz6CijEOpX9SL9sUZJb/tJ01Sxv/pgMQKCAQEA1cG6Oa7h +1Hl1f6Qqg9qiWNTHur6dBOw4lt/ej8vexB6y9c85WoMfXUBFpiQta9Nt+XCrC/UU +wY0EqdQir+Ydwg92ddX+1eKb7NmNLi/moUF+s0V+9uPvgGcz6xVlSMQKTgsYxZnZ +CE8ZSBTSD3dYyIadGQHaFoP4PsABzGfzYjWnQpvk4SZf055Qs0Gt6vIBlbs7R/O/ +wPajzFYf/o0mAaPAAdPpuK6C3Q1J4Gp5LKkHtzY79XFl336uXQV3AwxU29sAkmVS +/COFl772Ev55P5nV8NsEQaChoyuNHO11YQtZEyh7zTwx+R4SfnTivffVQNfusnIa +gKuj2Eoq24XgkwKCAQAeHRJY0XA1aIwnu8hLBu9mmWN4+IdSlY1WIRd9UzQau2UH +BU4FUcKySCRYz5jkfNhVK1YIPWg/Td8P32NsUPfCyzzs9Rvq6UQoyYN3n+qcEF4a +eM5DY5LzNobwJFj+o5xiqUNk34b05OnPcxb0GYoc1MtN2abxLrUfG2zJ4yEUk0P/ +rYgWke78Pi0ioTdz6Bc8XQkmCILLypNDHmhTGyXk0NKs5R+Fi6MX71fUnqSB+UDu +MVB3YkhQUO6yEVJGZGRiO6j8y/wF6/zDI8JdIF5+EJV9Wg0mziC4mCM4JU6bobfn +D3ygoXbEx/CYQztCgrRQO4m9wjJmITuL0SGMKonxAoIBAQCCelJ2S23GCK3UUB0z +hw16M8gHEbs++gJA9j4ggE1mYWbT7L4RpeBLR6Q8GfEv1EtY65E9J0iYLMAf+kGC +JXEct9uTaiC35i9PkCxBeTPKUvRH8a/ifJgBRP3IDbNZi3DO2q8wTwzPqZjBCxR+ +JFepb6INVbgN7lhl1UZDw2ApHp8OZaJ8XLQ5tHWGNh03QKn+/97buMnfu62YWSoG +c5ozfgUCGJyeAsgWrrndpqB4xmTTTOOkmqeYmPdOCLvwvGJAIZpjwj25cuVlD0ed +qH/SdtDEyKv8c1S3CSqF8dyodAjXTOrlCE1oxxZ64lZVpyYhAq3NdyD+Uccdi4hF +n57JAoIBAQDFuv2cmOl34qQz1vd+R0axxNSQEwYC9wug9WG8PEARRmk6vCIMy/AZ +XnHXZ4aV9ds9q0J4hGrx0C1vjGHSpBFR+kulI+KcIITtHLPTAgE7e1UXGATVz0+B 
+ES3qvzJ1eXhl7hrrFfYUdmPok7pJUhf37qqhKajcRfVtHccaB6J7v/sbAMCIXP1B +ij7EESZgM+NLwOQ/iAM2Bpuphn+gxdV2oqgorx3kLymzffhmn0oq6qfn818DH5ps +sPgi2bndSxG9jNtpCIPPC9ltMNwWxuB+3f+wd2pKIjBulJ9tb+72s/Vb4v7EOmJ/ +c/xqN5lRsGXGduw76PipTrLpy3/LkZDL -----END PRIVATE KEY----- diff --git a/testdata/x509/multiple_uri_cert.pem b/testdata/x509/multiple_uri_cert.pem index 97637997e3dd..210b844448cf 100644 --- a/testdata/x509/multiple_uri_cert.pem +++ b/testdata/x509/multiple_uri_cert.pem @@ -1,34 +1,34 @@ -----BEGIN CERTIFICATE----- -MIIFzjCCA7agAwIBAgIUI8r7b2hX9DRwEQGWuRdk32eU5kowDQYJKoZIhvcNAQEL +MIIFzjCCA7agAwIBAgIUA0Tqj0ezdOI2R+W4smJis+1NRucwDQYJKoZIhvcNAQEL BQAwTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL -BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMDEwMDcwNjQx -MTRaFw0zMDEwMDUwNjQxMTRaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMTEyMjMxODQy +NTRaFw0zMTEyMjExODQyNTRaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM MAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBDMRUwEwYDVQQDDAx0ZXN0LWNsaWVu -dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCm/zjNYkfCTcq7tnVf -qkPEde1+M6s2z05iWDfoBeZfC2NwUxIBqAC6XTXTxqYSjEVRCQUzjVxyWQNiwuz7 -pK/xGZhP/Ih2uSQKTw8vkXay4HCOt9DR0S/XGcQNImdbawKgnGven8Jrg8UZDXrt -9R9Z0nRajB1eXvXOsEEoEfOnYthc6P+MxWJc0lnfaTlowyEgv84Ha13y1h46W6yC -+WNBT/kWqp/mzDTv/Ima8xcqEft9VUZ82qJ1DVt1064x8KOzm2x7F7QSIcjxr39M -fbASm8Vdnt10XfhdsDVkxTlBJs8WKGn0uw8MyPNjFG01OpYDHLAfJTL3XlvaUjfF -yDMFsRDVjfkuYIkqAVWQ7eleFfOFBYzaVf2K+2OvCR+vGAPa5NQ59kwogJYLjV/O -43axChBizcPM0p7gmRhhO7TQz7LLTea30rBJ/YtXdxFR11y9Jdq+i2KwWi8O50iO -hzxUBkbcQD/W9Bcn7gOkD/pgEGynWvFSs+UHjLeyL0COk0NiuYIMlOgwtI5BGwzD -bdLuTU/ZQm4BJBjEIGVHFqKyTqUXcw5t9fWxH8V0XNs8zqj9J7lvNKu9b88GnyaJ -fKMdDO4rVTJHmvFDHP9MUJHC9SabW8+hK0nuU7n3+Pc07ToCAan+Ych5bQHsRMjI -9EvxKVNfwIwNrmRr3mhbOU9xIQIDAQABo4GjMIGgMB0GA1UdDgQWBBS6jnt9IccJ -SOuE1KwP68VCBPB4hTAfBgNVHSMEGDAWgBS6jnt9IccJSOuE1KwP68VCBPB4hTAP +dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnfQMbGQzzWl17NuJQ 
+annAeiYFX9mcyKi0ywS1BOsNpDn8SRgeW7Ymj8EqMYIUv0VK5QDpsJKUQ4ZDBE/f +drplyXhbR0/aMpAATxP4AjnzH0pw49aIe/n3KTdBZj/KF52qO7WG2rcJ5GSVr8Z6 +H8FeP02GRt2iOAbLqV5/k52gBEEzjSJ1Be1DiRAMOQL/Wahyo1XTfvB0UeG7nIFN +OFiTdQmgS0s6OUYWns8J2jcSO8XoSKCxuNz+ZzkisX7xicRFjkweLKNLuYmS4U1x +wd19JMizaCRoK8E6NnOGVrxP/r9am5ft3QgC6AIkzZvNcylcHkkhZo5++X5qC18u +mKjPgxCOzAop/pGeiItfkjnshccLPgoPBI1W/gO6puxDaTV3HFGZdy5rtV/6MxjE +byUf69aKaBEY9d+mIw0OB8TfyqzNSoU578rTbuEqwiG2f1IDe0KMB2xkikDXGXPz +YYRVdm4bmgVmY3fjpoiM06+aSN1IbYEnmvuB98z1nf8VyRdR39jVk80udfHHdQWu +xTBMEHBtAjT6HC0tKIkeGKB5kmozPIVL+6EbI0JyuRVB6wyCrmnpv0Mw1NzSF5NM +JiYD/5ScSKhnFogYEstq8Lgj5atyrqi703bNGDVGmbTqChBG133r30WNT9JJ4rzS +KznDJhoHOgQqagkzGgHsKeAmiwIDAQABo4GjMIGgMB0GA1UdDgQWBBRHg8AR1psw +kslOTh5a2nxGAmo33zAfBgNVHSMEGDAWgBRHg8AR1pswkslOTh5a2nxGAmo33zAP BgNVHRMBAf8EBTADAQH/ME0GA1UdEQRGMESGJnNwaWZmZTovL2Zvby5iYXIuY29t L2NsaWVudC93b3JrbG9hZC8xhhpodHRwczovL2Jhci5iYXouY29tL2NsaWVudDAN -BgkqhkiG9w0BAQsFAAOCAgEAoR4LbmvtSXLiVg7BRilvSxIWgcG6AI75/afuaM20 -PUTpyDhnrPxEaytb5LP0w42BCMoIHXDLE0Jmbxqbi+ku/Qw1R6723J7gwRSUYIg1 -a2S5Gue4AFp7aSLDUZhl0jPphq7OMKozzH5TrDgjKljYjPURClc/ODSlGdzOqlif -CbDHwrCorb+BFM3aFDE0pF06pnMDXcn/Ob9QCLIpvZEOWe/fJbPtTUiA5cY3knne -regyhvfqfVZtU52qg+9o6q5QchVqOt19alAsISK9/H4iVE+S79AiYEAU4yM4S6p5 -VW44idy3KXmr5kyVwJhe3t9f5Ckuswmo6hL32ec6M52ElrS8Er0vFt4bjfNgq996 -lTm4/reL/Anko9chQiGBe7F8J82OfxjLoVH9CbZjIoS4LiZPkey3Ze9HUV1sHhM/ -umkL54jRsVjEwwSCIcF9onzmiD8D7FV3AQ9W/RbBF3wZvVBBs9ZKQCxek2pZX/eZ -Q+BvXwG7NGArowpqbi+tSrW3O+XZzY7nXbbf23jCBwkBn3jvqn1Kwsr/T/HbXUaz -dDUvkwgyrX7NfvvZ20svtKLlBZTO5D8P9fy0+cHsS0XkPhw6UbHk396hoOmVZ+OG -E5uVb2sBy+vx+82IwVzWN0o7380AEmAA5nrA6fMaxTxmo07pOF7avAZ34LgHJIjr -sTM= +BgkqhkiG9w0BAQsFAAOCAgEAOkL6WsETiUWJT2lhMXEHGpLwu1Q4nETr4O51+V7t +AJJd7oGS/QRL0K6YNDgNQW6GOUZptvTEOSAO2irNohP+0+ITZAClF46ggB0pRAeD +COWjnG9h1aonMtVlnswh2xVYfg4jd+qfQZ07jN9tATn5ZBpFpcaxvcyAYc/eq6x/ +DKf7HBBWq9XWyRxZJuPD9qhyGPDzI/E2yr2ahLJFSGMRbTDivDUbw0yHbzmYnY2g +uPrVAAD4DuKsJxyZrA2/Hs7ZspBMTyUjWj7KSw64AcDvFDQgPBXDfG4CMSRH3Eh5 
+J2F48ej7T6J1+PbJ81ISifGjUZH50haskBG5TKQqRX65p5LIVrDThsEM+YpfEyOB +mD2ylbxNs/X3b9fk07iS2HirfKZ0cKSINZPU+hEroasqxCcAY0E28Kzw0SdAGCGf +iZNRT0mNVgTPg7Bnrb7JhCBrm0aid0/nYFX+fqeKuS2lcdAcx6U5EgH0KnHg+9/N +NbSv+RtRiGWv5RqWF/Pk4bdHPvlzp/qiFfX9dQIOBtrFph9XUt/bEf6hZgaMKvT1 +QbQuM+rmf2ghjbqpCRP9iZUYBzOOvDZ8IeugguDvyBgrGaUSpreMzMC52B0fp2jB +Ib89u6yiKNNZzBGGE0d9y2qsju7q3IoV+eUwqbCUvGvcal+gdAfhO7Pvr3dD40z+ +g58= -----END CERTIFICATE----- diff --git a/testdata/x509/multiple_uri_key.pem b/testdata/x509/multiple_uri_key.pem index c2918fdd65d0..621c1b2c9a5a 100644 --- a/testdata/x509/multiple_uri_key.pem +++ b/testdata/x509/multiple_uri_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCm/zjNYkfCTcq7 -tnVfqkPEde1+M6s2z05iWDfoBeZfC2NwUxIBqAC6XTXTxqYSjEVRCQUzjVxyWQNi -wuz7pK/xGZhP/Ih2uSQKTw8vkXay4HCOt9DR0S/XGcQNImdbawKgnGven8Jrg8UZ -DXrt9R9Z0nRajB1eXvXOsEEoEfOnYthc6P+MxWJc0lnfaTlowyEgv84Ha13y1h46 -W6yC+WNBT/kWqp/mzDTv/Ima8xcqEft9VUZ82qJ1DVt1064x8KOzm2x7F7QSIcjx -r39MfbASm8Vdnt10XfhdsDVkxTlBJs8WKGn0uw8MyPNjFG01OpYDHLAfJTL3Xlva -UjfFyDMFsRDVjfkuYIkqAVWQ7eleFfOFBYzaVf2K+2OvCR+vGAPa5NQ59kwogJYL -jV/O43axChBizcPM0p7gmRhhO7TQz7LLTea30rBJ/YtXdxFR11y9Jdq+i2KwWi8O -50iOhzxUBkbcQD/W9Bcn7gOkD/pgEGynWvFSs+UHjLeyL0COk0NiuYIMlOgwtI5B -GwzDbdLuTU/ZQm4BJBjEIGVHFqKyTqUXcw5t9fWxH8V0XNs8zqj9J7lvNKu9b88G -nyaJfKMdDO4rVTJHmvFDHP9MUJHC9SabW8+hK0nuU7n3+Pc07ToCAan+Ych5bQHs -RMjI9EvxKVNfwIwNrmRr3mhbOU9xIQIDAQABAoICADn4UuGJAlwC4SN0XR5OXqPu -Q/kROpgWMqGU+iNDGQtZSrWNQKzugwIupSbUyIWbx9wvg2y336WaHMDF5bodGy5Y -sjTh9wUvk8E4XI8oscm6e5gvWv/a2/6RZSsiDDsB1LGoWxG256im316o/UlpU+68 -TcO46+D8mdub96JPSQOMHotyHnPheRm7s5MIVfN1+SQDMSQGM2C+z1N2y1XT+I6N -kmw54rQdoyrDwYjWZe4mu+RwG73vr4Ful5c5WjjfzhPlGi1ItyusKrMrNsd4wgxT -opmzMjDZBgSPzJkklZF2RWDtuopH/Rt1DngQeTCHG9gMt164bQ7N5JjO/alcq8j4 -TW/IRlZOllqJ0KogOn9nX2ce9Kfxz+H36Yj54sKuOOYvKRsoiTNdTD3D6eB7pwnQ -KGWAGrpU4llbzotiG5NJ8sDHYUwynmhfmwIeBjq0vuXlITLQplGYQnsQJI29Py3N 
-KWBOC9HaiKCKq2gAUacj8BK+BLeGEiV9sxWQb7/MbWRxXnW4KhNI8+ft+PZOuvZZ -vLxH0wg4/bYQISMaeaqWL4LksKtV7es4MglFdCCZGDMdy1/btIHjRFPQWwIaXxij -2OtCozfmmzIc76UQ8g506q4rSgzZclDvI3Yd3cm3XFl4cQfr5l8WTQ313wrlmo5U -DjYdKipOGFRSLHt7aXABAoIBAQDY4KyfuCHFMqKC0FJZUCr3/gGU0aqZqHR4l5jl -N0TsTuwCRf4lK9BuM1bqumv6Nbi6VwWmp3+BzZCI/Nn7+s6KN5hyulBd56X7owef -zl9yWW0n6nJxKzutiH06krjmODtO44gLjR7ddcEd+i023hwIQffdAE6IEtYuuoD4 -94pKd+dB9GQmgITwjS5vEP67A0lpFlL6pNMfhhe2QOLUnDPKsgSnKgwJsbBYC19j -TQUpgFh4iCYKSGAX4ABdKpOUjbKGqNGrZNPQv+4MS9u6s/HWN7yDaSMT/tB9n1MC -g8m7crWyOuNJ5oO6SPnetkdTbcam7tiCce/auqjx2cMJ2eDBAoIBAQDFHxPHXP6Z -FHxI2pYBFyUB7j0VipwG3105ujrJJWu2abFU778SrkmM16eaWHVH6tMvuAo24mP1 -6Qfi09uAjdwhRPmIfManxj5wpDafgvG7H5g7+VhY2/IXTahO46JuZAxVoiXUGmct -WwmOy0vpI2IxoXY8qLvaJv+b9nLpNi1PVJ743BmPMqG3dInoRAIBxMMEu6Drrbj3 -bjPmRNpqhs7/Kn4IahCalD6lgSBkDuz7DaJji8jINw5OhiL9VU1eslXmGrCbZMXv -1QG0EjAZvGzqWPL88mKYTecndP1k9DMqVBVGhCT2dW1aLypQgDCC5YxyRc5vOnZ+ -2vQpELPeS0hhAoIBABBEqi5A7aeRKMePQN4aOV7o2s2C/L0R+cqh9IIdJzpioSl6 -fpnjM3tQtpBc84SNSxIPPQlHPzVJajIcZW2VXrDXgsP4XdbtbXH2xLekD1zQgHOi -DnuWtp9JwbsHDn+WcDx2rNnQ+CO8lYPeJE4dUxT7fdBCGaHzZ8WRj+MdDm6Pl/VG -k8yfj1lL/dOu/qygjn0ng4nxmzSeJmExdNJl9SybNeYkLUr83TF9iOY1/NEkI37H -F7Nlwm+ICf7zFqbqCh43w6KLqafa/cxGVHEo1lcvTyC8Xjk9v/3sWZmysQsyi5aW -/D2q4O60Uqn2GluTvHcBK5R9X3SU099wakTu5wECggEAUljjOFu++FA4g27dT2NN -0HqoBgG7oJtbJKyJtlHtp2yL6kGlfrZUf4PvvmjJxdtxkfO+QKNewvIwmy+J+TBK -D5Py8nO9wYTtvLy9HPHk7hkKzbMilyx6/AUzFJG/34HoLTXpu6u0ApyPZ5nCAokH -klgzPq/2mfHEwnC4HHjHgOaG6st32fx61lrW6bLPa9G47pc7aHlQVf0xrTaCUBI1 -Ex+7OuSkPw9DBHzm/SXHFjHh7tgMbqehUGh04YPrKG4zuEbaFHCKx+AiMAmREo9G -qLez+rt/OMUCldcnrC7f2QT7RlQZ5OO1ZQFjGfITUft3Kp3C2XCA5AmwCh+yJGEq -wQKCAQANvxxFh6VvjU2+rB8Q4mDzYdr9OFTWMag3SNjBwwWoSXbL2wXPE5gFpzKj -yvEbjmOgzIRABt6Eytx32p0pC5UFIey5PNu+/4ejxiiQdKSLQbqQavKYdfGgyZ0/ -JVqNKiiEJ0b9VtqhAG+Ye1mHZIBzXncWyBSZtxUGVuLG29uKbBo4ufyKauPd3dDv -wR+JqEmAg0ICIFR+q81dEWY/gKsyyI5hMYTTsWge3l3FAdwMZEn9Ek0nclSb3dev -ZiVlFvMZPdp5IwZljClRxnyto7bOTw+X/RMuVLB6p+v3URY4oUSL15+RNODn/tWM 
-zJOG+48NgohVKfBhGN7JyxV1dq/X +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQCnfQMbGQzzWl17 +NuJQannAeiYFX9mcyKi0ywS1BOsNpDn8SRgeW7Ymj8EqMYIUv0VK5QDpsJKUQ4ZD +BE/fdrplyXhbR0/aMpAATxP4AjnzH0pw49aIe/n3KTdBZj/KF52qO7WG2rcJ5GSV +r8Z6H8FeP02GRt2iOAbLqV5/k52gBEEzjSJ1Be1DiRAMOQL/Wahyo1XTfvB0UeG7 +nIFNOFiTdQmgS0s6OUYWns8J2jcSO8XoSKCxuNz+ZzkisX7xicRFjkweLKNLuYmS +4U1xwd19JMizaCRoK8E6NnOGVrxP/r9am5ft3QgC6AIkzZvNcylcHkkhZo5++X5q +C18umKjPgxCOzAop/pGeiItfkjnshccLPgoPBI1W/gO6puxDaTV3HFGZdy5rtV/6 +MxjEbyUf69aKaBEY9d+mIw0OB8TfyqzNSoU578rTbuEqwiG2f1IDe0KMB2xkikDX +GXPzYYRVdm4bmgVmY3fjpoiM06+aSN1IbYEnmvuB98z1nf8VyRdR39jVk80udfHH +dQWuxTBMEHBtAjT6HC0tKIkeGKB5kmozPIVL+6EbI0JyuRVB6wyCrmnpv0Mw1NzS +F5NMJiYD/5ScSKhnFogYEstq8Lgj5atyrqi703bNGDVGmbTqChBG133r30WNT9JJ +4rzSKznDJhoHOgQqagkzGgHsKeAmiwIDAQABAoICAHkAXOUP1QZe65hfz2LPecRv +utY5KCsX4KI05eKtee9yDR5R5GXSVidHxgLon5TDlpkEFwO9uDf7DJ2QGPBVg1aU +FirDu1HlI5nFh6SuXxVhLtOeFtil0LIaibvq1fz30MUyu/OAQaqY4X4u7lI+bOHd +E/IFcouGtIogg4/hoof/aueGeDVZIc+fzwM1kQ/Pw12G2TOhyrAOk+mJqPST15I4 +hMrUerXGuPcQpnz0tMKsgk9NYSLkbmwxQNrqps5zfGPP6PgHwbWshlKiCOQ9bfnC +QGk0vNCxg7i9q/qK4SNd5Prd3AZRoD8RRLM4A+6K23+ctbK2uA3Ny+Fq88njKlkN +jYxHPlkZ+b6nGzYxwZ4pbVYR/rpmrKdnrns1t4l/9GCOwMhDZe1jnRZaimQOoQQs +8hHMwxDisqsOjzd5ozQU3dVgxmG/n0jGjjtBIVp8usGe//AqRmZ7SVRNrglgG6FI +vqYxwCvum+DEJ4X5ONDyyKddmccGkCpj1lX6xPBtEkp7VupKx4KHW6ufteQYSkdh +U80RrCPaIKoFm1y5Jes9vOtICtvRk4PVNfLXBBycn0WR6aUQEkFHDvSHFlCnROPD +UdABH1r3bSMruz5vQrdIA/6XGHXzjHmq1WN0pKynwberFwazAxDlD/1G1ZbPD4Dg ++Z9cpyZ7Tlxj+T4hSfExAoIBAQDT8GU76CpiHL1VkIA9Khk3j6Nglq8ALQjRUH2S +ReqcAhvpUiSr6G9lt9PWGm5iWv4qpRoln+HyBDXpWOxaTR/PNi07J0NeTGxk2+in +IYNMDg9IILDGlbv5tKen4yKD9Yb7VHD73DxhDbSs2U+o49eTdfXmlQfErd15alZy +m19Xl2r2jPtYF8diPEp2m2dUamArnA8Vd3jdFAxTsfP8eNeAcCn6R3jEfIFyMm02 +X2H4iGO4Ec1ykVdiFxiemElLjm3Z7vVek88Md0KgjuAxhv5AuogRYtgMIMlhNhco +hfNgXzVtvxuDFJ3J2rlA20T0htSz6xsy9ZVUYRWkDIf0tPRJAoIBAQDKTufkXmcC +wXhkjnUMmYXqnmul6CPeYbXyfX1CZhtAlZSXtzAJ/2e7eRlqb7iFJuYfTdpkbN6i 
+pMrSb5wfcPtl8RRC+MERMCbe/LB4DND70QSLy/u8mWIsjbkdIT6SCi8rlNzfaL7U +cOb4uzmuXOhZHvdw8q9YTjXlT/EjKoMa2a+RggVLnICTm5sG6tsfkgiH5+DSsFCL +kKQt9Gtc66Q8HfyJW4ljK2JAOjYR7w3+bDXsodrxUQrl5maMALeBdkenc9uzwsmE +2an/R0NQToJJku+gRKVbJxN9jyMEVMygmygxfmW5qUyFZ8W2dd1Rkcr9jzNdVPlT +a6KgEGHPP5wzAoIBAQCqma3DpUTIqT227JRtp7+Yu+TVUTYZPCcG5nXOEr2hSwlQ +rTCbuIRDKtA4XhpQzdIeXbxIYQStnboP1eabYc2jLIcIQLi35WizX1lNf2qDBCZE +9xuVHt6rSEJUoD8eXbuEABraghOQREoVgO/gkVbsel2weHJCXXoTzAc+RddfWKFf +SWjhJnL2nnWKN9nbV62GLR7vNrZxrzuk+2/c4SEHYEJKFtIdx+MjG3hR9kGUn6U1 +fA8Wk+v1J4ZH02ncig/fB703nl9iN3XIbHoHJBTx4bS52gjy6klwGOxXUEvyXXFS +oCzzPNsuqwPIMzi0ZPw+v5erU4ga3fNflD60Oh0RAoIBAQCXV84MPj7rhdZNy3Bu +246eBKNdOrtSimA1poEFIiNy/jNqB+WNJR7x1VcZE7jDC2WNt40QIY2vuH3uTQZL +UxcOnPneW/76n74EhJ5zQIs6RpQTDKcm4Mvbrq3zx8HqOGovPS66hr5zaH6xRkaR +VPmQaiULvtFDy0ZwZIxtFUl81aqMvOq/NLXPNtITq6/+/x0YpnO+yZ2Hus3Hfxiu +K63yNzCLhQnTQUo/6Aw5AE/ErCju++oxKsJvWBwQ0hx1YgmakIakBK0CkF6nFSWb +NxAqgBx5FcFp3mKrRGAaxmFKKKg51me9K5SOHCKBK81ETz++zdjMElxudo/zFC5H +fzuXAoIBAQDTFfoLQ2XC0PI5x77zpse3zOvIEvsh3e3tLUhOHLoLTUqX0Nvp/7m7 +ohTHrPU4lf4ERInL4Kz7y2iI2yjRiKD+ARYHDYVx3QgttNeUFnsODnseJTnBcgFB +EWlThVuxbvEzJAZXNVtKTHmCLKFHqc9epyfdI59uD5lXQjvNp4uNEPiEE1AX0d8d +0OuFfQ6bSK1rZNv6IkRPTl+LCWuyHvZNWO1WZ4IUCg4XLr1IXtQaRIMRDxyxbHwc +6vwk0JtRxCt6wvIb/YWUK6qjjbfcfH70iDComZRTyXLGrr8w+vtmqvD43MUGIEmq +XHot6Ki4FhjC4ks5oT9m2q6TerUyIg8Z -----END PRIVATE KEY----- diff --git a/testdata/x509/server1_cert.pem b/testdata/x509/server1_cert.pem index 3e48a52fd108..ed6bc02c4ce9 100644 --- a/testdata/x509/server1_cert.pem +++ b/testdata/x509/server1_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIwMDgwNDAxNTk1OFoXDTMwMDgwMjAxNTk1 -OFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL 
+BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIxMTIyMzE4NDI1MFoXDTMxMTIyMTE4NDI1 +MFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAKonkszKvSg1IUvpfW3PAeDPLgLrXboOWJCXv3RD -5q6vf29+IBCaljSJmU6T7SplokUML5ZkY6adjX6awG+LH3tOMg9zvXpHuSPRpFUk -2oLFtaWuzJ+NC5HIM0wWDvdZ6KQsiPFbNxk2Rhkk+QKsiiptZy2yf/AbDY0sVieZ -BJZJ+os+BdFIk7+XUgDutPdSAutTANhrGycYa4iYAfDGQApz3sndSSsM2KVc0w5F -gW6w2UBC4ggc1ZaWdbVtkYo+0dCsrl1J7WUNsz8v8mjGsvm9eFuJjKFBiDhCF+xg -4Xzu1Wz7zV97994la/xMImQR4QDdky9IgKcJMVUGua6U0GE5lmt2wnd3aAI228Vm -6SnK7kKvnD8vRUyM9ByeRoMlrAuYb0AjnVBr/MTFbOaii6w2v3RjU0j6YFzp8+67 -ihOW9nkb1ayqSXD3T4QUD0p75Ne7/zz1r2amIh9pmSJlugLexVDpb86vXg9RnXjb -Zn2HTEkXsL5eHUIlQzuhK+gdmj+MLGf/Yzp3fdaJsA0cJfMjj5Ubb2gR4VwzrHy9 -AD2Kjjzs06pTtpULChwpr9IBTLEsZfw/4uW4II4pfe6Rwn4bGHFifjx0+3svlsSo -jdHcXEMHvdRPhWGUZ0rne+IK6Qxgb3OMZu7a04vV0RqvgovxM6hre3e0UzBJG45Y -qlQjAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFFL5HUzehgKNfgdz -4nuw5fru5OTPMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh -bXBsZS5jb20wDQYJKoZIhvcNAQEFBQADggIBAHMPYTF4StfSx9869EoitlEi7Oz2 -YTOForDbsY9i0VnIamhIi9CpjekAGLo8SVojeAk7UV3ayiu0hEMAHJWbicgWTwWM -JvZWWfrIk/2WYyBWWTa711DuW26cvtbSebFzXsovNeTqMICiTeYbvOAK826UdH/o -OqNiHL+UO5xR1Xmqa2hKmLSl5J1n+zgm94l6SROzc9c5YDzn03U+8dlhoyXCwlTv -JRprOD+lupccxcKj5Tfh9/G6PjKsgxW+DZ+rvQV5f/l7c4m/bBrgS8tru4t2Xip0 -NhQW4qHnL0wXdTjaOG/1liLppjcp7SsP+vKF4shUvp+P8NQuAswBp/QtqUse5EYl -EUARWrjEpV4OHSKThkMackMg5E32keiOvQE6iICxtU+m2V+C3xXM3G2cGlDDx5Ob -tan0c9fZXoygrN2mc94GPogfwFGxwivajvvJIs/bsB3RkcIuLbi2UB76Wwoq+ZvH -15xxNZI1rpaDhjEuqwbSGPMPVpFtF5VERgYQ9LaDgj7yorwSQ1YLY8R1y0vSiAR2 -2YeOaBH1ZLPF9v9os1iK4TIC8XQfPv7ll2WdDwfbe2ux5GVbDBD4bPhP9s3F4a+f -oPhikWsUY4eN5CfS76x6xL0L60TL1AlWLlwuubTxpvNhv3GSyxjfunjcGiXDml20 -6S80qO4hepxzzjol +AQEBBQADggIPADCCAgoCggIBAKtk472NGPcQhDL9U6wsYWGOachAw5XX/a7lUBh/ +yowV+qD/SRCbspeBfiNdMNoXh/LPgyePWhAhskT2XaSJZ5cYD6VpI9Q55lFnFzR7 +Q1bw7BLaD2q83BJkrUSGyDnxH+LdQc2+Gq7rj1PIpIDBaJDtdd8U9bcpP6rH+S9Q 
+yGQw5OniPCCsUrnx8ym/3lAhKdn2OWXLq+F1avim8AN5dQj0fvAI2kMQdswKgY2M +bs5E932WPtjwLbe80A7RtHPIrqvsdVIoaZav7g3liKekisBuJGLMtTX3hBct5an2 +eKu3Q901bEQXeMWrToekc+DnUsmQ5TwkXatWiE+/sMWg80KNyWt8rulI4ATF8go8 +7Jl8duyb/jvULXjTRdDae6w34gNZjq9jZH2qSVIiLV3Jy0GadyRVJDVyE8Lz8EiI +XkbhgbjL8fpNG8cjN+58sK3TNDuP480A/Pi/9I1BoPYTSPCD6H1KPQJwF2GZVmgj +epF328/RGjl0bfaY458RRYZafydblBpIhDsLBBRmDMkFh4SghAgOwoQBjsEZpCmF +efzzPmJybfloBdmBiqfrEXP8t3J4jBzP5+qhYZRxHik0ignOWwyDtQQSUa2JTJoE +/ET8bkO88XLL7hkAlF+eLVV8ao4oXRh5yjf1c4PvJ/Zfr80mYJYOvOlA8Me9/+A7 +jZr3AgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFJmANOTd7MASs5/K +mnBYcpvzmgLcMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh +bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBAFH3XxzcRkP5053jgcyV1/L3hNY3 +gAKxdDjYSw9uk2saJXz4gjfvc8xyIsWv35QU2yzzvN3xaGfDP+qVh1fNmC/7DdUF +LJzYTJb/wm4atVM6oYzdBhu/b3NqtMPb06NFKqyEX4SSN2QjeUVUF1QgAkjmsCiE +79NOuoCO0aWcxgdKd18Wl9MLtG/PtlCMLRcPlx4FX6OYLcOeqFEtcOQXKYDWej0r +9m8JQ2DAGRSa3AOUYskN12GacEmchC86cnlMsL2AycnX1YzOBawgp4KKSur40iPg +S1+8LRjZA5Dz88+a+DXb619ckABO7v8b0AVlbkVVmaXNnhBEU4vqdfQa4BygaGGl +BG8hYYMoNBHpNDr8bBWwwl/WVpGUIMHpOnIJnG5gAGrNiAxH1/6DYFC+cXEOH5J2 +NpkJ5O3Jm9a1xtwBs3tSp8GEORqVrpIjiK+bUWQacss9nsyyO6Oo0S33javSR8AN +nJrGHBE01/QytEpJ53d3N0btZrByhiFZkh8BG4NdhXZsAaQjVy6EEHxfJBsfl8Z6 +UGX4T/TTkASNDWA4B+/nRD/BxrcSegDb8fE34GY9M0IWgQtmMIdR49bOxygzYMFK +lrh+dwGqZ9/xqJ3ro7sYphhJ+Gk5YL5lkZygF2/F9GJY3zOcKrFUalfJgaqKOaTO +6zW3ZjSyDhQoFNR1 -----END CERTIFICATE----- diff --git a/testdata/x509/server1_key.pem b/testdata/x509/server1_key.pem index e71ad0ac9753..5e5331ab5d98 100644 --- a/testdata/x509/server1_key.pem +++ b/testdata/x509/server1_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAqieSzMq9KDUhS+l9bc8B4M8uAutdug5YkJe/dEPmrq9/b34g -EJqWNImZTpPtKmWiRQwvlmRjpp2NfprAb4sfe04yD3O9eke5I9GkVSTagsW1pa7M -n40LkcgzTBYO91nopCyI8Vs3GTZGGST5AqyKKm1nLbJ/8BsNjSxWJ5kElkn6iz4F -0UiTv5dSAO6091IC61MA2GsbJxhriJgB8MZACnPeyd1JKwzYpVzTDkWBbrDZQELi -CBzVlpZ1tW2Rij7R0KyuXUntZQ2zPy/yaMay+b14W4mMoUGIOEIX7GDhfO7VbPvN 
-X3v33iVr/EwiZBHhAN2TL0iApwkxVQa5rpTQYTmWa3bCd3doAjbbxWbpKcruQq+c -Py9FTIz0HJ5GgyWsC5hvQCOdUGv8xMVs5qKLrDa/dGNTSPpgXOnz7ruKE5b2eRvV -rKpJcPdPhBQPSnvk17v/PPWvZqYiH2mZImW6At7FUOlvzq9eD1GdeNtmfYdMSRew -vl4dQiVDO6Er6B2aP4wsZ/9jOnd91omwDRwl8yOPlRtvaBHhXDOsfL0APYqOPOzT -qlO2lQsKHCmv0gFMsSxl/D/i5bggjil97pHCfhsYcWJ+PHT7ey+WxKiN0dxcQwe9 -1E+FYZRnSud74grpDGBvc4xm7trTi9XRGq+Ci/EzqGt7d7RTMEkbjliqVCMCAwEA -AQKCAgEAjU6UEVMFSBDnd/2OVtUlQCeOlIoWql8jmeEL9Gg3eTbx5AugYWmf+D2V -fbZHrX/+BM2b74+rWkFZspyd14R4PpSv6jk6UASkcmS1zqfud8/tjIzgDli6FPVn -9HYVM8IM+9qoV5hi56M1D8iuq1PS4m081Kx6p1IwLN93JSdksdL6KQz3E9jsKp5m -UbPrwcDv/7JM723zfMJA+40Rf32EzalwicAl9YSTnrC57g428VAY+88Pm6EmmAqX -8nXt+hs1b9EYdQziA5wfEgiljfIFzHVXMN3IVlrv35iz+XBzkqddw0ZSRkvTiz8U -sNAhd22JqIhapVfWz+FIgM43Ag9ABUMNWoQlaT0+2KlhkL+cZ6J1nfpMTBEIatz0 -A/l4TGcvdDhREODrS5jrxwJNx/LMRENtFFnRzAPzX4RdkFvi8SOioAWRBvs1TZFo -ZLq2bzDOzDjs+EPQVx0SmjZEiBRhI6nC8Way00IdQi3T546r6qTKfPmXgjl5/fVO -J4adGVbEUnI/7+fqL2N82WVr+Le585EFP/6IL5FO++sAIGDqAOzEQhyRaLhmnz+D -GboeS/Tac9XdymFbrEvEMB4EFS3nsZHTeahfiqVd/SuXFDTHZ6kiqXweuhfsP1uW -7tGlnqtn+3zmLO6XRENPVvmjn7DhU255yjiKFdUqkajcoOYyWPECggEBANuYk+sr -UTScvJoh/VRHuqd9NkVVIoqfoTN61x6V1OuNNcmjMWsOIsH+n4SifLlUW6xCKaSK -8x8RJYfE9bnObv/NqM4DMhuaNd52bPKFi8IBbHSZpuRE/UEyJhMDpoto04H1GXx4 -1S49tndiNxQOv1/VojB4BH7kapY0yp30drK1CrocGN+YOUddxI9lOQpgt2AyoXVk -ehdyamK4uzQmkMyyGQljrV5EQbmyPCqZ1l/d0MJ9DixOBxnPDR9Ov9qrG4Dy6S/k -cH8PythqHTGTdlXgsBJaWEl2PyQupo3OhfiCV+79B9uxPfKvk5CIMVbnYxKgu+ly -RKSTSX+GHVgNwicCggEBAMZcwQIAA+I39sTRg/Vn/MxmUBAu3h2+oJcuZ3FQh4v5 -SL80BWEsooK9Oe4MzxyWkU+8FieFu5G6iXaSx8f3Wv6j90IzA3g6Xr9M5xBm5qUN -IqzF+hUZuKAEMY1NcPlFTa2NlrkT8JdfQvJ+D5QrcBIMFmg9cKG5x9yD7MfHTJkf -ztMDFOwP3n7ahKRBowfe7/unAEFf6hYFtYjV+bqMDmBFVmk2CIVtjFgO9BNBQ/LB -zGcnwo2VigWBIjRDF5BgV0v+2g0PZGaxJ362RigZjzJojx3gYj6kaZYX8yb6ttGo -RPGt1A9woz6m0G0fLLMlce1dpbBAna14UVY7AEVt56UCggEAVvii/Oz3CINbHyB/ -GLYf8t3gdK03NPfr/FuWf4KQBYqz1txPYjsDARo7S2ifRTdn51186LIvgApmdtNH -DwP3alClnpIdclktJKJ6m8LQi1HNBpEkTBwWwY9/DODRQT2PJ1VPdsDUja/baIT5 
-k3QTz3zo85FVFnyYyky2QsDjkfup9/PQ1h2P8fftNW29naKYff0PfVMCF+80u0y2 -t/zeNHQE/nb/3unhrg4tTiIHiYhsedrVli6BGXOrms6xpYVHK1cJi/JJq8kxaWz9 -ivkAURrgISSu+sleUJI5XMiCvt3AveJxDk2wX0Gyi/eksuqJjoMiaV7cWOIMpfkT -/h/U2QKCAQAFirvduXBiVpvvXccpCRG4CDe+bADKpfPIpYRAVzaiQ4GzzdlEoMGd -k3nV28fBjbdbme6ohgT6ilKi3HD2dkO1j5Et6Uz0g/T3tUdTXvycqeRJHXLiOgi9 -d8CGqR456KTF74nBe/whzoiJS9pVkm0cI/hQSz8lVZJu58SqxDewo4HcxV5FRiA6 -PRKtoCPU6Xac+kp4iRx6JwiuXQQQIS+ZovZKFDdiuu/L2gcZrp4eXym9zA+UcxQb -GUOCYEl9QCPQPLuM19w/Pj3TPXZyUlx81Q0Cka1NALzuc5bYhPKsot3iPrAJCmWV -L4XtNozCKI6pSg+CABwnp4/mL9nPFsX9AoIBAQDHiDhG9jtBdgtAEog6oL2Z98qR -u5+nONtLQ61I5R22eZYOgWfxnz08fTtpaHaVWNLNzF0ApyxjxD+zkFHcMJDUuHkR -O0yxUbCaof7u8EFtq8P9ux4xjtCnZW+9da0Y07zBrcXTsHYnAOiqNbtvVYd6RPiW -AaE61hgvj1c9/BQh2lUcroQx+yJI8uAAQrfYtXzm90rb6qk6rWy4li2ybMjB+LmP -cIQIXIUzdwE5uhBnwIre74cIZRXFJBqFY01+mT8ShPUWJkpOe0Fojrkl633TUuNf -9thZ++Fjvs4s7alFH5Hc7Ulk4v/O1+owdjqERd8zlu7+568C9s50CGwFnH0d +MIIJKQIBAAKCAgEAq2TjvY0Y9xCEMv1TrCxhYY5pyEDDldf9ruVQGH/KjBX6oP9J +EJuyl4F+I10w2heH8s+DJ49aECGyRPZdpIlnlxgPpWkj1DnmUWcXNHtDVvDsEtoP +arzcEmStRIbIOfEf4t1Bzb4aruuPU8ikgMFokO113xT1tyk/qsf5L1DIZDDk6eI8 +IKxSufHzKb/eUCEp2fY5Zcur4XVq+KbwA3l1CPR+8AjaQxB2zAqBjYxuzkT3fZY+ +2PAtt7zQDtG0c8iuq+x1Uihplq/uDeWIp6SKwG4kYsy1NfeEFy3lqfZ4q7dD3TVs +RBd4xatOh6Rz4OdSyZDlPCRdq1aIT7+wxaDzQo3Ja3yu6UjgBMXyCjzsmXx27Jv+ +O9QteNNF0Np7rDfiA1mOr2NkfapJUiItXcnLQZp3JFUkNXITwvPwSIheRuGBuMvx ++k0bxyM37nywrdM0O4/jzQD8+L/0jUGg9hNI8IPofUo9AnAXYZlWaCN6kXfbz9Ea +OXRt9pjjnxFFhlp/J1uUGkiEOwsEFGYMyQWHhKCECA7ChAGOwRmkKYV5/PM+YnJt ++WgF2YGKp+sRc/y3cniMHM/n6qFhlHEeKTSKCc5bDIO1BBJRrYlMmgT8RPxuQ7zx +csvuGQCUX54tVXxqjihdGHnKN/Vzg+8n9l+vzSZglg686UDwx73/4DuNmvcCAwEA +AQKCAgEAknXlUv42vjGD9pqZnMBT+uyaooANYoevBXx5ZGYXbHv/rwJXqnSSOXtz +kb651zRSfQAswGp0eOKClwG8ZbTxK6FpBV2CO4G6ugcRQkyu76Vy5m0mzXxTxvf3 +RF60zSaqq8+MwsbXwHAVC3CielBMDcSNfDNKAdmiyUqXOoKaq1tI0j/8R6NaEgGa +XCvUSr78J4CL7dwMpd4TqiXlZeKtSxi7PF0kPjjce2Hi8VV2/pbaspvoWrNrLd6Q +IIm83VA5SzsFyk40ZIs0LvXdP/yQgP3d4/uwQkyfuLsEzaeL2JkDyg0z1kAEeU35 
+DlpOl3q1OP+zlCAzVw3b7+ILqeXu2KOLPBCoRTo2wWNKutxcExmA0oIfMSgyb/P9 +36bNsdmT6b/6C9ZeJpsXZWedx7bmlEYg1patfk1CgD6WMynsl+fQ34F658v/JWzY +b8JpqAG+2ov4j4EyRnwK267/6u/Yw4CvQw+8giKIDMv6W81b0GWpCBT7PkTKgUaW +1Hq46z6xZ/NttiTU6qMsgUskFheU7IuO1KHl1kpLBERio9S87ZxkjLGfvxl4+sS7 +7OdgIVsM1d50RYy4pipNplQXw998kitPvNFcSOK5vnxqW5sz4SRzVAUl5sAqJ5GC +0MPqq/I+H84BBQwwviQ9WsBuA1+YW6NxfY/mldAzAjg3/yUMwokCggEBANTyvgnw +3SjBOKzg56QXRs1L2eqQzqtDzyOn2BXlMvT3bJFdqOHROhqqD0oSSbYZuCoJAJPh +/W2bqengGSbsGwKKLHDN0yPWR12QVq3y9gbK/L6Qktjp2zDhHqNgr+4SSSl0gMMF +bz6Nzn+0SO6C3m+M6hAgfsuizIhSCxBSSLccFSIT0ZiYRzq7rN8FlirpSIhd9cQ5 +B3q41lebUHpfciPr7K+psmCXO9NqtSvXtcMA/n7GyIGPBVDp8kMUwzuH9OVOkZkK +Z1a42uuYKs/zgnbXV7kCZ6iBQkt7A2Scv/IIwdeaKxfTv87e8UqMkYPuf4wgqLq1 +USoMoHdz6JmQnw0CggEBAM4LfX4PIIGT+OQG5kn54ZcsqerPljPj19EBe8U3jyf3 +OzxDkSX0f2fmBVDjqj8QRO/PcXQbGUXWhJN1DvvuJITE93Y5Pj8DJX3CfLEnjE+b ++sfuSxNawH9NwvwNt42NWDleMAgfMot3J+MwlXb31BWixCMj41Vl4VrkHEzX/M6R +aAWcTqeY59KumtOvZO/4U98VvNyOipLHWcWZuwJLzeUjvyZ+hJQ6kBGJJYNezYm/ +qhHZ5k4bz8HAUSgih+Yb3DNmREk3YqIM4Skq843YdF8SEnFw/4b7PKMfmBXy3X72 +f51wdCiSkIm9o+/QLaduExQd7AhmFO8avZa5bRNNwRMCggEAKY/DBX+sOoMTw7IV +o9IjMHhobL6ch5Kxf/0HUKauPl94IhsMlh5W39NnLobJOjBk4FdndHV8GAN0sz/Y +yN72GpXLPKz/U5RD04ATWtn7qLG/iJYBAzMJY83cQ/jf/XA2NVAWvXl3D9dvgT83 +qM2ECnOPT1x4QthgYQ7aN/JHXO2vNjp2AvldlZoBkHmvqGpljLACAq06x3oB45Fd +sLSmO1qVlGdjeDSsKYQ/HfJ4+DlecnHrulWmrPcsIGmR/TF427Rs+FiueJ+VorvN +R074nKdE6MgOYTXxMXgt3lo1oFCTPLhLRtg+LGsY3vr2f7Bx1nCdXet7juBuBUJr +GGXAlQKCAQEAxNVHMfijdgX022kX8A2Ni4x4Wj+q3rFHR3viUDnOQUC2TtDBRX/3 +gjrEU0zaI1qYcHs8h80nbIcMqY1HHjaWnltHh6IRq8KGu0fjNJ1yNc7tWLd08u1c +PYD8xysXcVtYr50hx3B+KatP6IJOFpOUAIM4WdV74+XqzZhizKn88R0JQWrb3NF+ +jM6OS7EffPs+rDuo6w4kpSlZwiIk+4GNFNv8THrKjowPeyEIPCKBuZjmkB0YHQAG +jbH6FZw/NPzidBu7GjKVv/cL1fcZKiVgrj2mbsai5MD3YWHaOQWEwTgcGzwFS4kQ +GPWYOY0nP+4wvaQECtXyI6To/qbu42UBDwKCAQAd+/H4mepB8cgchZobFco6yTt3 +qgS1D9sOFgWYZLkxIuQTQH0C1cLHXIiqf0NrJAOBbaJ+vVuCrqv2MUASaxKoDK7h +1W413yKIELWbTk7yEttw0M0T2PXmI5dNdKuw5I5hw+MmjLOFmyRzvX870ihDnM4F 
+MISxV7K45t1EHjMsz66fMc8BIkophwK3/7FSok5XhYdLQdQS9Rshv0PXQmffkVUM +UTlrwgH/3WGRhkFbmcdBMlawHQGvjiyZ+Gz+wF1uhzEYweT6wUfaHZePxX0hXqom +WVS6ojlUji6NNJqFN8DB4q8V5/EShj4fpdjenDap5IxFgDSxgSCShR2FGTCW -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/server2_cert.pem b/testdata/x509/server2_cert.pem index dc20b468e5d9..753065b49ad5 100644 --- a/testdata/x509/server2_cert.pem +++ b/testdata/x509/server2_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIwMDgwNDAxNTk1OVoXDTMwMDgwMjAxNTk1 -OVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIxMTIyMzE4NDI1MVoXDTMxMTIyMTE4NDI1 +MVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMjCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBANluCTNFJz8gsMgn2ixQuk4YphdLfbsgOlk2lRFx -mYBpfD2hfZpnr6c67WNIWBuvMy57z+FWcmmA2iVabEs4OGPaQj5R6cngai01QNPO -d0gPpcAW/4KuVAYOYiYWSrVOTj8aTZm4buG/VMZMUKUMS0JNXSuYLZrgD23Rsr5K -j6q2fqRFtcC89QW9opafa4oTmkp6Kz/WrphF4EsK1fbelZ8xQ4+TOkIJegZMS+vA -r3itgA3ha1xqzUU9+A4xTg8HybRzJMAbtzO0DJMzmfDXXwIzAsdsYerDgaoYlBtP -5Fnod19g8k8NIJduF8dPRfnyn8fFVisT4fWet59/1jcXUbdsgdPLuuY59sxT/C8o -HLfn26w4Wda0Sc2XN5qhXwezkPX51mOw2siP81jFeHRQE+J0IOfxjfpdbI1+xdIF -vsu42NdmYa7a7ejhilZxDYRZSaJLLYE/ZDiGfTBZVoVKRNbM0EZ7VRCN9pN6i5jd -WsHCjdq1u9rzplA0D3KrycUvlpZc7xFaJxTiVFGiJugJmTJoUpQHnF6chZsGukhA -pypSB/f+r4tPa81N5X9f9vG0WBXiKGaoWVJXmNOQHaqAYz7maO/JCetjtUn6IH7V -Ti0qK4yeVh/5GZzC7xFfTmO4oWbz6Cb9FKPSsVjvo/n2Zo0e7CSVKc9oFFBwgjg+ -p6bvAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNpFUJbVy5fb+Uvm -jKzuUDbuWctCMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh -bXBsZS5jb20wDQYJKoZIhvcNAQEFBQADggIBAIaUCF04BpWeQkeUsslTSN44Q95U -oNlRD19fNXWF8eae7Wl53dFkRhn2nyqx0uoHvFZ5oRhF4v8kzM1cyW4RyLk9WTnh 
-Lmg/jfr84bSdWvN8nW5T2jNvq0ltSY414MFu4fHf8/GMbpIKtafFkisFXmhKm8Uc -zVilTn9Wn087Lkg3FHYVU2v0oWfupM5Qvq6tvZxT2v+7nmES6Cip8Z9U7km04yxV -hDy6YFdz2UDUYlZaQCsLPmaiIxR/EclSsL6KnMW3UjMyxX8Eft1WPwvTzlQKQFDs -uEfbq+Cl+cogMaGq1VvAA9cvCUSa1hTathWayKH2q1mPH8sqtbFyged7XXh8mkkf -8qeYTqfeL74I405Gl3u3/EjVnhSLpOqQOgn2E5HnV0bZaJmGHdU0DIvOyKauinyg -U4hnL8WBv5en9owQvE+DrivbcG9brqEY3wot0XNzB7pxXjrWdw/PMc/HNPbBsT8s -Zg0gwxwvpffGemc1L8tiM8aHOp8eR1oVr4szuNDAbAfdEgpwBctXs5JJg81zsmGe -2jJfHFAeqwhUZgCoF/FjJ+IHxOFZx9IVwrlawPadIFgVh2I0rFUcME0B1/Vk46Gg -BOiuP9keVX+qhKtqjnfabN9l5iX+zpniHIarke2o6W7nYIgdOtdbmH4YNZxjyidj -9w/3d/4ItCavbKAn +AQEBBQADggIPADCCAgoCggIBAMqkUFp6xBzIksawPZpDQCZS+ZE/Pjfab4q7CUd/ +pN0Ss0U9MRBBnYj767qvwrOGQYdpkK0NWh+BtUOQHaqsnD+ykr1x2i27uMvYBnkn +91t8EmfW8u5cj2lZGM8SRXaiCc2tv3ZFDKeizIp5BcxLsTubRZNMTOYxUCpFoABo +d6DGYXaU6LFNbIhXQ/vTlbDa85a5EjS5SWeBzEEFgBHHQDdjh+cqS4bzAgI+klYT +uYM3PMbUuOg49hVQjW1pXSGO8Yvha0fzZP4upUKi6h1+/xBUZdDiWuynEJZuYPAC +xqgnmgSkexNLJwrg/6JPr04TfbKagffKDHKhloI62CZJ9/VRwo7OF8dsl1Y+cr3T +XjDtsxmypDIi31szDYhE5V1P+FAGP3sewQBRh597x1RA8tGE3ijIV6iqSlSo/Lpx +HaVM5n3PWY5vUNFWdbawMDuCUvi9GYn3JVnt4YUxjsMxLC39FibzUmwD7JXqx86t +ArPRLU6knrcod72dKRnUGi446Z4dElqMngyu2Z08RB5iYzDwT+xugwki0gezQ2uB +59/968+MI3eVe/QWMwtIAoZuHKvPsAHOb9bAcBvLQi94MWLbBUkAga3XRF6CNAGq +LPNdhznCNYdXMnMdBQHAzu7V5+yIB/ZFQ519Fm4RinCG6clIi326A6GNYenAJ/Jr +Xa5tAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFBfefRYaa/BmcX6g +eSQSMrh0zktEMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh +bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBALpTW6UICT8SyCY7VNUja51t3+XF +QoD8xKu2qS69G/oDbxe5SF3ldymvkQPqVwnW9e34mjxD1Au4IF8zQWv2EMm96wDD +0Js0/yAMjw6/60f0hF9lpQEqe24W+wgbRV4Fzt3/rybLte+4L9chvq1plHqQ3sfa +s99D7bfPSLX8n82ppNmm0U81kXmtNAw3P+vStBqRgrZNkbkkgsoGZsTgzKuD+H81 +WUzIqmIAfTmkDN47SXYuneULlFNWwtHgTWv4jq2/ptYo75MQq+ExwTDGM168x17u +yaC634INTjfNd04exiktBJXWmAS8K4aYgvHPgcIlzyidR7taI0X1O4mR4qomh5W2 +fVmGkpQZCmkW80whgycY3ui2fdWYOs3XGdfz53fJdN2vWebpUjw7+1owlmqFJhEe +Ct0wqLLeE8rdOfKueh3/xi7CxoeYM2fVjN4gHPojcQ3Mcs7wiJDm0WFaITi6+KDS 
+LmGhSHKaiXiGKsbLykN0DygDQYa/c4t6NfzoRGWKMhdAcRXicZaxnwPD65psAijv +ZDgONwXeHKgfk7DnE3rs+D9xuh2ciw7lkcbImYmOCMoV88qG0t1uIwlM3xh8S7Oa +DH6q4vj3pF63QS57uRtwCBCOa3xcYKTDJbdRyUKgAejVoz8bqTI8lBjnRTtxlQFi +4ugkg86X1370IY84 -----END CERTIFICATE----- diff --git a/testdata/x509/server2_key.pem b/testdata/x509/server2_key.pem index b0f6ddf70dce..82c947d70fdb 100644 --- a/testdata/x509/server2_key.pem +++ b/testdata/x509/server2_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEA2W4JM0UnPyCwyCfaLFC6ThimF0t9uyA6WTaVEXGZgGl8PaF9 -mmevpzrtY0hYG68zLnvP4VZyaYDaJVpsSzg4Y9pCPlHpyeBqLTVA0853SA+lwBb/ -gq5UBg5iJhZKtU5OPxpNmbhu4b9UxkxQpQxLQk1dK5gtmuAPbdGyvkqPqrZ+pEW1 -wLz1Bb2ilp9rihOaSnorP9aumEXgSwrV9t6VnzFDj5M6Qgl6BkxL68CveK2ADeFr -XGrNRT34DjFODwfJtHMkwBu3M7QMkzOZ8NdfAjMCx2xh6sOBqhiUG0/kWeh3X2Dy -Tw0gl24Xx09F+fKfx8VWKxPh9Z63n3/WNxdRt2yB08u65jn2zFP8Lygct+fbrDhZ -1rRJzZc3mqFfB7OQ9fnWY7DayI/zWMV4dFAT4nQg5/GN+l1sjX7F0gW+y7jY12Zh -rtrt6OGKVnENhFlJokstgT9kOIZ9MFlWhUpE1szQRntVEI32k3qLmN1awcKN2rW7 -2vOmUDQPcqvJxS+WllzvEVonFOJUUaIm6AmZMmhSlAecXpyFmwa6SECnKlIH9/6v -i09rzU3lf1/28bRYFeIoZqhZUleY05AdqoBjPuZo78kJ62O1SfogftVOLSorjJ5W -H/kZnMLvEV9OY7ihZvPoJv0Uo9KxWO+j+fZmjR7sJJUpz2gUUHCCOD6npu8CAwEA -AQKCAgB1i31B0HLlN+EadCEIsCPoMH8qPM+eKFAjBtUT9xwLRfu6veFPZhqaB8tq -TyQC43aB/MFnivqTeut0IixFhgFGSiph0prXXpFIG3AOkaH+vSbYcBZ2KZSXKZN6 -D7cXyVuX1bp6DjEzreJAyeUXNUxCbdyewsh04Ai3UBSXt2tv2PUiDeWyavTzw49w -aoMSxII3HVDgVElTXQNizlrZ+X9d7p4dsnReWw0y9nBc5XB3hyShXGpULhEHC/dc -hN80VPuAqHcHvHQQaZgaxFzGzUg5wiYQddGBv2wL7vmywkArMvfGAn08q1YhR41n -XL3x4G7s6wwogbk4tjOC8PN4GQ09YxbxJVLSyVIHX/v8tYe8H8acsw4LonkawZVm -HOgwMpz/hcm7P+ClYjAVUWZjCJt02svDV9U1BPEdBtOXrMDwlBfVuFxtM4GDkKmZ -GjCLnthpvBXfw6stDKuwE9g+TYVcRMsPhksjE9ZasTTVtFU/qZXhc2bDuJkWaUAd -yAtxBOQYF9mBN4g35NSE7k8FE3HxNDJx+zstodweq6qhinXchuKAeViap94tneeG -hoSt9PgMnOnx7V0wIK7DaGCH3ssxbjRQ2wRLdTNYAzhV+tkeDex2zf2xtOvqtWIC -l5gUSTUnaEYX5wVbCPAJIOAI1TtMe501PfXyZa8wb6p9eSHMAQKCAQEA9DJPKkjI -p+FLBn5iFgGE6DgiFD+K6OyGtr0Mle8D+XFDtlZClF1sjZHacb3OQgDnlSYXZaZR 
-iN5jKuVJsrhCgm9g0QYDwtp+m7JMMX9A19qZbbK71w9Qi80wuRZze0nktr6RKiyS -+x8VXkeSHPUSw7VbzbE/CCm551Z/ORoU5fXnDstPKk/M8K2NSYywwzwaEkEuu0NQ -/syGxaAW8mThruDAZ4gtJns6IyTmM+8KgkSnbwK5mlOMPhJ+6bHDyeV3OJe2lSVW -ZRA9kzDFAKlotpwRaSwBdu6chCdDhQGn/WlofJHCt2t5Fh9mK89AXQsXfjAh0O1N -7zrU/yeNIXJd7wKCAQEA4/CD665RVUwNffb7fa0vnt6Rkj47FdM/BmWpLnb1IC7L -87Fe9uryaNtghLD6T87vF3MtH2rEfQ2qwR9VRC4MyB5kNvozBVtJbKLy2oRD0/Lp -GSLhjAiKrzu8Dmwv/5iQhrSRr3mqn/eoIx5ydgot/+OzxgH5Q4CGYvzZUcIMVpi+ -eq4/39vLPQoa5tvT+n0G81sCCVR+sBtBbgVq8WaiqW6UunqP+B4+bPG6jbYMjdcD -w+ylakjJdAofl5SqcUcUy0UzI1pEjKnlLYyCyuVMlkhVZoaQiX9TTOTZ0jAXnbps -sDS0fwW1/8J5cSXxIA3q1WVtshst2LwwaCgYlVhHAQKCAQEA0x+v7BnzSZnx+JJK -EUaM9wyZAjKR0aG1Msat2+9C22W+qiVX+Nfw41EHsLDuY4hOsFe3gM3TzmafDFYi -ap79+bF73hu6IrwvHEOBtoWTtUusvPf7iQsXk1b62fr8KsqPMCQAc5sIFI8iNVnh -jKGh8Iya63Jj0ZXpwYW6Bs9y5AK/Gr5SGn3V7PvPnJhDtvf+fmvWkFa57yE7IB+x -1y27JSvxjVFh39RIRlw/nwT7a/cZX1PWzgOPy5bIHRnw8VwvwEECvV4DnOr2oYxX -tqPBAahbMTe3qHDR5zvfF16ANArvKEwJMfV8QdExz4ym1Aqj7BiHFBAnAj82Kcez -MAimBwKCAQADw2LKL1SUbe8DF2LLjmJs4wvQOErNb3Fo76C9baVaZKtlWJZSyUo7 -RPPw/OMFEkuMPZCPJjocPm+FRLkpqQD5BNduuO7CteEedApCZVChXS9QBO1oXHO9 -tOTD8DFSrPgl4TFOjlmszm/uNIB7Rmu//8hmCn5NCQAu/jGwUd3WSCtM5zeSwJQ4 -a8RJ73MufYXx2pzL/qMg0TJhWKGNXr5swbCe64sY85bgQZVs5YaLiPM89tk8SftZ -eRlQbVnrCNtlB71yZfkfwWZRPDKkmuiKyqLuUGZufrWnXVfjSnv5VKyatCQOvM9m -a5WJsrCqcNBhuYz4Fc7J90FtVswhGxYBAoIBAQCzNj4K/OrC5fX2sidbcaEU/9S+ -r8JZeCaxAAFepoFKE0LyspNrsW0CZ3Ana3B7SqfH3nAFLoLxExCgMm1WkvwLp22X -23Gav6cRG4XJjZjyLKW+rcowuhI2Hb6FE2UvshcDzlHpkISpjeY62Qx5gcoLeLlj -eQpqg59wL5ZweCOcgV/K2nrOILlmQR/GQ68XxvBLoj3J46fc+/iI8G1roGI2H6n6 -tRqmOxRFdmchkPfLPYq5Z71LTWD7m1E27k8apttT8P2mfQhZZ3YYERyiRTTYdO0i -0ZIi5+OqzTuZuefgurtHDnJe4rFT3/jZzKmI3IfbuRITxmxSgPd7cpuM22uo +MIIJKQIBAAKCAgEAyqRQWnrEHMiSxrA9mkNAJlL5kT8+N9pvirsJR3+k3RKzRT0x +EEGdiPvruq/Cs4ZBh2mQrQ1aH4G1Q5AdqqycP7KSvXHaLbu4y9gGeSf3W3wSZ9by +7lyPaVkYzxJFdqIJza2/dkUMp6LMinkFzEuxO5tFk0xM5jFQKkWgAGh3oMZhdpTo +sU1siFdD+9OVsNrzlrkSNLlJZ4HMQQWAEcdAN2OH5ypLhvMCAj6SVhO5gzc8xtS4 
+6Dj2FVCNbWldIY7xi+FrR/Nk/i6lQqLqHX7/EFRl0OJa7KcQlm5g8ALGqCeaBKR7 +E0snCuD/ok+vThN9spqB98oMcqGWgjrYJkn39VHCjs4Xx2yXVj5yvdNeMO2zGbKk +MiLfWzMNiETlXU/4UAY/ex7BAFGHn3vHVEDy0YTeKMhXqKpKVKj8unEdpUzmfc9Z +jm9Q0VZ1trAwO4JS+L0ZifclWe3hhTGOwzEsLf0WJvNSbAPslerHzq0Cs9EtTqSe +tyh3vZ0pGdQaLjjpnh0SWoyeDK7ZnTxEHmJjMPBP7G6DCSLSB7NDa4Hn3/3rz4wj +d5V79BYzC0gChm4cq8+wAc5v1sBwG8tCL3gxYtsFSQCBrddEXoI0Aaos812HOcI1 +h1cycx0FAcDO7tXn7IgH9kVDnX0WbhGKcIbpyUiLfboDoY1h6cAn8mtdrm0CAwEA +AQKCAgBa3LaS+304Es+Ne7UDmKgJByeUczEoxi9Bm4AbqSZ5Yksz/q4jReinZZ5b +hTfeW5LCbxlKHzSL8BMhClvjDaa6AQ4/F+/mlcfUzzaH2N3XDZkLKpyfOK2tZR/0 +qZKwERQoP4IcO/XirOLeLEnnQwFjYsodtBa/GNmDOtj1leIeGxXUoAx+g+Lod4iq +QENcm7ChoraBIZvCZ7b4aMj2L8uhimWDx7k593itHPVs10dViM0dsoB+0Bu3jvj7 +WEVEKN4yBI+gIYjlWHENohMryqf/4HgO45A1kOulKDUbKYN+HtO2xTHSgt4syJqX +YveOILs5/IHOY7CVLdNY7Z3B/WTKtO4UCzGtFWsD0Ai6rtfnSj2aAmq5uHWDwPHl +3fHdTK6knHdlaPWbeQiBIk4bT6L/JjH38dqBU17/RathjoCDQngNmid7AgSnv5o0 +5ugTCTzzFTUz7FnA9uYcEWIq7xDB5gVFTcvWcARYd2BsLM/9gF1Hh7r4A8gsHj2i +0+7Saw6mvAsPXp0JH+8idBk8khV8v6Uy1arF5aYF8yNus/hVr5fUBnnK6MZ+F2Nr +VOa49n5BhbWm/IsVYdgnnk4uwUx4yNuwMZ9/nSsDEZ6IiX9KZjap6zKNwhoodLV2 +T9WYKC7JMOEBr1CzoQL9JzvyYulZUp0F5SBMbB9kZj6j1gQRGQKCAQEA5QbA1hqt +0iy5KjH26Rok2pwi3z49o/sFPyg4TBOs5Zx7T/iBQM3ZlyOUdGT4uYFX83Xljbo1 +As/UIM1wzCSUbyyHGs7RTuoAWJ35d94UwdfOmw5j5ETAtbA0EVxnLCqbp2ArqC60 +UT/M9Yk9HK8bj6wwb0kZ/wsfwn78Jts8GhtCozPg/c2KWq3ce2bcHMuGhlaWq5Jb +XlHrZBIoL1tYsT8LWe4wc8Agm/w0ZC4rM6B0A8rxdsrPAc3WmlvNvnA8DA7OAJZ5 +j6zThsQ6FdSic8CI6p5vw4pyFZXtdERIjX2jkWVLVdb8adQKhqcglBvYZt5e73tk +a1OfgG0tX3M8TwKCAQEA4oIOLgVJWc9lfWFaLiRIjKlVtnOqfR8Vy9e7i2dBylgO +ZAoVM3ROYLsbDRsMJqMN/IQIuSOMYP+lIeeLoJlcqor49+z6em45Kbt5ZiK1VRoG +78Zi/UZ6cb/vcB17zmeBuUHYEmHLbB8PWbL+qFEipVDAf0q1jD9VnRw6ntt5K0oM +AKPH7jiCxfo3GU62nOZgcT1rwnA0FHBl9vvcr+237a/+NoNkRqQxTX4y5kA5l0nP +9EEWYvOlCKbkYiPKHOZMGZ4MWb0FlFp46KPxiM/x8XxB6NFUacEJUKE27NYgj69+ +5C62A34YtLKptD2+CeAKqxYOYIt3Tj4qJBrJ4m6OgwKCAQBxoXwjvnDnipEEQm4D +EZmfbUBQCw2CQpVD1Ky58jkiYxU7hEx83qVKu7h4V3CgeXAttx0ByJVso7jX3ZZN 
+cwjCcBFIV7y5rpglX5vawTEDTBOSEv20z/fdLWNoCbSW0T0ROkHu291TQphqaoEL +rkW6bvBJBrgDNn23flGU5clYGpZhaugChOxUOVbfUxV6o/BGzsdKsP7sOTDVIb0W +YfgLWQBEykz34SdMvUExQ0bkAoQNLa/IBK/YcUw8obfe+MiSIvZKjF4bzt/USZ+Y +HTvMuoY0Ag/psNMRqqV5vjdRHDj/doZ+PIBX8YCXdmxPj9E6mLH5l/sm1QKaMZEF +fqM5AoIBAQCPe8lVt72Wab2lpgTFU/CtQhtsv2qRZh6diSRhk2BmuE8tagGyHYwE +1KG3NJoG46VZf54zAWTMkUTe7FlTu7KqyewayYCGC8qkOAEYBQaPSTR5sVdFj97C +rc4UXGjwADt5yk8AnfiJnkdQEAYnQ3ZJ+JRoTkAg/oHSS26K8QaZuIdP5HAi5KNa +nD1JB8bAL2OKeFkJy6ACDo1Y3oUW4ORxadoEWEkuQpaEu1us5aRVxMk5tf1jY2n4 +yBfGX1uJ4Qz18VtrgUTGjGUpIalAfFGMIqVxwSDS+RhYfjdX4fCwdIBSNZDRN5CY +7tB3v+DhSo4XgJpM6CwEYXa6dknK6TPXAoIBAQCo/+MThNIrKwIsbKxyz3Xzg4Ut +6JUQxd3YU1kNcNde8BSuNp+05RcUIc7wpRTrrJwAc9uNHVNjKz73WcpjMh3dHYJY +VF0nbUzM2m+KzLdTJtRTYMaFGJjiHbttetNGJoKomNrMea/vEjZh5WadkBagKBGp +u85H3Ff1vYdMyCTBiU6eyabxc08/ZEaFJaQALjVC4e/mQdluCHyfmqeY1awKLmCK +vf5ZvBC6ISOMMibqcRT5ocjAvO3j+3d9Ce3ExX5U1fu/xYb3YoWQtdX+qIAT2KYq +QG1vDy0VieGJlUiDPooWin/X830pxyYJ79w7XN67JIZdlrtIEVDJe4/xghXt -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/server_ca_cert.pem b/testdata/x509/server_ca_cert.pem index eee033e8cb05..2760c56b4335 100644 --- a/testdata/x509/server_ca_cert.pem +++ b/testdata/x509/server_ca_cert.pem @@ -1,34 +1,35 @@ -----BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJAKnJpgBC9CHNMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD -MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMDA4MDQwMTU5NTdaFw0zMDA4 -MDIwMTU5NTdaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD -U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMZFKSUi+PlQ6z/aTz1Jp9lqrFAY -38cEIzpxS9ktQiWvLoYICImXRFhCH/h+WjmiyV8zYHcbft63BTUwgXJFuE0cxsJY -mqOUYL2wTD5PzgoN0B9KVgKyyi0SQ6WH9+D2ZvYAolHb1l6pYuxxk1bQL2OA80Cc -K659UioynIQtJ52NRqGRDI2EYsC9XRuhfddnDu/RwBaiv3ix84R3VAqcgRyOeGwH -cX2e+aX0m6ULnsiyPXG9y9wQi956CGGZimInV63S+sU3Mc6PuUt8rwFlmSXCZ/07 -D8No5ljNUo6Vt2BpAMQzSz+SU4PUFE7Vxbq4ypI+2ZbkI80YjDwF52/pMauqZFIP 
-Kjw0b2yyWD/F4hLmR7Rx9d8EFWRLZm2VYSVMiQTwANpb+uL7+kH8UE3QF7tryH8K -G65mMh18XiERgSAWgs5Z8j/B1W5bl17PVx2Ii1dYp0IquyAVjCIKRrFituvoXXZj -FHHpb/aUDpW0SYrT5dmDhAAGFkYfMTFd4EOj6bWepZtRRjPeIHR9B2yx8U0tFSMf -tuHCj95l2izJDUfKhVIkigpbRrElI2QqXAPIyIOqcdzlgtI6DIanCd/CwsfdyaEs -7AnW2mFWarbkxpw92RdGxYy6WXbdM+2EdY+cWKys06upINcnG2zvkCflAE39fg9F -BVCJC71oO3laXnf7AgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUBuToaw2a+AV/vfbooJn3yzwA3lMwgYAGA1UdIwR5MHeAFAbk6GsNmvgFf732 -6KCZ98s8AN5ToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV -BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC -CQCpyaYAQvQhzTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBALUz -P2SiZAXZDwCH8kzHbLqsqacSM81bUSuG153t3fhwZU8hzXgQqifFububLkrLaRCj -VvtIS3XsbHmKYD1TBOOCZy5zE2KdpWYW47LmogBqUllKCSD099UHFB2YUepK9Zci -oxYJMhNWIhkoJ/NJMp70A8PZtxUvZafeUQl6xueo1yPbfQubg0lG9Pp2xkmTypSv -WJkpRyX8GSJYFoFFYdNcvICVw7E/Zg+PGXe8gjpAGWW8KxxaohPsdLid6f3KauJM -UCi/WQECzIpNzxQDSqnGeoqbZp+2y6mhgECQ3mG/K75n0fX0aV88DNwTd1o0xOpv -lHJo8VD9mvwnapbm/Bc7NWIzCjL8fo0IviRkmAuoz525eBy6NsUCf1f432auvNbg -OUaGGrY6Kse9sF8Tsc8XMoT9AfGQaR8Ay7oJHjaCZccvuxpB2n//L1UAjMRPYd2y -XAiSN2xz7WauUh4+v48lKbWa+dwn1G0pa6ZGB7IGBUbgva8Fi3iqVh3UZoz+0PFM -qVLG2SzhfMTMHg0kF+rI4eOcEKc1j3A83DmTTPZDz3APn53weJLJhKzrgQiI1JRW -boAJ4VFQF6zjxeecCIIiekH6saYKnol2yL6ksm0jyHoFejkrHWrzoRAwIhTf9avj -G7QS5fiSQk4PXCX42J5aS/zISy85RT120bkBjV/P +MIIGAjCCA+qgAwIBAgIUVaYPCm+rhznxJTRWV7wJKkmRuW0wDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIxMTIyMzE4 +NDI0OVoXDTMxMTIyMTE4NDI0OVowUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB +MQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3Qtc2Vy +dmVyX2NhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxo6Cn80nk3i5 +PgmYnMBicmJEykEz5YbJEuyN+Mjv1wivqc23P75qvu7u0FPePptHZK+Q3PCnv7BZ +jc+MDQzZhUWN8jwenMGOxpVrX0zjK7Q0u92YbrHgxE9fkRA5fZcXGzZlrhsJQJUA +G+0QGCSzjWZvSab2JrVn/gYEzikcl81Q6zAJkTI9vACZC0vnTc6XsVC8QCpT71fb 
+qQwE4Bvr1tyuA6biB4H40RiLGWuG+8BoVn1pgSL/9GzRnsEnSN2KCfaqzk9VMDnP +TQRx0yJY+Zl5FB/ufeEJH8hh1OS3dAJhR7IYLktlm8S68dSI/oTs811BWIw1dOqa +KbpElXS5Tr9usGOehxy7q6dlazj2+nDzIhQ/20koX0dqyN1O8Pzi4OWcR5YQEBDO +8Bp9v6JNowwbMkZGSg/C1GMNwN4rEhLlAgpv8/4CoZwlQM0oROWiiZwczpVniDiq +6dYtTUhuJwC0cgJLSswDXpnAlp30hPB7EV5MIdr+9ybuRAx59Nl+ZB8g6utuWNaA +lNTrAsouwWBalHmY/f4/ltEnHkwgKCReYFHpDNuDVtnxhtEfGzd5IxQWNl2etWCR +Nnf7Z3DQHLTduIQD2R0qp73tqFK8T1DR/HZkbZnZPvBqmUIXvCnHJKKB3ntrkpqR +bQHMq6Tkv3NL+N3XpZwEz0AOeiRE+gsCAwEAAaOB0zCB0DAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBQl88evytJrL7t5BGRbJUeMOW4jaTCBjQYDVR0jBIGFMIGC +gBQl88evytJrL7t5BGRbJUeMOW4jaaFUpFIwUDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRl +c3Qtc2VydmVyX2NhghRVpg8Kb6uHOfElNFZXvAkqSZG5bTAOBgNVHQ8BAf8EBAMC +AgQwDQYJKoZIhvcNAQELBQADggIBAMGqi2F7ccNQ0FSiALPUjO0VvQrUqdWLrc9Z +67rr7wBu4bEzchM+HQP9GwbnSnH9yT0pnYj2H6idAfqTww1kKuR4CYMkGsNJ9PYW +AgYdrC67HKT2xhy9YmrUItIe/pM6rRO6oNA8Np3IAEmC0gpVMqmPqHeLvwhxcf4f +izsi148gTGOxBIWVNupImFrOaztKV6SbVwA+wdHNJvXz4MEEYlMlgHFfkrAEXHfO +6QmHXru8C0BIQaMOiVZDN8YCwmsrcGFYjHFRS/OnYblrRxuVDdhpMmNiQRJLhZHi +jf6WOpJS7o50FmC8bG1CE0CqMNF/qz3Hap36Rm2w/xSems2dIqMr6FsH34KkyXzm +pCHN162g720orV1uExpgfRSfv+IaklN1sM98WkTqAkz9p6OPPEo4VHeVGUk/mFuv +aVnByrk7qmpTLBGDk7dFI0GjsNwOz619omgYZGliRU+7rDXP4fN6EPlF5sQO7MJX +REOSZvVcHPpIAIqTFRR4SBnwYGsEPQbTKTH7jJROg0TGmiKeN4N1syb4KNos2Wfp +ZZB+f2qmn6LXS6d2kI692UomRfGVNoBEsAhWBW7FzpU/WnT+aF97VpvWUEqzg/AS +61tKM/t/ap6kNTLaPGSWTk9Ade/KuMmg+nSrL6S1Xa0T1rl2Qjd5h7U6JLWHL94v +GPxPuBsN -----END CERTIFICATE----- diff --git a/testdata/x509/server_ca_key.pem b/testdata/x509/server_ca_key.pem index 114e2a37a11e..1f69f3435663 100644 --- a/testdata/x509/server_ca_key.pem +++ b/testdata/x509/server_ca_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDGRSklIvj5UOs/ -2k89SafZaqxQGN/HBCM6cUvZLUIlry6GCAiJl0RYQh/4flo5oslfM2B3G37etwU1 -MIFyRbhNHMbCWJqjlGC9sEw+T84KDdAfSlYCssotEkOlh/fg9mb2AKJR29ZeqWLs 
-cZNW0C9jgPNAnCuufVIqMpyELSedjUahkQyNhGLAvV0boX3XZw7v0cAWor94sfOE -d1QKnIEcjnhsB3F9nvml9JulC57Isj1xvcvcEIveeghhmYpiJ1et0vrFNzHOj7lL -fK8BZZklwmf9Ow/DaOZYzVKOlbdgaQDEM0s/klOD1BRO1cW6uMqSPtmW5CPNGIw8 -Bedv6TGrqmRSDyo8NG9sslg/xeIS5ke0cfXfBBVkS2ZtlWElTIkE8ADaW/ri+/pB -/FBN0Be7a8h/ChuuZjIdfF4hEYEgFoLOWfI/wdVuW5dez1cdiItXWKdCKrsgFYwi -CkaxYrbr6F12YxRx6W/2lA6VtEmK0+XZg4QABhZGHzExXeBDo+m1nqWbUUYz3iB0 -fQdssfFNLRUjH7bhwo/eZdosyQ1HyoVSJIoKW0axJSNkKlwDyMiDqnHc5YLSOgyG -pwnfwsLH3cmhLOwJ1tphVmq25MacPdkXRsWMull23TPthHWPnFisrNOrqSDXJxts -75An5QBN/X4PRQVQiQu9aDt5Wl53+wIDAQABAoICADoDco6TNRZ+PtdoIVdlfd93 -/wNQw+mPpF8tV2wsefZc09gT8auQv0az0nb7QZsrrpBUkB1Jxk2Ub8mob7fn/o1R -pjanhlfmyoe2VhjFcRwv/n2pWpFfjxixB2of5r/EWUwR02zwTkFUfsWAVgRI1hTf -Xk3BZGah9LC0LmfeboEDHW+Y6XtfCSYsQlobXp7wYMZ7MSFubWf7aa2Q3N5d/MlG -RqYVZ3fCVHnioMgiJkvDG4d0aXnyvXpTarBkJMGjkVwjJ40dIU23cBhOW0alW7JY -t+S4q1waDYxeR5HA7O8gykCeYZ4wSo+ANpD6q+h+uYchLLmh93fDfwTxFU8BhK6a -Dp8ikyZe7hjEba5a7ZvfOXedOZoLqGuUF4P5wI0Hfdslqwq34QSqMiHJuQGa+dM+ -tqnxTw8TjylYysMJxkqipA91uhO9AWxUc37jkWOY255kXcQdKwx5TdQN25XDDjK3 -BNiGtWIEuRMoflO2tL8AmaATOYbVuC3rSm9vtK0jre09MwLxihuzd8fgGBrtEx5S -UMaBAGDG1F0lcdxQY/h1byL5g1y//N472Ir0PLGczMPBigy+ZEy2GNtwUniwWOWH -z8CE8BbCr4PMxaqR/qU4hmEw6E3mB8w0WMMGQRn9+jKwxSZaIsE518Wa7oVEx02d -LZOu9b4xNslw8HjwaSKBAoIBAQDvas21s1EhtgKZaLXNyVsWaX6Mg1h0puylvqlg -G7t7F7XRV4gPb21e65y29V42vG/r2KB/AJh8eHTFYrOPSPPT1ZZxfxD7yuJGliYc -LwMU9QWkks5bFEP8nHogBv5nA47Ve+ctgrkwhZneWS896EI0Ulzw90oeOYgzJAmP -u0IVx6k0SlYKw5b31xWdwRehAIiz0UFufn88QtM3Fhj530It/+mqvrT/MR93XIIm -0tFLOIGz0Tp4yLleB1h//9xFdLUgDAGXgyC2ivlq5H31rGwkZr0Ixiwm8VOq1yvF -/ZofDN37RIrIbC2O0shFbU/L4KC99Uu5gDk/bu7INwLrmK3JAoIBAQDUAMNz0Ewg -cR1hlJ1mDD0IuKHjgjwoOslJ+r/P9tMfXkNudx7IRrnsNlJzB0RYYFaZiyXi4dXn -nN1C1ePIXo/kfw18Bvl+GIUrHV9EZrMJ+OfdWyXOzv6kfkT4B+axUJpqCTA3aalr -+mI+EpSjw5IHzgEL9cZBlms2YSu6cDxKYXm8sjQ7w0OKSQFsdv4rIfT+xwVyVHMW -1vn2tYdxnnidzuGUFt1Fhx8SnNHSu6K3rvjoc80jjg3TuOeKtil5AwVhtxqpX5HV -XAdQwFSZigSkjypnvIlJ9YLVl+64U24UQXBZc3qZdImZqKn38Dfalaiz52CWZPtt 
-N6HFzJTAjcmjAoIBAEgWe4wLSxGAcTXp3lvxFfkgmJcMcVTmcfjR+MPUddXxZLB8 -z53+KgtbnBtGtDB8+qIj3ud+sWtBBb/tIS2yhKAy/pJ79QwroYgpa54u1Zm40RMl -lPa9ml70ap08Hdu8qYREQ25jnwkqIRNe/SeByHVim1N+0hVZs1XasvpRIuvV62+w -NkoVbF6Bp6ORYWD7/S1Pg4kWk478fAZpI+oQvCeHl77unyb7joLtGs8/yP8CK6OO -CzIVFiNmyNH5o0RSiLr2goAxXmc4XzM9S2Pun70yJhb/PIoZPd0B3s9FteNFh41B -rRv93pXTh7PH3y//Gcc4la1sG1CrQUCNt9ZiaWkCggEABHHXpy/wyKVWdltFSYRs -KyijzD9Iv5cr7S8ioluMZZX2V/SLYquI7ljdNagrWKb8ac+vBaiycV6qjOIrGmJR -Jfs77yO+S1R8RkEhZC+7BTSAt/VXP5S7Zft3urN/tKv58MsshZzjfm4LbT26fAx3 -nU5GW1fVxj4/FS7IWepMeUq94KTjz3Tyj42kR//eqEzX9Bd8F7+JgisTpoZ7xngK -E1TpCc/I59JDZoJ/K6nfaXZzpXv4CwzJYWz4/cF/8ReNH1VVa8OjLRP220yM+YMZ -QdH2k6IyRqitC4lZ6edl4WrVzipLobf9woj0t0wD/8MvfEYXkk+frdSCwcDeRYMz -fQKCAQB/kbirzZfwH60I6FIlCHohRDJ3csBU3n94UqSOqi/zl6ts+Vgrdq/36UuK -lww51o1FmtnpI1jaVw8Sug7N4xYUkgCmyFvLU3SUOw68xzPxi7k9NwI+M1jH4ZMK -JVJXHaxx2bY35rf+y1NKOge24uw//C1aEmKq4Dolql6ZiJlVGUna9lp+VmcDa+XW -OzGfJWMZeSh2kI8cJrTCrar21zRfF2c6IsoKdDBAmZV1qSgzymzUYtYQ2P1s+qRS -Cs891gpYRQMchfec7FefWdFYXgEfLRp+nz4WoLaIwK+oftPHl96V1z9rS1Zs2HXD -okA9YtMucwgrhGFv9T0QtBuq4aEC +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDGjoKfzSeTeLk+ +CZicwGJyYkTKQTPlhskS7I34yO/XCK+pzbc/vmq+7u7QU94+m0dkr5Dc8Ke/sFmN +z4wNDNmFRY3yPB6cwY7GlWtfTOMrtDS73ZhuseDET1+REDl9lxcbNmWuGwlAlQAb +7RAYJLONZm9JpvYmtWf+BgTOKRyXzVDrMAmRMj28AJkLS+dNzpexULxAKlPvV9up +DATgG+vW3K4DpuIHgfjRGIsZa4b7wGhWfWmBIv/0bNGewSdI3YoJ9qrOT1UwOc9N +BHHTIlj5mXkUH+594QkfyGHU5Ld0AmFHshguS2WbxLrx1Ij+hOzzXUFYjDV06pop +ukSVdLlOv26wY56HHLurp2VrOPb6cPMiFD/bSShfR2rI3U7w/OLg5ZxHlhAQEM7w +Gn2/ok2jDBsyRkZKD8LUYw3A3isSEuUCCm/z/gKhnCVAzShE5aKJnBzOlWeIOKrp +1i1NSG4nALRyAktKzANemcCWnfSE8HsRXkwh2v73Ju5EDHn02X5kHyDq625Y1oCU +1OsCyi7BYFqUeZj9/j+W0SceTCAoJF5gUekM24NW2fGG0R8bN3kjFBY2XZ61YJE2 +d/tncNActN24hAPZHSqnve2oUrxPUNH8dmRtmdk+8GqZQhe8KcckooHee2uSmpFt +AcyrpOS/c0v43delnATPQA56JET6CwIDAQABAoICAQC80PSi5kMGWD1AI3v/RGvZ +/l0QQOULFhvMZSu1M8/wGxCBV2E1uuxj2W88qSSlQKCpvNLzZ979yMPAuWejWV7Y 
+/4W2nzk1NFODwL+0hrdY7itfo6C7U2g9BoYIuvcQ2Udd12LmKEuqIIdUByHQ88XT +Z1/ZGG7n7IaR6ENVkX7hVJvoq2vNqYtPZvoi5fF16koSkoYSNq5O4qu+m/Fe9O5X +CtBoJKC5Jv3oSYCtkbVxXk1aQjS8Wv4v//NvFps3DYWhZ/KR8ps+GxtpUBq1/unB +ohKj8qGnDwLQOIvgGgfiyAieV1vrWkOr1283XTdRYjK6Uyo6/Eoxfo9PsxRZVACK +lpkRGn2p1GTbx442INrOwhJbJtKAcSlM7V8m2dAhgMTXXJzPtl0eCNWGy0s1obpK +1p9qTUz23s3WA425TAXQwIFpq0CdrXPgaXmZYw5LlfuwQhUmw5SQBOg8dX30Y5rb +vrrBBj1+2kAEYyeC3aQ3HwP6qAevWtmlbyCLMS5pYcPTRIEJlVX7OMCZSmvYnOku +0Vghgr2g+FZjNgQMo0HYEq6bb1jmpnN6tY4y3H7Zq76xavkFBWzaz8aHEB1koBoW +e0z+AaojAXEMaAKzsA3hF74HZA4kZf9kFi4er4ZLlnZFrU7UCN99mQfsV0LU+75E +rpzoRVYE8CZN9egiphZFYQKCAQEA8AUt1mi3tDfbyuDnut9KVkraCDAQ5WX+31H9 +BTPdIoXaeYx29zCKSEaBAMvf43s2pDgVPVyuc7KVFp8l3LZZ+znNWxU3eVznE2Ua +1QUTR8ZxueUIyOZxuSHU8xqWvxZ7SLZYTqTVmYGpORjvuQIgS/+6GrAbCNcZLNIo +I7U4/Nx4zJ3bkF1xaKJcaVXQRwG0cHQEZZdgkaE+J/ak3WTRC/11tEr4agGo+UDF +6XBQryp3HxPMnZyAv44zOB9gqD/FvnxH6S5ybc93eISQXZoJQuuad3GuEn28Gz5x +Bsr6zzSkhsTp2cEr2AVSH+b/SCseZ/2JEE0mjPxgjXRVLQrnGQKCAQEA08akFJjH +nq0aEcL8HWbUE2PuuXcq/vUZHFNv1gG+e6vZ7gU7bq+5SS3+APuerPQrhE+3mKsn +WKUfkr2Rgn2Yxieo/u+SKMbDaNNT5h08KgA/joewRYDeInACw9QHsKPKArWmWn6d +6fHYl5d3rxUPGUaM0fsdOvayX+xeN4PDn+puqzInhh3TFbeyvkpu1b/xZZ17u7Xz +yvu1PRHNt5eZB6QZ/GF3d1r63CTC7m3GF/wikoV9Ygy0Me8gw8WxqnF42GFRyUIa +wHXfb3w4Y4rZ17b7x5H18FF4tHMgXN0pNbGk+PoIFB3Erzfqi95xVyZh/MROwFiL +qy2KeRoniUbCwwKCAQBDvxJ7DD+dzI5rKyP9KP1Qcfwsh3SdazaPThL+nu7xyZoq +6KzDhJ3jXJMY6HKfQK3hmDrWgQx0d5mBMxZ6v7WSJXSDGu/3f3Nxk/4I1k/k2GxN +LgpWukSrHpN+sqiN8wiFM4KlX/0yQNjE1vcC30jCasHauo5G5n+imQbfXU1igdBO +4NeSXe2evQUcbi5FfIOzoeuDyUBmmn5yxTkvjD89BSNt6iNHuIQ7Jj82bo83geLx +kKMWcZAdgUOPubuMgcOMyoN5m7SMrhxolfIxmUK38sw8noeljHvFrNA2PKCiT5eI +upfO8KkxZf8SJh8z/YetjnBbe4tADBQsmQNZnVQxAoIBABNKmxPNPxHzTtajXngH +L/Z8OfjnJCGJjjoIV7209vcpFncaPum8VDKYX/US9sdmjrhE0sKzhKgMkq25WxH6 +Avq6Dij7BeN1B8P6zD/AFgT1dNS1A5exP4r/jSDtpa2vne1VQswnkJcJEuPsRljK +oE97H8TZDTab1m/qhkKkXCOrJV2u+e67tMjbrQqsmSAblg/dorHcx1KMT1w6zPSW +eLg7eKqG7m0O+p8nMiKqGUuCClwykNNnuNp7oA51adPO9mUvqFWfEfTKSApN1I0s 
+zt9ZqeHqJ+82XLqDakVLWD+t6QtNK4M5mvsjKtiG8OgxdOejslDPQBnd0ilp+oQE +0CUCggEATL5jg3Kcbq9HcaFEdgqzVXXcqNI4hOVWjP3Vp6W390PUZkmls1Igms3l +zc9fhibHv5gRq/Y3xQGUuP0dKuPuTykjXUTRozeal5ErkiLU6PZeVlz7xUHbLrF9 +J/IARiZtnfhcDh8qo33QZ4m4HUB5b54/4Gdzr7go8o0UVI50gXY65kwz5hV79/7P +S38hxS+X+k2FkWuCNrP6ECkC/L8jNM1h2gR4Ez6FziiNxYbOhQgsaTQRKjca/v/E +tzX9oZjMqiWtWKcxgSCZs/1DscXjjCsi64f/oOBShF1x4ZeiMoZTMhQBLMlbVx/n +JxRWkMCYLEbAZXWJXEKHyqaiIAOpkw== -----END PRIVATE KEY----- diff --git a/testdata/x509/spiffe_cert.pem b/testdata/x509/spiffe_cert.pem index 0ce0a1aa190c..bbe7d66a4bfb 100644 --- a/testdata/x509/spiffe_cert.pem +++ b/testdata/x509/spiffe_cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFsjCCA5qgAwIBAgIUPxiyjwxoDyMeRvl4g9TSdvLlCA0wDQYJKoZIhvcNAQEL +MIIFsjCCA5qgAwIBAgIURygVMMzdr+Q7rsUaz189JozyHMwwDQYJKoZIhvcNAQEL BQAwTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL -BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMDEwMDYxNzI2 -MzFaFw0zMDEwMDQxNzI2MzFaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMTEyMjMxODQy +NTJaFw0zMTEyMjExODQyNTJaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM MAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBDMRUwEwYDVQQDDAx0ZXN0LWNsaWVu -dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCV84YR/EV55qfFynHh -QvWEZW5hUI9q0DeD5kG5CarrkOj11rZuQIBZ7X23CJbeoVbrvbYLghsPYJzxS/n3 -Qlwwzb5k+L0Qt+HrBD836HcSK5k1oh0jGGMaGownap+XCZH9g52s/8iiwfI02CmN -TbwsNp7wtSEFgNOd2OlzhT6wBLF2Q6uxfBmsDpiChxe2Fs1lyan9RH8fYEf7sxwP -E+SgBfEs7dSG5ZwFfdF+pd1T3IfrVjIxechKO1MO7HTSxbOTj6eHf1NeErDTGPA7 -VrnDCupRgcDGyAhFd54r62R8TbTjn5MwzMxElO45Ck/Ej7Qw/GWeaBHj/dMa6mhE -R55PvnKuyj+k9t0Rf0HDZyONtY5/OLqI/xVr27Y1o9v5FysNgjWPkZMRpvuCzkeC -2RuE6k2TfBDRLiCyYu/Zzw+ZtUyTAKtWtefLdQBjrYpnhrDPpmrnTWomX/e9pylE -WfkyxCswiPnDw7ypI7uFSTkz0+bUaROmAtlPvR+3SjaQDWigwz3eJsdIaeg5AY9q -//rWaal6l2iR0Ou9L6A9lLxh5iN/ch+OGk4QPK6pFbOy3IqYfmQ+IpAXG0da9RT2 -EN76cNa3bldEjRRON8oQ3HZmhOQJqVxhQciUz84sTjAqH8WvqqbdG9HKUoZ19T5Z 
-9vNldjlQn33Mi5gBxdugqdnmCQIDAQABo4GHMIGEMB0GA1UdDgQWBBT8rr0kPapk -bGLJ4EU1582sw7WlOTAfBgNVHSMEGDAWgBT8rr0kPapkbGLJ4EU1582sw7WlOTAP +dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ4AqpGetyVSqGUuBJ +LVFla+7bEfca7UYzfVSSZLZ/X+JDmWIVN8UIPuFib5jhMEc3XaUnFXUmM7zEtz/Z +G5hapwLwOb2C3ZxOP6PQjYCJxbkLie+b43UQrFu1xxd3vMhVJgcj/AIxEpmszuqO +a6kUrkYifjJADQ+64kZgl66bsTdXMCzpxyFl9xUfff59L8OX+HUfAcoZz3emjg3Z +JPYURQEmjdZTOau1EjFilwHgd989Jt7NKgx30NXoHmw7nusVBIY94fL2VKN3f1XV +m0dHu5NI279Q6zr0ZBU7k5T3IeHnzsUesQS4NGlklDWoVTKk73Uv9Pna8yQsSW75 +7PEbHOGp9Knu4bnoGPOlsG81yIPipO6hTgGFK24pF97M9kpGbWqYX4+2vLlrCAfc +msHqaUPmQlYeRVTT6vw7ctYo2kyUYGtnODXk76LqewRBVvkzx75QUhfjAyb740Yc +DmIenc56Tq6gebJHjhEmVSehR6xIpXP7SVeurTyhPsEQnpJHtgs4dcwWOZp7BvPN +zHXmJqfr7vsshie3vS5kQ0u1e1yqAqXgyDjqKXOkx+dpgUTehSJHhPNHvTc5LXRs +vvXKYz6FrwR/DZ8t7BNEvPeLjFgxpH7QVJFLCvCbXs5K6yYbsnLfxFIBPRnrbJkI +sK+sQwnRdnsiUdPsTkG5B2lQfQIDAQABo4GHMIGEMB0GA1UdDgQWBBQ2lBp0PiRH +HvQ5IRURm8aHsj4RETAfBgNVHSMEGDAWgBQ2lBp0PiRHHvQ5IRURm8aHsj4RETAP BgNVHRMBAf8EBTADAQH/MDEGA1UdEQQqMCiGJnNwaWZmZTovL2Zvby5iYXIuY29t -L2NsaWVudC93b3JrbG9hZC8xMA0GCSqGSIb3DQEBCwUAA4ICAQA15Ne+Lz5cN1/B -fkys4QHDWJ0n5Zy9OtwSW6aTyqIIwls6OOSkJn3qJMoT2oFvoHoOxb0swyN+zUoD -pmPEd7FHkMm8BhRqoyH3UZGR7kOSIIcfvldVZbW9mD88A04qvLsWkkanMyGhkYV4 -0TXyb8USdjeNm1H32iF4k24czSpvoOYo9HOQv+4aFcqTMnGwS7CvwU6O6vVU8gIy -HYP/oWnkhap6X7acjPxYoW5IDZdN9vdMz9wQlKlc799lWqOCuwl68NSuTNcNNFyn -TXfFWZaghb7iXsUezGYTY9glsPxY0Egmbcmxut0gz0U2BNVvNGKUUu55MlAS7yXO -Y7eTfSSf6DJesFQKwTg8qlyNLjzbLSmhvz6EPV55ToUxPPA9CIOrWQwXv4GdySuH -bwof3U5p/cq2NDtxv8KGisjK04l++s+Ea8AS6T6O8+08nBFGgfNW331eWtU91JoQ -e6Q4DWipiNzkIvISk48V8CT9eRB2KD7NsigQprePRN3gDZREh+01gwbVUX2gbtHx -1RGxEjO6H0kUuaoXF5E6+WGwgn8MA47qUy1WXC5QDFpc5LyaoVaMFv8bcoWSNXAS -Oes+ZDWDXWq6F+9Kt0zWmO651cVquLTjmgt48fgL6m8rU13ikjH7dFnimrwRxfOD -p+z97N7TvWfgE1HOmYDfsbaHjPFZKg== +L2NsaWVudC93b3JrbG9hZC8xMA0GCSqGSIb3DQEBCwUAA4ICAQA1mSkgRclAl+E/ +aS9zJ7t8+Y4n3T24nOKKveSIjxXm/zjhWqVsLYBI6kglWtih2+PELvU8JdPqNZK3 
+4Kl0Q6FWpVSGDdWN1i6NyORt2ocggL3ke3iXxRk3UpUKJmqwz81VhA2KUHnMlyE0 +IufFfZNwNWWHBv13uJfRbjeQpKPhU+yf4DeXrsWcvrZlGvAET+mcplafUzCp7Iv+ +PcISJtUerbxbVtuHVeZCLlgDXWkLAWJN8rf0dIG4x060LJ+j6j9uRVhb9sZn1HJV ++j4XdIYm1VKilluhOtNwP2d3Ox/JuTBxf7hFHXZPfMagQE5k5PzmxRaCAEMJ1l2D +vUbZw+shJfSNoWcBo2qadnUaWT3BmmJRBDh7ZReib/RQ1Rd4ygOyzP3E0vkV4/gq +yjLdApXh5PZP8KLQZ+1JN/sdWt7VfIt9wYOpkIqujdll51ESHzwQeAK9WVCB4UvV +z6zdhItB9CRbXPreWC+wCB1xDovIzFKOVsLs5+Gqs1m7VinG2LxbDqaKyo/FB0Hx +x0acBNzezLWoDwXYQrN0T0S4pnqhKD1CYPpdArBkNezUYAjS725FkApuK+mnBX3U +0msBffEaUEOkcyar1EW2m/33vpetD/k3eQQkmvQf4Hbiu9AF+9cNDm/hMuXEw5EX +GA91fn0891b5eEW8BJHXX0jri0aN8g== -----END CERTIFICATE----- diff --git a/testdata/x509/spiffe_key.pem b/testdata/x509/spiffe_key.pem index 5462d66326b1..77a33e9d0af0 100644 --- a/testdata/x509/spiffe_key.pem +++ b/testdata/x509/spiffe_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCV84YR/EV55qfF -ynHhQvWEZW5hUI9q0DeD5kG5CarrkOj11rZuQIBZ7X23CJbeoVbrvbYLghsPYJzx -S/n3Qlwwzb5k+L0Qt+HrBD836HcSK5k1oh0jGGMaGownap+XCZH9g52s/8iiwfI0 -2CmNTbwsNp7wtSEFgNOd2OlzhT6wBLF2Q6uxfBmsDpiChxe2Fs1lyan9RH8fYEf7 -sxwPE+SgBfEs7dSG5ZwFfdF+pd1T3IfrVjIxechKO1MO7HTSxbOTj6eHf1NeErDT -GPA7VrnDCupRgcDGyAhFd54r62R8TbTjn5MwzMxElO45Ck/Ej7Qw/GWeaBHj/dMa -6mhER55PvnKuyj+k9t0Rf0HDZyONtY5/OLqI/xVr27Y1o9v5FysNgjWPkZMRpvuC -zkeC2RuE6k2TfBDRLiCyYu/Zzw+ZtUyTAKtWtefLdQBjrYpnhrDPpmrnTWomX/e9 -pylEWfkyxCswiPnDw7ypI7uFSTkz0+bUaROmAtlPvR+3SjaQDWigwz3eJsdIaeg5 -AY9q//rWaal6l2iR0Ou9L6A9lLxh5iN/ch+OGk4QPK6pFbOy3IqYfmQ+IpAXG0da -9RT2EN76cNa3bldEjRRON8oQ3HZmhOQJqVxhQciUz84sTjAqH8WvqqbdG9HKUoZ1 -9T5Z9vNldjlQn33Mi5gBxdugqdnmCQIDAQABAoICADWoJXJsHgRHyAMbtPJRPn94 -uC20YQ1somDdVOk8j1+pw+KsSS1cgVEsjU6gkTPq8ap7gRfPH5W6EY66jCCxK0H/ -bUC+TREda4boRyLfWTQ0S6eIcfqr8FJX64zzN1YZg5b+sL5F7Opokh3ct8mrZkk/ -5lHlzoIknhSemLLQnCTqGQJjpp1k9d6+fk4+vvpWYHsq1VweVYrJrhhf+AthJ+8n -ESztkZ4PrWu9oOg7u94VTMGmX2Ga3VPKtKbjb844FlEYF2+B3TgNYh63jsb8+o3T 
-axNtZaj7zRHmgr/ehF+CgtbstAPDVNi5niDlErQYY/cfadFsFfLKUe8Qr+y23+vG -1WuVSUmrUcgO/IYMIz2gEOrBOutc9cdKOlCnwrXu3WjSGO6zhcbXCw7WZrSR/Uj5 -1Tatt5QJ5Z3i4vOc6Jj1XKL/9Xa+FEryfVh/HKlQTlHnIuuGXMBpIzyYQ6kY8+cH -n75FVMo4lB97c48hweupQY6SUQwvWXqXQOAxLJ/eq2k1QpUWJ4GV5kRr3/eQ/AZ1 -y4Kk2ZxM8IWksFdVnomNr65GIk219D1uwDtJQeBrwqrYseGq/2mB2h4llTbwjSez -GkOPO74tLPh3wkG8wDzbc94nfouxCL6ee9W4XeDGzYXgndSKAPOWWUyFnsIxisVu -BB2HUkJZotG2Otrgnj/xAoIBAQDF7NjT9JkN+JhrmH4jG+16lI6RDErf/VgheSE8 -G/ayAg1RGY2FsuyTi2bAM3xprXqaZHDSikil8t9G7JJZoKVzesCZR+OJU+9fGZR6 -TCS6mCdv6OEG18GJP9dzDLaqYybJ9VgnSnXT7mRlcCyU8uZ9/2FR/vnLEatbOw99 -2tles0LGdkKT/YIYdxIENIpmaZhzAOWPDLDwDTO8aIXy40DiGzjLWJf/0LgCV7Ub -2C8aPS4WWXCOdAeYvjcEKeCp9+YSgZaYNT2P8Ns40VcL/yysMtFzvnTOcA9riyzN -5pu+ppv/KYGt/ENa4zQMCgKFtTUxicr2M9VYqNI/CgJmJPKVAoIBAQDB8x13soMv -tohfiNwGTjEkVzu/RkTCix8+hERF1C+oupL3ykpJOGvvpOyhFUqwFAYyBQKflrxj -9lQBKQiPYR0VtgDFJ7UzjYGO8zt8U56gTcYeatpNKY4zvZyGAOBhtWuvuvi8pnpc -xO8yQjE0jWrwWL3bmf/5lP2jO8j+k1qZfrA2ksTGUWRGEZkFRqqTQxvRrJVr9QiY -2xpRq/n7fq9UhCfNxm2aLdYgZ/BCVFzNahCEWfFdH3jOcP5N/5Fkiy7IhwfdglO+ -JjydEMqBYHg3ET6MQ3JYM/Gt/GkX0myV9BHd/fYYF2xGBMDIMLVdFKyCivUMGGt1 -pHhcNLzebFylAoIBAQDA4Cr4eh8Az2XxTDx3iEqnLsezr8/zcVYF4J2zjuib1YYW -pxkT1iXXLnymBkZSUVztwb10Xo+nMAPHgNipgPRakZ/If5bLh8D34tyfRT5xm76q -vr0zRuPyFQWmtxf2+QKewnjyaQxjx6eMdoDrcb2NwWWcWyYfbwuWrvpMwg0bzQLg -lfQRdXTm1Hn5IR5R6MtIHvKVsV9nvuXQz+bgp/bdoHt7Jc2R3FrE5aW3Cbf1EPOt -keEu4QFaJttEMm8eE1bgZ+pST2e7spJfTxlNtpBZCni0G0CGwAs22PyDdhwF8SSJ -xm/6FZ+pnUlmBgcpN0osCUSBIkfgyzt/dQibc5v1AoIBAF5ZLweQfoLSb+rRf/9N -QFimWvlEbKSa2vslirTRcNHK2T3TWWnfGZq9hyMhYXDgfNcOWuVZhZG3PcxGstRU -8LokDKHcHCjU+KaaqmBjqTHgQ7V+U23f/j4rSh5iBMVjZNxavy++aJ4Caz3ut1MS -TGhZMxrGAqDeGriyl6dH9XXgDEawBStYYsg3PVI0uzviFIFeTF31GFaLl3UNjREL -4qzhkR9oHN841wZyqY0Kzw5aP2iy/FhJvBHpI7y7y3W2w25nSas3ABfrL+dUSL7B -OBnJuLyw/snrkvEJbfJZudsEnUB5j6LOmixBmaqJD2EVcooaoPReWMAk3ywzt4EY -A8UCggEBAJbArCsQ0Q+pFOEce6JlgtYAdcBiu8n5zFYLD2/qZdM2ir/09uj9XDpC -WbE5YTumgzkt2VLK/wf+HATMAyXObtaAn5AdoA6OJ1AvbNGOwA3F2LOlbJGO2XOW 
-TpQlgbDvBBktaKk9PSszDj92W2tdFQPefDP/uBzonymev7BWCERZ/vU2L3HpXQjp -bxzRyVNWwwg3VBYvbCIz8v2yeviAsiEkOCPRU6+cIr7/VUr0mymzDgfzKaQNoOap -LqOpnInw0pUA+BhsVr4n/fBXZLbSd7ZG5WU48HUaSLefEI4NuiZLT2K1tZreyBqZ -Xgln2zbN6APAb+dGDdv27dz4YlasU4s= +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDJ4AqpGetyVSqG +UuBJLVFla+7bEfca7UYzfVSSZLZ/X+JDmWIVN8UIPuFib5jhMEc3XaUnFXUmM7zE +tz/ZG5hapwLwOb2C3ZxOP6PQjYCJxbkLie+b43UQrFu1xxd3vMhVJgcj/AIxEpms +zuqOa6kUrkYifjJADQ+64kZgl66bsTdXMCzpxyFl9xUfff59L8OX+HUfAcoZz3em +jg3ZJPYURQEmjdZTOau1EjFilwHgd989Jt7NKgx30NXoHmw7nusVBIY94fL2VKN3 +f1XVm0dHu5NI279Q6zr0ZBU7k5T3IeHnzsUesQS4NGlklDWoVTKk73Uv9Pna8yQs +SW757PEbHOGp9Knu4bnoGPOlsG81yIPipO6hTgGFK24pF97M9kpGbWqYX4+2vLlr +CAfcmsHqaUPmQlYeRVTT6vw7ctYo2kyUYGtnODXk76LqewRBVvkzx75QUhfjAyb7 +40YcDmIenc56Tq6gebJHjhEmVSehR6xIpXP7SVeurTyhPsEQnpJHtgs4dcwWOZp7 +BvPNzHXmJqfr7vsshie3vS5kQ0u1e1yqAqXgyDjqKXOkx+dpgUTehSJHhPNHvTc5 +LXRsvvXKYz6FrwR/DZ8t7BNEvPeLjFgxpH7QVJFLCvCbXs5K6yYbsnLfxFIBPRnr +bJkIsK+sQwnRdnsiUdPsTkG5B2lQfQIDAQABAoICAQCXbolwqfHVFQ/OLRLzsZvy +UZGeIY7UUxKrAyPSoNvJFpr7DG7n7arOcaTOG1p56aYyYPvHIrB7FKpQggnSCYIy +1j89BoMjTKu4gsKWad72+ivB/RmRPYGOHUy6QftXpXQ9c0Y99weJ2iMO3zRR2269 +BbG0pCd7ppCbJquWP5IKVlhl/cxjHS3vd/YPZorlS1QUhpsMxGHfFKLzfHHk5nX1 +ZIHlctZIHeWw8VG8W/xbbnA2RhcxnY42vqAG+/NCkgZUAM3WU8zWfU0WEZ3Imy9Q +HuPv7m9H+vyBYSYQR7eh3nfAVHnHeRBKQX6hpQ/PEwHneXzVmZVnaaZD1l35+oQs +9mCmIC5PkwGe+vJI+Rxt5UElgzRDMVF6YuHfmobQQn5mT0Jsbdn6tfaFXa+e8+Ja +4WSlv/rVNSvCxNSwV0fOGIIrV1CIElf9ei7Go/Jewz94Eh2PdoOL9gNPqBeFFLmt +mS7HgST/Dkn8yAvyYgY5IDDWDiauECMV2F37QRk9stX+dUFhDQU3P/CcMH0SRyjx +vXRRvWY/5rWuinJcIr1Kb0OHiDztrM/4wAYc1BQjsL6CPKT3aAmCs/fxf0JflK/o +pvzsK5+AyBrvEBh9SXPcQYYEj6ZYCGZ/rPVlmgOjT/d4+xZf0FBUi3g+np7V15Km +ao4LgfcSQF5xUVTiyGo1AQKCAQEA423AcschurSrHwnmL1E5UIaYoFyKok2W3GZ6 +nu8Gp49GlF1v+CQ4w0zMNgigTTHxPyShK4/lC1n8W7liuo2TD98Km4qsnkD04R1z +Jf2PNBoVfMFEwxTA0t3LYfqJc3xLxWTR7wbvIRwtrcM/tFbU0RQAZ0/eE5Vine6t +fU8HBn/kSnUeCXMjFqheRccluYQgwES5ayJjnvf4Yrwe+In89+C6JE01LetqtxiL 
+U9X6iO9VpF9JVI7CPTBd7cZI3jfO+N2dXGlnt/Dp95S3RymoRSyMA1ILCCFZOeTo +Kh26/6hfgc2ox6ttN+s/1J9xPaXFmvbEAT27n+bDFVIfvF9QwQKCAQEA4zx4xLsW +AdljDLSDcLziWE1ikvO25hWiH3Q7ZyFi7vqby4d7FdgazNERrFZaCNQeSLfMakLd +zO1URfWsG7+6XvY45VtgVlqw2+uAHlE/B3FtrSeaIDr68AvWfPo02vckRMgTCBch +MEvul58A658mlybOkTHoRyaeDDD+83sIHAnyFzublxfsfbdgqzFW1RxfiUwmAM7w +9rI+PFPQnBgBkfyjcOfaVx/I1uvm3Nnl4ZUAmEdz6YlJdN6EhM8SjCP2LsLvhDUz +kjZ5WJ50ybRs3DWhDH/d06DmFlGwgu894TKBHqrq2fgDDukpZkH7cHldcjugzlHJ +c2CAO9MxI4UyvQKCAQAogEgQaKP6EuiSe3nRnV5el8mgbTqHEtg14c4edaSyvFIu +Y8Fn6FNvfEK1sK2TcbxrqUNGdbatYdYOI6KQZFv3LJo//t8kw56YZF04O8J/3dFL +yUNMlmqMYtEwXqSRu2Xm/kBgl9SICfOciTPUEs6NeUllHJUI2caZJ4Mf2K4Am0/1 +bovt1OI/y7YWKRPvyLboZpS6noItMi26r5O4YSJ6pjuf8VvyFIWJm8ZcJLQcJLsU +rZ9qfo3axb1EddZONJQYP6chaOf+mtmfrI1DEAkWYIuCn961EPNJ2xj5PxgpJTv0 +6sIO5NlrZuqUG9zXxKi/Iwjey7aZEEhXiKt8KWFBAoIBABtcRaJScHTqit2VwpnJ +dGtzbeIJzETp5+pnoVtqjrH9pNKdznkz2w48QieBAjg76iWRU+Cbin9JODNwQDfb +HwKeHP2owfHD27WvJm8AE1m/E5icwxcMYviSRFIqAkE3LrvFZ107A7j/+4twDrlQ +IWJjvs2Gt9QRV0hagegpMTHHFMotWC+aJtSARvh16WGhl/M9IvpH8IWTsqCq6txQ +m6fLRpaqpASHhDQ0lUiUR/Sgb0DmoZNF/3096bDgCfirv9GjkRlXGo2JV5UPBzre +KZleL7UElF4N6oZXcaxiSA4ceaWKqNpz3VJnSp/QZAkH4/OEMHmHKX1l6irJ5AnF +2PUCggEAQzXltQN+24eNh9nD9BzJR7CYOJQ4CzmHbkxJJZXGJtSCv0mYcEsSRmpH +5mW9hr8w6Y5WL4XfMduUX9od8exJtXl5d7Fl8oUNh+CwDr9smEjEDchh+6d11ZCi +Ervr1XOmNyOtpnQ1+N2nbbBEMLVns9yX6oNnl9mBdwpvwko7Q9ahyTvWnE2oG5At +8VeX/34k6BWdqPCJfnISMVbt6D+J0kaqaVw6BplTNSRs93bmqKwpcrRHMTida+bO +l9t8Cy3TguZdRWTJZ0kFmze0fV6dXYookZoUIeisTZBl701tOsvysZjxtMQJfTLJ +Io+0lEzXxTbCbBP/iyizjo62XTgpdQ== -----END PRIVATE KEY----- From fbaf7c55821070944bb0ce342ba3c54cc521c6fe Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Tue, 28 Dec 2021 15:07:12 -0800 Subject: [PATCH 383/998] authz: update representation of allow authenticated in SDK (#5052) * remove empty principals logic * Update test * minor formatting * resolving comments --- authz/rbac_translator.go | 8 +------- 
authz/rbac_translator_test.go | 34 ++++++++++++++++++++++++---------- authz/sdk_end2end_test.go | 34 +++++----------------------------- 3 files changed, 30 insertions(+), 46 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index 010fc89a6e22..75bbdb44d497 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -157,19 +157,13 @@ func parsePrincipalNames(principalNames []string) []*v3rbacpb.Principal { } func parsePeer(source peer) *v3rbacpb.Principal { - if source.Principals == nil { + if len(source.Principals) == 0 { return &v3rbacpb.Principal{ Identifier: &v3rbacpb.Principal_Any{ Any: true, }, } } - if len(source.Principals) == 0 { - return &v3rbacpb.Principal{ - Identifier: &v3rbacpb.Principal_Authenticated_{ - Authenticated: &v3rbacpb.Principal_Authenticated{}, - }} - } return principalOr(parsePrincipalNames(source.Principals)) } diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index e22ab62ce26b..e8e2f76b5ed8 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -205,23 +205,37 @@ func TestTranslatePolicy(t *testing.T) { }, }, }, - "empty principal field": { + "allow authenticated": { authzPolicy: `{ - "name": "authz", - "allow_rules": [{ - "name": "allow_authenticated", - "source": {"principals":[]} - }] - }`, + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }] + }`, wantPolicies: []*v3rbacpb.RBAC{ { Action: v3rbacpb.RBAC_ALLOW, Policies: map[string]*v3rbacpb.Policy{ "authz_allow_authenticated": { Principals: []*v3rbacpb.Principal{ - {Identifier: &v3rbacpb.Principal_Authenticated_{ - Authenticated: &v3rbacpb.Principal_Authenticated{}, - }}, + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + 
MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, }, Permissions: []*v3rbacpb.Permission{ {Rule: &v3rbacpb.Permission_Any{Any: true}}, diff --git a/authz/sdk_end2end_test.go b/authz/sdk_end2end_test.go index 839faaa76081..b3d449d5dfbd 100644 --- a/authz/sdk_end2end_test.go +++ b/authz/sdk_end2end_test.go @@ -261,30 +261,6 @@ var sdkTests = map[string]struct { wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), }, "DeniesRPCRequestWithPrincipalsFieldOnUnauthenticatedConnection": { - authzPolicy: `{ - "name": "authz", - "allow_rules": - [ - { - "name": "allow_TestServiceCalls", - "source": { - "principals": - [ - "foo" - ] - }, - "request": { - "paths": - [ - "/grpc.testing.TestService/*" - ] - } - } - ] - }`, - wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), - }, - "DeniesRPCRequestWithEmptyPrincipalsOnUnauthenticatedConnection": { authzPolicy: `{ "name": "authz", "allow_rules": @@ -292,7 +268,7 @@ var sdkTests = map[string]struct { { "name": "allow_authenticated", "source": { - "principals": [] + "principals": ["*", ""] } } ] @@ -386,7 +362,7 @@ func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { } } -func (s) TestSDKAllowsRPCRequestWithEmptyPrincipalsOnTLSAuthenticatedConnection(t *testing.T) { +func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection(t *testing.T) { authzPolicy := `{ "name": "authz", "allow_rules": @@ -394,7 +370,7 @@ func (s) TestSDKAllowsRPCRequestWithEmptyPrincipalsOnTLSAuthenticatedConnection( { "name": "allow_authenticated", "source": { - "principals": [] + "principals": ["*", ""] } } ] @@ -438,7 +414,7 @@ func (s) 
TestSDKAllowsRPCRequestWithEmptyPrincipalsOnTLSAuthenticatedConnection( } } -func (s) TestSDKAllowsRPCRequestWithEmptyPrincipalsOnMTLSAuthenticatedConnection(t *testing.T) { +func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t *testing.T) { authzPolicy := `{ "name": "authz", "allow_rules": @@ -446,7 +422,7 @@ func (s) TestSDKAllowsRPCRequestWithEmptyPrincipalsOnMTLSAuthenticatedConnection { "name": "allow_authenticated", "source": { - "principals": [] + "principals": ["*", ""] } } ] From b069440926d58e0a6fa6c59b01638a6948f9e6f3 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 4 Jan 2022 08:51:39 -0800 Subject: [PATCH 384/998] credentials/google: use grpctest.Tester for tests in this package (#5098) --- credentials/google/google_test.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index 8c08712087df..0699b9ad4e01 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -26,9 +26,18 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/resolver" ) +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + type testCreds struct { credentials.TransportCredentials typ string @@ -73,7 +82,7 @@ func overrideNewCredsFuncs() func() { // TestClientHandshakeBasedOnClusterName that by default (without switching // modes), ClientHandshake does either tls or alts base on the cluster name in // attributes. 
-func TestClientHandshakeBasedOnClusterName(t *testing.T) { +func (s) TestClientHandshakeBasedOnClusterName(t *testing.T) { defer overrideNewCredsFuncs()() for bundleTyp, tc := range map[string]credentials.Bundle{ "defaultCredsWithOptions": NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}), From afded7231d0083fb7e9fcccc768ecbf56c271626 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 4 Jan 2022 15:50:50 -0800 Subject: [PATCH 385/998] xds/federation: update xdsclient to support multi authority (#5042) --- xds/internal/xdsclient/authority.go | 228 +++++++ xds/internal/xdsclient/authority_test.go | 353 ++++++++++ xds/internal/xdsclient/callback.go | 65 -- xds/internal/xdsclient/client.go | 59 +- xds/internal/xdsclient/client_test.go | 90 +-- xds/internal/xdsclient/dump.go | 28 +- xds/internal/xdsclient/dump_test.go | 22 +- xds/internal/xdsclient/loadreport.go | 18 +- xds/internal/xdsclient/loadreport_test.go | 8 +- xds/internal/xdsclient/singleton.go | 13 +- xds/internal/xdsclient/watchers.go | 58 +- .../xdsclient/watchers_cluster_test.go | 459 +------------ .../xdsclient/watchers_endpoints_test.go | 349 +--------- .../xdsclient/watchers_federation_test.go | 99 +++ .../xdsclient/watchers_listener_test.go | 432 +----------- xds/internal/xdsclient/watchers_route_test.go | 371 +---------- xds/internal/xdsclient/watchers_test.go | 614 ++++++++++++++++++ xds/internal/xdsclient/xdsclient_test.go | 2 - .../xdsclient/xdsresource/unmarshal.go | 4 + .../xdsclient/xdsresource/version/version.go | 38 +- 20 files changed, 1597 insertions(+), 1713 deletions(-) create mode 100644 xds/internal/xdsclient/authority.go create mode 100644 xds/internal/xdsclient/authority_test.go delete mode 100644 xds/internal/xdsclient/callback.go create mode 100644 xds/internal/xdsclient/watchers_federation_test.go create mode 100644 xds/internal/xdsclient/watchers_test.go diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go new file mode 100644 index 
000000000000..6cc4c117755c --- /dev/null +++ b/xds/internal/xdsclient/authority.go @@ -0,0 +1,228 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const federationScheme = "xdstp" + +// findAuthority returns the authority for this name. If it doesn't already +// exist, one will be created. +// +// Note that this doesn't always create new authority. authorities with the same +// config but different names are shared. +// +// The returned unref function must be called when the caller is done using this +// authority, without holding c.authorityMu. +// +// Caller must not hold c.authorityMu. 
+func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref func(), _ error) { + scheme, authority := n.Scheme, n.Authority + + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if c.done.HasFired() { + return nil, nil, errors.New("the xds-client is closed") + } + + config := c.config.XDSServer + if scheme == federationScheme { + cfg, ok := c.config.Authorities[authority] + if !ok { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + config = cfg.XDSServer + } + + a, err := c.newAuthority(config) + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) + } + // All returned authority from this function will be used by a watch, + // holding the ref here. + // + // Note that this must be done while c.authorityMu is held, to avoid the + // race that an authority is returned, but before the watch starts, the + // old last watch is canceled (in another goroutine), causing this + // authority to be removed, and then a watch will start on a removed + // authority. + // + // unref() will be done when the watch is canceled. + a.ref() + return a, func() { c.unrefAuthority(a) }, nil +} + +// newAuthority creates a new authority for the config. But before that, it +// checks the cache to see if an authority for this config already exists. +// +// caller must hold c.authorityMu +func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, retErr error) { + // First check if there's already an authority for this config. If found, it + // means this authority is used by other watches (could be the same + // authority name, or a different authority name but the same server + // config). Return it. + configStr := config.String() + if a, ok := c.authorities[configStr]; ok { + return a, nil + } + // Second check if there's an authority in the idle cache. 
If found, it + // means this authority was created, but moved to the idle cache because the + // watch was canceled. Move it from idle cache to the authority cache, and + // return. + if old, ok := c.idleAuthorities.Remove(configStr); ok { + oldA, _ := old.(*authority) + if oldA != nil { + c.authorities[configStr] = oldA + return oldA, nil + } + } + + // Make a new authority since there's no existing authority for this config. + ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, c.logger)} + defer func() { + if retErr != nil { + ret.close() + } + }() + ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger) + if err != nil { + return nil, err + } + ret.controller = ctr + // Add it to the cache, so it will be reused. + c.authorities[configStr] = ret + return ret, nil +} + +// unrefAuthority unrefs the authority. It also moves the authority to idle +// cache if it's ref count is 0. +// +// This function doesn't need to called explicitly. It's called by the returned +// unref from findAuthority(). +// +// Caller must not hold c.authorityMu. +func (c *clientImpl) unrefAuthority(a *authority) { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if a.unref() > 0 { + return + } + configStr := a.config.String() + delete(c.authorities, configStr) + c.idleAuthorities.Add(configStr, a, func() { + a.close() + }) +} + +// authority is a combination of pubsub and the controller for this authority. +// +// Note that it might make sense to use one pubsub for all the resources (for +// all the controllers). One downside is the handling of StoW APIs (LDS/CDS). +// These responses contain all the resources from that control plane, so pubsub +// will need to keep lists of resources from each control plane, to know what +// are removed. +type authority struct { + config *bootstrap.ServerConfig + pubsub *pubsub.Pubsub + controller controllerInterface + refCount int +} + +// caller must hold parent's authorityMu. 
+func (a *authority) ref() { + a.refCount++ +} + +// caller must hold parent's authorityMu. +func (a *authority) unref() int { + a.refCount-- + return a.refCount +} + +func (a *authority) close() { + if a.pubsub != nil { + a.pubsub.Close() + } + if a.controller != nil { + a.controller.Close() + } +} + +func (a *authority) watchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { + first, cancelF := a.pubsub.WatchListener(serviceName, cb) + if first { + a.controller.AddWatch(xdsresource.ListenerResource, serviceName) + } + return func() { + if cancelF() { + a.controller.RemoveWatch(xdsresource.ListenerResource, serviceName) + } + } +} + +func (a *authority) watchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { + first, cancelF := a.pubsub.WatchRouteConfig(routeName, cb) + if first { + a.controller.AddWatch(xdsresource.RouteConfigResource, routeName) + } + return func() { + if cancelF() { + a.controller.RemoveWatch(xdsresource.RouteConfigResource, routeName) + } + } +} + +func (a *authority) watchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { + first, cancelF := a.pubsub.WatchCluster(clusterName, cb) + if first { + a.controller.AddWatch(xdsresource.ClusterResource, clusterName) + } + return func() { + if cancelF() { + a.controller.RemoveWatch(xdsresource.ClusterResource, clusterName) + } + } +} + +func (a *authority) watchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { + first, cancelF := a.pubsub.WatchEndpoints(clusterName, cb) + if first { + a.controller.AddWatch(xdsresource.EndpointsResource, clusterName) + } + return func() { + if cancelF() { + a.controller.RemoveWatch(xdsresource.EndpointsResource, clusterName) + } + } +} + +func (a *authority) reportLoad(server string) (*load.Store, func()) { + return a.controller.ReportLoad(server) +} + +func (a *authority) dump(t xdsresource.ResourceType) 
map[string]xdsresource.UpdateWithMD { + return a.pubsub.Dump(t) +} diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go new file mode 100644 index 000000000000..583594fad066 --- /dev/null +++ b/xds/internal/xdsclient/authority_test.go @@ -0,0 +1,353 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/testing/protocmp" +) + +var ( + serverConfigs = []*bootstrap.ServerConfig{ + { + ServerURI: testXDSServer + "0", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "creds-0", + TransportAPI: version.TransportV2, + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, + { + ServerURI: testXDSServer + "1", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "creds-1", + TransportAPI: version.TransportV3, + NodeProto: xdstestutils.EmptyNodeProtoV3, + }, + { + ServerURI: 
testXDSServer + "2", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "creds-2", + TransportAPI: version.TransportV2, + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, + } + + serverConfigCmpOptions = cmp.Options{ + cmpopts.IgnoreFields(bootstrap.ServerConfig{}, "Creds"), + protocmp.Transform(), + } +) + +// watchAndFetchNewController starts a CDS watch on the client for the given +// resourceName, and tries to receive a new controller from the ctrlCh. +// +// It returns false if there's no controller in the ctrlCh. +func watchAndFetchNewController(t *testing.T, client *clientImpl, resourceName string, ctrlCh *testutils.Channel) (*testController, bool, func()) { + updateCh := testutils.NewChannel() + cancelWatch := client.WatchCluster(resourceName, func(update xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) + }) + + // Clear the item in the watch channel, otherwise the next watch will block. + authority := xdsresource.ParseName(resourceName).Authority + var config *bootstrap.ServerConfig + if authority == "" { + config = client.config.XDSServer + } else { + authConfig, ok := client.config.Authorities[authority] + if !ok { + t.Fatalf("failed to find authority %q", authority) + } + config = authConfig.XDSServer + } + a := client.authorities[config.String()] + if a == nil { + t.Fatalf("authority for %q is not created", authority) + } + ctrlTemp := a.controller.(*testController) + // Clear the channel so the next watch on this controller can proceed. + ctrlTemp.addWatches[xdsresource.ClusterResource].ReceiveOrFail() + + cancelWatchRet := func() { + cancelWatch() + ctrlTemp.removeWatches[xdsresource.ClusterResource].ReceiveOrFail() + } + + // Try to receive a new controller. 
+ c, ok := ctrlCh.ReceiveOrFail() + if !ok { + return nil, false, cancelWatchRet + } + ctrl := c.(*testController) + return ctrl, true, cancelWatchRet +} + +// TestAuthorityDefaultAuthority covers that a watch for an old style resource +// name (one without authority) builds a controller using the top level server +// config. +func (s) TestAuthorityDefaultAuthority(t *testing.T) { + overrideFedEnvVar(t) + ctrlCh := overrideNewController(t) + + client, err := newWithConfig(&bootstrap.Config{ + XDSServer: serverConfigs[0], + Authorities: map[string]*bootstrap.Authority{testAuthority: {XDSServer: serverConfigs[1]}}, + }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + t.Cleanup(client.Close) + + ctrl, ok, _ := watchAndFetchNewController(t, client, testCDSName, ctrlCh) + if !ok { + t.Fatalf("want a new controller to be built, got none") + } + // Want the default server config. + wantConfig := serverConfigs[0] + if diff := cmp.Diff(ctrl.config, wantConfig, serverConfigCmpOptions); diff != "" { + t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff) + } +} + +// TestAuthorityNoneDefaultAuthority covers that a watch with a new style +// resource name creates a controller with the corresponding server config. 
+func (s) TestAuthorityNoneDefaultAuthority(t *testing.T) { + overrideFedEnvVar(t) + ctrlCh := overrideNewController(t) + + client, err := newWithConfig(&bootstrap.Config{ + XDSServer: serverConfigs[0], + Authorities: map[string]*bootstrap.Authority{testAuthority: {XDSServer: serverConfigs[1]}}, + }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + t.Cleanup(client.Close) + + resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + ctrl, ok, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh) + if !ok { + t.Fatalf("want a new controller to be built, got none") + } + // Want the server config for this authority. + wantConfig := serverConfigs[1] + if diff := cmp.Diff(ctrl.config, wantConfig, serverConfigCmpOptions); diff != "" { + t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff) + } +} + +// TestAuthorityShare covers that +// - watch with the same authority name doesn't create new authority +// - watch with different authority name but same authority config doesn't +// create new authority +func (s) TestAuthorityShare(t *testing.T) { + overrideFedEnvVar(t) + ctrlCh := overrideNewController(t) + + client, err := newWithConfig(&bootstrap.Config{ + XDSServer: serverConfigs[0], + Authorities: map[string]*bootstrap.Authority{ + testAuthority: {XDSServer: serverConfigs[1]}, + testAuthority2: {XDSServer: serverConfigs[1]}, // Another authority name, but with the same config. 
+ }, + }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + t.Cleanup(client.Close) + + resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + ctrl1, ok1, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh) + if !ok1 { + t.Fatalf("want a new controller to be built, got none") + } + // Want the server config for this authority. + wantConfig := serverConfigs[1] + if diff := cmp.Diff(ctrl1.config, wantConfig, serverConfigCmpOptions); diff != "" { + t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff) + } + + // Call the watch with the same authority name. This shouldn't create a new + // controller. + resourceNameSameAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) + ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh) + if ok2 { + t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) + } + + // Call the watch with a different authority name, but the same server + // config. This shouldn't create a new controller. + resourceNameSameConfig := buildResourceName(xdsresource.ClusterResource, testAuthority2, testCDSName+"1", nil) + if ctrl, ok, _ := watchAndFetchNewController(t, client, resourceNameSameConfig, ctrlCh); ok { + t.Fatalf("an unexpected controller is built with config: %v", ctrl.config) + } +} + +// TestAuthorityIdle covers that +// - authorities are put in a timeout cache when the last watch is canceled +// - idle authorities are not immediately closed. They will be closed after a +// timeout. 
+func (s) TestAuthorityIdleTimeout(t *testing.T) { + overrideFedEnvVar(t) + ctrlCh := overrideNewController(t) + + const idleTimeout = 50 * time.Millisecond + + client, err := newWithConfig(&bootstrap.Config{ + XDSServer: serverConfigs[0], + Authorities: map[string]*bootstrap.Authority{ + testAuthority: {XDSServer: serverConfigs[1]}, + }, + }, defaultWatchExpiryTimeout, idleTimeout) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + t.Cleanup(client.Close) + + resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) + if !ok1 { + t.Fatalf("want a new controller to be built, got none") + } + + var cancelWatch2 func() + // Call the watch with the same authority name. This shouldn't create a new + // controller. + resourceNameSameAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) + ctrl2, ok2, cancelWatch2 := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh) + if ok2 { + t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) + } + + cancelWatch1() + if ctrl1.done.HasFired() { + t.Fatalf("controller is closed immediately when the watch is canceled, wanted to be put in the idle cache") + } + + // Cancel the second watch, should put controller in the idle cache. + cancelWatch2() + if ctrl1.done.HasFired() { + t.Fatalf("controller is closed when the second watch is closed") + } + + time.Sleep(idleTimeout * 2) + if !ctrl1.done.HasFired() { + t.Fatalf("controller is not closed after idle timeout") + } +} + +// TestAuthorityClientClose covers that the authorities in use and in idle cache +// are all closed when the client is closed. 
+func (s) TestAuthorityClientClose(t *testing.T) { + overrideFedEnvVar(t) + ctrlCh := overrideNewController(t) + + client, err := newWithConfig(&bootstrap.Config{ + XDSServer: serverConfigs[0], + Authorities: map[string]*bootstrap.Authority{ + testAuthority: {XDSServer: serverConfigs[1]}, + }, + }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + t.Cleanup(client.Close) + + resourceName := testCDSName + ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) + if !ok1 { + t.Fatalf("want a new controller to be built, got none") + } + + resourceNameWithAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh) + if !ok2 { + t.Fatalf("want a new controller to be built, got none") + } + + cancelWatch1() + if ctrl1.done.HasFired() { + t.Fatalf("controller is closed immediately when the watch is canceled, wanted to be put in the idle cache") + } + + // Close the client while watch2 is not canceled. ctrl1 is in the idle + // cache, ctrl2 is in use. Both should be closed. + client.Close() + + if !ctrl1.done.HasFired() { + t.Fatalf("controller in idle cache is not closed after client is closed") + } + if !ctrl2.done.HasFired() { + t.Fatalf("controller in use is not closed after client is closed") + } +} + +// TestAuthorityRevive covers that the authorities in the idle cache is revived +// when a new watch is started on this authority. 
+func (s) TestAuthorityRevive(t *testing.T) { + overrideFedEnvVar(t) + ctrlCh := overrideNewController(t) + + const idleTimeout = 50 * time.Millisecond + + client, err := newWithConfig(&bootstrap.Config{ + XDSServer: serverConfigs[0], + Authorities: map[string]*bootstrap.Authority{ + testAuthority: {XDSServer: serverConfigs[1]}, + }, + }, defaultWatchExpiryTimeout, idleTimeout) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + t.Cleanup(client.Close) + + // Start a watch on the authority, and cancel it. This puts the authority in + // the idle cache. + resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) + if !ok1 { + t.Fatalf("want a new controller to be built, got none") + } + cancelWatch1() + + // Start another watch on this authority, it should retrieve the authority + // from the cache, instead of creating a new one. + resourceNameWithAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) + ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh) + if ok2 { + t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) + } + + // Wait for double the idle timeout, the controller shouldn't be closed, + // since it was revived. + time.Sleep(idleTimeout * 2) + if ctrl1.done.HasFired() { + t.Fatalf("controller that was revived is closed after timeout, want not closed") + } +} diff --git a/xds/internal/xdsclient/callback.go b/xds/internal/xdsclient/callback.go deleted file mode 100644 index 1ad1659e12e9..000000000000 --- a/xds/internal/xdsclient/callback.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// NewListeners is called by the underlying xdsAPIClient when it receives an -// xDS response. -// -// A response can contain multiple resources. They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.pubsub.NewListeners(updates, metadata) -} - -// NewRouteConfigs is called by the underlying xdsAPIClient when it receives an -// xDS response. -// -// A response can contain multiple resources. They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.pubsub.NewRouteConfigs(updates, metadata) -} - -// NewClusters is called by the underlying xdsAPIClient when it receives an xDS -// response. -// -// A response can contain multiple resources. They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.pubsub.NewClusters(updates, metadata) -} - -// NewEndpoints is called by the underlying xdsAPIClient when it receives an -// xDS response. -// -// A response can contain multiple resources. 
They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - c.pubsub.NewEndpoints(updates, metadata) -} - -// NewConnectionError is called by the underlying xdsAPIClient when it receives -// a connection error. The error will be forwarded to all the resource watchers. -func (c *clientImpl) NewConnectionError(err error) { - c.pubsub.NewConnectionError(err) -} diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 13e8265b65c6..1378268ef434 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -22,12 +22,13 @@ package xdsclient import ( "fmt" + "sync" "time" + "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -42,17 +43,39 @@ type clientImpl struct { done *grpcsync.Event config *bootstrap.Config - controller controllerInterface - - logger *grpclog.PrefixLogger - pubsub *pubsub.Pubsub + // authorityMu protects the authority fields. It's necessary because an + // authority is created when it's used. + authorityMu sync.Mutex + // authorities is a map from ServerConfig to authority. So that + // different authorities sharing the same ServerConfig can share the + // authority. + // + // The key is **ServerConfig.String()**, not the authority name. + // + // An authority is either in authorities, or idleAuthorities, + // never both. + authorities map[string]*authority + // idleAuthorities keeps the authorities that are not used (the last + // watch on it was canceled). They are kept in the cache and will be deleted + // after a timeout. The key is ServerConfig.String(). 
+ // + // An authority is either in authorities, or idleAuthorities, + // never both. + idleAuthorities *cache.TimeoutCache + + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration } // newWithConfig returns a new xdsClient with the given config. -func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (_ *clientImpl, retErr error) { +func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (_ *clientImpl, retErr error) { c := &clientImpl{ - done: grpcsync.NewEvent(), - config: config, + done: grpcsync.NewEvent(), + config: config, + watchExpiryTimeout: watchExpiryTimeout, + + authorities: make(map[string]*authority), + idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), } defer func() { @@ -64,14 +87,6 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( c.logger = prefixLogger(c) c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) - c.pubsub = pubsub.New(watchExpiryTimeout, c.logger) - - controller, err := newController(config.XDSServer, c.pubsub, c.updateValidator, c.logger) - if err != nil { - return nil, fmt.Errorf("xds: failed to connect to the control plane: %v", err) - } - c.controller = controller - c.logger.Infof("Created") return c, nil } @@ -94,12 +109,14 @@ func (c *clientImpl) Close() { // Note that Close needs to check for nils even if some of them are always // set in the constructor. This is because the constructor defers Close() in // error cases, and the fields might not be set when the error happens. 
- if c.controller != nil { - c.controller.Close() - } - if c.pubsub != nil { - c.pubsub.Close() + + c.authorityMu.Lock() + for _, a := range c.authorities { + a.close() } + c.idleAuthorities.Clear(true) + c.authorityMu.Unlock() + c.logger.Infof("Shutdown") } diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index cd2b98950a65..d6c9f9b401d8 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -51,12 +51,15 @@ func Test(t *testing.T) { } const ( - testXDSServer = "xds-server" + testXDSServer = "xds-server" + testXDSServerAuthority = "xds-server-authority" - testLDSName = "test-lds" - testRDSName = "test-rds" - testCDSName = "test-cds" - testEDSName = "test-eds" + testAuthority = "test-authority" + testAuthority2 = "test-authority-2" + testLDSName = "test-lds" + testRDSName = "test-rds" + testCDSName = "test-cds" + testEDSName = "test-eds" defaultTestWatchExpiryTimeout = 500 * time.Millisecond defaultTestTimeout = 5 * time.Second @@ -67,38 +70,47 @@ func newStringP(s string) *string { return &s } -func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstrap.Config, time.Duration) { - watchExpiryTimeout := defaultWatchExpiryTimeout - if overrideWatchExpiryTimeout { - watchExpiryTimeout = defaultTestWatchExpiryTimeout - } +func clientOpts() *bootstrap.Config { return &bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: balancerName, + ServerURI: testXDSServer, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: xdstestutils.EmptyNodeProtoV2, }, - }, watchExpiryTimeout + Authorities: map[string]*bootstrap.Authority{ + testAuthority: { + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServerAuthority, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, + }, + }, + } } type testController struct { + // config is the config this controller is created with. 
+ config *bootstrap.ServerConfig + done *grpcsync.Event addWatches map[xdsresource.ResourceType]*testutils.Channel removeWatches map[xdsresource.ResourceType]*testutils.Channel } -func overrideNewController() (*testutils.Channel, func()) { +func overrideNewController(t *testing.T) *testutils.Channel { origNewController := newController ch := testutils.NewChannel() newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) { - ret := newTestController() + ret := newTestController(config) ch.Send(ret) return ret, nil } - return ch, func() { newController = origNewController } + t.Cleanup(func() { newController = origNewController }) + return ch } -func newTestController() *testController { +func newTestController(config *bootstrap.ServerConfig) *testController { addWatches := map[xdsresource.ResourceType]*testutils.Channel{ xdsresource.ListenerResource: testutils.NewChannel(), xdsresource.RouteConfigResource: testutils.NewChannel(), @@ -112,6 +124,7 @@ func newTestController() *testController { xdsresource.EndpointsResource: testutils.NewChannel(), } return &testController{ + config: config, done: grpcsync.NewEvent(), addWatches: addWatches, removeWatches: removeWatches, @@ -137,22 +150,14 @@ func (c *testController) Close() { // TestWatchCallAnotherWatch covers the case where watch() is called inline by a // callback. It makes sure it doesn't cause a deadlock. 
func (s) TestWatchCallAnotherWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) + // Start a watch for some resource, so that the controller and update + // handlers are built for this authority. The test needs these to make an + // inline watch in a callback. + client, ctrlCh := testClientSetup(t, false) + newWatch(t, client, xdsresource.ClusterResource, "doesnot-matter") + controller, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, "doesnot-matter") clusterUpdateCh := testutils.NewChannel() firstTime := true @@ -161,17 +166,17 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { // Calls another watch inline, to ensure there's deadlock. 
client.WatchCluster("another-random-name", func(xdsresource.ClusterUpdate, error) {}) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); firstTime && err != nil { + if _, err := controller.addWatches[xdsresource.ClusterResource].Receive(ctx); firstTime && err != nil { t.Fatalf("want new watch to start, got error %v", err) } firstTime = false }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { + if _, err := controller.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { t.Fatalf("want new watch to start, got error %v", err) } wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) + updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { t.Fatal(err) } @@ -179,7 +184,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { // The second update needs to be different in the underlying resource proto // for the watch callback to be invoked. wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) + updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil { t.Fatal(err) } @@ -272,18 +277,21 @@ func (s) TestClientNewSingleton(t *testing.T) { } defer func() { bootstrapNewConfig = oldBootstrapNewConfig }() - apiClientCh, cleanup := overrideNewController() - defer cleanup() + ctrlCh := overrideNewController(t) // The first New(). 
Should create a Client and a new APIClient. client, err := newRefCounted() if err != nil { t.Fatalf("failed to create client: %v", err) } + + // Call a watch to create the controller. + client.WatchCluster("doesnot-matter", func(update xdsresource.ClusterUpdate, err error) {}) + clientImpl := client.clientImpl ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) + c, err := ctrlCh.Receive(ctx) if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } @@ -306,7 +314,7 @@ func (s) TestClientNewSingleton(t *testing.T) { sctx, scancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer scancel() - _, err := apiClientCh.Receive(sctx) + _, err := ctrlCh.Receive(sctx) if err == nil { client.Close() t.Fatalf("%d-th call to New() created a new API client", i) @@ -342,7 +350,11 @@ func (s) TestClientNewSingleton(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client2.Close() - c2, err := apiClientCh.Receive(ctx) + + // Call a watch to create the controller. 
+ client2.WatchCluster("abc", func(update xdsresource.ClusterUpdate, err error) {}) + + c2, err := ctrlCh.Receive(ctx) if err != nil { t.Fatalf("timeout when waiting for API client to be created: %v", err) } diff --git a/xds/internal/xdsclient/dump.go b/xds/internal/xdsclient/dump.go index 61c054d25bc9..69407d20cafd 100644 --- a/xds/internal/xdsclient/dump.go +++ b/xds/internal/xdsclient/dump.go @@ -22,22 +22,42 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) +func mergeMaps(maps []map[string]xdsresource.UpdateWithMD) map[string]xdsresource.UpdateWithMD { + ret := make(map[string]xdsresource.UpdateWithMD) + for _, m := range maps { + for k, v := range m { + ret[k] = v + } + } + return ret +} + +func (c *clientImpl) dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + maps := make([]map[string]xdsresource.UpdateWithMD, 0, len(c.authorities)) + for _, a := range c.authorities { + maps = append(maps, a.dump(t)) + } + return mergeMaps(maps) +} + // DumpLDS returns the status and contents of LDS. func (c *clientImpl) DumpLDS() map[string]xdsresource.UpdateWithMD { - return c.pubsub.Dump(xdsresource.ListenerResource) + return c.dump(xdsresource.ListenerResource) } // DumpRDS returns the status and contents of RDS. func (c *clientImpl) DumpRDS() map[string]xdsresource.UpdateWithMD { - return c.pubsub.Dump(xdsresource.RouteConfigResource) + return c.dump(xdsresource.RouteConfigResource) } // DumpCDS returns the status and contents of CDS. func (c *clientImpl) DumpCDS() map[string]xdsresource.UpdateWithMD { - return c.pubsub.Dump(xdsresource.ClusterResource) + return c.dump(xdsresource.ClusterResource) } // DumpEDS returns the status and contents of EDS. 
func (c *clientImpl) DumpEDS() map[string]xdsresource.UpdateWithMD { - return c.pubsub.Dump(xdsresource.EndpointsResource) + return c.dump(xdsresource.EndpointsResource) } diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index 41fbeb69b7c9..6a1729675f8f 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -16,7 +16,7 @@ * */ -package xdsclient_test +package xdsclient import ( "fmt" @@ -30,8 +30,6 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" @@ -44,8 +42,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) -const defaultTestWatchExpiryTimeout = 500 * time.Millisecond - func (s) TestLDSConfigDump(t *testing.T) { const testVersion = "test-version-lds" var ( @@ -76,7 +72,7 @@ func (s) TestLDSConfigDump(t *testing.T) { listenerRaws[ldsTargets[i]] = testutils.MarshalAny(listenersT) } - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, err := NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: testXDSServer, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -87,7 +83,6 @@ func (s) TestLDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. 
if err := compareDump(client.DumpLDS, map[string]xdsresource.UpdateWithMD{}); err != nil { @@ -114,6 +109,7 @@ func (s) TestLDSConfigDump(t *testing.T) { Raw: r, } } + updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") updateHandler.NewListeners(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. @@ -192,7 +188,7 @@ func (s) TestRDSConfigDump(t *testing.T) { routeRaws[rdsTargets[i]] = testutils.MarshalAny(routeConfigT) } - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, err := NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: testXDSServer, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -203,7 +199,6 @@ func (s) TestRDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. if err := compareDump(client.DumpRDS, map[string]xdsresource.UpdateWithMD{}); err != nil { @@ -230,6 +225,7 @@ func (s) TestRDSConfigDump(t *testing.T) { Raw: r, } } + updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") updateHandler.NewRouteConfigs(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. @@ -308,7 +304,7 @@ func (s) TestCDSConfigDump(t *testing.T) { clusterRaws[cdsTargets[i]] = testutils.MarshalAny(clusterT) } - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, err := NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: testXDSServer, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -319,7 +315,6 @@ func (s) TestCDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. 
if err := compareDump(client.DumpCDS, map[string]xdsresource.UpdateWithMD{}); err != nil { @@ -346,6 +341,7 @@ func (s) TestCDSConfigDump(t *testing.T) { Raw: r, } } + updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") updateHandler.NewClusters(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. @@ -410,7 +406,7 @@ func (s) TestEDSConfigDump(t *testing.T) { endpointRaws[edsTargets[i]] = testutils.MarshalAny(claT) } - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, err := NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: testXDSServer, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -421,7 +417,6 @@ func (s) TestEDSConfigDump(t *testing.T) { t.Fatalf("failed to create client: %v", err) } defer client.Close() - updateHandler := client.(pubsub.UpdateHandler) // Expected unknown. if err := compareDump(client.DumpEDS, map[string]xdsresource.UpdateWithMD{}); err != nil { @@ -448,6 +443,7 @@ func (s) TestEDSConfigDump(t *testing.T) { Raw: r, } } + updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") updateHandler.NewEndpoints(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) // Expect ACK. diff --git a/xds/internal/xdsclient/loadreport.go b/xds/internal/xdsclient/loadreport.go index 21400c1321b8..d157731c789b 100644 --- a/xds/internal/xdsclient/loadreport.go +++ b/xds/internal/xdsclient/loadreport.go @@ -17,7 +17,10 @@ package xdsclient -import "google.golang.org/grpc/xds/internal/xdsclient/load" +import ( + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) // ReportLoad starts an load reporting stream to the given server. 
If the server // is not an empty string, and is different from the management server, a new @@ -29,5 +32,16 @@ import "google.golang.org/grpc/xds/internal/xdsclient/load" // It returns a Store for the user to report loads, a function to cancel the // load reporting stream. func (c *clientImpl) ReportLoad(server string) (*load.Store, func()) { - return c.controller.ReportLoad(server) + // TODO: load reporting with federation also needs find the authority for + // this server first, then reports load to it. Currently always report to + // the default authority. This is needed to avoid a nil pointer panic. + a, unref, err := c.findAuthority(xdsresource.ParseName("")) + if err != nil { + return nil, func() {} + } + store, cancelF := a.reportLoad(server) + return store, func() { + cancelF() + unref() + } } diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 8b19f80287cc..92b5ab6482d8 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -65,13 +65,15 @@ func (s) TestLRSClient(t *testing.T) { defer xdsC.Close() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if u, err := fs.NewConnChan.Receive(ctx); err != nil { - t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) - } // Report to the same address should not create new ClientConn. 
store1, lrsCancel1 := xdsC.ReportLoad(fs.Address) defer lrsCancel1() + + if u, err := fs.NewConnChan.Receive(ctx); err != nil { + t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) + } + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if u, err := fs.NewConnChan.Receive(sCtx); err != context.DeadlineExceeded { diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index f045790e2a40..ef928041c17a 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -28,7 +28,10 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) -const defaultWatchExpiryTimeout = 15 * time.Second +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) // This is the Client returned by New(). It contains one client implementation, // and maintains the refcount. @@ -82,7 +85,7 @@ func newRefCounted() (*clientRefCounted, error) { if err != nil { return nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) } - c, err := newWithConfig(config, defaultWatchExpiryTimeout) + c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) if err != nil { return nil, err } @@ -114,7 +117,7 @@ func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { } // Create the new client implementation. - c, err := newWithConfig(config, defaultWatchExpiryTimeout) + c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) if err != nil { return nil, err } @@ -144,7 +147,7 @@ func (c *clientRefCounted) Close() { // Note that this function doesn't set the singleton, so that the testing states // don't leak. 
func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { - cl, err := newWithConfig(config, watchExpiryTimeout) + cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) if err != nil { return nil, err } @@ -182,7 +185,7 @@ func NewClientWithBootstrapContents(contents []byte) (XDSClient, error) { return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) } - cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout) + cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) if err != nil { return nil, err } diff --git a/xds/internal/xdsclient/watchers.go b/xds/internal/xdsclient/watchers.go index fe59dbbd6f68..0c9f87aa7dc0 100644 --- a/xds/internal/xdsclient/watchers.go +++ b/xds/internal/xdsclient/watchers.go @@ -27,31 +27,35 @@ import ( // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { - first, cancelF := c.pubsub.WatchListener(serviceName, cb) - if first { - c.controller.AddWatch(xdsresource.ListenerResource, serviceName) + n := xdsresource.ParseName(serviceName) + a, unref, err := c.findAuthority(n) + if err != nil { + cb(xdsresource.ListenerUpdate{}, err) + return func() {} } + cancelF := a.watchListener(n.String(), cb) return func() { - if cancelF() { - c.controller.RemoveWatch(xdsresource.ListenerResource, serviceName) - } + cancelF() + unref() } } -// WatchRouteConfig starts a listener watcher for the service.. +// WatchRouteConfig starts a listener watcher for the service. // // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { - first, cancelF := c.pubsub.WatchRouteConfig(routeName, cb) - if first { - c.controller.AddWatch(xdsresource.RouteConfigResource, routeName) + n := xdsresource.ParseName(routeName) + a, unref, err := c.findAuthority(n) + if err != nil { + cb(xdsresource.RouteConfigUpdate{}, err) + return func() {} } + cancelF := a.watchRouteConfig(n.String(), cb) return func() { - if cancelF() { - c.controller.RemoveWatch(xdsresource.RouteConfigResource, routeName) - } + cancelF() + unref() } } @@ -65,14 +69,16 @@ func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.Rout // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { - first, cancelF := c.pubsub.WatchCluster(clusterName, cb) - if first { - c.controller.AddWatch(xdsresource.ClusterResource, clusterName) + n := xdsresource.ParseName(clusterName) + a, unref, err := c.findAuthority(n) + if err != nil { + cb(xdsresource.ClusterUpdate{}, err) + return func() {} } + cancelF := a.watchCluster(n.String(), cb) return func() { - if cancelF() { - c.controller.RemoveWatch(xdsresource.ClusterResource, clusterName) - } + cancelF() + unref() } } @@ -85,13 +91,15 @@ func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.Cluste // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { - first, cancelF := c.pubsub.WatchEndpoints(clusterName, cb) - if first { - c.controller.AddWatch(xdsresource.EndpointsResource, clusterName) + n := xdsresource.ParseName(clusterName) + a, unref, err := c.findAuthority(n) + if err != nil { + cb(xdsresource.EndpointsUpdate{}, err) + return func() {} } + cancelF := a.watchEndpoints(n.String(), cb) return func() { - if cancelF() { - c.controller.RemoveWatch(xdsresource.EndpointsResource, clusterName) - } + cancelF() + unref() } } diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index 98be38869bc6..52c6d42d340b 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -25,9 +25,6 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc/internal/testutils" ) // TestClusterWatch covers the cases: @@ -35,285 +32,38 @@ import ( // - an update for another resource name // - an update is received after cancel() func (s) TestClusterWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, 
Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Push an update, with an extra resource for a different resource name. - // Specify a non-nil raw proto in the original resource to ensure that the - // new update is not considered equal to the old one. - newUpdate := wantUpdate - newUpdate.Raw = &anypb.Any{} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ - testCDSName: {Update: newUpdate}, - "randomName": {}, - }, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, newUpdate, nil); err != nil { - t.Fatal(err) - } - - // Cancel watch, and send update again. - cancelWatch() - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } + testWatch(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) } // TestClusterTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. 
func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - var clusterUpdateChs []*testutils.Channel - var cancelLastWatch func() - const count = 2 - for i := 0; i < count; i++ { - clusterUpdateCh := testutils.NewChannel() - clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - cancelLastWatch = client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. None of the watchers should - // be notified because one has been cancelled, and the other is receiving - // the same update. 
- cancelLastWatch() - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - func() { - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ClusterUpdate: %v, %v, want channel recv timeout", u, err) - } - }() - } - - // Push a new update and make sure the uncancelled watcher is invoked. - // Specify a non-nil raw proto to ensure that the new update is not - // considered equal to the old one. - newUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName, Raw: &anypb.Any{}} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateChs[0], newUpdate, nil); err != nil { - t.Fatal(err) - } + testTwoWatchSameResourceName(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) } // TestClusterThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - // Two watches for the same name. 
- var clusterUpdateChs []*testutils.Channel - const count = 2 - for i := 0; i < count; i++ { - clusterUpdateCh := testutils.NewChannel() - clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. - clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ - testCDSName + "1": {Update: wantUpdate1}, - testCDSName + "2": {Update: wantUpdate2}, - }, xdsresource.UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate1, nil); err != nil { - t.Fatal(err) - } - } - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } + testThreeWatchDifferentResourceName(t, xdsresource.ClusterResource, + xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1", + xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2", + ) } // TestClusterWatchAfterCache covers the case where watch 
is called after the update // is in cache. func (s) TestClusterWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ - testCDSName: {Update: wantUpdate}, - }, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. - clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receives the update. 
- if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Old watch should see nothing. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } + testWatchAfterCache(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) } // TestClusterWatchExpiryTimer tests the case where the client does not receive // an CDS response for the request that it sends out. We want the watch callback // to be invoked with an error once the watchExpiryTimer fires. func (s) TestClusterWatchExpiryTimer(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, true)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, _ := testClientSetup(t, true) + clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName) u, err := clusterUpdateCh.Receive(ctx) if err != nil { @@ -329,33 +79,14 @@ func (s) TestClusterWatchExpiryTimer(t *testing.T) { // an CDS response for the request that it sends out. 
We want no error even // after expiry timeout. func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, true)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, ctrlCh := testClientSetup(t, true) + clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName) wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ + updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ testCDSName: {Update: wantUpdate}, }, xdsresource.UpdateMetadata{}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { @@ -377,117 +108,23 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { // - one more update without the removed resource // - the callback (above) shouldn't receive any update func (s) TestClusterResourceRemoved(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - 
defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - clusterUpdateCh1 := testutils.NewChannel() - client.WatchCluster(testCDSName+"1", func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - // Another watch for a different name. - clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"} - wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ - testCDSName + "1": {Update: wantUpdate1}, - testCDSName + "2": {Update: wantUpdate2}, - }, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1, nil); err != nil { - t.Fatal(err) - } - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } - - // Send another update to remove resource 1. - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) - - // Watcher 1 should get an error. 
- if u, err := clusterUpdateCh1.Receive(ctx); err != nil || xdsresource.ErrType(u.(xdsresource.ClusterUpdateErrTuple).Err) != xdsresource.ErrorTypeResourceNotFound { - t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) - } - - // Watcher 2 should not see an update since the resource has not changed. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateCh2.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ClusterUpdate: %v, want receiving from channel timeout", u) - } - - // Send another update with resource 2 modified. Specify a non-nil raw proto - // to ensure that the new update is not considered equal to the old one. - wantUpdate2 = xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) - - // Watcher 1 should not see an update. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected Cluster: %v, want receiving from channel timeout", u) - } - - // Watcher 2 should get the update. - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } + testResourceRemoved(t, xdsresource.ClusterResource, + xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1", + xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2", + ) } // TestClusterWatchNACKError covers the case that an update is NACK'ed, and the // watcher should also receive the error. 
func (s) TestClusterWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - defer cancelWatch() - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, ctrlCh := testClientSetup(t, false) + clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName) wantError := fmt.Errorf("testing error") - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: { + updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: { Err: wantError, }}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) if err := verifyClusterUpdate(ctx, clusterUpdateCh, xdsresource.ClusterUpdate{}, wantError); err != nil { @@ -500,57 +137,5 @@ func (s) TestClusterWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. 
func (s) TestClusterWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - const badResourceName = "bad-resource" - updateChs := make(map[string]*testutils.Channel) - - for _, name := range []string{testCDSName, badResourceName} { - clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(name, func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - defer func() { - cancelWatch() - if _, err := apiClient.removeWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want watch to be canceled, got err: %v", err) - } - }() - if _, err := apiClient.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - updateChs[name] = clusterUpdateCh - } - - wantError := fmt.Errorf("testing error") - wantError2 := fmt.Errorf("individual error") - client.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ - testCDSName: {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName}}, - badResourceName: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - - // The valid resource should be sent to the watcher. - if err := verifyClusterUpdate(ctx, updateChs[testCDSName], xdsresource.ClusterUpdate{ClusterName: testEDSName}, nil); err != nil { - t.Fatal(err) - } - - // The failed watcher should receive an error. 
- if err := verifyClusterUpdate(ctx, updateChs[badResourceName], xdsresource.ClusterUpdate{}, wantError2); err != nil { - t.Fatal(err) - } + testWatchPartialValid(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) } diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go index 4ae59d2f1e92..b0e84edcf487 100644 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ b/xds/internal/xdsclient/watchers_endpoints_test.go @@ -25,9 +25,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" ) @@ -53,285 +51,38 @@ var ( // - an update for another resource name (which doesn't trigger callback) // - an update is received after cancel() func (s) TestEndpointsWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} - 
client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Push an update, with an extra resource for a different resource name. - // Specify a non-nil raw proto in the original resource to ensure that the - // new update is not considered equal to the old one. - newUpdate := wantUpdate - newUpdate.Raw = &anypb.Any{} - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ - testCDSName: {Update: newUpdate}, - "randomName": {}, - }, xdsresource.UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, newUpdate, nil); err != nil { - t.Fatal(err) - } - - // Cancel watch, and send update again. - cancelWatch() - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) - } + testWatch(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) } // TestEndpointsTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. 
func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - const count = 2 - var ( - endpointsUpdateChs []*testutils.Channel - cancelLastWatch func() - ) - for i := 0; i < count; i++ { - endpointsUpdateCh := testutils.NewChannel() - endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - cancelLastWatch = client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. None of the watchers should - // be notified because one has been cancelled, and the other is receiving - // the same update. 
- cancelLastWatch() - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - func() { - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) - } - }() - } - - // Push a new update and make sure the uncancelled watcher is invoked. - // Specify a non-nil raw proto to ensure that the new update is not - // considered equal to the old one. - newUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}, Raw: &anypb.Any{}} - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[0], newUpdate, nil); err != nil { - t.Fatal(err) - } + testTwoWatchSameResourceName(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) } // TestEndpointsThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - // Two watches for the same name. 
- var endpointsUpdateChs []*testutils.Channel - const count = 2 - for i := 0; i < count; i++ { - endpointsUpdateCh := testutils.NewChannel() - endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - client.WatchEndpoints(testCDSName+"1", func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. - endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName+"2", func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} - wantUpdate2 := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[1]}} - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ - testCDSName + "1": {Update: wantUpdate1}, - testCDSName + "2": {Update: wantUpdate2}, - }, xdsresource.UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate1, nil); err != nil { - t.Fatal(err) - } - } - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } + testThreeWatchDifferentResourceName(t, xdsresource.EndpointsResource, + xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName+"1", + 
xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[1]}}, testCDSName+"2", + ) } // TestEndpointsWatchAfterCache covers the case where watch is called after the update // is in cache. func (s) TestEndpointsWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}} - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. 
- endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receives the update. - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Old watch should see nothing. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) - } + testWatchAfterCache(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) } // TestEndpointsWatchExpiryTimer tests the case where the client does not receive // an CDS response for the request that it sends out. We want the watch callback // to be invoked with an error once the watchExpiryTimer fires. 
func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, true)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, _ := testClientSetup(t, true) + endpointsUpdateCh, _ := newWatch(t, client, xdsresource.EndpointsResource, testCDSName) u, err := endpointsUpdateCh.Receive(ctx) if err != nil { @@ -346,34 +97,14 @@ func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { // TestEndpointsWatchNACKError covers the case that an update is NACK'ed, and // the watcher should also receive the error. 
func (s) TestEndpointsWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(testCDSName, func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - defer cancelWatch() - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, ctrlCh := testClientSetup(t, false) + endpointsUpdateCh, _ := newWatch(t, client, xdsresource.EndpointsResource, testCDSName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.EndpointsResource, testCDSName) wantError := fmt.Errorf("testing error") - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + updateHandler.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, xdsresource.EndpointsUpdate{}, wantError); err != nil { t.Fatal(err) } @@ -384,57 +115,5 @@ func (s) TestEndpointsWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. 
func (s) TestEndpointsWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - const badResourceName = "bad-resource" - updateChs := make(map[string]*testutils.Channel) - - for _, name := range []string{testCDSName, badResourceName} { - endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(name, func(update xdsresource.EndpointsUpdate, err error) { - endpointsUpdateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - defer func() { - cancelWatch() - if _, err := apiClient.removeWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want watch to be canceled, got err: %v", err) - } - }() - if _, err := apiClient.addWatches[xdsresource.EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - updateChs[name] = endpointsUpdateCh - } - - wantError := fmt.Errorf("testing error") - wantError2 := fmt.Errorf("individual error") - client.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ - testCDSName: {Update: xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}}, - badResourceName: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - - // The valid resource should be sent to the watcher. 
- if err := verifyEndpointsUpdate(ctx, updateChs[testCDSName], xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, nil); err != nil { - t.Fatal(err) - } - - // The failed watcher should receive an error. - if err := verifyEndpointsUpdate(ctx, updateChs[badResourceName], xdsresource.EndpointsUpdate{}, wantError2); err != nil { - t.Fatal(err) - } + testWatchPartialValid(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) } diff --git a/xds/internal/xdsclient/watchers_federation_test.go b/xds/internal/xdsclient/watchers_federation_test.go new file mode 100644 index 000000000000..1567cf587df8 --- /dev/null +++ b/xds/internal/xdsclient/watchers_federation_test.go @@ -0,0 +1,99 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "context" + "testing" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +func overrideFedEnvVar(t *testing.T) { + oldFed := envconfig.XDSFederation + envconfig.XDSFederation = true + t.Cleanup(func() { envconfig.XDSFederation = oldFed }) +} + +func testFedTwoWatchDifferentContextParameterOrder(t *testing.T, typ xdsresource.ResourceType, update interface{}) { + overrideFedEnvVar(t) + var ( + // Two resource names only differ in context parameter __order__. 
+ resourceName1 = buildResourceName(typ, testAuthority, "test-resource-name", nil) + "?a=1&b=2" + resourceName2 = buildResourceName(typ, testAuthority, "test-resource-name", nil) + "?b=2&a=1" + ) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client, ctrlCh := testClientSetup(t, false) + updateCh, _ := newWatch(t, client, typ, resourceName1) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, resourceName1) + newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) + + // Start a watch on the second resource name. + updateCh2, _ := newWatchF(client, resourceName2) + + // Send an update on the first resoruce, both watchers should be updated. + newUpdateF(updateHandler, map[string]interface{}{resourceName1: update}) + verifyUpdateF(ctx, t, updateCh, update, nil) + verifyUpdateF(ctx, t, updateCh2, update, nil) +} + +// TestLDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. +func (s) TestLDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) { + testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}) +} + +// TestRDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. 
+func (s) TestRDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) { + testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{testLDSName}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, + }, + }, + }) +} + +// TestClusterFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. +func (s) TestClusterFedTwoWatchDifferentContextParameterOrder(t *testing.T) { + testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}) +} + +// TestEndpointsFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. 
+func (s) TestEndpointsFedTwoWatchDifferentContextParameterOrder(t *testing.T) { + testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}) +} diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go index 7446975a1511..f0f34d4c578e 100644 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ b/xds/internal/xdsclient/watchers_listener_test.go @@ -35,256 +35,28 @@ import ( // - an update for another resource name // - an update is received after cancel() func (s) TestLDSWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Push an update, with an extra resource for a different resource name. 
- // Specify a non-nil raw proto in the original resource to ensure that the - // new update is not considered equal to the old one. - newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ - testLDSName: {Update: newUpdate}, - "randomName": {}, - }, xdsresource.UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, newUpdate, nil); err != nil { - t.Fatal(err) - } - - // Cancel watch, and send update again. - cancelWatch() - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) - } + testWatch(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) } // TestLDSTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. 
func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - const count = 2 - var ( - ldsUpdateChs []*testutils.Channel - cancelLastWatch func() - ) - - for i := 0; i < count; i++ { - ldsUpdateCh := testutils.NewChannel() - ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - cancelLastWatch = client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. None of the watchers should - // be notified because one has been cancelled, and the other is receiving - // the same update. 
- cancelLastWatch() - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - func() { - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) - } - }() - } - - // Push a new update and make sure the uncancelled watcher is invoked. - // Specify a non-nil raw proto to ensure that the new update is not - // considered equal to the old one. - newUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName, Raw: &anypb.Any{}} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateChs[0], newUpdate, nil); err != nil { - t.Fatal(err) - } + testTwoWatchSameResourceName(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) } // TestLDSThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - var ldsUpdateChs []*testutils.Channel - const count = 2 - - // Two watches for the same name. 
- for i := 0; i < count; i++ { - ldsUpdateCh := testutils.NewChannel() - ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. - ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"} - wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ - testLDSName + "1": {Update: wantUpdate1}, - testLDSName + "2": {Update: wantUpdate2}, - }, xdsresource.UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate1, nil); err != nil { - t.Fatal(err) - } - } - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } + testThreeWatchDifferentResourceName(t, xdsresource.ListenerResource, + xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"}, testLDSName+"1", + xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"}, testLDSName+"2", + ) } // TestLDSWatchAfterCache covers the case where watch is called after the update // is in cache. 
func (s) TestLDSWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - ldsUpdateCh := testutils.NewChannel() - client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. - ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receive the update. - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Old watch should see nothing. 
- sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) - } + testWatchAfterCache(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) } // TestLDSResourceRemoved covers the cases: @@ -294,116 +66,23 @@ func (s) TestLDSWatchAfterCache(t *testing.T) { // - one more update without the removed resource // - the callback (above) shouldn't receive any update func (s) TestLDSResourceRemoved(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - ldsUpdateCh1 := testutils.NewChannel() - client.WatchListener(testLDSName+"1", func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - // Another watch for a different name. 
- ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "1"} - wantUpdate2 := xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2"} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ - testLDSName + "1": {Update: wantUpdate1}, - testLDSName + "2": {Update: wantUpdate2}, - }, xdsresource.UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1, nil); err != nil { - t.Fatal(err) - } - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } - - // Send another update to remove resource 1. - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) - - // Watcher 1 should get an error. - if u, err := ldsUpdateCh1.Receive(ctx); err != nil || xdsresource.ErrType(u.(xdsresource.ListenerUpdateErrTuple).Err) != xdsresource.ErrorTypeResourceNotFound { - t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) - } - - // Watcher 2 should not see an update since the resource has not changed. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh2.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u) - } - - // Send another update with resource 2 modified. Specify a non-nil raw proto - // to ensure that the new update is not considered equal to the old one. 
- wantUpdate2 = xdsresource.ListenerUpdate{RouteConfigName: testEDSName + "2", Raw: &anypb.Any{}} - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName + "2": {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) - - // Watcher 1 should not see an update. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u) - } - - // Watcher 2 should get the update. - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } + testResourceRemoved(t, xdsresource.ListenerResource, + xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"}, testLDSName+"1", + xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"}, testLDSName+"2", + ) } // TestListenerWatchNACKError covers the case that an update is NACK'ed, and the // watcher should also receive the error. 
func (s) TestListenerWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - defer cancelWatch() - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, ctrlCh := testClientSetup(t, false) + ldsUpdateCh, _ := newWatch(t, client, xdsresource.ListenerResource, testLDSName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ListenerResource, testLDSName) wantError := fmt.Errorf("testing error") - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) if err := verifyListenerUpdate(ctx, ldsUpdateCh, xdsresource.ListenerUpdate{}, wantError); err != nil { t.Fatal(err) } @@ -414,88 +93,17 @@ func (s) TestListenerWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. 
func (s) TestListenerWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - const badResourceName = "bad-resource" - updateChs := make(map[string]*testutils.Channel) - - for _, name := range []string{testLDSName, badResourceName} { - ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(name, func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - defer func() { - cancelWatch() - if _, err := apiClient.removeWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want watch to be canceled, got err: %v", err) - } - }() - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - updateChs[name] = ldsUpdateCh - } - - wantError := fmt.Errorf("testing error") - wantError2 := fmt.Errorf("individual error") - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ - testLDSName: {Update: xdsresource.ListenerUpdate{RouteConfigName: testEDSName}}, - badResourceName: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - - // The valid resource should be sent to the watcher. - if err := verifyListenerUpdate(ctx, updateChs[testLDSName], xdsresource.ListenerUpdate{RouteConfigName: testEDSName}, nil); err != nil { - t.Fatal(err) - } - - // The failed watcher should receive an error. 
- if err := verifyListenerUpdate(ctx, updateChs[badResourceName], xdsresource.ListenerUpdate{}, wantError2); err != nil { - t.Fatal(err) - } + testWatchPartialValid(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) } // TestListenerWatch_RedundantUpdateSupression tests scenarios where an update // with an unmodified resource is suppressed, and modified resource is not. func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - ldsUpdateCh := testutils.NewChannel() - client.WatchListener(testLDSName, func(update xdsresource.ListenerUpdate, err error) { - ldsUpdateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, ctrlCh := testClientSetup(t, false) + ldsUpdateCh, _ := newWatch(t, client, xdsresource.ListenerResource, testLDSName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ListenerResource, testLDSName) basicListener := testutils.MarshalAny(&v3listenerpb.Listener{ Name: testLDSName, @@ -582,7 +190,7 @@ func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { }, } for _, test := range tests { - client.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, xdsresource.UpdateMetadata{}) + 
updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, xdsresource.UpdateMetadata{}) if test.wantCallback { if err := verifyListenerUpdate(ctx, ldsUpdateCh, test.update, nil); err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go index ea7b06ae1fd9..669785084c27 100644 --- a/xds/internal/xdsclient/watchers_route_test.go +++ b/xds/internal/xdsclient/watchers_route_test.go @@ -23,11 +23,7 @@ import ( "fmt" "testing" - "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc/internal/testutils" ) // TestRDSWatch covers the cases: @@ -35,324 +31,76 @@ import ( // - an update for another resource name (which doesn't trigger callback) // - an update is received after cancel() func (s) TestRDSWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.RouteConfigUpdate{ + testWatch(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ 
VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, - } - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Push an update, with an extra resource for a different resource name. - // Specify a non-nil raw proto in the original resource to ensure that the - // new update is not considered equal to the old one. - newUpdate := wantUpdate - newUpdate.Raw = &anypb.Any{} - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ - testRDSName: {Update: newUpdate}, - "randomName": {}, - }, xdsresource.UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, newUpdate, nil); err != nil { - t.Fatal(err) - } - - // Cancel watch, and send update again. - cancelWatch() - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) - } + }, testRDSName) } // TestRDSTwoWatchSameResourceName covers the case where an update is received // after two watch() for the same resource name. 
func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - const count = 2 - var ( - rdsUpdateChs []*testutils.Channel - cancelLastWatch func() - ) - for i := 0; i < count; i++ { - rdsUpdateCh := testutils.NewChannel() - rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := xdsresource.RouteConfigUpdate{ + testTwoWatchSameResourceName(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, - } - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate, nil); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. 
None of the watchers should - // be notified because one has been cancelled, and the other is receiving - // the same update. - cancelLastWatch() - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - for i := 0; i < count; i++ { - func() { - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateChs[i].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) - } - }() - } - - // Push a new update and make sure the uncancelled watcher is invoked. - // Specify a non-nil raw proto to ensure that the new update is not - // considered equal to the old one. - newUpdate := wantUpdate - newUpdate.Raw = &anypb.Any{} - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: newUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[0], newUpdate, nil); err != nil { - t.Fatal(err) - } + }, testRDSName) } // TestRDSThreeWatchDifferentResourceName covers the case where an update is // received after three watch() for different resource names. func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - // Two watches for the same name. 
- var rdsUpdateChs []*testutils.Channel - const count = 2 - for i := 0; i < count; i++ { - rdsUpdateCh := testutils.NewChannel() - rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - client.WatchRouteConfig(testRDSName+"1", func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. - rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName+"2", func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "1": {Weight: 1}}}}, + testThreeWatchDifferentResourceName(t, xdsresource.RouteConfigResource, + xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{testLDSName}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "1": {Weight: 1}}}}, + }, }, - }, - } - wantUpdate2 := xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: 
map[string]xdsresource.WeightedCluster{testCDSName + "2": {Weight: 1}}}}, + }, testRDSName+"1", + xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{testLDSName}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "2": {Weight: 1}}}}, + }, }, - }, - } - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ - testRDSName + "1": {Update: wantUpdate1}, - testRDSName + "2": {Update: wantUpdate2}, - }, xdsresource.UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate1, nil); err != nil { - t.Fatal(err) - } - } - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh2, wantUpdate2, nil); err != nil { - t.Fatal(err) - } + }, testRDSName+"2", + ) } // TestRDSWatchAfterCache covers the case where watch is called after the update // is in cache. func (s) TestRDSWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - rdsUpdateCh := testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.RouteConfigUpdate{ + testWatchAfterCache(t, xdsresource.RouteConfigResource, 
xdsresource.RouteConfigUpdate{ VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{testLDSName}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, }, }, - } - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. - rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receives the update. - if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, xdsresource.RouteConfigUpdateErrTuple{Update: wantUpdate}, cmp.AllowUnexported(xdsresource.RouteConfigUpdateErrTuple{})) { - t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err) - } - - // Old watch should see nothing. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) - } + }, testRDSName) } // TestRouteWatchNACKError covers the case that an update is NACK'ed, and the // watcher should also receive the error. 
func (s) TestRouteWatchNACKError(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(testCDSName, func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - defer cancelWatch() - if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } + client, ctrlCh := testClientSetup(t, false) + rdsUpdateCh, _ := newWatch(t, client, xdsresource.RouteConfigResource, testRDSName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.RouteConfigResource, testRDSName) wantError := fmt.Errorf("testing error") - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + updateHandler.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, xdsresource.RouteConfigUpdate{}, wantError); err != nil { t.Fatal(err) } @@ -363,63 +111,12 @@ func (s) TestRouteWatchNACKError(t *testing.T) { // But the watchers with valid resources should receive the update, those with // invalida resources should receive an error. 
func (s) TestRouteWatchPartialValid(t *testing.T) { - apiClientCh, cleanup := overrideNewController() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - const badResourceName = "bad-resource" - updateChs := make(map[string]*testutils.Channel) - - for _, name := range []string{testRDSName, badResourceName} { - rdsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchRouteConfig(name, func(update xdsresource.RouteConfigUpdate, err error) { - rdsUpdateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - defer func() { - cancelWatch() - if _, err := apiClient.removeWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want watch to be canceled, got err: %v", err) - } - }() - if _, err := apiClient.addWatches[xdsresource.RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - updateChs[name] = rdsUpdateCh - } - - wantError := fmt.Errorf("testing error") - wantError2 := fmt.Errorf("individual error") - client.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ - testRDSName: {Update: xdsresource.RouteConfigUpdate{VirtualHosts: []*xdsresource.VirtualHost{{ - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, - }}}}, - badResourceName: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - - // The valid resource should be sent to the watcher. 
- if err := verifyRouteConfigUpdate(ctx, updateChs[testRDSName], xdsresource.RouteConfigUpdate{VirtualHosts: []*xdsresource.VirtualHost{{ - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, - }}}, nil); err != nil { - t.Fatal(err) - } - - // The failed watcher should receive an error. - if err := verifyRouteConfigUpdate(ctx, updateChs[badResourceName], xdsresource.RouteConfigUpdate{}, wantError2); err != nil { - t.Fatal(err) - } + testWatchPartialValid(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{testLDSName}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, + }, + }, + }, testRDSName) } diff --git a/xds/internal/xdsclient/watchers_test.go b/xds/internal/xdsclient/watchers_test.go new file mode 100644 index 000000000000..39be83d48628 --- /dev/null +++ b/xds/internal/xdsclient/watchers_test.go @@ -0,0 +1,614 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsclient + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" +) + +// testClientSetup sets up the client and controller for the test. It returns a +// newly created client, and a channel where new controllers will be sent to. +func testClientSetup(t *testing.T, overrideWatchExpiryTimeout bool) (*clientImpl, *testutils.Channel) { + t.Helper() + ctrlCh := overrideNewController(t) + + watchExpiryTimeout := defaultWatchExpiryTimeout + if overrideWatchExpiryTimeout { + watchExpiryTimeout = defaultTestWatchExpiryTimeout + } + + client, err := newWithConfig(clientOpts(), watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + t.Cleanup(client.Close) + return client, ctrlCh +} + +// newWatch starts a new watch on the client. +func newWatch(t *testing.T, client *clientImpl, typ xdsresource.ResourceType, resourceName string) (updateCh *testutils.Channel, cancelWatch func()) { + newWatchF, _, _ := typeToTestFuncs(typ) + updateCh, cancelWatch = newWatchF(client, resourceName) + t.Cleanup(cancelWatch) + + if u, ok := updateCh.ReceiveOrFail(); ok { + t.Fatalf("received unexpected update immediately after watch: %+v", u) + } + return +} + +// getControllerAndPubsub returns the controller and pubsub for the given +// type+resourceName from the client. 
+func getControllerAndPubsub(ctx context.Context, t *testing.T, client *clientImpl, ctrlCh *testutils.Channel, typ xdsresource.ResourceType, resourceName string) (*testController, pubsub.UpdateHandler) { + c, err := ctrlCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for API client to be created: %v", err) + } + ctrl := c.(*testController) + + if _, err := ctrl.addWatches[typ].Receive(ctx); err != nil { + t.Fatalf("want new watch to start, got error %v", err) + } + + updateHandler := findPubsubForTest(t, client, xdsresource.ParseName(resourceName).Authority) + + return ctrl, updateHandler +} + +// findPubsubForTest returns the pubsub for the given authority, to send updates +// to. If authority is "", the default is returned. If the authority is not +// found, the test will fail. +func findPubsubForTest(t *testing.T, c *clientImpl, authority string) pubsub.UpdateHandler { + t.Helper() + var config *bootstrap.ServerConfig + if authority == "" { + config = c.config.XDSServer + } else { + authConfig, ok := c.config.Authorities[authority] + if !ok { + t.Fatalf("failed to find authority %q", authority) + } + config = authConfig.XDSServer + } + a := c.authorities[config.String()] + if a == nil { + t.Fatalf("authority for %q is not created", authority) + } + return a.pubsub +} + +var ( + newLDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { + updateCh := testutils.NewChannel() + cancelLastWatch := client.WatchListener(resourceName, func(update xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) + }) + return updateCh, cancelLastWatch + } + newLDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { + wantUpdates := map[string]xdsresource.ListenerUpdateErrTuple{} + for n, u := range updates { + wantUpdate := u.(xdsresource.ListenerUpdate) + wantUpdates[n] = xdsresource.ListenerUpdateErrTuple{Update: wantUpdate} + } + 
updateHandler.NewListeners(wantUpdates, xdsresource.UpdateMetadata{}) + } + verifyLDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { + t.Helper() + wantUpdate := update.(xdsresource.ListenerUpdate) + if err := verifyListenerUpdate(ctx, updateCh, wantUpdate, err); err != nil { + t.Fatal(err) + } + } + + newRDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { + updateCh := testutils.NewChannel() + cancelLastWatch := client.WatchRouteConfig(resourceName, func(update xdsresource.RouteConfigUpdate, err error) { + updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) + }) + return updateCh, cancelLastWatch + } + newRDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { + wantUpdates := map[string]xdsresource.RouteConfigUpdateErrTuple{} + for n, u := range updates { + wantUpdate := u.(xdsresource.RouteConfigUpdate) + wantUpdates[n] = xdsresource.RouteConfigUpdateErrTuple{Update: wantUpdate} + } + updateHandler.NewRouteConfigs(wantUpdates, xdsresource.UpdateMetadata{}) + } + verifyRDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { + t.Helper() + wantUpdate := update.(xdsresource.RouteConfigUpdate) + if err := verifyRouteConfigUpdate(ctx, updateCh, wantUpdate, err); err != nil { + t.Fatal(err) + } + } + + newCDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { + updateCh := testutils.NewChannel() + cancelLastWatch := client.WatchCluster(resourceName, func(update xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) + }) + return updateCh, cancelLastWatch + } + newCDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { + wantUpdates := map[string]xdsresource.ClusterUpdateErrTuple{} + for n, u := range updates { + wantUpdate := 
u.(xdsresource.ClusterUpdate) + wantUpdates[n] = xdsresource.ClusterUpdateErrTuple{Update: wantUpdate} + } + updateHandler.NewClusters(wantUpdates, xdsresource.UpdateMetadata{}) + } + verifyCDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { + t.Helper() + wantUpdate := update.(xdsresource.ClusterUpdate) + if err := verifyClusterUpdate(ctx, updateCh, wantUpdate, err); err != nil { + t.Fatal(err) + } + } + + newEDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { + updateCh := testutils.NewChannel() + cancelLastWatch := client.WatchEndpoints(resourceName, func(update xdsresource.EndpointsUpdate, err error) { + updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) + }) + return updateCh, cancelLastWatch + } + newEDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { + wantUpdates := map[string]xdsresource.EndpointsUpdateErrTuple{} + for n, u := range updates { + wantUpdate := u.(xdsresource.EndpointsUpdate) + wantUpdates[n] = xdsresource.EndpointsUpdateErrTuple{Update: wantUpdate} + } + updateHandler.NewEndpoints(wantUpdates, xdsresource.UpdateMetadata{}) + } + verifyEDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { + t.Helper() + wantUpdate := update.(xdsresource.EndpointsUpdate) + if err := verifyEndpointsUpdate(ctx, updateCh, wantUpdate, err); err != nil { + t.Fatal(err) + } + } +) + +func typeToTestFuncs(typ xdsresource.ResourceType) ( + newWatchF func(client *clientImpl, resourceName string) (*testutils.Channel, func()), + newUpdateF func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}), + verifyUpdateF func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error), +) { + switch typ { + case xdsresource.ListenerResource: + newWatchF = newLDSWatchF + newUpdateF = newLDSUpdateF + 
verifyUpdateF = verifyLDSUpdateF + case xdsresource.RouteConfigResource: + newWatchF = newRDSWatchF + newUpdateF = newRDSUpdateF + verifyUpdateF = verifyRDSUpdateF + case xdsresource.ClusterResource: + newWatchF = newCDSWatchF + newUpdateF = newCDSUpdateF + verifyUpdateF = verifyCDSUpdateF + case xdsresource.EndpointsResource: + newWatchF = newEDSWatchF + newUpdateF = newEDSUpdateF + verifyUpdateF = verifyEDSUpdateF + } + return +} + +func buildResourceName(typ xdsresource.ResourceType, auth, id string, ctxParams map[string]string) string { + var typS string + switch typ { + case xdsresource.ListenerResource: + typS = version.V3ListenerType + case xdsresource.RouteConfigResource: + typS = version.V3RouteConfigType + case xdsresource.ClusterResource: + typS = version.V3ClusterType + case xdsresource.EndpointsResource: + typS = version.V3EndpointsType + } + return (&xdsresource.Name{ + Scheme: "xdstp", + Authority: auth, + Type: typS, + ID: id, + ContextParams: ctxParams, + }).String() +} + +// TestClusterWatch covers the cases: +// - an update is received after a watch() +// - an update for another resource name +// - an update is received after cancel() +func testWatch(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { + overrideFedEnvVar(t) + for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} { + t.Run(rName, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client, ctrlCh := testClientSetup(t, false) + updateCh, cancelWatch := newWatch(t, client, typ, rName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName) + _, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) + + // Send an update, and check the result. 
+ newUpdateF(updateHandler, map[string]interface{}{rName: update}) + verifyUpdateF(ctx, t, updateCh, update, nil) + + // Push an update, with an extra resource for a different resource name. + // Specify a non-nil raw proto in the original resource to ensure that the + // new update is not considered equal to the old one. + var newUpdate interface{} + switch typ { + case xdsresource.ListenerResource: + newU := update.(xdsresource.ListenerUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.RouteConfigResource: + newU := update.(xdsresource.RouteConfigUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.ClusterResource: + newU := update.(xdsresource.ClusterUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.EndpointsResource: + newU := update.(xdsresource.EndpointsUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + } + newUpdateF(updateHandler, map[string]interface{}{rName: newUpdate}) + verifyUpdateF(ctx, t, updateCh, newUpdate, nil) + + // Cancel watch, and send update again. + cancelWatch() + newUpdateF(updateHandler, map[string]interface{}{rName: update}) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err) + } + }) + } +} + +// testClusterTwoWatchSameResourceName covers the case where an update is +// received after two watch() for the same resource name. 
+func testTwoWatchSameResourceName(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { + overrideFedEnvVar(t) + for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} { + t.Run(rName, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client, ctrlCh := testClientSetup(t, false) + updateCh, _ := newWatch(t, client, typ, resourceName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, resourceName) + newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) + + updateChs := []*testutils.Channel{updateCh} + var cancelLastWatch func() + const count = 1 + for i := 0; i < count; i++ { + var updateCh *testutils.Channel + updateCh, cancelLastWatch = newWatchF(client, resourceName) + updateChs = append(updateChs, updateCh) + } + + newUpdateF(updateHandler, map[string]interface{}{resourceName: update}) + for i := 0; i < count+1; i++ { + verifyUpdateF(ctx, t, updateChs[i], update, nil) + } + + // Cancel the last watch, and send update again. None of the watchers should + // be notified because one has been cancelled, and the other is receiving + // the same update. + cancelLastWatch() + newUpdateF(updateHandler, map[string]interface{}{resourceName: update}) + for i := 0; i < count+1; i++ { + func() { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateChs[i].Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err) + } + }() + } + + // Push a new update and make sure the uncancelled watcher is invoked. + // Specify a non-nil raw proto to ensure that the new update is not + // considered equal to the old one. 
+ var newUpdate interface{} + switch typ { + case xdsresource.ListenerResource: + newU := update.(xdsresource.ListenerUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.RouteConfigResource: + newU := update.(xdsresource.RouteConfigUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.ClusterResource: + newU := update.(xdsresource.ClusterUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.EndpointsResource: + newU := update.(xdsresource.EndpointsUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + } + newUpdateF(updateHandler, map[string]interface{}{resourceName: newUpdate}) + verifyUpdateF(ctx, t, updateCh, newUpdate, nil) + }) + } +} + +// testThreeWatchDifferentResourceName starts two watches for name1, and one +// watch for name2. This test verifies that two watches for name1 receive the +// same update, and name2 watch receives a different update. +func testThreeWatchDifferentResourceName(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) { + overrideFedEnvVar(t) + for _, rName := range [][]string{ + {resourceName1, resourceName2}, + {buildResourceName(typ, testAuthority, resourceName1, nil), buildResourceName(typ, testAuthority, resourceName2, nil)}, + } { + t.Run(rName[0], func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client, ctrlCh := testClientSetup(t, false) + updateCh, _ := newWatch(t, client, typ, rName[0]) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0]) + newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) + + // Two watches for the same name. + updateChs := []*testutils.Channel{updateCh} + const count = 1 + for i := 0; i < count; i++ { + var updateCh *testutils.Channel + updateCh, _ = newWatchF(client, rName[0]) + updateChs = append(updateChs, updateCh) + } + // Third watch for a different name. 
+ updateCh2, _ := newWatchF(client, rName[1]) + + newUpdateF(updateHandler, map[string]interface{}{ + rName[0]: update1, + rName[1]: update2, + }) + + // The first several watches for the same resource should all + // receive the first update. + for i := 0; i < count+1; i++ { + verifyUpdateF(ctx, t, updateChs[i], update1, nil) + } + // The last watch for the different resource should receive the + // second update. + verifyUpdateF(ctx, t, updateCh2, update2, nil) + }) + } +} + +// testWatchAfterCache covers the case where watch is called after the update is +// in cache. +func testWatchAfterCache(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { + overrideFedEnvVar(t) + for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} { + t.Run(rName, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client, ctrlCh := testClientSetup(t, false) + updateCh, _ := newWatch(t, client, typ, rName) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName) + newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) + + newUpdateF(updateHandler, map[string]interface{}{rName: update}) + verifyUpdateF(ctx, t, updateCh, update, nil) + + // Another watch for the resource in cache. + updateCh2, _ := newWatchF(client, rName) + + // New watch should receive the update. + verifyUpdateF(ctx, t, updateCh2, update, nil) + + // Old watch should see nothing. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err) + } + }) + } +} + +// testResourceRemoved covers the cases: +// - an update is received after a watch() +// - another update is received, with one resource removed +// - this should trigger callback with resource removed error +// - one more update without the removed resource +// - the callback (above) shouldn't receive any update +func testResourceRemoved(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) { + overrideFedEnvVar(t) + for _, rName := range [][]string{ + {resourceName1, resourceName2}, + {buildResourceName(typ, testAuthority, resourceName1, nil), buildResourceName(typ, testAuthority, resourceName2, nil)}, + } { + t.Run(rName[0], func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client, ctrlCh := testClientSetup(t, false) + updateCh, _ := newWatch(t, client, typ, rName[0]) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0]) + newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) + + // Another watch for a different name. + updateCh2, _ := newWatchF(client, rName[1]) + + newUpdateF(updateHandler, map[string]interface{}{ + rName[0]: update1, + rName[1]: update2, + }) + verifyUpdateF(ctx, t, updateCh, update1, nil) + verifyUpdateF(ctx, t, updateCh2, update2, nil) + + // Send another update to remove resource 1. + newUpdateF(updateHandler, map[string]interface{}{ + rName[1]: update2, + }) + + // Watcher 1 should get an error. 
+ if u, err := updateCh.Receive(ctx); err != nil { + t.Errorf("failed to receive update: %v", err) + } else { + var gotErr error + switch typ { + case xdsresource.ListenerResource: + newU := u.(xdsresource.ListenerUpdateErrTuple) + gotErr = newU.Err + case xdsresource.RouteConfigResource: + newU := u.(xdsresource.RouteConfigUpdateErrTuple) + gotErr = newU.Err + case xdsresource.ClusterResource: + newU := u.(xdsresource.ClusterUpdateErrTuple) + gotErr = newU.Err + case xdsresource.EndpointsResource: + newU := u.(xdsresource.EndpointsUpdateErrTuple) + gotErr = newU.Err + } + if xdsresource.ErrType(gotErr) != xdsresource.ErrorTypeResourceNotFound { + t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) + } + } + + // Watcher 2 should not see an update since the resource has not changed. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh2.Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected ClusterUpdate: %v, want receiving from channel timeout", u) + } + + // Send another update with resource 2 modified. Specify a non-nil raw proto + // to ensure that the new update is not considered equal to the old one. + var newUpdate interface{} + switch typ { + case xdsresource.ListenerResource: + newU := update2.(xdsresource.ListenerUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.RouteConfigResource: + newU := update2.(xdsresource.RouteConfigUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.ClusterResource: + newU := update2.(xdsresource.ClusterUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + case xdsresource.EndpointsResource: + newU := update2.(xdsresource.EndpointsUpdate) + newU.Raw = &anypb.Any{} + newUpdate = newU + } + newUpdateF(updateHandler, map[string]interface{}{ + rName[1]: newUpdate, + }) + + // Watcher 1 should not see an update. 
+ sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Errorf("unexpected Cluster: %v, want receiving from channel timeout", u) + } + + // Watcher 2 should get the update. + verifyUpdateF(ctx, t, updateCh2, newUpdate, nil) + }) + } +} + +// testWatchPartialValid covers the case that a response contains both +// valid and invalid resources. This response will be NACK'ed by the xdsclient. +// But the watchers with valid resources should receive the update, those with +// invalid resources should receive an error. +func testWatchPartialValid(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { + overrideFedEnvVar(t) + const badResourceName = "bad-resource" + + for _, rName := range [][]string{ + {resourceName, badResourceName}, + {buildResourceName(typ, testAuthority, resourceName, nil), buildResourceName(typ, testAuthority, badResourceName, nil)}, + } { + t.Run(rName[0], func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client, ctrlCh := testClientSetup(t, false) + updateCh, _ := newWatch(t, client, typ, rName[0]) + _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0]) + newWatchF, _, verifyUpdateF := typeToTestFuncs(typ) + + updateChs := map[string]*testutils.Channel{ + rName[0]: updateCh, + } + + for _, name := range []string{rName[1]} { + updateChT, _ := newWatchF(client, rName[1]) + updateChs[name] = updateChT + } + + wantError := fmt.Errorf("testing error") + wantError2 := fmt.Errorf("individual error") + + switch typ { + case xdsresource.ListenerResource: + updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ + rName[0]: {Update: update.(xdsresource.ListenerUpdate)}, + rName[1]: {Err: wantError2}, + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + case 
xdsresource.RouteConfigResource: + updateHandler.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ + rName[0]: {Update: update.(xdsresource.RouteConfigUpdate)}, + rName[1]: {Err: wantError2}, + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + case xdsresource.ClusterResource: + updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ + rName[0]: {Update: update.(xdsresource.ClusterUpdate)}, + rName[1]: {Err: wantError2}, + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + case xdsresource.EndpointsResource: + updateHandler.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ + rName[0]: {Update: update.(xdsresource.EndpointsUpdate)}, + rName[1]: {Err: wantError2}, + }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) + } + + // The valid resource should be sent to the watcher. + verifyUpdateF(ctx, t, updateChs[rName[0]], update, nil) + + // The failed watcher should receive an error. 
+ verifyUpdateF(ctx, t, updateChs[rName[1]], update, wantError2) + }) + } +} diff --git a/xds/internal/xdsclient/xdsclient_test.go b/xds/internal/xdsclient/xdsclient_test.go index 68ecd88acc4c..74da4de7c8b4 100644 --- a/xds/internal/xdsclient/xdsclient_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -32,5 +32,3 @@ type s struct { func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } - -const testXDSServer = "xds-server" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal.go b/xds/internal/xdsclient/xdsresource/unmarshal.go index 7cd9d32dd6c8..eda11088765b 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal.go @@ -64,6 +64,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat switch ret2 := ret.(type) { case map[string]ListenerUpdateErrTuple: name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger) + name = ParseName(name).String() if err == nil { ret2[name] = ListenerUpdateErrTuple{Update: update} continue @@ -78,6 +79,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat ret2[name] = ListenerUpdateErrTuple{Err: err} case map[string]RouteConfigUpdateErrTuple: name, update, err := unmarshalRouteConfigResource(r, opts.Logger) + name = ParseName(name).String() if err == nil { ret2[name] = RouteConfigUpdateErrTuple{Update: update} continue @@ -92,6 +94,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat ret2[name] = RouteConfigUpdateErrTuple{Err: err} case map[string]ClusterUpdateErrTuple: name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger) + name = ParseName(name).String() if err == nil { ret2[name] = ClusterUpdateErrTuple{Update: update} continue @@ -106,6 +109,7 @@ func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadat ret2[name] = ClusterUpdateErrTuple{Err: err} case map[string]EndpointsUpdateErrTuple: 
name, update, err := unmarshalEndpointsResource(r, opts.Logger) + name = ParseName(name).String() if err == nil { ret2[name] = EndpointsUpdateErrTuple{Update: update} continue diff --git a/xds/internal/xdsclient/xdsresource/version/version.go b/xds/internal/xdsclient/xdsresource/version/version.go index dbcb76ffd1f1..edfa68762f6e 100644 --- a/xds/internal/xdsclient/xdsresource/version/version.go +++ b/xds/internal/xdsclient/xdsresource/version/version.go @@ -35,17 +35,29 @@ const ( // Resource URLs. We need to be able to accept either version of the resource // regardless of the version of the transport protocol in use. const ( - V2ListenerURL = "type.googleapis.com/envoy.api.v2.Listener" - V2RouteConfigURL = "type.googleapis.com/envoy.api.v2.RouteConfiguration" - V2ClusterURL = "type.googleapis.com/envoy.api.v2.Cluster" - V2EndpointsURL = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" - V2HTTPConnManagerURL = "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" - - V3ListenerURL = "type.googleapis.com/envoy.config.listener.v3.Listener" - V3RouteConfigURL = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" - V3ClusterURL = "type.googleapis.com/envoy.config.cluster.v3.Cluster" - V3EndpointsURL = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" - V3HTTPConnManagerURL = "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" - V3UpstreamTLSContextURL = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" - V3DownstreamTLSContextURL = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" + googleapiPrefix = "type.googleapis.com/" + + V2ListenerType = "envoy.api.v2.Listener" + V2RouteConfigType = "envoy.api.v2.RouteConfiguration" + V2ClusterType = "envoy.api.v2.Cluster" + V2EndpointsType = "envoy.api.v2.ClusterLoadAssignment" + + V2ListenerURL = googleapiPrefix + 
V2ListenerType + V2RouteConfigURL = googleapiPrefix + V2RouteConfigType + V2ClusterURL = googleapiPrefix + V2ClusterType + V2EndpointsURL = googleapiPrefix + V2EndpointsType + V2HTTPConnManagerURL = googleapiPrefix + "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" + + V3ListenerType = "envoy.config.listener.v3.Listener" + V3RouteConfigType = "envoy.config.route.v3.RouteConfiguration" + V3ClusterType = "envoy.config.cluster.v3.Cluster" + V3EndpointsType = "envoy.config.endpoint.v3.ClusterLoadAssignment" + + V3ListenerURL = googleapiPrefix + V3ListenerType + V3RouteConfigURL = googleapiPrefix + V3RouteConfigType + V3ClusterURL = googleapiPrefix + V3ClusterType + V3EndpointsURL = googleapiPrefix + V3EndpointsType + V3HTTPConnManagerURL = googleapiPrefix + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + V3UpstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" + V3DownstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" ) From 2fb1ac854b2037b408121870f0dcc81474ca483b Mon Sep 17 00:00:00 2001 From: Shihao Xia Date: Wed, 5 Jan 2022 13:38:18 -0500 Subject: [PATCH 386/998] test: fix potential goroutine leak in TestUpdateAddresses_RetryFromFirstAddr (#5023) --- clientconn_test.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/clientconn_test.go b/clientconn_test.go index 7d6a40adb831..ee39370a87fb 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -859,12 +859,15 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { defer lis3.Close() closeServer2 := make(chan struct{}) + exitCh := make(chan struct{}) server1ContactedFirstTime := make(chan struct{}) server1ContactedSecondTime := make(chan struct{}) server2ContactedFirstTime := make(chan struct{}) server2ContactedSecondTime := make(chan struct{}) server3Contacted := make(chan struct{}) + defer 
close(exitCh) + // Launch server 1. go func() { // First, let's allow the initial connection to go READY. We need to do @@ -888,12 +891,18 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { // until balancer is built to process the addresses. stateNotifications := testBalancerBuilder.nextStateNotifier() // Wait for the transport to become ready. - for s := range stateNotifications { - if s == connectivity.Ready { - break + for { + select { + case st := <-stateNotifications: + if st == connectivity.Ready { + goto ready + } + case <-exitCh: + return } } + ready: // Once it's ready, curAddress has been set. So let's close this // connection prompting the first reconnect cycle. conn1.Close() From 907a202a615d3b2de857e4302cf6032004d65aee Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 6 Jan 2022 15:44:56 -0800 Subject: [PATCH 387/998] attributes: document that some value types (e.g. `map`s) must implement Equal (#5109) --- attributes/attributes.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/attributes/attributes.go b/attributes/attributes.go index 6ff2792ee4fa..ae13ddac14e0 100644 --- a/attributes/attributes.go +++ b/attributes/attributes.go @@ -69,7 +69,9 @@ func (a *Attributes) Value(key interface{}) interface{} { // bool' is implemented for a value in the attributes, it is called to // determine if the value matches the one stored in the other attributes. If // Equal is not implemented, standard equality is used to determine if the two -// values are equal. +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. 
func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true From 77b478d768e7e75234406b53471844a0a8d6faf4 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 7 Jan 2022 11:26:53 -0800 Subject: [PATCH 388/998] xds/federation: e2e tests (#5103) --- internal/xds/bootstrap.go | 31 +++- .../test/xds_client_federation_test.go | 142 ++++++++++++++++++ xds/internal/testutils/testutils.go | 28 ++++ xds/internal/xdsclient/authority_test.go | 18 +-- .../xdsclient/watchers_federation_test.go | 5 +- xds/internal/xdsclient/watchers_test.go | 35 +---- 6 files changed, 214 insertions(+), 45 deletions(-) create mode 100644 xds/internal/test/xds_client_federation_test.go diff --git a/internal/xds/bootstrap.go b/internal/xds/bootstrap.go index eeb709c45072..4905b7825ee7 100644 --- a/internal/xds/bootstrap.go +++ b/internal/xds/bootstrap.go @@ -55,6 +55,14 @@ type BootstrapOptions struct { ServerListenerResourceNameTemplate string // CertificateProviders is the certificate providers configuration. CertificateProviders map[string]json.RawMessage + // Authorities is a list of non-default authorities. + // + // In the config, an authority contains {ServerURI, xds-version, creds, + // features, etc}. Note that this fields only has ServerURI (it's a + // map[authority-name]ServerURI). The other fields (version, creds, + // features) are assumed to be the same as the default authority (they can + // be added later if needed). 
+ Authorities map[string]string } // SetupBootstrapFile creates a temporary file with bootstrap contents, based on @@ -94,12 +102,8 @@ func BootstrapContents(opts BootstrapOptions) ([]byte, error) { cfg := &bootstrapConfig{ XdsServers: []server{ { - ServerURI: opts.ServerURI, - ChannelCreds: []creds{ - { - Type: "insecure", - }, - }, + ServerURI: opts.ServerURI, + ChannelCreds: []creds{{Type: "insecure"}}, }, }, Node: node{ @@ -117,6 +121,16 @@ func BootstrapContents(opts BootstrapOptions) ([]byte, error) { return nil, fmt.Errorf("unsupported xDS transport protocol version: %v", opts.Version) } + auths := make(map[string]authority) + for n, auURI := range opts.Authorities { + auths[n] = authority{XdsServers: []server{{ + ServerURI: auURI, + ChannelCreds: []creds{{Type: "insecure"}}, + ServerFeatures: cfg.XdsServers[0].ServerFeatures, + }}} + } + cfg.Authorities = auths + bootstrapContents, err := json.MarshalIndent(cfg, "", " ") if err != nil { return nil, fmt.Errorf("failed to created bootstrap file: %v", err) @@ -129,6 +143,11 @@ type bootstrapConfig struct { Node node `json:"node,omitempty"` CertificateProviders map[string]json.RawMessage `json:"certificate_providers,omitempty"` ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` + Authorities map[string]authority `json:"authorities,omitempty"` +} + +type authority struct { + XdsServers []server `json:"xds_servers,omitempty"` } type server struct { diff --git a/xds/internal/test/xds_client_federation_test.go b/xds/internal/test/xds_client_federation_test.go new file mode 100644 index 000000000000..09db314b726a --- /dev/null +++ b/xds/internal/test/xds_client_federation_test.go @@ -0,0 +1,142 @@ +//go:build !386 +// +build !386 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" + xdsinternal "google.golang.org/grpc/internal/xds" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/xds" + "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/e2e" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// TestClientSideFederation tests that federation is supported. +// +// In this test, some xDS responses contain resource names in another authority +// (in the new resource name style): +// - LDS: old style, no authority (default authority) +// - RDS: new style, in a different authority +// - CDS: old style, no authority (default authority) +// - EDS: new style, in a different authority +func (s) TestClientSideFederation(t *testing.T) { + oldXDSFederation := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldXDSFederation }() + + // Start a management server as the default authority. 
+ serverDefaultAuth, err := e2e.StartManagementServer() + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(serverDefaultAuth.Stop) + + // Start another management server as the other authority. + const nonDefaultAuth = "non-default-auth" + serverAnotherAuth, err := e2e.StartManagementServer() + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(serverAnotherAuth.Stop) + + // Create a bootstrap file in a temporary directory. + nodeID := uuid.New().String() + bootstrapContents, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ + Version: xdsinternal.TransportV3, + NodeID: nodeID, + ServerURI: serverDefaultAuth.Address, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + // Specify the address of the non-default authority. + Authorities: map[string]string{nonDefaultAuth: serverAnotherAuth.Address}, + }) + if err != nil { + t.Fatalf("Failed to create bootstrap file: %v", err) + } + + resolver, err := xds.NewXDSResolverWithConfigForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS resolver for testing: %v", err) + } + port, cleanup := clientSetup(t, &testService{}) + defer cleanup() + + const serviceName = "my-service-client-side-xds" + // LDS is old style name. + ldsName := serviceName + // RDS is new style, with the non default authority. + rdsName := testutils.BuildResourceName(xdsresource.RouteConfigResource, nonDefaultAuth, "route-"+serviceName, nil) + // CDS is old style name. + cdsName := "cluster-" + serviceName + // EDS is new style, with the non default authority. + edsName := testutils.BuildResourceName(xdsresource.EndpointsResource, nonDefaultAuth, "endpoints-"+serviceName, nil) + + // Split resources, put LDS/CDS in the default authority, and put RDS/EDS in + // the other authority. + resourcesDefault := e2e.UpdateOptions{ + NodeID: nodeID, + // This has only LDS and CDS. 
+ Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + resourcesAnother := e2e.UpdateOptions{ + NodeID: nodeID, + // This has only RDS and EDS. + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, cdsName)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, "localhost", []uint32{port})}, + SkipValidation: true, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // This has only LDS and CDS. + if err := serverDefaultAuth.Update(ctx, resourcesDefault); err != nil { + t.Fatal(err) + } + // This has only RDS and EDS. + if err := serverAnotherAuth.Update(ctx, resourcesAnother); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/xds/internal/testutils/testutils.go b/xds/internal/testutils/testutils.go index a4c56f6438a0..bab5871105f4 100644 --- a/xds/internal/testutils/testutils.go +++ b/xds/internal/testutils/testutils.go @@ -17,3 +17,31 @@ // Package testutils provides utility types, for use in xds tests. package testutils + +import ( + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" +) + +// BuildResourceName returns the resource name in the format of an xdstp:// +// resource. 
+func BuildResourceName(typ xdsresource.ResourceType, auth, id string, ctxParams map[string]string) string { + var typS string + switch typ { + case xdsresource.ListenerResource: + typS = version.V3ListenerType + case xdsresource.RouteConfigResource: + typS = version.V3RouteConfigType + case xdsresource.ClusterResource: + typS = version.V3ClusterType + case xdsresource.EndpointsResource: + typS = version.V3EndpointsType + } + return (&xdsresource.Name{ + Scheme: "xdstp", + Authority: auth, + Type: typS, + ID: id, + ContextParams: ctxParams, + }).String() +} diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 583594fad066..f55d076f8488 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -150,7 +150,7 @@ func (s) TestAuthorityNoneDefaultAuthority(t *testing.T) { } t.Cleanup(client.Close) - resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) ctrl, ok, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh) if !ok { t.Fatalf("want a new controller to be built, got none") @@ -182,7 +182,7 @@ func (s) TestAuthorityShare(t *testing.T) { } t.Cleanup(client.Close) - resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) ctrl1, ok1, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh) if !ok1 { t.Fatalf("want a new controller to be built, got none") @@ -195,7 +195,7 @@ func (s) TestAuthorityShare(t *testing.T) { // Call the watch with the same authority name. This shouldn't create a new // controller. 
- resourceNameSameAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) + resourceNameSameAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh) if ok2 { t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) @@ -203,7 +203,7 @@ func (s) TestAuthorityShare(t *testing.T) { // Call the watch with a different authority name, but the same server // config. This shouldn't create a new controller. - resourceNameSameConfig := buildResourceName(xdsresource.ClusterResource, testAuthority2, testCDSName+"1", nil) + resourceNameSameConfig := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority2, testCDSName+"1", nil) if ctrl, ok, _ := watchAndFetchNewController(t, client, resourceNameSameConfig, ctrlCh); ok { t.Fatalf("an unexpected controller is built with config: %v", ctrl.config) } @@ -230,7 +230,7 @@ func (s) TestAuthorityIdleTimeout(t *testing.T) { } t.Cleanup(client.Close) - resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) if !ok1 { t.Fatalf("want a new controller to be built, got none") @@ -239,7 +239,7 @@ func (s) TestAuthorityIdleTimeout(t *testing.T) { var cancelWatch2 func() // Call the watch with the same authority name. This shouldn't create a new // controller. 
- resourceNameSameAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) + resourceNameSameAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) ctrl2, ok2, cancelWatch2 := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh) if ok2 { t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) @@ -285,7 +285,7 @@ func (s) TestAuthorityClientClose(t *testing.T) { t.Fatalf("want a new controller to be built, got none") } - resourceNameWithAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + resourceNameWithAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh) if !ok2 { t.Fatalf("want a new controller to be built, got none") @@ -329,7 +329,7 @@ func (s) TestAuthorityRevive(t *testing.T) { // Start a watch on the authority, and cancel it. This puts the authority in // the idle cache. - resourceName := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) + resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) if !ok1 { t.Fatalf("want a new controller to be built, got none") @@ -338,7 +338,7 @@ func (s) TestAuthorityRevive(t *testing.T) { // Start another watch on this authority, it should retrieve the authority // from the cache, instead of creating a new one. 
- resourceNameWithAuthority := buildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) + resourceNameWithAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh) if ok2 { t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) diff --git a/xds/internal/xdsclient/watchers_federation_test.go b/xds/internal/xdsclient/watchers_federation_test.go index 1567cf587df8..527999ebc8ae 100644 --- a/xds/internal/xdsclient/watchers_federation_test.go +++ b/xds/internal/xdsclient/watchers_federation_test.go @@ -22,6 +22,7 @@ import ( "testing" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -35,8 +36,8 @@ func testFedTwoWatchDifferentContextParameterOrder(t *testing.T, typ xdsresource overrideFedEnvVar(t) var ( // Two resource names only differ in context parameter __order__. 
- resourceName1 = buildResourceName(typ, testAuthority, "test-resource-name", nil) + "?a=1&b=2" - resourceName2 = buildResourceName(typ, testAuthority, "test-resource-name", nil) + "?b=2&a=1" + resourceName1 = testutils.BuildResourceName(typ, testAuthority, "test-resource-name", nil) + "?a=1&b=2" + resourceName2 = testutils.BuildResourceName(typ, testAuthority, "test-resource-name", nil) + "?b=2&a=1" ) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/xds/internal/xdsclient/watchers_test.go b/xds/internal/xdsclient/watchers_test.go index 39be83d48628..2405bd684a05 100644 --- a/xds/internal/xdsclient/watchers_test.go +++ b/xds/internal/xdsclient/watchers_test.go @@ -23,10 +23,10 @@ import ( "testing" "google.golang.org/grpc/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) @@ -221,34 +221,13 @@ func typeToTestFuncs(typ xdsresource.ResourceType) ( return } -func buildResourceName(typ xdsresource.ResourceType, auth, id string, ctxParams map[string]string) string { - var typS string - switch typ { - case xdsresource.ListenerResource: - typS = version.V3ListenerType - case xdsresource.RouteConfigResource: - typS = version.V3RouteConfigType - case xdsresource.ClusterResource: - typS = version.V3ClusterType - case xdsresource.EndpointsResource: - typS = version.V3EndpointsType - } - return (&xdsresource.Name{ - Scheme: "xdstp", - Authority: auth, - Type: typS, - ID: id, - ContextParams: ctxParams, - }).String() -} - // TestClusterWatch covers the cases: // - an update is received after a watch() // - an update for another resource name // - an update is received after cancel() func testWatch(t *testing.T, typ 
xdsresource.ResourceType, update interface{}, resourceName string) { overrideFedEnvVar(t) - for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} { + for _, rName := range []string{resourceName, xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil)} { t.Run(rName, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -302,7 +281,7 @@ func testWatch(t *testing.T, typ xdsresource.ResourceType, update interface{}, r // received after two watch() for the same resource name. func testTwoWatchSameResourceName(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { overrideFedEnvVar(t) - for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} { + for _, rName := range []string{resourceName, xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil)} { t.Run(rName, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -375,7 +354,7 @@ func testThreeWatchDifferentResourceName(t *testing.T, typ xdsresource.ResourceT overrideFedEnvVar(t) for _, rName := range [][]string{ {resourceName1, resourceName2}, - {buildResourceName(typ, testAuthority, resourceName1, nil), buildResourceName(typ, testAuthority, resourceName2, nil)}, + {xdstestutils.BuildResourceName(typ, testAuthority, resourceName1, nil), xdstestutils.BuildResourceName(typ, testAuthority, resourceName2, nil)}, } { t.Run(rName[0], func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -417,7 +396,7 @@ func testThreeWatchDifferentResourceName(t *testing.T, typ xdsresource.ResourceT // in cache. 
func testWatchAfterCache(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { overrideFedEnvVar(t) - for _, rName := range []string{resourceName, buildResourceName(typ, testAuthority, resourceName, nil)} { + for _, rName := range []string{resourceName, xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil)} { t.Run(rName, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -455,7 +434,7 @@ func testResourceRemoved(t *testing.T, typ xdsresource.ResourceType, update1 int overrideFedEnvVar(t) for _, rName := range [][]string{ {resourceName1, resourceName2}, - {buildResourceName(typ, testAuthority, resourceName1, nil), buildResourceName(typ, testAuthority, resourceName2, nil)}, + {xdstestutils.BuildResourceName(typ, testAuthority, resourceName1, nil), xdstestutils.BuildResourceName(typ, testAuthority, resourceName2, nil)}, } { t.Run(rName[0], func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -559,7 +538,7 @@ func testWatchPartialValid(t *testing.T, typ xdsresource.ResourceType, update in for _, rName := range [][]string{ {resourceName, badResourceName}, - {buildResourceName(typ, testAuthority, resourceName, nil), buildResourceName(typ, testAuthority, badResourceName, nil)}, + {xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil), xdstestutils.BuildResourceName(typ, testAuthority, badResourceName, nil)}, } { t.Run(rName[0], func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) From 13c41bce4ac7d42d09d86a6aae366211cb606b22 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 11 Jan 2022 17:38:19 -0500 Subject: [PATCH 389/998] Change version to 1.45.0-dev (#5122) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 
9e0723bf73a3..06160c17d97c 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.44.0-dev" +const Version = "1.45.0-dev" From 9353ae3bb432cf1075bf9329ab264421b8e01394 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 11 Jan 2022 14:42:12 -0800 Subject: [PATCH 390/998] credentials/google: stub out the oauth package in test (#5118) --- credentials/google/google.go | 5 ++++- credentials/google/google_test.go | 15 +++++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/credentials/google/google.go b/credentials/google/google.go index 63625a4b6803..fbdf7dc2997a 100644 --- a/credentials/google/google.go +++ b/credentials/google/google.go @@ -50,7 +50,7 @@ func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credential ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) defer cancel() var err error - opts.PerRPCCreds, err = oauth.NewApplicationDefault(ctx) + opts.PerRPCCreds, err = newADC(ctx) if err != nil { logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } @@ -112,6 +112,9 @@ var ( newALTS = func() credentials.TransportCredentials { return alts.NewClientCreds(alts.DefaultClientOptions()) } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } ) // NewWithMode should make a copy of Bundle, and switch mode. 
Modifying the diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index 0699b9ad4e01..efebb3efab75 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -65,17 +65,24 @@ var ( ) func overrideNewCredsFuncs() func() { - oldNewTLS := newTLS + origNewTLS := newTLS newTLS = func() credentials.TransportCredentials { return testTLS } - oldNewALTS := newALTS + origNewALTS := newALTS newALTS = func() credentials.TransportCredentials { return testALTS } + origNewADC := newADC + newADC = func(context.Context) (credentials.PerRPCCredentials, error) { + // We do not use perRPC creds in this test. It is safe to return nil here. + return nil, nil + } + return func() { - newTLS = oldNewTLS - newALTS = oldNewALTS + newTLS = origNewTLS + newALTS = origNewALTS + newADC = origNewADC } } From b49c0c99e4ba49a52f1929a3c3ea971f91c7d194 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 12 Jan 2022 11:01:29 -0800 Subject: [PATCH 391/998] xds/clusterresolver: set ClusterName for DNS child (#5119) --- xds/internal/balancer/cdsbalancer/cdsbalancer.go | 1 + .../balancer/clusterresolver/clusterresolver_test.go | 1 + xds/internal/balancer/clusterresolver/configbuilder.go | 9 ++++++--- .../balancer/clusterresolver/configbuilder_test.go | 7 +++++-- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 5f898c87918f..f1149108507c 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -328,6 +328,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { case xdsresource.ClusterTypeLogicalDNS: dms[i] = clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, + Cluster: cu.ClusterName, DNSHostname: cu.DNSHostName, } default: diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go 
b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 035188851a13..3b0843f6807e 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -43,6 +43,7 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond testEDSServcie = "test-eds-service-name" testClusterName = "test-cluster-name" + testClusterName2 = "google_cfe_some-name" ) var ( diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 1c0c8d0d7018..c2404b387b60 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -135,7 +135,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi } retAddrs = append(retAddrs, addrs...) case DiscoveryMechanismTypeLogicalDNS: - name, config, addrs := buildClusterImplConfigForDNS(i, p.addresses) + name, config, addrs := buildClusterImplConfigForDNS(i, p.addresses, p.mechanism) retConfig.Priorities = append(retConfig.Priorities, name) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config}, @@ -149,7 +149,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi return retConfig, retAddrs, nil } -func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string, *clusterimpl.LBConfig, []resolver.Address) { +func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { // Endpoint picking policy for DNS is hardcoded to pick_first. 
const childPolicy = "pick_first" retAddrs := make([]resolver.Address, 0, len(addrStrs)) @@ -157,7 +157,10 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string for _, addrStr := range addrStrs { retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) } - return pName, &clusterimpl.LBConfig{ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}}, retAddrs + return pName, &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}, + }, retAddrs } // buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index b56f20fa41bf..011500f4bc9a 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -199,7 +199,8 @@ func TestBuildPriorityConfig(t *testing.T) { }, { mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeLogicalDNS, + Cluster: testClusterName2, + Type: DiscoveryMechanismTypeLogicalDNS, }, addresses: testAddressStrs[4], }, @@ -277,6 +278,7 @@ func TestBuildPriorityConfig(t *testing.T) { Config: &internalserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ + Cluster: testClusterName2, ChildPolicy: &internalserviceconfig.BalancerConfig{Name: "pick_first"}, }, }, @@ -307,9 +309,10 @@ func TestBuildPriorityConfig(t *testing.T) { } func TestBuildClusterImplConfigForDNS(t *testing.T) { - gotName, gotConfig, gotAddrs := buildClusterImplConfigForDNS(3, testAddressStrs[0]) + gotName, gotConfig, gotAddrs := buildClusterImplConfigForDNS(3, testAddressStrs[0], DiscoveryMechanism{Cluster: testClusterName2, Type: DiscoveryMechanismTypeLogicalDNS}) wantName := "priority-3" wantConfig := &clusterimpl.LBConfig{ + Cluster: testClusterName2, ChildPolicy: 
&internalserviceconfig.BalancerConfig{ Name: "pick_first", }, From 0145b50cdc0b7547ff8b4ca71bb89c4108558d22 Mon Sep 17 00:00:00 2001 From: Tiratom <217bssr@gmail.com> Date: Thu, 13 Jan 2022 04:19:03 +0900 Subject: [PATCH 392/998] use insecure.NewCredentials() instead of grpc.WithInsecure (#5087) --- examples/helloworld/greeter_client/main.go | 3 ++- examples/route_guide/client/client.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/helloworld/greeter_client/main.go b/examples/helloworld/greeter_client/main.go index b27b7da43dd1..452906937dde 100644 --- a/examples/helloworld/greeter_client/main.go +++ b/examples/helloworld/greeter_client/main.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/helloworld/helloworld" ) @@ -41,7 +42,7 @@ var ( func main() { flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/route_guide/client/client.go b/examples/route_guide/client/client.go index 4d705e9ca99b..a96e32b5619d 100644 --- a/examples/route_guide/client/client.go +++ b/examples/route_guide/client/client.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/examples/data" pb "google.golang.org/grpc/examples/route_guide/routeguide" ) @@ -164,7 +165,7 @@ func main() { } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } conn, err := grpc.Dial(*serverAddr, opts...) 
From f231ac5293685492fabba61112acaa15c1a68ead Mon Sep 17 00:00:00 2001 From: Huang Chong Date: Thu, 13 Jan 2022 05:44:29 +0800 Subject: [PATCH 393/998] wrr: improve randomWRR performance (#5067) --- internal/wrr/random.go | 44 ++++++++++++++-------- internal/wrr/wrr_test.go | 79 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 105 insertions(+), 18 deletions(-) diff --git a/internal/wrr/random.go b/internal/wrr/random.go index ccf5113e9f32..6d5eb7d46209 100644 --- a/internal/wrr/random.go +++ b/internal/wrr/random.go @@ -19,6 +19,7 @@ package wrr import ( "fmt" + "sort" "sync" "google.golang.org/grpc/internal/grpcrand" @@ -26,8 +27,9 @@ import ( // weightedItem is a wrapped weighted item that is used to implement weighted random algorithm. type weightedItem struct { - Item interface{} - Weight int64 + item interface{} + weight int64 + accumulatedWeight int64 } func (w *weightedItem) String() string { @@ -36,9 +38,10 @@ func (w *weightedItem) String() string { // randomWRR is a struct that contains weighted items implement weighted random algorithm. type randomWRR struct { - mu sync.RWMutex - items []*weightedItem - sumOfWeights int64 + mu sync.RWMutex + items []*weightedItem + // Are all item's weights equal + equalWeights bool } // NewRandom creates a new WRR with random. @@ -51,27 +54,36 @@ var grpcrandInt63n = grpcrand.Int63n func (rw *randomWRR) Next() (item interface{}) { rw.mu.RLock() defer rw.mu.RUnlock() - if rw.sumOfWeights == 0 { + if len(rw.items) == 0 { return nil } - // Random number in [0, sum). - randomWeight := grpcrandInt63n(rw.sumOfWeights) - for _, item := range rw.items { - randomWeight = randomWeight - item.Weight - if randomWeight < 0 { - return item.Item - } + if rw.equalWeights { + return rw.items[grpcrandInt63n(int64(len(rw.items)))].item } - return rw.items[len(rw.items)-1].Item + sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight + // Random number in [0, sumOfWeights). 
+ randomWeight := grpcrandInt63n(sumOfWeights) + // Item's accumulated weights are in ascending order, because item's weight >= 0. + // Binary search rw.items to find first item whose accumulatedWeight > randomWeight + // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight + i := sort.Search(len(rw.items), func(i int) bool { return rw.items[i].accumulatedWeight > randomWeight }) + return rw.items[i].item } func (rw *randomWRR) Add(item interface{}, weight int64) { rw.mu.Lock() defer rw.mu.Unlock() - rItem := &weightedItem{Item: item, Weight: weight} + accumulatedWeight := weight + equalWeights := true + if len(rw.items) > 0 { + lastItem := rw.items[len(rw.items)-1] + accumulatedWeight = lastItem.accumulatedWeight + weight + equalWeights = rw.equalWeights && weight == lastItem.weight + } + rw.equalWeights = equalWeights + rItem := &weightedItem{item: item, weight: weight, accumulatedWeight: accumulatedWeight} rw.items = append(rw.items, rItem) - rw.sumOfWeights += weight } func (rw *randomWRR) String() string { diff --git a/internal/wrr/wrr_test.go b/internal/wrr/wrr_test.go index 4565e34ffb9c..ce4f5e507a2c 100644 --- a/internal/wrr/wrr_test.go +++ b/internal/wrr/wrr_test.go @@ -21,6 +21,7 @@ import ( "errors" "math" "math/rand" + "strconv" "testing" "github.com/google/go-cmp/cmp" @@ -70,12 +71,22 @@ func testWRRNext(t *testing.T, newWRR func() WRR) { name: "17-23-37", weights: []int64{17, 23, 37}, }, + { + name: "no items", + weights: []int64{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var sumOfWeights int64 - w := newWRR() + if len(tt.weights) == 0 { + if next := w.Next(); next != nil { + t.Fatalf("w.Next returns non nil value:%v when there is no item", next) + } + return + } + + var sumOfWeights int64 for i, weight := range tt.weights { w.Add(i, weight) sumOfWeights += weight @@ -112,6 +123,70 @@ func (s) TestEdfWrrNext(t *testing.T) { testWRRNext(t, NewEDF) 
} +func BenchmarkRandomWRRNext(b *testing.B) { + for _, n := range []int{100, 500, 1000} { + b.Run("equal-weights-"+strconv.Itoa(n)+"-items", func(b *testing.B) { + w := NewRandom() + sumOfWeights := n + for i := 0; i < n; i++ { + w.Add(i, 1) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for i := 0; i < sumOfWeights; i++ { + w.Next() + } + } + }) + } + + var maxWeight int64 = 1024 + for _, n := range []int{100, 500, 1000} { + b.Run("random-weights-"+strconv.Itoa(n)+"-items", func(b *testing.B) { + w := NewRandom() + var sumOfWeights int64 + for i := 0; i < n; i++ { + weight := rand.Int63n(maxWeight + 1) + w.Add(i, weight) + sumOfWeights += weight + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for i := 0; i < int(sumOfWeights); i++ { + w.Next() + } + } + }) + } + + itemsNum := 200 + heavyWeight := int64(itemsNum) + lightWeight := int64(1) + heavyIndices := []int{0, itemsNum / 2, itemsNum - 1} + for _, heavyIndex := range heavyIndices { + b.Run("skew-weights-heavy-index-"+strconv.Itoa(heavyIndex), func(b *testing.B) { + w := NewRandom() + var sumOfWeights int64 + for i := 0; i < itemsNum; i++ { + var weight int64 + if i == heavyIndex { + weight = heavyWeight + } else { + weight = lightWeight + } + sumOfWeights += weight + w.Add(i, weight) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for i := 0; i < int(sumOfWeights); i++ { + w.Next() + } + } + }) + } +} + func init() { r := rand.New(rand.NewSource(0)) grpcrandInt63n = r.Int63n From c1198288f52985cd16c6c2f33af81450c465bb73 Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Wed, 12 Jan 2022 16:11:12 -0800 Subject: [PATCH 394/998] xdsclient: allow overriding grpc.Dial function for the xDS controller. 
(#5108) --- .../xdsclient/controller/controller.go | 13 +++++- .../xdsclient/controller/controller_test.go | 45 +++++++++++++++++++ 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/controller/controller.go b/xds/internal/xdsclient/controller/controller.go index 09283d7423ff..3f7371ae63c7 100644 --- a/xds/internal/xdsclient/controller/controller.go +++ b/xds/internal/xdsclient/controller/controller.go @@ -88,6 +88,17 @@ type Controller struct { lrsClients map[string]*lrsClient } +var grpcDial = grpc.Dial + +// SetGRPCDial sets the dialer for the controller. The dial can be used to +// manipulate the dial options or change the target if needed. +// The SetGRPCDial must be called before gRPC initialization to make sure it +// affects all the controllers created. +// To reset any dialer set, pass in grpc.Dial as the parameter. +func SetGRPCDial(dialer func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error)) { + grpcDial = dialer +} + // New creates a new controller. func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (_ *Controller, retErr error) { switch { @@ -130,7 +141,7 @@ func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, val } }() - cc, err := grpc.Dial(config.ServerURI, dopts...) + cc, err := grpcDial(config.ServerURI, dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. 
return nil, fmt.Errorf("xds: failed to dial control plane {%s}: %v", config.ServerURI, err) diff --git a/xds/internal/xdsclient/controller/controller_test.go b/xds/internal/xdsclient/controller/controller_test.go index b2d9c225d8e0..e5fdb8878716 100644 --- a/xds/internal/xdsclient/controller/controller_test.go +++ b/xds/internal/xdsclient/controller/controller_test.go @@ -100,3 +100,48 @@ func (s) TestNew(t *testing.T) { }) } } + +func (s) TestNewWithGRPCDial(t *testing.T) { + config := &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithInsecure(), + NodeProto: testutils.EmptyNodeProtoV2, + } + + customDialerCalled := false + customDialer := func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + customDialerCalled = true + return grpc.Dial(target, opts...) + } + + // Set the dialer and make sure it is called. + SetGRPCDial(customDialer) + c, err := New(config, nil, nil, nil) + if err != nil { + t.Fatalf("New(%+v) = %v, want no error", config, err) + } + if c != nil { + c.Close() + } + + if !customDialerCalled { + t.Errorf("New(%+v) custom dialer called = false, want true", config) + } + customDialerCalled = false + + // Reset the dialer and make sure it is not called. 
+ SetGRPCDial(grpc.Dial) + c, err = New(config, nil, nil, nil) + defer func() { + if c != nil { + c.Close() + } + }() + if err != nil { + t.Fatalf("New(%+v) = %v, want no error", config, err) + } + + if customDialerCalled { + t.Errorf("New(%+v) interceptor called = true, want false", config) + } +} From a002994200f31f5b3e985e6b2cd2f12cb8b81389 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 12 Jan 2022 16:34:12 -0800 Subject: [PATCH 395/998] internal/proto: update generated code (#5125) --- .../grpc_service_config/service_config.pb.go | 1673 ++++++++++++----- 1 file changed, 1182 insertions(+), 491 deletions(-) diff --git a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go index b50d59d79352..0f65b79aae05 100644 --- a/internal/proto/grpc_service_config/service_config.pb.go +++ b/internal/proto/grpc_service_config/service_config.pb.go @@ -21,6 +21,10 @@ // options as the system is likely to choose better values for these options in // the vast majority of cases. In other words, please specify a configuration // option only if you really have to, and avoid copy-paste inclusion of configs. +// +// Note that gRPC uses the service config in JSON form, not in protobuf +// form. This proto definition is intended to help document the schema but +// will not actually be used directly by gRPC. // Code generated by protoc-gen-go. DO NOT EDIT. // versions: @@ -52,6 +56,55 @@ const ( // of the legacy proto package is being used. 
const _ = proto.ProtoPackageIsVersion4 +type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type int32 + +const ( + XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_UNKNOWN XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type = 0 + XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_EDS XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type = 1 + XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_LOGICAL_DNS XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type = 2 +) + +// Enum value maps for XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type. +var ( + XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "EDS", + 2: "LOGICAL_DNS", + } + XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type_value = map[string]int32{ + "UNKNOWN": 0, + "EDS": 1, + "LOGICAL_DNS": 2, + } +) + +func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Enum() *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type { + p := new(XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) + *p = x + return p +} + +func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_service_config_service_config_proto_enumTypes[0].Descriptor() +} + +func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Type() protoreflect.EnumType { + return &file_grpc_service_config_service_config_proto_enumTypes[0] +} + +func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use 
XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type.Descriptor instead. +func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7, 0, 0} +} + // Load balancing policy. // // Note that load_balancing_policy is deprecated in favor of @@ -100,11 +153,11 @@ func (x ServiceConfig_LoadBalancingPolicy) String() string { } func (ServiceConfig_LoadBalancingPolicy) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_service_config_service_config_proto_enumTypes[0].Descriptor() + return file_grpc_service_config_service_config_proto_enumTypes[1].Descriptor() } func (ServiceConfig_LoadBalancingPolicy) Type() protoreflect.EnumType { - return &file_grpc_service_config_service_config_proto_enumTypes[0] + return &file_grpc_service_config_service_config_proto_enumTypes[1] } func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { @@ -113,7 +166,7 @@ func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConfig_LoadBalancingPolicy.Descriptor instead. func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 0} } // Configuration for a method. @@ -367,6 +420,70 @@ func (*RoundRobinConfig) Descriptor() ([]byte, []int) { return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{2} } +// Configuration for grpclb LB policy. +type GrpcLbConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. What LB policy to use for routing between the backend + // addresses. If unset, defaults to round_robin. + // Currently, the only supported values are round_robin and pick_first. 
+ // Note that this will be used both in balancer mode and in fallback mode. + // Multiple LB policies can be specified; clients will iterate through + // the list in order and stop at the first policy that they support. + ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` + // Optional. If specified, overrides the name of the service to be sent to + // the balancer. + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` +} + +func (x *GrpcLbConfig) Reset() { + *x = GrpcLbConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLbConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLbConfig) ProtoMessage() {} + +func (x *GrpcLbConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLbConfig.ProtoReflect.Descriptor instead. +func (*GrpcLbConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3} +} + +func (x *GrpcLbConfig) GetChildPolicy() []*LoadBalancingConfig { + if x != nil { + return x.ChildPolicy + } + return nil +} + +func (x *GrpcLbConfig) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + // Configuration for priority LB policy. 
type PriorityLoadBalancingPolicyConfig struct { state protoimpl.MessageState @@ -382,7 +499,7 @@ type PriorityLoadBalancingPolicyConfig struct { func (x *PriorityLoadBalancingPolicyConfig) Reset() { *x = PriorityLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + mi := &file_grpc_service_config_service_config_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -395,7 +512,7 @@ func (x *PriorityLoadBalancingPolicyConfig) String() string { func (*PriorityLoadBalancingPolicyConfig) ProtoMessage() {} func (x *PriorityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + mi := &file_grpc_service_config_service_config_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -408,7 +525,7 @@ func (x *PriorityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message // Deprecated: Use PriorityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*PriorityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4} } func (x *PriorityLoadBalancingPolicyConfig) GetChildren() map[string]*PriorityLoadBalancingPolicyConfig_Child { @@ -437,7 +554,7 @@ type WeightedTargetLoadBalancingPolicyConfig struct { func (x *WeightedTargetLoadBalancingPolicyConfig) Reset() { *x = WeightedTargetLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] + mi := &file_grpc_service_config_service_config_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -450,7 +567,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig) String() string { func (*WeightedTargetLoadBalancingPolicyConfig) ProtoMessage() {} func (x *WeightedTargetLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] + mi := &file_grpc_service_config_service_config_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -463,7 +580,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Me // Deprecated: Use WeightedTargetLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. func (*WeightedTargetLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5} } func (x *WeightedTargetLoadBalancingPolicyConfig) GetTargets() map[string]*WeightedTargetLoadBalancingPolicyConfig_Target { @@ -473,41 +590,32 @@ func (x *WeightedTargetLoadBalancingPolicyConfig) GetTargets() map[string]*Weigh return nil } -// Configuration for grpclb LB policy. 
-type GrpcLbConfig struct { +// Configuration for the cds LB policy. +type CdsConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Optional. What LB policy to use for routing between the backend - // addresses. If unset, defaults to round_robin. - // Currently, the only supported values are round_robin and pick_first. - // Note that this will be used both in balancer mode and in fallback mode. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` - // Optional. If specified, overrides the name of the service to be sent to - // the balancer. - ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` // Required. 
} -func (x *GrpcLbConfig) Reset() { - *x = GrpcLbConfig{} +func (x *CdsConfig) Reset() { + *x = CdsConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] + mi := &file_grpc_service_config_service_config_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GrpcLbConfig) String() string { +func (x *CdsConfig) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GrpcLbConfig) ProtoMessage() {} +func (*CdsConfig) ProtoMessage() {} -func (x *GrpcLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] +func (x *CdsConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -518,51 +626,55 @@ func (x *GrpcLbConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GrpcLbConfig.ProtoReflect.Descriptor instead. -func (*GrpcLbConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5} -} - -func (x *GrpcLbConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil +// Deprecated: Use CdsConfig.ProtoReflect.Descriptor instead. +func (*CdsConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} } -func (x *GrpcLbConfig) GetServiceName() string { +func (x *CdsConfig) GetCluster() string { if x != nil { - return x.ServiceName + return x.Cluster } return "" } -// Configuration for the cds LB policy. -type CdsConfig struct { +// Configuration for xds_cluster_resolver LB policy. 
+type XdsClusterResolverLoadBalancingPolicyConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` // Required. -} - -func (x *CdsConfig) Reset() { - *x = CdsConfig{} + // Ordered list of discovery mechanisms. + // Must have at least one element. + // Results from each discovery mechanism are concatenated together in + // successive priorities. + DiscoveryMechanisms []*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism `protobuf:"bytes,1,rep,name=discovery_mechanisms,json=discoveryMechanisms,proto3" json:"discovery_mechanisms,omitempty"` + // xDS LB policy. + // This represents the xDS LB policy, which does not necessarily map + // one-to-one to a gRPC LB policy. Currently, the following policies + // are supported: + // - "ROUND_ROBIN" (config is empty) + // - "RING_HASH" (config is a RingHashLoadBalancingConfig) + XdsLbPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=xds_lb_policy,json=xdsLbPolicy,proto3" json:"xds_lb_policy,omitempty"` +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig) Reset() { + *x = XdsClusterResolverLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] + mi := &file_grpc_service_config_service_config_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CdsConfig) String() string { +func (x *XdsClusterResolverLoadBalancingPolicyConfig) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CdsConfig) ProtoMessage() {} +func (*XdsClusterResolverLoadBalancingPolicyConfig) ProtoMessage() {} -func (x *CdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] +func (x *XdsClusterResolverLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { + mi := 
&file_grpc_service_config_service_config_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -573,65 +685,67 @@ func (x *CdsConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CdsConfig.ProtoReflect.Descriptor instead. -func (*CdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} +// Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. +func (*XdsClusterResolverLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} } -func (x *CdsConfig) GetCluster() string { +func (x *XdsClusterResolverLoadBalancingPolicyConfig) GetDiscoveryMechanisms() []*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism { if x != nil { - return x.Cluster + return x.DiscoveryMechanisms } - return "" + return nil } -// Configuration for xds LB policy. -type XdsConfig struct { +func (x *XdsClusterResolverLoadBalancingPolicyConfig) GetXdsLbPolicy() []*LoadBalancingConfig { + if x != nil { + return x.XdsLbPolicy + } + return nil +} + +// Configuration for xds_cluster_impl LB policy. +type XdsClusterImplLoadBalancingPolicyConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Name of balancer to connect to. - // - // Deprecated: Do not use. - BalancerName string `protobuf:"bytes,1,opt,name=balancer_name,json=balancerName,proto3" json:"balancer_name,omitempty"` - // Optional. What LB policy to use for intra-locality routing. - // If unset, will use whatever algorithm is specified by the balancer. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. 
- ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` - // Optional. What LB policy to use in fallback mode. If not - // specified, defaults to round_robin. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. - FallbackPolicy []*LoadBalancingConfig `protobuf:"bytes,3,rep,name=fallback_policy,json=fallbackPolicy,proto3" json:"fallback_policy,omitempty"` - // Optional. Name to use in EDS query. If not present, defaults to - // the server name from the target URI. - EdsServiceName string `protobuf:"bytes,4,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // LRS server to send load reports to. - // If not present, load reporting will be disabled. - // If set to the empty string, load reporting will be sent to the same - // server that we obtained CDS data from. - LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` + // Cluster name. Required. + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // EDS service name. + // Not set if cluster is not an EDS cluster or if it does not + // specify an EDS service name. + EdsServiceName string `protobuf:"bytes,2,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` + // Server to send load reports to. + // If unset, no load reporting is done. + // If set to empty string, load reporting will be sent to the same + // server as we are getting xds data from. 
+ LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` + // Maximum number of outstanding requests can be made to the upstream cluster. + // Default is 1024. + MaxConcurrentRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` + DropCategories []*XdsClusterImplLoadBalancingPolicyConfig_DropCategory `protobuf:"bytes,5,rep,name=drop_categories,json=dropCategories,proto3" json:"drop_categories,omitempty"` + // Child policy. + ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,6,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` } -func (x *XdsConfig) Reset() { - *x = XdsConfig{} +func (x *XdsClusterImplLoadBalancingPolicyConfig) Reset() { + *x = XdsClusterImplLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] + mi := &file_grpc_service_config_service_config_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *XdsConfig) String() string { +func (x *XdsClusterImplLoadBalancingPolicyConfig) String() string { return protoimpl.X.MessageStringOf(x) } -func (*XdsConfig) ProtoMessage() {} +func (*XdsClusterImplLoadBalancingPolicyConfig) ProtoMessage() {} -func (x *XdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] +func (x *XdsClusterImplLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -642,43 +756,49 @@ func (x *XdsConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// 
Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. -func (*XdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} +// Deprecated: Use XdsClusterImplLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. +func (*XdsClusterImplLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} } -// Deprecated: Do not use. -func (x *XdsConfig) GetBalancerName() string { +func (x *XdsClusterImplLoadBalancingPolicyConfig) GetCluster() string { if x != nil { - return x.BalancerName + return x.Cluster } return "" } -func (x *XdsConfig) GetChildPolicy() []*LoadBalancingConfig { +func (x *XdsClusterImplLoadBalancingPolicyConfig) GetEdsServiceName() string { if x != nil { - return x.ChildPolicy + return x.EdsServiceName + } + return "" +} + +func (x *XdsClusterImplLoadBalancingPolicyConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { + if x != nil { + return x.LrsLoadReportingServerName } return nil } -func (x *XdsConfig) GetFallbackPolicy() []*LoadBalancingConfig { +func (x *XdsClusterImplLoadBalancingPolicyConfig) GetMaxConcurrentRequests() *wrapperspb.UInt32Value { if x != nil { - return x.FallbackPolicy + return x.MaxConcurrentRequests } return nil } -func (x *XdsConfig) GetEdsServiceName() string { +func (x *XdsClusterImplLoadBalancingPolicyConfig) GetDropCategories() []*XdsClusterImplLoadBalancingPolicyConfig_DropCategory { if x != nil { - return x.EdsServiceName + return x.DropCategories } - return "" + return nil } -func (x *XdsConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { +func (x *XdsClusterImplLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { if x != nil { - return x.LrsLoadReportingServerName + return x.ChildPolicy } return nil } @@ -716,7 +836,7 @@ type EdsLoadBalancingPolicyConfig struct { func (x *EdsLoadBalancingPolicyConfig) Reset() { *x = 
EdsLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + mi := &file_grpc_service_config_service_config_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -729,7 +849,7 @@ func (x *EdsLoadBalancingPolicyConfig) String() string { func (*EdsLoadBalancingPolicyConfig) ProtoMessage() {} func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + mi := &file_grpc_service_config_service_config_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -742,7 +862,7 @@ func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use EdsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. func (*EdsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} } func (x *EdsLoadBalancingPolicyConfig) GetCluster() string { @@ -780,6 +900,62 @@ func (x *EdsLoadBalancingPolicyConfig) GetEndpointPickingPolicy() []*LoadBalanci return nil } +// Configuration for ring_hash LB policy. 
+type RingHashLoadBalancingConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MinRingSize uint64 `protobuf:"varint,1,opt,name=min_ring_size,json=minRingSize,proto3" json:"min_ring_size,omitempty"` + MaxRingSize uint64 `protobuf:"varint,2,opt,name=max_ring_size,json=maxRingSize,proto3" json:"max_ring_size,omitempty"` +} + +func (x *RingHashLoadBalancingConfig) Reset() { + *x = RingHashLoadBalancingConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RingHashLoadBalancingConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RingHashLoadBalancingConfig) ProtoMessage() {} + +func (x *RingHashLoadBalancingConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RingHashLoadBalancingConfig.ProtoReflect.Descriptor instead. +func (*RingHashLoadBalancingConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} +} + +func (x *RingHashLoadBalancingConfig) GetMinRingSize() uint64 { + if x != nil { + return x.MinRingSize + } + return 0 +} + +func (x *RingHashLoadBalancingConfig) GetMaxRingSize() uint64 { + if x != nil { + return x.MaxRingSize + } + return 0 +} + // Configuration for lrs LB policy. 
type LrsLoadBalancingPolicyConfig struct { state protoimpl.MessageState @@ -803,7 +979,7 @@ type LrsLoadBalancingPolicyConfig struct { func (x *LrsLoadBalancingPolicyConfig) Reset() { *x = LrsLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] + mi := &file_grpc_service_config_service_config_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -816,7 +992,7 @@ func (x *LrsLoadBalancingPolicyConfig) String() string { func (*LrsLoadBalancingPolicyConfig) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] + mi := &file_grpc_service_config_service_config_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -829,7 +1005,7 @@ func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LrsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. func (*LrsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} } func (x *LrsLoadBalancingPolicyConfig) GetClusterName() string { @@ -867,6 +1043,104 @@ func (x *LrsLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { return nil } +// Configuration for xds LB policy. +type XdsConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of balancer to connect to. + // + // Deprecated: Do not use. + BalancerName string `protobuf:"bytes,1,opt,name=balancer_name,json=balancerName,proto3" json:"balancer_name,omitempty"` + // Optional. What LB policy to use for intra-locality routing. 
+ // If unset, will use whatever algorithm is specified by the balancer. + // Multiple LB policies can be specified; clients will iterate through + // the list in order and stop at the first policy that they support. + ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` + // Optional. What LB policy to use in fallback mode. If not + // specified, defaults to round_robin. + // Multiple LB policies can be specified; clients will iterate through + // the list in order and stop at the first policy that they support. + FallbackPolicy []*LoadBalancingConfig `protobuf:"bytes,3,rep,name=fallback_policy,json=fallbackPolicy,proto3" json:"fallback_policy,omitempty"` + // Optional. Name to use in EDS query. If not present, defaults to + // the server name from the target URI. + EdsServiceName string `protobuf:"bytes,4,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` + // LRS server to send load reports to. + // If not present, load reporting will be disabled. + // If set to the empty string, load reporting will be sent to the same + // server that we obtained CDS data from. 
+ LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` +} + +func (x *XdsConfig) Reset() { + *x = XdsConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsConfig) ProtoMessage() {} + +func (x *XdsConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. +func (*XdsConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{12} +} + +// Deprecated: Do not use. +func (x *XdsConfig) GetBalancerName() string { + if x != nil { + return x.BalancerName + } + return "" +} + +func (x *XdsConfig) GetChildPolicy() []*LoadBalancingConfig { + if x != nil { + return x.ChildPolicy + } + return nil +} + +func (x *XdsConfig) GetFallbackPolicy() []*LoadBalancingConfig { + if x != nil { + return x.FallbackPolicy + } + return nil +} + +func (x *XdsConfig) GetEdsServiceName() string { + if x != nil { + return x.EdsServiceName + } + return "" +} + +func (x *XdsConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { + if x != nil { + return x.LrsLoadReportingServerName + } + return nil +} + // Selects LB policy and provides corresponding configuration. // // In general, all instances of this field should be repeated. 
Clients will @@ -889,11 +1163,14 @@ type LoadBalancingConfig struct { // *LoadBalancingConfig_PickFirst // *LoadBalancingConfig_RoundRobin // *LoadBalancingConfig_Grpclb - // *LoadBalancingConfig_Priority - // *LoadBalancingConfig_WeightedTarget - // *LoadBalancingConfig_Cds - // *LoadBalancingConfig_Eds - // *LoadBalancingConfig_Lrs + // *LoadBalancingConfig_PriorityExperimental + // *LoadBalancingConfig_WeightedTargetExperimental + // *LoadBalancingConfig_CdsExperimental + // *LoadBalancingConfig_XdsClusterResolverExperimental + // *LoadBalancingConfig_XdsClusterImplExperimental + // *LoadBalancingConfig_EdsExperimental + // *LoadBalancingConfig_RingHashExperimental + // *LoadBalancingConfig_LrsExperimental // *LoadBalancingConfig_Xds // *LoadBalancingConfig_XdsExperimental Policy isLoadBalancingConfig_Policy `protobuf_oneof:"policy"` @@ -902,7 +1179,7 @@ type LoadBalancingConfig struct { func (x *LoadBalancingConfig) Reset() { *x = LoadBalancingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + mi := &file_grpc_service_config_service_config_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -915,7 +1192,7 @@ func (x *LoadBalancingConfig) String() string { func (*LoadBalancingConfig) ProtoMessage() {} func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + mi := &file_grpc_service_config_service_config_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -928,7 +1205,7 @@ func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LoadBalancingConfig.ProtoReflect.Descriptor instead. 
func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13} } func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { @@ -952,44 +1229,66 @@ func (x *LoadBalancingConfig) GetRoundRobin() *RoundRobinConfig { return nil } -func (x *LoadBalancingConfig) GetGrpclb() *GrpcLbConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Grpclb); ok { - return x.Grpclb +func (x *LoadBalancingConfig) GetGrpclb() *GrpcLbConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_Grpclb); ok { + return x.Grpclb + } + return nil +} + +func (x *LoadBalancingConfig) GetPriorityExperimental() *PriorityLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_PriorityExperimental); ok { + return x.PriorityExperimental + } + return nil +} + +func (x *LoadBalancingConfig) GetWeightedTargetExperimental() *WeightedTargetLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_WeightedTargetExperimental); ok { + return x.WeightedTargetExperimental + } + return nil +} + +func (x *LoadBalancingConfig) GetCdsExperimental() *CdsConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_CdsExperimental); ok { + return x.CdsExperimental } return nil } -func (x *LoadBalancingConfig) GetPriority() *PriorityLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Priority); ok { - return x.Priority +func (x *LoadBalancingConfig) GetXdsClusterResolverExperimental() *XdsClusterResolverLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsClusterResolverExperimental); ok { + return x.XdsClusterResolverExperimental } return nil } -func (x *LoadBalancingConfig) GetWeightedTarget() *WeightedTargetLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_WeightedTarget); ok { - return x.WeightedTarget +func (x 
*LoadBalancingConfig) GetXdsClusterImplExperimental() *XdsClusterImplLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsClusterImplExperimental); ok { + return x.XdsClusterImplExperimental } return nil } -func (x *LoadBalancingConfig) GetCds() *CdsConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Cds); ok { - return x.Cds +func (x *LoadBalancingConfig) GetEdsExperimental() *EdsLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_EdsExperimental); ok { + return x.EdsExperimental } return nil } -func (x *LoadBalancingConfig) GetEds() *EdsLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Eds); ok { - return x.Eds +func (x *LoadBalancingConfig) GetRingHashExperimental() *RingHashLoadBalancingConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_RingHashExperimental); ok { + return x.RingHashExperimental } return nil } -func (x *LoadBalancingConfig) GetLrs() *LrsLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Lrs); ok { - return x.Lrs +// Deprecated: Do not use. 
+func (x *LoadBalancingConfig) GetLrsExperimental() *LrsLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_LrsExperimental); ok { + return x.LrsExperimental } return nil } @@ -1029,28 +1328,40 @@ type LoadBalancingConfig_Grpclb struct { Grpclb *GrpcLbConfig `protobuf:"bytes,3,opt,name=grpclb,proto3,oneof"` } -type LoadBalancingConfig_Priority struct { - Priority *PriorityLoadBalancingPolicyConfig `protobuf:"bytes,9,opt,name=priority,proto3,oneof"` +type LoadBalancingConfig_PriorityExperimental struct { + PriorityExperimental *PriorityLoadBalancingPolicyConfig `protobuf:"bytes,9,opt,name=priority_experimental,proto3,oneof"` } -type LoadBalancingConfig_WeightedTarget struct { - WeightedTarget *WeightedTargetLoadBalancingPolicyConfig `protobuf:"bytes,10,opt,name=weighted_target,json=weightedTarget,proto3,oneof"` +type LoadBalancingConfig_WeightedTargetExperimental struct { + WeightedTargetExperimental *WeightedTargetLoadBalancingPolicyConfig `protobuf:"bytes,10,opt,name=weighted_target_experimental,proto3,oneof"` } -type LoadBalancingConfig_Cds struct { - // EXPERIMENTAL -- DO NOT USE +type LoadBalancingConfig_CdsExperimental struct { // xDS-based load balancing. - // The policy is known as xds_experimental while it is under development. - // It will be renamed to xds once it is ready for public use. 
- Cds *CdsConfig `protobuf:"bytes,6,opt,name=cds,proto3,oneof"` + CdsExperimental *CdsConfig `protobuf:"bytes,6,opt,name=cds_experimental,proto3,oneof"` +} + +type LoadBalancingConfig_XdsClusterResolverExperimental struct { + XdsClusterResolverExperimental *XdsClusterResolverLoadBalancingPolicyConfig `protobuf:"bytes,11,opt,name=xds_cluster_resolver_experimental,proto3,oneof"` +} + +type LoadBalancingConfig_XdsClusterImplExperimental struct { + XdsClusterImplExperimental *XdsClusterImplLoadBalancingPolicyConfig `protobuf:"bytes,12,opt,name=xds_cluster_impl_experimental,proto3,oneof"` +} + +type LoadBalancingConfig_EdsExperimental struct { + EdsExperimental *EdsLoadBalancingPolicyConfig `protobuf:"bytes,7,opt,name=eds_experimental,proto3,oneof"` } -type LoadBalancingConfig_Eds struct { - Eds *EdsLoadBalancingPolicyConfig `protobuf:"bytes,7,opt,name=eds,proto3,oneof"` +type LoadBalancingConfig_RingHashExperimental struct { + RingHashExperimental *RingHashLoadBalancingConfig `protobuf:"bytes,13,opt,name=ring_hash_experimental,proto3,oneof"` } -type LoadBalancingConfig_Lrs struct { - Lrs *LrsLoadBalancingPolicyConfig `protobuf:"bytes,8,opt,name=lrs,proto3,oneof"` +type LoadBalancingConfig_LrsExperimental struct { + // Deprecated xDS-related policies. + // + // Deprecated: Do not use. + LrsExperimental *LrsLoadBalancingPolicyConfig `protobuf:"bytes,8,opt,name=lrs_experimental,proto3,oneof"` } type LoadBalancingConfig_Xds struct { @@ -1059,9 +1370,6 @@ type LoadBalancingConfig_Xds struct { } type LoadBalancingConfig_XdsExperimental struct { - // TODO(rekarthik): Deprecate this field after the xds policy - // is ready for public use. - // // Deprecated: Do not use. 
XdsExperimental *XdsConfig `protobuf:"bytes,5,opt,name=xds_experimental,proto3,oneof"` } @@ -1072,15 +1380,21 @@ func (*LoadBalancingConfig_RoundRobin) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_Grpclb) isLoadBalancingConfig_Policy() {} -func (*LoadBalancingConfig_Priority) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_PriorityExperimental) isLoadBalancingConfig_Policy() {} + +func (*LoadBalancingConfig_WeightedTargetExperimental) isLoadBalancingConfig_Policy() {} + +func (*LoadBalancingConfig_CdsExperimental) isLoadBalancingConfig_Policy() {} -func (*LoadBalancingConfig_WeightedTarget) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_XdsClusterResolverExperimental) isLoadBalancingConfig_Policy() {} -func (*LoadBalancingConfig_Cds) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_XdsClusterImplExperimental) isLoadBalancingConfig_Policy() {} -func (*LoadBalancingConfig_Eds) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_EdsExperimental) isLoadBalancingConfig_Policy() {} -func (*LoadBalancingConfig_Lrs) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_RingHashExperimental) isLoadBalancingConfig_Policy() {} + +func (*LoadBalancingConfig_LrsExperimental) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_Xds) isLoadBalancingConfig_Policy() {} @@ -1108,7 +1422,7 @@ type ServiceConfig struct { func (x *ServiceConfig) Reset() { *x = ServiceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] + mi := &file_grpc_service_config_service_config_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1121,7 +1435,7 @@ func (x *ServiceConfig) String() string { func (*ServiceConfig) ProtoMessage() {} func (x *ServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] + mi := 
&file_grpc_service_config_service_config_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1134,7 +1448,7 @@ func (x *ServiceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig.ProtoReflect.Descriptor instead. func (*ServiceConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14} } // Deprecated: Do not use. @@ -1212,7 +1526,7 @@ type MethodConfig_Name struct { func (x *MethodConfig_Name) Reset() { *x = MethodConfig_Name{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1225,7 +1539,7 @@ func (x *MethodConfig_Name) String() string { func (*MethodConfig_Name) ProtoMessage() {} func (x *MethodConfig_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1284,7 +1598,7 @@ type MethodConfig_RetryPolicy struct { func (x *MethodConfig_RetryPolicy) Reset() { *x = MethodConfig_RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1297,7 +1611,7 @@ func (x *MethodConfig_RetryPolicy) String() string { func (*MethodConfig_RetryPolicy) ProtoMessage() {} func (x *MethodConfig_RetryPolicy) ProtoReflect() protoreflect.Message { - mi := 
&file_grpc_service_config_service_config_proto_msgTypes[13] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1379,7 +1693,7 @@ type MethodConfig_HedgingPolicy struct { func (x *MethodConfig_HedgingPolicy) Reset() { *x = MethodConfig_HedgingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1392,7 +1706,7 @@ func (x *MethodConfig_HedgingPolicy) String() string { func (*MethodConfig_HedgingPolicy) ProtoMessage() {} func (x *MethodConfig_HedgingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1439,12 +1753,14 @@ type PriorityLoadBalancingPolicyConfig_Child struct { unknownFields protoimpl.UnknownFields Config []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty"` + // If true, will ignore reresolution requests from this child. 
+ IgnoreReresolutionRequests bool `protobuf:"varint,2,opt,name=ignore_reresolution_requests,json=ignoreReresolutionRequests,proto3" json:"ignore_reresolution_requests,omitempty"` } func (x *PriorityLoadBalancingPolicyConfig_Child) Reset() { *x = PriorityLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1457,7 +1773,7 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) String() string { func (*PriorityLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1470,7 +1786,7 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Me // Deprecated: Use PriorityLoadBalancingPolicyConfig_Child.ProtoReflect.Descriptor instead. 
func (*PriorityLoadBalancingPolicyConfig_Child) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4, 0} } func (x *PriorityLoadBalancingPolicyConfig_Child) GetConfig() []*LoadBalancingConfig { @@ -1480,6 +1796,13 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) GetConfig() []*LoadBalancingCo return nil } +func (x *PriorityLoadBalancingPolicyConfig_Child) GetIgnoreReresolutionRequests() bool { + if x != nil { + return x.IgnoreReresolutionRequests + } + return false +} + type WeightedTargetLoadBalancingPolicyConfig_Target struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1492,7 +1815,7 @@ type WeightedTargetLoadBalancingPolicyConfig_Target struct { func (x *WeightedTargetLoadBalancingPolicyConfig_Target) Reset() { *x = WeightedTargetLoadBalancingPolicyConfig_Target{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1505,7 +1828,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) String() string { func (*WeightedTargetLoadBalancingPolicyConfig_Target) ProtoMessage() {} func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1518,7 +1841,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoref // Deprecated: Use WeightedTargetLoadBalancingPolicyConfig_Target.ProtoReflect.Descriptor instead. 
func (*WeightedTargetLoadBalancingPolicyConfig_Target) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5, 0} } func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetWeight() uint32 { @@ -1535,6 +1858,167 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetChildPolicy() []*Loa return nil } +// Describes a discovery mechanism instance. +// For EDS or LOGICAL_DNS clusters, there will be exactly one +// DiscoveryMechanism, which will describe the cluster of the parent +// CDS policy. +// For aggregate clusters, there will be one DiscoveryMechanism for each +// underlying cluster. +type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Cluster name. + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + // LRS server to send load reports to. + // If not present, load reporting will be disabled. + // If set to the empty string, load reporting will be sent to the same + // server that we obtained CDS data from. + LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` + // Maximum number of outstanding requests can be made to the upstream + // cluster. Default is 1024. + MaxConcurrentRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` + Type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type" json:"type,omitempty"` + // For type EDS only. 
+ // EDS service name, as returned in CDS. + // May be unset if not specified in CDS. + EdsServiceName string `protobuf:"bytes,5,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` + // For type LOGICAL_DNS only. + // DNS name to resolve in "host:port" form. + DnsHostname string `protobuf:"bytes,6,opt,name=dns_hostname,json=dnsHostname,proto3" json:"dns_hostname,omitempty"` +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Reset() { + *x = XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoMessage() {} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism.ProtoReflect.Descriptor instead. 
+func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetLrsLoadReportingServerName() *wrapperspb.StringValue { + if x != nil { + return x.LrsLoadReportingServerName + } + return nil +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetMaxConcurrentRequests() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxConcurrentRequests + } + return nil +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetType() XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type { + if x != nil { + return x.Type + } + return XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_UNKNOWN +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetEdsServiceName() string { + if x != nil { + return x.EdsServiceName + } + return "" +} + +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetDnsHostname() string { + if x != nil { + return x.DnsHostname + } + return "" +} + +// Drop configuration. 
+type XdsClusterImplLoadBalancingPolicyConfig_DropCategory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Category string `protobuf:"bytes,1,opt,name=category,proto3" json:"category,omitempty"` + RequestsPerMillion uint32 `protobuf:"varint,2,opt,name=requests_per_million,json=requestsPerMillion,proto3" json:"requests_per_million,omitempty"` +} + +func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Reset() { + *x = XdsClusterImplLoadBalancingPolicyConfig_DropCategory{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoMessage() {} + +func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsClusterImplLoadBalancingPolicyConfig_DropCategory.ProtoReflect.Descriptor instead. +func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) GetCategory() string { + if x != nil { + return x.Category + } + return "" +} + +func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) GetRequestsPerMillion() uint32 { + if x != nil { + return x.RequestsPerMillion + } + return 0 +} + // The locality for which this policy will report load. Required. 
type LrsLoadBalancingPolicyConfig_Locality struct { state protoimpl.MessageState @@ -1549,7 +2033,7 @@ type LrsLoadBalancingPolicyConfig_Locality struct { func (x *LrsLoadBalancingPolicyConfig_Locality) Reset() { *x = LrsLoadBalancingPolicyConfig_Locality{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1562,7 +2046,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) String() string { func (*LrsLoadBalancingPolicyConfig_Locality) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1575,7 +2059,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Mess // Deprecated: Use LrsLoadBalancingPolicyConfig_Locality.ProtoReflect.Descriptor instead. 
func (*LrsLoadBalancingPolicyConfig_Locality) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} } func (x *LrsLoadBalancingPolicyConfig_Locality) GetRegion() string { @@ -1633,7 +2117,7 @@ type ServiceConfig_RetryThrottlingPolicy struct { func (x *ServiceConfig_RetryThrottlingPolicy) Reset() { *x = ServiceConfig_RetryThrottlingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1646,7 +2130,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) String() string { func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1659,7 +2143,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Messag // Deprecated: Use ServiceConfig_RetryThrottlingPolicy.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 0} } func (x *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { @@ -1688,7 +2172,7 @@ type ServiceConfig_HealthCheckConfig struct { func (x *ServiceConfig_HealthCheckConfig) Reset() { *x = ServiceConfig_HealthCheckConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + mi := &file_grpc_service_config_service_config_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1701,7 +2185,7 @@ func (x *ServiceConfig_HealthCheckConfig) String() string { func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + mi := &file_grpc_service_config_service_config_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1714,7 +2198,7 @@ func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig_HealthCheckConfig.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 1} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 1} } func (x *ServiceConfig_HealthCheckConfig) GetServiceName() *wrapperspb.StringValue { @@ -1808,8 +2292,16 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x12, 0x0a, 0x10, 0x52, 0x6f, - 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xeb, - 0x02, 0x0a, 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x7e, + 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, + 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xae, + 0x03, 0x0a, 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, @@ -1819,224 +2311,342 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, - 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, - 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x02, 0x0a, - 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, + 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, 0x43, 
0x68, 0x69, 0x6c, 0x64, + 0x12, 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x52, 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, + 0x68, 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xfe, 0x02, 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, + 0x72, 0x70, 
0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, + 0x1a, 0x6d, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, + 0x7f, 0x0a, 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, 0x73, 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xd7, 0x05, 0x0a, 0x2b, 0x58, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x86, 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x13, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, + 0x12, 0x4c, 0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, 0x6c, 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 
0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xd0, + 0x03, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, + 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, + 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 
0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x6d, 0x0a, - 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, - 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, 0x0a, 0x0c, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x59, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7e, 0x0a, - 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, - 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 
0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6e, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x01, + 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, 0x53, 0x10, + 0x02, 0x22, 0xc4, 0x04, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 
0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, 0x0a, 0x0f, 0x64, 0x72, 0x6f, + 0x70, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x64, + 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, + 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x25, 0x0a, - 0x09, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, - 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, - 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 
0x6f, 0x61, - 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, - 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x88, 0x03, 0x0a, 0x1c, 0x45, 0x64, 0x73, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, - 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, - 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, - 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 
0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x60, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x69, 0x63, - 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x65, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0xa6, 0x03, 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x42, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, - 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 
0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, + 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x5c, 0x0a, 0x0c, 0x44, 0x72, + 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, + 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, + 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, + 0x72, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, 0x0a, 0x1c, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, + 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, + 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, + 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 
0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x60, 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x69, 0x63, 0x6b, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x60, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x69, + 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, - 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 
0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0xfa, 0x05, 0x0a, 0x13, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, - 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, - 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, - 0x63, 0x6c, 0x62, 0x12, 0x54, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x48, 0x00, 0x52, - 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x0f, 0x77, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, - 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x48, 0x00, 0x52, 0x0e, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x03, 0x63, 0x64, 0x73, 0x12, 0x45, 0x0a, 0x03, 0x65, 0x64, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x03, 0x65, 0x64, 0x73, 0x12, 0x45, 0x0a, - 0x03, 0x6c, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 
0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x69, + 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, + 0x61, 0x78, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa6, 0x03, 0x0a, 0x1c, 0x4c, + 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 
0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x03, 0x6c, 0x72, 0x73, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, - 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, - 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, - 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 
- 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, + 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, + 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, + 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 
0x6f, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, + 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x7a, + 0x6f, 0x6e, 0x65, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, + 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, + 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, + 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x88, 0x0a, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, + 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, + 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, + 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, + 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, + 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, + 0x67, 0x72, 
0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, + 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 
0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, + 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, + 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, + 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x5f, 0x0a, 0x10, 0x65, + 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 
0x73, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x72, 0x73, + 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, + 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, - 0x63, 0x68, 0x65, 0x63, 
0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, + 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 
0x6e, 0x5f, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, - 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, - 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, + 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, + 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 
0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, + 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -2051,87 +2661,105 @@ func file_grpc_service_config_service_config_proto_rawDescGZIP() []byte { return file_grpc_service_config_service_config_proto_rawDescData } -var file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var 
file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ - (ServiceConfig_LoadBalancingPolicy)(0), // 0: grpc.service_config.ServiceConfig.LoadBalancingPolicy - (*MethodConfig)(nil), // 1: grpc.service_config.MethodConfig - (*PickFirstConfig)(nil), // 2: grpc.service_config.PickFirstConfig - (*RoundRobinConfig)(nil), // 3: grpc.service_config.RoundRobinConfig - (*PriorityLoadBalancingPolicyConfig)(nil), // 4: grpc.service_config.PriorityLoadBalancingPolicyConfig - (*WeightedTargetLoadBalancingPolicyConfig)(nil), // 5: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - (*GrpcLbConfig)(nil), // 6: grpc.service_config.GrpcLbConfig - (*CdsConfig)(nil), // 7: grpc.service_config.CdsConfig - (*XdsConfig)(nil), // 8: grpc.service_config.XdsConfig - (*EdsLoadBalancingPolicyConfig)(nil), // 9: grpc.service_config.EdsLoadBalancingPolicyConfig - (*LrsLoadBalancingPolicyConfig)(nil), // 10: grpc.service_config.LrsLoadBalancingPolicyConfig - (*LoadBalancingConfig)(nil), // 11: grpc.service_config.LoadBalancingConfig - (*ServiceConfig)(nil), // 12: grpc.service_config.ServiceConfig - (*MethodConfig_Name)(nil), // 13: grpc.service_config.MethodConfig.Name - (*MethodConfig_RetryPolicy)(nil), // 14: grpc.service_config.MethodConfig.RetryPolicy - (*MethodConfig_HedgingPolicy)(nil), // 15: grpc.service_config.MethodConfig.HedgingPolicy - (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - nil, // 17: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 18: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - nil, // 19: 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 20: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - (*ServiceConfig_RetryThrottlingPolicy)(nil), // 21: grpc.service_config.ServiceConfig.RetryThrottlingPolicy - (*ServiceConfig_HealthCheckConfig)(nil), // 22: grpc.service_config.ServiceConfig.HealthCheckConfig - (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue - (*durationpb.Duration)(nil), // 24: google.protobuf.Duration - (*wrapperspb.UInt32Value)(nil), // 25: google.protobuf.UInt32Value - (*wrapperspb.StringValue)(nil), // 26: google.protobuf.StringValue - (code.Code)(0), // 27: google.rpc.Code + (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type)(0), // 0: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type + (ServiceConfig_LoadBalancingPolicy)(0), // 1: grpc.service_config.ServiceConfig.LoadBalancingPolicy + (*MethodConfig)(nil), // 2: grpc.service_config.MethodConfig + (*PickFirstConfig)(nil), // 3: grpc.service_config.PickFirstConfig + (*RoundRobinConfig)(nil), // 4: grpc.service_config.RoundRobinConfig + (*GrpcLbConfig)(nil), // 5: grpc.service_config.GrpcLbConfig + (*PriorityLoadBalancingPolicyConfig)(nil), // 6: grpc.service_config.PriorityLoadBalancingPolicyConfig + (*WeightedTargetLoadBalancingPolicyConfig)(nil), // 7: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig + (*CdsConfig)(nil), // 8: grpc.service_config.CdsConfig + (*XdsClusterResolverLoadBalancingPolicyConfig)(nil), // 9: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + (*XdsClusterImplLoadBalancingPolicyConfig)(nil), // 10: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + (*EdsLoadBalancingPolicyConfig)(nil), // 11: grpc.service_config.EdsLoadBalancingPolicyConfig + (*RingHashLoadBalancingConfig)(nil), // 12: grpc.service_config.RingHashLoadBalancingConfig + (*LrsLoadBalancingPolicyConfig)(nil), // 
13: grpc.service_config.LrsLoadBalancingPolicyConfig + (*XdsConfig)(nil), // 14: grpc.service_config.XdsConfig + (*LoadBalancingConfig)(nil), // 15: grpc.service_config.LoadBalancingConfig + (*ServiceConfig)(nil), // 16: grpc.service_config.ServiceConfig + (*MethodConfig_Name)(nil), // 17: grpc.service_config.MethodConfig.Name + (*MethodConfig_RetryPolicy)(nil), // 18: grpc.service_config.MethodConfig.RetryPolicy + (*MethodConfig_HedgingPolicy)(nil), // 19: grpc.service_config.MethodConfig.HedgingPolicy + (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 20: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + nil, // 21: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 22: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + nil, // 23: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 24: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 26: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + (*ServiceConfig_RetryThrottlingPolicy)(nil), // 27: grpc.service_config.ServiceConfig.RetryThrottlingPolicy + (*ServiceConfig_HealthCheckConfig)(nil), // 28: grpc.service_config.ServiceConfig.HealthCheckConfig + (*wrapperspb.BoolValue)(nil), // 29: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 30: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 31: google.protobuf.UInt32Value + (*wrapperspb.StringValue)(nil), // 32: google.protobuf.StringValue + (code.Code)(0), // 33: google.rpc.Code } var file_grpc_service_config_service_config_proto_depIdxs = []int32{ - 13, // 0: grpc.service_config.MethodConfig.name:type_name 
-> grpc.service_config.MethodConfig.Name - 23, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue - 24, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration - 25, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value - 25, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value - 14, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy - 15, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy - 17, // 7: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 19, // 8: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 11, // 9: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 11, // 10: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 11, // 11: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 26, // 12: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 26, // 13: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 14: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 11, // 15: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 16: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> 
grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 11, // 17: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 18: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig - 3, // 19: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig - 6, // 20: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig - 4, // 21: grpc.service_config.LoadBalancingConfig.priority:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig - 5, // 22: grpc.service_config.LoadBalancingConfig.weighted_target:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 7, // 23: grpc.service_config.LoadBalancingConfig.cds:type_name -> grpc.service_config.CdsConfig - 9, // 24: grpc.service_config.LoadBalancingConfig.eds:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 10, // 25: grpc.service_config.LoadBalancingConfig.lrs:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig - 8, // 26: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 8, // 27: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig - 0, // 28: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 11, // 29: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 1, // 30: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 21, // 31: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 22, // 32: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 24, // 33: 
grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 24, // 34: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 27, // 35: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 24, // 36: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 27, // 37: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 11, // 38: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 16, // 39: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 11, // 40: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 41: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 26, // 42: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 17, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name + 29, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue + 30, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration + 31, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value + 31, // 4: 
grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value + 18, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy + 19, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy + 15, // 7: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 21, // 8: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + 23, // 9: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + 24, // 10: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + 15, // 11: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig + 32, // 12: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 31, // 13: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 25, // 14: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + 15, // 15: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 32, // 16: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 15, // 17: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 15, // 18: 
grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 26, // 19: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + 15, // 20: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 15, // 21: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 15, // 22: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig + 32, // 23: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 3, // 24: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig + 4, // 25: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig + 5, // 26: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig + 6, // 27: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig + 7, // 28: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig + 8, // 29: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig + 9, // 30: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + 10, // 31: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + 11, // 32: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig + 12, // 33: 
grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig + 13, // 34: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig + 14, // 35: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig + 14, // 36: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig + 1, // 37: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy + 15, // 38: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig + 2, // 39: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig + 27, // 40: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy + 28, // 41: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig + 30, // 42: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration + 30, // 43: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration + 33, // 44: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code + 30, // 45: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration + 33, // 46: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code + 15, // 47: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig + 20, // 48: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + 15, // 49: 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 22, // 50: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + 32, // 51: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 31, // 52: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 0, // 53: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type + 32, // 54: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue + 55, // [55:55] is the sub-list for method output_type + 55, // [55:55] is the sub-list for method input_type + 55, // [55:55] is the sub-list for extension type_name + 55, // [55:55] is the sub-list for extension extendee + 0, // [0:55] is the sub-list for field type_name } func init() { file_grpc_service_config_service_config_proto_init() } @@ -2177,7 +2805,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig); i { + switch v := v.(*GrpcLbConfig); i { case 0: return &v.state case 1: @@ -2189,7 +2817,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedTargetLoadBalancingPolicyConfig); i { + switch v := v.(*PriorityLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2201,7 +2829,7 @@ func 
file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLbConfig); i { + switch v := v.(*WeightedTargetLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2225,7 +2853,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsConfig); i { + switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2237,7 +2865,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EdsLoadBalancingPolicyConfig); i { + switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2249,7 +2877,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LrsLoadBalancingPolicyConfig); i { + switch v := v.(*EdsLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2261,7 +2889,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancingConfig); i { + switch v := v.(*RingHashLoadBalancingConfig); i { case 0: return &v.state case 1: @@ -2273,7 +2901,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig); i { + switch v := v.(*LrsLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2285,7 +2913,7 @@ func 
file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_Name); i { + switch v := v.(*XdsConfig); i { case 0: return &v.state case 1: @@ -2297,7 +2925,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_RetryPolicy); i { + switch v := v.(*LoadBalancingConfig); i { case 0: return &v.state case 1: @@ -2309,7 +2937,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_HedgingPolicy); i { + switch v := v.(*ServiceConfig); i { case 0: return &v.state case 1: @@ -2321,7 +2949,19 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { + switch v := v.(*MethodConfig_Name); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodConfig_RetryPolicy); i { case 0: return &v.state case 1: @@ -2333,6 +2973,30 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodConfig_HedgingPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[18].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { case 0: return &v.state @@ -2344,7 +3008,31 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig_DropCategory); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LrsLoadBalancingPolicyConfig_Locality); i { case 0: return &v.state @@ -2356,7 +3044,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_RetryThrottlingPolicy); i { case 0: return &v.state @@ -2368,7 +3056,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - 
file_grpc_service_config_service_config_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_HealthCheckConfig); i { case 0: return &v.state @@ -2385,15 +3073,18 @@ func file_grpc_service_config_service_config_proto_init() { (*MethodConfig_RetryPolicy_)(nil), (*MethodConfig_HedgingPolicy_)(nil), } - file_grpc_service_config_service_config_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_grpc_service_config_service_config_proto_msgTypes[13].OneofWrappers = []interface{}{ (*LoadBalancingConfig_PickFirst)(nil), (*LoadBalancingConfig_RoundRobin)(nil), (*LoadBalancingConfig_Grpclb)(nil), - (*LoadBalancingConfig_Priority)(nil), - (*LoadBalancingConfig_WeightedTarget)(nil), - (*LoadBalancingConfig_Cds)(nil), - (*LoadBalancingConfig_Eds)(nil), - (*LoadBalancingConfig_Lrs)(nil), + (*LoadBalancingConfig_PriorityExperimental)(nil), + (*LoadBalancingConfig_WeightedTargetExperimental)(nil), + (*LoadBalancingConfig_CdsExperimental)(nil), + (*LoadBalancingConfig_XdsClusterResolverExperimental)(nil), + (*LoadBalancingConfig_XdsClusterImplExperimental)(nil), + (*LoadBalancingConfig_EdsExperimental)(nil), + (*LoadBalancingConfig_RingHashExperimental)(nil), + (*LoadBalancingConfig_LrsExperimental)(nil), (*LoadBalancingConfig_Xds)(nil), (*LoadBalancingConfig_XdsExperimental)(nil), } @@ -2402,8 +3093,8 @@ func file_grpc_service_config_service_config_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_service_config_service_config_proto_rawDesc, - NumEnums: 1, - NumMessages: 22, + NumEnums: 2, + NumMessages: 27, NumExtensions: 0, NumServices: 0, }, From aad573d86b2d03258984e4bee507307e1671cfc0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 18 Jan 2022 08:45:23 -0800 Subject: [PATCH 396/998] rls: LB policy implementation (#4992) --- 
balancer/rls/internal/balancer.go | 558 ++++++++++++- balancer/rls/internal/balancer_test.go | 766 ++++++++++++++++++ balancer/rls/internal/builder.go | 45 - balancer/rls/internal/control_channel_test.go | 9 +- balancer/rls/internal/helpers_test.go | 93 ++- balancer/rls/internal/picker.go | 395 +++++++++ balancer/rls/internal/picker_test.go | 722 +++++++++++++++++ .../testutils/fakeserver/fakeserver.go | 109 --- 8 files changed, 2504 insertions(+), 193 deletions(-) create mode 100644 balancer/rls/internal/balancer_test.go delete mode 100644 balancer/rls/internal/builder.go create mode 100644 balancer/rls/internal/picker.go create mode 100644 balancer/rls/internal/picker_test.go delete mode 100644 balancer/rls/internal/testutils/fakeserver/fakeserver.go diff --git a/balancer/rls/internal/balancer.go b/balancer/rls/internal/balancer.go index e5985eeee354..9a53edb8db1d 100644 --- a/balancer/rls/internal/balancer.go +++ b/balancer/rls/internal/balancer.go @@ -16,39 +16,571 @@ * */ +// Package rls implements the RLS LB policy. package rls import ( + "encoding/json" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/buffer" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" ) var ( - _ balancer.Balancer = (*rlsBalancer)(nil) - logger = grpclog.Component("rls") + + // Below defined vars for overriding in unit tests. + + // Default exponential backoff strategy for data cache entries. + defaultBackoffStrategy = backoff.Strategy(backoff.DefaultExponential) + // Default frequency for data cache purging. + periodicCachePurgeFreq = time.Minute + // We want every cache entry to live in the cache for at least this + // duration. 
If we encounter a cache entry whose minimum expiration time is + // in the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases where + // the cache is too small, when we receive an RLS Response, we keep the + // resulting cache entry around long enough for the pending incoming + // requests to be re-processed through the new Picker. If we didn't do this, + // then we'd risk throwing away each RLS response as we receive it, in which + // case we would fail to actually route any of our incoming requests. + minEvictDuration = 5 * time.Second + + // Following functions are no-ops in actual code, but can be overridden in + // tests to give tests visibility into exactly when certain events happen. + clientConnUpdateHook = func() {} + dataCachePurgeHook = func() {} + resetBackoffHook = func() {} ) +const balancerName = "rls_experimental" + +func init() { + balancer.Register(&rlsBB{}) +} + +type rlsBB struct{} + +func (rlsBB) Name() string { + return balancerName +} + +func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + lb := &rlsBalancer{ + done: grpcsync.NewEvent(), + cc: cc, + bopts: opts, + lbCfg: &lbConfig{}, + pendingMap: make(map[cacheKey]*backoffState), + childPolicies: make(map[string]*childPolicyWrapper), + ccUpdateCh: make(chan *balancer.ClientConnState, 1), + childPolicyStateUpdateCh: buffer.NewUnbounded(), + connectivityStateCh: make(chan struct{}), + } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) + lb.dataCache = newDataCache(maxCacheSize, lb.logger) + lb.bg = balancergroup.New(cc, opts, lb, lb.logger) + lb.bg.Start() + go lb.run() + return lb +} + // rlsBalancer implements the RLS LB policy. 
-type rlsBalancer struct{} +type rlsBalancer struct { + done *grpcsync.Event + cc balancer.ClientConn + bopts balancer.BuildOptions + logger *internalgrpclog.PrefixLogger + + // If both cacheMu and stateMu need to be acquired, the former must be + // acquired first to prevent a deadlock. This order restriction is due to the + // fact that in places where we need to acquire both the locks, we always + // start off reading the cache. + + // cacheMu guards access to the data cache and pending requests map. + cacheMu sync.RWMutex + dataCache *dataCache // Cache of RLS data. + pendingMap map[cacheKey]*backoffState // Map of pending RLS requests. + + // stateMu guards access to all LB policy state. + stateMu sync.Mutex + lbCfg *lbConfig // Most recently received service config. + childPolicyBuilder balancer.Builder // Cached child policy builder. + resolverState resolver.State // Cached resolver state. + ctrlCh *controlChannel // Control channel to the RLS server. + bg *balancergroup.BalancerGroup + childPolicies map[string]*childPolicyWrapper + defaultPolicy *childPolicyWrapper + // A reference to the most recent picker sent to gRPC as part of a state + // update is cached in this field so that we can release the reference to the + // default child policy wrapper when a new picker is created. See + // sendNewPickerLocked() for details. + lastPicker *rlsPicker + + // Channels on which updates are received or pushed. + ccUpdateCh chan *balancer.ClientConnState + childPolicyStateUpdateCh *buffer.Unbounded // idAndState from child policy. + connectivityStateCh chan struct{} // signalled when control channel becomes READY again. +} -func (lb *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - logger.Fatal("rls: UpdateClientConnState is not yet unimplemented") +// run is a long-running goroutine which handles all the updates that the +// balancer wishes to handle. 
The appropriate updateHandler will push the update +// on to a channel that this goroutine will select on, thereby the handling of +// the update will happen asynchronously. +func (b *rlsBalancer) run() { + go b.purgeDataCache() + for { + select { + case u := <-b.ccUpdateCh: + b.handleClientConnUpdate(u) + case u := <-b.childPolicyStateUpdateCh.Get(): + update := u.(idAndState) + b.childPolicyStateUpdateCh.Load() + b.handleChildPolicyStateUpdate(update.id, update.state) + case <-b.connectivityStateCh: + b.logger.Infof("Resetting backoff state after control channel getting back to READY") + b.cacheMu.Lock() + updatePicker := b.dataCache.resetBackoffState(&backoffState{bs: defaultBackoffStrategy}) + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + resetBackoffHook() + case <-b.done.Done(): + return + } + } +} + +// purgeDataCache is a long-running goroutine which periodically deletes expired +// entries. An expired entry is one for which both the expiryTime and +// backoffExpiryTime are in the past. +func (b *rlsBalancer) purgeDataCache() { + ticker := time.NewTicker(periodicCachePurgeFreq) + defer ticker.Stop() + + for { + select { + case <-b.done.Done(): + return + case <-ticker.C: + b.cacheMu.Lock() + updatePicker := b.dataCache.evictExpiredEntries() + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + dataCachePurgeHook() + } + } +} + +func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + // Remove unprocessed update from the channel, if one exists, before pushing + // the most recent one. + select { + case <-b.ccUpdateCh: + default: + } + b.ccUpdateCh <- &ccs return nil } -func (lb *rlsBalancer) ResolverError(error) { - logger.Fatal("rls: ResolverError is not yet unimplemented") +// handleClientConnUpdate handles updates to the service config. +// +// Invoked from the run() goroutine and this will attempt to grab the mutex. 
+func (b *rlsBalancer) handleClientConnUpdate(ccs *balancer.ClientConnState) { + if b.done.HasFired() { + b.logger.Warningf("Received service config after balancer close") + return + } + + b.stateMu.Lock() + defer b.stateMu.Unlock() + newCfg := ccs.BalancerConfig.(*lbConfig) + if b.lbCfg.Equal(newCfg) { + b.logger.Infof("New service config matches existing config") + return + } + + // When the RLS server name changes, the old control channel needs to be + // swapped out for a new one. All state associated with the throttling + // algorithm is stored on a per-control-channel basis; when we swap out + // channels, we also swap out the throttling state. + b.handleControlChannelUpdate(newCfg) + + // If the new config changes the size of the data cache, we might have to + // evict entries to get the cache size down to the newly specified size. + if newCfg.cacheSizeBytes != b.lbCfg.cacheSizeBytes { + b.dataCache.resize(newCfg.cacheSizeBytes) + } + + // Any changes to child policy name or configuration needs to be handled by + // either creating new child policies or pushing updates to existing ones. + b.resolverState = ccs.ResolverState + b.handleChildPolicyConfigUpdate(newCfg, ccs) + + // Update the copy of the config in the LB policy and send a new picker. + b.lbCfg = newCfg + b.sendNewPickerLocked() + + clientConnUpdateHook() +} + +// handleControlChannelUpdate handles updates to service config fields which +// influence the control channel to the RLS server. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleControlChannelUpdate(newCfg *lbConfig) { + if newCfg.lookupService == b.lbCfg.lookupService && newCfg.lookupServiceTimeout == b.lbCfg.lookupServiceTimeout { + return + } + + // Create a new control channel and close the existing one. 
+ b.logger.Infof("Creating control channel to RLS server at: %v", newCfg.lookupService) + ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.lookupServiceTimeout, b.bopts, b.connectivityStateCh) + if err != nil { + // This is very uncommon and usually represents a non-transient error. + // There is not much we can do here other than wait for another update + // which might fix things. + b.logger.Errorf("Failed to create control channel to %q: %v", newCfg.lookupService, err) + return + } + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.ctrlCh = ctrlCh } -func (lb *rlsBalancer) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) { - logger.Fatal("rls: UpdateSubConnState is not yet implemented") +// handleChildPolicyConfigUpdate handles updates to service config fields which +// influence child policy configuration. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleChildPolicyConfigUpdate(newCfg *lbConfig, ccs *balancer.ClientConnState) { + // Update child policy builder first since other steps are dependent on this. + if b.childPolicyBuilder == nil || b.childPolicyBuilder.Name() != newCfg.childPolicyName { + b.logger.Infof("Child policy changed to %q", newCfg.childPolicyName) + b.childPolicyBuilder = balancer.Get(newCfg.childPolicyName) + for _, cpw := range b.childPolicies { + // If the child policy has changed, we need to remove the old policy + // from the BalancerGroup and add a new one. The BalancerGroup takes + // care of closing the old one in this case. + b.bg.Remove(cpw.target) + b.bg.Add(cpw.target, b.childPolicyBuilder) + } + } + + configSentToDefault := false + if b.lbCfg.defaultTarget != newCfg.defaultTarget { + // If the default target has changed, create a new childPolicyWrapper for + // the new target if required. If a new wrapper is created, add it to the + // childPolicies map and the BalancerGroup. 
+ b.logger.Infof("Default target in LB config changing from %q to %q", b.lbCfg.defaultTarget, newCfg.defaultTarget) + cpw := b.childPolicies[newCfg.defaultTarget] + if cpw == nil { + cpw = newChildPolicyWrapper(newCfg.defaultTarget) + b.childPolicies[newCfg.defaultTarget] = cpw + b.bg.Add(newCfg.defaultTarget, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", newCfg.defaultTarget) + } + if err := b.buildAndPushChildPolicyConfigs(newCfg.defaultTarget, newCfg, ccs); err != nil { + cpw.lamify(err) + } + + // If an old default exists, release its reference. If this was the last + // reference, remove the child policy from the BalancerGroup and remove the + // corresponding entry the childPolicies map. + if b.defaultPolicy != nil { + if b.defaultPolicy.releaseRef() { + delete(b.childPolicies, b.lbCfg.defaultTarget) + b.bg.Remove(b.defaultPolicy.target) + } + } + b.defaultPolicy = cpw + configSentToDefault = true + } + + // No change in configuration affecting child policies. Return early. + if b.lbCfg.childPolicyName == newCfg.childPolicyName && b.lbCfg.childPolicyTargetField == newCfg.childPolicyTargetField && childPolicyConfigEqual(b.lbCfg.childPolicyConfig, newCfg.childPolicyConfig) { + return + } + + // If fields affecting child policy configuration have changed, the changes + // are pushed to the childPolicyWrapper which handles them appropriately. + for _, cpw := range b.childPolicies { + if configSentToDefault && cpw.target == newCfg.defaultTarget { + // Default target has already been taken care of. + continue + } + if err := b.buildAndPushChildPolicyConfigs(cpw.target, newCfg, ccs); err != nil { + cpw.lamify(err) + } + } } -func (lb *rlsBalancer) Close() { - logger.Fatal("rls: Close is not yet implemented") +// buildAndPushChildPolicyConfigs builds the final child policy configuration by +// adding the `targetField` to the base child policy configuration received in +// RLS LB policy configuration. 
The `targetField` is set to target and +// configuration is pushed to the child policy through the BalancerGroup. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) buildAndPushChildPolicyConfigs(target string, newCfg *lbConfig, ccs *balancer.ClientConnState) error { + jsonTarget, err := json.Marshal(target) + if err != nil { + return fmt.Errorf("failed to marshal child policy target %q: %v", target, err) + } + + config := newCfg.childPolicyConfig + targetField := newCfg.childPolicyTargetField + config[targetField] = jsonTarget + jsonCfg, err := json.Marshal(config) + if err != nil { + return fmt.Errorf("failed to marshal child policy config %+v: %v", config, err) + } + + parser, _ := b.childPolicyBuilder.(balancer.ConfigParser) + parsedCfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("childPolicy config parsing failed: %v", err) + } + + state := balancer.ClientConnState{ResolverState: ccs.ResolverState, BalancerConfig: parsedCfg} + b.logger.Infof("Pushing new state to child policy %q: %+v", target, state) + if err := b.bg.UpdateClientConnState(target, state); err != nil { + b.logger.Warningf("UpdateClientConnState(%q, %+v) failed : %v", target, ccs, err) + } + return nil +} + +func (b *rlsBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *rlsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.bg.UpdateSubConnState(sc, state) +} + +func (b *rlsBalancer) Close() { + b.done.Fire() + + b.stateMu.Lock() + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.bg.Close() + b.stateMu.Unlock() + + b.cacheMu.Lock() + b.dataCache.stop() + b.cacheMu.Unlock() +} + +func (b *rlsBalancer) ExitIdle() { + b.bg.ExitIdle() +} + +// sendNewPickerLocked pushes a new picker on to the channel. 
+// +// +// Note that regardless of what connectivity state is reported, the policy will +// return its own picker, and not a picker that unconditionally queues +// (typically used for IDLE or CONNECTING) or a picker that unconditionally +// fails (typically used for TRANSIENT_FAILURE). This is required because, +// irrespective of the connectivity state, we need to able to perform RLS +// lookups for incoming RPCs and affect the status of queued RPCs based on the +// receipt of RLS responses. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) sendNewPickerLocked() { + aggregatedState := b.aggregatedConnectivityState() + + // Acquire a separate reference for the picker. This is required to ensure + // that the wrapper held by the old picker is not closed when the default + // target changes in the config, and a new wrapper is created for the new + // default target. See handleChildPolicyConfigUpdate() for how config changes + // affecting the default target are handled. + if b.defaultPolicy != nil { + b.defaultPolicy.acquireRef() + } + picker := &rlsPicker{ + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint, + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, + } + picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) + state := balancer.State{ + ConnectivityState: aggregatedState, + Picker: picker, + } + b.logger.Infof("New balancer.State: %+v", state) + b.cc.UpdateState(state) + + if b.lastPicker != nil { + if b.defaultPolicy != nil { + b.defaultPolicy.releaseRef() + } + } + b.lastPicker = picker +} + +func (b *rlsBalancer) sendNewPicker() { + b.stateMu.Lock() + b.sendNewPickerLocked() + b.stateMu.Unlock() +} + +// The aggregated connectivity state reported is determined as follows: +// - If there is at least one child policy in state READY, the connectivity +// state is READY. 
+// - Otherwise, if there is at least one child policy in state CONNECTING, the +// connectivity state is CONNECTING. +// - Otherwise, if there is at least one child policy in state IDLE, the +// connectivity state is IDLE. +// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the +// connectivity state is TRANSIENT_FAILURE. +// +// If the RLS policy has no child policies and no configured default target, +// then we will report connectivity state IDLE. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) aggregatedConnectivityState() connectivity.State { + if len(b.childPolicies) == 0 && b.lbCfg.defaultTarget == "" { + return connectivity.Idle + } + + var readyN, connectingN, idleN int + for _, cpw := range b.childPolicies { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + switch state.ConnectivityState { + case connectivity.Ready: + readyN++ + case connectivity.Connecting: + connectingN++ + case connectivity.Idle: + idleN++ + } + } + + switch { + case readyN > 0: + return connectivity.Ready + case connectingN > 0: + return connectivity.Connecting + case idleN > 0: + return connectivity.Idle + default: + return connectivity.TransientFailure + } +} + +// idAndState wraps a child policy id and its state update. +type idAndState struct { + id string + state balancer.State +} + +// UpdateState is a implementation of the balancergroup.BalancerStateAggregator +// interface. The actual state aggregation functionality is handled +// asynchronously. This method only pushes the state update on to channel read +// and dispatched by the run() goroutine. +func (b *rlsBalancer) UpdateState(id string, state balancer.State) { + b.childPolicyStateUpdateCh.Put(idAndState{id: id, state: state}) +} + +// handleChildPolicyStateUpdate provides the state aggregator functionality for +// the BalancerGroup. +// +// This method is invoked by the BalancerGroup whenever a child policy sends a +// state update. 
We cache the child policy's connectivity state and picker for +// two reasons: +// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states +// other than READY +// - to delegate picks to child policies +func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer.State) { + b.stateMu.Lock() + defer b.stateMu.Unlock() + + cpw := b.childPolicies[id] + if cpw == nil { + // All child policies start with an entry in the map. If ID is not in + // map, it's either been removed, or never existed. + b.logger.Warningf("Received state update %+v for missing child policy %q", newState, id) + return + } + + oldState := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + if oldState.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting { + // Ignore state transitions from TRANSIENT_FAILURE to CONNECTING, and thus + // fail pending RPCs instead of queuing them indefinitely when all + // subChannels are failing, even if the subChannels are bouncing back and + // forth between CONNECTING and TRANSIENT_FAILURE. + return + } + atomic.StorePointer(&cpw.state, unsafe.Pointer(&newState)) + b.logger.Infof("Child policy %q has new state %+v", id, cpw.state) + b.sendNewPickerLocked() +} + +// acquireChildPolicyReferences attempts to acquire references to +// childPolicyWrappers corresponding to the passed in targets. If there is no +// childPolicyWrapper corresponding to one of the targets, a new one is created +// and added to the BalancerGroup. +func (b *rlsBalancer) acquireChildPolicyReferences(targets []string) []*childPolicyWrapper { + b.stateMu.Lock() + var newChildPolicies []*childPolicyWrapper + for _, target := range targets { + // If the target exists in the LB policy's childPolicies map. a new + // reference is taken here and added to the new list. 
+ if cpw := b.childPolicies[target]; cpw != nil { + cpw.acquireRef() + newChildPolicies = append(newChildPolicies, cpw) + continue + } + + // If the target does not exist in the child policy map, then a new + // child policy wrapper is created and added to the new list. + cpw := newChildPolicyWrapper(target) + b.childPolicies[target] = cpw + b.bg.Add(target, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", target) + newChildPolicies = append(newChildPolicies, cpw) + if err := b.buildAndPushChildPolicyConfigs(target, b.lbCfg, &balancer.ClientConnState{ + ResolverState: b.resolverState, + }); err != nil { + cpw.lamify(err) + } + } + b.stateMu.Unlock() + return newChildPolicies } -func (lb *rlsBalancer) ExitIdle() { - logger.Fatal("rls: ExitIdle is not yet implemented") +// releaseChildPolicyReferences releases references to childPolicyWrappers +// corresponding to the passed in targets. If the release reference was the last +// one, the child policy is removed from the BalancerGroup. +func (b *rlsBalancer) releaseChildPolicyReferences(targets []string) { + b.stateMu.Lock() + for _, target := range targets { + if cpw := b.childPolicies[target]; cpw.releaseRef() { + delete(b.childPolicies, cpw.target) + b.bg.Remove(cpw.target) + } + } + b.stateMu.Unlock() } diff --git a/balancer/rls/internal/balancer_test.go b/balancer/rls/internal/balancer_test.go new file mode 100644 index 000000000000..9607ac6773c3 --- /dev/null +++ b/balancer/rls/internal/balancer_test.go @@ -0,0 +1,766 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/test/e2e" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/testdata" + "google.golang.org/protobuf/types/known/durationpb" +) + +// TestConfigUpdate_ControlChannel tests the scenario where a config update +// changes the RLS server name. Verifies that the new control channel is created +// and the old one is closed. +func (s) TestConfigUpdate_ControlChannel(t *testing.T) { + // Start two RLS servers. + lis1 := newListenerWrapper(t, nil) + rlsServer1, rlsReqCh1 := setupFakeRLSServer(t, lis1) + lis2 := newListenerWrapper(t, nil) + rlsServer2, rlsReqCh2 := setupFakeRLSServer(t, lis2) + + // Build RLS service config with the RLS server pointing to the first one. + // Set a very low value for maxAge to ensure that the entry expires soon. 
+ rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer1.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a couple of test backends, and set up the fake RLS servers to return + // these as a target in the RLS response. + backendCh1, backendAddress1 := startBackend(t) + rlsServer1.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} + }) + backendCh2, backendAddress2 := startBackend(t) + rlsServer2.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh1) + + // Ensure a connection is established to the first RLS server. + val, err := lis1.newConnCh.Receive(ctx) + if err != nil { + t.Fatal("Timeout expired when waiting for LB policy to create control channel") + } + conn1 := val.(*connWrapper) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh1, true) + + // Change lookup_service field of the RLS config to point to the second one. + rlsConfig.RouteLookupConfig.LookupService = rlsServer2.Address + + // Push the config update through the manual resolver. 
+ scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + // Ensure a connection is established to the second RLS server. + if _, err := lis2.newConnCh.Receive(ctx); err != nil { + t.Fatal("Timeout expired when waiting for LB policy to create control channel") + } + + // Ensure the connection to the old one is closed. + if _, err := conn1.closeCh.Receive(ctx); err != nil { + t.Fatal("Timeout expired when waiting for LB policy to close control channel") + } + + // Make an RPC and expect it to get routed to the second test backend through + // the second RLS server. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh2) + verifyRLSRequest(t, rlsReqCh2, true) +} + +// TestConfigUpdate_ControlChannelWithCreds tests the scenario where a config +// update specified an RLS server name, and the parent ClientConn specifies +// transport credentials. The RLS server and the test backend are configured to +// accept those transport credentials. This test verifies that the parent +// channel credentials are correctly propagated to the control channel. +func (s) TestConfigUpdate_ControlChannelWithCreds(t *testing.T) { + serverCreds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) + if err != nil { + t.Fatalf("credentials.NewServerTLSFromFile(server1.pem, server1.key) = %v", err) + } + clientCreds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "") + if err != nil { + t.Fatalf("credentials.NewClientTLSFromFile(ca.pem) = %v", err) + } + + // Start an RLS server with the wrapped listener and credentials. 
+ lis := newListenerWrapper(t, nil) + rlsServer, rlsReqCh := setupFakeRLSServer(t, lis, grpc.Creds(serverCreds)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Start a test backend which uses the same credentials as the RLS server, + // and set up the fake RLS server to return this as the target in the RLS + // response. + backendCh, backendAddress := startBackend(t, grpc.Creds(serverCreds)) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial with credentials and expect the RLS server to receive the same. The + // server certificate used for the RLS server and the backend specifies a + // DNS SAN of "*.test.example.com". Hence we use a dial target which is a + // subdomain of the same here. + cc, err := grpc.Dial(r.Scheme()+":///rls.test.example.com", grpc.WithResolvers(r), grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Ensure a connection is established to the first RLS server. + if _, err := lis.newConnCh.Receive(ctx); err != nil { + t.Fatal("Timeout expired when waiting for LB policy to create control channel") + } +} + +// TestConfigUpdate_DefaultTarget tests the scenario where a config update +// changes the default target. 
Verifies that RPCs get routed to the new default +// target after the config has been applied. +func (s) TestConfigUpdate_DefaultTarget(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, _ := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Build RLS service config with a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + backendCh1, backendAddress1 := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = backendAddress1 + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh1) + + // Change default_target field of the RLS config. + backendCh2, backendAddress2 := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = backendAddress2 + + // Push the config update through the manual resolver. + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh2) +} + +// TestConfigUpdate_ChildPolicyConfigs verifies that config changes which affect +// child policy configuration are propagated correctly. +func (s) TestConfigUpdate_ChildPolicyConfigs(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. 
+ rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Start a default backend and a test backend. + _, defBackendAddress := startBackend(t) + testBackendCh, testBackendAddress := startBackend(t) + + // Set up the RLS server to respond with the test backend. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Set up a test balancer callback to push configs received by child policies. + defBackendConfigsCh := make(chan *e2e.RLSChildPolicyConfig, 1) + testBackendConfigsCh := make(chan *e2e.RLSChildPolicyConfig, 1) + bf := &e2e.BalancerFuncs{ + UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error { + switch cfg.Backend { + case defBackendAddress: + defBackendConfigsCh <- cfg + case testBackendAddress: + testBackendConfigsCh <- cfg + default: + t.Errorf("Received child policy configs for unknown target %q", cfg.Backend) + } + return nil + }, + } + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName := "test-child-policy" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName, bf) + t.Logf("Registered child policy with name %q", childPolicyName) + + // Build RLS service config with default target. + rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // At this point, the RLS LB policy should have received its config, and + // should have created a child policy for the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantCfg := &e2e.RLSChildPolicyConfig{Backend: defBackendAddress} + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the default target child policy to receive its config") + case gotCfg := <-defBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Default target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Make an RPC and ensure it gets routed to the test backend. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // As part of handling the above RPC, the RLS LB policy should have created + // a child policy for the test target. + wantCfg = &e2e.RLSChildPolicyConfig{Backend: testBackendAddress} + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the test target child policy to receive its config") + case gotCfg := <-testBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Test target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Push an RLS config update with a change in the child policy config. 
+ childPolicyBuilder := balancer.Get(childPolicyName) + childPolicyParser := childPolicyBuilder.(balancer.ConfigParser) + lbCfg, err := childPolicyParser.ParseConfig([]byte(`{"Random": "random"}`)) + if err != nil { + t.Fatal(err) + } + rlsConfig.ChildPolicy.Config = lbCfg + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + // Expect the child policy for the test backend to receive the update. + wantCfg = &e2e.RLSChildPolicyConfig{ + Backend: testBackendAddress, + Random: "random", + } + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the test target child policy to receive its config") + case gotCfg := <-testBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Test target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Expect the child policy for the default backend to receive the update. + wantCfg = &e2e.RLSChildPolicyConfig{ + Backend: defBackendAddress, + Random: "random", + } + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the default target child policy to receive its config") + case gotCfg := <-defBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Default target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } +} + +// TestConfigUpdate_ChildPolicyChange verifies that a child policy change is +// handled by closing the old balancer and creating a new one. +func (s) TestConfigUpdate_ChildPolicyChange(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Set up balancer callbacks. 
+ configsCh1 := make(chan *e2e.RLSChildPolicyConfig, 1) + closeCh1 := make(chan struct{}, 1) + bf := &e2e.BalancerFuncs{ + UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error { + configsCh1 <- cfg + return nil + }, + Close: func() { + closeCh1 <- struct{}{} + }, + } + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName1 := "test-child-policy-1" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName1, bf) + t.Logf("Registered child policy with name %q", childPolicyName1) + + // Build RLS service config with a dummy default target. + const defaultBackend = "default-backend" + rlsConfig := buildBasicRLSConfig(childPolicyName1, rlsServer.Address) + rlsConfig.RouteLookupConfig.DefaultTarget = defaultBackend + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // At this point, the RLS LB policy should have received its config, and + // should have created a child policy for the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantCfg := &e2e.RLSChildPolicyConfig{Backend: defaultBackend} + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the first child policy to receive its config") + case gotCfg := <-configsCh1: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("First child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Set up balancer callbacks for the second policy. 
+ configsCh2 := make(chan *e2e.RLSChildPolicyConfig, 1)
+ bf = &e2e.BalancerFuncs{
+ UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error {
+ configsCh2 <- cfg
+ return nil
+ },
+ }
+
+ // Register a second LB policy to act as the child policy for RLS LB policy.
+ childPolicyName2 := "test-child-policy-2" + t.Name()
+ e2e.RegisterRLSChildPolicy(childPolicyName2, bf)
+ t.Logf("Registered child policy with name %q", childPolicyName2)
+
+ // Push an RLS config update with a change in the child policy name.
+ rlsConfig.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: childPolicyName2}
+ scJSON, err := rlsConfig.ServiceConfigJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON)
+ r.UpdateState(resolver.State{ServiceConfig: sc})
+
+ // The above update should result in the first LB policy being shutdown and
+ // the second LB policy receiving a config update.
+ select {
+ case <-ctx.Done():
+ t.Fatal("Timed out when waiting for the first child policy to be shutdown")
+ case <-closeCh1:
+ }
+
+ select {
+ case <-ctx.Done():
+ t.Fatal("Timed out when waiting for the second child policy to receive its config")
+ case gotCfg := <-configsCh2:
+ if !cmp.Equal(gotCfg, wantCfg) {
+ t.Fatalf("Second child policy received config %+v, want %+v", gotCfg, wantCfg)
+ }
+ }
+}
+
+// TestConfigUpdate_BadChildPolicyConfigs tests the scenario where a config
+// update is rejected by the child policy. Verifies that the child policy
+// wrapper goes "lame" and the error from the child policy is reported back to
+// the caller of the RPC.
+func (s) TestConfigUpdate_BadChildPolicyConfigs(t *testing.T) {
+ // Start an RLS server and set the throttler to never throttle requests. 
+ rlsServer, rlsReqCh := setupFakeRLSServer(t, nil)
+ overrideAdaptiveThrottler(t, neverThrottlingThrottler())
+
+ // Set up the RLS server to respond with a bad target field which is expected
+ // to cause the child policy's ParseTarget to fail and should result in the LB
+ // policy creating a lame child policy wrapper.
+ rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse {
+ return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{e2e.RLSChildPolicyBadTarget}}}
+ })
+
+ // Build RLS service config with a default target. This default backend is
+ // expected to be healthy (even though we don't attempt to route RPCs to it)
+ // and ensures that the overall connectivity state of the RLS LB policy is not
+ // TRANSIENT_FAILURE. This is required to make sure that the pick for the bad
+ // child policy actually gets delegated to the child policy picker.
+ rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address)
+ _, addr := startBackend(t)
+ rlsConfig.RouteLookupConfig.DefaultTarget = addr
+
+ // Register a manual resolver and push the RLS service config through it.
+ r := startManualResolverWithConfig(t, rlsConfig)
+
+ cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials()))
+ if err != nil {
+ t.Fatalf("grpc.Dial() failed: %v", err)
+ }
+ defer cc.Close()
+
+ // Make an RPC and ensure that it fails with the expected error.
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+ defer cancel()
+ makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, e2e.ErrParseConfigBadTarget)
+
+ // Make sure an RLS request is sent out.
+ verifyRLSRequest(t, rlsReqCh, true)
+}
+
+// TestConfigUpdate_DataCacheSizeDecrease tests the scenario where a config
+// update decreases the data cache size. Verifies that entries are evicted from
+// the cache. 
+func (s) TestConfigUpdate_DataCacheSizeDecrease(t *testing.T) { + // Override the clientConn update hook to get notified. + clientConnUpdateDone := make(chan struct{}, 1) + origClientConnUpdateHook := clientConnUpdateHook + clientConnUpdateHook = func() { clientConnUpdateDone <- struct{}{} } + defer func() { clientConnUpdateHook = origClientConnUpdateHook }() + + // Override the cache entry size func, and always return 1. + origEntrySizeFunc := computeDataCacheEntrySize + computeDataCacheEntrySize = func(cacheKey, *cacheEntry) int64 { return 1 } + defer func() { computeDataCacheEntrySize = origEntrySizeFunc }() + + // Override the minEvictionDuration to ensure that when the config update + // reduces the cache size, the resize operation is not stopped because + // we find an entry whose minExpiryDuration has not elapsed. + origMinEvictDuration := minEvictDuration + minEvictDuration = time.Duration(0) + defer func() { minEvictDuration = origMinEvictDuration }() + + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName := "test-child-policy" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName, nil) + t.Logf("Registered child policy with name %q", childPolicyName) + + // Build RLS service config with header matchers. + rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address) + + // Start a couple of test backends, and set up the fake RLS server to return + // these as targets in the RLS response, based on request keys. 
+ backendCh1, backendAddress1 := startBackend(t) + backendCh2, backendAddress2 := startBackend(t) + rlsServer.SetResponseCallback(func(ctx context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + if req.KeyMap["k1"] == "v1" { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} + } + if req.KeyMap["k2"] == "v2" { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} + } + return &e2e.RouteLookupResponse{Err: errors.New("no keys in request metadata")} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + <-clientConnUpdateDone + + // Make an RPC and ensure it gets routed to the first backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + ctxOutgoing := metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh1) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC with a different set of headers. This will force the LB + // policy to send out a new RLS request, resulting in a new data cache + // entry. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n2", "v2") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh2) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // We currently have two cache entries. Setting the size to 1, will cause + // the entry corresponding to backend1 to be evicted. + rlsConfig.RouteLookupConfig.CacheSizeBytes = 1 + + // Push the config update through the manual resolver. 
+ scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + <-clientConnUpdateDone + + // Make an RPC to match the cache entry which got evicted above, and expect + // an RLS request to be made to fetch the targets. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh1) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) +} + +// TestDataCachePurging verifies that the LB policy periodically evicts expired +// entries from the data cache. +func (s) TestDataCachePurging(t *testing.T) { + // Override the frequency of the data cache purger to a small one. + origPurgeFreq := periodicCachePurgeFreq + periodicCachePurgeFreq = defaultTestShortTimeout + defer func() { periodicCachePurgeFreq = origPurgeFreq }() + + // Override the data cache purge hook to get notified. + dataCachePurgeDone := make(chan struct{}, 1) + origDataCachePurgeHook := dataCachePurgeHook + dataCachePurgeHook = func() { dataCachePurgeDone <- struct{}{} } + defer func() { dataCachePurgeHook = origDataCachePurgeHook }() + + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName := "test-child-policy" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName, nil) + t.Logf("Registered child policy with name %q", childPolicyName) + + // Build RLS service config with header matchers and lookupService pointing to + // the fake RLS server created above. Set a very low value for maxAge to + // ensure that the entry expires soon. 
+ rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(time.Millisecond) + + // Start a test backend, and set up the fake RLS server to return this as a + // target in the RLS response. + backendCh, backendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + ctxOutgoing := metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC with different headers. This will force the LB policy to + // send out a new RLS request, resulting in a new data cache entry. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n2", "v2") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Wait for the data cache purging to happen before proceeding. + <-dataCachePurgeDone + + // Perform the same RPCs again and verify that they result in RLS requests. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + // Make sure an RLS request is sent out. 
+ verifyRLSRequest(t, rlsReqCh, true)
+
+ // Make another RPC with different headers. This will force the LB policy to
+ // send out a new RLS request, resulting in a new data cache entry.
+ ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n2", "v2")
+ makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh)
+
+ // Make sure an RLS request is sent out.
+ verifyRLSRequest(t, rlsReqCh, true)
+}
+
+// TestControlChannelConnectivityStateMonitoring tests the scenario where the
+// control channel goes down and comes back up again and verifies that backoff
+// state is reset for cache entries in this scenario.
+func (s) TestControlChannelConnectivityStateMonitoring(t *testing.T) {
+ // Create a restartable listener which can close existing connections.
+ l, err := testutils.LocalTCPListener()
+ if err != nil {
+ t.Fatalf("net.Listen() failed: %v", err)
+ }
+ lis := testutils.NewRestartableListener(l)
+
+ // Start an RLS server with the restartable listener and set the throttler to
+ // never throttle requests.
+ rlsServer, rlsReqCh := setupFakeRLSServer(t, lis)
+ overrideAdaptiveThrottler(t, neverThrottlingThrottler())
+
+ // Override the reset backoff hook to get notified.
+ resetBackoffDone := make(chan struct{}, 1)
+ origResetBackoffHook := resetBackoffHook
+ resetBackoffHook = func() { resetBackoffDone <- struct{}{} }
+ defer func() { resetBackoffHook = origResetBackoffHook }()
+
+ // Override the backoff strategy to return a large backoff which
+ // will make sure the data cache entry remains in backoff for the
+ // duration of the test.
+ origBackoffStrategy := defaultBackoffStrategy
+ defaultBackoffStrategy = &fakeBackoffStrategy{backoff: defaultTestTimeout}
+ defer func() { defaultBackoffStrategy = origBackoffStrategy }()
+
+ // Register an LB policy to act as the child policy for RLS LB policy. 
+ childPolicyName := "test-child-policy" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName, nil) + t.Logf("Registered child policy with name %q", childPolicyName) + + // Build RLS service config with header matchers, and a very low value for + // maxAge to ensure that cache entries become invalid very soon. + rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a test backend, and set up the fake RLS server to return this as a + // target in the RLS response. + backendCh, backendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Stop the RLS server. + lis.Stop() + + // Make another RPC similar to the first one. Since the above cache entry + // would have expired by now, this should trigger another RLS request. And + // since the RLS server is down, RLS request will fail and the cache entry + // will enter backoff, and we have overridden the default backoff strategy to + // return a value which will keep this entry in backoff for the whole duration + // of the test. 
+ makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, nil)
+
+ // Restart the RLS server.
+ lis.Restart()
+
+ // When we closed the RLS server earlier, the existing transport to the RLS
+ // server would have closed, and the RLS control channel would have moved to
+ // TRANSIENT_FAILURE with a subConn backoff before moving to IDLE. This
+ // backoff will last for about a second. We need to keep retrying RPCs for the
+ // subConn to eventually come out of backoff and attempt to reconnect.
+ //
+ // Make this RPC with a different set of headers leading to the creation of
+ // a new cache entry and a new RLS request. This RLS request will also fail
+ // till the control channel moves back to READY. So, override the
+ // backoff strategy to perform a small backoff on this entry.
+ defaultBackoffStrategy = &fakeBackoffStrategy{backoff: defaultTestShortTimeout}
+ ctxOutgoing := metadata.AppendToOutgoingContext(ctx, "n1", "v1")
+ makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh)
+
+ <-resetBackoffDone
+
+ // The fact that the above RPC succeeded indicates that the control channel
+ // has moved back to READY. The connectivity state monitoring code should have
+ // realized this and should have reset all backoff timers (which in this case
+ // is the cache entry corresponding to the first RPC). Retrying that RPC now
+ // should succeed with an RLS request being sent out.
+ makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh)
+ verifyRLSRequest(t, rlsReqCh, true)
+}
diff --git a/balancer/rls/internal/builder.go b/balancer/rls/internal/builder.go
deleted file mode 100644
index d293ada31208..000000000000
--- a/balancer/rls/internal/builder.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package rls implements the RLS LB policy. -package rls - -import ( - "google.golang.org/grpc/balancer" -) - -const rlsBalancerName = "rls_experimental" - -func init() { - balancer.Register(rlsBB{}) -} - -// rlsBB helps build RLS load balancers and parse the service config to be -// passed to the RLS load balancer. -type rlsBB struct{} - -// Name returns the name of the RLS LB policy and helps implement the -// balancer.Balancer interface. -func (rlsBB) Name() string { - return rlsBalancerName -} - -func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - // TODO(easwars): Fix this once the LB policy implementation is pulled in. - return &rlsBalancer{} -} diff --git a/balancer/rls/internal/control_channel_test.go b/balancer/rls/internal/control_channel_test.go index 953f6531428c..876701de3326 100644 --- a/balancer/rls/internal/control_channel_test.go +++ b/balancer/rls/internal/control_channel_test.go @@ -108,13 +108,10 @@ func (s) TestLookupFailure(t *testing.T) { // TestLookupDeadlineExceeded tests the case where the RLS server does not // respond within the configured rpc timeout. func (s) TestLookupDeadlineExceeded(t *testing.T) { - // A unary interceptor which sleeps for long enough to cause lookup RPCs to - // exceed their deadline. - rlsReqCh := make(chan struct{}, 1) + // A unary interceptor which blocks until the test is done. 
interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - rlsReqCh <- struct{}{} - time.Sleep(2 * defaultTestShortTimeout) - return handler(ctx, req) + <-ctx.Done() + return nil, ctx.Err() } // Start an RLS server and set the throttler to never throttle. diff --git a/balancer/rls/internal/helpers_test.go b/balancer/rls/internal/helpers_test.go index bb5478a3fa57..7d3e9fa10fea 100644 --- a/balancer/rls/internal/helpers_test.go +++ b/balancer/rls/internal/helpers_test.go @@ -123,20 +123,36 @@ func (f *fakeBackoffStrategy) Backoff(retries int) time.Duration { // fakeThrottler is a fake implementation of the adaptiveThrottler interface. type fakeThrottler struct { - throttleFunc func() bool + throttleFunc func() bool // Fake throttler implementation. + throttleCh chan struct{} // Invocation of ShouldThrottle signals here. +} + +func (f *fakeThrottler) ShouldThrottle() bool { + select { + case <-f.throttleCh: + default: + } + f.throttleCh <- struct{}{} + + return f.throttleFunc() } -func (f *fakeThrottler) ShouldThrottle() bool { return f.throttleFunc() } func (f *fakeThrottler) RegisterBackendResponse(bool) {} // alwaysThrottlingThrottler returns a fake throttler which always throttles. func alwaysThrottlingThrottler() *fakeThrottler { - return &fakeThrottler{throttleFunc: func() bool { return true }} + return &fakeThrottler{ + throttleFunc: func() bool { return true }, + throttleCh: make(chan struct{}, 1), + } } // neverThrottlingThrottler returns a fake throttler which never throttles. 
func neverThrottlingThrottler() *fakeThrottler { - return &fakeThrottler{throttleFunc: func() bool { return false }} + return &fakeThrottler{ + throttleFunc: func() bool { return false }, + throttleCh: make(chan struct{}, 1), + } } // oneTimeAllowingThrottler returns a fake throttler which does not throttle the @@ -150,6 +166,7 @@ func oneTimeAllowingThrottler() *fakeThrottler { once.Do(func() { throttle = false }) return throttle }, + throttleCh: make(chan struct{}, 1), } } @@ -272,36 +289,72 @@ func startManualResolverWithConfig(t *testing.T, rlsConfig *e2e.RLSConfig) *manu // the EmptyCall RPC on the given ClientConn and verifies that it reaches a // backend. The latter is accomplished by listening on the provided channel // which gets pushed to whenever the backend in question gets an RPC. +// +// There are many instances where it can take a while before the attempted RPC +// reaches the expected backend. Examples include, but are not limited to: +// - control channel is changed in a config update. The RLS LB policy creates a +// new control channel, and sends a new picker to gRPC. But it takes a while +// before gRPC actually starts using the new picker. +// - test is waiting for a cache entry to expire after which we expect a +// different behavior because we have configured the fake RLS server to return +// different backends. +// +// Therefore, we do not return an error when the RPC fails. Instead, we wait for +// the context to expire before failing. func makeTestRPCAndExpectItToReachBackend(ctx context.Context, t *testing.T, cc *grpc.ClientConn, ch chan struct{}) { t.Helper() - client := testgrpc.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("TestService/EmptyCall() failed with error: %v", err) - } + // Drain the backend channel before performing the RPC to remove any + // notifications from previous RPCs. 
select { - case <-ctx.Done(): - t.Fatalf("Timeout when waiting for backend to receive RPC") case <-ch: + default: + } + + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to the given target: %v", err) + } + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + client := testgrpc.NewTestServiceClient(cc) + client.EmptyCall(sCtx, &testpb.Empty{}) + + select { + case <-sCtx.Done(): + case <-ch: + sCancel() + return + } } } // makeTestRPCAndVerifyError is a test helper function which makes the EmptyCall // RPC on the given ClientConn and verifies that the RPC fails with the given // status code and error. +// +// Similar to makeTestRPCAndExpectItToReachBackend, retries until expected +// outcome is reached or the provided context has expired. func makeTestRPCAndVerifyError(ctx context.Context, t *testing.T, cc *grpc.ClientConn, wantCode codes.Code, wantErr error) { t.Helper() - client := testgrpc.NewTestServiceClient(cc) - _, err := client.EmptyCall(ctx, &testpb.Empty{}) - if err == nil { - t.Fatal("TestService/EmptyCall() succeeded when expected to fail") - } - if code := status.Code(err); code != wantCode { - t.Fatalf("TestService/EmptyCall() returned code: %v, want: %v", code, wantCode) - } - if wantErr != nil && !strings.Contains(err.Error(), wantErr.Error()) { - t.Fatalf("TestService/EmptyCall() returned err: %v, want: %v", err, wantErr) + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for RPCs to fail with given error: %v", err) + } + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + client := testgrpc.NewTestServiceClient(cc) + _, err := client.EmptyCall(sCtx, &testpb.Empty{}) + + // If the RPC fails with the expected code and expected error message (if + // one was provided), we return. Else we retry after blocking for a little + // while to ensure that we don't keep blasting away with RPCs. 
+ if code := status.Code(err); code == wantCode { + if wantErr == nil || strings.Contains(err.Error(), wantErr.Error()) { + sCancel() + return + } + } + <-sCtx.Done() } } diff --git a/balancer/rls/internal/picker.go b/balancer/rls/internal/picker.go new file mode 100644 index 000000000000..bf1824ea0343 --- /dev/null +++ b/balancer/rls/internal/picker.go @@ -0,0 +1,395 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "errors" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" +) + +var ( + errRLSThrottled = errors.New("RLS call throttled at client side") + + // Function to compute data cache entry size. + computeDataCacheEntrySize = dcEntrySize +) + +// exitIdler wraps the only method on the BalancerGroup that the picker calls. +type exitIdler interface { + ExitIdleOne(id string) +} + +// rlsPicker selects the subConn to be used for a particular RPC. It does not +// manage subConns directly and delegates to pickers provided by child policies. +type rlsPicker struct { + // The keyBuilder map used to generate RLS keys for the RPC. This is built + // by the LB policy based on the received ServiceConfig. 
+ kbm keys.BuilderMap + // Endpoint from the user's original dial target. Used to set the `host_key` + // field in `extra_keys`. + origEndpoint string + + lb *rlsBalancer + + // The picker is given its own copy of the below fields from the RLS LB policy + // to avoid having to grab the mutex on the latter. + defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. + staleAge time.Duration // Cache stale age from LB config. + bg exitIdler + logger *internalgrpclog.PrefixLogger +} + +// Pick makes the routing decision for every outbound RPC. +func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + // Build the request's keys using the key builders from LB config. + md, _ := metadata.FromOutgoingContext(info.Ctx) + reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) + + // Grab a read-lock to perform a cache lookup. If it so happens that we need + // to write to the cache (if we have to send out an RLS request), we will + // release the read-lock and acquire a write-lock. + p.lb.cacheMu.RLock() + + // Lookup data cache and pending request map using request path and keys. + cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str} + dcEntry := p.lb.dataCache.getEntry(cacheKey) + pendingEntry := p.lb.pendingMap[cacheKey] + now := time.Now() + + switch { + // No data cache entry. No pending request. + case dcEntry == nil && pendingEntry == nil: + p.lb.cacheMu.RUnlock() + bs := &backoffState{bs: defaultBackoffStrategy} + return p.sendRequestAndReturnPick(cacheKey, bs, reqKeys.Map, info) + + // No data cache entry. Pending request exits. + case dcEntry == nil && pendingEntry != nil: + p.lb.cacheMu.RUnlock() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // Data cache hit. No pending request. 
+ case dcEntry != nil && pendingEntry == nil: + if dcEntry.expiryTime.After(now) { + if !dcEntry.staleTime.IsZero() && dcEntry.staleTime.Before(now) && dcEntry.backoffTime.Before(now) { + // Executing the proactive cache refresh in a goroutine simplifies + // acquiring and releasing of locks. + go func(bs *backoffState) { + p.lb.cacheMu.Lock() + // It is OK to ignore the return value which indicates if this request + // was throttled. This is an attempt to proactively refresh the cache, + // and it is OK for it to fail. + p.sendRouteLookupRequest(cacheKey, bs, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) + p.lb.cacheMu.Unlock() + }(dcEntry.backoffState) + } + // Delegate to child policies. + res, err := p.delegateToChildPolicies(dcEntry, info) + p.lb.cacheMu.RUnlock() + return res, err + } + + // We get here only if the data cache entry has expired. If entry is in + // backoff, delegate to default target or fail the pick. + if dcEntry.backoffState != nil && dcEntry.backoffTime.After(now) { + status := dcEntry.status + p.lb.cacheMu.RUnlock() + return p.useDefaultPickIfPossible(info, status) + } + + // We get here only if the entry has expired and is not in backoff. + bs := *dcEntry.backoffState + p.lb.cacheMu.RUnlock() + return p.sendRequestAndReturnPick(cacheKey, &bs, reqKeys.Map, info) + + // Data cache hit. Pending request exists. + default: + if dcEntry.expiryTime.After(now) { + res, err := p.delegateToChildPolicies(dcEntry, info) + p.lb.cacheMu.RUnlock() + return res, err + } + // Data cache entry has expired and pending request exists. Queue pick. + p.lb.cacheMu.RUnlock() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} + +// delegateToChildPolicies is a helper function which iterates through the list +// of child policy wrappers in a cache entry and attempts to find a child policy +// to which this RPC can be routed to. 
If there is no child policy in READY +// state, we delegate to the first child policy arbitrarily. +// +// Caller must hold at least a read-lock on p.lb.cacheMu. +func (p *rlsPicker) delegateToChildPolicies(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { + for _, cpw := range dcEntry.childPolicyWrappers { + ok, res, err := p.pickIfFeasible(cpw, info) + if ok { + return res, err + } + } + if len(dcEntry.childPolicyWrappers) != 0 { + state := (*balancer.State)(atomic.LoadPointer(&dcEntry.childPolicyWrappers[0].state)) + return state.Picker.Pick(info) + } + // In the unlikely event that we have a cache entry with no targets, we end up + // queueing the RPC. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// sendRequestAndReturnPick is called to send out an RLS request on the control +// channel. Since sending out an RLS request entails creating an entry in the +// pending request map, this method needs to acquire the write-lock on the +// cache. This also means that the caller must release the read-lock that they +// could have been holding. This means that things could have happened in +// between and therefore a fresh lookup on the cache needs to be performed here +// with the write-lock and all cases need to be handled. +// +// Acquires the write-lock on the cache. Caller must not hold p.lb.cacheMu. +func (p *rlsPicker) sendRequestAndReturnPick(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, info balancer.PickInfo) (balancer.PickResult, error) { + p.lb.cacheMu.Lock() + defer p.lb.cacheMu.Unlock() + + // We need to perform another cache lookup to ensure that things haven't + // changed since the last lookup. + dcEntry := p.lb.dataCache.getEntry(cacheKey) + pendingEntry := p.lb.pendingMap[cacheKey] + + // Existence of a pending map entry indicates that someone sent out a request + // before us and the response is pending. Skip sending a new request. 
+ // Piggyback on the existing one by queueing the pick. + if pendingEntry != nil { + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + + // If no data cache entry exists, it means that no one jumped in front of us. + // We need to send out an RLS request and queue the pick. + if dcEntry == nil { + throttled := p.sendRouteLookupRequest(cacheKey, bs, reqKeys, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + return p.useDefaultPickIfPossible(info, errRLSThrottled) + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + + // Existence of a data cache entry indicates either that someone sent out a + // request before us and received a response, or we got here in the first + // place because we found an expired entry in the data cache. + now := time.Now() + switch { + // Valid data cache entry. Delegate to its child policies. + case dcEntry.expiryTime.After(now): + return p.delegateToChildPolicies(dcEntry, info) + + // Entry is in backoff. Delegate to default target or fail the pick. + case dcEntry.backoffState != nil && dcEntry.backoffTime.After(now): + return p.useDefaultPickIfPossible(info, dcEntry.status) + + // Entry has expired, but is not in backoff. Send request and queue pick. + default: + throttled := p.sendRouteLookupRequest(cacheKey, bs, reqKeys, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + return p.useDefaultPickIfPossible(info, errRLSThrottled) + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} + +// useDefaultPickIfPossible is a helper method which delegates to the default +// target if one is configured, or fails the pick with the given error. 
+func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) { + if p.defaultPolicy != nil { + _, res, err := p.pickIfFeasible(p.defaultPolicy, info) + return res, err + } + return balancer.PickResult{}, errOnNoDefault +} + +// sendRouteLookupRequest adds an entry to the pending request map and sends out +// an RLS request using the passed in arguments. Returns a value indicating if +// the request was throttled by the client-side adaptive throttler. +// +// Caller must hold a write-lock on p.lb.cacheMu. +func (p *rlsPicker) sendRouteLookupRequest(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string) bool { + if p.lb.pendingMap[cacheKey] != nil { + return false + } + + p.lb.pendingMap[cacheKey] = bs + throttled := p.ctrlCh.lookup(reqKeys, reason, staleHeaders, func(targets []string, headerData string, err error) { + p.handleRouteLookupResponse(cacheKey, targets, headerData, err) + }) + if throttled { + delete(p.lb.pendingMap, cacheKey) + } + return throttled +} + +// pickIfFeasible determines if a pick can be delegated to child policy based on +// its connectivity state. 
+// - If state is CONNECTING, the pick is to be queued +// - If state is IDLE, the child policy is instructed to exit idle, and the pick +// is to be queued +// - If state is READY, pick it delegated to the child policy's picker +func (p *rlsPicker) pickIfFeasible(cpw *childPolicyWrapper, info balancer.PickInfo) (bool, balancer.PickResult, error) { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + switch state.ConnectivityState { + case connectivity.Connecting: + return true, balancer.PickResult{}, balancer.ErrNoSubConnAvailable + case connectivity.Idle: + p.bg.ExitIdleOne(cpw.target) + return true, balancer.PickResult{}, balancer.ErrNoSubConnAvailable + case connectivity.Ready: + r, e := state.Picker.Pick(info) + return true, r, e + } + return false, balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// handleRouteLookupResponse is the callback invoked by the control channel upon +// receipt of an RLS response. Modifies the data cache and pending requests map +// and sends a new picker. +// +// Acquires the write-lock on the cache. Caller must not hold p.lb.cacheMu. +func (p *rlsPicker) handleRouteLookupResponse(cacheKey cacheKey, targets []string, headerData string, err error) { + p.logger.Infof("Received RLS response for key %+v with targets %+v, headerData %q, err: %v", cacheKey, targets, headerData, err) + + p.lb.cacheMu.Lock() + defer func() { + // Pending request map entry is unconditionally deleted since the request is + // no longer pending. + p.logger.Infof("Removing pending request entry for key %+v", cacheKey) + delete(p.lb.pendingMap, cacheKey) + p.lb.sendNewPicker() + p.lb.cacheMu.Unlock() + }() + + // Lookup the data cache entry or create a new one. + dcEntry := p.lb.dataCache.getEntry(cacheKey) + if dcEntry == nil { + dcEntry = &cacheEntry{} + if _, ok := p.lb.dataCache.addEntry(cacheKey, dcEntry); !ok { + // This is a very unlikely case where we are unable to add a + // data cache entry. Log and leave. 
+ p.logger.Warningf("Failed to add data cache entry for %+v", cacheKey) + return + } + } + + // For failed requests, the data cache entry is modified as follows: + // - status is set to error returned from the control channel + // - current backoff state is available in the pending entry + // - `retries` field is incremented and + // - backoff state is moved to the data cache + // - backoffTime is set to the time indicated by the backoff state + // - backoffExpirationTime is set to twice the backoff time + // - backoffTimer is set to fire after backoffTime + // + // When a proactive cache refresh fails, this would leave the targets and the + // expiry time from the old entry unchanged. And this mean that the old valid + // entry would be used until expiration, and a new picker would be sent upon + // backoff expiry. + now := time.Now() + if err != nil { + dcEntry.status = err + pendingEntry := p.lb.pendingMap[cacheKey] + pendingEntry.retries++ + backoffTime := pendingEntry.bs.Backoff(pendingEntry.retries) + dcEntry.backoffState = pendingEntry + dcEntry.backoffTime = now.Add(backoffTime) + dcEntry.backoffExpiryTime = now.Add(2 * backoffTime) + if dcEntry.backoffState.timer != nil { + dcEntry.backoffState.timer.Stop() + } + dcEntry.backoffState.timer = time.AfterFunc(backoffTime, p.lb.sendNewPicker) + return + } + + // For successful requests, the cache entry is modified as follows: + // - childPolicyWrappers is set to point to the child policy wrappers + // associated with the targets specified in the received response + // - headerData is set to the value received in the response + // - expiryTime, stateTime and earliestEvictionTime are set + // - status is set to nil (OK status) + // - backoff state is cleared + p.setChildPolicyWrappersInCacheEntry(dcEntry, targets) + dcEntry.headerData = headerData + dcEntry.expiryTime = now.Add(p.maxAge) + if p.staleAge != 0 { + dcEntry.staleTime = now.Add(p.staleAge) + } + dcEntry.earliestEvictTime = now.Add(minEvictDuration) + 
dcEntry.status = nil + dcEntry.backoffState = &backoffState{bs: defaultBackoffStrategy} + dcEntry.backoffTime = time.Time{} + dcEntry.backoffExpiryTime = time.Time{} + p.lb.dataCache.updateEntrySize(dcEntry, computeDataCacheEntrySize(cacheKey, dcEntry)) +} + +// setChildPolicyWrappersInCacheEntry sets up the childPolicyWrappers field in +// the cache entry to point to the child policy wrappers for the targets +// specified in the RLS response. +// +// Caller must hold a write-lock on p.lb.cacheMu. +func (p *rlsPicker) setChildPolicyWrappersInCacheEntry(dcEntry *cacheEntry, newTargets []string) { + // If the childPolicyWrappers field is already pointing to the right targets, + // then the field's value does not need to change. + targetsChanged := true + func() { + if cpws := dcEntry.childPolicyWrappers; cpws != nil { + if len(newTargets) != len(cpws) { + return + } + for i, target := range newTargets { + if cpws[i].target != target { + return + } + } + targetsChanged = false + } + }() + if !targetsChanged { + return + } + + // If the childPolicyWrappers field is not already set to the right targets, + // then it must be reset. We construct a new list of child policies and + // then swap out the old list for the new one. + newChildPolicies := p.lb.acquireChildPolicyReferences(newTargets) + oldChildPolicyTargets := make([]string, len(dcEntry.childPolicyWrappers)) + for i, cpw := range dcEntry.childPolicyWrappers { + oldChildPolicyTargets[i] = cpw.target + } + p.lb.releaseChildPolicyReferences(oldChildPolicyTargets) + dcEntry.childPolicyWrappers = newChildPolicies +} + +func dcEntrySize(key cacheKey, entry *cacheEntry) int64 { + return int64(len(key.path) + len(key.keys) + len(entry.headerData)) +} diff --git a/balancer/rls/internal/picker_test.go b/balancer/rls/internal/picker_test.go new file mode 100644 index 000000000000..75991852d946 --- /dev/null +++ b/balancer/rls/internal/picker_test.go @@ -0,0 +1,722 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "context" + "errors" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/rls/internal/test/e2e" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/protobuf/types/known/durationpb" +) + +// Test verifies the scenario where there is no matching entry in the data cache +// and no pending request either, and the ensuing RLS request is throttled. +func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithDefaultTarget(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Build RLS service config with a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + defBackendCh, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, defBackendCh) + + // Make sure no RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, false) +} + +// Test verifies the scenario where there is no matching entry in the data cache +// and no pending request either, and the ensuing RLS request is throttled. +// There is no default target configured in the service config, so the RPC is +// expected to fail with an RLS throttled error. +func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithoutDefaultTarget(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Build an RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and expect it to fail with RLS throttled error. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, errRLSThrottled) + + // Make sure no RLS request is sent out. 
+ verifyRLSRequest(t, rlsReqCh, false) +} + +// Test verifies the scenario where there is no matching entry in the data cache +// and no pending request either, and the ensuing RLS request is not throttled. +// The RLS response does not contain any backends, so the RPC fails with a +// deadline exceeded error. +func (s) TestPick_DataCacheMiss_NoPendingEntry_NotThrottled(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build an RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and expect it to fail with deadline exceeded error. We use a + // smaller timeout to ensure that the test doesn't run very long. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) +} + +// Test verifies the scenario where there is no matching entry in the data +// cache, but there is a pending request. So, we expect no RLS request to be +// sent out. The pick should be queued and not delegated to the default target. 
+func (s) TestPick_DataCacheMiss_PendingEntryExists(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // A unary interceptor which blocks the RouteLookup RPC on the fake + // RLS server until the test is done. The first RPC by the client + // will cause the LB policy to send out an RLS request. This will + // also lead to creation of a pending entry, and further RPCs by the + // client should not result in RLS requests being sent out. + rlsReqCh := make(chan struct{}, 1) + interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + rlsReqCh <- struct{}{} + <-ctx.Done() + return nil, ctx.Err() + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config with an optional default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + if test.withDefaultTarget { + _, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and expect it to fail with deadline exceeded error. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC and expect it to fail the same way. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + + // Make sure no RLS request is sent out this time around. + verifyRLSRequest(t, rlsReqCh, false) + }) + } +} + +// Test verifies the scenario where there is a matching entry in the data cache +// which is valid and there is no pending request. The pick is expected to be +// delegated to the child policy. +func (s) TestPick_DataCacheHit_NoPendingEntry_ValidEntry(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build the RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Start a test backend, and setup the fake RLS server to return this as a + // target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. 
+ cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC and expect it to find the target in the data cache. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure no RLS request is sent out this time around. + verifyRLSRequest(t, rlsReqCh, false) +} + +// Test verifies the scenario where there is a matching entry in the data cache +// which is stale and there is no pending request. The pick is expected to be +// delegated to the child policy with a proactive cache refresh. +func (s) TestPick_DataCacheHit_NoPendingEntry_StaleEntry(t *testing.T) { + // We expect the same pick behavior (i.e delegated to the child policy) for + // a proactive refresh whether the control channel is throttled or not. + tests := []struct { + name string + throttled bool + }{ + { + name: "throttled", + throttled: true, + }, + { + name: "notThrottled", + throttled: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an RLS server and setup the throttler appropriately. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + var throttler *fakeThrottler + if test.throttled { + throttler = oneTimeAllowingThrottler() + overrideAdaptiveThrottler(t, throttler) + } else { + throttler = neverThrottlingThrottler() + overrideAdaptiveThrottler(t, throttler) + } + + // Build the RLS config without a default target. Set the stale age + // to a very low value to force entries to become stale quickly. 
+ rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(time.Minute) + rlsConfig.RouteLookupConfig.StaleAge = durationpb.New(defaultTestShortTimeout) + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // The cache entry has a large maxAge, but a small stateAge. We keep + // retrying until the cache entry becomes stale, in which case we expect a + // proactive cache refresh. + // + // If the control channel is not throttled, then we expect an RLS request + // to be sent out. If the control channel is throttled, we expect the fake + // throttler's channel to be signalled. + for { + // Make another RPC and expect it to find the target in the data cache. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + if !test.throttled { + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. 
+ case <-rlsReqCh: + return + } + } else { + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. + case <-throttler.throttleCh: + return + } + } + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which has expired and there is no pending request. +func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntry(t *testing.T) { + tests := []struct { + name string + throttled bool + withDefaultTarget bool + }{ + { + name: "throttledWithDefaultTarget", + throttled: true, + withDefaultTarget: true, + }, + { + name: "throttledWithoutDefaultTarget", + throttled: true, + withDefaultTarget: false, + }, + { + name: "notThrottled", + throttled: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an RLS server and setup the throttler appropriately. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + var throttler *fakeThrottler + if test.throttled { + throttler = oneTimeAllowingThrottler() + overrideAdaptiveThrottler(t, throttler) + } else { + throttler = neverThrottlingThrottler() + overrideAdaptiveThrottler(t, throttler) + } + + // Build the RLS config with a very low value for maxAge. This will + // ensure that cache entries become invalid very soon. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a default backend if needed. + var defBackendCh chan struct{} + if test.withDefaultTarget { + var defBackendAddress string + defBackendCh, defBackendAddress = startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. 
+ testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Keep retrying the RPC until the cache entry expires. Expected behavior + // is dependent on the scenario being tested. + switch { + case test.throttled && test.withDefaultTarget: + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, defBackendCh) + <-throttler.throttleCh + case test.throttled && !test.withDefaultTarget: + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, errRLSThrottled) + <-throttler.throttleCh + case !test.throttled: + for { + // The backend to which the RPC is routed does not change after the + // cache entry expires because the control channel is not throttled. + // So, we need to keep retrying until the cache entry expires, at + // which point we expect an RLS request to be sent out and the RPC to + // get routed to the same testBackend. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. 
+ case <-rlsReqCh: + return + } + } + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which has expired and is in backoff and there is no pending request. +func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntryInBackoff(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Override the backoff strategy to return a large backoff which + // will make sure the date cache entry remains in backoff for the + // duration of the test. + origBackoffStrategy := defaultBackoffStrategy + defaultBackoffStrategy = &fakeBackoffStrategy{backoff: defaultTestTimeout} + defer func() { defaultBackoffStrategy = origBackoffStrategy }() + + // Build the RLS config with a very low value for maxAge. This will + // ensure that cache entries become invalid very soon. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a default backend if needed. + var defBackendCh chan struct{} + if test.withDefaultTarget { + var defBackendAddress string + defBackendCh, defBackendAddress = startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Start a test backend, and set up the fake RLS server to return this as + // a target in the RLS response. 
+ testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Set up the fake RLS server to return errors. This will push the cache + // entry into backoff. + var rlsLastErr = errors.New("last RLS request failed") + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Err: rlsLastErr} + }) + + // Since the RLS server is now configured to return errors, this will push + // the cache entry into backoff. The pick will be delegated to the default + // backend if one exits, and will fail with the error returned by the RLS + // server otherwise. + if test.withDefaultTarget { + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, defBackendCh) + } else { + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unknown, rlsLastErr) + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which is stale and there is a pending request. 
+func (s) TestPick_DataCacheHit_PendingEntryExists_StaleEntry(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // A unary interceptor which does nothing on the first RPC, but + // blocks on subsequent RPCs on the fake RLS server until the test + // is done. Since we configure the LB policy with a really low value + // for stale age, this allows us to simulate the condition where the + // LB policy has a stale entry and a pending entry in the cache. + rlsReqCh := make(chan struct{}, 1) + i := 0 + interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + rlsReqCh <- struct{}{} + if i == 0 { + i++ + return handler(ctx, req) + } + <-ctx.Done() + return nil, ctx.Err() + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config with an optional default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + if test.withDefaultTarget { + _, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Low value for stale age to force entries to become stale quickly. + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(time.Minute) + rlsConfig.RouteLookupConfig.StaleAge = durationpb.New(defaultTestShortTimeout) + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. 
+ testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // The cache entry has a large maxAge, but a small stateAge. We keep + // retrying until the cache entry becomes stale, in which case we expect a + // proactive cache refresh. + for { + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. + case <-rlsReqCh: + return + } + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which is expired and there is a pending request. +func (s) TestPick_DataCacheHit_PendingEntryExists_ExpiredEntry(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // A unary interceptor which does nothing on the first RPC, but + // blocks on subsequent RPCs on the fake RLS server until the test + // is done. 
And since we configure the LB policy with a really low + // value for max age, this allows us to simulate the condition where + // the LB policy has an expired entry and a pending entry in the + // cache. + rlsReqCh := make(chan struct{}, 1) + i := 0 + interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + rlsReqCh <- struct{}{} + if i == 0 { + i++ + return handler(ctx, req) + } + <-ctx.Done() + return nil, ctx.Err() + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config with an optional default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + if test.withDefaultTarget { + _, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + // Set a low value for maxAge to ensure cache entries expire soon. + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // At this point, we have a cache entry with a small maxAge, and the RLS + // server is configured to block on further RLS requests. As we retry the + // RPC, at some point the cache entry would expire and force us to send an + // RLS request. But this request would exceed the deadline since the + // server blocks. + makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + verifyRLSRequest(t, rlsReqCh, true) + + // Another RPC at this point should find the pending entry and be queued. + // But since we pass a small deadline, this RPC should fail with a + // deadline exceeded error since the pending request does not return until + // the test is done. And since we have a pending entry, we expect no RLS + // request to be sent out. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + makeTestRPCAndVerifyError(sCtx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + verifyRLSRequest(t, rlsReqCh, false) + }) + } +} diff --git a/balancer/rls/internal/testutils/fakeserver/fakeserver.go b/balancer/rls/internal/testutils/fakeserver/fakeserver.go deleted file mode 100644 index 8554ffbf78bf..000000000000 --- a/balancer/rls/internal/testutils/fakeserver/fakeserver.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package fakeserver provides a fake implementation of the RouteLookupService, -// to be used in unit tests. -package fakeserver - -import ( - "context" - "errors" - "fmt" - "net" - "time" - - "google.golang.org/grpc" - rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/testutils" -) - -const ( - defaultDialTimeout = 5 * time.Second - defaultRPCTimeout = 5 * time.Second - defaultChannelBufferSize = 50 -) - -// Response wraps the response protobuf (xds/LRS) and error that the Server -// should send out to the client through a call to stream.Send() -type Response struct { - Resp *rlspb.RouteLookupResponse - Err error -} - -// Server is a fake implementation of RLS. It exposes channels to send/receive -// RLS requests and responses. -type Server struct { - rlsgrpc.UnimplementedRouteLookupServiceServer - RequestChan *testutils.Channel - ResponseChan chan Response - Address string -} - -// Start makes a new Server which uses the provided net.Listener. If lis is nil, -// it creates a new net.Listener on a local port. The returned cancel function -// should be invoked by the caller upon completion of the test. 
-func Start(lis net.Listener, opts ...grpc.ServerOption) (*Server, func(), error) { - if lis == nil { - var err error - lis, err = net.Listen("tcp", "localhost:0") - if err != nil { - return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err) - } - } - s := &Server{ - // Give the channels a buffer size of 1 so that we can setup - // expectations for one lookup call, without blocking. - RequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - ResponseChan: make(chan Response, 1), - Address: lis.Addr().String(), - } - - server := grpc.NewServer(opts...) - rlsgrpc.RegisterRouteLookupServiceServer(server, s) - go server.Serve(lis) - - return s, func() { server.Stop() }, nil -} - -// RouteLookup implements the RouteLookupService. -func (s *Server) RouteLookup(ctx context.Context, req *rlspb.RouteLookupRequest) (*rlspb.RouteLookupResponse, error) { - s.RequestChan.Send(req) - - // The leakchecker fails if we don't exit out of here in a reasonable time. - timer := time.NewTimer(defaultRPCTimeout) - select { - case <-timer.C: - return nil, errors.New("default RPC timeout exceeded") - case resp := <-s.ResponseChan: - timer.Stop() - return resp.Resp, resp.Err - } -} - -// ClientConn returns a grpc.ClientConn connected to the fakeServer. 
-func (s *Server) ClientConn() (*grpc.ClientConn, func(), error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultDialTimeout) - defer cancel() - - cc, err := grpc.DialContext(ctx, s.Address, grpc.WithInsecure(), grpc.WithBlock()) - if err != nil { - return nil, nil, fmt.Errorf("grpc.DialContext(%s) failed: %v", s.Address, err) - } - return cc, func() { cc.Close() }, nil -} From eb6ff1ae43acd46fe708dee34997317a79d5136f Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Wed, 19 Jan 2022 10:31:42 -0800 Subject: [PATCH 397/998] xds: update RPC timeout in blackhole case (#5126) --- interop/grpclb_fallback/client_linux.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/interop/grpclb_fallback/client_linux.go b/interop/grpclb_fallback/client_linux.go index 152bb01a7b5f..4fa09ceb4d9c 100644 --- a/interop/grpclb_fallback/client_linux.go +++ b/interop/grpclb_fallback/client_linux.go @@ -132,7 +132,7 @@ func waitForFallbackAndDoRPCs(client testgrpc.TestServiceClient, fallbackDeadlin fallbackRetryCount := 0 fellBack := false for time.Now().Before(fallbackDeadline) { - g := doRPCAndGetPath(client, 1*time.Second) + g := doRPCAndGetPath(client, 20*time.Second) if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK { infoLog.Println("Made one successul RPC to a fallback. 
Now expect the same for the rest.") fellBack = true @@ -166,7 +166,7 @@ func doFastFallbackBeforeStartup() { func doSlowFallbackBeforeStartup() { runCmd(*blackholeLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(20 * time.Second) + fallbackDeadline := time.Now().Add(60 * time.Second) conn := createTestConn() defer conn.Close() client := testgrpc.NewTestServiceClient(conn) From 5350e74601b2d9bec01625be53242468b9e7c8e1 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 19 Jan 2022 12:56:41 -0800 Subject: [PATCH 398/998] xds/controller: fix test nil panic on stream errors (#5144) --- .../xdsclient/controller/controller_test.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/xds/internal/xdsclient/controller/controller_test.go b/xds/internal/xdsclient/controller/controller_test.go index e5fdb8878716..8c7c2838d838 100644 --- a/xds/internal/xdsclient/controller/controller_test.go +++ b/xds/internal/xdsclient/controller/controller_test.go @@ -24,11 +24,18 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) const testXDSServer = "xds-server" +// noopUpdateHandler ignores all updates. It's to be used in tests where the +// updates don't matter. To avoid potential nil panic. +var noopUpdateHandler = &testUpdateReceiver{ + f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) {}, +} + // TestNew covers that New() returns an error if the input *ServerConfig // contains invalid content. func (s) TestNew(t *testing.T) { @@ -88,7 +95,7 @@ func (s) TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - c, err := New(test.config, nil, nil, nil) // Only testing the config, other inputs are left as nil. 
+ c, err := New(test.config, noopUpdateHandler, nil, nil) // Only testing the config, other inputs are left as nil. defer func() { if c != nil { c.Close() @@ -116,7 +123,7 @@ func (s) TestNewWithGRPCDial(t *testing.T) { // Set the dialer and make sure it is called. SetGRPCDial(customDialer) - c, err := New(config, nil, nil, nil) + c, err := New(config, noopUpdateHandler, nil, nil) if err != nil { t.Fatalf("New(%+v) = %v, want no error", config, err) } @@ -131,7 +138,7 @@ func (s) TestNewWithGRPCDial(t *testing.T) { // Reset the dialer and make sure it is not called. SetGRPCDial(grpc.Dial) - c, err = New(config, nil, nil, nil) + c, err = New(config, noopUpdateHandler, nil, nil) defer func() { if c != nil { c.Close() From 475c62a8dbe53ddb1ea98317b0a360d99b4f987e Mon Sep 17 00:00:00 2001 From: Shihao Xia Date: Wed, 19 Jan 2022 15:57:53 -0500 Subject: [PATCH 399/998] testing: fix potential problems in testFlowControlAccountCheck (#5105) --- internal/transport/transport_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index ec864afb6e96..34536c9c3252 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1546,6 +1546,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) loopyServerStreams := map[uint32]*outStream{} // Get all the streams from server reader and writer and client writer. 
st.mu.Lock() + client.mu.Lock() for _, stream := range clientStreams { id := stream.id serverStreams[id] = st.activeStreams[id] @@ -1553,6 +1554,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) loopyClientStreams[id] = client.loopy.estdStreams[id] } + client.mu.Unlock() st.mu.Unlock() // Close all streams for _, stream := range clientStreams { @@ -1574,6 +1576,9 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) sstream := serverStreams[id] loopyServerStream := loopyServerStreams[id] loopyClientStream := loopyClientStreams[id] + if loopyServerStream == nil { + t.Fatalf("Unexpected nil loopyServerStream") + } // Check stream flow control. if int(cstream.fc.limit+cstream.fc.delta-cstream.fc.pendingData-cstream.fc.pendingUpdate) != int(st.loopy.oiws)-loopyServerStream.bytesOutStanding { t.Fatalf("Account mismatch: client stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != server outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", cstream.fc.limit, cstream.fc.delta, cstream.fc.pendingData, cstream.fc.pendingUpdate, st.loopy.oiws, loopyServerStream.bytesOutStanding) From 61a352e94efc1b3e98a0f58de6b10c8329cc9f6f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 19 Jan 2022 13:52:45 -0800 Subject: [PATCH 400/998] rls: move RLS LB policy implementation out of internal (#5143) --- balancer/rls/{internal => }/balancer.go | 5 ++-- balancer/rls/{internal => }/balancer_test.go | 0 balancer/rls/{internal => }/cache.go | 3 --- balancer/rls/{internal => }/cache_test.go | 0 balancer/rls/{internal => }/child_policy.go | 3 --- balancer/rls/{internal => }/config.go | 0 balancer/rls/{internal => }/config_test.go | 0 .../rls/{internal => }/control_channel.go | 0 .../{internal => }/control_channel_test.go | 0 balancer/rls/{internal => }/helpers_test.go | 3 --- balancer/rls/{internal => }/picker.go | 0 balancer/rls/{internal => }/picker_test.go | 0 
balancer/rls/rls.go | 25 ------------------- xds/internal/clusterspecifier/rls/rls.go | 10 +++----- 14 files changed, 7 insertions(+), 42 deletions(-) rename balancer/rls/{internal => }/balancer.go (99%) rename balancer/rls/{internal => }/balancer_test.go (100%) rename balancer/rls/{internal => }/cache.go (99%) rename balancer/rls/{internal => }/cache_test.go (100%) rename balancer/rls/{internal => }/child_policy.go (96%) rename balancer/rls/{internal => }/config.go (100%) rename balancer/rls/{internal => }/config_test.go (100%) rename balancer/rls/{internal => }/control_channel.go (100%) rename balancer/rls/{internal => }/control_channel_test.go (100%) rename balancer/rls/{internal => }/helpers_test.go (98%) rename balancer/rls/{internal => }/picker.go (100%) rename balancer/rls/{internal => }/picker_test.go (100%) delete mode 100644 balancer/rls/rls.go diff --git a/balancer/rls/internal/balancer.go b/balancer/rls/balancer.go similarity index 99% rename from balancer/rls/internal/balancer.go rename to balancer/rls/balancer.go index 9a53edb8db1d..8dc982ffc76f 100644 --- a/balancer/rls/internal/balancer.go +++ b/balancer/rls/balancer.go @@ -65,7 +65,8 @@ var ( resetBackoffHook = func() {} ) -const balancerName = "rls_experimental" +// Name is the name of the RLS LB policy. 
+const Name = "rls_experimental" func init() { balancer.Register(&rlsBB{}) @@ -74,7 +75,7 @@ func init() { type rlsBB struct{} func (rlsBB) Name() string { - return balancerName + return Name } func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { diff --git a/balancer/rls/internal/balancer_test.go b/balancer/rls/balancer_test.go similarity index 100% rename from balancer/rls/internal/balancer_test.go rename to balancer/rls/balancer_test.go diff --git a/balancer/rls/internal/cache.go b/balancer/rls/cache.go similarity index 99% rename from balancer/rls/internal/cache.go rename to balancer/rls/cache.go index 527b9b278a1f..9a38072c7745 100644 --- a/balancer/rls/internal/cache.go +++ b/balancer/rls/cache.go @@ -27,9 +27,6 @@ import ( "google.golang.org/grpc/internal/grpcsync" ) -// TODO(easwars): Remove this once all RLS code is merged. -//lint:file-ignore U1000 Ignore all unused code, not all code is merged yet. - // cacheKey represents the key used to uniquely identify an entry in the data // cache and in the pending requests map. type cacheKey struct { diff --git a/balancer/rls/internal/cache_test.go b/balancer/rls/cache_test.go similarity index 100% rename from balancer/rls/internal/cache_test.go rename to balancer/rls/cache_test.go diff --git a/balancer/rls/internal/child_policy.go b/balancer/rls/child_policy.go similarity index 96% rename from balancer/rls/internal/child_policy.go rename to balancer/rls/child_policy.go index 2e25be6438e3..c74184cac238 100644 --- a/balancer/rls/internal/child_policy.go +++ b/balancer/rls/child_policy.go @@ -29,9 +29,6 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -// TODO(easwars): Remove this once all RLS code is merged. -//lint:file-ignore U1000 Ignore all unused code, not all code is merged yet. - // childPolicyWrapper is a reference counted wrapper around a child policy. 
// // The LB policy maintains a map of these wrappers keyed by the target returned diff --git a/balancer/rls/internal/config.go b/balancer/rls/config.go similarity index 100% rename from balancer/rls/internal/config.go rename to balancer/rls/config.go diff --git a/balancer/rls/internal/config_test.go b/balancer/rls/config_test.go similarity index 100% rename from balancer/rls/internal/config_test.go rename to balancer/rls/config_test.go diff --git a/balancer/rls/internal/control_channel.go b/balancer/rls/control_channel.go similarity index 100% rename from balancer/rls/internal/control_channel.go rename to balancer/rls/control_channel.go diff --git a/balancer/rls/internal/control_channel_test.go b/balancer/rls/control_channel_test.go similarity index 100% rename from balancer/rls/internal/control_channel_test.go rename to balancer/rls/control_channel_test.go diff --git a/balancer/rls/internal/helpers_test.go b/balancer/rls/helpers_test.go similarity index 98% rename from balancer/rls/internal/helpers_test.go rename to balancer/rls/helpers_test.go index 7d3e9fa10fea..2b6a37cc36c9 100644 --- a/balancer/rls/internal/helpers_test.go +++ b/balancer/rls/helpers_test.go @@ -45,9 +45,6 @@ import ( "google.golang.org/protobuf/types/known/durationpb" ) -// TODO(easwars): Remove this once all RLS code is merged. -//lint:file-ignore U1000 Ignore all unused code, not all code is merged yet. 
- const ( defaultTestTimeout = 5 * time.Second defaultTestShortTimeout = 100 * time.Millisecond diff --git a/balancer/rls/internal/picker.go b/balancer/rls/picker.go similarity index 100% rename from balancer/rls/internal/picker.go rename to balancer/rls/picker.go diff --git a/balancer/rls/internal/picker_test.go b/balancer/rls/picker_test.go similarity index 100% rename from balancer/rls/internal/picker_test.go rename to balancer/rls/picker_test.go diff --git a/balancer/rls/rls.go b/balancer/rls/rls.go deleted file mode 100644 index 473c3c1a8133..000000000000 --- a/balancer/rls/rls.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package rls imports to init the rls lb policy for testing purposes. -package rls - -import ( - // Blank import to init the rls lb policy for external use. - _ "google.golang.org/grpc/balancer/rls/internal" -) diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go index 98b0d566395a..58ba9364e6c5 100644 --- a/xds/internal/clusterspecifier/rls/rls.go +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -32,12 +32,10 @@ import ( "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/anypb" - // Blank import to init the RLS LB policy. - _ "google.golang.org/grpc/balancer/rls" + // Never remove this import as the RLS LB policy is registered in its init(). 
+ rlslb "google.golang.org/grpc/balancer/rls" ) -const rlsBalancerName = "rls_experimental" - func init() { if envconfig.XDSRLS { clusterspecifier.Register(rls{}) @@ -91,7 +89,7 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala return nil, fmt.Errorf("rls_csp: error marshaling load balancing config %v: %v", lbCfgJSON, err) } - rlsBB := balancer.Get(rlsBalancerName) + rlsBB := balancer.Get(rlslb.Name) if rlsBB == nil { return nil, fmt.Errorf("RLS LB policy not registered") } @@ -100,5 +98,5 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing %v", err) } - return clusterspecifier.BalancerConfig{{rlsBalancerName: lbCfgJSON}}, nil + return clusterspecifier.BalancerConfig{{rlslb.Name: lbCfgJSON}}, nil } From f93e8e673710b7765c8f29e78ba3b97c238e1de8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 19 Jan 2022 16:48:55 -0800 Subject: [PATCH 401/998] rls: return status error from server interceptor in test (#5153) --- balancer/rls/control_channel_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go index 876701de3326..78ddb8d1219a 100644 --- a/balancer/rls/control_channel_test.go +++ b/balancer/rls/control_channel_test.go @@ -108,10 +108,9 @@ func (s) TestLookupFailure(t *testing.T) { // TestLookupDeadlineExceeded tests the case where the RLS server does not // respond within the configured rpc timeout. func (s) TestLookupDeadlineExceeded(t *testing.T) { - // A unary interceptor which blocks until the test is done. + // A unary interceptor which returns a status error with DeadlineExceeded. 
interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - <-ctx.Done() - return nil, ctx.Err() + return nil, status.Error(codes.DeadlineExceeded, "deadline exceeded") } // Start an RLS server and set the throttler to never throttle. From fa62572afec89ee008639f664b6d2463927ab8a8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 20 Jan 2022 09:21:16 -0800 Subject: [PATCH 402/998] rls: make the data cache purge ticker a field in rlsBalancer (#5154) --- balancer/rls/balancer.go | 30 +++++++++++++++++------------- balancer/rls/balancer_test.go | 8 +++++--- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index 8dc982ffc76f..33da052ca562 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -38,6 +38,13 @@ import ( "google.golang.org/grpc/resolver" ) +const ( + // Name is the name of the RLS LB policy. + Name = "rls_experimental" + // Default frequency for data cache purging. + periodicCachePurgeFreq = time.Minute +) + var ( logger = grpclog.Component("rls") @@ -45,8 +52,8 @@ var ( // Default exponential backoff strategy for data cache entries. defaultBackoffStrategy = backoff.Strategy(backoff.DefaultExponential) - // Default frequency for data cache purging. - periodicCachePurgeFreq = time.Minute + // Ticker used for periodic data cache purging. + dataCachePurgeTicker = func() *time.Ticker { return time.NewTicker(periodicCachePurgeFreq) } // We want every cache entry to live in the cache for at least this // duration. If we encounter a cache entry whose minimum expiration time is // in the future, we abort the LRU pass, which may temporarily leave the @@ -65,9 +72,6 @@ var ( resetBackoffHook = func() {} ) -// Name is the name of the RLS LB policy. 
-const Name = "rls_experimental" - func init() { balancer.Register(&rlsBB{}) } @@ -83,6 +87,7 @@ func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. done: grpcsync.NewEvent(), cc: cc, bopts: opts, + purgeTicker: dataCachePurgeTicker(), lbCfg: &lbConfig{}, pendingMap: make(map[cacheKey]*backoffState), childPolicies: make(map[string]*childPolicyWrapper), @@ -100,10 +105,11 @@ func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. // rlsBalancer implements the RLS LB policy. type rlsBalancer struct { - done *grpcsync.Event - cc balancer.ClientConn - bopts balancer.BuildOptions - logger *internalgrpclog.PrefixLogger + done *grpcsync.Event + cc balancer.ClientConn + bopts balancer.BuildOptions + purgeTicker *time.Ticker + logger *internalgrpclog.PrefixLogger // If both cacheMu and stateMu need to be acquired, the former must be // acquired first to prevent a deadlock. This order restriction is due to the @@ -169,14 +175,11 @@ func (b *rlsBalancer) run() { // entries. An expired entry is one for which both the expiryTime and // backoffExpiryTime are in the past. func (b *rlsBalancer) purgeDataCache() { - ticker := time.NewTicker(periodicCachePurgeFreq) - defer ticker.Stop() - for { select { case <-b.done.Done(): return - case <-ticker.C: + case <-b.purgeTicker.C: b.cacheMu.Lock() updatePicker := b.dataCache.evictExpiredEntries() b.cacheMu.Unlock() @@ -376,6 +379,7 @@ func (b *rlsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub func (b *rlsBalancer) Close() { b.done.Fire() + b.purgeTicker.Stop() b.stateMu.Lock() if b.ctrlCh != nil { b.ctrlCh.close() diff --git a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go index 9607ac6773c3..7d243cd13777 100644 --- a/balancer/rls/balancer_test.go +++ b/balancer/rls/balancer_test.go @@ -586,9 +586,11 @@ func (s) TestConfigUpdate_DataCacheSizeDecrease(t *testing.T) { // entries from the data cache. 
func (s) TestDataCachePurging(t *testing.T) { // Override the frequency of the data cache purger to a small one. - origPurgeFreq := periodicCachePurgeFreq - periodicCachePurgeFreq = defaultTestShortTimeout - defer func() { periodicCachePurgeFreq = origPurgeFreq }() + origDataCachePurgeTicker := dataCachePurgeTicker + ticker := time.NewTicker(defaultTestShortTimeout) + defer ticker.Stop() + dataCachePurgeTicker = func() *time.Ticker { return ticker } + defer func() { dataCachePurgeTicker = origDataCachePurgeTicker }() // Override the data cache purge hook to get notified. dataCachePurgeDone := make(chan struct{}, 1) From 6f54b5ddbee05b7c8fa53f9375bc17eea83257db Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 20 Jan 2022 09:21:38 -0800 Subject: [PATCH 403/998] recommend the use `go test` commands instead of `make test` (#5151) --- CONTRIBUTING.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cd03f8c76888..52338d004ce3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. 
From 9cb411380883ddbf69467b4ba1099817c0fe6c61 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 20 Jan 2022 11:31:59 -0800 Subject: [PATCH 404/998] xdsclient: make Close() idempotent (#5149) --- xds/csds/csds_test.go | 23 +++-- xds/internal/xdsclient/client.go | 14 +-- xds/internal/xdsclient/client_test.go | 110 -------------------- xds/internal/xdsclient/singleton.go | 74 ++++++++----- xds/internal/xdsclient/singleton_test.go | 126 +++++++++++++++++++++++ 5 files changed, 191 insertions(+), 156 deletions(-) create mode 100644 xds/internal/xdsclient/singleton_test.go diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 0bf305899de8..fd19599ab094 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -30,6 +30,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/uuid" "google.golang.org/grpc" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds" _ "google.golang.org/grpc/xds/internal/httpfilter/router" @@ -50,9 +51,7 @@ import ( v3statuspbgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -const ( - defaultTestTimeout = 10 * time.Second -) +const defaultTestTimeout = 10 * time.Second var cmpOpts = cmp.Options{ cmp.Transformer("sort", func(in []*v3statuspb.ClientConfig_GenericXdsConfig) []*v3statuspb.ClientConfig_GenericXdsConfig { @@ -113,6 +112,14 @@ var ( ports = []uint32{123, 456} ) +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + func init() { for i := range ldsTargets { listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) @@ -132,7 +139,7 @@ func init() { } } -func TestCSDS(t *testing.T) { +func (s) TestCSDS(t *testing.T) { const retryCount = 10 ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -242,12 +249,13 @@ func commonSetup(ctx context.Context, t *testing.T) 
(xdsclient.XDSClient, *e2e.M if err != nil { t.Fatal(err) } + // Create xds_client. xdsC, err := xdsclient.New() if err != nil { t.Fatalf("failed to create xds client: %v", err) } - oldNewXDSClient := newXDSClient + origNewXDSClient := newXDSClient newXDSClient = func() xdsclient.XDSClient { return xdsC } // Initialize an gRPC server and register CSDS on it. @@ -257,6 +265,7 @@ func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.M t.Fatal(err) } v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) + // Create a local listener and pass it to Serve(). lis, err := testutils.LocalTCPListener() if err != nil { @@ -284,7 +293,7 @@ func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.M conn.Close() server.Stop() csdss.Close() - newXDSClient = oldNewXDSClient + newXDSClient = origNewXDSClient xdsC.Close() bootstrapCleanup() } @@ -490,7 +499,7 @@ func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDisco return nil } -func TestCSDSNoXDSClient(t *testing.T) { +func (s) TestCSDSNoXDSClient(t *testing.T) { oldNewXDSClient := newXDSClient newXDSClient = func() xdsclient.XDSClient { return nil } defer func() { newXDSClient = oldNewXDSClient }() diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 1378268ef434..817a4507eb34 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -68,25 +68,17 @@ type clientImpl struct { } // newWithConfig returns a new xdsClient with the given config. 
-func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (_ *clientImpl, retErr error) { +func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { c := &clientImpl{ done: grpcsync.NewEvent(), config: config, watchExpiryTimeout: watchExpiryTimeout, - - authorities: make(map[string]*authority), - idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), + authorities: make(map[string]*authority), + idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), } - defer func() { - if retErr != nil { - c.Close() - } - }() - c.logger = prefixLogger(c) c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) - c.logger.Infof("Created") return c, nil } diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index d6c9f9b401d8..e165f84f78d9 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -261,113 +261,3 @@ func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wan } return nil } - -// Test that multiple New() returns the same Client. And only when the last -// client is closed, the underlying client is closed. -func (s) TestClientNewSingleton(t *testing.T) { - oldBootstrapNewConfig := bootstrapNewConfig - bootstrapNewConfig = func() (*bootstrap.Config, error) { - return &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithInsecure(), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - }, nil - } - defer func() { bootstrapNewConfig = oldBootstrapNewConfig }() - - ctrlCh := overrideNewController(t) - - // The first New(). Should create a Client and a new APIClient. - client, err := newRefCounted() - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - - // Call a watch to create the controller. 
- client.WatchCluster("doesnot-matter", func(update xdsresource.ClusterUpdate, err error) {}) - - clientImpl := client.clientImpl - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := ctrlCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testController) - - // Call New() again. They should all return the same client implementation, - // and should not create new API client. - const count = 9 - for i := 0; i < count; i++ { - tc, terr := newRefCounted() - if terr != nil { - client.Close() - t.Fatalf("%d-th call to New() failed with error: %v", i, terr) - } - if tc.clientImpl != clientImpl { - client.Close() - tc.Close() - t.Fatalf("%d-th call to New() got a different client %p, want %p", i, tc.clientImpl, clientImpl) - } - - sctx, scancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer scancel() - _, err := ctrlCh.Receive(sctx) - if err == nil { - client.Close() - t.Fatalf("%d-th call to New() created a new API client", i) - } - } - - // Call Close(). Nothing should be actually closed until the last ref calls - // Close(). - for i := 0; i < count; i++ { - client.Close() - if clientImpl.done.HasFired() { - t.Fatalf("%d-th call to Close(), unexpected done in the client implemenation", i) - } - if apiClient.done.HasFired() { - t.Fatalf("%d-th call to Close(), unexpected done in the API client", i) - } - } - - // Call the last Close(). The underlying implementation and API Client - // should all be closed. - client.Close() - if !clientImpl.done.HasFired() { - t.Fatalf("want client implementation to be closed, got not done") - } - if !apiClient.done.HasFired() { - t.Fatalf("want API client to be closed, got not done") - } - - // Call New() again after the previous Client is actually closed. Should - // create a Client and a new APIClient. 
- client2, err2 := newRefCounted() - if err2 != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client2.Close() - - // Call a watch to create the controller. - client2.WatchCluster("abc", func(update xdsresource.ClusterUpdate, err error) {}) - - c2, err := ctrlCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient2 := c2.(*testController) - - // The client wrapper with ref count should be the same. - if client2 != client { - t.Fatalf("New() after Close() should return the same client wrapper, got different %p, %p", client2, client) - } - if client2.clientImpl == clientImpl { - t.Fatalf("New() after Close() should return different client implementation, got the same %p", client2.clientImpl) - } - if apiClient2 == apiClient { - t.Fatalf("New() after Close() should return different API client, got the same %p", apiClient2) - } -} diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index ef928041c17a..1cf033c03198 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -33,13 +33,36 @@ const ( defaultIdleAuthorityDeleteTimeout = 5 * time.Minute ) -// This is the Client returned by New(). It contains one client implementation, -// and maintains the refcount. -var singletonClient = &clientRefCounted{} +var ( + // This is the Client returned by New(). It contains one client implementation, + // and maintains the refcount. + singletonClient = &clientRefCounted{} + + // The following functions are no-ops in the actual code, but can be + // overridden in tests to give them visibility into certain events. + singletonClientImplCreateHook = func() {} + singletonClientImplCloseHook = func() {} +) // To override in tests. var bootstrapNewConfig = bootstrap.NewConfig +// onceClosingClient is a thin wrapper around clientRefCounted. 
The Close() +// method is overridden such that the underlying reference counted client's +// Close() is called at most once, thereby making Close() idempotent. +// +// This is the type which is returned by New() and NewWithConfig(), making it +// safe for these callers to call Close() any number of times. +type onceClosingClient struct { + XDSClient + + once sync.Once +} + +func (o *onceClosingClient) Close() { + o.once.Do(o.XDSClient.Close) +} + // clientRefCounted is ref-counted, and to be shared by the xds resolver and // balancer implementations, across multiple ClientConns and Servers. type clientRefCounted struct { @@ -70,29 +93,8 @@ func New() (XDSClient, error) { return c, nil } -func newRefCounted() (*clientRefCounted, error) { - singletonClient.mu.Lock() - defer singletonClient.mu.Unlock() - // If the client implementation was created, increment ref count and return - // the client. - if singletonClient.clientImpl != nil { - singletonClient.refCount++ - return singletonClient, nil - } - - // Create the new client implementation. - config, err := bootstrapNewConfig() - if err != nil { - return nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) - } - c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, err - } - - singletonClient.clientImpl = c - singletonClient.refCount++ - return singletonClient, nil +func newRefCounted() (XDSClient, error) { + return newRefCountedWithConfig(nil) } // NewWithConfig returns a new xdsClient configured by the given config. @@ -107,13 +109,27 @@ func newRefCounted() (*clientRefCounted, error) { // This function is internal only, for c2p resolver and testing to use. DO NOT // use this elsewhere. Use New() instead. 
func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { + return newRefCountedWithConfig(config) +} + +func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() + // If the client implementation was created, increment ref count and return // the client. if singletonClient.clientImpl != nil { singletonClient.refCount++ - return singletonClient, nil + return &onceClosingClient{XDSClient: singletonClient}, nil + } + + // If the passed in config is nil, perform bootstrap to read config. + if config == nil { + var err error + config, err = bootstrapNewConfig() + if err != nil { + return nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) + } } // Create the new client implementation. @@ -124,7 +140,8 @@ func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { singletonClient.clientImpl = c singletonClient.refCount++ - return singletonClient, nil + singletonClientImplCreateHook() + return &onceClosingClient{XDSClient: singletonClient}, nil } // Close closes the client. It does ref count of the xds client implementation, @@ -139,6 +156,7 @@ func (c *clientRefCounted) Close() { // Set clientImpl back to nil. So if New() is called after this, a new // implementation will be created. c.clientImpl = nil + singletonClientImplCloseHook() } } diff --git a/xds/internal/xdsclient/singleton_test.go b/xds/internal/xdsclient/singleton_test.go new file mode 100644 index 000000000000..1bf6077f3952 --- /dev/null +++ b/xds/internal/xdsclient/singleton_test.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "context" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +// Test that multiple New() returns the same Client. And only when the last +// client is closed, the underlying client is closed. +func (s) TestClientNewSingleton(t *testing.T) { + // Override bootstrap with a fake config. + oldBootstrapNewConfig := bootstrapNewConfig + bootstrapNewConfig = func() (*bootstrap.Config, error) { + return &bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: testXDSServer, + Creds: grpc.WithInsecure(), + NodeProto: xdstestutils.EmptyNodeProtoV2, + }, + }, nil + } + defer func() { bootstrapNewConfig = oldBootstrapNewConfig }() + + // Override the singleton creation hook to get notified. + origSingletonClientImplCreateHook := singletonClientImplCreateHook + singletonCreationCh := testutils.NewChannel() + singletonClientImplCreateHook = func() { + singletonCreationCh.Replace(nil) + } + defer func() { singletonClientImplCreateHook = origSingletonClientImplCreateHook }() + + // Override the singleton close hook to get notified. 
+ origSingletonClientImplCloseHook := singletonClientImplCloseHook + singletonCloseCh := testutils.NewChannel() + singletonClientImplCloseHook = func() { + singletonCloseCh.Replace(nil) + } + defer func() { singletonClientImplCloseHook = origSingletonClientImplCloseHook }() + + // The first call to New() should create a new singleton client. + client, err := New() + if err != nil { + t.Fatalf("failed to create xDS client: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := singletonCreationCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for singleton xDS client to be created: %v", err) + } + + // Calling New() again should not create new singleton client implementations. + const count = 9 + clients := make([]XDSClient, count) + for i := 0; i < count; i++ { + func() { + clients[i], err = New() + if err != nil { + t.Fatalf("%d-th call to New() failed with error: %v", i, err) + } + + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := singletonCreationCh.Receive(sCtx); err == nil { + t.Fatalf("%d-th call to New() created a new singleton client", i) + } + }() + } + + // Call Close() multiple times on each of the clients created in the above for + // loop. Close() calls are idempotent, and the underlying client + // implementation will not be closed until we release the first reference we + // acquired above, via the first call to New(). + for i := 0; i < count; i++ { + func() { + clients[i].Close() + clients[i].Close() + + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := singletonCloseCh.Receive(sCtx); err == nil { + t.Fatal("singleton client implementation closed before all references are released") + } + }() + } + + // Call the last Close(). The underlying implementation should be closed. 
+ client.Close() + if _, err := singletonCloseCh.Receive(ctx); err != nil { + t.Fatalf("Timeout waiting for singleton client implementation to be closed: %v", err) + } + + // Calling New() again, after the previous Client was actually closed, should + // create a new one. + client, err = New() + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer client.Close() + if _, err := singletonCreationCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for singleton xDS client to be created: %v", err) + } +} From 5b3768235a1d274a605276e248d966faec623cd6 Mon Sep 17 00:00:00 2001 From: Thomas Hallgren Date: Tue, 25 Jan 2022 00:38:04 +0100 Subject: [PATCH 405/998] Let helloworld example listen to all hosts (#5089) This commit changes the address that the helloworld greeter_server is listening to from "localhost:" to just ":", thus enabling the server to be used in setups where the request stem from another host. Signed-off-by: Thomas Hallgren --- examples/helloworld/greeter_server/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/helloworld/greeter_server/main.go b/examples/helloworld/greeter_server/main.go index 728bb19fd118..7a62a9b9ff25 100644 --- a/examples/helloworld/greeter_server/main.go +++ b/examples/helloworld/greeter_server/main.go @@ -47,7 +47,7 @@ func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloRe func main() { flag.Parse() - lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } From 546e6aebb42618d81a017a9abf83f1f66628d856 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 25 Jan 2022 10:39:55 -0800 Subject: [PATCH 406/998] proto: incorporate recent service config proto definition changes (#5167) --- .../grpc_service_config/service_config.pb.go | 1578 ++++++++++------- 1 file changed, 985 insertions(+), 593 deletions(-) diff --git 
a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go index 0f65b79aae05..5a41804f3d87 100644 --- a/internal/proto/grpc_service_config/service_config.pb.go +++ b/internal/proto/grpc_service_config/service_config.pb.go @@ -40,6 +40,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" @@ -102,7 +103,7 @@ func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Num // Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type.Descriptor instead. func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7, 0, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0, 0} } // Load balancing policy. @@ -166,7 +167,7 @@ func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConfig_LoadBalancingPolicy.Descriptor instead. func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16, 0} } // Configuration for a method. @@ -590,6 +591,54 @@ func (x *WeightedTargetLoadBalancingPolicyConfig) GetTargets() map[string]*Weigh return nil } +// Configuration for xds_cluster_manager_experimental LB policy. 
+type XdsClusterManagerLoadBalancingPolicyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Children map[string]*XdsClusterManagerLoadBalancingPolicyConfig_Child `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig) Reset() { + *x = XdsClusterManagerLoadBalancingPolicyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsClusterManagerLoadBalancingPolicyConfig) ProtoMessage() {} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsClusterManagerLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. +func (*XdsClusterManagerLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} +} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig) GetChildren() map[string]*XdsClusterManagerLoadBalancingPolicyConfig_Child { + if x != nil { + return x.Children + } + return nil +} + // Configuration for the cds LB policy. 
type CdsConfig struct { state protoimpl.MessageState @@ -602,7 +651,7 @@ type CdsConfig struct { func (x *CdsConfig) Reset() { *x = CdsConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] + mi := &file_grpc_service_config_service_config_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -615,7 +664,7 @@ func (x *CdsConfig) String() string { func (*CdsConfig) ProtoMessage() {} func (x *CdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] + mi := &file_grpc_service_config_service_config_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -628,7 +677,7 @@ func (x *CdsConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use CdsConfig.ProtoReflect.Descriptor instead. func (*CdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} } func (x *CdsConfig) GetCluster() string { @@ -638,6 +687,72 @@ func (x *CdsConfig) GetCluster() string { return "" } +// Represents an xDS server. +type XdsServer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServerUri string `protobuf:"bytes,1,opt,name=server_uri,proto3" json:"server_uri,omitempty"` // Required. + // A list of channel creds to use. The first supported type will be used. + ChannelCreds []*XdsServer_ChannelCredentials `protobuf:"bytes,2,rep,name=channel_creds,proto3" json:"channel_creds,omitempty"` + // A repeated list of server features. 
+ ServerFeatures []*structpb.Value `protobuf:"bytes,3,rep,name=server_features,proto3" json:"server_features,omitempty"` +} + +func (x *XdsServer) Reset() { + *x = XdsServer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsServer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsServer) ProtoMessage() {} + +func (x *XdsServer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsServer.ProtoReflect.Descriptor instead. +func (*XdsServer) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} +} + +func (x *XdsServer) GetServerUri() string { + if x != nil { + return x.ServerUri + } + return "" +} + +func (x *XdsServer) GetChannelCreds() []*XdsServer_ChannelCredentials { + if x != nil { + return x.ChannelCreds + } + return nil +} + +func (x *XdsServer) GetServerFeatures() []*structpb.Value { + if x != nil { + return x.ServerFeatures + } + return nil +} + // Configuration for xds_cluster_resolver LB policy. 
type XdsClusterResolverLoadBalancingPolicyConfig struct { state protoimpl.MessageState @@ -661,7 +776,7 @@ type XdsClusterResolverLoadBalancingPolicyConfig struct { func (x *XdsClusterResolverLoadBalancingPolicyConfig) Reset() { *x = XdsClusterResolverLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] + mi := &file_grpc_service_config_service_config_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -674,7 +789,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig) String() string { func (*XdsClusterResolverLoadBalancingPolicyConfig) ProtoMessage() {} func (x *XdsClusterResolverLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] + mi := &file_grpc_service_config_service_config_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -687,7 +802,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig) ProtoReflect() protoreflec // Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. func (*XdsClusterResolverLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} } func (x *XdsClusterResolverLoadBalancingPolicyConfig) GetDiscoveryMechanisms() []*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism { @@ -720,7 +835,14 @@ type XdsClusterImplLoadBalancingPolicyConfig struct { // If unset, no load reporting is done. // If set to empty string, load reporting will be sent to the same // server as we are getting xds data from. + // DEPRECATED: Use new lrs_load_reporting_server field instead. + // + // Deprecated: Do not use. 
LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` + // LRS server to send load reports to. + // If not present, load reporting will be disabled. + // Supercedes lrs_load_reporting_server_name field. + LrsLoadReportingServer *XdsServer `protobuf:"bytes,7,opt,name=lrs_load_reporting_server,json=lrsLoadReportingServer,proto3" json:"lrs_load_reporting_server,omitempty"` // Maximum number of outstanding requests can be made to the upstream cluster. // Default is 1024. MaxConcurrentRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` @@ -732,7 +854,7 @@ type XdsClusterImplLoadBalancingPolicyConfig struct { func (x *XdsClusterImplLoadBalancingPolicyConfig) Reset() { *x = XdsClusterImplLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + mi := &file_grpc_service_config_service_config_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -745,7 +867,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig) String() string { func (*XdsClusterImplLoadBalancingPolicyConfig) ProtoMessage() {} func (x *XdsClusterImplLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + mi := &file_grpc_service_config_service_config_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -758,7 +880,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Me // Deprecated: Use XdsClusterImplLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*XdsClusterImplLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} } func (x *XdsClusterImplLoadBalancingPolicyConfig) GetCluster() string { @@ -775,6 +897,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig) GetEdsServiceName() string { return "" } +// Deprecated: Do not use. func (x *XdsClusterImplLoadBalancingPolicyConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { if x != nil { return x.LrsLoadReportingServerName @@ -782,6 +905,13 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig) GetLrsLoadReportingServerName( return nil } +func (x *XdsClusterImplLoadBalancingPolicyConfig) GetLrsLoadReportingServer() *XdsServer { + if x != nil { + return x.LrsLoadReportingServer + } + return nil +} + func (x *XdsClusterImplLoadBalancingPolicyConfig) GetMaxConcurrentRequests() *wrapperspb.UInt32Value { if x != nil { return x.MaxConcurrentRequests @@ -836,7 +966,7 @@ type EdsLoadBalancingPolicyConfig struct { func (x *EdsLoadBalancingPolicyConfig) Reset() { *x = EdsLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] + mi := &file_grpc_service_config_service_config_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -849,7 +979,7 @@ func (x *EdsLoadBalancingPolicyConfig) String() string { func (*EdsLoadBalancingPolicyConfig) ProtoMessage() {} func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] + mi := &file_grpc_service_config_service_config_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -862,7 +992,7 @@ func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { 
// Deprecated: Use EdsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. func (*EdsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} } func (x *EdsLoadBalancingPolicyConfig) GetCluster() string { @@ -913,7 +1043,7 @@ type RingHashLoadBalancingConfig struct { func (x *RingHashLoadBalancingConfig) Reset() { *x = RingHashLoadBalancingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + mi := &file_grpc_service_config_service_config_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -926,7 +1056,7 @@ func (x *RingHashLoadBalancingConfig) String() string { func (*RingHashLoadBalancingConfig) ProtoMessage() {} func (x *RingHashLoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + mi := &file_grpc_service_config_service_config_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -939,7 +1069,7 @@ func (x *RingHashLoadBalancingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RingHashLoadBalancingConfig.ProtoReflect.Descriptor instead. 
func (*RingHashLoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{12} } func (x *RingHashLoadBalancingConfig) GetMinRingSize() uint64 { @@ -979,7 +1109,7 @@ type LrsLoadBalancingPolicyConfig struct { func (x *LrsLoadBalancingPolicyConfig) Reset() { *x = LrsLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] + mi := &file_grpc_service_config_service_config_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -992,7 +1122,7 @@ func (x *LrsLoadBalancingPolicyConfig) String() string { func (*LrsLoadBalancingPolicyConfig) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] + mi := &file_grpc_service_config_service_config_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1005,7 +1135,7 @@ func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LrsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*LrsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13} } func (x *LrsLoadBalancingPolicyConfig) GetClusterName() string { @@ -1076,7 +1206,7 @@ type XdsConfig struct { func (x *XdsConfig) Reset() { *x = XdsConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + mi := &file_grpc_service_config_service_config_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1089,7 +1219,7 @@ func (x *XdsConfig) String() string { func (*XdsConfig) ProtoMessage() {} func (x *XdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + mi := &file_grpc_service_config_service_config_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1102,7 +1232,7 @@ func (x *XdsConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. func (*XdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{12} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14} } // Deprecated: Do not use. 
@@ -1165,12 +1295,13 @@ type LoadBalancingConfig struct { // *LoadBalancingConfig_Grpclb // *LoadBalancingConfig_PriorityExperimental // *LoadBalancingConfig_WeightedTargetExperimental + // *LoadBalancingConfig_XdsClusterManagerExperimental // *LoadBalancingConfig_CdsExperimental // *LoadBalancingConfig_XdsClusterResolverExperimental // *LoadBalancingConfig_XdsClusterImplExperimental - // *LoadBalancingConfig_EdsExperimental // *LoadBalancingConfig_RingHashExperimental // *LoadBalancingConfig_LrsExperimental + // *LoadBalancingConfig_EdsExperimental // *LoadBalancingConfig_Xds // *LoadBalancingConfig_XdsExperimental Policy isLoadBalancingConfig_Policy `protobuf_oneof:"policy"` @@ -1179,7 +1310,7 @@ type LoadBalancingConfig struct { func (x *LoadBalancingConfig) Reset() { *x = LoadBalancingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1192,7 +1323,7 @@ func (x *LoadBalancingConfig) String() string { func (*LoadBalancingConfig) ProtoMessage() {} func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1205,7 +1336,7 @@ func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LoadBalancingConfig.ProtoReflect.Descriptor instead. 
func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{15} } func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { @@ -1250,6 +1381,13 @@ func (x *LoadBalancingConfig) GetWeightedTargetExperimental() *WeightedTargetLoa return nil } +func (x *LoadBalancingConfig) GetXdsClusterManagerExperimental() *XdsClusterManagerLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsClusterManagerExperimental); ok { + return x.XdsClusterManagerExperimental + } + return nil +} + func (x *LoadBalancingConfig) GetCdsExperimental() *CdsConfig { if x, ok := x.GetPolicy().(*LoadBalancingConfig_CdsExperimental); ok { return x.CdsExperimental @@ -1271,13 +1409,6 @@ func (x *LoadBalancingConfig) GetXdsClusterImplExperimental() *XdsClusterImplLoa return nil } -func (x *LoadBalancingConfig) GetEdsExperimental() *EdsLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_EdsExperimental); ok { - return x.EdsExperimental - } - return nil -} - func (x *LoadBalancingConfig) GetRingHashExperimental() *RingHashLoadBalancingConfig { if x, ok := x.GetPolicy().(*LoadBalancingConfig_RingHashExperimental); ok { return x.RingHashExperimental @@ -1293,6 +1424,14 @@ func (x *LoadBalancingConfig) GetLrsExperimental() *LrsLoadBalancingPolicyConfig return nil } +// Deprecated: Do not use. +func (x *LoadBalancingConfig) GetEdsExperimental() *EdsLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_EdsExperimental); ok { + return x.EdsExperimental + } + return nil +} + // Deprecated: Do not use. 
func (x *LoadBalancingConfig) GetXds() *XdsConfig { if x, ok := x.GetPolicy().(*LoadBalancingConfig_Xds); ok { @@ -1336,8 +1475,12 @@ type LoadBalancingConfig_WeightedTargetExperimental struct { WeightedTargetExperimental *WeightedTargetLoadBalancingPolicyConfig `protobuf:"bytes,10,opt,name=weighted_target_experimental,proto3,oneof"` } -type LoadBalancingConfig_CdsExperimental struct { +type LoadBalancingConfig_XdsClusterManagerExperimental struct { // xDS-based load balancing. + XdsClusterManagerExperimental *XdsClusterManagerLoadBalancingPolicyConfig `protobuf:"bytes,14,opt,name=xds_cluster_manager_experimental,proto3,oneof"` +} + +type LoadBalancingConfig_CdsExperimental struct { CdsExperimental *CdsConfig `protobuf:"bytes,6,opt,name=cds_experimental,proto3,oneof"` } @@ -1349,10 +1492,6 @@ type LoadBalancingConfig_XdsClusterImplExperimental struct { XdsClusterImplExperimental *XdsClusterImplLoadBalancingPolicyConfig `protobuf:"bytes,12,opt,name=xds_cluster_impl_experimental,proto3,oneof"` } -type LoadBalancingConfig_EdsExperimental struct { - EdsExperimental *EdsLoadBalancingPolicyConfig `protobuf:"bytes,7,opt,name=eds_experimental,proto3,oneof"` -} - type LoadBalancingConfig_RingHashExperimental struct { RingHashExperimental *RingHashLoadBalancingConfig `protobuf:"bytes,13,opt,name=ring_hash_experimental,proto3,oneof"` } @@ -1364,6 +1503,11 @@ type LoadBalancingConfig_LrsExperimental struct { LrsExperimental *LrsLoadBalancingPolicyConfig `protobuf:"bytes,8,opt,name=lrs_experimental,proto3,oneof"` } +type LoadBalancingConfig_EdsExperimental struct { + // Deprecated: Do not use. + EdsExperimental *EdsLoadBalancingPolicyConfig `protobuf:"bytes,7,opt,name=eds_experimental,proto3,oneof"` +} + type LoadBalancingConfig_Xds struct { // Deprecated: Do not use. 
Xds *XdsConfig `protobuf:"bytes,2,opt,name=xds,proto3,oneof"` @@ -1384,18 +1528,20 @@ func (*LoadBalancingConfig_PriorityExperimental) isLoadBalancingConfig_Policy() func (*LoadBalancingConfig_WeightedTargetExperimental) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_XdsClusterManagerExperimental) isLoadBalancingConfig_Policy() {} + func (*LoadBalancingConfig_CdsExperimental) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_XdsClusterResolverExperimental) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_XdsClusterImplExperimental) isLoadBalancingConfig_Policy() {} -func (*LoadBalancingConfig_EdsExperimental) isLoadBalancingConfig_Policy() {} - func (*LoadBalancingConfig_RingHashExperimental) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_LrsExperimental) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_EdsExperimental) isLoadBalancingConfig_Policy() {} + func (*LoadBalancingConfig_Xds) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_XdsExperimental) isLoadBalancingConfig_Policy() {} @@ -1422,7 +1568,7 @@ type ServiceConfig struct { func (x *ServiceConfig) Reset() { *x = ServiceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1435,7 +1581,7 @@ func (x *ServiceConfig) String() string { func (*ServiceConfig) ProtoMessage() {} func (x *ServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1448,7 +1594,7 @@ func (x *ServiceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig.ProtoReflect.Descriptor 
instead. func (*ServiceConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} } // Deprecated: Do not use. @@ -1526,7 +1672,7 @@ type MethodConfig_Name struct { func (x *MethodConfig_Name) Reset() { *x = MethodConfig_Name{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1539,7 +1685,7 @@ func (x *MethodConfig_Name) String() string { func (*MethodConfig_Name) ProtoMessage() {} func (x *MethodConfig_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1598,7 +1744,7 @@ type MethodConfig_RetryPolicy struct { func (x *MethodConfig_RetryPolicy) Reset() { *x = MethodConfig_RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1611,7 +1757,7 @@ func (x *MethodConfig_RetryPolicy) String() string { func (*MethodConfig_RetryPolicy) ProtoMessage() {} func (x *MethodConfig_RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1693,7 +1839,7 @@ type MethodConfig_HedgingPolicy struct { func (x 
*MethodConfig_HedgingPolicy) Reset() { *x = MethodConfig_HedgingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1706,7 +1852,7 @@ func (x *MethodConfig_HedgingPolicy) String() string { func (*MethodConfig_HedgingPolicy) ProtoMessage() {} func (x *MethodConfig_HedgingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1760,7 +1906,7 @@ type PriorityLoadBalancingPolicyConfig_Child struct { func (x *PriorityLoadBalancingPolicyConfig_Child) Reset() { *x = PriorityLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1773,7 +1919,7 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) String() string { func (*PriorityLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1815,7 +1961,7 @@ type WeightedTargetLoadBalancingPolicyConfig_Target struct { func (x *WeightedTargetLoadBalancingPolicyConfig_Target) Reset() { *x = WeightedTargetLoadBalancingPolicyConfig_Target{} if protoimpl.UnsafeEnabled { - mi := 
&file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1828,7 +1974,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) String() string { func (*WeightedTargetLoadBalancingPolicyConfig_Target) ProtoMessage() {} func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1858,6 +2004,108 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetChildPolicy() []*Loa return nil } +type XdsClusterManagerLoadBalancingPolicyConfig_Child struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` +} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) Reset() { + *x = XdsClusterManagerLoadBalancingPolicyConfig_Child{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoMessage() {} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsClusterManagerLoadBalancingPolicyConfig_Child.ProtoReflect.Descriptor instead. +func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) GetChildPolicy() []*LoadBalancingConfig { + if x != nil { + return x.ChildPolicy + } + return nil +} + +type XdsServer_ChannelCredentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Required. + Config *structpb.Struct `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` // Optional JSON config. +} + +func (x *XdsServer_ChannelCredentials) Reset() { + *x = XdsServer_ChannelCredentials{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsServer_ChannelCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsServer_ChannelCredentials) ProtoMessage() {} + +func (x *XdsServer_ChannelCredentials) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsServer_ChannelCredentials.ProtoReflect.Descriptor instead. 
+func (*XdsServer_ChannelCredentials) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *XdsServer_ChannelCredentials) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *XdsServer_ChannelCredentials) GetConfig() *structpb.Struct { + if x != nil { + return x.Config + } + return nil +} + // Describes a discovery mechanism instance. // For EDS or LOGICAL_DNS clusters, there will be exactly one // DiscoveryMechanism, which will describe the cluster of the parent @@ -1875,7 +2123,14 @@ type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism struct { // If not present, load reporting will be disabled. // If set to the empty string, load reporting will be sent to the same // server that we obtained CDS data from. + // DEPRECATED: Use new lrs_load_reporting_server field instead. + // + // Deprecated: Do not use. LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` + // LRS server to send load reports to. + // If not present, load reporting will be disabled. + // Supercedes lrs_load_reporting_server_name field. + LrsLoadReportingServer *XdsServer `protobuf:"bytes,7,opt,name=lrs_load_reporting_server,json=lrsLoadReportingServer,proto3" json:"lrs_load_reporting_server,omitempty"` // Maximum number of outstanding requests can be made to the upstream // cluster. Default is 1024. 
MaxConcurrentRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` @@ -1892,7 +2147,7 @@ type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism struct { func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Reset() { *x = XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1905,7 +2160,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) String( func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoMessage() {} func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1918,7 +2173,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoRe // Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism.ProtoReflect.Descriptor instead. func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0} } func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetCluster() string { @@ -1928,6 +2183,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetClus return "" } +// Deprecated: Do not use. 
func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetLrsLoadReportingServerName() *wrapperspb.StringValue { if x != nil { return x.LrsLoadReportingServerName @@ -1935,6 +2191,13 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetLrsL return nil } +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetLrsLoadReportingServer() *XdsServer { + if x != nil { + return x.LrsLoadReportingServer + } + return nil +} + func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetMaxConcurrentRequests() *wrapperspb.UInt32Value { if x != nil { return x.MaxConcurrentRequests @@ -1976,7 +2239,7 @@ type XdsClusterImplLoadBalancingPolicyConfig_DropCategory struct { func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Reset() { *x = XdsClusterImplLoadBalancingPolicyConfig_DropCategory{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + mi := &file_grpc_service_config_service_config_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1989,7 +2252,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) String() string { func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoMessage() {} func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + mi := &file_grpc_service_config_service_config_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2002,7 +2265,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() pr // Deprecated: Use XdsClusterImplLoadBalancingPolicyConfig_DropCategory.ProtoReflect.Descriptor instead. 
func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10, 0} } func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) GetCategory() string { @@ -2033,7 +2296,7 @@ type LrsLoadBalancingPolicyConfig_Locality struct { func (x *LrsLoadBalancingPolicyConfig_Locality) Reset() { *x = LrsLoadBalancingPolicyConfig_Locality{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + mi := &file_grpc_service_config_service_config_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2046,7 +2309,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) String() string { func (*LrsLoadBalancingPolicyConfig_Locality) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + mi := &file_grpc_service_config_service_config_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2059,7 +2322,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Mess // Deprecated: Use LrsLoadBalancingPolicyConfig_Locality.ProtoReflect.Descriptor instead. 
func (*LrsLoadBalancingPolicyConfig_Locality) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13, 0} } func (x *LrsLoadBalancingPolicyConfig_Locality) GetRegion() string { @@ -2117,7 +2380,7 @@ type ServiceConfig_RetryThrottlingPolicy struct { func (x *ServiceConfig_RetryThrottlingPolicy) Reset() { *x = ServiceConfig_RetryThrottlingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[25] + mi := &file_grpc_service_config_service_config_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2130,7 +2393,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) String() string { func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[25] + mi := &file_grpc_service_config_service_config_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2143,7 +2406,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Messag // Deprecated: Use ServiceConfig_RetryThrottlingPolicy.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16, 0} } func (x *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { @@ -2172,7 +2435,7 @@ type ServiceConfig_HealthCheckConfig struct { func (x *ServiceConfig_HealthCheckConfig) Reset() { *x = ServiceConfig_HealthCheckConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2185,7 +2448,7 @@ func (x *ServiceConfig_HealthCheckConfig) String() string { func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2198,7 +2461,7 @@ func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig_HealthCheckConfig.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 1} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16, 1} } func (x *ServiceConfig_HealthCheckConfig) GetServiceName() *wrapperspb.StringValue { @@ -2217,436 +2480,500 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xde, 0x08, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, - 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, - 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, - 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x03, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x19, 0x6d, 0x61, - 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x6d, 0x61, 0x78, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x52, - 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x58, 0x0a, 0x0e, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xde, 0x08, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, + 0x61, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, + 0x61, 0x64, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 
0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x59, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0c, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x58, 0x0a, 0x0e, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x64, 0x67, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x68, 0x65, 0x64, + 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x38, 0x0a, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x18, 0x0a, 
0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, + 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x3a, 0x0a, 0x0b, 0x6d, + 0x61, 0x78, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6d, 0x61, 0x78, + 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x6f, + 0x66, 0x66, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x14, 0x72, 
0x65, 0x74, 0x72, 0x79, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x1a, 0xb9, + 0x01, 0x0a, 0x0d, 0x48, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, + 0x70, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x6c, 0x61, 0x79, 0x12, 0x45, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x74, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x6e, 0x6f, 0x6e, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x12, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x6e, + 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x7e, 0x0a, 0x0c, + 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x0c, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 
0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x03, 0x0a, + 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, + 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, 0x40, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 
0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x72, 0x65, 0x73, + 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, + 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, + 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x02, + 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, - 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x68, - 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x38, 0x0a, 0x04, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, - 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, - 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x3a, 0x0a, - 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6d, - 0x61, 0x78, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x62, 0x61, 0x63, - 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x75, - 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x61, 0x62, 0x6c, 
0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, - 0x1a, 0xb9, 0x01, 0x0a, 0x0d, 0x48, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, 0x74, 0x74, - 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, - 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x45, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x74, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x6e, 0x6f, 0x6e, 0x46, 0x61, 0x74, 0x61, - 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x19, 0x0a, 0x17, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x69, 0x63, 0x6b, 0x46, - 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x12, 0x0a, 0x10, 0x52, 0x6f, - 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x7e, - 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, - 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xae, - 0x03, 0x0a, 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, - 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, - 0x12, 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x72, - 
0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x52, 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, - 0x68, 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xfe, 0x02, 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, - 0x1a, 0x6d, 0x0a, 0x06, 0x54, 0x61, 
0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x6d, + 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, 0x0a, + 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 
0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf2, + 0x02, 0x0a, 0x2a, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, + 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x54, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, + 0x64, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x82, + 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 
0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xa1, 0x02, 0x0a, 0x09, 0x58, + 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x12, 0x57, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, + 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, + 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x12, 0x43, 
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb6, + 0x06, 0x0a, 0x2b, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x86, + 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, 0x63, + 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, + 0x73, 0x6d, 0x52, 0x13, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, + 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x4c, 0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, 0x6c, + 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 
0x63, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xaf, 0x04, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x19, + 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, + 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, + 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, + 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6e, 0x73, + 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x45, 0x44, 0x53, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, + 0x4c, 0x5f, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x22, 0xa3, 0x05, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 
0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, + 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, + 0x19, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, + 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, + 
0x0a, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, + 0x72, 0x79, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, - 0x7f, 0x0a, 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, 0x73, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xd7, 0x05, 0x0a, 0x2b, 0x58, 0x64, 0x73, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x86, 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, - 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x13, 0x64, 0x69, 0x73, - 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, - 0x12, 0x4c, 0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, 0x6c, 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x5c, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, + 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x72, 0x65, 
0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, + 0x0a, 0x1c, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x5f, 
0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xd0, - 0x03, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, - 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, 0x72, 
0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, - 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6e, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x01, - 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, 0x53, 0x10, - 0x02, 0x22, 0xc4, 0x04, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 
0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, 0x0a, 0x0f, 0x64, 0x72, 0x6f, - 0x70, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x67, 0x52, 0x15, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, + 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 
0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x6d, 0x69, 0x6e, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6d, + 0x61, 0x78, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, + 0xa6, 0x03, 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x64, - 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, - 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x5c, 0x0a, 0x0c, 0x44, 0x72, - 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, - 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, - 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, - 0x72, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, 0x0a, 0x1c, 0x45, 0x64, 0x73, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 
0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, - 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, + 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x60, 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x69, 0x63, 0x6b, - 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x60, 0x0a, 
0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x69, - 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x65, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x69, - 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, - 0x61, 0x78, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa6, 0x03, 0x0a, 0x1c, 0x4c, - 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, - 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 
0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x08, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, + 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, + 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 
0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, + 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, + 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x9c, 0x0b, 0x0a, 0x13, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, + 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, + 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 
0x70, 0x63, 0x6c, 0x62, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, + 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, - 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, - 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, - 0x7a, 0x6f, 
0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x7a, - 0x6f, 0x6e, 0x65, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, - 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, - 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, - 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, - 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x88, 0x0a, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, - 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, - 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, - 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, - 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, - 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, - 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 
0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, - 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, - 0x67, 0x72, 0x70, 0x63, 
0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, - 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, - 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, - 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, - 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x5f, 0x0a, 0x10, 0x65, - 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, + 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 
0x10, 0x65, 0x64, 0x73, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, - 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x72, 0x73, - 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, - 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, - 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, - 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, - 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, - 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, - 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, 0x64, 0x73, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 
0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, + 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, + 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, + 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, + 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, + 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, + 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, + 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, + 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, - 
0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, - 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, - 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, - 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, + 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, + 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2662,7 +2989,7 @@ func file_grpc_service_config_service_config_proto_rawDescGZIP() []byte { } var 
file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type)(0), // 0: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type (ServiceConfig_LoadBalancingPolicy)(0), // 1: grpc.service_config.ServiceConfig.LoadBalancingPolicy @@ -2672,94 +2999,110 @@ var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ (*GrpcLbConfig)(nil), // 5: grpc.service_config.GrpcLbConfig (*PriorityLoadBalancingPolicyConfig)(nil), // 6: grpc.service_config.PriorityLoadBalancingPolicyConfig (*WeightedTargetLoadBalancingPolicyConfig)(nil), // 7: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - (*CdsConfig)(nil), // 8: grpc.service_config.CdsConfig - (*XdsClusterResolverLoadBalancingPolicyConfig)(nil), // 9: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - (*XdsClusterImplLoadBalancingPolicyConfig)(nil), // 10: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - (*EdsLoadBalancingPolicyConfig)(nil), // 11: grpc.service_config.EdsLoadBalancingPolicyConfig - (*RingHashLoadBalancingConfig)(nil), // 12: grpc.service_config.RingHashLoadBalancingConfig - (*LrsLoadBalancingPolicyConfig)(nil), // 13: grpc.service_config.LrsLoadBalancingPolicyConfig - (*XdsConfig)(nil), // 14: grpc.service_config.XdsConfig - (*LoadBalancingConfig)(nil), // 15: grpc.service_config.LoadBalancingConfig - (*ServiceConfig)(nil), // 16: grpc.service_config.ServiceConfig - (*MethodConfig_Name)(nil), // 17: grpc.service_config.MethodConfig.Name - (*MethodConfig_RetryPolicy)(nil), // 18: grpc.service_config.MethodConfig.RetryPolicy - (*MethodConfig_HedgingPolicy)(nil), // 19: 
grpc.service_config.MethodConfig.HedgingPolicy - (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 20: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - nil, // 21: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 22: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - nil, // 23: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 24: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 26: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - (*ServiceConfig_RetryThrottlingPolicy)(nil), // 27: grpc.service_config.ServiceConfig.RetryThrottlingPolicy - (*ServiceConfig_HealthCheckConfig)(nil), // 28: grpc.service_config.ServiceConfig.HealthCheckConfig - (*wrapperspb.BoolValue)(nil), // 29: google.protobuf.BoolValue - (*durationpb.Duration)(nil), // 30: google.protobuf.Duration - (*wrapperspb.UInt32Value)(nil), // 31: google.protobuf.UInt32Value - (*wrapperspb.StringValue)(nil), // 32: google.protobuf.StringValue - (code.Code)(0), // 33: google.rpc.Code + (*XdsClusterManagerLoadBalancingPolicyConfig)(nil), // 8: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig + (*CdsConfig)(nil), // 9: grpc.service_config.CdsConfig + (*XdsServer)(nil), // 10: grpc.service_config.XdsServer + (*XdsClusterResolverLoadBalancingPolicyConfig)(nil), // 11: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + (*XdsClusterImplLoadBalancingPolicyConfig)(nil), // 12: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + (*EdsLoadBalancingPolicyConfig)(nil), // 13: grpc.service_config.EdsLoadBalancingPolicyConfig + 
(*RingHashLoadBalancingConfig)(nil), // 14: grpc.service_config.RingHashLoadBalancingConfig + (*LrsLoadBalancingPolicyConfig)(nil), // 15: grpc.service_config.LrsLoadBalancingPolicyConfig + (*XdsConfig)(nil), // 16: grpc.service_config.XdsConfig + (*LoadBalancingConfig)(nil), // 17: grpc.service_config.LoadBalancingConfig + (*ServiceConfig)(nil), // 18: grpc.service_config.ServiceConfig + (*MethodConfig_Name)(nil), // 19: grpc.service_config.MethodConfig.Name + (*MethodConfig_RetryPolicy)(nil), // 20: grpc.service_config.MethodConfig.RetryPolicy + (*MethodConfig_HedgingPolicy)(nil), // 21: grpc.service_config.MethodConfig.HedgingPolicy + (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 22: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + nil, // 23: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 24: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + nil, // 25: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 26: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + nil, // 27: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + (*XdsServer_ChannelCredentials)(nil), // 28: grpc.service_config.XdsServer.ChannelCredentials + (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 29: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 30: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + (*ServiceConfig_RetryThrottlingPolicy)(nil), // 32: grpc.service_config.ServiceConfig.RetryThrottlingPolicy + (*ServiceConfig_HealthCheckConfig)(nil), // 33: 
grpc.service_config.ServiceConfig.HealthCheckConfig + (*wrapperspb.BoolValue)(nil), // 34: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 35: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 36: google.protobuf.UInt32Value + (*structpb.Value)(nil), // 37: google.protobuf.Value + (*wrapperspb.StringValue)(nil), // 38: google.protobuf.StringValue + (code.Code)(0), // 39: google.rpc.Code + (*structpb.Struct)(nil), // 40: google.protobuf.Struct } var file_grpc_service_config_service_config_proto_depIdxs = []int32{ - 17, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name - 29, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue - 30, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration - 31, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value - 31, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value - 18, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy - 19, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy - 15, // 7: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 21, // 8: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 23, // 9: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 24, // 10: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - 15, // 11: 
grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig - 32, // 12: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 31, // 13: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 25, // 14: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - 15, // 15: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 32, // 16: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 15, // 17: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 15, // 18: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 26, // 19: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 15, // 20: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 15, // 21: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 15, // 22: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 32, // 23: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 3, // 24: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig - 4, // 25: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig - 5, // 26: 
grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig - 6, // 27: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig - 7, // 28: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 8, // 29: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig - 9, // 30: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - 10, // 31: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - 11, // 32: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 12, // 33: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig - 13, // 34: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig - 14, // 35: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 14, // 36: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig - 1, // 37: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 15, // 38: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 39: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 27, // 40: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 28, // 41: 
grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 30, // 42: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 30, // 43: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 33, // 44: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 30, // 45: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 33, // 46: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 15, // 47: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 48: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 15, // 49: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 22, // 50: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 32, // 51: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 31, // 52: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 0, // 53: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type - 32, // 54: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 55, // [55:55] is the sub-list for method output_type - 55, // 
[55:55] is the sub-list for method input_type - 55, // [55:55] is the sub-list for extension type_name - 55, // [55:55] is the sub-list for extension extendee - 0, // [0:55] is the sub-list for field type_name + 19, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name + 34, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue + 35, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration + 36, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value + 36, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value + 20, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy + 21, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy + 17, // 7: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 23, // 8: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + 25, // 9: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + 27, // 10: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + 28, // 11: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials + 37, // 12: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value + 29, // 13: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + 17, // 14: 
grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig + 38, // 15: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 10, // 16: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 36, // 17: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 30, // 18: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + 17, // 19: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 38, // 20: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 17, // 21: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 17, // 22: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 31, // 23: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + 17, // 24: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 17, // 25: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 17, // 26: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig + 38, // 27: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 3, // 28: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig 
+ 4, // 29: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig + 5, // 30: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig + 6, // 31: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig + 7, // 32: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig + 8, // 33: grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig + 9, // 34: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig + 11, // 35: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + 12, // 36: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + 14, // 37: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig + 15, // 38: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig + 13, // 39: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig + 16, // 40: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig + 16, // 41: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig + 1, // 42: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy + 17, // 43: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig + 2, // 44: 
grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig + 32, // 45: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy + 33, // 46: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig + 35, // 47: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration + 35, // 48: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration + 39, // 49: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code + 35, // 50: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration + 39, // 51: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code + 17, // 52: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig + 22, // 53: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + 17, // 54: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 24, // 55: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + 17, // 56: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 26, // 57: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + 40, // 58: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct + 38, // 59: 
grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 10, // 60: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 36, // 61: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 0, // 62: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type + 38, // 63: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue + 64, // [64:64] is the sub-list for method output_type + 64, // [64:64] is the sub-list for method input_type + 64, // [64:64] is the sub-list for extension type_name + 64, // [64:64] is the sub-list for extension extendee + 0, // [0:64] is the sub-list for field type_name } func init() { file_grpc_service_config_service_config_proto_init() } @@ -2841,7 +3184,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CdsConfig); i { + switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2853,7 +3196,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig); i { + switch v := v.(*CdsConfig); i { case 0: return &v.state case 1: @@ -2865,7 +3208,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} 
{ - switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig); i { + switch v := v.(*XdsServer); i { case 0: return &v.state case 1: @@ -2877,7 +3220,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EdsLoadBalancingPolicyConfig); i { + switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2889,7 +3232,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RingHashLoadBalancingConfig); i { + switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2901,7 +3244,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LrsLoadBalancingPolicyConfig); i { + switch v := v.(*EdsLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2913,7 +3256,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsConfig); i { + switch v := v.(*RingHashLoadBalancingConfig); i { case 0: return &v.state case 1: @@ -2925,7 +3268,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancingConfig); i { + switch v := v.(*LrsLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -2937,7 +3280,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { 
- switch v := v.(*ServiceConfig); i { + switch v := v.(*XdsConfig); i { case 0: return &v.state case 1: @@ -2949,7 +3292,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_Name); i { + switch v := v.(*LoadBalancingConfig); i { case 0: return &v.state case 1: @@ -2961,7 +3304,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_RetryPolicy); i { + switch v := v.(*ServiceConfig); i { case 0: return &v.state case 1: @@ -2973,7 +3316,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_HedgingPolicy); i { + switch v := v.(*MethodConfig_Name); i { case 0: return &v.state case 1: @@ -2985,7 +3328,19 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { + switch v := v.(*MethodConfig_RetryPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodConfig_HedgingPolicy); i { case 0: return &v.state case 1: @@ -2997,7 +3352,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { + switch v := 
v.(*PriorityLoadBalancingPolicyConfig_Child); i { case 0: return &v.state case 1: @@ -3009,6 +3364,42 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig_Child); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XdsServer_ChannelCredentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism); i { case 0: return &v.state @@ -3020,7 +3411,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig_DropCategory); i { case 0: return &v.state @@ -3032,7 +3423,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + 
file_grpc_service_config_service_config_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LrsLoadBalancingPolicyConfig_Locality); i { case 0: return &v.state @@ -3044,7 +3435,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_RetryThrottlingPolicy); i { case 0: return &v.state @@ -3056,7 +3447,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_HealthCheckConfig); i { case 0: return &v.state @@ -3073,18 +3464,19 @@ func file_grpc_service_config_service_config_proto_init() { (*MethodConfig_RetryPolicy_)(nil), (*MethodConfig_HedgingPolicy_)(nil), } - file_grpc_service_config_service_config_proto_msgTypes[13].OneofWrappers = []interface{}{ + file_grpc_service_config_service_config_proto_msgTypes[15].OneofWrappers = []interface{}{ (*LoadBalancingConfig_PickFirst)(nil), (*LoadBalancingConfig_RoundRobin)(nil), (*LoadBalancingConfig_Grpclb)(nil), (*LoadBalancingConfig_PriorityExperimental)(nil), (*LoadBalancingConfig_WeightedTargetExperimental)(nil), + (*LoadBalancingConfig_XdsClusterManagerExperimental)(nil), (*LoadBalancingConfig_CdsExperimental)(nil), (*LoadBalancingConfig_XdsClusterResolverExperimental)(nil), (*LoadBalancingConfig_XdsClusterImplExperimental)(nil), - (*LoadBalancingConfig_EdsExperimental)(nil), (*LoadBalancingConfig_RingHashExperimental)(nil), (*LoadBalancingConfig_LrsExperimental)(nil), + (*LoadBalancingConfig_EdsExperimental)(nil), 
(*LoadBalancingConfig_Xds)(nil), (*LoadBalancingConfig_XdsExperimental)(nil), } @@ -3094,7 +3486,7 @@ func file_grpc_service_config_service_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_service_config_service_config_proto_rawDesc, NumEnums: 2, - NumMessages: 27, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, From 449f1b222a55ab3773ed6ec0117f82eec817821c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 25 Jan 2022 10:53:10 -0800 Subject: [PATCH 407/998] grpclb: rename LB policy config field to `serviceName` (#5166) --- balancer/grpclb/grpclb.go | 4 ++-- balancer/grpclb/grpclb_config.go | 2 +- balancer/grpclb/grpclb_config_test.go | 8 ++++---- balancer/grpclb/grpclb_test.go | 2 +- balancer/rls/config_test.go | 26 +++++++++++++------------- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index fe423af182a4..6c3402e36c60 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -413,8 +413,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { // this target is sent in the first message on the stream. 
if gc != nil { target := lb.dialTarget - if gc.TargetName != "" { - target = gc.TargetName + if gc.ServiceName != "" { + target = gc.ServiceName } if target != lb.target { lb.target = target diff --git a/balancer/grpclb/grpclb_config.go b/balancer/grpclb/grpclb_config.go index b4e23dee0172..8942c31310af 100644 --- a/balancer/grpclb/grpclb_config.go +++ b/balancer/grpclb/grpclb_config.go @@ -34,7 +34,7 @@ const ( type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage - TargetName string + ServiceName string } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { diff --git a/balancer/grpclb/grpclb_config_test.go b/balancer/grpclb/grpclb_config_test.go index 0db2299157ec..040908728793 100644 --- a/balancer/grpclb/grpclb_config_test.go +++ b/balancer/grpclb/grpclb_config_test.go @@ -46,13 +46,13 @@ func (s) TestParse(t *testing.T) { "childPolicy": [ {"pick_first":{}} ], - "targetName": "foo-service" + "serviceName": "foo-service" }`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"pick_first": json.RawMessage("{}")}, }, - TargetName: "foo-service", + ServiceName: "foo-service", }, }, { @@ -63,14 +63,14 @@ func (s) TestParse(t *testing.T) { {"round_robin":{}}, {"pick_first":{}} ], - "targetName": "foo-service" + "serviceName": "foo-service" }`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"round_robin": json.RawMessage("{}")}, {"pick_first": json.RawMessage("{}")}, }, - TargetName: "foo-service", + ServiceName: "foo-service", }, }, } diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 5f90a3827ee3..ed5297684dcd 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -1404,7 +1404,7 @@ func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { // Push the resolver update with target_field changed. 
// Push a resolver update with grpclb configuration containing the // target_name field. Our fake remote balancer has been updated above to expect the newServerName in the initial request. - lbCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"grpclb": {"targetName": "%s"}}]}`, newServerName) + lbCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"grpclb": {"serviceName": "%s"}}]}`, newServerName) rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(lbCfg)}, &grpclbstate.State{BalancerAddresses: []resolver.Address{{ Addr: tss.lbAddr, diff --git a/balancer/rls/config_test.go b/balancer/rls/config_test.go index 733666c28e6e..301a8219767b 100644 --- a/balancer/rls/config_test.go +++ b/balancer/rls/config_test.go @@ -82,7 +82,7 @@ func (s) TestParseConfig(t *testing.T) { {"unknown-policy": {"unknown-field": "unknown-value"}}, {"grpclb": {"childPolicy": [{"pickfirst": {}}]}} ], - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantCfg: &lbConfig{ lookupService: ":///target", @@ -92,10 +92,10 @@ func (s) TestParseConfig(t *testing.T) { cacheSizeBytes: maxCacheSize, defaultTarget: "passthrough:///default", childPolicyName: "grpclb", - childPolicyTargetField: "service_name", + childPolicyTargetField: "serviceName", childPolicyConfig: map[string]json.RawMessage{ - "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), - "service_name": json.RawMessage(childPolicyTargetFieldVal), + "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), + "serviceName": json.RawMessage(childPolicyTargetFieldVal), }, }, }, @@ -115,7 +115,7 @@ func (s) TestParseConfig(t *testing.T) { "defaultTarget": "passthrough:///default" }, "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantCfg: &lbConfig{ lookupService: "target", @@ -125,10 +125,10 @@ func (s) TestParseConfig(t *testing.T) { cacheSizeBytes: 
1000, defaultTarget: "passthrough:///default", childPolicyName: "grpclb", - childPolicyTargetField: "service_name", + childPolicyTargetField: "serviceName", childPolicyConfig: map[string]json.RawMessage{ - "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), - "service_name": json.RawMessage(childPolicyTargetFieldVal), + "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), + "serviceName": json.RawMessage(childPolicyTargetFieldVal), }, }, }, @@ -273,7 +273,7 @@ func (s) TestParseConfigErrors(t *testing.T) { "staleAge" : "25s", "defaultTarget": "passthrough:///default" }, - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantErr: "rls: cache_size_bytes must be set to a non-zero value", }, @@ -292,7 +292,7 @@ func (s) TestParseConfigErrors(t *testing.T) { "cacheSizeBytes": 1000, "defaultTarget": "passthrough:///default" }, - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantErr: "rls: invalid childPolicy config: no supported policies found", }, @@ -315,7 +315,7 @@ func (s) TestParseConfigErrors(t *testing.T) { {"cds_experimental": {"Cluster": "my-fav-cluster"}}, {"unknown-policy": {"unknown-field": "unknown-value"}} ], - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantErr: "rls: invalid childPolicy config: no supported policies found", }, @@ -340,7 +340,7 @@ func (s) TestParseConfigErrors(t *testing.T) { "unknown-policy": {"unknown-field": "unknown-value"} } ], - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantErr: "does not contain exactly 1 policy/config pair", }, @@ -387,7 +387,7 @@ func (s) TestParseConfigErrors(t *testing.T) { {"unknown-policy": {"unknown-field": "unknown-value"}}, {"grpclb": {"childPolicy": "not-an-array"}} ], - "childPolicyConfigTargetFieldName": "service_name" + 
"childPolicyConfigTargetFieldName": "serviceName" }`), wantErr: "rls: childPolicy config validation failed", }, From 231ca3b24ec9084567988ae9f9e8644a9c72c768 Mon Sep 17 00:00:00 2001 From: Roland Bracewell Shoemaker Date: Tue, 25 Jan 2022 13:32:27 -0800 Subject: [PATCH 408/998] security/advancedtls: fix CRL issuer comparison (#5130) Fix CRL issuer comparison issue --- security/advancedtls/crl.go | 50 +++++++++++++++---- security/advancedtls/crl_test.go | 33 ++++++++---- security/advancedtls/examples/go.sum | 1 + security/advancedtls/go.mod | 1 + security/advancedtls/go.sum | 1 + security/advancedtls/testdata/crl/2f11f022.r0 | 7 +++ 6 files changed, 73 insertions(+), 20 deletions(-) create mode 100644 security/advancedtls/testdata/crl/2f11f022.r0 diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go index 3931c1ec6298..7988cb27176d 100644 --- a/security/advancedtls/crl.go +++ b/security/advancedtls/crl.go @@ -27,6 +27,7 @@ import ( "encoding/asn1" "encoding/binary" "encoding/hex" + "encoding/pem" "errors" "fmt" "io/ioutil" @@ -34,6 +35,8 @@ import ( "strings" "time" + "golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" "google.golang.org/grpc/grpclog" ) @@ -83,6 +86,7 @@ type certificateListExt struct { CertList *pkix.CertificateList // RFC5280, 5.2.1, all conforming CRLs must have a AKID with the ID method. AuthorityKeyID []byte + RawIssuer []byte } const tagDirectoryName = 4 @@ -99,6 +103,11 @@ var ( ) // x509NameHash implements the OpenSSL X509_NAME_hash function for hashed directory lookups. +// +// NOTE: due to the behavior of asn1.Marshal, if the original encoding of the RDN sequence +// contains strings which do not use the ASN.1 PrintableString type, the name will not be +// re-encoded using those types, resulting in a hash which does not match that produced +// by OpenSSL. func x509NameHash(r pkix.RDNSequence) string { var canonBytes []byte // First, canonicalize all the strings. 
@@ -277,10 +286,7 @@ func checkCert(c *x509.Certificate, crlVerifyCrt []*x509.Certificate, cfg Revoca func checkCertRevocation(c *x509.Certificate, crl *certificateListExt) (RevocationStatus, error) { // Per section 5.3.3 we prime the certificate issuer with the CRL issuer. // Subsequent entries use the previous entry's issuer. - rawEntryIssuer, err := asn1.Marshal(crl.CertList.TBSCertList.Issuer) - if err != nil { - return RevocationUndetermined, err - } + rawEntryIssuer := crl.RawIssuer // Loop through all the revoked certificates. for _, revCert := range crl.CertList.TBSCertList.RevokedCertificates { @@ -456,10 +462,11 @@ func fetchCRL(loc string, rawIssuer []byte, cfg RevocationConfig) (*certificateL continue } - rawCRLIssuer, err := asn1.Marshal(certList.CertList.TBSCertList.Issuer) + rawCRLIssuer, err := extractCRLIssuer(crlBytes) if err != nil { - return nil, fmt.Errorf("asn1.Marshal(%v) failed err = %v", certList.CertList.TBSCertList.Issuer, err) + return nil, err } + certList.RawIssuer = rawCRLIssuer // RFC5280, 6.3.3 (b) Verify the issuer and scope of the complete CRL. if bytes.Equal(rawIssuer, rawCRLIssuer) { parsedCRL = certList @@ -478,10 +485,6 @@ func verifyCRL(crl *certificateListExt, rawIssuer []byte, chain []*x509.Certific // RFC5280, 6.3.3 (f) Obtain and validateate the certification path for the issuer of the complete CRL // We intentionally limit our CRLs to be signed with the same certificate path as the certificate // so we can use the chain from the connection. - rawCRLIssuer, err := asn1.Marshal(crl.CertList.TBSCertList.Issuer) - if err != nil { - return fmt.Errorf("asn1.Marshal(%v) failed err = %v", crl.CertList.TBSCertList.Issuer, err) - } for _, c := range chain { // Use the key where the subject and KIDs match. @@ -490,10 +493,35 @@ func verifyCRL(crl *certificateListExt, rawIssuer []byte, chain []*x509.Certific // "Conforming CRL issuers MUST use the key identifier method, and MUST // include this extension in all CRLs issued." 
// So, this is much simpler than RFC4158 and should be compatible. - if bytes.Equal(c.SubjectKeyId, crl.AuthorityKeyID) && bytes.Equal(c.RawSubject, rawCRLIssuer) { + if bytes.Equal(c.SubjectKeyId, crl.AuthorityKeyID) && bytes.Equal(c.RawSubject, crl.RawIssuer) { // RFC5280, 6.3.3 (g) Validate signature. return c.CheckCRLSignature(crl.CertList) } } return fmt.Errorf("verifyCRL: No certificates mached CRL issuer (%v)", crl.CertList.TBSCertList.Issuer) } + +var crlPemPrefix = []byte("-----BEGIN X509 CRL") + +// extractCRLIssuer extracts the raw ASN.1 encoding of the CRL issuer. Due to the design of +// pkix.CertificateList and pkix.RDNSequence, it is not possible to reliably marshal the +// parsed Issuer to it's original raw encoding. +func extractCRLIssuer(crlBytes []byte) ([]byte, error) { + if bytes.HasPrefix(crlBytes, crlPemPrefix) { + block, _ := pem.Decode(crlBytes) + if block != nil && block.Type == "X509 CRL" { + crlBytes = block.Bytes + } + } + + der := cryptobyte.String(crlBytes) + var issuer cryptobyte.String + if !der.ReadASN1(&der, cbasn1.SEQUENCE) || + !der.ReadASN1(&der, cbasn1.SEQUENCE) || + !der.SkipOptionalASN1(cbasn1.INTEGER) || + !der.SkipASN1(cbasn1.SEQUENCE) || + !der.ReadASN1Element(&issuer, cbasn1.SEQUENCE) { + return nil, errors.New("extractCRLIssuer: invalid ASN.1 encoding") + } + return issuer, nil +} diff --git a/security/advancedtls/crl_test.go b/security/advancedtls/crl_test.go index ec4483304c79..ef3eb85da1ef 100644 --- a/security/advancedtls/crl_test.go +++ b/security/advancedtls/crl_test.go @@ -337,7 +337,7 @@ func makeChain(t *testing.T, name string) []*x509.Certificate { return certChain } -func loadCRL(t *testing.T, path string) *pkix.CertificateList { +func loadCRL(t *testing.T, path string) *certificateListExt { b, err := ioutil.ReadFile(path) if err != nil { t.Fatalf("readFile(%v) failed err = %v", path, err) @@ -346,7 +346,15 @@ func loadCRL(t *testing.T, path string) *pkix.CertificateList { if err != nil { 
t.Fatalf("ParseCrl(%v) failed err = %v", path, err) } - return crl + crlExt, err := parseCRLExtensions(crl) + if err != nil { + t.Fatalf("parseCRLExtensions(%v) failed err = %v", path, err) + } + crlExt.RawIssuer, err = extractCRLIssuer(b) + if err != nil { + t.Fatalf("extractCRLIssuer(%v) failed err= %v", path, err) + } + return crlExt } func TestCachedCRL(t *testing.T) { @@ -450,11 +458,11 @@ func TestGetIssuerCRLCache(t *testing.T) { func TestVerifyCrl(t *testing.T) { tampered := loadCRL(t, testdata.Path("crl/1.crl")) // Change the signature so it won't verify - tampered.SignatureValue.Bytes[0]++ + tampered.CertList.SignatureValue.Bytes[0]++ verifyTests := []struct { desc string - crl *pkix.CertificateList + crl *certificateListExt certs []*x509.Certificate cert *x509.Certificate errWant string @@ -498,11 +506,7 @@ func TestVerifyCrl(t *testing.T) { for _, tt := range verifyTests { t.Run(tt.desc, func(t *testing.T) { - crlExt, err := parseCRLExtensions(tt.crl) - if err != nil { - t.Fatalf("parseCRLExtensions(%v) failed, err = %v", tt.crl.TBSCertList.Issuer, err) - } - err = verifyCRL(crlExt, tt.cert.RawIssuer, tt.certs) + err := verifyCRL(tt.crl, tt.cert.RawIssuer, tt.certs) switch { case tt.errWant == "" && err != nil: t.Errorf("Valid CRL did not verify err = %v", err) @@ -716,3 +720,14 @@ func TestVerifyConnection(t *testing.T) { }) } } + +func TestIssuerNonPrintableString(t *testing.T) { + rawIssuer, err := hex.DecodeString("300c310a300806022a030c023a29") + if err != nil { + t.Fatalf("failed to decode issuer: %s", err) + } + _, err = fetchCRL("", rawIssuer, RevocationConfig{RootDir: testdata.Path("crl")}) + if err != nil { + t.Fatalf("fetchCRL failed: %s", err) + } +} diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index d926d5ffba71..e84d5d32de1d 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -39,6 +39,7 @@ github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 75527018ee78..8d12b627596e 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -5,6 +5,7 @@ go 1.14 require ( github.com/google/go-cmp v0.5.1 // indirect github.com/hashicorp/golang-lru v0.5.4 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 google.golang.org/grpc v1.38.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index d926d5ffba71..e84d5d32de1d 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -39,6 +39,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/security/advancedtls/testdata/crl/2f11f022.r0 b/security/advancedtls/testdata/crl/2f11f022.r0 new file mode 100644 index 000000000000..e570f17ee2a5 --- /dev/null +++ b/security/advancedtls/testdata/crl/2f11f022.r0 @@ -0,0 +1,7 @@ +-----BEGIN X509 CRL----- +MIHnMFICAQEwDQYJKoZIhvcNAQEMBQAwDDEKMAgGAioDDAI6KRcNMDkxMTEwMjMw +MDAwWhcNMDkxMTExMDAwMDAwWqASMBAwDgYDVR0jBAcwBYADAQIDMA0GCSqGSIb3 +DQEBDAUAA4GBAMl2sjOjtOQ+OCsRyjM0IvqTn7lmdGJMvpYAym367JBamJPCbYrL +MifCjCA1ra7gG0MweZbpm4SG2YLakwi1/B+XhApQ5VVv5SwDn6Yy5zr9ePLEF7Iy +sP86e9s5XfOusLTW+Spre8q1vi7pJrRvUxhJGuUuLoM6Uhvh65ViilDJ +-----END X509 CRL----- From e27717498dbcfb0d4ce04c6b4e616fffdb2b4b6a Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 25 Jan 2022 14:55:48 -0800 Subject: [PATCH 409/998] xds/bootstrap: escape authority when populating resource name template (#5160) --- xds/internal/xdsclient/bootstrap/bootstrap.go | 3 ++- xds/internal/xdsclient/bootstrap/bootstrap_test.go | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index ecec170774c7..eef0e4fe6ff4 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -25,6 +25,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net/url" "strings" "github.com/golang/protobuf/jsonpb" @@ -401,7 +402,7 @@ func NewConfigFromContents(data []byte) (*Config, error) { // - if set, it must start with "xdstp:///" // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" for name, authority := range config.Authorities { - prefix := fmt.Sprintf("xdstp://%s", name) 
+ prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name)) if authority.ClientListenerResourceNameTemplate == "" { authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" continue diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 6b6933e97bf2..d681666077c5 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -862,7 +862,8 @@ func TestNewConfigWithFederation(t *testing.T) { }], "client_default_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", "authorities": { - "xds.td.com": { } + "xds.td.com": { }, + "#.com": { } } }`, // It's OK for an authority to not have servers. The top-level server @@ -956,6 +957,9 @@ func TestNewConfigWithFederation(t *testing.T) { "xds.td.com": { ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", }, + "#.com": { + ClientListenerResourceNameTemplate: "xdstp://%23.com/envoy.config.listener.v3.Listener/%s", + }, }, }, }, From 61a6a06b8879354998373f8bef56312ea07d6719 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 26 Jan 2022 11:02:23 -0800 Subject: [PATCH 410/998] server: handle context errors returned by service handler (#5156) --- interceptor.go | 9 ++++++--- server.go | 11 +++++++---- stream.go | 10 ++++++---- test/server_test.go | 35 +++++++++++++++++++++++++++++++++++ 4 files changed, 54 insertions(+), 11 deletions(-) diff --git a/interceptor.go b/interceptor.go index 668e0adcf0a9..bb96ef57be89 100644 --- a/interceptor.go +++ b/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. 
If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/server.go b/server.go index eadf9e05fd18..b24b6d53958d 100644 --- a/server.go +++ b/server.go @@ -1283,9 +1283,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1549,7 +1550,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { diff --git a/stream.go b/stream.go index 625d47b34e59..8cdd652e037b 100644 --- a/stream.go +++ b/stream.go @@ -46,10 +46,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used diff --git a/test/server_test.go b/test/server_test.go index 97f352328873..411e0aa3c23d 100644 --- a/test/server_test.go +++ b/test/server_test.go @@ -32,6 +32,41 @@ import ( type ctxKey string +// TestServerReturningContextError verifies that if a context error is returned +// by the service handler, the status will have the correct status code, not +// Unknown. 
+func (s) TestServerReturningContextError(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return nil, context.DeadlineExceeded + }, + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + return context.DeadlineExceeded + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) + if s, ok := status.FromError(err); !ok || s.Code() != codes.DeadlineExceeded { + t.Fatalf("ss.Client.EmptyCall() got error %v; want ", err) + } + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("unexpected error starting the stream: %v", err) + } + _, err = stream.Recv() + if s, ok := status.FromError(err); !ok || s.Code() != codes.DeadlineExceeded { + t.Fatalf("ss.Client.FullDuplexCall().Recv() got error %v; want ", err) + } + +} + func (s) TestChainUnaryServerInterceptor(t *testing.T) { var ( firstIntKey = ctxKey("firstIntKey") From 0a68f8aff020eeb868a4921672afe7773b3359e7 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 26 Jan 2022 11:39:10 -0800 Subject: [PATCH 411/998] xds/federation: support federation in LRS (#5128) --- .../balancer/cdsbalancer/cdsbalancer.go | 20 +++-- .../balancer/cdsbalancer/cdsbalancer_test.go | 14 +++- .../balancer/clusterimpl/balancer_test.go | 77 ++++++++++--------- .../balancer/clusterimpl/clusterimpl.go | 19 ++--- xds/internal/balancer/clusterimpl/config.go | 15 ++-- .../balancer/clusterimpl/config_test.go | 21 ++--- .../balancer/clusterresolver/config.go | 20 ++--- .../balancer/clusterresolver/config_test.go | 71 +++++++++++------ .../balancer/clusterresolver/configbuilder.go | 10 +-- .../clusterresolver/configbuilder_test.go | 62 +++++++-------- xds/internal/testutils/fakeclient/client.go | 4 +- 
xds/internal/xdsclient/attributes.go | 2 +- xds/internal/xdsclient/authority.go | 15 ++-- xds/internal/xdsclient/bootstrap/bootstrap.go | 57 ++++++++++---- .../xdsclient/bootstrap/bootstrap_test.go | 21 +++++ .../xdsclient/controller/loadreport.go | 7 +- .../xdsclient/controller/v2_cds_test.go | 2 +- xds/internal/xdsclient/loadreport.go | 24 +++--- xds/internal/xdsclient/loadreport_test.go | 20 ++++- xds/internal/xdsclient/xdsresource/name.go | 3 + .../xdsclient/xdsresource/type_cds.go | 22 +++++- .../xdsclient/xdsresource/unmarshal_cds.go | 11 ++- .../xdsresource/unmarshal_cds_test.go | 68 ++++++++-------- 23 files changed, 359 insertions(+), 226 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index f1149108507c..0be796c47bad 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -318,12 +318,20 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { EDSServiceName: cu.EDSServiceName, MaxConcurrentRequests: cu.MaxRequests, } - if cu.EnableLRS { - // An empty string here indicates that the cluster_resolver balancer should use the - // same xDS server for load reporting as it does for EDS - // requests/responses. - dms[i].LoadReportingServerName = new(string) - + if cu.LRSServerConfig == xdsresource.ClusterLRSServerSelf { + bootstrapConfig := b.xdsClient.BootstrapConfig() + parsedName := xdsresource.ParseName(cu.ClusterName) + if parsedName.Scheme == xdsresource.FederationScheme { + // Is a federation resource name, find the corresponding + // authority server config. + if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok { + dms[i].LoadReportingServer = cfg.XDSServer + } + } else { + // Not a federation resource name, use the default + // authority. 
+ dms[i].LoadReportingServer = bootstrapConfig.XDSServer + } } case xdsresource.ClusterTypeLogicalDNS: dms[i] = clusterresolver.DiscoveryMechanism{ diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 242f7fa64992..efa34dbab0e4 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -48,6 +49,11 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. ) +var defaultTestAuthorityServerConfig = &bootstrap.ServerConfig{ + ServerURI: "self_server", + CredsType: "self_creds", +} + type s struct { grpctest.Tester } @@ -209,8 +215,7 @@ func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *inter MaxConcurrentRequests: countMax, } if enableLRS { - discoveryMechanism.LoadReportingServerName = new(string) - + discoveryMechanism.LoadReportingServer = defaultTestAuthorityServerConfig } lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: []clusterresolver.DiscoveryMechanism{discoveryMechanism}, @@ -354,6 +359,9 @@ func (s) TestUpdateClientConnStateWithSameState(t *testing.T) { // to the edsBalancer. 
func (s) TestHandleClusterUpdate(t *testing.T) { xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) + xdsC.SetBootstrapConfig(&bootstrap.Config{ + XDSServer: defaultTestAuthorityServerConfig, + }) defer func() { cancel() cdsB.Close() @@ -367,7 +375,7 @@ func (s) TestHandleClusterUpdate(t *testing.T) { }{ { name: "happy-case-with-lrs", - cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName, EnableLRS: true}, + cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf}, wantCCS: edsCCS(serviceName, nil, true, nil), }, { diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 5abf37fcbf16..d444ecd4f4f3 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -41,6 +41,7 @@ import ( xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" ) @@ -48,15 +49,18 @@ const ( defaultTestTimeout = 1 * time.Second defaultShortTestTimeout = 100 * time.Microsecond - testClusterName = "test-cluster" - testServiceName = "test-eds-service" - testLRSServerName = "test-lrs-name" + testClusterName = "test-cluster" + testServiceName = "test-eds-service" ) var ( testBackendAddrs = []resolver.Address{ {Addr: "1.1.1.1:1"}, } + testLRSServerConfig = &bootstrap.ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + CredsType: "google_default", + } cmpOpts = cmp.Options{ cmpopts.EquateEmpty(), @@ -103,9 +107,9 @@ func (s) TestDropByCategory(t *testing.T) { if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ - Cluster: testClusterName, - EDSServiceName: 
testServiceName, - LoadReportingServerName: newString(testLRSServerName), + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, DropCategories: []DropConfig{{ Category: dropReason, RequestsPerMillion: million * dropNumerator / dropDenominator, @@ -125,8 +129,8 @@ func (s) TestDropByCategory(t *testing.T) { if err != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) } - if got.Server != testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) } sc1 := <-cc.NewSubConnCh @@ -191,9 +195,9 @@ func (s) TestDropByCategory(t *testing.T) { if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: newString(testLRSServerName), + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, DropCategories: []DropConfig{{ Category: dropReason2, RequestsPerMillion: million * dropNumerator2 / dropDenominator2, @@ -257,10 +261,10 @@ func (s) TestDropCircuitBreaking(t *testing.T) { if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), BalancerConfig: &LBConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: newString(testLRSServerName), - MaxConcurrentRequests: &maxRequest, + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: &maxRequest, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, @@ -276,8 +280,8 @@ func (s) 
TestDropCircuitBreaking(t *testing.T) { if err != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) } - if got.Server != testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) } sc1 := <-cc.NewSubConnCh @@ -605,9 +609,9 @@ func (s) TestLoadReporting(t *testing.T) { if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), BalancerConfig: &LBConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: newString(testLRSServerName), + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, // Locality: testLocality, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, @@ -624,8 +628,8 @@ func (s) TestLoadReporting(t *testing.T) { if err != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) } - if got.Server != testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) } sc1 := <-cc.NewSubConnCh @@ -720,9 +724,9 @@ func (s) TestUpdateLRSServer(t *testing.T) { if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), BalancerConfig: &LBConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: newString(""), + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, @@ -738,17 +742,21 @@ func (s) TestUpdateLRSServer(t *testing.T) { if err 
!= nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) } - if got.Server != "" { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, "") + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) } + testLRSServerConfig2 := &bootstrap.ServerConfig{ + ServerURI: "trafficdirector-another.googleapis.com:443", + CredsType: "google_default", + } // Update LRS server to a different name. if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), BalancerConfig: &LBConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: newString(testLRSServerName), + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig2, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, @@ -763,17 +771,16 @@ func (s) TestUpdateLRSServer(t *testing.T) { if err2 != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err2) } - if got2.Server != testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got2.Server, testLRSServerName) + if got2.Server != testLRSServerConfig2 { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got2.Server, testLRSServerConfig2) } // Update LRS server to nil, to disable LRS. 
if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), BalancerConfig: &LBConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LoadReportingServerName: nil, + Cluster: testClusterName, + EDSServiceName: testServiceName, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 03d357b1f4e9..0a6cf6ca9065 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -41,6 +41,7 @@ import ( xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/loadstore" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" ) @@ -104,7 +105,7 @@ type clusterImplBalancer struct { childLB balancer.Balancer cancelLoadReport func() edsServiceName string - lrsServerName *string + lrsServer *bootstrap.ServerConfig loadWrapper *loadstore.Wrapper clusterNameMu sync.Mutex @@ -171,22 +172,22 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { ) // Check if it's necessary to restart load report. - if b.lrsServerName == nil { - if newConfig.LoadReportingServerName != nil { + if b.lrsServer == nil { + if newConfig.LoadReportingServer != nil { // Old is nil, new is not nil, start new LRS. - b.lrsServerName = newConfig.LoadReportingServerName + b.lrsServer = newConfig.LoadReportingServer startNewLoadReport = true } // Old is nil, new is nil, do nothing. - } else if newConfig.LoadReportingServerName == nil { + } else if newConfig.LoadReportingServer == nil { // Old is not nil, new is nil, stop old, don't start new. 
- b.lrsServerName = newConfig.LoadReportingServerName + b.lrsServer = newConfig.LoadReportingServer stopOldLoadReport = true } else { // Old is not nil, new is not nil, compare string values, if // different, stop old and start new. - if *b.lrsServerName != *newConfig.LoadReportingServerName { - b.lrsServerName = newConfig.LoadReportingServerName + if *b.lrsServer != *newConfig.LoadReportingServer { + b.lrsServer = newConfig.LoadReportingServer stopOldLoadReport = true startNewLoadReport = true } @@ -206,7 +207,7 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { if startNewLoadReport { var loadStore *load.Store if b.xdsClient != nil { - loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(*b.lrsServerName) + loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(b.lrsServer) } b.loadWrapper.UpdateLoadStore(loadStore) } diff --git a/xds/internal/balancer/clusterimpl/config.go b/xds/internal/balancer/clusterimpl/config.go index 51ff654f6eb5..cfddc6fb2a1b 100644 --- a/xds/internal/balancer/clusterimpl/config.go +++ b/xds/internal/balancer/clusterimpl/config.go @@ -23,6 +23,7 @@ import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) // DropConfig contains the category, and drop ratio. 
@@ -35,12 +36,14 @@ type DropConfig struct { type LBConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` - Cluster string `json:"cluster,omitempty"` - EDSServiceName string `json:"edsServiceName,omitempty"` - LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` - MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` - DropCategories []DropConfig `json:"dropCategories,omitempty"` - ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` + Cluster string `json:"cluster,omitempty"` + EDSServiceName string `json:"edsServiceName,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. + LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + DropCategories []DropConfig `json:"dropCategories,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } func parseConfig(c json.RawMessage) (*LBConfig, error) { diff --git a/xds/internal/balancer/clusterimpl/config_test.go b/xds/internal/balancer/clusterimpl/config_test.go index 88bed5c182c0..b001b8fdf0a4 100644 --- a/xds/internal/balancer/clusterimpl/config_test.go +++ b/xds/internal/balancer/clusterimpl/config_test.go @@ -22,17 +22,22 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/roundrobin" _ "google.golang.org/grpc/balancer/weightedtarget" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const ( testJSONConfig = `{ "cluster": "test_cluster", "edsServiceName": "test-eds", - "lrsLoadReportingServerName": "lrs_server", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + 
"channel_creds": [ { "type": "google_default" } ] + }, "maxConcurrentRequests": 123, "dropCategories": [ { @@ -106,10 +111,10 @@ func TestParseConfig(t *testing.T) { name: "OK", js: testJSONConfig, want: &LBConfig{ - Cluster: "test_cluster", - EDSServiceName: "test-eds", - LoadReportingServerName: newString("lrs_server"), - MaxConcurrentRequests: newUint32(123), + Cluster: "test_cluster", + EDSServiceName: "test-eds", + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(123), DropCategories: []DropConfig{ {Category: "drop-1", RequestsPerMillion: 314}, {Category: "drop-2", RequestsPerMillion: 159}, @@ -128,17 +133,13 @@ func TestParseConfig(t *testing.T) { if (err != nil) != tt.wantErr { t.Fatalf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) } - if !cmp.Equal(got, tt.want) { + if !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(bootstrap.ServerConfig{}, "Creds")) { t.Errorf("parseConfig() got unexpected result, diff: %v", cmp.Diff(got, tt.want)) } }) } } -func newString(s string) *string { - return &s -} - func newUint32(i uint32) *uint32 { return &i } diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index a6a3cbab8040..363afd03ab2c 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -27,6 +27,7 @@ import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) // DiscoveryMechanismType is the type of discovery mechanism. @@ -84,11 +85,9 @@ func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { type DiscoveryMechanism struct { // Cluster is the cluster name. Cluster string `json:"cluster,omitempty"` - // LoadReportingServerName is the LRS server to send load reports to. If - // not present, load reporting will be disabled. 
If set to the empty string, - // load reporting will be sent to the same server that we obtained CDS data - // from. - LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. + LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` // MaxConcurrentRequests is the maximum number of outstanding requests can // be made to the upstream cluster. Default is 1024. MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` @@ -110,8 +109,6 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { switch { case dm.Cluster != b.Cluster: return false - case !equalStringP(dm.LoadReportingServerName, b.LoadReportingServerName): - return false case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): return false case dm.Type != b.Type: @@ -121,17 +118,14 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { case dm.DNSHostname != b.DNSHostname: return false } - return true -} -func equalStringP(a, b *string) bool { - if a == nil && b == nil { + if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { return true } - if a == nil || b == nil { + if (dm.LoadReportingServer != nil) != (b.LoadReportingServer != nil) { return false } - return *a == *b + return dm.LoadReportingServer.String() == b.LoadReportingServer.String() } func equalUint32P(a, b *uint32) bool { diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index 796f8a493722..fb859e75ba4b 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/internal/balancer/stub" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/ringhash" + 
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) func TestDiscoveryMechanismTypeMarshalJSON(t *testing.T) { @@ -102,7 +103,10 @@ const ( testJSONConfig1 = `{ "discoveryMechanisms": [{ "cluster": "test-cluster-name", - "lrsLoadReportingServerName": "test-lrs-server", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, "maxConcurrentRequests": 314, "type": "EDS", "edsServiceName": "test-eds-service-name" @@ -111,7 +115,10 @@ const ( testJSONConfig2 = `{ "discoveryMechanisms": [{ "cluster": "test-cluster-name", - "lrsLoadReportingServerName": "test-lrs-server", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, "maxConcurrentRequests": 314, "type": "EDS", "edsServiceName": "test-eds-service-name" @@ -122,7 +129,10 @@ const ( testJSONConfig3 = `{ "discoveryMechanisms": [{ "cluster": "test-cluster-name", - "lrsLoadReportingServerName": "test-lrs-server", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, "maxConcurrentRequests": 314, "type": "EDS", "edsServiceName": "test-eds-service-name" @@ -132,7 +142,10 @@ const ( testJSONConfig4 = `{ "discoveryMechanisms": [{ "cluster": "test-cluster-name", - "lrsLoadReportingServerName": "test-lrs-server", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, "maxConcurrentRequests": 314, "type": "EDS", "edsServiceName": "test-eds-service-name" @@ -142,7 +155,10 @@ const ( testJSONConfig5 = `{ "discoveryMechanisms": [{ "cluster": "test-cluster-name", - "lrsLoadReportingServerName": "test-lrs-server", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, "maxConcurrentRequests": 314, 
"type": "EDS", "edsServiceName": "test-eds-service-name" @@ -151,6 +167,11 @@ const ( }` ) +var testLRSServerConfig = &bootstrap.ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + CredsType: "google_default", +} + func TestParseConfig(t *testing.T) { tests := []struct { name string @@ -170,11 +191,11 @@ func TestParseConfig(t *testing.T) { want: &LBConfig{ DiscoveryMechanisms: []DiscoveryMechanism{ { - Cluster: testClusterName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, }, }, XDSLBPolicy: nil, @@ -187,11 +208,11 @@ func TestParseConfig(t *testing.T) { want: &LBConfig{ DiscoveryMechanisms: []DiscoveryMechanism{ { - Cluster: testClusterName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, }, { Type: DiscoveryMechanismTypeLogicalDNS, @@ -207,11 +228,11 @@ func TestParseConfig(t *testing.T) { want: &LBConfig{ DiscoveryMechanisms: []DiscoveryMechanism{ { - Cluster: testClusterName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, }, }, XDSLBPolicy: &internalserviceconfig.BalancerConfig{ @@ -227,11 +248,11 
@@ func TestParseConfig(t *testing.T) { want: &LBConfig{ DiscoveryMechanisms: []DiscoveryMechanism{ { - Cluster: testClusterName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServcie, }, }, XDSLBPolicy: &internalserviceconfig.BalancerConfig{ diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index c2404b387b60..4cce16ff9a3d 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -257,11 +257,11 @@ var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Na // addresses. func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { clusterImplCfg := &clusterimpl.LBConfig{ - Cluster: mechanism.Cluster, - EDSServiceName: mechanism.EDSServiceName, - LoadReportingServerName: mechanism.LoadReportingServerName, - MaxConcurrentRequests: mechanism.MaxConcurrentRequests, - DropCategories: drops, + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + LoadReportingServer: mechanism.LoadReportingServer, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + DropCategories: drops, // ChildPolicy is not set. 
Will be set based on xdsLBPolicy } diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 011500f4bc9a..607f7b222419 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -125,11 +125,11 @@ func TestBuildPriorityConfigJSON(t *testing.T) { gotConfig, _, err := buildPriorityConfigJSON([]priorityConfig{ { mechanism: DiscoveryMechanism{ - Cluster: testClusterName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServiceName, + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, }, edsResp: xdsresource.EndpointsUpdate{ Drops: []xdsresource.OverloadDropConfig{ @@ -175,11 +175,11 @@ func TestBuildPriorityConfig(t *testing.T) { gotConfig, gotAddrs, _ := buildPriorityConfig([]priorityConfig{ { mechanism: DiscoveryMechanism{ - Cluster: testClusterName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServiceName, + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, }, edsResp: xdsresource.EndpointsUpdate{ Drops: []xdsresource.OverloadDropConfig{ @@ -212,10 +212,10 @@ func TestBuildPriorityConfig(t *testing.T) { Config: &internalserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ - Cluster: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), + Cluster: 
testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), DropCategories: []clusterimpl.DropConfig{ { Category: testDropCategory, @@ -245,10 +245,10 @@ func TestBuildPriorityConfig(t *testing.T) { Config: &internalserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ - Cluster: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), DropCategories: []clusterimpl.DropConfig{ { Category: testDropCategory, @@ -369,11 +369,11 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { }, }, DiscoveryMechanism{ - Cluster: testClusterName, - MaxConcurrentRequests: newUint32(testMaxRequests), - LoadReportingServerName: newString(testLRSServer), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServiceName, + Cluster: testClusterName, + MaxConcurrentRequests: newUint32(testMaxRequests), + LoadReportingServer: testLRSServerConfig, + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, }, nil, ) @@ -384,10 +384,10 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { } wantConfigs := map[string]*clusterimpl.LBConfig{ "priority-2-0": { - Cluster: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), DropCategories: []clusterimpl.DropConfig{ { Category: testDropCategory, @@ -411,10 +411,10 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { }, }, "priority-2-1": { - Cluster: testClusterName, - 
EDSServiceName: testEDSServiceName, - LoadReportingServerName: newString(testLRSServer), - MaxConcurrentRequests: newUint32(testMaxRequests), + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), DropCategories: []clusterimpl.DropConfig{ { Category: testDropCategory, diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 871aa7288c63..3ab57bbd489d 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -242,11 +242,11 @@ func (xdsC *Client) WaitForCancelEDSWatch(ctx context.Context) (string, error) { // ReportLoadArgs wraps the arguments passed to ReportLoad. type ReportLoadArgs struct { // Server is the name of the server to which the load is reported. - Server string + Server *bootstrap.ServerConfig } // ReportLoad starts reporting load about clusterName to server. -func (xdsC *Client) ReportLoad(server string) (loadStore *load.Store, cancel func()) { +func (xdsC *Client) ReportLoad(server *bootstrap.ServerConfig) (loadStore *load.Store, cancel func()) { xdsC.loadReportCh.Send(ReportLoadArgs{Server: server}) return xdsC.loadStore, func() { xdsC.lrsCancelCh.Send(nil) diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index 64f87f296591..514181627361 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -36,7 +36,7 @@ type XDSClient interface { WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func()) - ReportLoad(server string) (*load.Store, func()) + ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) DumpLDS() map[string]xdsresource.UpdateWithMD DumpRDS() 
map[string]xdsresource.UpdateWithMD diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 6cc4c117755c..1a236849c377 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -27,8 +27,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -const federationScheme = "xdstp" - // findAuthority returns the authority for this name. If it doesn't already // exist, one will be created. // @@ -49,7 +47,7 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun } config := c.config.XDSServer - if scheme == federationScheme { + if scheme == xdsresource.FederationScheme { cfg, ok := c.config.Authorities[authority] if !ok { return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) @@ -78,6 +76,9 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun // newAuthority creates a new authority for the config. But before that, it // checks the cache to see if an authority for this config already exists. // +// The caller must take a reference of the returned authority before using, and +// unref afterwards. +// // caller must hold c.authorityMu func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, retErr error) { // First check if there's already an authority for this config. If found, it @@ -219,8 +220,12 @@ func (a *authority) watchEndpoints(clusterName string, cb func(xdsresource.Endpo } } -func (a *authority) reportLoad(server string) (*load.Store, func()) { - return a.controller.ReportLoad(server) +func (a *authority) reportLoad() (*load.Store, func()) { + // An empty string means to report load to the same same used for ADS. There + // should never be a need to specify a string other than an empty string. If + // a different server is to be used, a different authority (controller) will + // be created. 
+ return a.controller.ReportLoad("") } func (a *authority) dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD { diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index eef0e4fe6ff4..15aed44eb73f 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -107,19 +107,26 @@ func (sc *ServerConfig) String() string { return strings.Join([]string{sc.ServerURI, sc.CredsType, ver}, "-") } -// UnmarshalJSON takes the json data (a list of servers) and unmarshals the -// first one in the list. -func (sc *ServerConfig) UnmarshalJSON(data []byte) error { - var servers []*xdsServer - if err := json.Unmarshal(data, &servers); err != nil { - return fmt.Errorf("xds: json.Unmarshal(data) for field xds_servers failed during bootstrap: %v", err) +// MarshalJSON marshals the ServerConfig to json. +func (sc ServerConfig) MarshalJSON() ([]byte, error) { + server := xdsServer{ + ServerURI: sc.ServerURI, + ChannelCreds: []channelCreds{{Type: sc.CredsType, Config: nil}}, } - if len(servers) < 1 { - return fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any management server to connect to") + if sc.TransportAPI == version.TransportV3 { + server.ServerFeatures = []string{serverFeaturesV3} + } + return json.Marshal(server) +} + +// UnmarshalJSON takes the json data (a server) and unmarshals it to the struct. +func (sc *ServerConfig) UnmarshalJSON(data []byte) error { + var server xdsServer + if err := json.Unmarshal(data, &server); err != nil { + return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err) } - xs := servers[0] - sc.ServerURI = xs.ServerURI - for _, cc := range xs.ChannelCreds { + sc.ServerURI = server.ServerURI + for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. 
sc.CredsType = cc.Type if cc.Type == credsGoogleDefault { @@ -130,7 +137,7 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { break } } - for _, f := range xs.ServerFeatures { + for _, f := range server.ServerFeatures { if f == serverFeaturesV3 { sc.TransportAPI = version.TransportV3 } @@ -138,6 +145,18 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { return nil } +// unmarshalJSONServerConfigSlice unmarshals JSON to a slice. +func unmarshalJSONServerConfigSlice(data []byte) ([]*ServerConfig, error) { + var servers []*ServerConfig + if err := json.Unmarshal(data, &servers); err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON to []*ServerConfig: %v", err) + } + if len(servers) < 1 { + return nil, fmt.Errorf("no management server found in JSON") + } + return servers, nil +} + // Authority contains configuration for an Authority for an xDS control plane // server. See the Authorities field in the Config struct for how it's used. type Authority struct { @@ -170,9 +189,11 @@ func (a *Authority) UnmarshalJSON(data []byte) error { for k, v := range jsonData { switch k { case "xds_servers": - if err := json.Unmarshal(v, &a.XDSServer); err != nil { - return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + servers, err := unmarshalJSONServerConfigSlice(v) + if err != nil { + return fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) } + a.XDSServer = servers[0] case "client_listener_resource_name_template": if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil { return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) @@ -243,7 +264,7 @@ type Config struct { type channelCreds struct { Type string `json:"type"` - Config json.RawMessage `json:"config"` + Config json.RawMessage `json:"config,omitempty"` } type xdsServer struct { @@ -325,9 +346,11 @@ func NewConfigFromContents(data 
[]byte) (*Config, error) { return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } case "xds_servers": - if err := json.Unmarshal(v, &config.XDSServer); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + servers, err := unmarshalJSONServerConfigSlice(v) + if err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) } + config.XDSServer = servers[0] case "certificate_providers": var providerInstances map[string]json.RawMessage if err := json.Unmarshal(v, &providerInstances); err != nil { diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index d681666077c5..573a3fca1730 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -994,3 +994,24 @@ func TestNewConfigWithFederation(t *testing.T) { }) } } + +func TestServerConfigMarshalAndUnmarshal(t *testing.T) { + c := ServerConfig{ + ServerURI: "test-server", + Creds: nil, + CredsType: "test-creds", + TransportAPI: version.TransportV3, + } + + bs, err := json.Marshal(c) + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + var cUnmarshal ServerConfig + if err := json.Unmarshal(bs, &cUnmarshal); err != nil { + t.Fatalf("failed to unmarshal: %v", err) + } + if diff := cmp.Diff(cUnmarshal, c); diff != "" { + t.Fatalf("diff (-got +want): %v", diff) + } +} diff --git a/xds/internal/xdsclient/controller/loadreport.go b/xds/internal/xdsclient/controller/loadreport.go index f8cfd017e415..a28cc95dc6f6 100644 --- a/xds/internal/xdsclient/controller/loadreport.go +++ b/xds/internal/xdsclient/controller/loadreport.go @@ -35,9 +35,10 @@ import ( // It returns a Store for the user to report loads, a function to cancel the // load reporting stream. 
// -// TODO: LRS refactor; maybe a new controller should be created for a separate -// server, so that the same stream can be shared by different reporters to the -// same server, even if they originate from different Controllers. +// TODO(xdsfed): LRS refactor, delete the parameter of this function, and +// cleanup the multiple LRS ClientConn code. Each controller should have one +// ClientConn to the authority it's created for, all LRS streams (and ADS +// streams) in this controller should all share that ClientConn. func (c *Controller) ReportLoad(server string) (*load.Store, func()) { c.lrsMu.Lock() defer c.lrsMu.Unlock() diff --git a/xds/internal/xdsclient/controller/v2_cds_test.go b/xds/internal/xdsclient/controller/v2_cds_test.go index 20485dc1c280..d262b53a46bf 100644 --- a/xds/internal/xdsclient/controller/v2_cds_test.go +++ b/xds/internal/xdsclient/controller/v2_cds_test.go @@ -138,7 +138,7 @@ func (s) TestCDSHandleResponse(t *testing.T) { cdsResponse: goodCDSResponse1, wantErr: false, wantUpdate: map[string]xdsresource.ClusterUpdateErrTuple{ - goodClusterName1: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}}, + goodClusterName1: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, LRSServerConfig: xdsresource.ClusterLRSServerSelf, Raw: marshaledCluster1}}, }, wantUpdateMD: xdsresource.UpdateMetadata{ Status: xdsresource.ServiceStatusACKed, diff --git a/xds/internal/xdsclient/loadreport.go b/xds/internal/xdsclient/loadreport.go index d157731c789b..32c7e9c9d791 100644 --- a/xds/internal/xdsclient/loadreport.go +++ b/xds/internal/xdsclient/loadreport.go @@ -18,30 +18,26 @@ package xdsclient import ( + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// ReportLoad starts an load reporting stream to the 
given server. If the server -// is not an empty string, and is different from the management server, a new -// ClientConn will be created. -// -// The same options used for creating the Client will be used (including -// NodeProto, and dial options if necessary). +// ReportLoad starts a load reporting stream to the given server. All load +// reports to the same server share the LRS stream. // // It returns a Store for the user to report loads, a function to cancel the // load reporting stream. -func (c *clientImpl) ReportLoad(server string) (*load.Store, func()) { - // TODO: load reporting with federation also needs find the authority for - // this server first, then reports load to it. Currently always report to - // the default authority. This is needed to avoid a nil pointer panic. - a, unref, err := c.findAuthority(xdsresource.ParseName("")) +func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) { + a, err := c.newAuthority(server) if err != nil { + c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err) return nil, func() {} } - store, cancelF := a.reportLoad(server) + // Hold the ref before starting load reporting. + a.ref() + store, cancelF := a.reportLoad() return store, func() { cancelF() - unref() + c.unrefAuthority(a) } } diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 92b5ab6482d8..3c564ea97c31 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -67,7 +67,15 @@ func (s) TestLRSClient(t *testing.T) { defer cancel() // Report to the same address should not create new ClientConn. 
- store1, lrsCancel1 := xdsC.ReportLoad(fs.Address) + store1, lrsCancel1 := xdsC.ReportLoad( + &bootstrap.ServerConfig{ + ServerURI: fs.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV2, + NodeProto: &v2corepb.Node{}, + }, + ) defer lrsCancel1() if u, err := fs.NewConnChan.Receive(ctx); err != nil { @@ -87,7 +95,15 @@ func (s) TestLRSClient(t *testing.T) { defer sCleanup2() // Report to a different address should create new ClientConn. - store2, lrsCancel2 := xdsC.ReportLoad(fs2.Address) + store2, lrsCancel2 := xdsC.ReportLoad( + &bootstrap.ServerConfig{ + ServerURI: fs2.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV2, + NodeProto: &v2corepb.Node{}, + }, + ) defer lrsCancel2() if u, err := fs2.NewConnChan.Receive(ctx); err != nil { t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) diff --git a/xds/internal/xdsclient/xdsresource/name.go b/xds/internal/xdsclient/xdsresource/name.go index 076e3d617a44..eb1ee323cee9 100644 --- a/xds/internal/xdsclient/xdsresource/name.go +++ b/xds/internal/xdsclient/xdsresource/name.go @@ -25,6 +25,9 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +// FederationScheme is the scheme of a federation resource name. +const FederationScheme = "xdstp" + // Name contains the parsed component of an xDS resource name. // // An xDS resource name is in the format of diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go index c200380be26f..ce3438c121f7 100644 --- a/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/xds/internal/xdsclient/xdsresource/type_cds.go @@ -17,7 +17,9 @@ package xdsresource -import "google.golang.org/protobuf/types/known/anypb" +import ( + "google.golang.org/protobuf/types/known/anypb" +) // ClusterType is the type of cluster from a received CDS response. 
type ClusterType int @@ -35,6 +37,18 @@ const ( ClusterTypeAggregate ) +// ClusterLRSServerConfigType is the type of LRS server config. +type ClusterLRSServerConfigType int + +const ( + // ClusterLRSOff indicates LRS is off (loads are not reported for this + // cluster). + ClusterLRSOff ClusterLRSServerConfigType = iota + // ClusterLRSServerSelf indicates loads should be reported to the same + // server (the authority) where the CDS resp is received from. + ClusterLRSServerSelf +) + // ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its // config. type ClusterLBPolicyRingHash struct { @@ -51,8 +65,10 @@ type ClusterUpdate struct { // EDSServiceName is an optional name for EDS. If it's not set, the balancer // should watch ClusterName for the EDS resources. EDSServiceName string - // EnableLRS indicates whether or not load should be reported through LRS. - EnableLRS bool + // LRSServerConfig contains the server where the load reports should be sent + // to. This can be change to an interface, to support other types, e.g. a + // ServerConfig with ServerURI, creds. + LRSServerConfig ClusterLRSServerConfigType // SecurityCfg contains security configuration sent by the control plane. SecurityCfg *SecurityConfig // MaxRequests for circuit breaking, if any (otherwise nil). 
diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 5b34c1ae6e1e..2b8d8d3aaddf 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -127,12 +127,21 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu ret := ClusterUpdate{ ClusterName: cluster.GetName(), - EnableLRS: cluster.GetLrsServer().GetSelf() != nil, SecurityCfg: sc, MaxRequests: circuitBreakersFromCluster(cluster), LBPolicy: lbPolicy, } + // Note that this is different from the gRFC (gRFC A47 says to include the + // full ServerConfig{URL,creds,server feature} here). This information is + // not available here, because this function doesn't have access to the + // xdsclient bootstrap information now (can be added if necessary). The + // ServerConfig will be read and populated by the CDS balancer when + // processing this field. + if cluster.GetLrsServer().GetSelf() != nil { + ret.LRSServerConfig = ClusterLRSServerSelf + } + // Validate and set cluster type from the response. 
switch { case cluster.GetType() == v3clusterpb.Cluster_EDS: diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index dd2f72e0fada..4aad9308fc03 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -46,7 +46,7 @@ const ( serviceName = "service" ) -var emptyUpdate = ClusterUpdate{ClusterName: clusterName, EnableLRS: false} +var emptyUpdate = ClusterUpdate{ClusterName: clusterName, LRSServerConfig: ClusterLRSOff} func (s) TestValidateCluster_Failure(t *testing.T) { tests := []struct { @@ -263,7 +263,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, EnableLRS: false, ClusterType: ClusterTypeAggregate, + ClusterName: clusterName, LRSServerConfig: ClusterLRSOff, ClusterType: ClusterTypeAggregate, PrioritizedClusterNames: []string{"a", "b", "c"}, }, }, @@ -298,7 +298,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: false}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSOff}, }, { name: "happiest-case", @@ -320,7 +320,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true}, + wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf}, }, { name: "happiest-case-with-circuitbreakers", @@ -354,7 +354,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, + 
wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, }, { name: "happiest-case-with-ring-hash-lb-policy-with-default-config", @@ -377,7 +377,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, + ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf, LBPolicy: &ClusterLBPolicyRingHash{MinimumRingSize: defaultRingHashMinSize, MaximumRingSize: defaultRingHashMaxSize}, }, }, @@ -408,7 +408,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, EDSServiceName: serviceName, EnableLRS: true, + ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf, LBPolicy: &ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, }, @@ -468,9 +468,9 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { }, } wantUpdate := ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, } gotUpdate, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { @@ -1082,9 +1082,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -1124,9 +1124,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + 
ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -1168,9 +1168,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -1216,9 +1216,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -1276,9 +1276,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -1343,9 +1343,9 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { }, }, wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - EDSServiceName: serviceName, - EnableLRS: false, + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, SecurityCfg: &SecurityConfig{ RootInstanceName: rootPluginInstance, RootCertName: rootCertName, @@ -1489,7 +1489,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { wantUpdate: map[string]ClusterUpdateErrTuple{ v2ClusterName: {Update: ClusterUpdate{ ClusterName: v2ClusterName, - EDSServiceName: v2Service, EnableLRS: true, + EDSServiceName: v2Service, 
LRSServerConfig: ClusterLRSServerSelf, Raw: v2ClusterAny, }}, }, @@ -1504,7 +1504,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { wantUpdate: map[string]ClusterUpdateErrTuple{ v3ClusterName: {Update: ClusterUpdate{ ClusterName: v3ClusterName, - EDSServiceName: v3Service, EnableLRS: true, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, Raw: v3ClusterAny, }}, }, @@ -1519,12 +1519,12 @@ func (s) TestUnmarshalCluster(t *testing.T) { wantUpdate: map[string]ClusterUpdateErrTuple{ v2ClusterName: {Update: ClusterUpdate{ ClusterName: v2ClusterName, - EDSServiceName: v2Service, EnableLRS: true, + EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, Raw: v2ClusterAny, }}, v3ClusterName: {Update: ClusterUpdate{ ClusterName: v3ClusterName, - EDSServiceName: v3Service, EnableLRS: true, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, Raw: v3ClusterAny, }}, }, @@ -1548,12 +1548,12 @@ func (s) TestUnmarshalCluster(t *testing.T) { wantUpdate: map[string]ClusterUpdateErrTuple{ v2ClusterName: {Update: ClusterUpdate{ ClusterName: v2ClusterName, - EDSServiceName: v2Service, EnableLRS: true, + EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, Raw: v2ClusterAny, }}, v3ClusterName: {Update: ClusterUpdate{ ClusterName: v3ClusterName, - EDSServiceName: v3Service, EnableLRS: true, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, Raw: v3ClusterAny, }}, "bad": {Err: cmpopts.AnyError}, From d31dbe7ad3a45f38cd482213867b19a9378b2dee Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 26 Jan 2022 11:39:28 -0800 Subject: [PATCH 412/998] xds/resource: accept Self as LDS's RDS config source and CDS's EDS config source (#5152) --- .../xdsclient/xdsresource/unmarshal_cds.go | 4 +-- .../xdsresource/unmarshal_cds_test.go | 32 +++++++++++++++++ .../xdsclient/xdsresource/unmarshal_lds.go | 4 +-- .../xdsresource/unmarshal_lds_test.go | 36 ++++++++++++++++++- 4 files changed, 71 insertions(+), 5 deletions(-) diff --git 
a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 2b8d8d3aaddf..8501e9e100e9 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -145,8 +145,8 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // Validate and set cluster type from the response. switch { case cluster.GetType() == v3clusterpb.Cluster_EDS: - if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { - return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) + if configsource := cluster.GetEdsClusterConfig().GetEdsConfig(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS config source is not ADS or Self: %+v", cluster) } ret.ClusterType = ClusterTypeEDS ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 4aad9308fc03..1a303074098d 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -1421,6 +1421,23 @@ func (s) TestUnmarshalCluster(t *testing.T) { }, }, }) + + v3ClusterAnyWithEDSConfigSourceSelf = testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: v3ClusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{}, + }, + ServiceName: v3Service, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }) ) const testVersion = "test-version-cds" @@ -1513,6 +1530,21 @@ func (s) TestUnmarshalCluster(t 
*testing.T) { Version: testVersion, }, }, + { + name: "v3 cluster with EDS config source self", + resources: []*anypb.Any{v3ClusterAnyWithEDSConfigSourceSelf}, + wantUpdate: map[string]ClusterUpdateErrTuple{ + v3ClusterName: {Update: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, EnableLRS: true, + Raw: v3ClusterAnyWithEDSConfigSourceSelf, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, { name: "multiple clusters", resources: []*anypb.Any{v2ClusterAny, v3ClusterAny}, diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index f9663d05bee3..b259c7b87e4c 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -102,8 +102,8 @@ func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi switch apiLis.RouteSpecifier.(type) { case *v3httppb.HttpConnectionManager_Rds: - if apiLis.GetRds().GetConfigSource().GetAds() == nil { - return nil, fmt.Errorf("ConfigSource is not ADS: %+v", lis) + if configsource := apiLis.GetRds().GetConfigSource(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return nil, fmt.Errorf("LDS's RDS configSource is not ADS or Self: %+v", lis) } name := apiLis.GetRds().GetRouteConfigName() if name == "" { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index a5c53886ecbd..4444421a4929 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -204,6 +204,25 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, }) } + + v3ListenerWithCDSConfigSourceSelf = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + 
RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }) + errMD = UpdateMetadata{ Status: ServiceStatusNACKed, Version: testVersion, @@ -294,7 +313,22 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { wantErr: true, }, { - name: "rds.ConfigSource in apiListener is not ADS", + name: "rds.ConfigSource in apiListener is Self", + resources: []*anypb.Any{v3ListenerWithCDSConfigSourceSelf}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + HTTPFilters: []HTTPFilter{routerFilter}, + Raw: v3ListenerWithCDSConfigSourceSelf, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, + { + name: "rds.ConfigSource in apiListener is not ADS or Self", resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ From 3b70fe08d8c9f45ed4a85330846a699c573f1a60 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 27 Jan 2022 10:27:28 -0800 Subject: [PATCH 413/998] xds: fix broken test "unknown field EnableLRS in struct" (#5170) --- xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 1a303074098d..ddfe5d777f3a 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -1536,7 +1536,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { wantUpdate: map[string]ClusterUpdateErrTuple{ v3ClusterName: {Update: ClusterUpdate{ ClusterName: v3ClusterName, - EDSServiceName: v3Service, EnableLRS: true, 
+ EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, Raw: v3ClusterAnyWithEDSConfigSourceSelf, }}, }, From e2fc510d5711c46d2678ea41f5feff55e32a7848 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 27 Jan 2022 10:41:28 -0800 Subject: [PATCH 414/998] internal/proto: update generated code (#5172) --- interop/grpc_testing/benchmark_service.pb.go | 6 ++++-- interop/grpc_testing/control.pb.go | 4 +++- interop/grpc_testing/empty.pb.go | 5 ++++- interop/grpc_testing/messages.pb.go | 4 +++- interop/grpc_testing/payloads.pb.go | 4 +++- interop/grpc_testing/report_qps_scenario_service.pb.go | 7 +++++-- interop/grpc_testing/stats.pb.go | 6 ++++-- interop/grpc_testing/test.pb.go | 4 +++- interop/grpc_testing/worker_service.pb.go | 5 ++++- 9 files changed, 33 insertions(+), 12 deletions(-) diff --git a/interop/grpc_testing/benchmark_service.pb.go b/interop/grpc_testing/benchmark_service.pb.go index a1057b8a7599..c528619fe6dd 100644 --- a/interop/grpc_testing/benchmark_service.pb.go +++ b/interop/grpc_testing/benchmark_service.pb.go @@ -75,8 +75,10 @@ var file_grpc_testing_benchmark_service_proto_rawDesc = []byte{ 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x2a, 0x0a, 0x0f, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x15, 0x42, + 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_grpc_testing_benchmark_service_proto_goTypes = []interface{}{ diff --git 
a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index 8db13921b77e..ad8d6ea6a123 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -2103,7 +2103,9 @@ var file_grpc_testing_control_proto_rawDesc = []byte{ 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x57, 0x41, 0x59, 0x53, 0x10, - 0x04, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x04, 0x42, 0x21, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index 5378d2c58d0f..5b239de631b0 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -91,7 +91,10 @@ var file_grpc_testing_empty_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x79, 0x42, 0x2a, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x0b, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index f956a5ad771f..1bf3d756717e 100644 --- 
a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -1805,7 +1805,9 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, - 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index b85a289c1532..1db2725915d4 100644 --- a/interop/grpc_testing/payloads.pb.go +++ b/interop/grpc_testing/payloads.pb.go @@ -313,7 +313,9 @@ var file_grpc_testing_payloads_proto_rawDesc = []byte{ 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x61, 0x64, 0x42, 0x22, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x0d, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/report_qps_scenario_service.pb.go b/interop/grpc_testing/report_qps_scenario_service.pb.go index 0f4de5984942..f007d47fcda3 100644 --- a/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service.pb.go @@ 
-55,8 +55,11 @@ var file_grpc_testing_report_qps_scenario_service_proto_rawDesc = []byte{ 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x12, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x42, 0x32, 0x0a, 0x0f, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x1d, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x51, 0x70, 0x73, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_grpc_testing_report_qps_scenario_service_proto_goTypes = []interface{}{ diff --git a/interop/grpc_testing/stats.pb.go b/interop/grpc_testing/stats.pb.go index 3ff0ccd80b28..ace41211c00f 100644 --- a/interop/grpc_testing/stats.pb.go +++ b/interop/grpc_testing/stats.pb.go @@ -507,8 +507,10 @@ var file_grpc_testing_stats_proto_rawDesc = []byte{ 0x63, 0x71, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x1f, 0x0a, 0x0f, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, + 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 
0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/test.pb.go b/interop/grpc_testing/test.pb.go index 50db0950b9c5..742935c97aee 100644 --- a/interop/grpc_testing/test.pb.go +++ b/interop/grpc_testing/test.pb.go @@ -142,7 +142,9 @@ var file_grpc_testing_test_proto_rawDesc = []byte{ 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x73, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_grpc_testing_test_proto_goTypes = []interface{}{ diff --git a/interop/grpc_testing/worker_service.pb.go b/interop/grpc_testing/worker_service.pb.go index 3effdd6533b4..12b2f13e100d 100644 --- a/interop/grpc_testing/worker_service.pb.go +++ b/interop/grpc_testing/worker_service.pb.go @@ -66,7 +66,10 @@ var file_grpc_testing_worker_service_proto_rawDesc = []byte{ 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x51, 0x75, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x1a, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x42, 0x27, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x12, 0x57, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x33, } var file_grpc_testing_worker_service_proto_goTypes = []interface{}{ From 593ff8d017c6d3235c257780c00b6f2294e8acb5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 27 Jan 2022 15:03:30 -0800 Subject: [PATCH 415/998] rls: service field in RLS request must not contain slashes (#5168) --- balancer/rls/internal/keys/builder.go | 6 +++- balancer/rls/internal/keys/builder_test.go | 4 +-- balancer/rls/picker.go | 11 +++++++ balancer/rls/picker_test.go | 37 ++++++++++++++++++++++ 4 files changed, 55 insertions(+), 3 deletions(-) diff --git a/balancer/rls/internal/keys/builder.go b/balancer/rls/internal/keys/builder.go index fbf45c668d7f..d010f74456fe 100644 --- a/balancer/rls/internal/keys/builder.go +++ b/balancer/rls/internal/keys/builder.go @@ -120,6 +120,10 @@ type KeyMap struct { // RLSKey builds the RLS keys to be used for the given request, identified by // the request path and the request headers stored in metadata. func (bm BuilderMap) RLSKey(md metadata.MD, host, path string) KeyMap { + // The path passed in is of the form "/service/method". The keyBuilderMap is + // indexed with keys of the form "/service/" or "/service/method". The service + // that we set in the keyMap (to be sent out in the RLS request) should not + // include any slashes though. 
i := strings.LastIndex(path, "/") service, method := path[:i+1], path[i+1:] b, ok := bm[path] @@ -135,7 +139,7 @@ func (bm BuilderMap) RLSKey(md metadata.MD, host, path string) KeyMap { kvMap[b.hostKey] = host } if b.serviceKey != "" { - kvMap[b.serviceKey] = service + kvMap[b.serviceKey] = strings.Trim(service, "/") } if b.methodKey != "" { kvMap[b.methodKey] = method diff --git a/balancer/rls/internal/keys/builder_test.go b/balancer/rls/internal/keys/builder_test.go index 64ace65bd9a0..90c132bc9169 100644 --- a/balancer/rls/internal/keys/builder_test.go +++ b/balancer/rls/internal/keys/builder_test.go @@ -341,12 +341,12 @@ func TestRLSKey(t *testing.T) { "const-key-1": "const-val-1", "const-key-2": "const-val-2", "host": "dummy-host", - "service": "/gFoo/", + "service": "gFoo", "method": "method1", "k1": "v1", "k2": "v1", }, - Str: "const-key-1=const-val-1,const-key-2=const-val-2,host=dummy-host,k1=v1,k2=v1,method=method1,service=/gFoo/", + Str: "const-key-1=const-val-1,const-key-2=const-val-2,host=dummy-host,k1=v1,k2=v1,method=method1,service=gFoo", }, }, { diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go index bf1824ea0343..9b40ccb4dfe1 100644 --- a/balancer/rls/picker.go +++ b/balancer/rls/picker.go @@ -20,6 +20,8 @@ package rls import ( "errors" + "fmt" + "strings" "sync/atomic" "time" @@ -65,8 +67,17 @@ type rlsPicker struct { logger *internalgrpclog.PrefixLogger } +// isFullMethodNameValid return true if name is of the form `/service/method`. +func isFullMethodNameValid(name string) bool { + return strings.HasPrefix(name, "/") && strings.Count(name, "/") == 2 +} + // Pick makes the routing decision for every outbound RPC. func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + if name := info.FullMethodName; !isFullMethodNameValid(name) { + return balancer.PickResult{}, fmt.Errorf("rls: method name %q is not of the form '/service/method", name) + } + // Build the request's keys using the key builders from LB config. 
md, _ := metadata.FromOutgoingContext(info.Ctx) reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) diff --git a/balancer/rls/picker_test.go b/balancer/rls/picker_test.go index 75991852d946..cfe45477f0bc 100644 --- a/balancer/rls/picker_test.go +++ b/balancer/rls/picker_test.go @@ -720,3 +720,40 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_ExpiredEntry(t *testing.T) { }) } } + +func TestIsFullMethodNameValid(t *testing.T) { + tests := []struct { + desc string + methodName string + want bool + }{ + { + desc: "does not start with a slash", + methodName: "service/method", + want: false, + }, + { + desc: "does not contain a method", + methodName: "/service", + want: false, + }, + { + desc: "path has more elements", + methodName: "/service/path/to/method", + want: false, + }, + { + desc: "valid", + methodName: "/service/method", + want: true, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if got := isFullMethodNameValid(test.methodName); got != test.want { + t.Fatalf("isFullMethodNameValid(%q) = %v, want %v", test.methodName, got, test.want) + } + }) + } +} From 980790869b005639bbf0e742f8c3008b235a1e6e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 31 Jan 2022 12:49:45 -0800 Subject: [PATCH 416/998] rls: support `routeLookupChannelServiceConfig` field (#5176) --- balancer/rls/balancer.go | 2 +- balancer/rls/balancer_test.go | 72 +++++++++++++++++-- balancer/rls/config.go | 23 +++++- balancer/rls/config_test.go | 63 +++++++++++++--- balancer/rls/control_channel.go | 21 ++++-- balancer/rls/control_channel_test.go | 12 ++-- balancer/rls/helpers_test.go | 6 +- .../rls/internal/test/e2e/rls_lb_config.go | 7 +- internal/internal.go | 7 +- service_config.go | 2 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 2 +- .../resolver/cluster_specifier_plugin_test.go | 12 ++-- xds/internal/resolver/xds_resolver_test.go | 16 ++--- 13 files changed, 194 insertions(+), 51 deletions(-) diff --git 
a/balancer/rls/balancer.go b/balancer/rls/balancer.go index 33da052ca562..3b9d610f36d1 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -254,7 +254,7 @@ func (b *rlsBalancer) handleControlChannelUpdate(newCfg *lbConfig) { // Create a new control channel and close the existing one. b.logger.Infof("Creating control channel to RLS server at: %v", newCfg.lookupService) - ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.lookupServiceTimeout, b.bopts, b.connectivityStateCh) + ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.controlChannelServiceConfig, newCfg.lookupServiceTimeout, b.bopts, b.connectivityStateCh) if err != nil { // This is very uncommon and usually represents a non-transient error. // There is not much we can do here other than wait for another update diff --git a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go index 7d243cd13777..45d95b2ddeca 100644 --- a/balancer/rls/balancer_test.go +++ b/balancer/rls/balancer_test.go @@ -21,6 +21,7 @@ package rls import ( "context" "errors" + "fmt" "testing" "time" @@ -100,7 +101,7 @@ func (s) TestConfigUpdate_ControlChannel(t *testing.T) { if err != nil { t.Fatal(err) } - sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) r.UpdateState(resolver.State{ServiceConfig: sc}) // Ensure a connection is established to the second RLS server. @@ -177,6 +178,67 @@ func (s) TestConfigUpdate_ControlChannelWithCreds(t *testing.T) { } } +// TestConfigUpdate_ControlChannelServiceConfig tests the scenario where RLS LB +// policy's configuration specifies the service config for the control channel +// via the `routeLookupChannelServiceConfig` field. This test verifies that the +// provided service config is applied for the control channel. 
+func (s) TestConfigUpdate_ControlChannelServiceConfig(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Register a balancer to be used for the control channel, and set up a + // callback to get notified when the balancer receives a clientConn updates. + ccUpdateCh := testutils.NewChannel() + bf := &e2e.BalancerFuncs{ + UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error { + if cfg.Backend != rlsServer.Address { + return fmt.Errorf("control channel LB policy received config with backend %q, want %q", cfg.Backend, rlsServer.Address) + } + ccUpdateCh.Replace(nil) + return nil + }, + } + controlChannelPolicyName := "test-control-channel-" + t.Name() + e2e.RegisterRLSChildPolicy(controlChannelPolicyName, bf) + t.Logf("Registered child policy with name %q", controlChannelPolicyName) + + // Build RLS service config and set the `routeLookupChannelServiceConfig` + // field to a service config which uses the above balancer. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupChannelServiceConfig = fmt.Sprintf(`{"loadBalancingConfig" : [{%q: {"backend": %q} }]}`, controlChannelPolicyName, rlsServer.Address) + + // Start a test backend, and set up the fake RLS server to return this as a + // target in the RLS response. + backendCh, backendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///rls.test.example.com", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Verify that the control channel is using the LB policy we injected via the + // routeLookupChannelServiceConfig field. + if _, err := ccUpdateCh.Receive(ctx); err != nil { + t.Fatalf("timeout when waiting for control channel LB policy to receive a clientConn update") + } +} + // TestConfigUpdate_DefaultTarget tests the scenario where a config update // changes the default target. Verifies that RPCs get routed to the new default // target after the config has been applied. @@ -213,7 +275,7 @@ func (s) TestConfigUpdate_DefaultTarget(t *testing.T) { if err != nil { t.Fatal(err) } - sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) r.UpdateState(resolver.State{ServiceConfig: sc}) makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh2) } @@ -313,7 +375,7 @@ func (s) TestConfigUpdate_ChildPolicyConfigs(t *testing.T) { if err != nil { t.Fatal(err) } - sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) r.UpdateState(resolver.State{ServiceConfig: sc}) // Expect the child policy for the test backend to receive the update. 
@@ -418,7 +480,7 @@ func (s) TestConfigUpdate_ChildPolicyChange(t *testing.T) { if err != nil { t.Fatal(err) } - sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) r.UpdateState(resolver.State{ServiceConfig: sc}) // The above update should result in the first LB policy being shutdown and @@ -568,7 +630,7 @@ func (s) TestConfigUpdate_DataCacheSizeDecrease(t *testing.T) { if err != nil { t.Fatal(err) } - sc := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) r.UpdateState(resolver.State{ServiceConfig: sc}) <-clientConnUpdateDone diff --git a/balancer/rls/config.go b/balancer/rls/config.go index e9e94cd2c449..576a7572e5a1 100644 --- a/balancer/rls/config.go +++ b/balancer/rls/config.go @@ -29,6 +29,7 @@ import ( durationpb "github.com/golang/protobuf/ptypes/duration" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/pretty" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/resolver" @@ -61,9 +62,10 @@ type lbConfig struct { staleAge time.Duration defaultTarget string - childPolicyName string - childPolicyConfig map[string]json.RawMessage - childPolicyTargetField string + childPolicyName string + childPolicyConfig map[string]json.RawMessage + childPolicyTargetField string + controlChannelServiceConfig string } func (lbCfg *lbConfig) Equal(other *lbConfig) bool { @@ -76,6 +78,7 @@ func (lbCfg *lbConfig) Equal(other *lbConfig) bool { lbCfg.defaultTarget == other.defaultTarget && lbCfg.childPolicyName == other.childPolicyName && lbCfg.childPolicyTargetField == other.childPolicyTargetField && + lbCfg.controlChannelServiceConfig == other.controlChannelServiceConfig && 
childPolicyConfigEqual(lbCfg.childPolicyConfig, other.childPolicyConfig) } @@ -102,10 +105,14 @@ func childPolicyConfigEqual(a, b map[string]json.RawMessage) bool { // and makes it easier to unmarshal. type lbConfigJSON struct { RouteLookupConfig json.RawMessage + RouteLookupChannelServiceConfig json.RawMessage ChildPolicy []map[string]json.RawMessage ChildPolicyConfigTargetFieldName string } +// ParseConfig parses the JSON load balancer config provided into an +// internal form or returns an error if the config is invalid. +// // When parsing a config update, the following validations are performed: // - routeLookupConfig: // - grpc_keybuilders field: @@ -127,6 +134,8 @@ type lbConfigJSON struct { // - ignore `valid_targets` field // - `cache_size_bytes` field must have a value greater than 0, and if its // value is greater than 5M, we cap it at 5M +// - routeLookupChannelServiceConfig: +// - if specified, must parse as valid service config // - childPolicy: // - must find a valid child policy with a valid config // - childPolicyConfigTargetFieldName: @@ -148,6 +157,14 @@ func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, return nil, err } + if sc := string(cfgJSON.RouteLookupChannelServiceConfig); sc != "" { + parsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(sc) + if parsed.Err != nil { + return nil, fmt.Errorf("rls: bad control channel service config %q: %v", sc, parsed.Err) + } + lbCfg.controlChannelServiceConfig = sc + } + if cfgJSON.ChildPolicyConfigTargetFieldName == "" { return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config %+v", string(c)) } diff --git a/balancer/rls/config_test.go b/balancer/rls/config_test.go index 301a8219767b..86cfcad74935 100644 --- a/balancer/rls/config_test.go +++ b/balancer/rls/config_test.go @@ -41,6 +41,7 @@ func testEqual(a, b *lbConfig) bool { a.staleAge == b.staleAge && a.cacheSizeBytes == b.cacheSizeBytes && a.defaultTarget 
== b.defaultTarget && + a.controlChannelServiceConfig == b.controlChannelServiceConfig && a.childPolicyName == b.childPolicyName && a.childPolicyTargetField == b.childPolicyTargetField && childPolicyConfigEqual(a.childPolicyConfig, b.childPolicyConfig) @@ -114,18 +115,20 @@ func (s) TestParseConfig(t *testing.T) { "cacheSizeBytes": 1000, "defaultTarget": "passthrough:///default" }, + "routeLookupChannelServiceConfig": {"loadBalancingConfig": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}]}, "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], "childPolicyConfigTargetFieldName": "serviceName" }`), wantCfg: &lbConfig{ - lookupService: "target", - lookupServiceTimeout: 100 * time.Second, - maxAge: 60 * time.Second, - staleAge: 50 * time.Second, - cacheSizeBytes: 1000, - defaultTarget: "passthrough:///default", - childPolicyName: "grpclb", - childPolicyTargetField: "serviceName", + lookupService: "target", + lookupServiceTimeout: 100 * time.Second, + maxAge: 60 * time.Second, + staleAge: 50 * time.Second, + cacheSizeBytes: 1000, + defaultTarget: "passthrough:///default", + controlChannelServiceConfig: `{"loadBalancingConfig": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}]}`, + childPolicyName: "grpclb", + childPolicyTargetField: "serviceName", childPolicyConfig: map[string]json.RawMessage{ "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), "serviceName": json.RawMessage(childPolicyTargetFieldVal), @@ -277,6 +280,50 @@ func (s) TestParseConfigErrors(t *testing.T) { }`), wantErr: "rls: cache_size_bytes must be set to a non-zero value", }, + { + desc: "routeLookupChannelServiceConfig is not in service config format", + input: []byte(`{ + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "service", "method": "method"}], + "headers": [{"key": "k1", "names": ["v1"]}] + }], + "lookupService": "target", + "lookupServiceTimeout" : "100s", + "maxAge": "60s", + "staleAge" : "50s", + "cacheSizeBytes": 1000, + "defaultTarget": 
"passthrough:///default" + }, + "routeLookupChannelServiceConfig": "unknown", + "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], + "childPolicyConfigTargetFieldName": "serviceName" + }`), + wantErr: "cannot unmarshal string into Go value of type grpc.jsonSC", + }, + { + desc: "routeLookupChannelServiceConfig contains unknown LB policy", + input: []byte(`{ + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "service", "method": "method"}], + "headers": [{"key": "k1", "names": ["v1"]}] + }], + "lookupService": "target", + "lookupServiceTimeout" : "100s", + "maxAge": "60s", + "staleAge" : "50s", + "cacheSizeBytes": 1000, + "defaultTarget": "passthrough:///default" + }, + "routeLookupChannelServiceConfig": { + "loadBalancingConfig": [{"not_a_balancer1": {} }, {"not_a_balancer2": {}}] + }, + "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], + "childPolicyConfigTargetFieldName": "serviceName" + }`), + wantErr: "invalid loadBalancingConfig: no supported policies found", + }, { desc: "no child policy", input: []byte(`{ diff --git a/balancer/rls/control_channel.go b/balancer/rls/control_channel.go index dc8446313e7d..9df96549f7dc 100644 --- a/balancer/rls/control_channel.go +++ b/balancer/rls/control_channel.go @@ -59,7 +59,10 @@ type controlChannel struct { logger *internalgrpclog.PrefixLogger } -func newControlChannel(rlsServerName string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyCh chan struct{}) (*controlChannel, error) { +// newControlChannel creates a controlChannel to rlsServerName and uses +// serviceConfig, if non-empty, as the default service config for the underlying +// gRPC channel. 
+func newControlChannel(rlsServerName, serviceConfig string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyCh chan struct{}) (*controlChannel, error) { ctrlCh := &controlChannel{ rpcTimeout: rpcTimeout, backToReadyCh: backToReadyCh, @@ -67,7 +70,7 @@ func newControlChannel(rlsServerName string, rpcTimeout time.Duration, bOpts bal } ctrlCh.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-control-channel %p] ", ctrlCh)) - dopts, err := ctrlCh.dialOpts(bOpts) + dopts, err := ctrlCh.dialOpts(bOpts, serviceConfig) if err != nil { return nil, err } @@ -83,7 +86,7 @@ func newControlChannel(rlsServerName string, rpcTimeout time.Duration, bOpts bal } // dialOpts constructs the dial options for the control plane channel. -func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions) ([]grpc.DialOption, error) { +func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions, serviceConfig string) ([]grpc.DialOption, error) { // The control plane channel will use the same authority as the parent // channel for server authorization. This ensures that the identity of the // RLS server and the identity of the backends is the same, so if the RLS @@ -114,7 +117,17 @@ func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions) ([]grpc.DialOpti cc.logger.Warningf("no credentials available, using Insecure") credsOpt = grpc.WithInsecure() } - return append(dopts, credsOpt), nil + dopts = append(dopts, credsOpt) + + // If the RLS LB policy's configuration specified a service config for the + // control channel, use that and disable service config fetching via the name + // resolver for the control channel. 
+ if serviceConfig != "" { + cc.logger.Infof("Disabling service config from the name resolver and instead using: %s", serviceConfig) + dopts = append(dopts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(serviceConfig)) + } + + return dopts, nil } func (cc *controlChannel) monitorConnectivityState() { diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go index 78ddb8d1219a..7ecdc6bec5fd 100644 --- a/balancer/rls/control_channel_test.go +++ b/balancer/rls/control_channel_test.go @@ -51,7 +51,7 @@ func (s) TestControlChannelThrottled(t *testing.T) { overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) // Create a control channel to the fake RLS server. - ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, balancer.BuildOptions{}, nil) + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, balancer.BuildOptions{}, nil) if err != nil { t.Fatalf("Failed to create control channel to RLS server: %v", err) } @@ -79,7 +79,7 @@ func (s) TestLookupFailure(t *testing.T) { }) // Create a control channel to the fake RLS server. - ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, balancer.BuildOptions{}, nil) + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, balancer.BuildOptions{}, nil) if err != nil { t.Fatalf("Failed to create control channel to RLS server: %v", err) } @@ -118,7 +118,7 @@ func (s) TestLookupDeadlineExceeded(t *testing.T) { overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Create a control channel with a small deadline. 
- ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestShortTimeout, balancer.BuildOptions{}, nil) + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestShortTimeout, balancer.BuildOptions{}, nil) if err != nil { t.Fatalf("Failed to create control channel to RLS server: %v", err) } @@ -272,7 +272,7 @@ func testControlChannelCredsSuccess(t *testing.T, sopts []grpc.ServerOption, bop }) // Create a control channel to the fake server. - ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, bopts, nil) + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, bopts, nil) if err != nil { t.Fatalf("Failed to create control channel to RLS server: %v", err) } @@ -360,7 +360,7 @@ func testControlChannelCredsFailure(t *testing.T, sopts []grpc.ServerOption, bop overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Create the control channel to the fake server. - ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, bopts, nil) + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, bopts, nil) if err != nil { t.Fatalf("Failed to create control channel to RLS server: %v", err) } @@ -457,7 +457,7 @@ func (s) TestNewControlChannelUnsupportedCredsBundle(t *testing.T) { rlsServer, _ := setupFakeRLSServer(t, nil) // Create the control channel to the fake server. 
- ctrlCh, err := newControlChannel(rlsServer.Address, defaultTestTimeout, balancer.BuildOptions{CredsBundle: &unsupportedCredsBundle{}}, nil) + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, balancer.BuildOptions{CredsBundle: &unsupportedCredsBundle{}}, nil) if err == nil { ctrlCh.close() t.Fatal("newControlChannel succeeded when expected to fail") diff --git a/balancer/rls/helpers_test.go b/balancer/rls/helpers_test.go index 2b6a37cc36c9..26123f8ce855 100644 --- a/balancer/rls/helpers_test.go +++ b/balancer/rls/helpers_test.go @@ -197,7 +197,7 @@ func setupFakeRLSServer(t *testing.T, lis net.Listener, opts ...grpc.ServerOptio } // buildBasicRLSConfig constructs a basic service config for the RLS LB policy -// which header matching rules. This expects the passed child policy name to +// with header matching rules. This expects the passed child policy name to // have been registered by the caller. func buildBasicRLSConfig(childPolicyName, rlsServerAddress string) *e2e.RLSConfig { return &e2e.RLSConfig{ @@ -215,6 +215,7 @@ func buildBasicRLSConfig(childPolicyName, rlsServerAddress string) *e2e.RLSConfi LookupServiceTimeout: durationpb.New(defaultTestTimeout), CacheSizeBytes: 1024, }, + RouteLookupChannelServiceConfig: `{"loadBalancingConfig": [{"pick_first": {}}]}`, ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, ChildPolicyConfigTargetFieldName: e2e.RLSChildPolicyTargetNameField, } @@ -235,6 +236,7 @@ func buildBasicRLSConfigWithChildPolicy(t *testing.T, childPolicyName, rlsServer LookupServiceTimeout: durationpb.New(defaultTestTimeout), CacheSizeBytes: 1024, }, + RouteLookupChannelServiceConfig: `{"loadBalancingConfig": [{"pick_first": {}}]}`, ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, ChildPolicyConfigTargetFieldName: e2e.RLSChildPolicyTargetNameField, } @@ -275,7 +277,7 @@ func startManualResolverWithConfig(t *testing.T, rlsConfig *e2e.RLSConfig) *manu t.Fatal(err) } - sc := 
internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(scJSON) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) r := manual.NewBuilderWithScheme("rls-e2e") r.InitialState(resolver.State{ServiceConfig: sc}) t.Cleanup(r.Close) diff --git a/balancer/rls/internal/test/e2e/rls_lb_config.go b/balancer/rls/internal/test/e2e/rls_lb_config.go index 2aec642c77e0..0a5993d795c0 100644 --- a/balancer/rls/internal/test/e2e/rls_lb_config.go +++ b/balancer/rls/internal/test/e2e/rls_lb_config.go @@ -33,6 +33,7 @@ import ( // RLSConfig is a utility type to build service config for the RLS LB policy. type RLSConfig struct { RouteLookupConfig *rlspb.RouteLookupConfig + RouteLookupChannelServiceConfig string ChildPolicy *internalserviceconfig.BalancerConfig ChildPolicyConfigTargetFieldName string } @@ -60,12 +61,13 @@ func (c *RLSConfig) ServiceConfigJSON() (string, error) { { "rls_experimental": { "routeLookupConfig": %s, + "routeLookupChannelServiceConfig": %s, "childPolicy": %s, "childPolicyConfigTargetFieldName": %q } } ] -}`, string(routeLookupCfg), string(childPolicy), c.ChildPolicyConfigTargetFieldName), nil +}`, string(routeLookupCfg), c.RouteLookupChannelServiceConfig, string(childPolicy), c.ChildPolicyConfigTargetFieldName), nil } // LoadBalancingConfig generates load balancing config which can used as part of @@ -87,9 +89,10 @@ func (c *RLSConfig) LoadBalancingConfig() (serviceconfig.LoadBalancingConfig, er lbConfigJSON := fmt.Sprintf(` { "routeLookupConfig": %s, + "routeLookupChannelServiceConfig": %s, "childPolicy": %s, "childPolicyConfigTargetFieldName": %q -}`, string(routeLookupCfg), string(childPolicy), c.ChildPolicyConfigTargetFieldName) +}`, string(routeLookupCfg), c.RouteLookupChannelServiceConfig, string(childPolicy), c.ChildPolicyConfigTargetFieldName) builder := balancer.Get("rls_experimental") if builder == nil { diff --git a/internal/internal.go b/internal/internal.go index 1b596bf3579f..20fb880f344f 
100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool diff --git a/service_config.go b/service_config.go index 22c4240cf7e8..6926a06dc523 100644 --- a/service_config.go +++ b/service_config.go @@ -218,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index efa34dbab0e4..112d25df3332 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -200,7 +200,7 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { jsonSC := fmt.Sprintf(cdsLBConfig, cluster) return balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{ - ServiceConfig: internal.ParseServiceConfigForTesting.(func(string) 
*serviceconfig.ParseResult)(jsonSC), + ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC), }, xdsC), BalancerConfig: &lbConfig{ClusterName: clusterName}, } diff --git a/xds/internal/resolver/cluster_specifier_plugin_test.go b/xds/internal/resolver/cluster_specifier_plugin_test.go index d432ad3c489d..9ac2ca2b52a0 100644 --- a/xds/internal/resolver/cluster_specifier_plugin_test.go +++ b/xds/internal/resolver/cluster_specifier_plugin_test.go @@ -95,7 +95,7 @@ func (s) TestXDSResolverClusterSpecifierPlugin(t *testing.T) { } }}]}` - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -163,7 +163,7 @@ func (s) TestXDSResolverClusterSpecifierPluginConfigUpdate(t *testing.T) { } }}]}` - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -199,7 +199,7 @@ func (s) TestXDSResolverClusterSpecifierPluginConfigUpdate(t *testing.T) { } }}]}` - wantSCParsed = internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service 
config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -250,7 +250,7 @@ func (s) TestXDSResolverDelayedOnCommittedCSP(t *testing.T) { } }}]}` - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -320,7 +320,7 @@ func (s) TestXDSResolverDelayedOnCommittedCSP(t *testing.T) { } }}]}` - wantSCParsed2 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON2) + wantSCParsed2 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON2) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -359,7 +359,7 @@ func (s) TestXDSResolverDelayedOnCommittedCSP(t *testing.T) { } }}]}` - wantSCParsed3 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON3) + wantSCParsed3 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON3) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index c5fa3b8f7493..f684c799123c 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -101,7 +101,7 @@ func (t *testClientConn) ReportError(err error) { } func (t *testClientConn) 
ParseServiceConfig(jsonSC string) *serviceconfig.ParseResult { - return internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(jsonSC) + return internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) } func newTestClientConn() *testClientConn { @@ -548,7 +548,7 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) } - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(tt.wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(tt.wantJSON) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -730,7 +730,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { } } }}]}` - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) gotState, err := tcc.stateCh.Receive(ctx) if err != nil { @@ -798,7 +798,7 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { if err := rState.ServiceConfig.Err; err != nil { t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) } - wantSCParsed = internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)("{}") + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)("{}") if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -992,7 +992,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { } } }}]}` - 
wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -1054,7 +1054,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { } } }}]}` - wantSCParsed2 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON2) + wantSCParsed2 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON2) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -1089,7 +1089,7 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { } } }}]}` - wantSCParsed3 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON3) + wantSCParsed3 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON3) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) { t.Errorf("ClientConn.UpdateState received different service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) @@ -1177,7 +1177,7 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) - wantParsedConfig := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)("{}") + wantParsedConfig := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)("{}") if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantParsedConfig.Config) { 
t.Error("ClientConn.UpdateState got wrong service config") t.Errorf("gotParsed: %s", cmp.Diff(nil, rState.ServiceConfig.Config)) From f68fb05c3e6ba3319af624fefc6bd31246a126cc Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 1 Feb 2022 10:37:51 -0800 Subject: [PATCH 417/998] leakcheck: ignore http read/write goroutine (#5182) --- internal/leakcheck/leakcheck.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/leakcheck/leakcheck.go b/internal/leakcheck/leakcheck.go index 946c575f140f..80e43beb6c0e 100644 --- a/internal/leakcheck/leakcheck.go +++ b/internal/leakcheck/leakcheck.go @@ -42,6 +42,12 @@ var goroutinesToIgnore = []string{ "runtime_mcall", "(*loggingT).flushDaemon", "goroutine in C code", + // Ignore the http read/write goroutines. gce metadata.OnGCE() was leaking + // these, root cause unknown. + // + // https://github.com/grpc/grpc-go/issues/5171 + // https://github.com/grpc/grpc-go/issues/5173 + "created by net/http.(*Transport).dialConn", } // RegisterIgnoreGoroutine appends s into the ignore goroutine list. 
The From 2209ed9ec9484839bf8387e46aba7b5780ba5a7a Mon Sep 17 00:00:00 2001 From: Zhouyihai Ding Date: Tue, 1 Feb 2022 12:45:18 -0800 Subject: [PATCH 418/998] cmd/protoc-gen-go-grpc: add hooks to allow overriding generated code (#5056) --- cmd/protoc-gen-go-grpc/grpc.go | 73 +++++++++++++++++++++++++--------- 1 file changed, 54 insertions(+), 19 deletions(-) diff --git a/cmd/protoc-gen-go-grpc/grpc.go b/cmd/protoc-gen-go-grpc/grpc.go index f45e0403fd4f..66e26afc7d48 100644 --- a/cmd/protoc-gen-go-grpc/grpc.go +++ b/cmd/protoc-gen-go-grpc/grpc.go @@ -34,6 +34,53 @@ const ( statusPackage = protogen.GoImportPath("google.golang.org/grpc/status") ) +type serviceGenerateHelperInterface interface { + formatFullMethodName(service *protogen.Service, method *protogen.Method) string + generateNewClientDefinitions(g *protogen.GeneratedFile, service *protogen.Service, clientName string) + generateUnimplementedServerType(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) + generateServerFunctions(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service, serverType string, serviceDescVar string) +} + +type serviceGenerateHelper struct{} + +func (serviceGenerateHelper) formatFullMethodName(service *protogen.Service, method *protogen.Method) string { + return fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) +} + +func (serviceGenerateHelper) generateNewClientDefinitions(g *protogen.GeneratedFile, service *protogen.Service, clientName string) { +} + +func (serviceGenerateHelper) generateUnimplementedServerType(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + serverType := service.GoName + "Server" + mustOrShould := "must" + if !*requireUnimplemented { + mustOrShould = "should" + } + // Server Unimplemented struct for forward compatibility. 
+ g.P("// Unimplemented", serverType, " ", mustOrShould, " be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + for _, method := range service.Methods { + nilArg := "" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + nilArg = "nil," + } + g.P("func (Unimplemented", serverType, ") ", serverSignature(g, method), "{") + g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) + g.P("}") + } + if *requireUnimplemented { + g.P("func (Unimplemented", serverType, ") mustEmbedUnimplemented", serverType, "() {}") + } + g.P() +} + +func (serviceGenerateHelper) generateServerFunctions(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service, serverType string, serviceDescVar string) { +} + +var helper serviceGenerateHelperInterface = serviceGenerateHelper{} + // generateFile generates a _grpc.pb.go file containing gRPC service definitions. func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { if len(file.Services) == 0 { @@ -121,6 +168,7 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P(deprecationComment) } g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {") + helper.generateNewClientDefinitions(g, service, clientName) g.P("return &", unexport(clientName), "{cc}") g.P("}") g.P() @@ -170,23 +218,7 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P() // Server Unimplemented struct for forward compatibility. 
- g.P("// Unimplemented", serverType, " ", mustOrShould, " be embedded to have forward compatible implementations.") - g.P("type Unimplemented", serverType, " struct {") - g.P("}") - g.P() - for _, method := range service.Methods { - nilArg := "" - if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { - nilArg = "nil," - } - g.P("func (Unimplemented", serverType, ") ", serverSignature(g, method), "{") - g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) - g.P("}") - } - if *requireUnimplemented { - g.P("func (Unimplemented", serverType, ") mustEmbedUnimplemented", serverType, "() {}") - } - g.P() + helper.generateUnimplementedServerType(gen, file, g, service) // Unsafe Server interface to opt-out of forward compatibility. g.P("// Unsafe", serverType, " may be embedded to opt out of forward compatibility for this service.") @@ -206,6 +238,8 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P("}") g.P() + helper.generateServerFunctions(gen, file, g, service, serverType, serviceDescVar) + // Server handler implementations. 
handlerNames := make([]string, 0, len(service.Methods)) for _, method := range service.Methods { @@ -270,7 +304,7 @@ func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { service := method.Parent - sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + sname := helper.formatFullMethodName(service, method) if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { g.P(deprecationComment) @@ -374,7 +408,8 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{") g.P("Server: srv,") - g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name())), ",") + fullMethodName := helper.formatFullMethodName(service, method) + g.P("FullMethod: \"", fullMethodName, "\",") g.P("}") g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {") g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))") From c7f7d3a75c27863c70be6bdcc84b815c34c3a92a Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Tue, 1 Feb 2022 14:01:50 -0800 Subject: [PATCH 419/998] xds/bootstrap: add plugin system for credentials specified in bootstrap file (#5136) --- credentials/insecure/insecure.go | 26 ++++++++ xds/bootstrap/bootstrap.go | 64 +++++++++++++++++++ xds/bootstrap/bootstrap_test.go | 63 ++++++++++++++++++ xds/internal/xdsclient/bootstrap/bootstrap.go | 48 +++++++++++--- .../xdsclient/bootstrap/bootstrap_test.go | 30 +++++++++ 5 files changed, 222 insertions(+), 9 deletions(-) create mode 100644 xds/bootstrap/bootstrap.go create mode 100644 xds/bootstrap/bootstrap_test.go diff --git 
a/credentials/insecure/insecure.go b/credentials/insecure/insecure.go index 4fbed12565fd..82bee1443bfe 100644 --- a/credentials/insecure/insecure.go +++ b/credentials/insecure/insecure.go @@ -70,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/xds/bootstrap/bootstrap.go b/xds/bootstrap/bootstrap.go new file mode 100644 index 000000000000..db6c7d6754ac --- /dev/null +++ b/xds/bootstrap/bootstrap.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bootstrap provides the functionality to register possible options +// for aspects of the xDS client through the bootstrap file. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed +// in a later release. +package bootstrap + +import ( + "encoding/json" + + "google.golang.org/grpc/credentials" +) + +// registry is a map from credential type name to Credential builder. +var registry = make(map[string]Credentials) + +// Credentials interface encapsulates a credentials.Bundle builder +// that can be used for communicating with the xDS Management server. +type Credentials interface { + // Build returns a credential bundle associated with this credential. + Build(config json.RawMessage) (credentials.Bundle, error) + // Name returns the credential name associated with this credential. + Name() string +} + +// RegisterCredentials registers Credentials used for connecting to the xds +// management server. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple credentials are +// registered with the same name, the one registered last will take effect. +func RegisterCredentials(c Credentials) { + registry[c.Name()] = c +} + +// GetCredentials returns the credentials associated with a given name. +// If no credentials are registered with the name, nil will be returned. +func GetCredentials(name string) Credentials { + if c, ok := registry[name]; ok { + return c + } + + return nil +} diff --git a/xds/bootstrap/bootstrap_test.go b/xds/bootstrap/bootstrap_test.go new file mode 100644 index 000000000000..80ae31ccd2e3 --- /dev/null +++ b/xds/bootstrap/bootstrap_test.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package bootstrap + +import ( + "encoding/json" + "testing" + + "google.golang.org/grpc/credentials" +) + +const testCredsBuilderName = "test_creds" + +var builder = &testCredsBuilder{} + +func init() { + RegisterCredentials(builder) +} + +type testCredsBuilder struct { + config json.RawMessage +} + +func (t *testCredsBuilder) Build(config json.RawMessage) (credentials.Bundle, error) { + t.config = config + return nil, nil +} + +func (t *testCredsBuilder) Name() string { + return testCredsBuilderName +} + +func TestRegisterNew(t *testing.T) { + c := GetCredentials(testCredsBuilderName) + if c == nil { + t.Fatalf("GetCredentials(%q) credential = nil", testCredsBuilderName) + } + + const sampleConfig = "sample_config" + rawMessage := json.RawMessage(sampleConfig) + if _, err := c.Build(rawMessage); err != nil { + t.Errorf("Build(%v) error = %v, want nil", rawMessage, err) + } + + if got, want := string(builder.config), sampleConfig; got != want { + t.Errorf("Build config = %v, want %v", got, want) + } +} diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 15aed44eb73f..0115dcf927a3 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -31,12 +31,14 @@ import ( "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" "google.golang.org/grpc" + 
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -49,18 +51,43 @@ const ( // server supports the v3 version of the xDS transport protocol. serverFeaturesV3 = "xds_v3" - // Type name for Google default credentials. - credsGoogleDefault = "google_default" - credsInsecure = "insecure" gRPCUserAgentName = "gRPC Go" clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" ) +func init() { + bootstrap.RegisterCredentials(&insecureCredsBuilder{}) + bootstrap.RegisterCredentials(&googleDefaultCredsBuilder{}) +} + var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) // For overriding in unit tests. var bootstrapFileReadFunc = ioutil.ReadFile +// insecureCredsBuilder encapsulates a insecure credential that is built using a +// JSON config. +type insecureCredsBuilder struct{} + +func (i *insecureCredsBuilder) Build(json.RawMessage) (credentials.Bundle, error) { + return insecure.NewBundle(), nil +} +func (i *insecureCredsBuilder) Name() string { + return "insecure" +} + +// googleDefaultCredsBuilder encapsulates a Google Default credential that is built using a +// JSON config. +type googleDefaultCredsBuilder struct{} + +func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, error) { + return google.NewDefaultCredentials(), nil +} + +func (d *googleDefaultCredsBuilder) Name() string { + return "google_default" +} + // ServerConfig contains the configuration to connect to a server, including // URI, creds, and transport API version (e.g. v2 or v3). 
type ServerConfig struct { @@ -129,13 +156,16 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. sc.CredsType = cc.Type - if cc.Type == credsGoogleDefault { - sc.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials()) - break - } else if cc.Type == credsInsecure { - sc.Creds = grpc.WithTransportCredentials(insecure.NewCredentials()) - break + c := bootstrap.GetCredentials(cc.Type) + if c == nil { + continue + } + bundle, err := c.Build(cc.Config) + if err != nil { + return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) } + sc.Creds = grpc.WithCredentialsBundle(bundle) + break } for _, f := range server.ServerFeatures { if f == serverFeaturesV3 { diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 573a3fca1730..36b4302c8fae 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -1015,3 +1016,32 @@ func TestServerConfigMarshalAndUnmarshal(t *testing.T) { t.Fatalf("diff (-got +want): %v", diff) } } + +func TestDefaultBundles(t *testing.T) { + if c := bootstrap.GetCredentials("google_default"); c == nil { + t.Errorf(`bootstrap.GetCredentials("google_default") credential is nil, want non-nil`) + } + + if c := bootstrap.GetCredentials("insecure"); c == nil { + t.Errorf(`bootstrap.GetCredentials("insecure") credential is nil, want non-nil`) + } +} + +func TestCredsBuilders(t *testing.T) { + b := &googleDefaultCredsBuilder{} + if _, 
err := b.Build(nil); err != nil { + t.Errorf("googleDefaultCredsBuilder.Build failed: %v", err) + } + if got, want := b.Name(), "google_default"; got != want { + t.Errorf("googleDefaultCredsBuilder.Name = %v, want %v", got, want) + } + + i := &insecureCredsBuilder{} + if _, err := i.Build(nil); err != nil { + t.Errorf("insecureCredsBuilder.Build failed: %v", err) + } + + if got, want := i.Name(), "insecure"; got != want { + t.Errorf("insecureCredsBuilder.Name = %v, want %v", got, want) + } +} From f664adfe5bddd1da490e3e67a5d77179a9e2f1fd Mon Sep 17 00:00:00 2001 From: Wanlin Du <67486458+wanlin31@users.noreply.github.com> Date: Wed, 2 Feb 2022 11:36:41 -0600 Subject: [PATCH 420/998] benchmark: update client to include xds library (#5169) --- benchmark/worker/benchmark_client.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 43af38dc5f78..c5748d7016f4 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -37,6 +37,8 @@ import ( testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/xds" // To install the xds resolvers and balancers. 
) var caFile = flag.String("ca_file", "", "The file containing the CA root cert file") From 2af7b5e6fa6381b474fc6efef2279ae2ce1347c5 Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Thu, 3 Feb 2022 10:01:53 -0800 Subject: [PATCH 421/998] xds/interop: update RPC timeout in blackhole after fallback case (#5174) --- interop/grpclb_fallback/client_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/interop/grpclb_fallback/client_linux.go b/interop/grpclb_fallback/client_linux.go index 4fa09ceb4d9c..ed9b8be00ff3 100644 --- a/interop/grpclb_fallback/client_linux.go +++ b/interop/grpclb_fallback/client_linux.go @@ -193,7 +193,7 @@ func doSlowFallbackAfterStartup() { errorLog.Fatalf("Expected RPC to take grpclb route type BACKEND. Got: %v", g) } runCmd(*blackholeLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(40 * time.Second) + fallbackDeadline := time.Now().Add(80 * time.Second) waitForFallbackAndDoRPCs(client, fallbackDeadline) } From 1a63309895dbc46b1bf6d715844a79c8e783c9e5 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 4 Feb 2022 15:21:08 -0500 Subject: [PATCH 422/998] xds: Add support for Outlier Detection configuration in xdsclient (#5183) * Add support for Outlier Detection configuration in xdsclient --- internal/envconfig/xds.go | 6 +- .../xdsclient/xdsresource/type_cds.go | 71 ++++++++ .../xdsclient/xdsresource/unmarshal_cds.go | 142 +++++++++++++++- .../xdsresource/unmarshal_cds_test.go | 155 ++++++++++++++++++ 4 files changed, 369 insertions(+), 5 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 9bad03cec64f..5aecfef93e43 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -41,6 +41,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = 
"GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -82,7 +83,10 @@ var ( // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go index ce3438c121f7..b61a80b429c4 100644 --- a/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/xds/internal/xdsclient/xdsresource/type_cds.go @@ -18,6 +18,8 @@ package xdsresource import ( + "time" + "google.golang.org/protobuf/types/known/anypb" ) @@ -56,6 +58,71 @@ type ClusterLBPolicyRingHash struct { MaximumRingSize uint64 } +// OutlierDetection is the outlier detection configuration for a cluster. +type OutlierDetection struct { + // Interval is the time interval between ejection analysis sweeps. This can + // result in both new ejections as well as addresses being returned to + // service. Defaults to 10s. + Interval time.Duration + // BaseEjectionTime is the base time that a host is ejected for. The real + // time is equal to the base time multiplied by the number of times the host + // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. + BaseEjectionTime time.Duration + // MaxEjectionTime is the maximum time that an address is ejected for. 
If + // not specified, the default value (300s) or the BaseEjectionTime value is + // applied, whichever is larger. + MaxEjectionTime time.Duration + // MaxEjectionPercent is the maximum % of an upstream cluster that can be + // ejected due to outlier detection. Defaults to 10% but will eject at least + // one host regardless of the value. + MaxEjectionPercent uint32 + // SuccessRateStddevFactor is used to determine the ejection threshold for + // success rate outlier ejection. The ejection threshold is the difference + // between the mean success rate, and the product of this factor and the + // standard deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + SuccessRateStdevFactor uint32 + // EnforcingSuccessRate is the % chance that a host will be actually ejected + // when an outlier status is detected through success rate statistics. This + // setting can be used to disable ejection or to ramp it up slowly. Defaults + // to 100. + EnforcingSuccessRate uint32 + // SuccessRateMinimumHosts is the number of hosts in a cluster that must + // have enough request volume to detect success rate outliers. If the number + // of hosts is less than this setting, outlier detection via success rate + // statistics is not performed for any host in the cluster. Defaults to 5. + SuccessRateMinimumHosts uint32 + // SuccessRateRequestVolume is the minimum number of total requests that + // must be collected in one interval (as defined by the interval duration + // above) to include this host in success rate based outlier detection. If + // the volume is lower than this setting, outlier detection via success rate + // statistics is not performed for that host. Defaults to 100. 
+ SuccessRateRequestVolume uint32 + // FailurePercentageThreshold is the failure percentage to use when + // determining failure percentage-based outlier detection. If the failure + // percentage of a given host is greater than or equal to this value, it + // will be ejected. Defaults to 85. + FailurePercentageThreshold uint32 + // EnforcingFailurePercentage is the % chance that a host will be actually + // ejected when an outlier status is detected through failure percentage + // statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + EnforcingFailurePercentage uint32 + // FailurePercentageMinimumHosts is the minimum number of hosts in a cluster + // in order to perform failure percentage-based ejection. If the total + // number of hosts in the cluster is less than this value, failure + // percentage-based ejection will not be performed. Defaults to 5. + FailurePercentageMinimumHosts uint32 + // FailurePercentageRequestVolume is the minimum number of total requests + // that must be collected in one interval (as defined by the interval + // duration above) to perform failure percentage-based ejection for this + // host. If the volume is lower than this setting, failure percentage-based + // ejection will not be performed for this host. Defaults to 50. + FailurePercentageRequestVolume uint32 +} + // ClusterUpdate contains information from a received CDS response, which is of // interest to the registered CDS watcher. type ClusterUpdate struct { @@ -90,6 +157,10 @@ type ClusterUpdate struct { // will be set to different types based on the policy type. LBPolicy *ClusterLBPolicyRingHash + // OutlierDetection is the outlier detection configuration for this cluster. + // If nil, it means this cluster does not use the outlier detection feature. + OutlierDetection *OutlierDetection + // Raw is the resource from the xds response. 
Raw *anypb.Any } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 8501e9e100e9..eba78716eebd 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -22,6 +22,7 @@ import ( "fmt" "net" "strconv" + "time" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -125,11 +126,22 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } } + // Process outlier detection received from the control plane iff the + // corresponding environment variable is set. + var od *OutlierDetection + if envconfig.XDSOutlierDetection { + var err error + if od, err = outlierConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + } + ret := ClusterUpdate{ - ClusterName: cluster.GetName(), - SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), - LBPolicy: lbPolicy, + ClusterName: cluster.GetName(), + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + LBPolicy: lbPolicy, + OutlierDetection: od, } // Note that this is different from the gRFC (gRFC A47 says to include the @@ -463,3 +475,125 @@ func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { } return nil } + +// outlierConfigFromCluster extracts the relevant outlier detection +// configuration from the received cluster resource. Returns nil if no +// OutlierDetection field set in the cluster resource. 
+func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, error) { + od := cluster.GetOutlierDetection() + if od == nil { + return nil, nil + } + const ( + defaultInterval = 10 * time.Second + defaultBaseEjectionTime = 30 * time.Second + defaultMaxEjectionTime = 300 * time.Second + defaultMaxEjectionPercent = 10 + defaultSuccessRateStdevFactor = 1900 + defaultEnforcingSuccessRate = 100 + defaultSuccessRateMinimumHosts = 5 + defaultSuccessRateRequestVolume = 100 + defaultFailurePercentageThreshold = 85 + defaultEnforcingFailurePercentage = 0 + defaultFailurePercentageMinimumHosts = 5 + defaultFailurePercentageRequestVolume = 50 + ) + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + interval := defaultInterval + if i := od.GetInterval(); i != nil { + if err := i.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.interval is invalid with error %v", err) + } + if interval = i.AsDuration(); interval < 0 { + return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", interval) + } + } + + baseEjectionTime := defaultBaseEjectionTime + if bet := od.GetBaseEjectionTime(); bet != nil { + if err := bet.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.base_ejection_time is invalid with error %v", err) + } + if baseEjectionTime = bet.AsDuration(); baseEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", baseEjectionTime) + } + } + + maxEjectionTime := defaultMaxEjectionTime + if met := od.GetMaxEjectionTime(); met != nil { + if err := met.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid with error %v", err) + } + if maxEjectionTime = met.AsDuration(); maxEjectionTime < 0 { + return nil, 
fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", maxEjectionTime) + } + } + + // "The fields max_ejection_percent, enforcing_success_rate, + // failure_percentage_threshold, and enforcing_failure_percentage must have + // values less than or equal to 100. If any of these requirements is + // violated, the Cluster resource should be NACKed." - A50 + maxEjectionPercent := uint32(defaultMaxEjectionPercent) + if mep := od.GetMaxEjectionPercent(); mep != nil { + if maxEjectionPercent = mep.GetValue(); maxEjectionPercent > 100 { + return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", maxEjectionPercent) + } + } + enforcingSuccessRate := uint32(defaultEnforcingSuccessRate) + if esr := od.GetEnforcingSuccessRate(); esr != nil { + if enforcingSuccessRate = esr.GetValue(); enforcingSuccessRate > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", enforcingSuccessRate) + } + } + failurePercentageThreshold := uint32(defaultFailurePercentageThreshold) + if fpt := od.GetFailurePercentageThreshold(); fpt != nil { + if failurePercentageThreshold = fpt.GetValue(); failurePercentageThreshold > 100 { + return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", failurePercentageThreshold) + } + } + enforcingFailurePercentage := uint32(defaultEnforcingFailurePercentage) + if efp := od.GetEnforcingFailurePercentage(); efp != nil { + if enforcingFailurePercentage = efp.GetValue(); enforcingFailurePercentage > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", enforcingFailurePercentage) + } + } + + successRateStdevFactor := uint32(defaultSuccessRateStdevFactor) + if srsf := od.GetSuccessRateStdevFactor(); srsf != nil { + successRateStdevFactor = srsf.GetValue() + } + successRateMinimumHosts := uint32(defaultSuccessRateMinimumHosts) + if srmh := od.GetSuccessRateMinimumHosts(); srmh != nil { + 
successRateMinimumHosts = srmh.GetValue() + } + successRateRequestVolume := uint32(defaultSuccessRateRequestVolume) + if srrv := od.GetSuccessRateRequestVolume(); srrv != nil { + successRateRequestVolume = srrv.GetValue() + } + failurePercentageMinimumHosts := uint32(defaultFailurePercentageMinimumHosts) + if fpmh := od.GetFailurePercentageMinimumHosts(); fpmh != nil { + failurePercentageMinimumHosts = fpmh.GetValue() + } + failurePercentageRequestVolume := uint32(defaultFailurePercentageRequestVolume) + if fprv := od.GetFailurePercentageRequestVolume(); fprv != nil { + failurePercentageRequestVolume = fprv.GetValue() + } + + return &OutlierDetection{ + Interval: interval, + BaseEjectionTime: baseEjectionTime, + MaxEjectionTime: maxEjectionTime, + MaxEjectionPercent: maxEjectionPercent, + EnforcingSuccessRate: enforcingSuccessRate, + FailurePercentageThreshold: failurePercentageThreshold, + EnforcingFailurePercentage: enforcingFailurePercentage, + SuccessRateStdevFactor: successRateStdevFactor, + SuccessRateMinimumHosts: successRateMinimumHosts, + SuccessRateRequestVolume: successRateRequestVolume, + FailurePercentageMinimumHosts: failurePercentageMinimumHosts, + FailurePercentageRequestVolume: failurePercentageRequestVolume, + }, nil +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index ddfe5d777f3a..096c4fb0e828 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -21,6 +21,7 @@ import ( "regexp" "strings" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -28,6 +29,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/durationpb" 
"google.golang.org/protobuf/types/known/wrapperspb" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" @@ -1620,3 +1622,156 @@ func (s) TestUnmarshalCluster(t *testing.T) { }) } } + +func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { + oldOutlierDetectionSupportEnv := envconfig.XDSOutlierDetection + envconfig.XDSOutlierDetection = true + defer func() { envconfig.XDSOutlierDetection = oldOutlierDetectionSupportEnv }() + odToClusterProto := func(od *v3clusterpb.OutlierDetection) *v3clusterpb.Cluster { + // Cluster parsing doesn't fail with respect to fields orthogonal to + // outlier detection. + return &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + OutlierDetection: od, + } + } + odToClusterUpdate := func(od *OutlierDetection) ClusterUpdate { + return ClusterUpdate{ + ClusterName: clusterName, + LRSServerConfig: ClusterLRSOff, + OutlierDetection: od, + } + } + + tests := []struct { + name string + cluster *v3clusterpb.Cluster + wantUpdate ClusterUpdate + wantErr bool + }{ + { + name: "successful-case-all-defaults", + // Outlier detection proto is present without any fields specified, + // so should trigger all default values in the update. 
+ cluster: odToClusterProto(&v3clusterpb.OutlierDetection{}), + wantUpdate: odToClusterUpdate(&OutlierDetection{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateStdevFactor: 1900, + EnforcingSuccessRate: 100, + SuccessRateMinimumHosts: 5, + SuccessRateRequestVolume: 100, + FailurePercentageThreshold: 85, + EnforcingFailurePercentage: 0, + FailurePercentageMinimumHosts: 5, + FailurePercentageRequestVolume: 50, + }), + }, + { + name: "successful-case-all-fields-configured-and-valid", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{ + Interval: &durationpb.Duration{Seconds: 1}, + BaseEjectionTime: &durationpb.Duration{Seconds: 2}, + MaxEjectionTime: &durationpb.Duration{Seconds: 3}, + MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 1}, + SuccessRateStdevFactor: &wrapperspb.UInt32Value{Value: 2}, + EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 3}, + SuccessRateMinimumHosts: &wrapperspb.UInt32Value{Value: 4}, + SuccessRateRequestVolume: &wrapperspb.UInt32Value{Value: 5}, + FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 6}, + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 7}, + FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 8}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 9}, + }), + wantUpdate: odToClusterUpdate(&OutlierDetection{ + Interval: time.Second, + BaseEjectionTime: time.Second * 2, + MaxEjectionTime: time.Second * 3, + MaxEjectionPercent: 1, + SuccessRateStdevFactor: 2, + EnforcingSuccessRate: 3, + SuccessRateMinimumHosts: 4, + SuccessRateRequestVolume: 5, + FailurePercentageThreshold: 6, + EnforcingFailurePercentage: 7, + FailurePercentageMinimumHosts: 8, + FailurePercentageRequestVolume: 9, + }), + }, + { + name: "interval-is-negative", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{Interval: &durationpb.Duration{Seconds: -10}}), + wantErr: true, + }, + { + name: 
"interval-overflows", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{Interval: &durationpb.Duration{Seconds: 315576000001}}), + wantErr: true, + }, + { + name: "base-ejection-time-is-negative", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{BaseEjectionTime: &durationpb.Duration{Seconds: -10}}), + wantErr: true, + }, + { + name: "base-ejection-time-overflows", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{BaseEjectionTime: &durationpb.Duration{Seconds: 315576000001}}), + wantErr: true, + }, + { + name: "max-ejection-time-is-negative", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{MaxEjectionTime: &durationpb.Duration{Seconds: -10}}), + wantErr: true, + }, + { + name: "max-ejection-time-overflows", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{MaxEjectionTime: &durationpb.Duration{Seconds: 315576000001}}), + wantErr: true, + }, + { + name: "max-ejection-percent-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + { + name: "enforcing-success-rate-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + { + name: "failure-percentage-threshold-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + { + name: "enforcing-failure-percentage-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + // A Outlier Detection proto not present should lead to a nil + // OutlierDetection field in the ClusterUpdate, which is implicitly + // tested in every other test in this file. 
+ } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + update, err := validateClusterAndConstructClusterUpdate(test.cluster) + if (err != nil) != test.wantErr { + t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) + } + if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) + } + }) + } +} From 91967153f567adc812d8da223ef984d02a3664ed Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 4 Feb 2022 13:01:27 -0800 Subject: [PATCH 423/998] xds/priority: log warnings instead of errors (#5185) --- xds/internal/balancer/priority/balancer_priority.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 37cd44560437..3a18f6e10d83 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -82,7 +82,7 @@ func (b *priorityBalancer) syncPriority() { for p, name := range b.priorities { child, ok := b.children[name] if !ok { - b.logger.Errorf("child with name %q is not found in children", name) + b.logger.Warningf("child with name %q is not found in children", name) continue } @@ -112,7 +112,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { name := b.priorities[i] child, ok := b.children[name] if !ok { - b.logger.Errorf("child with name %q is not found in children", name) + b.logger.Warningf("child with name %q is not found in children", name) continue } child.stop() @@ -197,12 +197,12 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S priority, ok := b.childToPriority[childName] if !ok { - b.logger.Errorf("priority: received picker update with unknown child %v", childName) + b.logger.Warningf("priority: received picker 
update with unknown child %v", childName) return } if b.childInUse == "" { - b.logger.Errorf("priority: no child is in use when picker update is received") + b.logger.Warningf("priority: no child is in use when picker update is received") return } @@ -219,7 +219,7 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S // necessary. child, ok := b.children[childName] if !ok { - b.logger.Errorf("priority: child balancer not found for child %v, priority %v", childName, priority) + b.logger.Warningf("priority: child balancer not found for child %v, priority %v", childName, priority) return } oldState := child.state.ConnectivityState From c44f627fd1f65c4e9f2837c17b4a734c516172fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=B5=B5=E5=BB=B6?= Date: Thu, 10 Feb 2022 03:17:46 +0800 Subject: [PATCH 424/998] cleanup: replace grpc.WithInsecure with insecure.NewCredentials (#5177) --- admin/test/utils.go | 3 ++- authz/sdk_end2end_test.go | 11 ++++++----- balancer/grpclb/grpclb_remote_balancer.go | 3 ++- balancer/rls/control_channel.go | 3 ++- benchmark/benchmain/main.go | 3 ++- benchmark/client/main.go | 3 ++- benchmark/worker/benchmark_client.go | 3 ++- binarylog/binarylog_end2end_test.go | 3 ++- clientconn.go | 2 +- .../alts/internal/handshaker/service/service.go | 3 ++- examples/features/cancellation/client/main.go | 3 ++- examples/features/compression/client/main.go | 3 ++- examples/features/deadline/client/main.go | 3 ++- examples/features/deadline/server/main.go | 3 ++- examples/features/debugging/client/main.go | 3 ++- examples/features/errors/client/main.go | 3 ++- examples/features/health/client/main.go | 3 ++- examples/features/keepalive/client/main.go | 3 ++- examples/features/load_balancing/client/main.go | 5 +++-- examples/features/metadata/client/main.go | 3 ++- examples/features/multiplex/client/main.go | 3 ++- examples/features/name_resolving/client/main.go | 5 +++-- examples/features/retry/README.md | 2 +- 
examples/features/retry/client/main.go | 3 ++- examples/features/unix_abstract/client/main.go | 3 ++- examples/features/wait_for_ready/main.go | 3 ++- internal/stubserver/stubserver.go | 3 ++- interop/client/client.go | 3 ++- interop/http2/negative_http2_client.go | 3 ++- profiling/cmd/remote.go | 3 ++- reflection/serverreflection_test.go | 3 ++- stats/stats_test.go | 3 ++- stress/client/main.go | 3 ++- stress/metrics_client/main.go | 3 ++- test/authority_test.go | 3 ++- test/balancer_test.go | 10 +++++----- test/end2end_test.go | 17 +++++++++-------- test/goaway_test.go | 3 ++- test/gracefulstop_test.go | 3 ++- test/healthcheck_test.go | 3 ++- test/insecure_creds_test.go | 2 +- test/local_creds_test.go | 3 ++- test/retry_test.go | 3 ++- xds/csds/csds_test.go | 5 +++-- xds/internal/test/e2e/e2e.go | 3 ++- .../test/xds_server_integration_test.go | 8 ++++---- .../xdsclient/controller/controller_test.go | 4 ++-- xds/internal/xdsclient/singleton_test.go | 3 ++- 48 files changed, 112 insertions(+), 70 deletions(-) diff --git a/admin/test/utils.go b/admin/test/utils.go index 1add8afa824c..6540797ff2ba 100644 --- a/admin/test/utils.go +++ b/admin/test/utils.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/admin" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/status" ) @@ -78,7 +79,7 @@ func RunRegisterTests(t *testing.T, ec ExpectedStatusCodes) { server.Serve(lis) }() - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("cannot connect to server: %v", err) } diff --git a/authz/sdk_end2end_test.go b/authz/sdk_end2end_test.go index b3d449d5dfbd..db871147f8aa 100644 --- a/authz/sdk_end2end_test.go +++ b/authz/sdk_end2end_test.go @@ -33,6 +33,7 @@ import ( 
"google.golang.org/grpc/authz" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" @@ -324,7 +325,7 @@ func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { go s.Serve(lis) // Establish a connection to the server. - clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } @@ -514,7 +515,7 @@ func (s) TestSDKFileWatcherEnd2End(t *testing.T) { go s.Serve(lis) // Establish a connection to the server. - clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } @@ -583,7 +584,7 @@ func (s) TestSDKFileWatcher_ValidPolicyRefresh(t *testing.T) { go s.Serve(lis) // Establish a connection to the server. - clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } @@ -631,7 +632,7 @@ func (s) TestSDKFileWatcher_InvalidPolicySkipReload(t *testing.T) { go s.Serve(lis) // Establish a connection to the server. 
- clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } @@ -682,7 +683,7 @@ func (s) TestSDKFileWatcher_RecoversFromReloadFailure(t *testing.T) { go s.Serve(lis) // Establish a connection to the server. - clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } diff --git a/balancer/grpclb/grpclb_remote_balancer.go b/balancer/grpclb/grpclb_remote_balancer.go index 330df4baa218..805bbbb789ae 100644 --- a/balancer/grpclb/grpclb_remote_balancer.go +++ b/balancer/grpclb/grpclb_remote_balancer.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" @@ -228,7 +229,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { } else if bundle := lb.grpclbClientConnCreds; bundle != nil { dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) } else { - dopts = append(dopts, grpc.WithInsecure()) + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) } if lb.opt.Dialer != nil { dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) diff --git a/balancer/rls/control_channel.go b/balancer/rls/control_channel.go index 9df96549f7dc..df78f7b55fbe 100644 --- a/balancer/rls/control_channel.go +++ b/balancer/rls/control_channel.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/adaptive" 
"google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" @@ -115,7 +116,7 @@ func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions, serviceConfig st credsOpt = grpc.WithCredentialsBundle(bundle) default: cc.logger.Warningf("no credentials available, using Insecure") - credsOpt = grpc.WithInsecure() + credsOpt = grpc.WithTransportCredentials(insecure.NewCredentials()) } dopts = append(dopts, credsOpt) diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index b3655820c1cf..d9707553a688 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -63,6 +63,7 @@ import ( "google.golang.org/grpc/benchmark/flags" "google.golang.org/grpc/benchmark/latency" "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" @@ -305,7 +306,7 @@ func makeClient(bf stats.Features) (testgrpc.BenchmarkServiceClient, func()) { ) } sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(bf.MaxConcurrentCalls+1))) - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) var lis net.Listener if bf.UseBufConn { diff --git a/benchmark/client/main.go b/benchmark/client/main.go index caf2db70a501..5c615ced1409 100644 --- a/benchmark/client/main.go +++ b/benchmark/client/main.go @@ -51,6 +51,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/benchmark" "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/syscall" @@ -135,7 +136,7 @@ func main() { func buildConnections(ctx context.Context) []*grpc.ClientConn { ccs := make([]*grpc.ClientConn, *numConn) for i := range ccs { - 
ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock()) + ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) } return ccs } diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index c5748d7016f4..312fcfd7dc1f 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" @@ -135,7 +136,7 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } // Use byteBufCodec if it is required. diff --git a/binarylog/binarylog_end2end_test.go b/binarylog/binarylog_end2end_test.go index adf2d1f76047..1ac0a8e7c02b 100644 --- a/binarylog/binarylog_end2end_test.go +++ b/binarylog/binarylog_end2end_test.go @@ -31,6 +31,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/binarylog" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" iblog "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpctest" @@ -310,7 +311,7 @@ func (te *test) clientConn() *grpc.ClientConn { if te.cc != nil { return te.cc } - opts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()} + opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()} var err error te.cc, err = grpc.Dial(te.srvAddr, opts...) 
diff --git a/clientconn.go b/clientconn.go index 28f09dc87073..f9af78913710 100644 --- a/clientconn.go +++ b/clientconn.go @@ -79,7 +79,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") diff --git a/credentials/alts/internal/handshaker/service/service.go b/credentials/alts/internal/handshaker/service/service.go index 77d759cd956f..2de2c4affdaa 100644 --- a/credentials/alts/internal/handshaker/service/service.go +++ b/credentials/alts/internal/handshaker/service/service.go @@ -24,6 +24,7 @@ import ( "sync" grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -49,7 +50,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. 
var err error - hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/examples/features/cancellation/client/main.go b/examples/features/cancellation/client/main.go index 58bd4b6f0180..248619f7a617 100644 --- a/examples/features/cancellation/client/main.go +++ b/examples/features/cancellation/client/main.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/status" ) @@ -55,7 +56,7 @@ func main() { flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/compression/client/main.go b/examples/features/compression/client/main.go index 4375c5d7ef93..24c3bbac1089 100644 --- a/examples/features/compression/client/main.go +++ b/examples/features/compression/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" // Install the gzip compressor pb "google.golang.org/grpc/examples/features/proto/echo" ) @@ -37,7 +38,7 @@ func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/deadline/client/main.go b/examples/features/deadline/client/main.go index 0e2626130cec..8a4e3a2d26cc 100644 --- a/examples/features/deadline/client/main.go +++ b/examples/features/deadline/client/main.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/status" ) @@ -72,7 +73,7 @@ func streamingCall(c pb.EchoClient, requestID int, message string, want codes.Co func main() { flag.Parse() - conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/deadline/server/main.go b/examples/features/deadline/server/main.go index 11cd47a6b5b3..ce3fc61679fc 100644 --- a/examples/features/deadline/server/main.go +++ b/examples/features/deadline/server/main.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" pb "google.golang.org/grpc/examples/features/proto/echo" @@ -94,7 +95,7 @@ func (s *server) Close() { func newEchoServer() *server { target := fmt.Sprintf("localhost:%v", *port) - cc, err := grpc.Dial(target, grpc.WithInsecure()) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/debugging/client/main.go b/examples/features/debugging/client/main.go index 373f7db08e91..09acfa8112e5 100644 --- a/examples/features/debugging/client/main.go +++ b/examples/features/debugging/client/main.go @@ 
-28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/channelz/service" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" @@ -59,7 +60,7 @@ func main() { /***** Initialize manual resolver and Dial *****/ r := manual.NewBuilderWithScheme("whatever") // Set up a connection to the server. - conn, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`)) + conn, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/errors/client/main.go b/examples/features/errors/client/main.go index 4bacff5f3e5b..7f905f82bef3 100644 --- a/examples/features/errors/client/main.go +++ b/examples/features/errors/client/main.go @@ -28,6 +28,7 @@ import ( epb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/status" ) @@ -38,7 +39,7 @@ func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/health/client/main.go b/examples/features/health/client/main.go index 9cbc03f90a47..1e44aeb3d30e 100644 --- a/examples/features/health/client/main.go +++ b/examples/features/health/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" _ "google.golang.org/grpc/health" "google.golang.org/grpc/resolver" @@ -65,7 +66,7 @@ func main() { address := fmt.Sprintf("%s:///unused", r.Scheme()) options := []grpc.DialOption{ - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(serviceConfig), diff --git a/examples/features/keepalive/client/main.go b/examples/features/keepalive/client/main.go index a8cfbc5c4541..feb9b664bf4e 100644 --- a/examples/features/keepalive/client/main.go +++ b/examples/features/keepalive/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/keepalive" ) @@ -42,7 +43,7 @@ var kacp = keepalive.ClientParameters{ func main() { flag.Parse() - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithKeepaliveParams(kacp)) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithKeepaliveParams(kacp)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/load_balancing/client/main.go b/examples/features/load_balancing/client/main.go index 5dd0ddfc7d08..2caecb7b3d32 100644 --- a/examples/features/load_balancing/client/main.go +++ b/examples/features/load_balancing/client/main.go @@ -26,6 
+26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/resolver" ) @@ -58,7 +59,7 @@ func main() { // "pick_first" is the default, so there's no need to set the load balancing policy. pickfirstConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -74,7 +75,7 @@ func main() { roundrobinConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), // This sets the initial balancing policy. - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) diff --git a/examples/features/metadata/client/main.go b/examples/features/metadata/client/main.go index 3aa3a599c2dd..97e7fd40cf45 100644 --- a/examples/features/metadata/client/main.go +++ b/examples/features/metadata/client/main.go @@ -28,6 +28,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/metadata" ) @@ -286,7 +287,7 @@ const message = "this is examples/metadata" func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/multiplex/client/main.go b/examples/features/multiplex/client/main.go index e25bb7a838bb..3cd85240a335 100644 --- a/examples/features/multiplex/client/main.go +++ b/examples/features/multiplex/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ecpb "google.golang.org/grpc/examples/features/proto/echo" hwpb "google.golang.org/grpc/examples/helloworld/helloworld" ) @@ -58,7 +59,7 @@ func callUnaryEcho(client ecpb.EchoClient, message string) { func main() { flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/name_resolving/client/main.go b/examples/features/name_resolving/client/main.go index 25bd5fd46a75..ad6b310b6de7 100644 --- a/examples/features/name_resolving/client/main.go +++ b/examples/features/name_resolving/client/main.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/resolver" ) @@ -57,7 +58,7 @@ func makeRPCs(cc *grpc.ClientConn, n int) { func main() { passthroughConn, err := grpc.Dial( fmt.Sprintf("passthrough:///%s", backendAddr), // Dial to "passthrough:///localhost:50051" - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -71,7 +72,7 @@ func main() { exampleConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), // Dial to "example:///resolver.example.grpc.io" - 
grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) diff --git a/examples/features/retry/README.md b/examples/features/retry/README.md index f56d438adc2b..826cca7f40bc 100644 --- a/examples/features/retry/README.md +++ b/examples/features/retry/README.md @@ -62,5 +62,5 @@ To use the above service config, pass it with `grpc.WithDefaultServiceConfig` to `grpc.Dial`. ```go -conn, err := grpc.Dial(ctx,grpc.WithInsecure(), grpc.WithDefaultServiceConfig(retryPolicy)) +conn, err := grpc.Dial(ctx,grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(retryPolicy)) ``` diff --git a/examples/features/retry/client/main.go b/examples/features/retry/client/main.go index 73147cfe0a27..3b9b80e24ba7 100644 --- a/examples/features/retry/client/main.go +++ b/examples/features/retry/client/main.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" ) @@ -48,7 +49,7 @@ var ( // use grpc.WithDefaultServiceConfig() to set service config func retryDial() (*grpc.ClientConn, error) { - return grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithDefaultServiceConfig(retryPolicy)) + return grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(retryPolicy)) } func main() { diff --git a/examples/features/unix_abstract/client/main.go b/examples/features/unix_abstract/client/main.go index 96c6f82bf19b..3564e7e82fee 100644 --- a/examples/features/unix_abstract/client/main.go +++ b/examples/features/unix_abstract/client/main.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ecpb "google.golang.org/grpc/examples/features/proto/echo" ) @@ -62,7 +63,7 @@ func makeRPCs(cc *grpc.ClientConn, n int) { func main() { flag.Parse() sockAddr := fmt.Sprintf("unix-abstract:%v", 
*addr) - cc, err := grpc.Dial(sockAddr, grpc.WithInsecure()) + cc, err := grpc.Dial(sockAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("grpc.Dial(%q) failed: %v", sockAddr, err) } diff --git a/examples/features/wait_for_ready/main.go b/examples/features/wait_for_ready/main.go index f865410f1aa2..96524a81da32 100644 --- a/examples/features/wait_for_ready/main.go +++ b/examples/features/wait_for_ready/main.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" pb "google.golang.org/grpc/examples/features/proto/echo" @@ -58,7 +59,7 @@ func serve() { } func main() { - conn, err := grpc.Dial("localhost:50053", grpc.WithInsecure()) + conn, err := grpc.Dial("localhost:50053", grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index f3ed23aa32a4..482c96a83b68 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" @@ -116,7 +117,7 @@ func (ss *StubServer) StartServer(sopts ...grpc.ServerOption) error { // StartClient creates a client connected to this service that the test may use. // The newly created client will be available in the Client field of StubServer. func (ss *StubServer) StartClient(dopts ...grpc.DialOption) error { - opts := append([]grpc.DialOption{grpc.WithInsecure()}, dopts...) + opts := append([]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}, dopts...) 
if ss.R != nil { ss.Target = ss.R.Scheme() + ":///" + ss.Address opts = append(opts, grpc.WithResolvers(ss.R)) diff --git a/interop/client/client.go b/interop/client/client.go index f41f56fbbd55..ba1598db0ee6 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" @@ -176,7 +177,7 @@ func main() { case credsComputeEngineCreds: opts = append(opts, grpc.WithCredentialsBundle(google.NewComputeEngineCredentials())) case credsNone: - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) default: logger.Fatal("Invalid creds") } diff --git a/interop/http2/negative_http2_client.go b/interop/http2/negative_http2_client.go index 9ed34f75716d..9fddc5f328a9 100644 --- a/interop/http2/negative_http2_client.go +++ b/interop/http2/negative_http2_client.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" "google.golang.org/grpc/status" @@ -131,7 +132,7 @@ func main() { flag.Parse() serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) var opts []grpc.DialOption - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) conn, err := grpc.Dial(serverAddr, opts...) 
if err != nil { logger.Fatalf("Fail to dial: %v", err) diff --git a/profiling/cmd/remote.go b/profiling/cmd/remote.go index b6adfd6a6bef..71c21c332b74 100644 --- a/profiling/cmd/remote.go +++ b/profiling/cmd/remote.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ppb "google.golang.org/grpc/profiling/proto" ) @@ -78,7 +79,7 @@ func remoteCommand() error { } logger.Infof("dialing %s", *flagAddress) - cc, err := grpc.Dial(*flagAddress, grpc.WithInsecure()) + cc, err := grpc.Dial(*flagAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { logger.Errorf("cannot dial %s: %v", *flagAddress, err) return err diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index 24070141c2f2..9d23e2876423 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -30,6 +30,7 @@ import ( "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" @@ -205,7 +206,7 @@ func (x) TestReflectionEnd2end(t *testing.T) { go s.Serve(lis) // Create client. 
- conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("cannot connect to server: %v", err) } diff --git a/stats/stats_test.go b/stats/stats_test.go index 234919e86c8d..1b08568b906b 100644 --- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -30,6 +30,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" @@ -246,7 +247,7 @@ func (te *test) clientConn() *grpc.ClientConn { return te.cc } opts := []grpc.DialOption{ - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), grpc.WithUserAgent("test/0.0.1"), } diff --git a/stress/client/main.go b/stress/client/main.go index 37e2a38f42a2..5e260b172e8c 100644 --- a/stress/client/main.go +++ b/stress/client/main.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" "google.golang.org/grpc/status" @@ -293,7 +294,7 @@ func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.C } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } return grpc.Dial(address, opts...) 
} diff --git a/stress/metrics_client/main.go b/stress/metrics_client/main.go index ad6db6dd7a19..f64a64c8a3f0 100644 --- a/stress/metrics_client/main.go +++ b/stress/metrics_client/main.go @@ -26,6 +26,7 @@ import ( "io" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" metricspb "google.golang.org/grpc/stress/grpc_testing" ) @@ -74,7 +75,7 @@ func main() { logger.Fatalf("Metrics server address is empty.") } - conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure()) + conn, err := grpc.Dial(*metricsServerAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { logger.Fatalf("cannot connect to metrics server: %v", err) } diff --git a/test/authority_test.go b/test/authority_test.go index 0f823bdbd1b0..452b896eebf9 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" @@ -193,7 +194,7 @@ func (s) TestColonPortAuthority(t *testing.T) { // // Append "localhost" before calling net.Dial, in case net.Dial on certain // platforms doesn't work well for address without the IP. 
- cc, err := grpc.Dial(":"+port, grpc.WithInsecure(), grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + cc, err := grpc.Dial(":"+port, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return (&net.Dialer{}).DialContext(ctx, "tcp", "localhost"+addr) })) if err != nil { diff --git a/test/balancer_test.go b/test/balancer_test.go index 5d5c85896d30..8fe1db32658b 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -393,7 +393,7 @@ func (s) TestNonGRPCLBBalancerGetsNoGRPCLBAddress(t *testing.T) { b := newTestBalancerKeepAddresses() balancer.Register(b) - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r), grpc.WithBalancerName(b.Name())) if err != nil { t.Fatalf("failed to dial: %v", err) @@ -655,7 +655,7 @@ func (s) TestServersSwap(t *testing.T) { // Initialize client r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: addr1}}}) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(r)) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) if err != nil { t.Fatalf("Error creating client: %v", err) } @@ -709,7 +709,7 @@ func (s) TestEmptyAddrs(t *testing.T) { pfr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(pfr)) + pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(pfr)) if err != nil { t.Fatalf("Error creating client: %v", err) } @@ -729,7 +729,7 @@ func (s) 
TestEmptyAddrs(t *testing.T) { rrr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(rrr), + rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(rrr), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name))) if err != nil { t.Fatalf("Error creating client: %v", err) @@ -785,7 +785,7 @@ func (s) TestWaitForReady(t *testing.T) { // Initialize client r := manual.NewBuilderWithScheme("whatever") - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(r)) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) if err != nil { t.Fatalf("Error creating client: %v", err) } diff --git a/test/end2end_test.go b/test/end2end_test.go index 957d13f731f7..cdf0434a1217 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -50,6 +50,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/health" @@ -801,7 +802,7 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) case "empty": // Don't add any transport creds option. default: - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } // TODO(bar) switch balancer case "pick_first". 
var scheme string @@ -3740,7 +3741,7 @@ func (s) TestTransparentRetry(t *testing.T) { }, } server.start(t, lis) - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("failed to dial due to err: %v", err) } @@ -5131,7 +5132,7 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { go s.Serve(lis) - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } @@ -6561,7 +6562,7 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) { close(done) }() - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Failed to dial server: %v", err) } @@ -6780,7 +6781,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) { defer s.Stop() dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second) defer dcancel() - cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0)) + cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0)) if err != nil { t.Fatalf("Failed to dial server") } @@ -6982,7 +6983,7 @@ func (s) TestNetPipeConn(t *testing.T) { go s.Serve(pl) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer())) + cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDialer(pl.Dialer())) if err != nil { t.Fatalf("Error creating client: %v", err) } @@ -7082,7 +7083,7 @@ 
func (s) TestGoAwayThenClose(t *testing.T) { {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithInsecure()) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Error creating client: %v", err) } @@ -7483,7 +7484,7 @@ func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string responses: []httpServerResponse{{trailers: headerFields}}, } server.start(t, lis) - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("failed to dial due to err: %v", err) } diff --git a/test/goaway_test.go b/test/goaway_test.go index 6ef11e26419d..1b5a3b7a04e7 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -25,6 +25,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/keepalive" testpb "google.golang.org/grpc/test/grpc_testing" @@ -57,7 +58,7 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { } go s.Serve(lis) - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Failed to dial server: %v", err) } diff --git a/test/gracefulstop_test.go b/test/gracefulstop_test.go index 6058fb8b333c..a5a8448ad2fd 100644 --- a/test/gracefulstop_test.go +++ b/test/gracefulstop_test.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" @@ -146,7 +147,7 @@ func (s) TestGracefulStop(t 
*testing.T) { // even though GracefulStop has closed the listener. ctx, dialCancel := context.WithTimeout(context.Background(), 5*time.Second) defer dialCancel() - cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(d)) + cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(d)) if err != nil { t.Fatalf("grpc.DialContext(_, %q, _) = %v", lis.Addr().String(), err) } diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index 99f7d8951ebd..247ffea7c3c1 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" _ "google.golang.org/grpc/health" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" @@ -154,7 +155,7 @@ type clientConfig struct { func setupClient(c *clientConfig) (cc *grpc.ClientConn, r *manual.Resolver, deferFunc func(), err error) { r = manual.NewBuilderWithScheme("whatever") var opts []grpc.DialOption - opts = append(opts, grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(c.balancerName)) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r), grpc.WithBalancerName(c.balancerName)) if c.testHealthCheckFuncWrapper != nil { opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper)) } diff --git a/test/insecure_creds_test.go b/test/insecure_creds_test.go index 9c925e4757c7..ec1bb41433cf 100644 --- a/test/insecure_creds_test.go +++ b/test/insecure_creds_test.go @@ -124,7 +124,7 @@ func (s) TestInsecureCreds(t *testing.T) { go s.Serve(lis) addr := lis.Addr().String() - opts := []grpc.DialOption{grpc.WithInsecure()} + opts := 
[]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} if test.clientInsecureCreds { opts = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} } diff --git a/test/local_creds_test.go b/test/local_creds_test.go index 3933bb39635b..8d649ed5365f 100644 --- a/test/local_creds_test.go +++ b/test/local_creds_test.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/local" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/peer" @@ -218,7 +219,7 @@ func (s) TestLocalCredsClientFail(t *testing.T) { func (s) TestLocalCredsServerFail(t *testing.T) { // Use insecure at client-side which should lead to server-side failure. - opts := []grpc.DialOption{grpc.WithInsecure()} + opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} if err := testLocalCredsE2EFail(opts); status.Code(err) != codes.Unavailable { t.Fatalf("testLocalCredsE2EFail() = %v; want %v", err, codes.Unavailable) } diff --git a/test/retry_test.go b/test/retry_test.go index 1bd866add606..1013e54ce051 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -33,6 +33,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" @@ -531,7 +532,7 @@ func (s) TestRetryStats(t *testing.T) { } server.start(t, lis) handler := &retryStatsHandler{} - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithStatsHandler(handler), + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithStatsHandler(handler), grpc.WithDefaultServiceConfig((`{ "methodConfig": [{ "name": [{"service": 
"grpc.testing.TestService"}], diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index fd19599ab094..1d772b67f376 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -30,6 +30,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/uuid" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds" @@ -278,7 +279,7 @@ func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.M }() // Create CSDS client. - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("cannot connect to server: %v", err) } @@ -525,7 +526,7 @@ func (s) TestCSDSNoXDSClient(t *testing.T) { defer server.Stop() // Create CSDS client. - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("cannot connect to server: %v", err) } diff --git a/xds/internal/test/e2e/e2e.go b/xds/internal/test/e2e/e2e.go index 3c388fbf0d93..30b125b787a1 100644 --- a/xds/internal/test/e2e/e2e.go +++ b/xds/internal/test/e2e/e2e.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + "google.golang.org/grpc/credentials/insecure" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) @@ -70,7 +71,7 @@ func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ... 
) cmd.Start() - cc, err := grpc.Dial(fmt.Sprintf("localhost:%d", clientStatsPort), grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.WaitForReady(true))) + cc, err := grpc.Dial(fmt.Sprintf("localhost:%d", clientStatsPort), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true))) if err != nil { return nil, err } diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index c2afebfd2253..6904e0e88d5c 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -565,7 +565,7 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { t.Fatal(err) } - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -952,7 +952,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { t.Fatal(err) } - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ -1143,7 +1143,7 @@ func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { t.Fatal(err) } - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } @@ 
-1200,7 +1200,7 @@ func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { t.Fatal(err) } - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithInsecure(), grpc.WithResolvers(resolver)) + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } diff --git a/xds/internal/xdsclient/controller/controller_test.go b/xds/internal/xdsclient/controller/controller_test.go index 8c7c2838d838..644698cc26f7 100644 --- a/xds/internal/xdsclient/controller/controller_test.go +++ b/xds/internal/xdsclient/controller/controller_test.go @@ -87,7 +87,7 @@ func (s) TestNew(t *testing.T) { name: "happy-case", config: &bootstrap.ServerConfig{ ServerURI: testXDSServer, - Creds: grpc.WithInsecure(), + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: testutils.EmptyNodeProtoV2, }, }, @@ -111,7 +111,7 @@ func (s) TestNew(t *testing.T) { func (s) TestNewWithGRPCDial(t *testing.T) { config := &bootstrap.ServerConfig{ ServerURI: testXDSServer, - Creds: grpc.WithInsecure(), + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: testutils.EmptyNodeProtoV2, } diff --git a/xds/internal/xdsclient/singleton_test.go b/xds/internal/xdsclient/singleton_test.go index 1bf6077f3952..b22663f33ab5 100644 --- a/xds/internal/xdsclient/singleton_test.go +++ b/xds/internal/xdsclient/singleton_test.go @@ -23,6 +23,7 @@ import ( "testing" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -37,7 +38,7 @@ func (s) TestClientNewSingleton(t *testing.T) { return &bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: testXDSServer, - Creds: grpc.WithInsecure(), 
+ Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: xdstestutils.EmptyNodeProtoV2, }, }, nil From 0e055491d58ccdd3cb7f30796559a20abfe8d505 Mon Sep 17 00:00:00 2001 From: Appu Date: Wed, 9 Feb 2022 14:29:47 -0500 Subject: [PATCH 425/998] Format directory/file references (#5184) --- examples/route_guide/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/route_guide/README.md b/examples/route_guide/README.md index ddec3a0bb5b8..29343b1c6b59 100644 --- a/examples/route_guide/README.md +++ b/examples/route_guide/README.md @@ -4,11 +4,11 @@ perform unary, client streaming, server streaming and full duplex RPCs. Please refer to [gRPC Basics: Go](https://grpc.io/docs/tutorials/basic/go.html) for more information. -See the definition of the route guide service in routeguide/route_guide.proto. +See the definition of the route guide service in `routeguide/route_guide.proto`. # Run the sample code -To compile and run the server, assuming you are in the root of the route_guide -folder, i.e., .../examples/route_guide/, simply: +To compile and run the server, assuming you are in the root of the `route_guide` +folder, i.e., `.../examples/route_guide/`, simply: ```sh $ go run server/server.go From a354b1eec35081ebfc7673a7edf273a13a2bfaee Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 9 Feb 2022 14:14:44 -0800 Subject: [PATCH 426/998] channelz: rename NewChannelzStorage to NewChannelzStorageForTesting (#5190) --- channelz/service/service_sktopt_test.go | 2 +- channelz/service/service_test.go | 14 ++--- internal/channelz/funcs.go | 69 ++++++++++++++----------- test/channelz_linux_test.go | 2 +- test/channelz_test.go | 54 +++++++++---------- 5 files changed, 74 insertions(+), 67 deletions(-) diff --git a/channelz/service/service_sktopt_test.go b/channelz/service/service_sktopt_test.go index 4ea6b20cd6a6..efd383fce3c8 100644 --- a/channelz/service/service_sktopt_test.go +++ 
b/channelz/service/service_sktopt_test.go @@ -126,7 +126,7 @@ func protoToSocketOption(skopts []*channelzpb.SocketOption) *channelz.SocketOpti } func (s) TestGetSocketOptions(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) ss := []*dummySocket{ { diff --git a/channelz/service/service_test.go b/channelz/service/service_test.go index 03d2b29c27b4..17409533745b 100644 --- a/channelz/service/service_test.go +++ b/channelz/service/service_test.go @@ -322,7 +322,7 @@ func (s) TestGetTopChannels(t *testing.T) { }, {}, } - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) for _, c := range tcs { id := channelz.RegisterChannel(c, 0, "") @@ -371,7 +371,7 @@ func (s) TestGetServers(t *testing.T) { lastCallStartedTimestamp: time.Now().UTC(), }, } - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) for _, s := range ss { id := channelz.RegisterServer(s, "") @@ -400,7 +400,7 @@ func (s) TestGetServers(t *testing.T) { } func (s) TestGetServerSockets(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) @@ -441,7 +441,7 @@ func (s) TestGetServerSockets(t *testing.T) { // This test makes a GetServerSockets with a non-zero start ID, and expect only // sockets with ID >= the given start ID. 
func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) @@ -473,7 +473,7 @@ func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { } func (s) TestGetChannel(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) refNames := []string{"top channel 1", "nested channel 1", "sub channel 2", "nested channel 3"} ids := make([]int64, 4) @@ -578,7 +578,7 @@ func (s) TestGetSubChannel(t *testing.T) { subchanConnectivityChange = fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready) subChanPickNewAddress = fmt.Sprintf("Subchannel picks a new address %q to connect", "0.0.0.0") ) - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) refNames := []string{"top channel 1", "sub channel 1", "socket 1", "socket 2"} ids := make([]int64, 4) @@ -652,7 +652,7 @@ func (s) TestGetSubChannel(t *testing.T) { } func (s) TestGetSocket(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) ss := []*dummySocket{ { diff --git a/internal/channelz/funcs.go b/internal/channelz/funcs.go index cd1807543eee..ea660a147cf9 100644 --- a/internal/channelz/funcs.go +++ b/internal/channelz/funcs.go @@ -24,6 +24,7 @@ package channelz import ( + "context" "fmt" "sort" "sync" @@ -49,7 +50,8 @@ var ( // TurnOn turns on channelz data collection. 
func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +96,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
+ + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -326,6 +322,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c diff --git a/test/channelz_linux_test.go b/test/channelz_linux_test.go index aa6febe537a0..0eef08df3c9f 100644 --- a/test/channelz_linux_test.go +++ b/test/channelz_linux_test.go @@ -38,7 +38,7 @@ func (s) 
TestCZSocketMetricsSocketOption(t *testing.T) { } func testCZSocketMetricsSocketOption(t *testing.T, e env) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) te := newTest(t, e) te.startServer(&testServer{security: e.security}) diff --git a/test/channelz_test.go b/test/channelz_test.go index 6cb09dd8d89e..3c953f1a5e80 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -82,7 +82,7 @@ func (s) TestCZServerRegistrationAndDeletion(t *testing.T) { } for _, c := range testcases { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -101,7 +101,7 @@ func (s) TestCZServerRegistrationAndDeletion(t *testing.T) { } func (s) TestCZGetServer(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -153,7 +153,7 @@ func (s) TestCZTopChannelRegistrationAndDeletion(t *testing.T) { } for _, c := range testcases { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -191,7 +191,7 @@ func (s) TestCZTopChannelRegistrationAndDeletion(t *testing.T) { } func (s) TestCZTopChannelRegistrationAndDeletionWhenDialFail(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) // Make dial fails (due to no transport security specified) _, err := grpc.Dial("fake.addr") @@ -204,7 +204,7 @@ func (s) TestCZTopChannelRegistrationAndDeletionWhenDialFail(t *testing.T) { } func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := 
channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv // avoid calling API to set balancer type, which will void service config's change of balancer. @@ -248,7 +248,7 @@ func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { } func (s) TestCZClientSubChannelSocketRegistrationAndDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv num := 3 // number of backends @@ -336,7 +336,7 @@ func (s) TestCZServerSocketRegistrationAndDeletion(t *testing.T) { } for _, c := range testcases { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -396,7 +396,7 @@ func (s) TestCZServerSocketRegistrationAndDeletion(t *testing.T) { } func (s) TestCZServerListenSocketDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) s := grpc.NewServer() lis, err := net.Listen("tcp", "localhost:0") @@ -453,7 +453,7 @@ func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { // | | // v v // Socket1 Socket2 - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) topChanID := channelz.RegisterChannel(&dummyChannel{}, 0, "") subChanID1 := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") @@ -498,7 +498,7 @@ func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { } func (s) TestCZChannelMetrics(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv num := 3 // number of backends @@ -586,7 +586,7 @@ func (s) TestCZChannelMetrics(t *testing.T) { } func (s) TestCZServerMetrics(t *testing.T) { 
- czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -862,7 +862,7 @@ func doIdleCallToInvokeKeepAlive(tc testpb.TestServiceClient, t *testing.T) { } func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -962,7 +962,7 @@ func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) { // It is separated from other cases due to setup incompatibly, i.e. max receive // size violation will mask flow control violation. func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1046,7 +1046,7 @@ func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *t } func (s) TestCZClientAndServerSocketMetricsFlowControl(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1159,7 +1159,7 @@ func (s) TestCZClientAndServerSocketMetricsFlowControl(t *testing.T) { } func (s) TestCZClientSocketMetricsKeepAlive(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) defer func(t time.Duration) { internal.KeepaliveMinPingTime = t }(internal.KeepaliveMinPingTime) internal.KeepaliveMinPingTime = time.Second @@ -1212,7 +1212,7 @@ func (s) TestCZClientSocketMetricsKeepAlive(t *testing.T) { } func (s) TestCZServerSocketMetricsStreamsAndMessagesCount(t *testing.T) { - czCleanup := 
channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1273,7 +1273,7 @@ func (s) TestCZServerSocketMetricsStreamsAndMessagesCount(t *testing.T) { } func (s) TestCZServerSocketMetricsKeepAlive(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1345,7 +1345,7 @@ var cipherSuites = []string{ } func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpTLSRREnv te := newTest(t, e) @@ -1395,7 +1395,7 @@ func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) { } func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv // avoid calling API to set balancer type, which will void service config's change of balancer. 
@@ -1470,7 +1470,7 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { } func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1562,7 +1562,7 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { } func (s) TestCZChannelAddressResolutionChange(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv e.balancer = "" @@ -1665,7 +1665,7 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { } func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv e.balancer = "" @@ -1739,7 +1739,7 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { } func (s) TestCZSubChannelConnectivityState(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1833,7 +1833,7 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { } func (s) TestCZChannelConnectivityState(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1889,7 +1889,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { } func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv // avoid newTest using WithBalancerName, which would override service @@ -1954,7 +1954,7 @@ func (s) 
TestCZTraceOverwriteChannelDeletion(t *testing.T) { } func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -2014,7 +2014,7 @@ func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { } func (s) TestCZTraceTopChannelDeletionTraceClear(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) From 75fd0240ac4146e5bcd211e0a3de695ad4369de3 Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Thu, 10 Feb 2022 15:13:34 -0800 Subject: [PATCH 427/998] remove sdk term from grpc authz (#5191) --- ...end_test.go => grpc_authz_end2end_test.go} | 44 +++++++++---------- ...s.go => grpc_authz_server_interceptors.go} | 0 ...=> grpc_authz_server_interceptors_test.go} | 0 3 files changed, 22 insertions(+), 22 deletions(-) rename authz/{sdk_end2end_test.go => grpc_authz_end2end_test.go} (93%) rename authz/{sdk_server_interceptors.go => grpc_authz_server_interceptors.go} (100%) rename authz/{sdk_server_interceptors_test.go => grpc_authz_server_interceptors_test.go} (100%) diff --git a/authz/sdk_end2end_test.go b/authz/grpc_authz_end2end_test.go similarity index 93% rename from authz/sdk_end2end_test.go rename to authz/grpc_authz_end2end_test.go index db871147f8aa..f27ca2c06e20 100644 --- a/authz/sdk_end2end_test.go +++ b/authz/grpc_authz_end2end_test.go @@ -69,7 +69,7 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -var sdkTests = map[string]struct { +var authzTests = map[string]struct { authzPolicy string md metadata.MD wantStatus *status.Status @@ -307,10 +307,10 @@ var sdkTests = map[string]struct { }, } -func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { - for name, test := 
range sdkTests { +func (s) TestStaticPolicyEnd2End(t *testing.T) { + for name, test := range authzTests { t.Run(name, func(t *testing.T) { - // Start a gRPC server with SDK unary and stream server interceptors. + // Start a gRPC server with gRPC authz unary and stream server interceptors. i, _ := authz.NewStatic(test.authzPolicy) s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor), @@ -363,7 +363,7 @@ func (s) TestSDKStaticPolicyEnd2End(t *testing.T) { } } -func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection(t *testing.T) { +func (s) TestAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection(t *testing.T) { authzPolicy := `{ "name": "authz", "allow_rules": @@ -376,7 +376,7 @@ func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection( } ] }` - // Start a gRPC server with SDK unary server interceptor. + // Start a gRPC server with gRPC authz unary server interceptor. i, _ := authz.NewStatic(authzPolicy) creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) if err != nil { @@ -415,7 +415,7 @@ func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection( } } -func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t *testing.T) { +func (s) TestAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t *testing.T) { authzPolicy := `{ "name": "authz", "allow_rules": @@ -428,7 +428,7 @@ func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection } ] }` - // Start a gRPC server with SDK unary server interceptor. + // Start a gRPC server with gRPC authz unary server interceptor. 
i, _ := authz.NewStatic(authzPolicy) cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) if err != nil { @@ -493,14 +493,14 @@ func (s) TestSDKAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection } } -func (s) TestSDKFileWatcherEnd2End(t *testing.T) { - for name, test := range sdkTests { +func (s) TestFileWatcherEnd2End(t *testing.T) { + for name, test := range authzTests { t.Run(name, func(t *testing.T) { file := createTmpPolicyFile(t, name, []byte(test.authzPolicy)) i, _ := authz.NewFileWatcher(file, 1*time.Second) defer i.Close() - // Start a gRPC server with SDK unary and stream server interceptors. + // Start a gRPC server with gRPC authz unary and stream server interceptors. s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor), grpc.ChainStreamInterceptor(i.StreamInterceptor)) @@ -564,13 +564,13 @@ func retryUntil(ctx context.Context, tsc pb.TestServiceClient, want *status.Stat return lastErr } -func (s) TestSDKFileWatcher_ValidPolicyRefresh(t *testing.T) { - valid1 := sdkTests["DeniesRPCMatchInDenyAndAllow"] +func (s) TestFileWatcher_ValidPolicyRefresh(t *testing.T) { + valid1 := authzTests["DeniesRPCMatchInDenyAndAllow"] file := createTmpPolicyFile(t, "valid_policy_refresh", []byte(valid1.authzPolicy)) i, _ := authz.NewFileWatcher(file, 100*time.Millisecond) defer i.Close() - // Start a gRPC server with SDK unary server interceptor. + // Start a gRPC server with gRPC authz unary server interceptor. s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() @@ -601,7 +601,7 @@ func (s) TestSDKFileWatcher_ValidPolicyRefresh(t *testing.T) { } // Rewrite the file with a different valid authorization policy. 
- valid2 := sdkTests["AllowsRPCEmptyDenyMatchInAllow"] + valid2 := authzTests["AllowsRPCEmptyDenyMatchInAllow"] if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) } @@ -612,13 +612,13 @@ func (s) TestSDKFileWatcher_ValidPolicyRefresh(t *testing.T) { } } -func (s) TestSDKFileWatcher_InvalidPolicySkipReload(t *testing.T) { - valid := sdkTests["DeniesRPCMatchInDenyAndAllow"] +func (s) TestFileWatcher_InvalidPolicySkipReload(t *testing.T) { + valid := authzTests["DeniesRPCMatchInDenyAndAllow"] file := createTmpPolicyFile(t, "invalid_policy_skip_reload", []byte(valid.authzPolicy)) i, _ := authz.NewFileWatcher(file, 20*time.Millisecond) defer i.Close() - // Start a gRPC server with SDK unary server interceptors. + // Start a gRPC server with gRPC authz unary server interceptors. s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() @@ -663,13 +663,13 @@ func (s) TestSDKFileWatcher_InvalidPolicySkipReload(t *testing.T) { } } -func (s) TestSDKFileWatcher_RecoversFromReloadFailure(t *testing.T) { - valid1 := sdkTests["DeniesRPCMatchInDenyAndAllow"] +func (s) TestFileWatcher_RecoversFromReloadFailure(t *testing.T) { + valid1 := authzTests["DeniesRPCMatchInDenyAndAllow"] file := createTmpPolicyFile(t, "recovers_from_reload_failure", []byte(valid1.authzPolicy)) i, _ := authz.NewFileWatcher(file, 100*time.Millisecond) defer i.Close() - // Start a gRPC server with SDK unary server interceptors. + // Start a gRPC server with gRPC authz unary server interceptors. s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() @@ -714,7 +714,7 @@ func (s) TestSDKFileWatcher_RecoversFromReloadFailure(t *testing.T) { } // Rewrite the file with a different valid authorization policy. 
- valid2 := sdkTests["AllowsRPCEmptyDenyMatchInAllow"] + valid2 := authzTests["AllowsRPCEmptyDenyMatchInAllow"] if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) } diff --git a/authz/sdk_server_interceptors.go b/authz/grpc_authz_server_interceptors.go similarity index 100% rename from authz/sdk_server_interceptors.go rename to authz/grpc_authz_server_interceptors.go diff --git a/authz/sdk_server_interceptors_test.go b/authz/grpc_authz_server_interceptors_test.go similarity index 100% rename from authz/sdk_server_interceptors_test.go rename to authz/grpc_authz_server_interceptors_test.go From 46009ac902e2256a2675e6e7057d384f6fdc222d Mon Sep 17 00:00:00 2001 From: Thomas Hallgren Date: Mon, 14 Feb 2022 16:44:21 +0100 Subject: [PATCH 428/998] transport: Add an Unwrap method to ConnectionError (#5148) --- internal/transport/transport.go | 6 ++++++ internal/transport/transport_test.go | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/internal/transport/transport.go b/internal/transport/transport.go index d3bf65b2bdff..0c43efaa6497 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -741,6 +741,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. 
ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 34536c9c3252..1fc2d217acb0 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -27,6 +27,7 @@ import ( "io" "math" "net" + "os" "runtime" "strconv" "strings" @@ -2404,3 +2405,10 @@ func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { }) } } + +func TestConnectionError_Unwrap(t *testing.T) { + err := connectionErrorf(false, os.ErrNotExist, "unwrap me") + if !errors.Is(err, os.ErrNotExist) { + t.Error("ConnectionError does not unwrap") + } +} From ebc30b8fc32e02f0ac5eff0ddbe4ccdca58bd8c4 Mon Sep 17 00:00:00 2001 From: Eric Butler Date: Tue, 15 Feb 2022 14:01:42 -0500 Subject: [PATCH 429/998] reflection: use protobuf/reflect instead of go reflection, fix dynamic messages (#5180) --- examples/go.mod | 4 +- examples/go.sum | 11 +- go.mod | 6 +- go.sum | 11 +- .../grpc_testing_not_regenerate/README.md | 5 + .../grpc_testing_not_regenerate/dynamic.pb.go | 35 +++++ .../grpc_testing_not_regenerate/dynamic.proto | 13 ++ .../testv3.pb.go | 4 +- .../testv3.proto | 2 +- reflection/grpc_testingv3/README.md | 3 - reflection/serverreflection.go | 89 +++--------- reflection/serverreflection_test.go | 129 +++++++++++------- regenerate.sh | 12 +- security/advancedtls/examples/go.sum | 12 +- security/advancedtls/go.mod | 1 - security/advancedtls/go.sum | 12 +- vet.sh | 2 +- 17 files changed, 200 insertions(+), 151 deletions(-) create mode 100644 reflection/grpc_testing_not_regenerate/README.md create mode 100644 reflection/grpc_testing_not_regenerate/dynamic.pb.go create mode 100644 reflection/grpc_testing_not_regenerate/dynamic.proto rename reflection/{grpc_testingv3 => grpc_testing_not_regenerate}/testv3.pb.go (99%) rename reflection/{grpc_testingv3 => grpc_testing_not_regenerate}/testv3.proto (97%) delete mode 100644 reflection/grpc_testingv3/README.md diff --git 
a/examples/go.mod b/examples/go.mod index 4f19b852edd8..d5b7d122d0fc 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -3,11 +3,11 @@ module google.golang.org/grpc/examples go 1.14 require ( - github.com/golang/protobuf v1.4.3 + github.com/golang/protobuf v1.5.2 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 google.golang.org/grpc v1.36.0 - google.golang.org/protobuf v1.25.0 + google.golang.org/protobuf v1.26.0 ) replace google.golang.org/grpc => ../ diff --git a/examples/go.sum b/examples/go.sum index 1eff984d41bb..3a7309c5328e 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -28,13 +28,16 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp 
v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -86,8 +89,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/go.mod b/go.mod index fcffdceef25c..0e7dc351ae14 100644 --- a/go.mod +++ b/go.mod @@ -8,12 +8,12 @@ require ( github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.3 - github.com/google/go-cmp v0.5.0 + github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.5 github.com/google/uuid v1.1.2 golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.25.0 + google.golang.org/protobuf v1.26.0 ) diff --git a/go.sum b/go.sum index 8b542e0beb65..03be9ef71ec7 100644 --- a/go.sum +++ b/go.sum @@ -40,14 +40,17 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -117,8 +120,10 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git 
a/reflection/grpc_testing_not_regenerate/README.md b/reflection/grpc_testing_not_regenerate/README.md new file mode 100644 index 000000000000..7f29ff61a537 --- /dev/null +++ b/reflection/grpc_testing_not_regenerate/README.md @@ -0,0 +1,5 @@ +testv3.pb.go is generated with an older version of codegen, to test reflection behavior with `grpc.SupportPackageIsVersion3`. DO NOT REGENERATE! + +testv3.pb.go is manually edited to replace `"golang.org/x/net/context"` with `"context"`. + +dynamic.pb.go is generated with the latest protoc and manually edited to remove everything except the descriptor bytes var, which is renamed and exported. \ No newline at end of file diff --git a/reflection/grpc_testing_not_regenerate/dynamic.pb.go b/reflection/grpc_testing_not_regenerate/dynamic.pb.go new file mode 100644 index 000000000000..35e4f02478b2 --- /dev/null +++ b/reflection/grpc_testing_not_regenerate/dynamic.pb.go @@ -0,0 +1,35 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc_testing_not_regenerate + +// FileDynamicProtoRawDesc is the descriptor for dynamic.proto, see README.md. 
+var FileDynamicProtoRawDesc = []byte{ + 0x0a, 0x0d, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0c, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x22, 0x0c, 0x0a, + 0x0a, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x71, 0x32, 0x57, 0x0a, 0x0e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x31, 0x12, 0x18, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x64, 0x61, 0x74, 0x61, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} diff --git a/reflection/grpc_testing_not_regenerate/dynamic.proto b/reflection/grpc_testing_not_regenerate/dynamic.proto new file mode 100644 index 000000000000..bcfce1290f11 --- /dev/null +++ b/reflection/grpc_testing_not_regenerate/dynamic.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +option go_package = "google.golang.org/grpc/reflection/grpc_testing_not_regenerate"; + +package grpc.testing; + +message DynamicRes {} + +message DynamicReq {} + +service DynamicService { + rpc DynamicMessage1(DynamicReq) returns (DynamicRes); +} \ No newline at end of file diff --git a/reflection/grpc_testingv3/testv3.pb.go b/reflection/grpc_testing_not_regenerate/testv3.pb.go similarity index 99% rename from 
reflection/grpc_testingv3/testv3.pb.go rename to reflection/grpc_testing_not_regenerate/testv3.pb.go index d7a69e546ead..5cb225f31fb3 100644 --- a/reflection/grpc_testingv3/testv3.pb.go +++ b/reflection/grpc_testing_not_regenerate/testv3.pb.go @@ -3,7 +3,7 @@ // DO NOT EDIT! /* -Package grpc_testingv3 is a generated protocol buffer package. +Package grpc_testing_not_regenerate is a generated protocol buffer package. It is generated from these files: testv3.proto @@ -12,7 +12,7 @@ It has these top-level messages: SearchResponseV3 SearchRequestV3 */ -package grpc_testingv3 +package grpc_testing_not_regenerate import proto "github.com/golang/protobuf/proto" import fmt "fmt" diff --git a/reflection/grpc_testingv3/testv3.proto b/reflection/grpc_testing_not_regenerate/testv3.proto similarity index 97% rename from reflection/grpc_testingv3/testv3.proto rename to reflection/grpc_testing_not_regenerate/testv3.proto index 38a615a90d91..9049abc586ca 100644 --- a/reflection/grpc_testingv3/testv3.proto +++ b/reflection/grpc_testing_not_regenerate/testv3.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -option go_package = "google.golang.org/grpc/reflection/grpc_testingv3"; +option go_package = "google.golang.org/grpc/reflection/grpc_testing_not_regenerate"; package grpc.testingv3; diff --git a/reflection/grpc_testingv3/README.md b/reflection/grpc_testingv3/README.md deleted file mode 100644 index 83d58756a440..000000000000 --- a/reflection/grpc_testingv3/README.md +++ /dev/null @@ -1,3 +0,0 @@ -The pb.go is genenated with an older version of codegen, to test reflection behavior with `grpc.SupportPackageIsVersion3`. DO NOT REGENERATE! - -pb.go is manually edited to replace `"golang.org/x/net/context"` with `"context"`. 
diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index 82a5ba7f2444..9b387dddee58 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -42,7 +42,6 @@ import ( "fmt" "io" "io/ioutil" - "reflect" "sort" "sync" @@ -52,6 +51,10 @@ import ( "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -80,14 +83,6 @@ func Register(s GRPCServer) { }) } -// protoMessage is used for type assertion on proto messages. -// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). -type protoMessage interface { - Descriptor() ([]byte, []int) -} - func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { s.initSymbols.Do(func() { serviceInfo := s.s.GetServiceInfo() @@ -194,18 +189,6 @@ func fqn(prefix, name string) string { return prefix + "." + name } -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. -func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) -} - // decodeFileDesc does decompression and unmarshalling on the given // file descriptor byte slice. 
func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { @@ -234,21 +217,12 @@ func decompress(b []byte) ([]byte, error) { return out, nil } -func typeForName(name string) (reflect.Type, error) { - pt := proto.MessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil -} - -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) +func fileDescContainingExtension(typeName string, ext int32) (*dpb.FileDescriptorProto, error) { + desc, err := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(typeName)) + if err != nil { + return nil, err } + m := dynamicpb.NewMessage(desc.(protoreflect.MessageDescriptor)) var extDesc *proto.ExtensionDesc for id, desc := range proto.RegisteredExtensions(m) { @@ -265,20 +239,6 @@ func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescripto return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) } -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - exts := proto.RegisteredExtensions(m) - out := make([]int32, 0, len(exts)) - for id := range exts { - out = append(out, id) - } - return out, nil -} - // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. 
@@ -351,18 +311,12 @@ func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, s if fd == nil { // Check if it's a type name that was not present in the // transitive dependencies of the registered services. - if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } + desc, err := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } + fd = protodesc.ToFileDescriptorProto(desc.Descriptor().ParentFile()) } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) - } - return fileDescWithDependencies(fd, sentFileDescriptors) } @@ -370,11 +324,7 @@ func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, s // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) - if err != nil { - return nil, err - } - fd, err := fileDescContainingExtension(st, extNum) + fd, err := fileDescContainingExtension(typeName, extNum) if err != nil { return nil, err } @@ -383,13 +333,16 @@ func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName st // allExtensionNumbersForTypeName returns all extension numbers for the given type. 
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) + desc, err := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(name)) if err != nil { return nil, err } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err + m := dynamicpb.NewMessage(desc.(protoreflect.MessageDescriptor)) + + exts := proto.RegisteredExtensions(m) + extNums := make([]int32, 0, len(exts)) + for id := range exts { + extNums = append(extNums, id) } return extNums, nil } diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index 9d23e2876423..730f51bf012d 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -34,7 +34,12 @@ import ( "google.golang.org/grpc/internal/grpctest" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" - pbv3 "google.golang.org/grpc/reflection/grpc_testingv3" + pbv3 "google.golang.org/grpc/reflection/grpc_testing_not_regenerate" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" ) var ( @@ -45,12 +50,16 @@ var ( fdProto2 *dpb.FileDescriptorProto fdProto2Ext *dpb.FileDescriptorProto fdProto2Ext2 *dpb.FileDescriptorProto + fdDynamic *dpb.FileDescriptorProto + // reflection descriptors. + fdDynamicFile protoreflect.FileDescriptor // fileDescriptor marshalled. 
fdTestByte []byte fdTestv3Byte []byte fdProto2Byte []byte fdProto2ExtByte []byte fdProto2Ext2Byte []byte + fdDynamicByte []byte ) const defaultTestTimeout = 10 * time.Second @@ -79,65 +88,52 @@ func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { return fd, b } -func init() { - fdTest, fdTestByte = loadFileDesc("reflection/grpc_testing/test.proto") - fdTestv3, fdTestv3Byte = loadFileDesc("testv3.proto") - fdProto2, fdProto2Byte = loadFileDesc("reflection/grpc_testing/proto2.proto") - fdProto2Ext, fdProto2ExtByte = loadFileDesc("reflection/grpc_testing/proto2_ext.proto") - fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("reflection/grpc_testing/proto2_ext2.proto") -} +func loadFileDescDynamic(b []byte) (*dpb.FileDescriptorProto, protoreflect.FileDescriptor, []byte) { + m := new(descriptorpb.FileDescriptorProto) + if err := proto.Unmarshal(b, m); err != nil { + panic(fmt.Sprintf("failed to unmarshal dynamic proto raw descriptor")) + } -func (x) TestFileDescForType(t *testing.T) { - for _, test := range []struct { - st reflect.Type - wantFd *dpb.FileDescriptorProto - }{ - {reflect.TypeOf(pb.SearchResponse_Result{}), fdTest}, - {reflect.TypeOf(pb.ToBeExtended{}), fdProto2}, - } { - fd, err := s.fileDescForType(test.st) - if err != nil || !proto.Equal(fd, test.wantFd) { - t.Errorf("fileDescForType(%q) = %q, %v, want %q, ", test.st, fd, err, test.wantFd) - } + fd, err := protodesc.NewFile(m, nil) + if err != nil { + panic(err) } -} -func (x) TestTypeForName(t *testing.T) { - for _, test := range []struct { - name string - want reflect.Type - }{ - {"grpc.testing.SearchResponse", reflect.TypeOf(pb.SearchResponse{})}, - } { - r, err := typeForName(test.name) - if err != nil || r != test.want { - t.Errorf("typeForName(%q) = %q, %v, want %q, ", test.name, r, err, test.want) - } + err = protoregistry.GlobalFiles.RegisterFile(fd) + if err != nil { + panic(err) } -} -func (x) TestTypeForNameNotFound(t *testing.T) { - for _, test := range []string{ - 
"grpc.testing.not_exiting", - } { - _, err := typeForName(test) - if err == nil { - t.Errorf("typeForName(%q) = _, %v, want _, ", test, err) + for i := 0; i < fd.Messages().Len(); i++ { + m := fd.Messages().Get(i) + if err := protoregistry.GlobalTypes.RegisterMessage(dynamicpb.NewMessageType(m)); err != nil { + panic(err) } } + + return m, fd, b +} + +func init() { + fdTest, fdTestByte = loadFileDesc("reflection/grpc_testing/test.proto") + fdTestv3, fdTestv3Byte = loadFileDesc("testv3.proto") + fdProto2, fdProto2Byte = loadFileDesc("reflection/grpc_testing/proto2.proto") + fdProto2Ext, fdProto2ExtByte = loadFileDesc("reflection/grpc_testing/proto2_ext.proto") + fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("reflection/grpc_testing/proto2_ext2.proto") + fdDynamic, fdDynamicFile, fdDynamicByte = loadFileDescDynamic(pbv3.FileDynamicProtoRawDesc) } func (x) TestFileDescContainingExtension(t *testing.T) { for _, test := range []struct { - st reflect.Type + st string extNum int32 want *dpb.FileDescriptorProto }{ - {reflect.TypeOf(pb.ToBeExtended{}), 13, fdProto2Ext}, - {reflect.TypeOf(pb.ToBeExtended{}), 17, fdProto2Ext}, - {reflect.TypeOf(pb.ToBeExtended{}), 19, fdProto2Ext}, - {reflect.TypeOf(pb.ToBeExtended{}), 23, fdProto2Ext2}, - {reflect.TypeOf(pb.ToBeExtended{}), 29, fdProto2Ext2}, + {"grpc.testing.ToBeExtended", 13, fdProto2Ext}, + {"grpc.testing.ToBeExtended", 17, fdProto2Ext}, + {"grpc.testing.ToBeExtended", 19, fdProto2Ext}, + {"grpc.testing.ToBeExtended", 23, fdProto2Ext2}, + {"grpc.testing.ToBeExtended", 29, fdProto2Ext2}, } { fd, err := fileDescContainingExtension(test.st, test.extNum) if err != nil || !proto.Equal(fd, test.want) { @@ -153,14 +149,14 @@ func (s intArray) Len() int { return len(s) } func (s intArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s intArray) Less(i, j int) bool { return s[i] < s[j] } -func (x) TestAllExtensionNumbersForType(t *testing.T) { +func (x) TestAllExtensionNumbersForTypeName(t *testing.T) { for _, test := range 
[]struct { - st reflect.Type + st string want []int32 }{ - {reflect.TypeOf(pb.ToBeExtended{}), []int32{13, 17, 19, 23, 29}}, + {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, } { - r, err := s.allExtensionNumbersForType(test.st) + r, err := s.allExtensionNumbersForTypeName(test.st) sort.Sort(intArray(r)) if err != nil || !reflect.DeepEqual(r, test.want) { t.Errorf("allExtensionNumbersForType(%q) = %v, %v, want %v, ", test.st, r, err, test.want) @@ -201,6 +197,9 @@ func (x) TestReflectionEnd2end(t *testing.T) { s := grpc.NewServer() pb.RegisterSearchServiceServer(s, &server{}) pbv3.RegisterSearchServiceV3Server(s, &serverV3{}) + + registerDynamicProto(s, fdDynamic, fdDynamicFile) + // Register reflection service on s. Register(s) go s.Serve(lis) @@ -276,6 +275,7 @@ func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflecti {"reflection/grpc_testing/test.proto", fdTestByte}, {"reflection/grpc_testing/proto2.proto", fdProto2Byte}, {"reflection/grpc_testing/proto2_ext.proto", fdProto2ExtByte}, + {"dynamic.proto", fdDynamicByte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ @@ -349,6 +349,10 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe {"grpc.testingv3.SearchResponseV3.Result.Value.str", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.State", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.State.FRESH", fdTestv3Byte}, + // Test dynamic symbols + {"grpc.testing.DynamicService", fdDynamicByte}, + {"grpc.testing.DynamicReq", fdDynamicByte}, + {"grpc.testing.DynamicRes", fdDynamicByte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ @@ -479,6 +483,7 @@ func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_Ser want []int32 }{ {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, + {"grpc.testing.DynamicReq", nil}, } { if err := 
stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ @@ -551,6 +556,7 @@ func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflection "grpc.testingv3.SearchServiceV3", "grpc.testing.SearchService", "grpc.reflection.v1alpha.ServerReflection", + "grpc.testing.DynamicService", } // Compare service names in response with want. if len(services) != len(want) { @@ -571,3 +577,26 @@ func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflection t.Errorf("ListServices = %v, want type ", r.MessageResponse) } } + +func registerDynamicProto(srv *grpc.Server, fdp *dpb.FileDescriptorProto, fd protoreflect.FileDescriptor) { + type emptyInterface interface{} + + for i := 0; i < fd.Services().Len(); i++ { + s := fd.Services().Get(i) + + sd := &grpc.ServiceDesc{ + ServiceName: string(s.FullName()), + HandlerType: (*emptyInterface)(nil), + Metadata: fdp.GetName(), + } + + for j := 0; j < s.Methods().Len(); j++ { + m := s.Methods().Get(j) + sd.Methods = append(sd.Methods, grpc.MethodDesc{ + MethodName: string(m.Name()), + }) + } + + srv.RegisterService(sd, struct{}{}) + } +} diff --git a/regenerate.sh b/regenerate.sh index 58c802f8aec7..978b89f37a4a 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . 
-name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -117,9 +117,9 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/service_config/service_config.proto does not have a go_package option. mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index e84d5d32de1d..f79d9c9392e3 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -20,14 +20,16 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -77,8 +79,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 8d12b627596e..b8904e21484e 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -3,7 +3,6 @@ module google.golang.org/grpc/security/advancedtls go 1.14 require ( - github.com/google/go-cmp v0.5.1 // indirect github.com/hashicorp/golang-lru v0.5.4 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 google.golang.org/grpc v1.38.0 diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index e84d5d32de1d..f79d9c9392e3 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -20,14 +20,16 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -77,8 +79,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf 
v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vet.sh b/vet.sh index d923187a7b3a..ceb436c6ce47 100755 --- a/vet.sh +++ b/vet.sh @@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ From ec717cad7395d45698b57c1df1ae36b4dbaa33dd Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 15 Feb 2022 15:41:49 -0800 Subject: [PATCH 430/998] xds: minor cleanup in xdsclient bootstrap code (#5195) --- internal/envconfig/xds.go | 6 +-- xds/internal/resolver/xds_resolver.go | 10 ++--- xds/internal/xdsclient/bootstrap/bootstrap.go | 38 +++++++++++-------- xds/internal/xdsclient/singleton.go | 17 +++++---- xds/server.go | 6 ++- xds/server_options.go | 6 +-- xds/xds.go | 2 +- 7 files changed, 48 insertions(+), 37 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 5aecfef93e43..7d996e51b5c1 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -26,13 +26,13 @@ import ( const ( // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. + // variable XDSBootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. 
XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. + // and kept in variable XDSBootstrapFileContent. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 6788090e29c0..c4b147d21efb 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -37,13 +37,13 @@ import ( const xdsScheme = "xds" -// NewBuilder creates a new xds resolver builder using a specific xds bootstrap -// config, so tests can use multiple xds clients in different ClientConns at -// the same time. -func NewBuilder(config []byte) (resolver.Builder, error) { +// NewBuilderForTesting creates a new xds resolver builder using a specific xds +// bootstrap config, so tests can use multiple xds clients in different +// ClientConns at the same time. +func NewBuilderForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ newXDSClient: func() (xdsclient.XDSClient, error) { - return xdsclient.NewClientWithBootstrapContents(config) + return xdsclient.NewWithBootstrapContentsForTesting(config) }, }, nil } diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 0115dcf927a3..4523a6131fd4 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -65,19 +65,20 @@ var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) // For overriding in unit tests. var bootstrapFileReadFunc = ioutil.ReadFile -// insecureCredsBuilder encapsulates a insecure credential that is built using a -// JSON config. 
+// insecureCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates an insecure credential. type insecureCredsBuilder struct{} func (i *insecureCredsBuilder) Build(json.RawMessage) (credentials.Bundle, error) { return insecure.NewBundle(), nil } + func (i *insecureCredsBuilder) Name() string { return "insecure" } -// googleDefaultCredsBuilder encapsulates a Google Default credential that is built using a -// JSON config. +// googleDefaultCredsBuilder implements the `Credentials` interface defined in +// package `xds/boostrap` and encapsulates a Google Default credential. type googleDefaultCredsBuilder struct{} func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, error) { @@ -328,11 +329,13 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { } // NewConfig returns a new instance of Config initialized by reading the -// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}. +// bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents specified +// at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the former is +// preferred. // -// Currently, we support exactly one type of credential, which is -// "google_default", where we use the host's default certs for transport -// credentials and a Google oauth token for call credentials. +// We support a credential registration mechanism and only credentials +// registered through that mechanism will be accepted here. See package +// `xds/bootstrap` for details. 
// // This function tries to process as much of the bootstrap file as possible (in // the presence of the errors) and may return a Config object with certain @@ -346,13 +349,18 @@ func NewConfig() (*Config, error) { return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) } logger.Debugf("Bootstrap content: %s", data) - return NewConfigFromContents(data) + return newConfigFromContents(data) +} + +// NewConfigFromContentsForTesting returns a new Config using the specified +// bootstrap file contents instead of reading the environment variable. +// +// This is only suitable for testing purposes. +func NewConfigFromContentsForTesting(data []byte) (*Config, error) { + return newConfigFromContents(data) } -// NewConfigFromContents returns a new Config using the specified bootstrap -// file contents instead of reading the environment variable. This is only -// suitable for testing purposes. -func NewConfigFromContents(data []byte) (*Config, error) { +func newConfigFromContents(data []byte) (*Config, error) { config := &Config{} var jsonData map[string]json.RawMessage @@ -483,7 +491,7 @@ func NewConfigFromContents(data []byte) (*Config, error) { // file are populated here. // 3. For each server config (both top level and in each authority), we set its // node field to the v3.Node, or a v2.Node with the same content, depending on -// the server's transprot API version. +// the server's transport API version. 
func (c *Config) updateNodeProto(node *v3corepb.Node) error { v3 := node if v3 == nil { @@ -493,11 +501,11 @@ func (c *Config) updateNodeProto(node *v3corepb.Node) error { v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning) - v2 := &v2corepb.Node{} v3bytes, err := proto.Marshal(v3) if err != nil { return fmt.Errorf("xds: proto.Marshal(%v): %v", v3, err) } + v2 := &v2corepb.Node{} if err := proto.Unmarshal(v3bytes, v2); err != nil { return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3bytes, err) } diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index 1cf033c03198..f4951ba8f488 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -160,10 +160,10 @@ func (c *clientRefCounted) Close() { } } -// NewWithConfigForTesting is exported for testing only. +// NewWithConfigForTesting returns an xdsClient for the specified bootstrap +// config, separate from the global singleton. // -// Note that this function doesn't set the singleton, so that the testing states -// don't leak. +// This should be used for testing purposes only. func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) if err != nil { @@ -172,10 +172,11 @@ func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.D return &clientRefCounted{clientImpl: cl, refCount: 1}, nil } -// NewClientWithBootstrapContents returns an xds client for this config, -// separate from the global singleton. This should be used for testing -// purposes only. -func NewClientWithBootstrapContents(contents []byte) (XDSClient, error) { +// NewWithBootstrapContentsForTesting returns an xdsClient for this config, +// separate from the global singleton. 
+// +// This should be used for testing purposes only. +func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) { // Normalize the contents buf := bytes.Buffer{} err := json.Indent(&buf, contents, "", "") @@ -198,7 +199,7 @@ func NewClientWithBootstrapContents(contents []byte) (XDSClient, error) { c.mu.Unlock() } - bcfg, err := bootstrap.NewConfigFromContents(contents) + bcfg, err := bootstrap.NewConfigFromContentsForTesting(contents) if err != nil { return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) } diff --git a/xds/server.go b/xds/server.go index 7d2c404ace9d..0319ddcaf533 100644 --- a/xds/server.go +++ b/xds/server.go @@ -161,11 +161,13 @@ func (s *GRPCServer) initXDSClient() error { } newXDSClient := newXDSClient - if s.opts.bootstrapContents != nil { + if s.opts.bootstrapContentsForTesting != nil { + // Bootstrap file contents may be specified as a server option for tests. newXDSClient = func() (xdsclient.XDSClient, error) { - return xdsclient.NewClientWithBootstrapContents(s.opts.bootstrapContents) + return xdsclient.NewWithBootstrapContentsForTesting(s.opts.bootstrapContentsForTesting) } } + client, err := newXDSClient() if err != nil { return fmt.Errorf("xds: failed to create xds-client: %v", err) diff --git a/xds/server_options.go b/xds/server_options.go index 1d46c3adb7b2..9bfa41c41a84 100644 --- a/xds/server_options.go +++ b/xds/server_options.go @@ -26,8 +26,8 @@ import ( ) type serverOptions struct { - modeCallback ServingModeCallbackFunc - bootstrapContents []byte + modeCallback ServingModeCallbackFunc + bootstrapContentsForTesting []byte } type serverOption struct { @@ -72,5 +72,5 @@ type ServingModeChangeArgs struct { // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
func BootstrapContentsForTesting(contents []byte) grpc.ServerOption { - return &serverOption{apply: func(o *serverOptions) { o.bootstrapContents = contents }} + return &serverOption{apply: func(o *serverOptions) { o.bootstrapContentsForTesting = contents }} } diff --git a/xds/xds.go b/xds/xds.go index 818af0367ad0..744f3f139645 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -90,5 +90,5 @@ func init() { // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { - return xdsresolver.NewBuilder(bootstrapConfig) + return xdsresolver.NewBuilderForTesting(bootstrapConfig) } From 18564ff61d5505d955c7bd1adc28e4f1ed96300c Mon Sep 17 00:00:00 2001 From: Joshua Humphries Date: Fri, 18 Feb 2022 16:13:23 -0500 Subject: [PATCH 431/998] reflection: improve server implementation (#5197) --- reflection/serverreflection.go | 357 +++++++++------------------- reflection/serverreflection_test.go | 55 +++-- 2 files changed, 146 insertions(+), 266 deletions(-) diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index 9b387dddee58..81344abd77da 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -37,24 +37,17 @@ To register server reflection on a gRPC server: package reflection // import "google.golang.org/grpc/reflection" import ( - "bytes" - "compress/gzip" - "fmt" "io" - "io/ioutil" "sort" - "sync" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/types/dynamicpb" ) // GRPCServer 
is the interface provided by a gRPC server. It is implemented by @@ -62,289 +55,174 @@ import ( // as a registry, for accumulating the services exposed by the server. type GRPCServer interface { grpc.ServiceRegistrar - GetServiceInfo() map[string]grpc.ServiceInfo + ServiceInfoProvider } var _ GRPCServer = (*grpc.Server)(nil) -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s GRPCServer - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files -} - // Register registers the server reflection service on the given gRPC server. func Register(s GRPCServer) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) } -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - serviceInfo := s.s.GetServiceInfo() - - s.symbols = map[string]*dpb.FileDescriptorProto{} - s.serviceNames = make([]string, 0, len(serviceInfo)) - processed := map[string]struct{}{} - for svc, info := range serviceInfo { - s.serviceNames = append(s.serviceNames, svc) - fdenc, ok := parseMetadata(info.Metadata) - if !ok { - continue - } - fd, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols -} - -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} - - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) - } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) - } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, 
svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) - } -} - -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd - - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } -} - -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo } -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) } -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name +// ServerOptions represents the options used to construct a reflection server. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver } -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. 
-func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %v", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := proto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %v", err) +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles } - return fd, nil -} - -// decompress does gzip decompression. -func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes } - out, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, } - return out, nil } -func fileDescContainingExtension(typeName string, ext int32) (*dpb.FileDescriptorProto, error) { - desc, err := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(typeName)) - if err != nil { - return nil, err - } - m := dynamicpb.NewMessage(desc.(protoreflect.MessageDescriptor)) - - var extDesc *proto.ExtensionDesc - for id, desc := range proto.RegisteredExtensions(m) { - if id == ext { - extDesc = desc - break - } - } - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return 
decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver } // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. -func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := proto.Marshal(currentfd) + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) if err != nil { return nil, err } r = append(r, currentfdEncoded) } - for _, dep := range currentfd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) } } return r, nil } -// fileDescEncodingByFilename finds the file descriptor for given filename, -// finds all of its previously unsent transitive dependencies, does marshalling -// on them, and returns the marshalled result. 
-func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := proto.FileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// parseMetadata finds the file descriptor bytes specified meta. -// For SupportPackageIsVersion4, m is the name of the proto file, we -// call proto.FileDescriptor to get the byte slice. -// For SupportPackageIsVersion3, m is a byte slice itself. -func parseMetadata(meta interface{}) ([]byte, bool) { - // Check if meta is the file name. - if fileNameForMeta, ok := meta.(string); ok { - return proto.FileDescriptor(fileNameForMeta), true - } - - // Check if meta is the byte slice. - if enc, ok := meta.([]byte); ok { - return enc, true - } - - return nil, false -} - // fileDescEncodingContainingSymbol finds the file descriptor containing the // given symbol, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. The given symbol // can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. 
- desc, err := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(name)) - if err != nil { - return nil, err - } - fd = protodesc.ToFileDescriptorProto(desc.Descriptor().ParentFile()) + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) } // fileDescEncodingContainingExtension finds the file descriptor containing // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - fd, err := fileDescContainingExtension(typeName, extNum) + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) if err != nil { return nil, err } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. 
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - desc, err := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(name)) - if err != nil { - return nil, err + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } } - m := dynamicpb.NewMessage(desc.(protoreflect.MessageDescriptor)) + return numbers, nil +} - exts := proto.RegisteredExtensions(m) - extNums := make([]int32, 0, len(exts)) - for id := range exts { - extNums = append(extNums, id) +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) } - return extNums, nil + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp } // ServerReflectionInfo is the reflection service handler. 
@@ -365,7 +243,11 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } switch req := in.MessageRequest.(type) { case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ @@ -426,16 +308,9 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } } case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ - Name: n, - } - } out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ ListServicesResponse: &rpb.ListServiceResponse{ - Service: serviceResponses, + Service: s.listServices(), }, } default: diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index 730f51bf012d..b35f674d926e 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -27,14 +27,13 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" pbv3 "google.golang.org/grpc/reflection/grpc_testing_not_regenerate" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" @@ -43,14 +42,14 @@ import ( ) 
var ( - s = &serverReflectionServer{} + s = NewServer(ServerOptions{}).(*serverReflectionServer) // fileDescriptor of each test proto file. - fdTest *dpb.FileDescriptorProto - fdTestv3 *dpb.FileDescriptorProto - fdProto2 *dpb.FileDescriptorProto - fdProto2Ext *dpb.FileDescriptorProto - fdProto2Ext2 *dpb.FileDescriptorProto - fdDynamic *dpb.FileDescriptorProto + fdTest *descriptorpb.FileDescriptorProto + fdTestv3 *descriptorpb.FileDescriptorProto + fdProto2 *descriptorpb.FileDescriptorProto + fdProto2Ext *descriptorpb.FileDescriptorProto + fdProto2Ext2 *descriptorpb.FileDescriptorProto + fdDynamic *descriptorpb.FileDescriptorProto // reflection descriptors. fdDynamicFile protoreflect.FileDescriptor // fileDescriptor marshalled. @@ -72,23 +71,20 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, x{}) } -func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { - enc := proto.FileDescriptor(filename) - if enc == nil { - panic(fmt.Sprintf("failed to find fd for file: %v", filename)) - } - fd, err := decodeFileDesc(enc) +func loadFileDesc(filename string) (*descriptorpb.FileDescriptorProto, []byte) { + fd, err := protoregistry.GlobalFiles.FindFileByPath(filename) if err != nil { - panic(fmt.Sprintf("failed to decode enc: %v", err)) + panic(err) } - b, err := proto.Marshal(fd) + fdProto := protodesc.ToFileDescriptorProto(fd) + b, err := proto.Marshal(fdProto) if err != nil { panic(fmt.Sprintf("failed to marshal fd: %v", err)) } - return fd, b + return fdProto, b } -func loadFileDescDynamic(b []byte) (*dpb.FileDescriptorProto, protoreflect.FileDescriptor, []byte) { +func loadFileDescDynamic(b []byte) (*descriptorpb.FileDescriptorProto, protoreflect.FileDescriptor, []byte) { m := new(descriptorpb.FileDescriptorProto) if err := proto.Unmarshal(b, m); err != nil { panic(fmt.Sprintf("failed to unmarshal dynamic proto raw descriptor")) @@ -127,7 +123,7 @@ func (x) TestFileDescContainingExtension(t *testing.T) { for _, test := range []struct { st string extNum 
int32 - want *dpb.FileDescriptorProto + want *descriptorpb.FileDescriptorProto }{ {"grpc.testing.ToBeExtended", 13, fdProto2Ext}, {"grpc.testing.ToBeExtended", 17, fdProto2Ext}, @@ -135,9 +131,18 @@ func (x) TestFileDescContainingExtension(t *testing.T) { {"grpc.testing.ToBeExtended", 23, fdProto2Ext2}, {"grpc.testing.ToBeExtended", 29, fdProto2Ext2}, } { - fd, err := fileDescContainingExtension(test.st, test.extNum) - if err != nil || !proto.Equal(fd, test.want) { - t.Errorf("fileDescContainingExtension(%q) = %q, %v, want %q, ", test.st, fd, err, test.want) + fd, err := s.fileDescEncodingContainingExtension(test.st, test.extNum, map[string]bool{}) + if err != nil { + t.Errorf("fileDescContainingExtension(%q) return error: %v", test.st, err) + continue + } + var actualFd descriptorpb.FileDescriptorProto + if err := proto.Unmarshal(fd[0], &actualFd); err != nil { + t.Errorf("fileDescContainingExtension(%q) return invalid bytes: %v", test.st, err) + continue + } + if !proto.Equal(&actualFd, test.want) { + t.Errorf("fileDescContainingExtension(%q) returned %q, but wanted %q", test.st, &actualFd, test.want) } } } @@ -348,7 +353,7 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe {"grpc.testingv3.SearchResponseV3.Result.Value.val", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.Result.Value.str", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.State", fdTestv3Byte}, - {"grpc.testingv3.SearchResponseV3.State.FRESH", fdTestv3Byte}, + {"grpc.testingv3.SearchResponseV3.FRESH", fdTestv3Byte}, // Test dynamic symbols {"grpc.testing.DynamicService", fdDynamicByte}, {"grpc.testing.DynamicReq", fdDynamicByte}, @@ -578,7 +583,7 @@ func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflection } } -func registerDynamicProto(srv *grpc.Server, fdp *dpb.FileDescriptorProto, fd protoreflect.FileDescriptor) { +func registerDynamicProto(srv *grpc.Server, fdp *descriptorpb.FileDescriptorProto, fd protoreflect.FileDescriptor) { type 
emptyInterface interface{} for i := 0; i < fd.Services().Len(); i++ { From 011544f72939c85397b0e24378280e6075061cb1 Mon Sep 17 00:00:00 2001 From: Ashitha Santhosh <55257063+ashithasantosh@users.noreply.github.com> Date: Fri, 18 Feb 2022 14:24:03 -0800 Subject: [PATCH 432/998] authz: add additional logs to sdk authz (#5094) * Adds additional logs to sdk authz * resolve comment * adds logs displaying request details * remove sdk_server_interceptor log * log subset of rpcData * resolving comment * format log message --- authz/grpc_authz_server_interceptors.go | 6 ++++++ internal/xds/rbac/rbac_engine.go | 16 +++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/authz/grpc_authz_server_interceptors.go b/authz/grpc_authz_server_interceptors.go index 72dc14ed85e4..1ac5e967030d 100644 --- a/authz/grpc_authz_server_interceptors.go +++ b/authz/grpc_authz_server_interceptors.go @@ -62,6 +62,9 @@ func (i *StaticInterceptor) UnaryInterceptor(ctx context.Context, req interface{ err := i.engines.IsAuthorized(ctx) if err != nil { if status.Code(err) == codes.PermissionDenied { + if logger.V(2) { + logger.Infof("unauthorized RPC request rejected: %v", err) + } return nil, status.Errorf(codes.PermissionDenied, "unauthorized RPC request rejected") } return nil, err @@ -76,6 +79,9 @@ func (i *StaticInterceptor) StreamInterceptor(srv interface{}, ss grpc.ServerStr err := i.engines.IsAuthorized(ss.Context()) if err != nil { if status.Code(err) == codes.PermissionDenied { + if logger.V(2) { + logger.Infof("unauthorized RPC request rejected: %v", err) + } return status.Errorf(codes.PermissionDenied, "unauthorized RPC request rejected") } return err diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index 66c7bf10bd0f..a212579c63e2 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -39,8 +39,6 @@ import ( "google.golang.org/grpc/status" ) -const logLevel = 2 - var logger = 
grpclog.Component("rbac") var getConnection = transport.GetConnection @@ -65,6 +63,16 @@ func NewChainEngine(policies []*v3rbacpb.RBAC) (*ChainEngine, error) { return &ChainEngine{chainedEngines: engines}, nil } +func (cre *ChainEngine) logRequestDetails(rpcData *rpcData) { + if logger.V(2) { + logger.Infof("checking request: url path=%s", rpcData.fullMethod) + if len(rpcData.certs) > 0 { + cert := rpcData.certs[0] + logger.Infof("uri sans=%q, dns sans=%q, subject=%v", cert.URIs, cert.DNSNames, cert.Subject) + } + } +} + // IsAuthorized determines if an incoming RPC is authorized based on the chain of RBAC // engines and their associated actions. // @@ -79,14 +87,16 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { } for _, engine := range cre.chainedEngines { matchingPolicyName, ok := engine.findMatchingPolicy(rpcData) - if logger.V(logLevel) && ok { + if logger.V(2) && ok { logger.Infof("incoming RPC matched to policy %v in engine with action %v", matchingPolicyName, engine.action) } switch { case engine.action == v3rbacpb.RBAC_ALLOW && !ok: + cre.logRequestDetails(rpcData) return status.Errorf(codes.PermissionDenied, "incoming RPC did not match an allow policy") case engine.action == v3rbacpb.RBAC_DENY && ok: + cre.logRequestDetails(rpcData) return status.Errorf(codes.PermissionDenied, "incoming RPC matched a deny policy %q", matchingPolicyName) } // Every policy in the engine list must be queried. Thus, iterate to the From fd1f98814e5bb443e7f3046f027e278fb5978c42 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Feb 2022 11:51:16 -0800 Subject: [PATCH 433/998] Change version to 1.46.0-dev (#5204) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 06160c17d97c..c09089618a94 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.45.0-dev" +const Version = "1.46.0-dev" From 02f384d41a10d9c193774be4304c8e9ecab2166c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Feb 2022 17:16:01 -0800 Subject: [PATCH 434/998] xds: rename rls proto import (#5205) --- xds/internal/clusterspecifier/rls/rls.go | 4 ++-- xds/internal/clusterspecifier/rls/rls_test.go | 22 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go index 58ba9364e6c5..6aca9715fc2a 100644 --- a/xds/internal/clusterspecifier/rls/rls.go +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -27,7 +27,7 @@ import ( "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/anypb" @@ -65,7 +65,7 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala if !ok { return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) } - rlcs := new(grpc_lookup_v1.RouteLookupClusterSpecifier) + rlcs := new(rlspb.RouteLookupClusterSpecifier) if err := ptypes.UnmarshalAny(any, rlcs); err != nil { return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) diff --git a/xds/internal/clusterspecifier/rls/rls_test.go b/xds/internal/clusterspecifier/rls/rls_test.go index 69bf165d8c91..36ca9b764bfe 100644 --- a/xds/internal/clusterspecifier/rls/rls_test.go +++ b/xds/internal/clusterspecifier/rls/rls_test.go @@ -27,7 +27,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" _ "google.golang.org/grpc/balancer/rls" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb 
"google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/internal/testutils" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" "google.golang.org/grpc/xds/internal/clusterspecifier" @@ -106,17 +106,17 @@ func (s) TestParseClusterSpecifierConfig(t *testing.T) { } // This will error because the required match field is set in grpc key builder. -var rlsClusterSpecifierConfigError = testutils.MarshalAny(&grpc_lookup_v1.RouteLookupClusterSpecifier{ - RouteLookupConfig: &grpc_lookup_v1.RouteLookupConfig{ - GrpcKeybuilders: []*grpc_lookup_v1.GrpcKeyBuilder{ +var rlsClusterSpecifierConfigError = testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ { - Names: []*grpc_lookup_v1.GrpcKeyBuilder_Name{ + Names: []*rlspb.GrpcKeyBuilder_Name{ { Service: "service", Method: "method", }, }, - Headers: []*grpc_lookup_v1.NameMatcher{ + Headers: []*rlspb.NameMatcher{ { Key: "k1", RequiredMatch: true, @@ -130,17 +130,17 @@ var rlsClusterSpecifierConfigError = testutils.MarshalAny(&grpc_lookup_v1.RouteL // Corresponds to the rls unit test case in // balancer/rls/internal/config_test.go. 
-var rlsClusterSpecifierConfigWithoutTransformations = testutils.MarshalAny(&grpc_lookup_v1.RouteLookupClusterSpecifier{ - RouteLookupConfig: &grpc_lookup_v1.RouteLookupConfig{ - GrpcKeybuilders: []*grpc_lookup_v1.GrpcKeyBuilder{ +var rlsClusterSpecifierConfigWithoutTransformations = testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ { - Names: []*grpc_lookup_v1.GrpcKeyBuilder_Name{ + Names: []*rlspb.GrpcKeyBuilder_Name{ { Service: "service", Method: "method", }, }, - Headers: []*grpc_lookup_v1.NameMatcher{ + Headers: []*rlspb.NameMatcher{ { Key: "k1", Names: []string{"v1"}, From a73725f42db97964eb538a5cf4d302f760357dbe Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 23 Feb 2022 07:30:06 -0800 Subject: [PATCH 435/998] channelz: include channelz identifier in logs (#5192) --- balancer/balancer.go | 3 +- balancer/grpclb/grpclb_remote_balancer.go | 5 +- balancer_conn_wrappers.go | 1 + call_test.go | 11 +- channelz/channelz.go | 36 +++ channelz/service/service_sktopt_test.go | 23 +- channelz/service/service_test.go | 221 ++++++++++-------- clientconn.go | 114 +++++---- dialoptions.go | 5 +- internal/balancergroup/balancergroup_test.go | 9 +- internal/channelz/funcs.go | 159 ++++++++----- internal/channelz/id.go | 75 ++++++ internal/channelz/logging.go | 91 +++----- internal/channelz/types.go | 23 +- internal/transport/http2_client.go | 11 +- internal/transport/http2_server.go | 12 +- internal/transport/keepalive_test.go | 39 ++-- internal/transport/transport.go | 5 +- internal/transport/transport_test.go | 60 +++-- resolver/resolver.go | 8 +- resolver_conn_wrapper.go | 23 +- server.go | 41 ++-- test/channelz_test.go | 72 +++--- .../clustermanager/clustermanager_test.go | 4 +- 24 files changed, 640 insertions(+), 411 deletions(-) create mode 100644 channelz/channelz.go create mode 100644 internal/channelz/id.go diff --git a/balancer/balancer.go 
b/balancer/balancer.go index bcc6f5451c90..f7a7697cad02 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -192,7 +193,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. diff --git a/balancer/grpclb/grpclb_remote_balancer.go b/balancer/grpclb/grpclb_remote_balancer.go index 805bbbb789ae..dab1959418e1 100644 --- a/balancer/grpclb/grpclb_remote_balancer.go +++ b/balancer/grpclb/grpclb_remote_balancer.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -240,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. 
dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index f4ea61746823..5eb87a552036 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -184,6 +184,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} diff --git a/call_test.go b/call_test.go index 8fdbc9b7eb7e..48424fef9f72 100644 --- a/call_test.go +++ b/call_test.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -123,12 +124,16 @@ type server struct { startedErr chan error // sent nil or an error after server starts mu sync.Mutex conns map[transport.ServerTransport]bool + channelzID *channelz.Identifier } type ctxKey string func newTestServer() *server { - return &server{startedErr: make(chan error, 1)} + return &server{ + startedErr: make(chan error, 1), + channelzID: channelz.NewIdentifierForTesting(channelz.RefServer, time.Now().Unix(), nil), + } } // start starts server. Other goroutines should block on s.startedErr for further operations. 
@@ -158,10 +163,12 @@ func (s *server) start(t *testing.T, port int, maxStreams uint32) { return } config := &transport.ServerConfig{ - MaxStreams: maxStreams, + MaxStreams: maxStreams, + ChannelzParentID: s.channelzID, } st, err := transport.NewServerTransport(conn, config) if err != nil { + t.Errorf("failed to create server transport: %v", err) continue } s.mu.Lock() diff --git a/channelz/channelz.go b/channelz/channelz.go new file mode 100644 index 000000000000..a220c47c59a5 --- /dev/null +++ b/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. 
+type Identifier = channelz.Identifier diff --git a/channelz/service/service_sktopt_test.go b/channelz/service/service_sktopt_test.go index efd383fce3c8..7302d9105a4c 100644 --- a/channelz/service/service_sktopt_test.go +++ b/channelz/service/service_sktopt_test.go @@ -28,15 +28,17 @@ package service import ( "context" - "reflect" "strconv" "testing" "github.com/golang/protobuf/ptypes" durpb "github.com/golang/protobuf/ptypes/duration" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/sys/unix" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" + "google.golang.org/protobuf/testing/protocmp" ) func init() { @@ -139,20 +141,27 @@ func (s) TestGetSocketOptions(t *testing.T) { }, } svr := newCZServer() - ids := make([]int64, len(ss)) + ids := make([]*channelz.Identifier, len(ss)) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) for i, s := range ss { - ids[i] = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) + ids[i], _ = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) defer channelz.RemoveEntry(ids[i]) } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() for i, s := range ss { - resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i]}) - metrics := resp.GetSocket() - if !reflect.DeepEqual(metrics.GetRef(), &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}) || !reflect.DeepEqual(socketProtoToStruct(metrics), s) { - t.Fatalf("resp.GetSocket() want: metrics.GetRef() = %#v and %#v, got: metrics.GetRef() = %#v and %#v", &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}, s, metrics.GetRef(), socketProtoToStruct(metrics)) + resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i].Int()}) + got, want := resp.GetSocket().GetRef(), &channelzpb.SocketRef{SocketId: 
ids[i].Int(), Name: strconv.Itoa(i)} + if !cmp.Equal(got, want, cmpopts.IgnoreUnexported(channelzpb.SocketRef{})) { + t.Fatalf("resp.GetSocket() returned metrics.GetRef() = %#v, want %#v", got, want) + } + socket, err := socketProtoToStruct(resp.GetSocket()) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(s, socket, protocmp.Transform(), cmp.AllowUnexported(dummySocket{})); diff != "" { + t.Fatalf("unexpected socket, diff (-want +got):\n%s", diff) } } } diff --git a/channelz/service/service_test.go b/channelz/service/service_test.go index 17409533745b..6b05aa0c8524 100644 --- a/channelz/service/service_test.go +++ b/channelz/service/service_test.go @@ -22,18 +22,21 @@ import ( "context" "fmt" "net" - "reflect" "strconv" + "strings" "testing" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/protobuf/testing/protocmp" ) func init() { @@ -61,14 +64,6 @@ type protoToSocketOptFunc func([]*channelzpb.SocketOption) *channelz.SocketOptio // It is only defined under linux environment on x86 architecture. var protoToSocketOpt protoToSocketOptFunc -// emptyTime is used for detecting unset value of time.Time type. -// For go1.7 and earlier, ptypes.Timestamp will fill in the loc field of time.Time -// with &utcLoc. However zero value of a time.Time type value loc field is nil. -// This behavior will make reflect.DeepEqual fail upon unset time.Time field, -// and cause false positive fatal error. -// TODO: Go1.7 is no longer supported - does this need a change? 
-var emptyTime time.Time - const defaultTestTimeout = 10 * time.Second type dummyChannel struct { @@ -149,7 +144,7 @@ func (d *dummySocket) ChannelzMetric() *channelz.SocketInternalMetric { } } -func channelProtoToStruct(c *channelzpb.Channel) *dummyChannel { +func channelProtoToStruct(c *channelzpb.Channel) (*dummyChannel, error) { dc := &dummyChannel{} pdata := c.GetData() switch pdata.GetState().GetState() { @@ -170,29 +165,29 @@ func channelProtoToStruct(c *channelzpb.Channel) *dummyChannel { dc.callsStarted = pdata.CallsStarted dc.callsSucceeded = pdata.CallsSucceeded dc.callsFailed = pdata.CallsFailed - if t, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - dc.lastCallStartedTimestamp = t - } + ts, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()) + if err != nil { + return nil, err } - return dc + dc.lastCallStartedTimestamp = ts + return dc, nil } -func serverProtoToStruct(s *channelzpb.Server) *dummyServer { +func serverProtoToStruct(s *channelzpb.Server) (*dummyServer, error) { ds := &dummyServer{} pdata := s.GetData() ds.callsStarted = pdata.CallsStarted ds.callsSucceeded = pdata.CallsSucceeded ds.callsFailed = pdata.CallsFailed - if t, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastCallStartedTimestamp = t - } + ts, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()) + if err != nil { + return nil, err } - return ds + ds.lastCallStartedTimestamp = ts + return ds, nil } -func socketProtoToStruct(s *channelzpb.Socket) *dummySocket { +func socketProtoToStruct(s *channelzpb.Socket) (*dummySocket, error) { ds := &dummySocket{} pdata := s.GetData() ds.streamsStarted = pdata.GetStreamsStarted() @@ -201,26 +196,26 @@ func socketProtoToStruct(s *channelzpb.Socket) *dummySocket { ds.messagesSent = pdata.GetMessagesSent() ds.messagesReceived = pdata.GetMessagesReceived() ds.keepAlivesSent = pdata.GetKeepAlivesSent() - if t, err 
:= ptypes.Timestamp(pdata.GetLastLocalStreamCreatedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastLocalStreamCreatedTimestamp = t - } - } - if t, err := ptypes.Timestamp(pdata.GetLastRemoteStreamCreatedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastRemoteStreamCreatedTimestamp = t - } - } - if t, err := ptypes.Timestamp(pdata.GetLastMessageSentTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastMessageSentTimestamp = t - } - } - if t, err := ptypes.Timestamp(pdata.GetLastMessageReceivedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastMessageReceivedTimestamp = t - } - } + ts, err := ptypes.Timestamp(pdata.GetLastLocalStreamCreatedTimestamp()) + if err != nil { + return nil, err + } + ds.lastLocalStreamCreatedTimestamp = ts + ts, err = ptypes.Timestamp(pdata.GetLastRemoteStreamCreatedTimestamp()) + if err != nil { + return nil, err + } + ds.lastRemoteStreamCreatedTimestamp = ts + ts, err = ptypes.Timestamp(pdata.GetLastMessageSentTimestamp()) + if err != nil { + return nil, err + } + ds.lastMessageSentTimestamp = ts + ts, err = ptypes.Timestamp(pdata.GetLastMessageReceivedTimestamp()) + if err != nil { + return nil, err + } + ds.lastMessageReceivedTimestamp = ts if v := pdata.GetLocalFlowControlWindow(); v != nil { ds.localFlowControlWindow = v.Value } @@ -240,7 +235,7 @@ func socketProtoToStruct(s *channelzpb.Socket) *dummySocket { ds.remoteAddr = protoToAddr(remote) } ds.remoteName = s.GetRemoteName() - return ds + return ds, nil } func protoToSecurity(protoSecurity *channelzpb.Security) credentials.ChannelzSecurityValue { @@ -325,7 +320,7 @@ func (s) TestGetTopChannels(t *testing.T) { czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) for _, c := range tcs { - id := channelz.RegisterChannel(c, 0, "") + id := channelz.RegisterChannel(c, nil, "") defer channelz.RemoveEntry(id) } s := newCZServer() @@ -336,12 +331,16 @@ func (s) TestGetTopChannels(t *testing.T) { 
t.Fatalf("resp.GetEnd() want true, got %v", resp.GetEnd()) } for i, c := range resp.GetChannel() { - if !reflect.DeepEqual(channelProtoToStruct(c), tcs[i]) { - t.Fatalf("dummyChannel: %d, want: %#v, got: %#v", i, tcs[i], channelProtoToStruct(c)) + channel, err := channelProtoToStruct(c) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(tcs[i], channel, protocmp.Transform(), cmp.AllowUnexported(dummyChannel{})); diff != "" { + t.Fatalf("unexpected channel, diff (-want +got):\n%s", diff) } } for i := 0; i < 50; i++ { - id := channelz.RegisterChannel(tcs[0], 0, "") + id := channelz.RegisterChannel(tcs[0], nil, "") defer channelz.RemoveEntry(id) } resp, _ = s.GetTopChannels(ctx, &channelzpb.GetTopChannelsRequest{StartChannelId: 0}) @@ -385,8 +384,12 @@ func (s) TestGetServers(t *testing.T) { t.Fatalf("resp.GetEnd() want true, got %v", resp.GetEnd()) } for i, s := range resp.GetServer() { - if !reflect.DeepEqual(serverProtoToStruct(s), ss[i]) { - t.Fatalf("dummyServer: %d, want: %#v, got: %#v", i, ss[i], serverProtoToStruct(s)) + server, err := serverProtoToStruct(s) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(ss[i], server, protocmp.Transform(), cmp.AllowUnexported(dummyServer{})); diff != "" { + t.Fatalf("unexpected server, diff (-want +got):\n%s", diff) } } for i := 0; i < 50; i++ { @@ -405,34 +408,34 @@ func (s) TestGetServerSockets(t *testing.T) { svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) refNames := []string{"listen socket 1", "normal socket 1", "normal socket 2"} - ids := make([]int64, 3) - ids[0] = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) - ids[1] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) - ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) + ids := make([]*channelz.Identifier, 3) + ids[0], _ = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) + ids[1], _ = channelz.RegisterNormalSocket(&dummySocket{}, 
svrID, refNames[1]) + ids[2], _ = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) for _, id := range ids { defer channelz.RemoveEntry(id) } svr := newCZServer() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: 0}) + resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID.Int(), StartSocketId: 0}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want: true, got: %v", resp.GetEnd()) } // GetServerSockets only return normal sockets. want := map[int64]string{ - ids[1]: refNames[1], - ids[2]: refNames[2], + ids[1].Int(): refNames[1], + ids[2].Int(): refNames[2], } - if !reflect.DeepEqual(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { + if !cmp.Equal(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { t.Fatalf("GetServerSockets want: %#v, got: %#v", want, resp.GetSocketRef()) } for i := 0; i < 50; i++ { - id := channelz.RegisterNormalSocket(&dummySocket{}, svrID, "") + id, _ := channelz.RegisterNormalSocket(&dummySocket{}, svrID, "") defer channelz.RemoveEntry(id) } - resp, _ = svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: 0}) + resp, _ = svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID.Int(), StartSocketId: 0}) if resp.GetEnd() { t.Fatalf("resp.GetEnd() want false, got %v", resp.GetEnd()) } @@ -446,10 +449,10 @@ func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) refNames := []string{"listen socket 1", "normal socket 1", "normal socket 2"} - ids := make([]int64, 3) - ids[0] = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) - ids[1] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) - ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) + 
ids := make([]*channelz.Identifier, 3) + ids[0], _ = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) + ids[1], _ = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) + ids[2], _ = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) for _, id := range ids { defer channelz.RemoveEntry(id) } @@ -458,16 +461,16 @@ func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { defer cancel() // Make GetServerSockets with startID = ids[1]+1, so socket-1 won't be // included in the response. - resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: ids[1] + 1}) + resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID.Int(), StartSocketId: ids[1].Int() + 1}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want: true, got: %v", resp.GetEnd()) } // GetServerSockets only return normal socket-2, socket-1 should be // filtered by start ID. want := map[int64]string{ - ids[2]: refNames[2], + ids[2].Int(): refNames[2], } - if !reflect.DeepEqual(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { + if !cmp.Equal(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { t.Fatalf("GetServerSockets want: %#v, got: %#v", want, resp.GetSocketRef()) } } @@ -475,38 +478,45 @@ func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { func (s) TestGetChannel(t *testing.T) { czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) + refNames := []string{"top channel 1", "nested channel 1", "sub channel 2", "nested channel 3"} - ids := make([]int64, 4) - ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) + ids := make([]*channelz.Identifier, 4) + ids[0] = channelz.RegisterChannel(&dummyChannel{}, nil, refNames[0]) channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, }) + ids[1] = channelz.RegisterChannel(&dummyChannel{}, ids[0], refNames[1]) 
channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1]), + Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1].Int()), Severity: channelz.CtInfo, }, }) - ids[2] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[2]) + var err error + ids[2], err = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[2]) + if err != nil { + t.Fatalf("channelz.RegisterSubChannel() failed: %v", err) + } channelz.AddTraceEvent(logger, ids[2], 0, &channelz.TraceEventDesc{ Desc: "SubChannel Created", Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2]), + Desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2].Int()), Severity: channelz.CtInfo, }, }) + ids[3] = channelz.RegisterChannel(&dummyChannel{}, ids[1], refNames[3]) channelz.AddTraceEvent(logger, ids[3], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[3]), + Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[3].Int()), Severity: channelz.CtInfo, }, }) @@ -518,21 +528,23 @@ func (s) TestGetChannel(t *testing.T) { Desc: "Resolver returns an empty address list", Severity: channelz.CtWarning, }) + for _, id := range ids { defer channelz.RemoveEntry(id) } + svr := newCZServer() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - resp, _ := svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[0]}) + resp, _ := svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[0].Int()}) metrics := resp.GetChannel() subChans := metrics.GetSubchannelRef() - if len(subChans) != 1 || subChans[0].GetName() != refNames[2] || subChans[0].GetSubchannelId() != ids[2] { - t.Fatalf("metrics.GetSubChannelRef() want %#v, got 
%#v", []*channelzpb.SubchannelRef{{SubchannelId: ids[2], Name: refNames[2]}}, subChans) + if len(subChans) != 1 || subChans[0].GetName() != refNames[2] || subChans[0].GetSubchannelId() != ids[2].Int() { + t.Fatalf("metrics.GetSubChannelRef() want %#v, got %#v", []*channelzpb.SubchannelRef{{SubchannelId: ids[2].Int(), Name: refNames[2]}}, subChans) } nestedChans := metrics.GetChannelRef() - if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[1] || nestedChans[0].GetChannelId() != ids[1] { - t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[1], Name: refNames[1]}}, nestedChans) + if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[1] || nestedChans[0].GetChannelId() != ids[1].Int() { + t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[1].Int(), Name: refNames[1]}}, nestedChans) } trace := metrics.GetData().GetTrace() want := []struct { @@ -542,14 +554,14 @@ func (s) TestGetChannel(t *testing.T) { childRef string }{ {desc: "Channel Created", severity: channelzpb.ChannelTraceEvent_CT_INFO}, - {desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1]), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[1], childRef: refNames[1]}, - {desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2]), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[2], childRef: refNames[2]}, + {desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1].Int()), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[1].Int(), childRef: refNames[1]}, + {desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2].Int()), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[2].Int(), childRef: refNames[2]}, {desc: fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready), severity: channelzpb.ChannelTraceEvent_CT_INFO}, {desc: "Resolver returns an empty address list", severity: channelzpb.ChannelTraceEvent_CT_WARNING}, } for i, e := range 
trace.Events { - if e.GetDescription() != want[i].desc { + if !strings.Contains(e.GetDescription(), want[i].desc) { t.Fatalf("trace: GetDescription want %#v, got %#v", want[i].desc, e.GetDescription()) } if e.GetSeverity() != want[i].severity { @@ -564,11 +576,11 @@ func (s) TestGetChannel(t *testing.T) { } } } - resp, _ = svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[1]}) + resp, _ = svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[1].Int()}) metrics = resp.GetChannel() nestedChans = metrics.GetChannelRef() - if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[3] || nestedChans[0].GetChannelId() != ids[3] { - t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[3], Name: refNames[3]}}, nestedChans) + if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[3] || nestedChans[0].GetChannelId() != ids[3].Int() { + t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[3].Int(), Name: refNames[3]}}, nestedChans) } } @@ -581,23 +593,27 @@ func (s) TestGetSubChannel(t *testing.T) { czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) refNames := []string{"top channel 1", "sub channel 1", "socket 1", "socket 2"} - ids := make([]int64, 4) - ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) + ids := make([]*channelz.Identifier, 4) + ids[0] = channelz.RegisterChannel(&dummyChannel{}, nil, refNames[0]) channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, }) - ids[1] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[1]) + var err error + ids[1], err = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[1]) + if err != nil { + t.Fatalf("channelz.RegisterSubChannel() failed: %v", err) + } channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanCreated, Severity: 
channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[0]), + Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[0].Int()), Severity: channelz.CtInfo, }, }) - ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[2]) - ids[3] = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[3]) + ids[2], _ = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[2]) + ids[3], _ = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[3]) channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanConnectivityChange, Severity: channelz.CtInfo, @@ -612,13 +628,13 @@ func (s) TestGetSubChannel(t *testing.T) { svr := newCZServer() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - resp, _ := svr.GetSubchannel(ctx, &channelzpb.GetSubchannelRequest{SubchannelId: ids[1]}) + resp, _ := svr.GetSubchannel(ctx, &channelzpb.GetSubchannelRequest{SubchannelId: ids[1].Int()}) metrics := resp.GetSubchannel() want := map[int64]string{ - ids[2]: refNames[2], - ids[3]: refNames[3], + ids[2].Int(): refNames[2], + ids[3].Int(): refNames[3], } - if !reflect.DeepEqual(convertSocketRefSliceToMap(metrics.GetSocketRef()), want) { + if !cmp.Equal(convertSocketRefSliceToMap(metrics.GetSocketRef()), want) { t.Fatalf("metrics.GetSocketRef() want %#v: got: %#v", want, metrics.GetSocketRef()) } @@ -726,20 +742,27 @@ func (s) TestGetSocket(t *testing.T) { }, } svr := newCZServer() - ids := make([]int64, len(ss)) + ids := make([]*channelz.Identifier, len(ss)) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) for i, s := range ss { - ids[i] = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) + ids[i], _ = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) defer channelz.RemoveEntry(ids[i]) } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() for i, s := range 
ss { - resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i]}) - metrics := resp.GetSocket() - if !reflect.DeepEqual(metrics.GetRef(), &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}) || !reflect.DeepEqual(socketProtoToStruct(metrics), s) { - t.Fatalf("resp.GetSocket() want: metrics.GetRef() = %#v and %#v, got: metrics.GetRef() = %#v and %#v", &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}, s, metrics.GetRef(), socketProtoToStruct(metrics)) + resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i].Int()}) + got, want := resp.GetSocket().GetRef(), &channelzpb.SocketRef{SocketId: ids[i].Int(), Name: strconv.Itoa(i)} + if !cmp.Equal(got, want, cmpopts.IgnoreUnexported(channelzpb.SocketRef{})) { + t.Fatalf("resp.GetSocket() returned metrics.GetRef() = %#v, want %#v", got, want) + } + socket, err := socketProtoToStruct(resp.GetSocket()) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(s, socket, protocmp.Transform(), cmp.AllowUnexported(dummySocket{})); diff != "" { + t.Fatalf("unexpected socket, diff (-want +got):\n%s", diff) } } } diff --git a/clientconn.go b/clientconn.go index f9af78913710..10f67edecdee 100644 --- a/clientconn.go +++ b/clientconn.go @@ -159,23 +159,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = 
channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -398,7 +395,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -490,7 +487,7 @@ type ClientConn struct { firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData lceMu sync.Mutex // protects lastConnectionError @@ -768,17 +765,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: 
channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -1085,22 +1086,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1131,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. 
+ channelzID *channelz.Identifier czData *channelzData } @@ -1312,14 +1313,12 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1332,7 +1331,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) return err } return nil @@ -1497,19 +1496,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before 
RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } diff --git a/dialoptions.go b/dialoptions.go index c4bf09f9e940..bdfc200e3bb2 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" @@ -57,7 +58,7 @@ type dialOptions struct { callOptions []CallOption // This is used by WithBalancerName dial option. balancerBuilder balancer.Builder - channelzParentID int64 + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -498,7 +499,7 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
-func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index 4942f8a7da87..d8a5a1c19b86 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" @@ -474,17 +475,15 @@ func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) { // to the balancergroup at creation time is passed to child policies. func (s) TestBalancerGroupBuildOptions(t *testing.T) { const ( - balancerName = "stubBalancer-TestBalancerGroupBuildOptions" - parent = int64(1234) - userAgent = "ua" - defaultTestTimeout = 1 * time.Second + balancerName = "stubBalancer-TestBalancerGroupBuildOptions" + userAgent = "ua" ) // Setup the stub balancer such that we can read the build options passed to // it in the UpdateClientConnState method. 
bOpts := balancer.BuildOptions{ DialCreds: insecure.NewCredentials(), - ChannelzParentID: parent, + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, 1234, nil), CustomUserAgent: userAgent, } stub.Register(balancerName, stub.BalancerFuncs{ diff --git a/internal/channelz/funcs.go b/internal/channelz/funcs.go index ea660a147cf9..777cbcd7921d 100644 --- a/internal/channelz/funcs.go +++ b/internal/channelz/funcs.go @@ -25,6 +25,7 @@ package channelz import ( "context" + "errors" "fmt" "sort" "sync" @@ -184,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -240,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. 
+func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. 
diff --git a/internal/channelz/id.go b/internal/channelz/id.go new file mode 100644 index 000000000000..c9a27acd3710 --- /dev/null +++ b/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. 
+func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/internal/channelz/logging.go b/internal/channelz/logging.go index b0013f9c8865..8e13a3d2ce7b 100644 --- a/internal/channelz/logging.go +++ b/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/internal/channelz/types.go b/internal/channelz/types.go index 3c595d154bd3..ad0ce4dabf06 100644 --- a/internal/channelz/types.go +++ b/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index f0c72d337105..38ed3d566fff 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -132,7 +132,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -351,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } t.statsHandler.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -898,9 +899,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 2c6eaf0e59cf..227608c7f21e 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -117,7 +117,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -275,12 +275,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -1210,9 +1210,7 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index c4021925f325..4f5a2bed6225 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -31,6 +31,7 @@ import ( "time" "golang.org/x/net/http2" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" ) @@ -252,11 +253,15 @@ func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) { // logic is running even without any active streams. 
func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }}, connCh) + copts := ConnectOptions{ + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + KeepaliveParams: keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 1 * time.Second, + PermitWithoutStream: true, + }, + } + client, cancel := setUpWithNoPingServer(t, copts, connCh) defer cancel() defer client.Close(fmt.Errorf("closed manually by test")) @@ -284,10 +289,14 @@ func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { // active streams, and therefore the transport stays open. func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }}, connCh) + copts := ConnectOptions{ + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + KeepaliveParams: keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 1 * time.Second, + }, + } + client, cancel := setUpWithNoPingServer(t, copts, connCh) defer cancel() defer client.Close(fmt.Errorf("closed manually by test")) @@ -313,10 +322,14 @@ func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { // transport even when there is an active stream. 
func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }}, connCh) + copts := ConnectOptions{ + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + KeepaliveParams: keepalive.ClientParameters{ + Time: 1 * time.Second, + Timeout: 1 * time.Second, + }, + } + client, cancel := setUpWithNoPingServer(t, copts, connCh) defer cancel() defer client.Close(fmt.Errorf("closed manually by test")) diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 0c43efaa6497..a9ce717f1605 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -529,7 +530,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -563,7 +564,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. 
diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 1fc2d217acb0..d129dbf0a3bd 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/testutils" @@ -56,16 +57,6 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -type server struct { - lis net.Listener - port string - startedErr chan error // error (or nil) with server start value - mu sync.Mutex - conns map[ServerTransport]bool - h *testStreamHandler - ready chan struct{} -} - var ( expectedRequest = []byte("ping") expectedResponse = []byte("pong") @@ -299,6 +290,25 @@ func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) { } } +type server struct { + lis net.Listener + port string + startedErr chan error // error (or nil) with server start value + mu sync.Mutex + conns map[ServerTransport]bool + h *testStreamHandler + ready chan struct{} + channelzID *channelz.Identifier +} + +func newTestServer() *server { + return &server{ + startedErr: make(chan error, 1), + ready: make(chan struct{}), + channelzID: channelz.NewIdentifierForTesting(channelz.RefServer, time.Now().Unix(), nil), + } +} + // start starts server. Other goroutines should block on s.readyChan for further operations. 
func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) { var err error @@ -422,9 +432,10 @@ func (s *server) addr() string { return s.lis.Addr().String() } -func setUpServerOnly(t *testing.T, port int, serverConfig *ServerConfig, ht hType) *server { - server := &server{startedErr: make(chan error, 1), ready: make(chan struct{})} - go server.start(t, port, serverConfig, ht) +func setUpServerOnly(t *testing.T, port int, sc *ServerConfig, ht hType) *server { + server := newTestServer() + sc.ChannelzParentID = server.channelzID + go server.start(t, port, sc, ht) server.wait(t, 2*time.Second) return server } @@ -433,9 +444,11 @@ func setUp(t *testing.T, port int, maxStreams uint32, ht hType) (*server, *http2 return setUpWithOptions(t, port, &ServerConfig{MaxStreams: maxStreams}, ht, ConnectOptions{}) } -func setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client, func()) { - server := setUpServerOnly(t, port, serverConfig, ht) +func setUpWithOptions(t *testing.T, port int, sc *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client, func()) { + server := setUpServerOnly(t, port, sc, ht) addr := resolver.Address{Addr: "localhost:" + server.port} + copts.ChannelzParentID = channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil) + connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func() {}, func(GoAwayReason) {}, func() {}) if connErr != nil { @@ -1299,11 +1312,14 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) defer cancel() - ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, ConnectOptions{}, func() {}, func(GoAwayReason) {}, func() {}) + + 
copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)} + ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func() {}, func(GoAwayReason) {}, func() {}) if err != nil { t.Fatalf("Error while creating client transport: %v", err) } defer ct.Close(fmt.Errorf("closed manually by test")) + str, err := ct.NewStream(connectCtx, &CallHdr{}) if err != nil { t.Fatalf("Error while creating stream: %v", err) @@ -2181,7 +2197,11 @@ func (s) TestClientHandshakeInfo(t *testing.T) { defer cancel() creds := &attrTransportCreds{} - tr, err := NewClientTransport(ctx, context.Background(), addr, ConnectOptions{TransportCredentials: creds}, func() {}, func(GoAwayReason) {}, func() {}) + copts := ConnectOptions{ + TransportCredentials: creds, + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + } + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func() {}, func(GoAwayReason) {}, func() {}) if err != nil { t.Fatalf("NewClientTransport(): %v", err) } @@ -2218,7 +2238,11 @@ func (s) TestClientHandshakeInfoDialer(t *testing.T) { return (&net.Dialer{}).DialContext(ctx, "tcp", addr) } - tr, err := NewClientTransport(ctx, context.Background(), addr, ConnectOptions{Dialer: dialer}, func() {}, func(GoAwayReason) {}, func() {}) + copts := ConnectOptions{ + Dialer: dialer, + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + } + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func() {}, func(GoAwayReason) {}, func() {}) if err != nil { t.Fatalf("NewClientTransport(): %v", err) } diff --git a/resolver/resolver.go b/resolver/resolver.go index e28b68026062..ca2e35a3596f 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" 
"google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index 2c47cd54f07c..05a9d4e0bac0 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", 
addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/server.go b/server.go index b24b6d53958d..96431a058bf8 100644 --- a/server.go +++ b/server.go @@ -134,7 +134,7 @@ type Server struct { 
channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +711,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { 
@@ -1709,11 +1710,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1751,11 +1748,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() diff --git a/test/channelz_test.go b/test/channelz_test.go index 3c953f1a5e80..ecf5412a1f1a 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -23,12 +23,13 @@ import ( "crypto/tls" "fmt" "net" - "reflect" + "regexp" "strings" "sync" "testing" "time" + "github.com/google/go-cmp/cmp" "golang.org/x/net/http2" "google.golang.org/grpc" _ "google.golang.org/grpc/balancer/grpclb" @@ -455,11 +456,11 @@ func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { // Socket1 Socket2 czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) - topChanID := channelz.RegisterChannel(&dummyChannel{}, 0, "") - subChanID1 := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") - subChanID2 := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") - sktID1 := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") - sktID2 := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") + topChanID := channelz.RegisterChannel(&dummyChannel{}, nil, "") + subChanID1, _ := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") + subChanID2, _ := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") + sktID1, _ := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") + sktID2, _ := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") tcs, _ := channelz.GetTopChannels(0, 0) if tcs == nil || len(tcs) != 1 { 
@@ -468,7 +469,7 @@ func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { if len(tcs[0].SubChans) != 2 { t.Fatalf("There should be two SubChannel entries") } - sc := channelz.GetSubChannel(subChanID1) + sc := channelz.GetSubChannel(subChanID1.Int()) if sc == nil || len(sc.Sockets) != 2 { t.Fatalf("There should be two Socket entries") } @@ -1380,7 +1381,7 @@ func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) { if !ok { return false, fmt.Errorf("the SocketData.Security is of type: %T, want: *credentials.TLSChannelzSecurityValue", skt.SocketData.Security) } - if !reflect.DeepEqual(securityVal.RemoteCertificate, cert.Certificate[0]) { + if !cmp.Equal(securityVal.RemoteCertificate, cert.Certificate[0]) { return false, fmt.Errorf("SocketData.Security.RemoteCertificate got: %v, want: %v", securityVal.RemoteCertificate, cert.Certificate[0]) } for _, v := range cipherSuites { @@ -1397,6 +1398,7 @@ func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) { func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) + e := tcpClearRREnv // avoid calling API to set balancer type, which will void service config's change of balancer. 
e.balancer = "" @@ -1407,6 +1409,7 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { te.resolverScheme = r.Scheme() te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() + var nestedConn int64 if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) @@ -1431,8 +1434,9 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { if len(ncm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for nested channel not 0") } - if ncm.Trace.Events[0].Desc != "Channel Created" { - return false, fmt.Errorf("the first trace event should be \"Channel Created\", not %q", ncm.Trace.Events[0].Desc) + pattern := `Channel created` + if ok, _ := regexp.MatchString(pattern, ncm.Trace.Events[0].Desc); !ok { + return false, fmt.Errorf("the first trace event should be %q, not %q", pattern, ncm.Trace.Events[0].Desc) } return true, nil }); err != nil { @@ -1460,8 +1464,9 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { if len(ncm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for nested channel not 0") } - if ncm.Trace.Events[len(ncm.Trace.Events)-1].Desc != "Channel Deleted" { - return false, fmt.Errorf("the first trace event should be \"Channel Deleted\", not %q", ncm.Trace.Events[0].Desc) + pattern := `Channel created` + if ok, _ := regexp.MatchString(pattern, ncm.Trace.Events[0].Desc); !ok { + return false, fmt.Errorf("the first trace event should be %q, not %q", pattern, ncm.Trace.Events[0].Desc) } return true, nil }); err != nil { @@ -1509,8 +1514,9 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { if len(scm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } - if scm.Trace.Events[0].Desc != "Subchannel Created" { - return false, fmt.Errorf("the first trace event should be \"Subchannel Created\", not %q", scm.Trace.Events[0].Desc) + pattern := `Subchannel 
created` + if ok, _ := regexp.MatchString(pattern, scm.Trace.Events[0].Desc); !ok { + return false, fmt.Errorf("the first trace event should be %q, not %q", pattern, scm.Trace.Events[0].Desc) } return true, nil }); err != nil { @@ -1551,10 +1557,12 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { if len(scm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } - if got, want := scm.Trace.Events[len(scm.Trace.Events)-1].Desc, "Subchannel Deleted"; got != want { - return false, fmt.Errorf("the last trace event should be %q, not %q", want, got) - } + pattern := `Subchannel deleted` + desc := scm.Trace.Events[len(scm.Trace.Events)-1].Desc + if ok, _ := regexp.MatchString(pattern, desc); !ok { + return false, fmt.Errorf("the last trace event should be %q, not %q", pattern, desc) + } return true, nil }); err != nil { t.Fatal(err) @@ -1600,7 +1608,7 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(cid) for i := len(cm.Trace.Events) - 1; i >= 0; i-- { - if cm.Trace.Events[i].Desc == fmt.Sprintf("Channel switches to new LB policy %q", roundrobin.Name) { + if strings.Contains(cm.Trace.Events[i].Desc, fmt.Sprintf("Channel switches to new LB policy %q", roundrobin.Name)) { break } if i == 0 { @@ -1725,7 +1733,7 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } for i := len(scm.Trace.Events) - 1; i >= 0; i-- { - if scm.Trace.Events[i].Desc == fmt.Sprintf("Subchannel picks a new address %q to connect", te.srvAddrs[2]) { + if strings.Contains(scm.Trace.Events[i].Desc, fmt.Sprintf("Subchannel picks a new address %q to connect", te.srvAddrs[2])) { break } if i == 0 { @@ -1756,9 +1764,9 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { 
t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } - var subConn int64 te.srv.Stop() + var subConn int64 if err := verifyResultWithDelay(func() (bool, error) { // we need to obtain the SubChannel id before it gets deleted from Channel's children list (due // to effect of r.UpdateState(resolver.State{Addresses:[]resolver.Address{}})) @@ -1773,6 +1781,7 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { for k := range tcs[0].SubChans { // get the SubChannel id for further trace inquiry. subConn = k + t.Logf("SubChannel Id is %d", subConn) } } scm := channelz.GetSubChannel(subConn) @@ -1786,8 +1795,10 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } var ready, connecting, transient, shutdown int + t.Log("SubChannel trace events seen so far...") for _, e := range scm.Trace.Events { - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure) { + t.Log(e.Desc) + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure)) { transient++ } } @@ -1798,17 +1809,19 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { } transient = 0 r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) + t.Log("SubChannel trace events seen so far...") for _, e := range scm.Trace.Events { - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready) { + t.Log(e.Desc) + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready)) { ready++ } - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Connecting) { + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Connecting)) { connecting++ } - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure) { + if 
strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure)) { transient++ } - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Shutdown) { + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Shutdown)) { shutdown++ } } @@ -1851,6 +1864,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } te.srv.Stop() + if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { @@ -1858,14 +1872,16 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { } var ready, connecting, transient int + t.Log("Channel trace events seen so far...") for _, e := range tcs[0].Trace.Events { - if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready) { + t.Log(e.Desc) + if strings.Contains(e.Desc, fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready)) { ready++ } - if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.Connecting) { + if strings.Contains(e.Desc, fmt.Sprintf("Channel Connectivity change to %v", connectivity.Connecting)) { connecting++ } - if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.TransientFailure) { + if strings.Contains(e.Desc, fmt.Sprintf("Channel Connectivity change to %v", connectivity.TransientFailure)) { transient++ } } diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index 5b3a2403e1a7..771152b7bb97 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/channelz" 
"google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/hierarchy" "google.golang.org/grpc/internal/testutils" @@ -516,7 +517,6 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { const ( balancerName = "stubBalancer-TestClusterManagerForwardsBalancerBuildOptions" - parent = int64(1234) userAgent = "ua" defaultTestTimeout = 1 * time.Second ) @@ -526,7 +526,7 @@ func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { ccsCh := testutils.NewChannel() bOpts := balancer.BuildOptions{ DialCreds: insecure.NewCredentials(), - ChannelzParentID: parent, + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, 1234, nil), CustomUserAgent: userAgent, } stub.Register(balancerName, stub.BalancerFuncs{ From e601f1ae37aab4a0c5511548a2c046ab273606fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=99=BD=E6=B3=BD?= Date: Thu, 24 Feb 2022 03:15:55 +0800 Subject: [PATCH 436/998] fix: does not validate metadata keys and values (#4886) --- internal/metadata/metadata.go | 46 +++++++++++ internal/metadata/metadata_test.go | 27 +++++++ stream.go | 20 ++++- test/metadata_test.go | 120 +++++++++++++++++++++++++++++ 4 files changed, 212 insertions(+), 1 deletion(-) create mode 100644 test/metadata_test.go diff --git a/internal/metadata/metadata.go b/internal/metadata/metadata.go index b8733dbf340d..b2980f8ac44a 100644 --- a/internal/metadata/metadata.go +++ b/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. 
+// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/internal/metadata/metadata_test.go b/internal/metadata/metadata_test.go index 1aa0f9798e8c..80f1a44bb6ac 100644 --- a/internal/metadata/metadata_test.go +++ b/internal/metadata/metadata_test.go @@ -19,6 +19,8 @@ package metadata import ( + "errors" + "reflect" "testing" "github.com/google/go-cmp/cmp" @@ -84,3 +86,28 @@ func TestSet(t *testing.T) { }) } } + +func TestValidate(t *testing.T) { + for _, test := range []struct { + md metadata.MD + want error + }{ + { + md: map[string][]string{string(rune(0x19)): {"testVal"}}, + want: errors.New("header key \"\\x19\" contains illegal characters not in [0-9a-z-_.]"), + }, + 
{ + md: map[string][]string{"test": {string(rune(0x19))}}, + want: errors.New("header key \"test\" contains value with non-printable ASCII characters"), + }, + { + md: map[string][]string{"test-bin": {string(rune(0x19))}}, + want: nil, + }, + } { + err := Validate(test.md) + if !reflect.DeepEqual(err, test.want) { + t.Errorf("validating metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + } + } +} diff --git a/stream.go b/stream.go index 8cdd652e037b..acb3185f03ab 100644 --- a/stream.go +++ b/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -166,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -1448,11 +1454,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ @@ -1467,6 +1482,9 @@ func (ss 
*serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } diff --git a/test/metadata_test.go b/test/metadata_test.go new file mode 100644 index 000000000000..e3da918fc722 --- /dev/null +++ b/test/metadata_test.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "fmt" + "io" + "reflect" + "testing" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +func (s) TestInvalidMetadata(t *testing.T) { + grpctest.TLogger.ExpectErrorN("stream: failed to validate md when setting trailer", 2) + + tests := []struct { + md metadata.MD + want error + recv error + }{ + { + md: map[string][]string{string(rune(0x19)): {"testVal"}}, + want: status.Error(codes.Internal, "header key \"\\x19\" contains illegal characters not in [0-9a-z-_.]"), + recv: status.Error(codes.Internal, "invalid header field name \"\\x19\""), + }, + { + md: map[string][]string{"test": {string(rune(0x19))}}, + want: status.Error(codes.Internal, "header key \"test\" contains value with non-printable ASCII characters"), + recv: status.Error(codes.Internal, "invalid header field value \"\\x19\""), + }, + { + md: map[string][]string{"test-bin": {string(rune(0x19))}}, + want: nil, + recv: io.EOF, + }, + { + md: map[string][]string{"test": {"value"}}, + want: nil, + recv: io.EOF, + }, + } + + testNum := 0 + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != nil { + return err + } + test := tests[testNum] + testNum++ + if err := stream.SetHeader(test.md); !reflect.DeepEqual(test.want, err) { + return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + } + if err := stream.SendHeader(test.md); !reflect.DeepEqual(test.want, err) { + return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", 
test.md, err, test.want) + } + stream.SetTrailer(test.md) + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting ss endpoint server: %v", err) + } + defer ss.Stop() + + for _, test := range tests { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + ctx = metadata.NewOutgoingContext(ctx, test.md) + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !reflect.DeepEqual(test.want, err) { + t.Errorf("call ss.Client.EmptyCall() validate metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + } + } + + // call the stream server's api to drive the server-side unit testing + for _, test := range tests { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + stream, err := ss.Client.FullDuplexCall(ctx) + defer cancel() + if err != nil { + t.Errorf("call ss.Client.FullDuplexCall(context.Background()) will success but got err :%v", err) + continue + } + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Errorf("call ss.Client stream Send(nil) will success but got err :%v", err) + } + if _, err := stream.Recv(); !reflect.DeepEqual(test.recv, err) { + t.Errorf("stream.Recv() = _, get err :%v, want err :%v", err, test.recv) + } + } +} From 328efcc9276e51849caaf0be0c9d524d0e4fd1cb Mon Sep 17 00:00:00 2001 From: Shang Jian Ding Date: Thu, 24 Feb 2022 12:28:58 -0600 Subject: [PATCH 437/998] example: add mutual TLS example (#5194) --- examples/data/x509/client_ca_cert.pem | 35 ++++++++ examples/data/x509/client_cert.pem | 32 ++++++++ examples/data/x509/client_key.pem | 51 ++++++++++++ examples/features/encryption/README.md | 21 ++++- .../features/encryption/mTLS/client/main.go | 81 ++++++++++++++++++ .../features/encryption/mTLS/server/main.go | 82 +++++++++++++++++++ 6 files changed, 301 insertions(+), 1 deletion(-) create mode 100644 examples/data/x509/client_ca_cert.pem create mode 100644 examples/data/x509/client_cert.pem create 
mode 100644 examples/data/x509/client_key.pem create mode 100644 examples/features/encryption/mTLS/client/main.go create mode 100644 examples/features/encryption/mTLS/server/main.go diff --git a/examples/data/x509/client_ca_cert.pem b/examples/data/x509/client_ca_cert.pem new file mode 100644 index 000000000000..026a4e478412 --- /dev/null +++ b/examples/data/x509/client_ca_cert.pem @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGAjCCA+qgAwIBAgIUZzkKhtgm6Y3RaksChHMIJFKV+U4wDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4 +NDI1MFoXDTMxMTIyMTE4NDI1MFowUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB +MQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xp +ZW50X2NhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1Rk7zsuwXn8r +KMHk+gmvaftFmlY+NHs1mKJPzyCGFnablnJtHU4hDpSvvNitoQ0OcurOo9V9ALlA +U2uw/1q6Yhg1Am4cXwSWHG0/GwCQAdPTVb7W1MiAd/IB5bx9xrwfjrpGLjVLS3/y +nOKP+kl1bf6WAcLEPClvH+kSG8xMwvg58ot7ipWQcWBTSuZLaz89d2yfxpvtwrvS +YDemY6f8Tkxil+kDjb2Jo/zdRDz8eIEOs1PcdztrdWWeQaYJVX6aEOHCfdVNOHw3 +jNQKyVREUgXjr/pkwo9fTnZjQdBUhZIo7NuPPG25t5qZK3dUDuLcVRQ5Vt0/45pZ +/HkZDCkxmSynZWz2gPClOHVPOG8Eqi0Mbd3XxQSsd1Go667oFotLvTuynbYhdh4s +xAJWXbFV26HgDXI5wXueXrs1n0stUlbD6KahfeoYBu+idX7gB4RftqhqlbIazu3y +hj22k8cMQEPkLhzmUwRt64juLA0+FRG0Hfr8vdZD+f91Qbv86Qw3c1/lckQIOlyI +MerljNbCbHJm9KOZGf1zizwvMVtVzuVtr6RY+Loov4gzhJ5kNSk/YDMQC42c2Yhz +Lr5y9EGe/cL8QXdKfjKNeJjCbzxTTFiVBq5XRKUgjz6ga+F7KGO7ayMBrexZ7+ap +z7ydlUYS+xp43hqdisAGmUMJdDVlHCMCAwEAAaOB0zCB0DAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTq92tDG5TfVvTqbu1bA593K6aAwjCBjQYDVR0jBIGFMIGC +gBTq92tDG5TfVvTqbu1bA593K6aAwqFUpFIwUDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRl +c3QtY2xpZW50X2NhghRnOQqG2CbpjdFqSwKEcwgkUpX5TjAOBgNVHQ8BAf8EBAMC +AgQwDQYJKoZIhvcNAQELBQADggIBAMHOXRUUq5vf9G2NvnAR1lb0fTKx4/6B9rhU +Nli9uIoWGQyMu8icEMistUp4AdHWdhutKX9NS0Fe3e5ef6qIYCng0gVBE3fTHJd4 
+V8MhGtyaK0K/gpTrJdClwK/litRIEjCFwNYEK8vtuqNjR82d8IuFjnbinb+IGCH0 +sLRGvvZch+dwM5N9BVRq20M2FZhyI+fWZmt1ZiBwnfy3xM+enD2I+/LOUFoxAmGS +m2vnS+ULhq7fLaK6vgyUIGqRDQMxYEql9QGzRIspV9vVhRuOCmowlJbgCv++eOUG +FvjlAPlQRGJ+ShpXO5n2pEkdjIJOrLf4kyviLDHffIl5I80fRWzv7GJ1HP+Bb9qO +LZGaiO3SelPhvJGTSV5uSZpgkFsBbgdbbGI60W2QQIHEwG0HdjnNk17+TmVEUoCj +rWK/Kzw5py1Egtibju4CiJ8uIKeew+2pfdnnyHoCVwCfdACc4dwRpet6fQvkRcru +5PR5MzZqUI2+bjg/hJrHj7SVpxpjcr3OZdh05T+heCVuPp+9mHBmcxbeA8rkMZAq +vILLwgwEriSbKy9Y1GLs2oaPNaWEpN9Q6kZPUwtwlzjHG3OOtldeXPpMVpg6Sb0y +3NnRfvfV/g2gm68S21j6qhGM2aeQCdCu5insqnR8GS5/stmuyCNnlst24JBneE0i +louEQ0EV +-----END CERTIFICATE----- diff --git a/examples/data/x509/client_cert.pem b/examples/data/x509/client_cert.pem new file mode 100644 index 000000000000..6f82cc3be84f --- /dev/null +++ b/examples/data/x509/client_cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4NDI1MVoXDTMxMTIyMTE4NDI1 +MVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBALUoje/J3uPOJ0dapY2s7mGLVPhYRaHyRnJE2/TY +zFOB0IisAF3R7BIDufQrHhk3fh0JazCw95TDD9rxsKEVs6Z50lmDkrg/bjlsniE/ +n+M1JacaLQW7xfh2L+Ei4jvMr101nAsimd6IxFU9m3+2SFbhPBG/GWWJ2ZKqQblz +DVMpNg9FYNmMe45vLevOhdPQBE4cVoAPhI9Je+P4Koslebhor0koUeQVeYdBbCq3 +3dQJPAHjBST6mD9mJI4yVrE3Xso3LO85WROUPhRYQyXhrgU15W6g9qTpMTfkriUe +FYLCtAPU9LBodyvjYLuwoEoyRVsA6Zh/vABteD8Afl552fV9KwN2fRVbTDAxQCp7 +P8gE3/rD1RKv7KBNJ/LrwMu7g4VO+tzYDxWee+eXPQ6M/zRWAb3E0v3UNHsF1ZBl +rlFhEiRShHrXDEKMQwCTSrRjwYajUpZ/Hq2USDgkLepKmTmCaoBfWHPyZwblqSTn +A4DNOh5N23eJyrLnJOPYjzZqEPfX5hDTjFRdVTQxtmYlJ1muwtlNyuwZDImhjO6G +54pPj/bV6gy1+YpIQBemPoXtqqmcRiEVWSV5zAizwRaWf85tqpxb1Tjuj2OpD9le +oO4JX0HLjhyQBoKspNohu2I4+s7ex/w92bf76cTpYTbMJqIp37YZmfPVztHVaMl4 +W0xRAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFMRdhhib+RS6IJpQ 
+zFsaKH1BNbyZMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAHyQwLSo/UdSoZCKcdeb0DCkeABRMUPIysroj +gQJCe3cAKOT+TxL6Qnp6jzM2/vt2T/lzlRp7RAB1jH3J4wy5re4cbCn1sbII3Rog +Nm4PKcw6hfUET44t1Gk9DsCjgvxEIirFBWVpxfn+YDI916iH1fkNURaMP+yxpQBL +3K4bmxanBiyBUHC8cyChLMD2NwXjOAA4pZFk0ohpmK0YUk4ra3Z3Q30DCH6NZ1ZP +aOMDHrCXU6MLlmPk8yiOnotgjqiYEgi3Bzxd/OHpR41Xo8k6g3UrN2GEQFs17ibQ +CQasxodOar5Vezu6ZKCYk5TaY4lugT34w+qxi8tVF54WY2jtWY5PUmU6ZT2Dw5cn +CQzlPUdEebOc1hltTvsD049/2lZmGlMXk0dykxy51jYAYznf2rb3cnC1vu1Wgi3w +J28xXBYD8AvME9jaJ6g3L+KR+AFCSLqpUsTxvu9zKf6pLrVtOCl+9G69uOK/wono +yMGNeel8rkzwzzr1LNrhmcKHqipkq83vqxIUT/mbpBUKO1ZXVG/TWKS6bpBTc4Pn +hBCIvGOSyoKuEiXnFr6fqLhLskUNcCNl7iOfA9h/MhS5ZufJXhhXu3Wbo/KC/mNh +y+fr1S9AyA+EJaYtJRKAOeewGvXYb881UNXWGCQU1aVNJnujRKFyhd07sEjxsad9 +Bn/aYes= +-----END CERTIFICATE----- diff --git a/examples/data/x509/client_key.pem b/examples/data/x509/client_key.pem new file mode 100644 index 000000000000..6cd652c55435 --- /dev/null +++ b/examples/data/x509/client_key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAtSiN78ne484nR1qljazuYYtU+FhFofJGckTb9NjMU4HQiKwA +XdHsEgO59CseGTd+HQlrMLD3lMMP2vGwoRWzpnnSWYOSuD9uOWyeIT+f4zUlpxot +BbvF+HYv4SLiO8yvXTWcCyKZ3ojEVT2bf7ZIVuE8Eb8ZZYnZkqpBuXMNUyk2D0Vg +2Yx7jm8t686F09AEThxWgA+Ej0l74/gqiyV5uGivSShR5BV5h0FsKrfd1Ak8AeMF +JPqYP2YkjjJWsTdeyjcs7zlZE5Q+FFhDJeGuBTXlbqD2pOkxN+SuJR4VgsK0A9T0 +sGh3K+Ngu7CgSjJFWwDpmH+8AG14PwB+XnnZ9X0rA3Z9FVtMMDFAKns/yATf+sPV +Eq/soE0n8uvAy7uDhU763NgPFZ5755c9Doz/NFYBvcTS/dQ0ewXVkGWuUWESJFKE +etcMQoxDAJNKtGPBhqNSln8erZRIOCQt6kqZOYJqgF9Yc/JnBuWpJOcDgM06Hk3b +d4nKsuck49iPNmoQ99fmENOMVF1VNDG2ZiUnWa7C2U3K7BkMiaGM7obnik+P9tXq +DLX5ikhAF6Y+he2qqZxGIRVZJXnMCLPBFpZ/zm2qnFvVOO6PY6kP2V6g7glfQcuO +HJAGgqyk2iG7Yjj6zt7H/D3Zt/vpxOlhNswmoinfthmZ89XO0dVoyXhbTFECAwEA +AQKCAgEAjtzrijWVy+sQuMm4k1DUMSKzIKJkT4GDoqvBFoc+I4DVVmLmaxaYZ+B+ +bhruwo4rq3R5Ds4QgUWPJGfDllVJ9rhNdYA4XYrQPwL0dV36ljCcf/o5lTLuvbFe +stpStTwG86fKZlGkLIWI53wNPBshUzqOp6QfwB6E8Y/JAxnDYVi3pDVfWlDaQ4pU 
+GYklqtN6AauBX75dGK6nwDE+Q7uLES2lRjlA03FIBK1IQyv7CTM7GnXQ4cep9x1z +KJx0F4+F9kyq6AE+yRz4FA1C7wXZuYw2YhcYSxcHVH/IAceGyTcIxZjUWqYXjQnk +iD+TONAKN+kxTq01MtUhpfWasqC/i+6QU1eqf5YWpd6GsRKyrGgO02NND/SM6Z3V ++S9og4QAjdUyc8dkN+udd1K1CeYNFbmhrYpF2aS9k/PjDP3L137hDW6Cy+thIjZP +u9OB6ba2yUrbQDlmkCbh0vX+77HKAbT5bj8h9r7MqzNsPsgkaKS8gZ79T/Whr/ft +Xiu+eo/u1jtjwUjNMKGxQ9XiU2UU7QccthHHLcYaiv4eySHXA75h+Sho9cD1Vvs/ +ms1/nbCSuU9TSK0UK/V8YjeDA0eVGtDCX3weIW2ECQ80SoT7uf+fhjaLkvOadb7f +1O9DvYVYZvblxUm8ajOh+/n9VyB/I9R9Q8GdGiauXy16uXLZMdECggEBAPEx+4aR +XfuXmnEoPk3GpvyrRlrMy02aklrATKcxeAYo2uiIcuQv65E3AmE1GHpoxmAKjLty +fuUfGdT7f4uGeF6p+IEkW4jQm56UFbCdun9kduEaN9FRylTBqUKWIY2rtRS6nHZ8 +bAkL/6Uv3g9NWx95rV7HnAfC2n6AIvc8LRfQVVqSvjPbsEPvJAT2353D0Rb7vC2M +1hKeBrSNBiy57EKnrMDOhNpBvSBU0Zc+YsBRNAimKyBz7dt35H+THkFaEk9vGtG0 +QkDvngPzSX99Ojwk2mo9jGrh7LHErWih5C73IfvYUh3kyEwbZ5y25i9Z0F37boIG +jHSVvcPp+9x9PNUCggEBAMBHLyhBUAQVZFXtWysr0BjO34XffgkSt1XQa8cVxif7 +glWauUZtjfC7PT/qgY0mx2dI2bDcKiQQCBlVavP1RLRwj3rZv23eit7z13UgHSa6 +3dnsgpO2Zux6qoV48lO4xbuFqZtW+MP+9jthKwr95r8lmZ4cmGQwXXcqNsR7skFt +30Uhcyn+MTfyLwcqt8g9i98rrJmbPAuIME/Sz9DLIi6UxQLI6MeEn92AzECNDp18 +CypOL+sDrLw/7HNHNoSblgm628BHpBgT2qaOYnawRr0gni7MHXOAbDopKYDAtLuU +ZMFjlILdfiSDouhvKtMlZG9arTB0TasdAQJGPz53H40CggEBAJ4JDvJsOzVHb2Vn +ZfNWD0INA0spVqhheDXIPDFsg2UdzdmA1i7XizUZ4xBIVuKV1i1FnFKRwb1ktGtN +4pNMJ4B3RCFx7hvl+6FbDB8uKe2gqRfzMtGPEtCYF8xOTGvkLwEHCM/F1I/U8cuN +YqWKHQOxmTw58+1N6hXq5X4zSqSI1/RBpCiccJEClwo9q+VWUaEKjpEV74pBSslw +gbQ6mihOby3h40CSxFXz3WSI9vFmA38LScS40Qf1NZ21iqRtXQP5G4x93M9pcZLL +DMRhDBAuYYItE91QbONJqAmf0cBII1c9tQhrSCY96pTPbmFmKtX5kb3Whp85Ih7F +KEafNIUCggEBALMnoIDZmjyz0fFeX3wyLotu9kY+n6jEj56dvE6bsy694grxR4Cf +w4lybPeJAX0LjPBnqK5p9bn0VheEx0rYVVPrLUVCbmNo3+wtN6wiaAcWRnAvNtt7 +MRtWkFwc/W2U1GiNeiMLPm8guT1KpFhxiva/igsQic2QYwYNh0o8FzNvtIEtUajm +9+Uw+zCqVON2tUUT5JabVa9JDfrSamAZZZgRdh/KI1sD8BDrWWUsCVojoiOhBnTr +z5730ND4oYudjIc0XF0kY3krxqc6M/Ry+vZt1fW0qhxcpHrsr4cQB1ZgRiELL+1f +g5FyNfBs5HIofRRkYMqtE1FEjRQZcAQ76mECggEAaOUtM9BZuV9gEwmG4hmFfeXq 
+vJOMvlsDkRRbLuDQ1B8Vw3v7lt1+K+KfBt96MoQe08MyXM7sIMB+hn+zakNaM2W6 +UzTnAPQQAo+wELqj6U3DrV7zw7I1hZTA9G7qxMAQBEmk3u2q4/zWDAcyAx3D9JVj +L3G14pYf0drFLChnknVTPRaF0Q5upLYzCPLMa9w0FLKy6fkfdWdpzyjvW7+JEeFY +koA98hrottqJB2CcqehQDSCUHKKbd5U15y1NV1BQloaPJLwpPAVTkBszQSHanltN +l9POJBJlfQ1eWL88wHdKiLbtOg6PTfAmfghIRxakjHvxBgFO1/xG6Lxm7QwUDQ== +-----END RSA PRIVATE KEY----- diff --git a/examples/features/encryption/README.md b/examples/features/encryption/README.md index 2afca1d785f5..e4ce22933230 100644 --- a/examples/features/encryption/README.md +++ b/examples/features/encryption/README.md @@ -84,4 +84,23 @@ Next, same as TLS, start the server with the server credential and let client dial to server with the client credential. Finally, make an RPC to test the secure connection based upon ALTS is -successfully up. \ No newline at end of file +successfully up. + +### mTLS + +In mutual TLS (mTLS), the client and the server authenticate each other. gRPC +allows users to configure mutual TLS at the connection level. + +In normal TLS, the server is only concerned with presenting the server +certificate for clients to verify. In mutual TLS, the server also loads in a +list of trusted CA files for verifying client presented certificates with. +This is done via setting +[`tls.Config.ClientCAs`](https://pkg.go.dev/crypto/tls#Config.ClientCAs) +to the list of trusted CA files, +and setting [`tls.config.ClientAuth`](https://pkg.go.dev/crypto/tls#Config.ClientAuth) +to [`tls.RequireAndVerifyClientCert`](https://pkg.go.dev/crypto/tls#RequireAndVerifyClientCert). + +In normal TLS, the client is only concerned with authenticating the server by +using one or more trusted CA file. In mutual TLS, the client also presents its +client certificate to the server for authentication. This is done via setting +[`tls.Config.Certificates`](https://pkg.go.dev/crypto/tls#Config.Certificates). 
diff --git a/examples/features/encryption/mTLS/client/main.go b/examples/features/encryption/mTLS/client/main.go new file mode 100644 index 000000000000..3b5b4f31c108 --- /dev/null +++ b/examples/features/encryption/mTLS/client/main.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client which connects to the server using mTLS. +package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "flag" + "fmt" + "io/ioutil" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/examples/data" + ecpb "google.golang.org/grpc/examples/features/proto/echo" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func callUnaryEcho(client ecpb.EchoClient, message string) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) + if err != nil { + log.Fatalf("client.UnaryEcho(_) = _, %v: ", err) + } + fmt.Println("UnaryEcho: ", resp.Message) +} + +func main() { + flag.Parse() + + cert, err := tls.LoadX509KeyPair(data.Path("x509/client_cert.pem"), data.Path("x509/client_key.pem")) + if err != nil { + log.Fatalf("failed to load client cert: %v", err) + } + + ca := x509.NewCertPool() + caFilePath := data.Path("x509/ca_cert.pem") + caBytes, err := 
ioutil.ReadFile(caFilePath) + if err != nil { + log.Fatalf("failed to read ca cert %q: %v", caFilePath, err) + } + if ok := ca.AppendCertsFromPEM(caBytes); !ok { + log.Fatalf("failed to parse %q", caFilePath) + } + + tlsConfig := &tls.Config{ + ServerName: "x.test.example.com", + Certificates: []tls.Certificate{cert}, + RootCAs: ca, + } + + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + + callUnaryEcho(ecpb.NewEchoClient(conn), "hello world") +} diff --git a/examples/features/encryption/mTLS/server/main.go b/examples/features/encryption/mTLS/server/main.go new file mode 100644 index 000000000000..cdcc676b507b --- /dev/null +++ b/examples/features/encryption/mTLS/server/main.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server which authenticates clients using mTLS. 
+package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "flag" + "fmt" + "io/ioutil" + "log" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/examples/data" + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +type ecServer struct { + pb.UnimplementedEchoServer +} + +func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { + return &pb.EchoResponse{Message: req.Message}, nil +} + +func main() { + flag.Parse() + log.Printf("server starting on port %d...\n", *port) + + cert, err := tls.LoadX509KeyPair(data.Path("x509/server_cert.pem"), data.Path("x509/server_key.pem")) + if err != nil { + log.Fatalf("failed to load key pair: %s", err) + } + + ca := x509.NewCertPool() + caFilePath := data.Path("x509/client_ca_cert.pem") + caBytes, err := ioutil.ReadFile(caFilePath) + if err != nil { + log.Fatalf("failed to read ca cert %q: %v", caFilePath, err) + } + if ok := ca.AppendCertsFromPEM(caBytes); !ok { + log.Fatalf("failed to parse %q", caFilePath) + } + + tlsConfig := &tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: []tls.Certificate{cert}, + ClientCAs: ca, + } + + s := grpc.NewServer(grpc.Creds(credentials.NewTLS(tlsConfig))) + pb.RegisterEchoServer(s, &ecServer{}) + lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} From 6f314bd725c32fba254ed3212209a78cae86d57a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 25 Feb 2022 16:54:14 -0800 Subject: [PATCH 438/998] reflection: add comments on protos (#5209) --- .../grpc_testing_not_regenerate/dynamic.proto | 20 ++++++++++++++++ .../grpc_testing_not_regenerate/testv3.pb.go | 24 +++++++++++++++---- .../grpc_testing_not_regenerate/testv3.proto | 21 
++++++++++++++++ 3 files changed, 61 insertions(+), 4 deletions(-) diff --git a/reflection/grpc_testing_not_regenerate/dynamic.proto b/reflection/grpc_testing_not_regenerate/dynamic.proto index bcfce1290f11..5eeba0892336 100644 --- a/reflection/grpc_testing_not_regenerate/dynamic.proto +++ b/reflection/grpc_testing_not_regenerate/dynamic.proto @@ -1,3 +1,20 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + syntax = "proto3"; option go_package = "google.golang.org/grpc/reflection/grpc_testing_not_regenerate"; @@ -8,6 +25,9 @@ message DynamicRes {} message DynamicReq {} +// DynamicService is used to test reflection on dynamically constructed protocol +// buffer messages. service DynamicService { + // DynamicMessage1 is a test RPC for dynamically constructed protobufs. rpc DynamicMessage1(DynamicReq) returns (DynamicRes); } \ No newline at end of file diff --git a/reflection/grpc_testing_not_regenerate/testv3.pb.go b/reflection/grpc_testing_not_regenerate/testv3.pb.go index 5cb225f31fb3..a99a4c9f1ba2 100644 --- a/reflection/grpc_testing_not_regenerate/testv3.pb.go +++ b/reflection/grpc_testing_not_regenerate/testv3.pb.go @@ -1,3 +1,20 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + // Code generated by protoc-gen-go. // source: testv3.proto // DO NOT EDIT! @@ -14,13 +31,12 @@ It has these top-level messages: */ package grpc_testing_not_regenerate -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( context "context" + fmt "fmt" + math "math" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" ) diff --git a/reflection/grpc_testing_not_regenerate/testv3.proto b/reflection/grpc_testing_not_regenerate/testv3.proto index 9049abc586ca..44f93ba8b076 100644 --- a/reflection/grpc_testing_not_regenerate/testv3.proto +++ b/reflection/grpc_testing_not_regenerate/testv3.proto @@ -1,3 +1,20 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + syntax = "proto3"; option go_package = "google.golang.org/grpc/reflection/grpc_testing_not_regenerate"; @@ -31,7 +48,11 @@ message SearchRequestV3 { string query = 1; } +// SearchServiceV3 is used to test grpc server reflection. 
service SearchServiceV3 { + // Search is a unary RPC. rpc Search(SearchRequestV3) returns (SearchResponseV3); + + // StreamingSearch is a streaming RPC. rpc StreamingSearch(stream SearchRequestV3) returns (stream SearchResponseV3); } From 6b15b1356efe4f9cacb9d869ede181878229eeb5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 28 Feb 2022 15:13:36 -0800 Subject: [PATCH 439/998] channelz: replace deprecated timestamp API with recommended ones (#5212) --- channelz/service/service_test.go | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/channelz/service/service_test.go b/channelz/service/service_test.go index 6b05aa0c8524..e03832a401b9 100644 --- a/channelz/service/service_test.go +++ b/channelz/service/service_test.go @@ -165,11 +165,10 @@ func channelProtoToStruct(c *channelzpb.Channel) (*dummyChannel, error) { dc.callsStarted = pdata.CallsStarted dc.callsSucceeded = pdata.CallsSucceeded dc.callsFailed = pdata.CallsFailed - ts, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()) - if err != nil { + if err := pdata.GetLastCallStartedTimestamp().CheckValid(); err != nil { return nil, err } - dc.lastCallStartedTimestamp = ts + dc.lastCallStartedTimestamp = pdata.GetLastCallStartedTimestamp().AsTime() return dc, nil } @@ -179,11 +178,10 @@ func serverProtoToStruct(s *channelzpb.Server) (*dummyServer, error) { ds.callsStarted = pdata.CallsStarted ds.callsSucceeded = pdata.CallsSucceeded ds.callsFailed = pdata.CallsFailed - ts, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()) - if err != nil { + if err := pdata.GetLastCallStartedTimestamp().CheckValid(); err != nil { return nil, err } - ds.lastCallStartedTimestamp = ts + ds.lastCallStartedTimestamp = pdata.GetLastCallStartedTimestamp().AsTime() return ds, nil } @@ -196,26 +194,22 @@ func socketProtoToStruct(s *channelzpb.Socket) (*dummySocket, error) { ds.messagesSent = pdata.GetMessagesSent() ds.messagesReceived = 
pdata.GetMessagesReceived() ds.keepAlivesSent = pdata.GetKeepAlivesSent() - ts, err := ptypes.Timestamp(pdata.GetLastLocalStreamCreatedTimestamp()) - if err != nil { + if err := pdata.GetLastLocalStreamCreatedTimestamp().CheckValid(); err != nil { return nil, err } - ds.lastLocalStreamCreatedTimestamp = ts - ts, err = ptypes.Timestamp(pdata.GetLastRemoteStreamCreatedTimestamp()) - if err != nil { + ds.lastLocalStreamCreatedTimestamp = pdata.GetLastLocalStreamCreatedTimestamp().AsTime() + if err := pdata.GetLastRemoteStreamCreatedTimestamp().CheckValid(); err != nil { return nil, err } - ds.lastRemoteStreamCreatedTimestamp = ts - ts, err = ptypes.Timestamp(pdata.GetLastMessageSentTimestamp()) - if err != nil { + ds.lastRemoteStreamCreatedTimestamp = pdata.GetLastRemoteStreamCreatedTimestamp().AsTime() + if err := pdata.GetLastMessageSentTimestamp().CheckValid(); err != nil { return nil, err } - ds.lastMessageSentTimestamp = ts - ts, err = ptypes.Timestamp(pdata.GetLastMessageReceivedTimestamp()) - if err != nil { + ds.lastMessageSentTimestamp = pdata.GetLastMessageSentTimestamp().AsTime() + if err := pdata.GetLastMessageReceivedTimestamp().CheckValid(); err != nil { return nil, err } - ds.lastMessageReceivedTimestamp = ts + ds.lastMessageReceivedTimestamp = pdata.GetLastMessageReceivedTimestamp().AsTime() if v := pdata.GetLocalFlowControlWindow(); v != nil { ds.localFlowControlWindow = v.Value } From 87b0a8d6db9383a83d28eda926ae8c697cf60422 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 2 Mar 2022 13:40:15 -0800 Subject: [PATCH 440/998] test: use real grpc.Server in call tests (#5213) --- call_test.go | 297 --------------------------------------- test/interceptor_test.go | 279 ++++++++++++++++++++++++++++++++++++ test/invoke_test.go | 152 ++++++++++++++++++++ 3 files changed, 431 insertions(+), 297 deletions(-) create mode 100644 test/interceptor_test.go create mode 100644 test/invoke_test.go diff --git a/call_test.go b/call_test.go index 
48424fef9f72..3280109f4fbb 100644 --- a/call_test.go +++ b/call_test.go @@ -127,8 +127,6 @@ type server struct { channelzID *channelz.Identifier } -type ctxKey string - func newTestServer() *server { return &server{ startedErr: make(chan error, 1), @@ -211,298 +209,3 @@ func (s *server) stop() { s.conns = nil s.mu.Unlock() } - -func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) { - return setUpWithOptions(t, port, maxStreams) -} - -func setUpWithOptions(t *testing.T, port int, maxStreams uint32, dopts ...DialOption) (*server, *ClientConn) { - server := newTestServer() - go server.start(t, port, maxStreams) - server.wait(t, 2*time.Second) - addr := "localhost:" + server.port - dopts = append(dopts, WithBlock(), WithInsecure(), WithCodec(testCodec{})) - cc, err := Dial(addr, dopts...) - if err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - return server, cc -} - -func (s) TestUnaryClientInterceptor(t *testing.T) { - parentKey := ctxKey("parentKey") - - interceptor := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("interceptor should have %v in context", parentKey) - } - return invoker(ctx, method, req, reply, cc, opts...) 
- } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithUnaryInterceptor(interceptor)) - defer func() { - cc.Close() - server.stop() - }() - - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } -} - -func (s) TestChainUnaryClientInterceptor(t *testing.T) { - var ( - parentKey = ctxKey("parentKey") - firstIntKey = ctxKey("firstIntKey") - secondIntKey = ctxKey("secondIntKey") - ) - - firstInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("first interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", secondIntKey) - } - firstCtx := context.WithValue(ctx, firstIntKey, 1) - err := invoker(firstCtx, method, req, reply, cc, opts...) - *(reply.(*string)) += "1" - return err - } - - secondInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("second interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("second interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("second interceptor should not have %v in context", secondIntKey) - } - secondCtx := context.WithValue(ctx, secondIntKey, 2) - err := invoker(secondCtx, method, req, reply, cc, opts...) 
- *(reply.(*string)) += "2" - return err - } - - lastInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("last interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", secondIntKey) - } - err := invoker(ctx, method, req, reply, cc, opts...) - *(reply.(*string)) += "3" - return err - } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithChainUnaryInterceptor(firstInt, secondInt, lastInt)) - defer func() { - cc.Close() - server.stop() - }() - - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse+"321" { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } -} - -func (s) TestChainOnBaseUnaryClientInterceptor(t *testing.T) { - var ( - parentKey = ctxKey("parentKey") - baseIntKey = ctxKey("baseIntKey") - ) - - baseInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("base interceptor should have %v in context", parentKey) - } - if ctx.Value(baseIntKey) != nil { - t.Fatalf("base interceptor should not have %v in context", baseIntKey) - } - baseCtx := context.WithValue(ctx, baseIntKey, 1) - return invoker(baseCtx, method, req, reply, cc, opts...) 
- } - - chainInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("chain interceptor should have %v in context", parentKey) - } - if ctx.Value(baseIntKey) == nil { - t.Fatalf("chain interceptor should have %v in context", baseIntKey) - } - return invoker(ctx, method, req, reply, cc, opts...) - } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithUnaryInterceptor(baseInt), WithChainUnaryInterceptor(chainInt)) - defer func() { - cc.Close() - server.stop() - }() - - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } -} - -func (s) TestChainStreamClientInterceptor(t *testing.T) { - var ( - parentKey = ctxKey("parentKey") - firstIntKey = ctxKey("firstIntKey") - secondIntKey = ctxKey("secondIntKey") - ) - - firstInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - if ctx.Value(parentKey) == nil { - t.Fatalf("first interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", secondIntKey) - } - firstCtx := context.WithValue(ctx, firstIntKey, 1) - return streamer(firstCtx, desc, cc, method, opts...) 
- } - - secondInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - if ctx.Value(parentKey) == nil { - t.Fatalf("second interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("second interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("second interceptor should not have %v in context", secondIntKey) - } - secondCtx := context.WithValue(ctx, secondIntKey, 2) - return streamer(secondCtx, desc, cc, method, opts...) - } - - lastInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - if ctx.Value(parentKey) == nil { - t.Fatalf("last interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", secondIntKey) - } - return streamer(ctx, desc, cc, method, opts...) 
- } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithChainStreamInterceptor(firstInt, secondInt, lastInt)) - defer func() { - cc.Close() - server.stop() - }() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - _, err := cc.NewStream(parentCtx, &StreamDesc{}, "/foo/bar") - if err != nil { - t.Fatalf("grpc.NewStream(_, _, _) = %v, want ", err) - } -} - -func (s) TestInvoke(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } - cc.Close() - server.stop() -} - -func (s) TestInvokeLargeErr(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - req := "hello" - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - err := cc.Invoke(ctx, "/foo/bar", &req, &reply) - if _, ok := status.FromError(err); !ok { - t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") - } - if status.Code(err) != codes.Internal || len(errorDesc(err)) != sizeLargeErr { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want an error of code %d and desc size %d", err, codes.Internal, sizeLargeErr) - } - cc.Close() - server.stop() -} - -// TestInvokeErrorSpecialChars checks that error messages don't get mangled. 
-func (s) TestInvokeErrorSpecialChars(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - req := "weird error" - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - err := cc.Invoke(ctx, "/foo/bar", &req, &reply) - if _, ok := status.FromError(err); !ok { - t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") - } - if got, want := errorDesc(err), weirdError; got != want { - t.Fatalf("grpc.Invoke(_, _, _, _, _) error = %q, want %q", got, want) - } - cc.Close() - server.stop() -} - -// TestInvokeCancel checks that an Invoke with a canceled context is not sent. -func (s) TestInvokeCancel(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - req := "canceled" - for i := 0; i < 100; i++ { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - cc.Invoke(ctx, "/foo/bar", &req, &reply) - } - if canceled != 0 { - t.Fatalf("received %d of 100 canceled requests", canceled) - } - cc.Close() - server.stop() -} - -// TestInvokeCancelClosedNonFail checks that a canceled non-failfast RPC -// on a closed client will terminate. -func (s) TestInvokeCancelClosedNonFailFast(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - cc.Close() - req := "hello" - ctx, cancel := context.WithCancel(context.Background()) - cancel() - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply, WaitForReady(true)); err == nil { - t.Fatalf("canceled invoke on closed connection should fail") - } - server.stop() -} diff --git a/test/interceptor_test.go b/test/interceptor_test.go new file mode 100644 index 000000000000..34a7cad5cc52 --- /dev/null +++ b/test/interceptor_test.go @@ -0,0 +1,279 @@ +/* + * + * Copyright 2022 gRPC authors. + + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +type parentCtxkey struct{} +type firstInterceptorCtxkey struct{} +type secondInterceptorCtxkey struct{} +type baseInterceptorCtxKey struct{} + +const ( + parentCtxVal = "parent" + firstInterceptorCtxVal = "firstInterceptor" + secondInterceptorCtxVal = "secondInterceptor" + baseInterceptorCtxVal = "baseInterceptor" +) + +// TestUnaryClientInterceptor_ContextValuePropagation verifies that a unary +// interceptor receives context values specified in the context passed to the +// RPC call. +func (s) TestUnaryClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + unaryInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.Send(fmt.Errorf("unaryInt got %q in context.Val, want %q", got, parentCtxVal)) + } + errCh.Send(nil) + return invoker(ctx, method, req, reply, cc, opts...) + } + + // Start a stub server and use the above unary interceptor while creating a + // ClientConn to it. 
+ ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil, grpc.WithUnaryInterceptor(unaryInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.EmptyCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal), &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for unary interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("unary interceptor failed: %v", val) + } +} + +// TestChainUnaryClientInterceptor_ContextValuePropagation verifies that a chain +// of unary interceptors receive context values specified in the original call +// as well as the ones specified by prior interceptors in the chain. +func (s) TestChainUnaryClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + firstInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("first interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if ctx.Value(firstInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", firstInterceptorCtxkey{})) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + firstCtx := context.WithValue(ctx, firstInterceptorCtxkey{}, firstInterceptorCtxVal) + return invoker(firstCtx, method, req, reply, cc, opts...) 
+ } + + secondInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("second interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + secondCtx := context.WithValue(ctx, secondInterceptorCtxkey{}, secondInterceptorCtxVal) + return invoker(secondCtx, method, req, reply, cc, opts...) + } + + lastInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if got, ok := ctx.Value(secondInterceptorCtxkey{}).(string); !ok || got != secondInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, secondInterceptorCtxVal)) + } + errCh.SendContext(ctx, nil) + return invoker(ctx, method, req, reply, cc, opts...) + } + + // Start a stub server and use the above chain of interceptors while creating + // a ClientConn to it. 
+ ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil, grpc.WithChainUnaryInterceptor(firstInt, secondInt, lastInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.EmptyCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal), &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for unary interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("unary interceptor failed: %v", val) + } +} + +// TestChainOnBaseUnaryClientInterceptor_ContextValuePropagation verifies that +// unary interceptors specified as a base interceptor or as a chain interceptor +// receive context values specified in the original call as well as the ones +// specified by interceptors in the chain. +func (s) TestChainOnBaseUnaryClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + baseInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("base interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if ctx.Value(baseInterceptorCtxKey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("baseinterceptor should not have %T in context", baseInterceptorCtxKey{})) + } + baseCtx := context.WithValue(ctx, baseInterceptorCtxKey{}, baseInterceptorCtxVal) + return invoker(baseCtx, method, req, reply, cc, opts...) 
+ } + + chainInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("chain interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(baseInterceptorCtxKey{}).(string); !ok || got != baseInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("chain interceptor got %q in context.Val, want %q", got, baseInterceptorCtxVal)) + } + errCh.SendContext(ctx, nil) + return invoker(ctx, method, req, reply, cc, opts...) + } + + // Start a stub server and use the above chain of interceptors while creating + // a ClientConn to it. + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil, grpc.WithUnaryInterceptor(baseInt), grpc.WithChainUnaryInterceptor(chainInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.EmptyCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal), &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for unary interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("unary interceptor failed: %v", val) + } +} + +// TestChainStreamClientInterceptor_ContextValuePropagation verifies that a +// chain of stream interceptors receive context values specified in the original +// call as well as the ones specified by the prior interceptors in the chain. 
+func (s) TestChainStreamClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + firstInt := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("first interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if ctx.Value(firstInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", firstInterceptorCtxkey{})) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + firstCtx := context.WithValue(ctx, firstInterceptorCtxkey{}, firstInterceptorCtxVal) + return streamer(firstCtx, desc, cc, method, opts...) + } + + secondInt := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("second interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + secondCtx := context.WithValue(ctx, secondInterceptorCtxkey{}, secondInterceptorCtxVal) + return streamer(secondCtx, desc, cc, method, opts...) 
+ } + + lastInt := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if got, ok := ctx.Value(secondInterceptorCtxkey{}).(string); !ok || got != secondInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, secondInterceptorCtxVal)) + } + errCh.SendContext(ctx, nil) + return streamer(ctx, desc, cc, method, opts...) + } + + // Start a stub server and use the above chain of interceptors while creating + // a ClientConn to it. 
+ ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil, grpc.WithChainStreamInterceptor(firstInt, secondInt, lastInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.FullDuplexCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal)); err != nil { + t.Fatalf("ss.Client.FullDuplexCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for stream interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("stream interceptor failed: %v", val) + } +} diff --git a/test/invoke_test.go b/test/invoke_test.go new file mode 100644 index 000000000000..49ad9044ee38 --- /dev/null +++ b/test/invoke_test.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2022 gRPC authors. + + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "strings" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +// TestInvoke verifies a straightforward invocation of ClientConn.Invoke(). +func (s) TestInvoke(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}); err != nil { + t.Fatalf("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") failed: %v", err) + } +} + +// TestInvokeLargeErr verifies an invocation of ClientConn.Invoke() where the +// server returns a really large error message. 
+func (s) TestInvokeLargeErr(t *testing.T) { + largeErrorStr := strings.Repeat("A", 1024*1024) + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, status.Error(codes.Internal, largeErrorStr) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}) + if err == nil { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") succeeded when expected to fail") + } + st, ok := status.FromError(err) + if !ok { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") received non-status error") + } + if status.Code(err) != codes.Internal || st.Message() != largeErrorStr { + t.Fatalf("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") failed with error: %v, want an error of code %d and desc size %d", err, codes.Internal, len(largeErrorStr)) + } +} + +// TestInvokeErrorSpecialChars tests an invocation of ClientConn.Invoke() and +// verifies that error messages don't get mangled. 
+func (s) TestInvokeErrorSpecialChars(t *testing.T) { + const weirdError = "format verbs: %v%s" + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, status.Error(codes.Internal, weirdError) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}) + if err == nil { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") succeeded when expected to fail") + } + st, ok := status.FromError(err) + if !ok { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") received non-status error") + } + if status.Code(err) != codes.Internal || st.Message() != weirdError { + t.Fatalf("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") failed with error: %v, want %v", err, weirdError) + } +} + +// TestInvokeCancel tests an invocation of ClientConn.Invoke() with a cancelled +// context and verifies that the request is not actually sent to the server. 
+func (s) TestInvokeCancel(t *testing.T) { + cancelled := 0 + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + cancelled++ + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + for i := 0; i < 100; i++ { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}) + } + if cancelled != 0 { + t.Fatalf("server received %d of 100 cancelled requests", cancelled) + } +} + +// TestInvokeCancelClosedNonFail tests an invocation of ClientConn.Invoke() with +// a cancelled non-failfast RPC on a closed ClientConn and verifies that the +// call terminates with an error. +func (s) TestInvokeCancelClosedNonFailFast(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ss.CC.Close() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + if err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}, grpc.WaitForReady(true)); err == nil { + t.Fatal("ClientConn.Invoke() on closed connection succeeded when expected to fail") + } +} From 63af97474cacdb6048d54b354d1a51c635a4262d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 3 Mar 2022 11:53:17 -0800 Subject: [PATCH 441/998] internal: update service config proto (#5222) --- .../grpc_service_config/service_config.pb.go | 1666 +++++++++++------ 1 file changed, 1065 insertions(+), 601 deletions(-) diff --git a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go index 5a41804f3d87..30e16d113285 100644 --- 
a/internal/proto/grpc_service_config/service_config.pb.go +++ b/internal/proto/grpc_service_config/service_config.pb.go @@ -103,7 +103,7 @@ func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Num // Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type.Descriptor instead. func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10, 0, 0} } // Load balancing policy. @@ -167,7 +167,7 @@ func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConfig_LoadBalancingPolicy.Descriptor instead. func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17, 0} } // Configuration for a method. @@ -421,6 +421,115 @@ func (*RoundRobinConfig) Descriptor() ([]byte, []int) { return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{2} } +// Configuration for outlier_detection LB policy +type OutlierDetectionLoadBalancingConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The time interval between ejection analysis sweeps. This can result in + // both new ejections as well as addresses being returned to service. Defaults + // to 10000ms or 10s. + Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // The base time that as address is ejected for. The real time is equal to the + // base time multiplied by the number of times the address has been ejected. + // Defaults to 30000ms or 30s. 
+ BaseEjectionTime *durationpb.Duration `protobuf:"bytes,2,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"` + // The maximum time that an address is ejected for. If not specified, the default value (300000ms or 300s) or + // the base_ejection_time value is applied, whatever is larger. + MaxEjectionTime *durationpb.Duration `protobuf:"bytes,3,opt,name=max_ejection_time,json=maxEjectionTime,proto3" json:"max_ejection_time,omitempty"` + // The maximum % of an address list that can be ejected due to outlier + // detection. Defaults to 10% but will eject at least one address regardless of the value. + MaxEjectionPercent *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_ejection_percent,json=maxEjectionPercent,proto3" json:"max_ejection_percent,omitempty"` + // If set, success rate ejections will be performed + SuccessRateEjection *OutlierDetectionLoadBalancingConfig_SuccessRateEjection `protobuf:"bytes,5,opt,name=success_rate_ejection,json=successRateEjection,proto3" json:"success_rate_ejection,omitempty"` + // If set, failure rate ejections will be performed + FailurePercentageEjection *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection `protobuf:"bytes,6,opt,name=failure_percentage_ejection,json=failurePercentageEjection,proto3" json:"failure_percentage_ejection,omitempty"` + // The config for the child policy + ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,13,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` +} + +func (x *OutlierDetectionLoadBalancingConfig) Reset() { + *x = OutlierDetectionLoadBalancingConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutlierDetectionLoadBalancingConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutlierDetectionLoadBalancingConfig) ProtoMessage() {} + 
+func (x *OutlierDetectionLoadBalancingConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutlierDetectionLoadBalancingConfig.ProtoReflect.Descriptor instead. +func (*OutlierDetectionLoadBalancingConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3} +} + +func (x *OutlierDetectionLoadBalancingConfig) GetInterval() *durationpb.Duration { + if x != nil { + return x.Interval + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig) GetBaseEjectionTime() *durationpb.Duration { + if x != nil { + return x.BaseEjectionTime + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig) GetMaxEjectionTime() *durationpb.Duration { + if x != nil { + return x.MaxEjectionTime + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig) GetMaxEjectionPercent() *wrapperspb.UInt32Value { + if x != nil { + return x.MaxEjectionPercent + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig) GetSuccessRateEjection() *OutlierDetectionLoadBalancingConfig_SuccessRateEjection { + if x != nil { + return x.SuccessRateEjection + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig) GetFailurePercentageEjection() *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection { + if x != nil { + return x.FailurePercentageEjection + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig) GetChildPolicy() []*LoadBalancingConfig { + if x != nil { + return x.ChildPolicy + } + return nil +} + // Configuration for grpclb LB policy. 
type GrpcLbConfig struct { state protoimpl.MessageState @@ -442,7 +551,7 @@ type GrpcLbConfig struct { func (x *GrpcLbConfig) Reset() { *x = GrpcLbConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + mi := &file_grpc_service_config_service_config_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -455,7 +564,7 @@ func (x *GrpcLbConfig) String() string { func (*GrpcLbConfig) ProtoMessage() {} func (x *GrpcLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] + mi := &file_grpc_service_config_service_config_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -468,7 +577,7 @@ func (x *GrpcLbConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use GrpcLbConfig.ProtoReflect.Descriptor instead. func (*GrpcLbConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4} } func (x *GrpcLbConfig) GetChildPolicy() []*LoadBalancingConfig { @@ -500,7 +609,7 @@ type PriorityLoadBalancingPolicyConfig struct { func (x *PriorityLoadBalancingPolicyConfig) Reset() { *x = PriorityLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] + mi := &file_grpc_service_config_service_config_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -513,7 +622,7 @@ func (x *PriorityLoadBalancingPolicyConfig) String() string { func (*PriorityLoadBalancingPolicyConfig) ProtoMessage() {} func (x *PriorityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] + mi := 
&file_grpc_service_config_service_config_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -526,7 +635,7 @@ func (x *PriorityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message // Deprecated: Use PriorityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. func (*PriorityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5} } func (x *PriorityLoadBalancingPolicyConfig) GetChildren() map[string]*PriorityLoadBalancingPolicyConfig_Child { @@ -555,7 +664,7 @@ type WeightedTargetLoadBalancingPolicyConfig struct { func (x *WeightedTargetLoadBalancingPolicyConfig) Reset() { *x = WeightedTargetLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] + mi := &file_grpc_service_config_service_config_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -568,7 +677,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig) String() string { func (*WeightedTargetLoadBalancingPolicyConfig) ProtoMessage() {} func (x *WeightedTargetLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] + mi := &file_grpc_service_config_service_config_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -581,7 +690,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Me // Deprecated: Use WeightedTargetLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*WeightedTargetLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} } func (x *WeightedTargetLoadBalancingPolicyConfig) GetTargets() map[string]*WeightedTargetLoadBalancingPolicyConfig_Target { @@ -603,7 +712,7 @@ type XdsClusterManagerLoadBalancingPolicyConfig struct { func (x *XdsClusterManagerLoadBalancingPolicyConfig) Reset() { *x = XdsClusterManagerLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] + mi := &file_grpc_service_config_service_config_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -616,7 +725,7 @@ func (x *XdsClusterManagerLoadBalancingPolicyConfig) String() string { func (*XdsClusterManagerLoadBalancingPolicyConfig) ProtoMessage() {} func (x *XdsClusterManagerLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] + mi := &file_grpc_service_config_service_config_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -629,7 +738,7 @@ func (x *XdsClusterManagerLoadBalancingPolicyConfig) ProtoReflect() protoreflect // Deprecated: Use XdsClusterManagerLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*XdsClusterManagerLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} } func (x *XdsClusterManagerLoadBalancingPolicyConfig) GetChildren() map[string]*XdsClusterManagerLoadBalancingPolicyConfig_Child { @@ -651,7 +760,7 @@ type CdsConfig struct { func (x *CdsConfig) Reset() { *x = CdsConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] + mi := &file_grpc_service_config_service_config_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -664,7 +773,7 @@ func (x *CdsConfig) String() string { func (*CdsConfig) ProtoMessage() {} func (x *CdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] + mi := &file_grpc_service_config_service_config_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -677,7 +786,7 @@ func (x *CdsConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use CdsConfig.ProtoReflect.Descriptor instead. 
func (*CdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} } func (x *CdsConfig) GetCluster() string { @@ -703,7 +812,7 @@ type XdsServer struct { func (x *XdsServer) Reset() { *x = XdsServer{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + mi := &file_grpc_service_config_service_config_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -716,7 +825,7 @@ func (x *XdsServer) String() string { func (*XdsServer) ProtoMessage() {} func (x *XdsServer) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] + mi := &file_grpc_service_config_service_config_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -729,7 +838,7 @@ func (x *XdsServer) ProtoReflect() protoreflect.Message { // Deprecated: Use XdsServer.ProtoReflect.Descriptor instead. 
func (*XdsServer) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} } func (x *XdsServer) GetServerUri() string { @@ -776,7 +885,7 @@ type XdsClusterResolverLoadBalancingPolicyConfig struct { func (x *XdsClusterResolverLoadBalancingPolicyConfig) Reset() { *x = XdsClusterResolverLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] + mi := &file_grpc_service_config_service_config_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -789,7 +898,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig) String() string { func (*XdsClusterResolverLoadBalancingPolicyConfig) ProtoMessage() {} func (x *XdsClusterResolverLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] + mi := &file_grpc_service_config_service_config_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -802,7 +911,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig) ProtoReflect() protoreflec // Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*XdsClusterResolverLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} } func (x *XdsClusterResolverLoadBalancingPolicyConfig) GetDiscoveryMechanisms() []*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism { @@ -854,7 +963,7 @@ type XdsClusterImplLoadBalancingPolicyConfig struct { func (x *XdsClusterImplLoadBalancingPolicyConfig) Reset() { *x = XdsClusterImplLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + mi := &file_grpc_service_config_service_config_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -867,7 +976,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig) String() string { func (*XdsClusterImplLoadBalancingPolicyConfig) ProtoMessage() {} func (x *XdsClusterImplLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] + mi := &file_grpc_service_config_service_config_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -880,7 +989,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Me // Deprecated: Use XdsClusterImplLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*XdsClusterImplLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} } func (x *XdsClusterImplLoadBalancingPolicyConfig) GetCluster() string { @@ -966,7 +1075,7 @@ type EdsLoadBalancingPolicyConfig struct { func (x *EdsLoadBalancingPolicyConfig) Reset() { *x = EdsLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] + mi := &file_grpc_service_config_service_config_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -979,7 +1088,7 @@ func (x *EdsLoadBalancingPolicyConfig) String() string { func (*EdsLoadBalancingPolicyConfig) ProtoMessage() {} func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] + mi := &file_grpc_service_config_service_config_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -992,7 +1101,7 @@ func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use EdsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*EdsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{12} } func (x *EdsLoadBalancingPolicyConfig) GetCluster() string { @@ -1043,7 +1152,7 @@ type RingHashLoadBalancingConfig struct { func (x *RingHashLoadBalancingConfig) Reset() { *x = RingHashLoadBalancingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + mi := &file_grpc_service_config_service_config_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1056,7 +1165,7 @@ func (x *RingHashLoadBalancingConfig) String() string { func (*RingHashLoadBalancingConfig) ProtoMessage() {} func (x *RingHashLoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] + mi := &file_grpc_service_config_service_config_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1069,7 +1178,7 @@ func (x *RingHashLoadBalancingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RingHashLoadBalancingConfig.ProtoReflect.Descriptor instead. 
func (*RingHashLoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{12} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13} } func (x *RingHashLoadBalancingConfig) GetMinRingSize() uint64 { @@ -1109,7 +1218,7 @@ type LrsLoadBalancingPolicyConfig struct { func (x *LrsLoadBalancingPolicyConfig) Reset() { *x = LrsLoadBalancingPolicyConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] + mi := &file_grpc_service_config_service_config_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1122,7 +1231,7 @@ func (x *LrsLoadBalancingPolicyConfig) String() string { func (*LrsLoadBalancingPolicyConfig) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] + mi := &file_grpc_service_config_service_config_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1135,7 +1244,7 @@ func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LrsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
func (*LrsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14} } func (x *LrsLoadBalancingPolicyConfig) GetClusterName() string { @@ -1206,7 +1315,7 @@ type XdsConfig struct { func (x *XdsConfig) Reset() { *x = XdsConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1219,7 +1328,7 @@ func (x *XdsConfig) String() string { func (*XdsConfig) ProtoMessage() {} func (x *XdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1232,7 +1341,7 @@ func (x *XdsConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. func (*XdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{15} } // Deprecated: Do not use. 
@@ -1292,6 +1401,7 @@ type LoadBalancingConfig struct { // Types that are assignable to Policy: // *LoadBalancingConfig_PickFirst // *LoadBalancingConfig_RoundRobin + // *LoadBalancingConfig_OutlierDetection // *LoadBalancingConfig_Grpclb // *LoadBalancingConfig_PriorityExperimental // *LoadBalancingConfig_WeightedTargetExperimental @@ -1310,7 +1420,7 @@ type LoadBalancingConfig struct { func (x *LoadBalancingConfig) Reset() { *x = LoadBalancingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1323,7 +1433,7 @@ func (x *LoadBalancingConfig) String() string { func (*LoadBalancingConfig) ProtoMessage() {} func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1336,7 +1446,7 @@ func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LoadBalancingConfig.ProtoReflect.Descriptor instead. 
func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{15} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} } func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { @@ -1360,6 +1470,13 @@ func (x *LoadBalancingConfig) GetRoundRobin() *RoundRobinConfig { return nil } +func (x *LoadBalancingConfig) GetOutlierDetection() *OutlierDetectionLoadBalancingConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_OutlierDetection); ok { + return x.OutlierDetection + } + return nil +} + func (x *LoadBalancingConfig) GetGrpclb() *GrpcLbConfig { if x, ok := x.GetPolicy().(*LoadBalancingConfig_Grpclb); ok { return x.Grpclb @@ -1460,6 +1577,10 @@ type LoadBalancingConfig_RoundRobin struct { RoundRobin *RoundRobinConfig `protobuf:"bytes,1,opt,name=round_robin,proto3,oneof"` } +type LoadBalancingConfig_OutlierDetection struct { + OutlierDetection *OutlierDetectionLoadBalancingConfig `protobuf:"bytes,15,opt,name=outlier_detection,proto3,oneof"` +} + type LoadBalancingConfig_Grpclb struct { // gRPC lookaside load balancing. 
// This will eventually be deprecated by the new xDS-based local @@ -1522,6 +1643,8 @@ func (*LoadBalancingConfig_PickFirst) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_RoundRobin) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_OutlierDetection) isLoadBalancingConfig_Policy() {} + func (*LoadBalancingConfig_Grpclb) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_PriorityExperimental) isLoadBalancingConfig_Policy() {} @@ -1568,7 +1691,7 @@ type ServiceConfig struct { func (x *ServiceConfig) Reset() { *x = ServiceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1581,7 +1704,7 @@ func (x *ServiceConfig) String() string { func (*ServiceConfig) ProtoMessage() {} func (x *ServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1594,7 +1717,7 @@ func (x *ServiceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig.ProtoReflect.Descriptor instead. func (*ServiceConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17} } // Deprecated: Do not use. 
@@ -1672,7 +1795,7 @@ type MethodConfig_Name struct { func (x *MethodConfig_Name) Reset() { *x = MethodConfig_Name{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1685,7 +1808,7 @@ func (x *MethodConfig_Name) String() string { func (*MethodConfig_Name) ProtoMessage() {} func (x *MethodConfig_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1744,7 +1867,7 @@ type MethodConfig_RetryPolicy struct { func (x *MethodConfig_RetryPolicy) Reset() { *x = MethodConfig_RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1757,7 +1880,7 @@ func (x *MethodConfig_RetryPolicy) String() string { func (*MethodConfig_RetryPolicy) ProtoMessage() {} func (x *MethodConfig_RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1839,7 +1962,7 @@ type MethodConfig_HedgingPolicy struct { func (x *MethodConfig_HedgingPolicy) Reset() { *x = MethodConfig_HedgingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1852,7 +1975,7 @@ func (x *MethodConfig_HedgingPolicy) String() string { func (*MethodConfig_HedgingPolicy) ProtoMessage() {} func (x *MethodConfig_HedgingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1889,6 +2012,186 @@ func (x *MethodConfig_HedgingPolicy) GetNonFatalStatusCodes() []code.Code { return nil } +// Parameters for the success rate ejection algorithm. +// This algorithm monitors the request success rate for all endpoints and +// ejects individual endpoints whose success rates are statistical outliers. +type OutlierDetectionLoadBalancingConfig_SuccessRateEjection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + StdevFactor *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=stdev_factor,json=stdevFactor,proto3" json:"stdev_factor,omitempty"` + // The % chance that an address will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. 
+ EnforcementPercentage *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=enforcement_percentage,json=enforcementPercentage,proto3" json:"enforcement_percentage,omitempty"` + // The number of addresses that must have enough request volume to + // detect success rate outliers. If the number of addresses is less than this + // setting, outlier detection via success rate statistics is not performed + // for any addresses. Defaults to 5. + MinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=minimum_hosts,json=minimumHosts,proto3" json:"minimum_hosts,omitempty"` + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this address + // in success rate based outlier detection. If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that address. Defaults to 100. + RequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=request_volume,json=requestVolume,proto3" json:"request_volume,omitempty"` +} + +func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) Reset() { + *x = OutlierDetectionLoadBalancingConfig_SuccessRateEjection{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoMessage() {} + +func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use OutlierDetectionLoadBalancingConfig_SuccessRateEjection.ProtoReflect.Descriptor instead. +func (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetStdevFactor() *wrapperspb.UInt32Value { + if x != nil { + return x.StdevFactor + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetEnforcementPercentage() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcementPercentage + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetMinimumHosts() *wrapperspb.UInt32Value { + if x != nil { + return x.MinimumHosts + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetRequestVolume() *wrapperspb.UInt32Value { + if x != nil { + return x.RequestVolume + } + return nil +} + +// Parameters for the failure percentage algorithm. +// This algorithm ejects individual endpoints whose failure rate is greater than +// some threshold, independently of any other endpoint. +type OutlierDetectionLoadBalancingConfig_FailurePercentageEjection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The failure percentage to use when determining failure percentage-based outlier detection. If + // the failure percentage of a given address is greater than or equal to this value, it will be + // ejected. Defaults to 85. + Threshold *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=threshold,proto3" json:"threshold,omitempty"` + // The % chance that an address will be actually ejected when an outlier status is detected through + // failure percentage statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 100. 
+ EnforcementPercentage *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=enforcement_percentage,json=enforcementPercentage,proto3" json:"enforcement_percentage,omitempty"` + // The minimum number of addresses in order to perform failure percentage-based ejection. + // If the total number of addresses is less than this value, failure percentage-based + // ejection will not be performed. Defaults to 5. + MinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=minimum_hosts,json=minimumHosts,proto3" json:"minimum_hosts,omitempty"` + // The minimum number of total requests that must be collected in one interval (as defined by the + // interval duration above) to perform failure percentage-based ejection for this address. If the + // volume is lower than this setting, failure percentage-based ejection will not be performed for + // this host. Defaults to 50. + RequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=request_volume,json=requestVolume,proto3" json:"request_volume,omitempty"` +} + +func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) Reset() { + *x = OutlierDetectionLoadBalancingConfig_FailurePercentageEjection{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoMessage() {} + +func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use OutlierDetectionLoadBalancingConfig_FailurePercentageEjection.ProtoReflect.Descriptor instead. +func (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetThreshold() *wrapperspb.UInt32Value { + if x != nil { + return x.Threshold + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetEnforcementPercentage() *wrapperspb.UInt32Value { + if x != nil { + return x.EnforcementPercentage + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetMinimumHosts() *wrapperspb.UInt32Value { + if x != nil { + return x.MinimumHosts + } + return nil +} + +func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetRequestVolume() *wrapperspb.UInt32Value { + if x != nil { + return x.RequestVolume + } + return nil +} + // A map of name to child policy configuration. 
// The names are used to allow the priority policy to update // existing child policies instead of creating new ones every @@ -1906,7 +2209,7 @@ type PriorityLoadBalancingPolicyConfig_Child struct { func (x *PriorityLoadBalancingPolicyConfig_Child) Reset() { *x = PriorityLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1919,7 +2222,7 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) String() string { func (*PriorityLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1932,7 +2235,7 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Me // Deprecated: Use PriorityLoadBalancingPolicyConfig_Child.ProtoReflect.Descriptor instead. 
func (*PriorityLoadBalancingPolicyConfig_Child) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5, 0} } func (x *PriorityLoadBalancingPolicyConfig_Child) GetConfig() []*LoadBalancingConfig { @@ -1961,7 +2264,7 @@ type WeightedTargetLoadBalancingPolicyConfig_Target struct { func (x *WeightedTargetLoadBalancingPolicyConfig_Target) Reset() { *x = WeightedTargetLoadBalancingPolicyConfig_Target{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1974,7 +2277,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) String() string { func (*WeightedTargetLoadBalancingPolicyConfig_Target) ProtoMessage() {} func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1987,7 +2290,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoref // Deprecated: Use WeightedTargetLoadBalancingPolicyConfig_Target.ProtoReflect.Descriptor instead. 
func (*WeightedTargetLoadBalancingPolicyConfig_Target) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6, 0} } func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetWeight() uint32 { @@ -2015,7 +2318,7 @@ type XdsClusterManagerLoadBalancingPolicyConfig_Child struct { func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) Reset() { *x = XdsClusterManagerLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + mi := &file_grpc_service_config_service_config_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2028,7 +2331,7 @@ func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) String() string { func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + mi := &file_grpc_service_config_service_config_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2041,7 +2344,7 @@ func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoReflect() protor // Deprecated: Use XdsClusterManagerLoadBalancingPolicyConfig_Child.ProtoReflect.Descriptor instead. 
func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7, 0} } func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) GetChildPolicy() []*LoadBalancingConfig { @@ -2063,7 +2366,7 @@ type XdsServer_ChannelCredentials struct { func (x *XdsServer_ChannelCredentials) Reset() { *x = XdsServer_ChannelCredentials{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + mi := &file_grpc_service_config_service_config_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2076,7 +2379,7 @@ func (x *XdsServer_ChannelCredentials) String() string { func (*XdsServer_ChannelCredentials) ProtoMessage() {} func (x *XdsServer_ChannelCredentials) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + mi := &file_grpc_service_config_service_config_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2089,7 +2392,7 @@ func (x *XdsServer_ChannelCredentials) ProtoReflect() protoreflect.Message { // Deprecated: Use XdsServer_ChannelCredentials.ProtoReflect.Descriptor instead. func (*XdsServer_ChannelCredentials) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0} } func (x *XdsServer_ChannelCredentials) GetType() string { @@ -2142,12 +2445,15 @@ type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism struct { // For type LOGICAL_DNS only. // DNS name to resolve in "host:port" form. 
DnsHostname string `protobuf:"bytes,6,opt,name=dns_hostname,json=dnsHostname,proto3" json:"dns_hostname,omitempty"` + // The configuration for outlier_detection child policies + // Within this message, the child_policy field will be ignored + OutlierDetection *OutlierDetectionLoadBalancingConfig `protobuf:"bytes,8,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` } func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Reset() { *x = XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[27] + mi := &file_grpc_service_config_service_config_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2160,7 +2466,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) String( func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoMessage() {} func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[27] + mi := &file_grpc_service_config_service_config_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2173,7 +2479,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoRe // Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism.ProtoReflect.Descriptor instead. 
func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10, 0} } func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetCluster() string { @@ -2226,6 +2532,13 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetDnsH return "" } +func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetOutlierDetection() *OutlierDetectionLoadBalancingConfig { + if x != nil { + return x.OutlierDetection + } + return nil +} + // Drop configuration. type XdsClusterImplLoadBalancingPolicyConfig_DropCategory struct { state protoimpl.MessageState @@ -2239,7 +2552,7 @@ type XdsClusterImplLoadBalancingPolicyConfig_DropCategory struct { func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Reset() { *x = XdsClusterImplLoadBalancingPolicyConfig_DropCategory{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[28] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2252,7 +2565,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) String() string { func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoMessage() {} func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[28] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2265,7 +2578,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() pr // Deprecated: Use 
XdsClusterImplLoadBalancingPolicyConfig_DropCategory.ProtoReflect.Descriptor instead. func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} } func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) GetCategory() string { @@ -2296,7 +2609,7 @@ type LrsLoadBalancingPolicyConfig_Locality struct { func (x *LrsLoadBalancingPolicyConfig_Locality) Reset() { *x = LrsLoadBalancingPolicyConfig_Locality{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[29] + mi := &file_grpc_service_config_service_config_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2309,7 +2622,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) String() string { func (*LrsLoadBalancingPolicyConfig_Locality) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[29] + mi := &file_grpc_service_config_service_config_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2322,7 +2635,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Mess // Deprecated: Use LrsLoadBalancingPolicyConfig_Locality.ProtoReflect.Descriptor instead. 
func (*LrsLoadBalancingPolicyConfig_Locality) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 0} } func (x *LrsLoadBalancingPolicyConfig_Locality) GetRegion() string { @@ -2380,7 +2693,7 @@ type ServiceConfig_RetryThrottlingPolicy struct { func (x *ServiceConfig_RetryThrottlingPolicy) Reset() { *x = ServiceConfig_RetryThrottlingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[30] + mi := &file_grpc_service_config_service_config_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2393,7 +2706,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) String() string { func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[30] + mi := &file_grpc_service_config_service_config_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2406,7 +2719,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Messag // Deprecated: Use ServiceConfig_RetryThrottlingPolicy.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17, 0} } func (x *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { @@ -2435,7 +2748,7 @@ type ServiceConfig_HealthCheckConfig struct { func (x *ServiceConfig_HealthCheckConfig) Reset() { *x = ServiceConfig_HealthCheckConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] + mi := &file_grpc_service_config_service_config_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2448,7 +2761,7 @@ func (x *ServiceConfig_HealthCheckConfig) String() string { func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] + mi := &file_grpc_service_config_service_config_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2461,7 +2774,7 @@ func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig_HealthCheckConfig.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16, 1} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17, 1} } func (x *ServiceConfig_HealthCheckConfig) GetServiceName() *wrapperspb.StringValue { @@ -2557,171 +2870,213 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x12, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x6e, - 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x7e, 0x0a, 0x0c, - 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x0c, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x03, 0x0a, - 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 
0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, - 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, - 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, 0x40, - 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x72, 0x65, 0x73, - 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, - 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 
0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, - 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x02, - 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8e, 0x0a, 0x0a, + 0x23, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x47, 0x0a, 0x12, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x45, + 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x14, 0x6d, + 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 
0x63, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x45, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x80, 0x01, 0x0a, 0x15, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, + 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x92, + 0x01, 0x0a, 0x1b, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, + 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, + 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x50, 0x65, 0x72, 0x63, 
0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x1a, 0xb3, 0x02, 0x0a, 0x13, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, + 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x74, 0x64, 0x65, + 0x76, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x74, + 0x64, 0x65, 0x76, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x53, 0x0a, 0x16, 0x65, 0x6e, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x41, + 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 
0x6d, 0x48, 0x6f, 0x73, 0x74, + 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x1a, 0xb4, 0x02, 0x0a, 0x19, 0x46, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x45, 0x6a, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, + 0x12, 0x53, 0x0a, 0x16, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, + 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, + 0x65, 
0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x22, 0x7e, 0x0a, + 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, + 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, + 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x03, + 0x0a, 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, + 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 
0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, + 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x72, 0x65, + 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, + 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, + 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, + 0x02, 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 
0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x6d, - 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, 0x0a, - 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf2, - 0x02, 0x0a, 0x2a, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, - 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x54, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, - 0x64, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 
0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, + 0x6d, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x82, - 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xa1, 0x02, 0x0a, 0x09, 0x58, - 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, - 
0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x12, 0x57, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, - 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x12, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb6, - 0x06, 0x0a, 0x2b, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x86, - 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, + 0x0a, 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xf2, 0x02, 0x0a, 0x2a, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, + 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x52, + 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x54, 0x0a, 0x05, 0x43, 0x68, 0x69, + 0x6c, 0x64, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, + 0x82, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xa1, 0x02, 0x0a, 0x09, + 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x12, 0x57, 0x0a, 0x0d, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 
0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, + 0x64, 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x12, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2f, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, + 0x9d, 0x07, 0x0a, 0x2b, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, - 0x73, 0x6d, 0x52, 0x13, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x4c, 
0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, 0x6c, - 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x86, 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xaf, 0x04, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, - 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x19, - 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, - 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, - 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, - 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6e, 0x73, - 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x45, 0x44, 0x53, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, - 0x4c, 0x5f, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x22, 0xa3, 0x05, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, - 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, + 0x69, 0x73, 0x6d, 0x52, 0x13, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x4c, 0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, + 0x6c, 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x96, 0x05, 0x0a, 0x12, 0x44, 
0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, @@ -2733,247 +3088,299 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x52, 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, - 0x0a, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, - 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, - 0x72, 0x79, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, - 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, - 0x5c, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, - 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, - 0x0a, 0x1c, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, - 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, + 0x6d, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, + 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6e, + 0x73, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x65, 0x0a, 0x11, 0x6f, 0x75, 0x74, + 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, + 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, + 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, + 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x01, 0x12, 0x0f, + 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x22, + 0xa3, 0x05, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, + 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 
0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, + 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x15, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, - 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 
0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x6d, 0x69, 0x6e, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6d, - 0x61, 0x78, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, - 0xa6, 0x03, 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, - 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, - 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, + 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, 0x0a, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, + 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x72, + 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, + 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x5c, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x43, + 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, + 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 
0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x69, + 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, 0x0a, 0x1c, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, + 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x17, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 
0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, + 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, - 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 
0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, - 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, - 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x9c, 0x0b, 0x0a, 0x13, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, - 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 
0x00, 0x52, - 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, - 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, - 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, - 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x57, 0x65, 0x69, 
0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x69, 0x6e, 0x67, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, + 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa6, 0x03, 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, 0x64, 0x73, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 
0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, - 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, - 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, - 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 
0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, - 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, - 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, - 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 
0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, - 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, - 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, - 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 
0x5c, 0x0a, 0x15, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, + 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, + 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 
0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, + 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x50, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, + 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, + 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, + 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0x86, 0x0c, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, + 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 
0x32, + 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, + 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x68, + 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, + 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, + 0x6c, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, + 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, - 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 
0x78, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, - 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 
0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, + 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, + 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 
0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, + 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, + 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, + 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, + 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, + 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, + 0x00, 0x52, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, + 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, + 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, + 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, + 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, + 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 
0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, + 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0b, 0x73, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, + 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, + 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2989,120 +3396,140 @@ func file_grpc_service_config_service_config_proto_rawDescGZIP() []byte { } var file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 32) +var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 35) var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type)(0), // 0: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type (ServiceConfig_LoadBalancingPolicy)(0), // 1: grpc.service_config.ServiceConfig.LoadBalancingPolicy (*MethodConfig)(nil), // 2: grpc.service_config.MethodConfig (*PickFirstConfig)(nil), // 3: grpc.service_config.PickFirstConfig (*RoundRobinConfig)(nil), // 4: grpc.service_config.RoundRobinConfig - (*GrpcLbConfig)(nil), // 5: grpc.service_config.GrpcLbConfig - (*PriorityLoadBalancingPolicyConfig)(nil), // 6: grpc.service_config.PriorityLoadBalancingPolicyConfig - (*WeightedTargetLoadBalancingPolicyConfig)(nil), // 7: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 
(*XdsClusterManagerLoadBalancingPolicyConfig)(nil), // 8: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig - (*CdsConfig)(nil), // 9: grpc.service_config.CdsConfig - (*XdsServer)(nil), // 10: grpc.service_config.XdsServer - (*XdsClusterResolverLoadBalancingPolicyConfig)(nil), // 11: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - (*XdsClusterImplLoadBalancingPolicyConfig)(nil), // 12: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - (*EdsLoadBalancingPolicyConfig)(nil), // 13: grpc.service_config.EdsLoadBalancingPolicyConfig - (*RingHashLoadBalancingConfig)(nil), // 14: grpc.service_config.RingHashLoadBalancingConfig - (*LrsLoadBalancingPolicyConfig)(nil), // 15: grpc.service_config.LrsLoadBalancingPolicyConfig - (*XdsConfig)(nil), // 16: grpc.service_config.XdsConfig - (*LoadBalancingConfig)(nil), // 17: grpc.service_config.LoadBalancingConfig - (*ServiceConfig)(nil), // 18: grpc.service_config.ServiceConfig - (*MethodConfig_Name)(nil), // 19: grpc.service_config.MethodConfig.Name - (*MethodConfig_RetryPolicy)(nil), // 20: grpc.service_config.MethodConfig.RetryPolicy - (*MethodConfig_HedgingPolicy)(nil), // 21: grpc.service_config.MethodConfig.HedgingPolicy - (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 22: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - nil, // 23: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 24: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - nil, // 25: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 26: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - nil, // 27: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - (*XdsServer_ChannelCredentials)(nil), // 28: grpc.service_config.XdsServer.ChannelCredentials - 
(*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 29: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 30: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - (*ServiceConfig_RetryThrottlingPolicy)(nil), // 32: grpc.service_config.ServiceConfig.RetryThrottlingPolicy - (*ServiceConfig_HealthCheckConfig)(nil), // 33: grpc.service_config.ServiceConfig.HealthCheckConfig - (*wrapperspb.BoolValue)(nil), // 34: google.protobuf.BoolValue - (*durationpb.Duration)(nil), // 35: google.protobuf.Duration - (*wrapperspb.UInt32Value)(nil), // 36: google.protobuf.UInt32Value - (*structpb.Value)(nil), // 37: google.protobuf.Value - (*wrapperspb.StringValue)(nil), // 38: google.protobuf.StringValue - (code.Code)(0), // 39: google.rpc.Code - (*structpb.Struct)(nil), // 40: google.protobuf.Struct + (*OutlierDetectionLoadBalancingConfig)(nil), // 5: grpc.service_config.OutlierDetectionLoadBalancingConfig + (*GrpcLbConfig)(nil), // 6: grpc.service_config.GrpcLbConfig + (*PriorityLoadBalancingPolicyConfig)(nil), // 7: grpc.service_config.PriorityLoadBalancingPolicyConfig + (*WeightedTargetLoadBalancingPolicyConfig)(nil), // 8: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig + (*XdsClusterManagerLoadBalancingPolicyConfig)(nil), // 9: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig + (*CdsConfig)(nil), // 10: grpc.service_config.CdsConfig + (*XdsServer)(nil), // 11: grpc.service_config.XdsServer + (*XdsClusterResolverLoadBalancingPolicyConfig)(nil), // 12: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + (*XdsClusterImplLoadBalancingPolicyConfig)(nil), // 13: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + (*EdsLoadBalancingPolicyConfig)(nil), // 14: 
grpc.service_config.EdsLoadBalancingPolicyConfig + (*RingHashLoadBalancingConfig)(nil), // 15: grpc.service_config.RingHashLoadBalancingConfig + (*LrsLoadBalancingPolicyConfig)(nil), // 16: grpc.service_config.LrsLoadBalancingPolicyConfig + (*XdsConfig)(nil), // 17: grpc.service_config.XdsConfig + (*LoadBalancingConfig)(nil), // 18: grpc.service_config.LoadBalancingConfig + (*ServiceConfig)(nil), // 19: grpc.service_config.ServiceConfig + (*MethodConfig_Name)(nil), // 20: grpc.service_config.MethodConfig.Name + (*MethodConfig_RetryPolicy)(nil), // 21: grpc.service_config.MethodConfig.RetryPolicy + (*MethodConfig_HedgingPolicy)(nil), // 22: grpc.service_config.MethodConfig.HedgingPolicy + (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection)(nil), // 23: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection + (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection)(nil), // 24: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection + (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 25: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + nil, // 26: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 27: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + nil, // 28: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 29: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + nil, // 30: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + (*XdsServer_ChannelCredentials)(nil), // 31: grpc.service_config.XdsServer.ChannelCredentials + (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 32: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 33: 
grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 34: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + (*ServiceConfig_RetryThrottlingPolicy)(nil), // 35: grpc.service_config.ServiceConfig.RetryThrottlingPolicy + (*ServiceConfig_HealthCheckConfig)(nil), // 36: grpc.service_config.ServiceConfig.HealthCheckConfig + (*wrapperspb.BoolValue)(nil), // 37: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 38: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 39: google.protobuf.UInt32Value + (*structpb.Value)(nil), // 40: google.protobuf.Value + (*wrapperspb.StringValue)(nil), // 41: google.protobuf.StringValue + (code.Code)(0), // 42: google.rpc.Code + (*structpb.Struct)(nil), // 43: google.protobuf.Struct } var file_grpc_service_config_service_config_proto_depIdxs = []int32{ - 19, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name - 34, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue - 35, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration - 36, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value - 36, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value - 20, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy - 21, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy - 17, // 7: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 23, // 8: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 25, // 9: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name 
-> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 27, // 10: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - 28, // 11: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials - 37, // 12: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value - 29, // 13: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - 17, // 14: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig - 38, // 15: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 10, // 16: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 36, // 17: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 30, // 18: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - 17, // 19: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 38, // 20: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 17, // 21: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 17, // 22: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 31, // 23: 
grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 17, // 24: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 17, // 25: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 17, // 26: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 38, // 27: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 3, // 28: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig - 4, // 29: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig - 5, // 30: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig - 6, // 31: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig - 7, // 32: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 8, // 33: grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig - 9, // 34: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig - 11, // 35: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - 12, // 36: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - 14, // 37: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig - 15, // 38: 
grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig - 13, // 39: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 16, // 40: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 16, // 41: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig - 1, // 42: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 17, // 43: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 44: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 32, // 45: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 33, // 46: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 35, // 47: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 35, // 48: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 39, // 49: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 35, // 50: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 39, // 51: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 17, // 52: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 22, // 53: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 17, // 54: 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 24, // 55: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 17, // 56: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 26, // 57: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - 40, // 58: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct - 38, // 59: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 10, // 60: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 36, // 61: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 0, // 62: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type - 38, // 63: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 64, // [64:64] is the sub-list for method output_type - 64, // [64:64] is the sub-list for method input_type - 64, // [64:64] is the sub-list for extension type_name - 64, // [64:64] is the sub-list for extension extendee - 0, // [0:64] is the sub-list for field type_name + 20, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name + 37, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> 
google.protobuf.BoolValue + 38, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration + 39, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value + 39, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value + 21, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy + 22, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy + 38, // 7: grpc.service_config.OutlierDetectionLoadBalancingConfig.interval:type_name -> google.protobuf.Duration + 38, // 8: grpc.service_config.OutlierDetectionLoadBalancingConfig.base_ejection_time:type_name -> google.protobuf.Duration + 38, // 9: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_time:type_name -> google.protobuf.Duration + 39, // 10: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_percent:type_name -> google.protobuf.UInt32Value + 23, // 11: grpc.service_config.OutlierDetectionLoadBalancingConfig.success_rate_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection + 24, // 12: grpc.service_config.OutlierDetectionLoadBalancingConfig.failure_percentage_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection + 18, // 13: grpc.service_config.OutlierDetectionLoadBalancingConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 18, // 14: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 26, // 15: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + 28, // 16: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + 30, // 17: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + 31, // 18: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials + 40, // 19: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value + 32, // 20: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + 18, // 21: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig + 41, // 22: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 11, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 39, // 24: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 33, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + 18, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 41, // 27: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 18, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 18, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 34, // 30: 
grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + 18, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 18, // 32: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 18, // 33: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig + 41, // 34: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 3, // 35: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig + 4, // 36: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig + 5, // 37: grpc.service_config.LoadBalancingConfig.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig + 6, // 38: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig + 7, // 39: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig + 8, // 40: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig + 9, // 41: grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig + 10, // 42: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig + 12, // 43: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + 13, // 44: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + 15, // 45: 
grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig + 16, // 46: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig + 14, // 47: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig + 17, // 48: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig + 17, // 49: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig + 1, // 50: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy + 18, // 51: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig + 2, // 52: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig + 35, // 53: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy + 36, // 54: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig + 38, // 55: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration + 38, // 56: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration + 42, // 57: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code + 38, // 58: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration + 42, // 59: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code + 39, // 60: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value + 39, // 61: 
grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 39, // 62: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 39, // 63: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> google.protobuf.UInt32Value + 39, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value + 39, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 39, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 39, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value + 18, // 68: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig + 25, // 69: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + 18, // 70: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 27, // 71: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + 18, // 72: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 29, // 73: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + 43, // 74: 
grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct + 41, // 75: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 11, // 76: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 39, // 77: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 0, // 78: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type + 5, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig + 41, // 80: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for field type_name } func init() { file_grpc_service_config_service_config_proto_init() } @@ -3148,7 +3575,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLbConfig); i { + switch v := v.(*OutlierDetectionLoadBalancingConfig); i { case 0: return &v.state case 1: @@ -3160,7 +3587,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*PriorityLoadBalancingPolicyConfig); i { + switch v := v.(*GrpcLbConfig); i { case 0: return &v.state case 1: @@ -3172,7 +3599,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedTargetLoadBalancingPolicyConfig); i { + switch v := v.(*PriorityLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3184,7 +3611,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig); i { + switch v := v.(*WeightedTargetLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3196,7 +3623,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CdsConfig); i { + switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3208,7 +3635,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsServer); i { + switch v := v.(*CdsConfig); i { case 0: return &v.state case 1: @@ -3220,7 +3647,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig); i { + switch v := v.(*XdsServer); i { case 0: return &v.state case 1: @@ -3232,7 +3659,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*XdsClusterImplLoadBalancingPolicyConfig); i { + switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3244,7 +3671,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EdsLoadBalancingPolicyConfig); i { + switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3256,7 +3683,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RingHashLoadBalancingConfig); i { + switch v := v.(*EdsLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3268,7 +3695,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LrsLoadBalancingPolicyConfig); i { + switch v := v.(*RingHashLoadBalancingConfig); i { case 0: return &v.state case 1: @@ -3280,7 +3707,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsConfig); i { + switch v := v.(*LrsLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3292,7 +3719,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancingConfig); i { + switch v := v.(*XdsConfig); i { case 0: return &v.state case 1: @@ -3304,7 +3731,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*ServiceConfig); i { + switch v := v.(*LoadBalancingConfig); i { case 0: return &v.state case 1: @@ -3316,7 +3743,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_Name); i { + switch v := v.(*ServiceConfig); i { case 0: return &v.state case 1: @@ -3328,7 +3755,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_RetryPolicy); i { + switch v := v.(*MethodConfig_Name); i { case 0: return &v.state case 1: @@ -3340,7 +3767,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_HedgingPolicy); i { + switch v := v.(*MethodConfig_RetryPolicy); i { case 0: return &v.state case 1: @@ -3352,7 +3779,19 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { + switch v := v.(*MethodConfig_HedgingPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutlierDetectionLoadBalancingConfig_SuccessRateEjection); i { case 0: return &v.state case 1: @@ -3364,6 +3803,30 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { case 0: return &v.state @@ -3375,7 +3838,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig_Child); i { case 0: return &v.state @@ -3387,7 +3850,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsServer_ChannelCredentials); i { case 0: return &v.state @@ -3399,7 +3862,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism); i { case 0: return &v.state @@ -3411,7 +3874,7 @@ func 
file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig_DropCategory); i { case 0: return &v.state @@ -3423,7 +3886,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LrsLoadBalancingPolicyConfig_Locality); i { case 0: return &v.state @@ -3435,7 +3898,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_RetryThrottlingPolicy); i { case 0: return &v.state @@ -3447,7 +3910,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_HealthCheckConfig); i { case 0: return &v.state @@ -3464,9 +3927,10 @@ func file_grpc_service_config_service_config_proto_init() { (*MethodConfig_RetryPolicy_)(nil), (*MethodConfig_HedgingPolicy_)(nil), } - file_grpc_service_config_service_config_proto_msgTypes[15].OneofWrappers = []interface{}{ + file_grpc_service_config_service_config_proto_msgTypes[16].OneofWrappers = []interface{}{ (*LoadBalancingConfig_PickFirst)(nil), 
(*LoadBalancingConfig_RoundRobin)(nil), + (*LoadBalancingConfig_OutlierDetection)(nil), (*LoadBalancingConfig_Grpclb)(nil), (*LoadBalancingConfig_PriorityExperimental)(nil), (*LoadBalancingConfig_WeightedTargetExperimental)(nil), @@ -3486,7 +3950,7 @@ func file_grpc_service_config_service_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_service_config_service_config_proto_rawDesc, NumEnums: 2, - NumMessages: 32, + NumMessages: 35, NumExtensions: 0, NumServices: 0, }, From 431ea809a7676e1da8d09c33ae0d31fcba85f1ff Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 4 Mar 2022 12:00:21 -0500 Subject: [PATCH 442/998] internal/balancer: Add graceful switch LB policy (#5207) internal/balancer: Add graceful switch LB policy --- .../balancer/gracefulswitch/gracefulswitch.go | 375 ++++++ .../gracefulswitch/gracefulswitch_test.go | 1083 +++++++++++++++++ 2 files changed, 1458 insertions(+) create mode 100644 internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 internal/balancer/gracefulswitch/gracefulswitch_test.go diff --git a/internal/balancer/gracefulswitch/gracefulswitch.go b/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000000..af6cc46189f4 --- /dev/null +++ b/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,375 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. 
This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. 
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. 
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. 
+ var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. 
+ bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. 
Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/internal/balancer/gracefulswitch/gracefulswitch_test.go b/internal/balancer/gracefulswitch/gracefulswitch_test.go new file mode 100644 index 000000000000..02018f068ed0 --- /dev/null +++ b/internal/balancer/gracefulswitch/gracefulswitch_test.go @@ -0,0 +1,1083 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package gracefulswitch + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func setup(t *testing.T) (*testutils.TestClientConn, *Balancer) { + tcc := testutils.NewTestClientConn(t) + return tcc, NewBalancer(tcc, balancer.BuildOptions{}) +} + +// TestSuccessfulFirstUpdate tests a basic scenario for the graceful switch load +// balancer, where it is setup with a balancer which should populate the current +// load balancer. Any ClientConn updates should then be forwarded to this +// current load balancer. +func (s) TestSuccessfulFirstUpdate(t *testing.T) { + _, gsb := setup(t) + if err := gsb.SwitchTo(mockBalancerBuilder1{}); err != nil { + t.Fatalf("Balancer.SwitchTo failed with error: %v", err) + } + if gsb.balancerCurrent == nil { + t.Fatal("current balancer not populated after a successful call to SwitchTo()") + } + // This will be used to update the graceful switch balancer. This update + // should simply be forwarded down to the current load balancing policy. + ccs := balancer.ClientConnState{ + BalancerConfig: mockBalancerConfig{}, + } + + // Updating ClientConnState should forward the update exactly as is to the + // current balancer. 
+ if err := gsb.UpdateClientConnState(ccs); err != nil { + t.Fatalf("Balancer.UpdateClientConnState(%v) failed: %v", ccs, err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForClientConnUpdate(ctx, ccs); err != nil { + t.Fatal(err) + } +} + +// TestTwoBalancersSameType tests the scenario where there is a graceful switch +// load balancer setup with a current and pending load balancer of the same +// type. Any ClientConn update should be forwarded to the current lb if there is +// a current lb and no pending lb, and the only the pending lb if the graceful +// switch balancer contains both a current lb and a pending lb. The pending load +// balancer should also swap into current whenever it updates with a +// connectivity state other than CONNECTING. +func (s) TestTwoBalancersSameType(t *testing.T) { + tcc, gsb := setup(t) + // This will be used to update the graceful switch balancer. This update + // should simply be forwarded down to either the current or pending load + // balancing policy. + ccs := balancer.ClientConnState{ + BalancerConfig: mockBalancerConfig{}, + } + + gsb.SwitchTo(mockBalancerBuilder1{}) + gsb.UpdateClientConnState(ccs) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForClientConnUpdate(ctx, ccs); err != nil { + t.Fatal(err) + } + + // The current balancer reporting READY should cause this state + // to be forwarded to the ClientConn. 
+ gsb.balancerCurrent.Balancer.(*mockBalancer).updateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &neverErrPicker{}, + }) + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case state := <-tcc.NewStateCh: + if state != connectivity.Ready { + t.Fatalf("current balancer reports connectivity state %v, want %v", state, connectivity.Ready) + } + } + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + // Should receive a never err picker. + if _, err := picker.Pick(balancer.PickInfo{}); err != nil { + t.Fatalf("ClientConn should have received a never err picker from an UpdateState call") + } + } + + // An explicit call to switchTo, even if the same type, should cause the + // balancer to build a new balancer for pending. + gsb.SwitchTo(mockBalancerBuilder1{}) + if gsb.balancerPending == nil { + t.Fatal("pending balancer not populated after another call to SwitchTo()") + } + + // A ClientConn update received should be forwarded to the new pending LB + // policy, and not the current one. + gsb.UpdateClientConnState(ccs) + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForClientConnUpdate(sCtx, ccs); err == nil { + t.Fatal("current balancer received a ClientConn update when there is a pending balancer") + } + if err := gsb.balancerPending.Balancer.(*mockBalancer).waitForClientConnUpdate(ctx, ccs); err != nil { + t.Fatal(err) + } + + // If the pending load balancer reports that is CONNECTING, no update should + // be sent to the ClientConn. 
+ gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + }) + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + select { + case <-tcc.NewStateCh: + t.Fatal("balancerPending reporting CONNECTING should not forward up to the ClientConn") + case <-sCtx.Done(): + } + + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + // If the pending load balancer reports a state other than CONNECTING, the + // pending load balancer is logically warmed up, and the ClientConn should + // be updated with the State and Picker to start using the new policy. The + // pending load balancing policy should also be switched into the current + // load balancer. + gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &neverErrPicker{}, + }) + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case state := <-tcc.NewStateCh: + if state != connectivity.Ready { + t.Fatalf("pending balancer reports connectivity state %v, want %v", state, connectivity.Ready) + } + } + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + // This picker should be the recent one sent from UpdateState(), a never + // err picker, not the nil picker from two updateState() calls previous. + if picker == nil { + t.Fatalf("ClientConn should have received a never err picker, which is the most recent picker, from an UpdateState call") + } + if _, err := picker.Pick(balancer.PickInfo{}); err != nil { + t.Fatalf("ClientConn should have received a never err picker, which is the most recent picker, from an UpdateState call") + } + } + // The current balancer should be closed as a result of the swap. 
+ if err := currBal.waitForClose(ctx); err != nil { + t.Fatal(err) + } +} + +// TestCurrentNotReadyPendingUpdate tests the scenario where there is a current +// and pending load balancer setup in the graceful switch load balancer, and the +// current LB is not in the connectivity state READY. Any update from the +// pending load balancer should cause the graceful switch load balancer to swap +// the pending into current, and update the ClientConn with the pending load +// balancers state. +func (s) TestCurrentNotReadyPendingUpdate(t *testing.T) { + tcc, gsb := setup(t) + gsb.SwitchTo(mockBalancerBuilder1{}) + gsb.SwitchTo(mockBalancerBuilder1{}) + if gsb.balancerPending == nil { + t.Fatal("pending balancer not populated after another call to SwitchTo()") + } + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + // Due to the current load balancer not being in state READY, any update + // from the pending load balancer should cause that update to be forwarded + // to the ClientConn and also cause the pending load balancer to swap into + // the current one. + gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &neverErrPicker{}, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for an UpdateState call on the ClientConn") + case state := <-tcc.NewStateCh: + if state != connectivity.Connecting { + t.Fatalf("ClientConn received connectivity state %v, want %v (from pending)", state, connectivity.Connecting) + } + } + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for an UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + // Should receive a never err picker. 
+ if _, err := picker.Pick(balancer.PickInfo{}); err != nil { + t.Fatalf("ClientConn should have received a never err picker from an UpdateState call") + } + } + + // The current balancer should be closed as a result of the swap. + if err := currBal.waitForClose(ctx); err != nil { + t.Fatal(err) + } +} + +// TestCurrentLeavingReady tests the scenario where there is a current and +// pending load balancer setup in the graceful switch load balancer, with the +// current load balancer being in the state READY, and the current load balancer +// then transitions into a state other than READY. This should cause the pending +// load balancer to swap into the current load balancer, and the ClientConn to +// be updated with the cached pending load balancing state. Also, once the +// current is cleared from the graceful switch load balancer, any updates sent +// should be intercepted and not forwarded to the ClientConn, as the balancer +// has already been cleared. +func (s) TestCurrentLeavingReady(t *testing.T) { + tcc, gsb := setup(t) + gsb.SwitchTo(mockBalancerBuilder1{}) + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + currBal.updateState(balancer.State{ + ConnectivityState: connectivity.Ready, + }) + + gsb.SwitchTo(mockBalancerBuilder2{}) + // Sends CONNECTING, shouldn't make it's way to ClientConn. + gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &neverErrPicker{}, + }) + + // The current balancer leaving READY should cause the pending balancer to + // swap to the current balancer. This swap from current to pending should + // also update the ClientConn with the pending balancers cached state and + // picker. 
+ currBal.updateState(balancer.State{ + ConnectivityState: connectivity.Idle, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case state := <-tcc.NewStateCh: + if state != connectivity.Connecting { + t.Fatalf("current balancer reports connectivity state %v, want %v", state, connectivity.Connecting) + } + } + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + // Should receive a never err picker cached from pending LB's updateState() call, which + // was cached. + if _, err := picker.Pick(balancer.PickInfo{}); err != nil { + t.Fatalf("ClientConn should have received a never err picker, the cached picker, from an UpdateState call") + } + } + + // The current balancer should be closed as a result of the swap. + if err := currBal.waitForClose(ctx); err != nil { + t.Fatal(err) + } + + // The current balancer is now cleared from the graceful switch load + // balancer. Thus, any update from the old current should be intercepted by + // the graceful switch load balancer and not forward up to the ClientConn. + currBal.updateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &neverErrPicker{}, + }) + + // This update should not be forwarded to the ClientConn. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-tcc.NewStateCh: + t.Fatal("UpdateState() from a cleared balancer should not make it's way to ClientConn") + } + + if _, err := currBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}); err == nil { + t.Fatal("newSubConn() from a cleared balancer should have returned an error") + } + + // This newSubConn call should also not reach the ClientConn. 
+ sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-tcc.NewSubConnCh: + t.Fatal("newSubConn() from a cleared balancer should not make it's way to ClientConn") + } +} + +// TestBalancerSubconns tests the SubConn functionality of the graceful switch +// load balancer. This tests the SubConn update flow in both directions, and +// make sure updates end up at the correct component. Also, it tests that on an +// UpdateSubConnState() call from the ClientConn, the graceful switch load +// balancer forwards it to the correct child balancer. +func (s) TestBalancerSubconns(t *testing.T) { + tcc, gsb := setup(t) + gsb.SwitchTo(mockBalancerBuilder1{}) + gsb.SwitchTo(mockBalancerBuilder2{}) + + // A child balancer creating a new SubConn should eventually be forwarded to + // the ClientConn held by the graceful switch load balancer. + sc1, err := gsb.balancerCurrent.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error constructing newSubConn in gsb: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn") + case sc := <-tcc.NewSubConnCh: + if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("NewSubConn, want %v, got %v", sc1, sc) + } + } + + // The other child balancer creating a new SubConn should also be eventually + // be forwarded to the ClientConn held by the graceful switch load balancer. 
+ sc2, err := gsb.balancerPending.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error constructing newSubConn in gsb: %v", err) + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn") + case sc := <-tcc.NewSubConnCh: + if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("NewSubConn, want %v, got %v", sc2, sc) + } + } + scState := balancer.SubConnState{ConnectivityState: connectivity.Ready} + // Updating the SubConnState for sc1 should cause the graceful switch + // balancer to forward the Update to balancerCurrent for sc1, as that is the + // balancer that created this SubConn. + gsb.UpdateSubConnState(sc1, scState) + + // This update should get forwarded to balancerCurrent, as that is the LB + // that created this SubConn. + if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForSubConnUpdate(ctx, subConnWithState{sc: sc1, state: scState}); err != nil { + t.Fatal(err) + } + // This update should not get forwarded to balancerPending, as that is not + // the LB that created this SubConn. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if err := gsb.balancerPending.Balancer.(*mockBalancer).waitForSubConnUpdate(sCtx, subConnWithState{sc: sc1, state: scState}); err == nil { + t.Fatalf("balancerPending should not have received a subconn update for sc1") + } + + // Updating the SubConnState for sc2 should cause the graceful switch + // balancer to forward the Update to balancerPending for sc2, as that is the + // balancer that created this SubConn. + gsb.UpdateSubConnState(sc2, scState) + + // This update should get forwarded to balancerPending, as that is the LB + // that created this SubConn. 
+ if err := gsb.balancerPending.Balancer.(*mockBalancer).waitForSubConnUpdate(ctx, subConnWithState{sc: sc2, state: scState}); err != nil { + t.Fatal(err) + } + + // This update should not get forwarded to balancerCurrent, as that is not + // the LB that created this SubConn. + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForSubConnUpdate(sCtx, subConnWithState{sc: sc2, state: scState}); err == nil { + t.Fatalf("balancerCurrent should not have received a subconn update for sc2") + } + + // Updating the addresses for both SubConns and removing both SubConns + // should get forwarded to the ClientConn. + + // Updating the addresses for sc1 should get forwarded to the ClientConn. + gsb.balancerCurrent.Balancer.(*mockBalancer).updateAddresses(sc1, []resolver.Address{}) + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + + // Updating the addresses for sc2 should also get forwarded to the ClientConn. + gsb.balancerPending.Balancer.(*mockBalancer).updateAddresses(sc2, []resolver.Address{}) + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + + // balancerCurrent removing sc1 should get forwarded to the ClientConn. + gsb.balancerCurrent.Balancer.(*mockBalancer).removeSubConn(sc1) + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn") + case sc := <-tcc.RemoveSubConnCh: + if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, sc) + } + } + // balancerPending removing sc2 should get forwarded to the ClientConn. 
+ gsb.balancerPending.Balancer.(*mockBalancer).removeSubConn(sc2) + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn") + case sc := <-tcc.RemoveSubConnCh: + if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc2, sc) + } + } +} + +// TestBalancerClose tests the graceful switch balancer's Close() functionality. +// From the Close() call, the graceful switch balancer should remove any created +// Subconns and Close() the current and pending load balancers. This Close() +// call should also cause any other events (calls to entrance functions) to be +// no-ops. +func (s) TestBalancerClose(t *testing.T) { + // Setup gsb balancer with current, pending, and one created SubConn on both + // current and pending. + tcc, gsb := setup(t) + gsb.SwitchTo(mockBalancerBuilder1{}) + gsb.SwitchTo(mockBalancerBuilder2{}) + + sc1, err := gsb.balancerCurrent.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) // Will eventually get back a SubConn with an identifying property id 1 + if err != nil { + t.Fatalf("error constructing newSubConn in gsb: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn") + case <-tcc.NewSubConnCh: + } + + sc2, err := gsb.balancerPending.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) // Will eventually get back a SubConn with an identifying property id 2 + if err != nil { + t.Fatalf("error constructing newSubConn in gsb: %v", err) + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn") + case <-tcc.NewSubConnCh: + } + + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + pendBal := gsb.balancerPending.Balancer.(*mockBalancer) + + // 
Closing the graceful switch load balancer should lead to removing any + // created SubConns, and closing both the current and pending load balancer. + gsb.Close() + + // The order of SubConns the graceful switch load balancer tells the Client + // Conn to remove is non deterministic, as it is stored in a map. However, + // the first SubConn removed should be either sc1 or sc2. + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn") + case sc := <-tcc.RemoveSubConnCh: + if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want either %v or %v, got %v", sc1, sc2, sc) + } + } + } + + // The graceful switch load balancer should then tell the ClientConn to + // remove the other SubConn. + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn") + case sc := <-tcc.RemoveSubConnCh: + if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want either %v or %v, got %v", sc1, sc2, sc) + } + } + } + + // The current balancer should get closed as a result of the graceful switch balancer being closed. + if err := currBal.waitForClose(ctx); err != nil { + t.Fatal(err) + } + // The pending balancer should also get closed as a result of the graceful switch balancer being closed. + if err := pendBal.waitForClose(ctx); err != nil { + t.Fatal(err) + } + + // Once the graceful switch load balancer has been closed, any entrance + // function should be a no-op and return errBalancerClosed if the function + // returns an error. + + // SwitchTo() should return an error due to the graceful switch load + // balancer having been closed already. 
+ if err := gsb.SwitchTo(mockBalancerBuilder1{}); err != errBalancerClosed { + t.Fatalf("gsb.SwitchTo(%v) returned error %v, want %v", mockBalancerBuilder1{}, err, errBalancerClosed) + } + + // UpdateClientConnState() should return an error due to the graceful switch + // load balancer having been closed already. + ccs := balancer.ClientConnState{ + BalancerConfig: mockBalancerConfig{}, + } + if err := gsb.UpdateClientConnState(ccs); err != errBalancerClosed { + t.Fatalf("gsb.UpdateCLientConnState(%v) returned error %v, want %v", ccs, err, errBalancerClosed) + } + + // After the graceful switch load balancer has been closed, any resolver error + // shouldn't forward to either balancer, as the resolver error is a no-op + // and also even if not, the balancers should have been cleared from the + // graceful switch load balancer. + gsb.ResolverError(balancer.ErrBadResolverState) + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if err := currBal.waitForResolverError(sCtx, balancer.ErrBadResolverState); !strings.Contains(err.Error(), sCtx.Err().Error()) { + t.Fatal("the current balancer should not have received the resolver error after close") + } + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if err := pendBal.waitForResolverError(sCtx, balancer.ErrBadResolverState); !strings.Contains(err.Error(), sCtx.Err().Error()) { + t.Fatal("the pending balancer should not have received the resolver error after close") + } +} + +// TestResolverError tests the functionality of a Resolver Error. If there is a +// current balancer, but no pending, the error should be forwarded to the +// current balancer. If there is both a current and pending balancer, the error +// should be forwarded to only the pending balancer. 
+func (s) TestResolverError(t *testing.T) { + _, gsb := setup(t) + gsb.SwitchTo(mockBalancerBuilder1{}) + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + // If there is only a current balancer present, the resolver error should be + // forwarded to the current balancer. + gsb.ResolverError(balancer.ErrBadResolverState) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := currBal.waitForResolverError(ctx, balancer.ErrBadResolverState); err != nil { + t.Fatal(err) + } + + gsb.SwitchTo(mockBalancerBuilder1{}) + + // If there is a pending balancer present, then a resolver error should be + // forwarded to only the pending balancer, not the current. + pendBal := gsb.balancerPending.Balancer.(*mockBalancer) + gsb.ResolverError(balancer.ErrBadResolverState) + + // The Resolver Error should not be forwarded to the current load balancer. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if err := currBal.waitForResolverError(sCtx, balancer.ErrBadResolverState); !strings.Contains(err.Error(), sCtx.Err().Error()) { + t.Fatal("the current balancer should not have received the resolver error after close") + } + + // The Resolver Error should be forwarded to the pending load balancer. + if err := pendBal.waitForResolverError(ctx, balancer.ErrBadResolverState); err != nil { + t.Fatal(err) + } +} + +// TestPendingReplacedByAnotherPending tests the scenario where a graceful +// switch balancer has a current and pending load balancer, and receives a +// SwitchTo() call, which then replaces the pending. This should cause the +// graceful switch balancer to clear pending state, close old pending SubConns, +// and Close() the pending balancer being replaced. 
+func (s) TestPendingReplacedByAnotherPending(t *testing.T) { + tcc, gsb := setup(t) + gsb.SwitchTo(mockBalancerBuilder1{}) + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + currBal.updateState(balancer.State{ + ConnectivityState: connectivity.Ready, + }) + + // Populate pending with a SwitchTo() call. + gsb.SwitchTo(mockBalancerBuilder2{}) + + pendBal := gsb.balancerPending.Balancer.(*mockBalancer) + sc1, err := pendBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error constructing newSubConn in gsb: %v", err) + } + // This picker never returns an error, which can help this this test verify + // whether this cached state will get cleared on a new pending balancer + // (will replace it with a picker that always errors). + pendBal.updateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &neverErrPicker{}, + }) + + // Replace pending with a SwitchTo() call. + gsb.SwitchTo(mockBalancerBuilder2{}) + // The pending balancer being replaced should cause the graceful switch + // balancer to Remove() any created SubConns for the old pending balancer + // and also Close() the old pending balancer. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a RemoveSubConn call on the ClientConn") + case sc := <-tcc.RemoveSubConnCh: + if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, sc) + } + } + + if err := pendBal.waitForClose(ctx); err != nil { + t.Fatal(err) + } + + // Switching the current out of READY should cause the pending LB to swap + // into current, causing the graceful switch balancer to update the + // ClientConn with the cached pending state. Since the new pending hasn't + // sent an Update, the default state with connectivity state CONNECTING and + // an errPicker should be sent to the ClientConn. 
+ currBal.updateState(balancer.State{ + ConnectivityState: connectivity.Idle, + }) + + // The update should contain a default connectivity state CONNECTING for the + // state of the new pending LB policy. + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn") + case state := <-tcc.NewStateCh: + if state != connectivity.Connecting { + t.Fatalf("UpdateState(), want connectivity state %v, got %v", connectivity.Connecting, state) + } + } + // The update should contain a default picker ErrPicker in the picker sent + // for the state of the new pending LB policy. + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn") + case picker := <-tcc.NewPickerCh: + if _, err := picker.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("ClientConn should have received a never err picker from an UpdateState call") + } + } +} + +// Picker which never errors here for test purposes (can fill up tests further up with this) +type neverErrPicker struct{} + +func (p *neverErrPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, nil +} + +// TestUpdateSubConnStateRace tests the race condition when the graceful switch +// load balancer receives a SubConnUpdate concurrently with an UpdateState() +// call, which can cause the balancer to forward the update to to be closed and +// cleared. The balancer API guarantees to never call any method the balancer +// after a Close() call, and the test verifies that doesn't happen within the +// graceful switch load balancer. 
+func (s) TestUpdateSubConnStateRace(t *testing.T) { + tcc, gsb := setup(t) + gsb.SwitchTo(verifyBalancerBuilder{}) + gsb.SwitchTo(mockBalancerBuilder1{}) + currBal := gsb.balancerCurrent.Balancer.(*verifyBalancer) + currBal.t = t + pendBal := gsb.balancerPending.Balancer.(*mockBalancer) + sc, err := currBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error constructing newSubConn in gsb: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn") + case <-tcc.NewSubConnCh: + } + // Spawn a goroutine that constantly calls UpdateSubConn for the current + // balancer, which will get deleted in this testing goroutine. + finished := make(chan struct{}) + go func() { + for { + select { + case <-finished: + return + default: + } + gsb.UpdateSubConnState(sc, balancer.SubConnState{ + ConnectivityState: connectivity.Ready, + }) + } + }() + time.Sleep(time.Millisecond) + // This UpdateState call causes current to be closed/cleared. + pendBal.updateState(balancer.State{ + ConnectivityState: connectivity.Ready, + }) + // From this, either one of two things happen. Either the graceful switch + // load balancer doesn't Close() the current balancer before it forwards the + // SubConn update to the child, and the call gets forwarded down to the + // current balancer, or it can Close() the current balancer in between + // reading the balancer pointer and writing to it, and in that case the old + // current balancer should not be updated, as the balancer has already been + // closed and the balancer API guarantees it. + close(finished) +} + +// TestInlineCallbackInBuild tests the scenario where a balancer calls back into +// the balancer.ClientConn API inline from it's build function. 
+func (s) TestInlineCallbackInBuild(t *testing.T) { + tcc, gsb := setup(t) + // This build call should cause all of the inline updates to forward to the + // ClientConn. + gsb.SwitchTo(buildCallbackBalancerBuilder{}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn") + case <-tcc.NewStateCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn() call on the ClientConn") + case <-tcc.NewSubConnCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses() call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an RemoveSubConn() call on the ClientConn") + case <-tcc.RemoveSubConnCh: + } + oldCurrent := gsb.balancerCurrent.Balancer.(*buildCallbackBal) + + // Since the callback reports a state READY, this new inline balancer should + // be swapped to the current. + gsb.SwitchTo(buildCallbackBalancerBuilder{}) + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn") + case <-tcc.NewStateCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn() call on the ClientConn") + case <-tcc.NewSubConnCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses() call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an RemoveSubConn() call on the ClientConn") + case <-tcc.RemoveSubConnCh: + } + + // The current balancer should be closed as a result of the swap. + if err := oldCurrent.waitForClose(ctx); err != nil { + t.Fatalf("error waiting for balancer close: %v", err) + } + + // The old balancer should be deprecated and any calls from it should be a no-op. 
+ oldCurrent.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + select { + case <-tcc.NewSubConnCh: + t.Fatal("Deprecated LB calling NewSubConn() should not forward up to the ClientConn") + case <-sCtx.Done(): + } +} + +const balancerName1 = "mock_balancer_1" +const balancerName2 = "mock_balancer_2" +const verifyBalName = "verifyNoSubConnUpdateAfterCloseBalancer" +const buildCallbackBalName = "callbackInBuildBalancer" + +type mockBalancerBuilder1 struct{} + +func (mockBalancerBuilder1) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &mockBalancer{ + ccsCh: testutils.NewChannel(), + scStateCh: testutils.NewChannel(), + resolverErrCh: testutils.NewChannel(), + closeCh: testutils.NewChannel(), + cc: cc, + } +} + +func (mockBalancerBuilder1) Name() string { + return balancerName1 +} + +type mockBalancerConfig struct { + serviceconfig.LoadBalancingConfig +} + +// mockBalancer is a fake balancer used to verify different actions from +// the gracefulswitch. It contains a bunch of channels to signal different events +// to the test. +type mockBalancer struct { + // ccsCh is a channel used to signal the receipt of a ClientConn update. + ccsCh *testutils.Channel + // scStateCh is a channel used to signal the receipt of a SubConn update. + scStateCh *testutils.Channel + // resolverErrCh is a channel used to signal a resolver error. + resolverErrCh *testutils.Channel + // closeCh is a channel used to signal the closing of this balancer. 
+ closeCh *testutils.Channel + // Hold onto ClientConn wrapper to communicate with it + cc balancer.ClientConn +} + +type subConnWithState struct { + sc balancer.SubConn + state balancer.SubConnState +} + +func (mb1 *mockBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + // Need to verify this call...use a channel?...all of these will need verification + mb1.ccsCh.Send(ccs) + return nil +} + +func (mb1 *mockBalancer) ResolverError(err error) { + mb1.resolverErrCh.Send(err) +} + +func (mb1 *mockBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + mb1.scStateCh.Send(subConnWithState{sc: sc, state: state}) +} + +func (mb1 *mockBalancer) Close() { + mb1.closeCh.Send(struct{}{}) +} + +// waitForClientConnUpdate verifies if the mockBalancer receives the +// provided ClientConnState within a reasonable amount of time. +func (mb1 *mockBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS balancer.ClientConnState) error { + ccs, err := mb1.ccsCh.Receive(ctx) + if err != nil { + return fmt.Errorf("error waiting for ClientConnUpdate: %v", err) + } + gotCCS := ccs.(balancer.ClientConnState) + if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes")); diff != "" { + return fmt.Errorf("error in ClientConnUpdate: received unexpected ClientConnState, diff (-got +want): %v", diff) + } + return nil +} + +// waitForSubConnUpdate verifies if the mockBalancer receives the provided +// SubConn update before the context expires. 
+func (mb1 *mockBalancer) waitForSubConnUpdate(ctx context.Context, wantSCS subConnWithState) error { + scs, err := mb1.scStateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("error waiting for SubConnUpdate: %v", err) + } + gotSCS := scs.(subConnWithState) + if !cmp.Equal(gotSCS, wantSCS, cmp.AllowUnexported(subConnWithState{}, testutils.TestSubConn{})) { + return fmt.Errorf("error in SubConnUpdate: received SubConnState: %+v, want %+v", gotSCS, wantSCS) + } + return nil +} + +// waitForResolverError verifies if the mockBalancer receives the provided +// resolver error before the context expires. +func (mb1 *mockBalancer) waitForResolverError(ctx context.Context, wantErr error) error { + gotErr, err := mb1.resolverErrCh.Receive(ctx) + if err != nil { + return fmt.Errorf("error waiting for resolver error: %v", err) + } + if gotErr != wantErr { + return fmt.Errorf("received resolver error: %v, want %v", gotErr, wantErr) + } + return nil +} + +// waitForClose verifies that the mockBalancer is closed before the context +// expires. 
+func (mb1 *mockBalancer) waitForClose(ctx context.Context) error { + if _, err := mb1.closeCh.Receive(ctx); err != nil { + return fmt.Errorf("error waiting for Close(): %v", err) + } + return nil +} + +func (mb1 *mockBalancer) updateState(state balancer.State) { + mb1.cc.UpdateState(state) +} + +func (mb1 *mockBalancer) newSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return mb1.cc.NewSubConn(addrs, opts) +} + +func (mb1 *mockBalancer) updateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + mb1.cc.UpdateAddresses(sc, addrs) +} + +func (mb1 *mockBalancer) removeSubConn(sc balancer.SubConn) { + mb1.cc.RemoveSubConn(sc) +} + +type mockBalancerBuilder2 struct{} + +func (mockBalancerBuilder2) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &mockBalancer{ + ccsCh: testutils.NewChannel(), + scStateCh: testutils.NewChannel(), + resolverErrCh: testutils.NewChannel(), + closeCh: testutils.NewChannel(), + cc: cc, + } +} + +func (mockBalancerBuilder2) Name() string { + return balancerName2 +} + +type verifyBalancerBuilder struct{} + +func (verifyBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &verifyBalancer{ + closed: grpcsync.NewEvent(), + cc: cc, + } +} + +func (verifyBalancerBuilder) Name() string { + return verifyBalName +} + +// verifyBalancer is a balancer that verifies that after a Close() call, an +// updateSubConnState() call never happens. +type verifyBalancer struct { + closed *grpcsync.Event + // Hold onto the ClientConn wrapper to communicate with it. + cc balancer.ClientConn + // To fail the test if UpdateSubConnState gets called after Close(). 
+ t *testing.T +} + +func (vb *verifyBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + return nil +} + +func (vb *verifyBalancer) ResolverError(err error) {} + +func (vb *verifyBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if vb.closed.HasFired() { + vb.t.Fatal("UpdateSubConnState was called after Close(), which breaks the balancer API") + } +} + +func (vb *verifyBalancer) Close() { + vb.closed.Fire() +} + +func (vb *verifyBalancer) newSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return vb.cc.NewSubConn(addrs, opts) +} + +type buildCallbackBalancerBuilder struct{} + +func (buildCallbackBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + b := &buildCallbackBal{ + cc: cc, + closeCh: testutils.NewChannel(), + } + b.updateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + }) + sc, err := b.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + if err != nil { + return nil + } + b.updateAddresses(sc, []resolver.Address{}) + b.removeSubConn(sc) + return b +} + +func (buildCallbackBalancerBuilder) Name() string { + return buildCallbackBalName +} + +type buildCallbackBal struct { + // Hold onto the ClientConn wrapper to communicate with it. + cc balancer.ClientConn + // closeCh is a channel used to signal the closing of this balancer. 
+ closeCh *testutils.Channel +} + +func (bcb *buildCallbackBal) UpdateClientConnState(ccs balancer.ClientConnState) error { + return nil +} + +func (bcb *buildCallbackBal) ResolverError(err error) {} + +func (bcb *buildCallbackBal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {} + +func (bcb *buildCallbackBal) Close() { + bcb.closeCh.Send(struct{}{}) +} + +func (bcb *buildCallbackBal) updateState(state balancer.State) { + bcb.cc.UpdateState(state) +} + +func (bcb *buildCallbackBal) newSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return bcb.cc.NewSubConn(addrs, opts) +} + +func (bcb *buildCallbackBal) updateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bcb.cc.UpdateAddresses(sc, addrs) +} + +func (bcb *buildCallbackBal) removeSubConn(sc balancer.SubConn) { + bcb.cc.RemoveSubConn(sc) +} + +// waitForClose verifies that the mockBalancer is closed before the context +// expires. +func (bcb *buildCallbackBal) waitForClose(ctx context.Context) error { + if _, err := bcb.closeCh.Receive(ctx); err != nil { + return err + } + return nil +} From cf6d4d5ab7af69753510e9ae717025aa31deb359 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 9 Mar 2022 07:51:20 -0800 Subject: [PATCH 443/998] grpc: document ClientConn fields (#5227) --- clientconn.go | 55 +++++++++++++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/clientconn.go b/clientconn.go index 10f67edecdee..c084a9013e42 100644 --- a/clientconn.go +++ b/clientconn.go @@ -461,34 +461,41 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. 
type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + balancerBuildOpts balancer.BuildOptions // TODO: delete once we move to the gracefulswitch balancer. + channelzID *channelz.Identifier // Channelz identifier for the channel. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event + // TODO: Add a goodResolveEvent to track whether the name resolver sent us a + // good update. This will be used to determine if a balancer is configured on + // the channel instead of checking for `cc.balancerWrapper != nil`. - channelzID *channelz.Identifier - czData *channelzData + // mu protects the following fields. 
+ // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + curBalancerName string // TODO: delete as part of https://github.com/grpc/grpc-go/issues/5229. + balancerWrapper *ccBalancerWrapper // TODO: Use gracefulswitch balancer to be able to initialize this once and never rewrite. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error From eead9a824c37eadae1d91de892ede9797084d78e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 10 Mar 2022 13:15:11 -0800 Subject: [PATCH 444/998] grpc: delete deprecated API WithBalancerName() (#5232) --- balancer_switching_test.go | 43 +--- clientconn.go | 51 ++-- clientconn_state_transition_test.go | 99 ++++--- clientconn_test.go | 62 ++--- dialoptions.go | 43 +--- test/balancer_test.go | 14 +- test/end2end_test.go | 2 +- test/healthcheck_test.go | 386 ++++++++++------------------ 8 files changed, 254 insertions(+), 446 deletions(-) diff --git a/balancer_switching_test.go b/balancer_switching_test.go index 5d9a1f9fffc1..3c12dd2e4eec 100644 --- a/balancer_switching_test.go +++ b/balancer_switching_test.go @@ -26,7 +26,7 @@ import ( "time" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/resolver" @@ -161,7 +161,7 @@ func (s) TestSwitchBalancer(t *testing.T) { servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", 
WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -184,38 +184,11 @@ func (s) TestSwitchBalancer(t *testing.T) { } } -// Test that balancer specified by dial option will not be overridden. -func (s) TestBalancerDialOption(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - const numServers = 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{}), WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - addrs := []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}} - r.UpdateState(resolver.State{Addresses: addrs}) - // The init balancer is roundrobin. - if err := checkRoundRobin(cc, servers); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } - // Switch to pickfirst. - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`), Addresses: addrs}, nil) - // Balancer is still roundrobin. - if err := checkRoundRobin(cc, servers); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } -} - // First addr update contains grpclb. 
func (s) TestSwitchBalancerGRPCLBFirst(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -275,7 +248,7 @@ func (s) TestSwitchBalancerGRPCLBFirst(t *testing.T) { func (s) TestSwitchBalancerGRPCLBSecond(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -351,7 +324,7 @@ func (s) TestSwitchBalancerGRPCLBSecond(t *testing.T) { func (s) TestSwitchBalancerGRPCLBRoundRobin(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -413,7 +386,7 @@ func (s) TestSwitchBalancerGRPCLBRoundRobin(t *testing.T) { func (s) TestSwitchBalancerGRPCLBServiceConfig(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -503,7 +476,7 @@ func (s) TestSwitchBalancerGRPCLBWithGRPCLBNotRegistered(t *testing.T) { servers, scleanup := startServers(t, numServers, 
math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -550,7 +523,7 @@ func init() { // This test is to make sure this close doesn't cause a deadlock. func (s) TestSwitchBalancerOldRemoveSubConn(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } diff --git a/clientconn.go b/clientconn.go index c084a9013e42..dd12a14f09a4 100644 --- a/clientconn.go +++ b/clientconn.go @@ -674,7 +674,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } @@ -714,10 +714,6 @@ func (cc *ClientConn) switchBalancer(name string) { } channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } if cc.balancerWrapper != nil { // Don't hold cc.mu while closing the balancers. The balancers may call // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex @@ -999,35 +995,28 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.switchBalancer(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { diff --git a/clientconn_state_transition_test.go b/clientconn_state_transition_test.go index 2090c8de689b..0944e8434d79 100644 --- a/clientconn_state_transition_test.go +++ b/clientconn_state_transition_test.go @@ -20,6 +20,7 @@ package grpc import ( "context" + "fmt" "net" "sync" "testing" @@ -28,6 +29,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" @@ -141,9 
+143,6 @@ client enters TRANSIENT FAILURE.`, } func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, server func(net.Listener) net.Conn) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - pl := testutils.NewPipeListener() defer pl.Close() @@ -156,10 +155,9 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s connMu.Unlock() }() - client, err := DialContext(ctx, - "", - WithInsecure(), - WithBalancerName(stateRecordingBalancerName), + client, err := Dial("", + WithTransportCredentials(insecure.NewCredentials()), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), WithDialer(pl.Dialer()), withBackoff(noBackoff{}), withMinConnectDeadline(func() time.Duration { return time.Millisecond * 100 })) @@ -170,12 +168,9 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s go stayConnected(client) stateNotifications := testBalancerBuilder.nextStateNotifier() - - timeout := time.After(5 * time.Second) - for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-time.After(defaultTestTimeout): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen != want[i] { @@ -196,16 +191,6 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s // When a READY connection is closed, the client enters IDLE then CONNECTING. func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { - want := []connectivity.State{ - connectivity.Connecting, - connectivity.Ready, - connectivity.Idle, - connectivity.Connecting, - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. 
Err: %v", err) @@ -237,7 +222,9 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { conn.Close() }() - client, err := DialContext(ctx, lis.Addr().String(), WithInsecure(), WithBalancerName(stateRecordingBalancerName)) + client, err := Dial(lis.Addr().String(), + WithTransportCredentials(insecure.NewCredentials()), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName))) if err != nil { t.Fatal(err) } @@ -246,11 +233,15 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { stateNotifications := testBalancerBuilder.nextStateNotifier() - timeout := time.After(5 * time.Second) - + want := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + } for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-time.After(defaultTestTimeout): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen == connectivity.Ready { @@ -266,14 +257,6 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { // When the first connection is closed, the client stays in CONNECTING until it // tries the second address (which succeeds, and then it enters READY). func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) { - want := []connectivity.State{ - connectivity.Connecting, - connectivity.Ready, - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. 
Err: %v", err) @@ -324,19 +307,25 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) - client, err := DialContext(ctx, "whatever:///this-gets-overwritten", WithInsecure(), WithBalancerName(stateRecordingBalancerName), WithResolvers(rb)) + client, err := Dial("whatever:///this-gets-overwritten", + WithTransportCredentials(insecure.NewCredentials()), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + WithResolvers(rb)) if err != nil { t.Fatal(err) } defer client.Close() stateNotifications := testBalancerBuilder.nextStateNotifier() - - timeout := time.After(5 * time.Second) - + want := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-ctx.Done(): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen != want[i] { @@ -345,12 +334,12 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) } } select { - case <-timeout: + case <-ctx.Done(): t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 1") case <-server1Done: } select { - case <-timeout: + case <-ctx.Done(): t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 2") case <-server2Done: } @@ -359,16 +348,6 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) // When there are multiple addresses, and we enter READY on one of them, a // later closure should cause the client to enter CONNECTING func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { - want := []connectivity.State{ - connectivity.Connecting, - connectivity.Ready, - 
connectivity.Idle, - connectivity.Connecting, - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) @@ -414,7 +393,10 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) - client, err := DialContext(ctx, "whatever:///this-gets-overwritten", WithInsecure(), WithBalancerName(stateRecordingBalancerName), WithResolvers(rb)) + client, err := Dial("whatever:///this-gets-overwritten", + WithTransportCredentials(insecure.NewCredentials()), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + WithResolvers(rb)) if err != nil { t.Fatal(err) } @@ -422,12 +404,17 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { go stayConnected(client) stateNotifications := testBalancerBuilder.nextStateNotifier() - - timeout := time.After(2 * time.Second) - + want := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-ctx.Done(): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen == connectivity.Ready { @@ -439,7 +426,7 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { } } select { - case <-timeout: + case <-ctx.Done(): t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 1") case <-server1Done: } diff --git a/clientconn_test.go b/clientconn_test.go index ee39370a87fb..80547f51037a 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -33,6 +33,7 @@ import ( 
"google.golang.org/grpc/backoff" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" @@ -69,7 +70,7 @@ func (s) TestDialWithTimeout(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{lisAddr}}) - client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithTimeout(5*time.Second)) + client, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithTimeout(5*time.Second)) close(dialDone) if err != nil { t.Fatalf("Dial failed. Err: %v", err) @@ -121,7 +122,7 @@ func (s) TestDialWithMultipleBackendsNotSendingServerPreface(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{lis1Addr, lis2Addr}}) - client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + client, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("Dial failed. Err: %v", err) } @@ -171,7 +172,7 @@ func (s) TestDialWaitsForServerSettings(t *testing.T) { }() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - client, err := DialContext(ctx, lis.Addr().String(), WithInsecure(), WithBlock()) + client, err := DialContext(ctx, lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials()), WithBlock()) close(dialDone) if err != nil { t.Fatalf("Error while dialing. 
Err: %v", err) @@ -209,7 +210,7 @@ func (s) TestDialWaitsForServerSettingsAndFails(t *testing.T) { defer cancel() client, err := DialContext(ctx, lis.Addr().String(), - WithInsecure(), + WithTransportCredentials(insecure.NewCredentials()), WithReturnConnectionError(), withBackoff(noBackoff{}), withMinConnectDeadline(func() time.Duration { return time.Second / 4 })) @@ -286,7 +287,7 @@ func (s) TestCloseConnectionWhenServerPrefaceNotReceived(t *testing.T) { break } }() - client, err := Dial(lis.Addr().String(), WithInsecure(), withMinConnectDeadline(func() time.Duration { return time.Millisecond * 500 })) + client, err := Dial(lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials()), withMinConnectDeadline(func() time.Duration { return time.Millisecond * 500 })) if err != nil { t.Fatalf("Error while dialing. Err: %v", err) } @@ -342,7 +343,7 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { prevAt = meow } }() - cc, err := Dial(lis.Addr().String(), WithInsecure()) + cc, err := Dial(lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Error while dialing. 
Err: %v", err) } @@ -352,7 +353,10 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { } func (s) TestWithTimeout(t *testing.T) { - conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure()) + conn, err := Dial("passthrough:///Non-Existent.Server:80", + WithTimeout(time.Millisecond), + WithBlock(), + WithTransportCredentials(insecure.NewCredentials())) if err == nil { conn.Close() } @@ -439,8 +443,8 @@ func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) { {Addr: lis2.Addr().String()}, }}) client, err := DialContext(ctx, "whatever:///this-gets-overwritten", - WithInsecure(), - WithBalancerName(stateRecordingBalancerName), + WithTransportCredentials(insecure.NewCredentials()), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), WithResolvers(rb), withMinConnectDeadline(getMinConnectTimeout)) if err != nil { @@ -466,7 +470,7 @@ func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) { func (s) TestDialContextCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure()); err != context.Canceled { + if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithTransportCredentials(insecure.NewCredentials())); err != context.Canceled { t.Fatalf("DialContext(%v, _) = _, %v, want _, %v", ctx, err, context.Canceled) } } @@ -484,7 +488,7 @@ func (s) TestDialContextFailFast(t *testing.T) { return nil, failErr } - _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure(), WithDialer(dialer), FailOnNonTempDialError(true)) + _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithTransportCredentials(insecure.NewCredentials()), WithDialer(dialer), FailOnNonTempDialError(true)) if terr, ok := err.(transport.ConnectionError); !ok || terr.Origin() != failErr { t.Fatalf("DialContext() = _, 
%v, want _, %v", err, failErr) } @@ -529,7 +533,7 @@ func (s) TestCredentialsMisuse(t *testing.T) { // Use of perRPC creds requiring transport security over an insecure // transport must fail. - if _, err := Dial("passthrough:///Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithInsecure()); err != errTransportCredentialsMissing { + if _, err := Dial("passthrough:///Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithTransportCredentials(insecure.NewCredentials())); err != errTransportCredentialsMissing { t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredentialsMissing) } @@ -573,7 +577,7 @@ func (s) TestWithConnectParams(t *testing.T) { } func testBackoffConfigSet(t *testing.T, wantBackoff internalbackoff.Exponential, opts ...DialOption) { - opts = append(opts, WithInsecure()) + opts = append(opts, WithTransportCredentials(insecure.NewCredentials())) conn, err := Dial("passthrough:///foo:80", opts...) if err != nil { t.Fatalf("unexpected error dialing connection: %v", err) @@ -597,7 +601,7 @@ func testBackoffConfigSet(t *testing.T, wantBackoff internalbackoff.Exponential, func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) { // Default value specified for minConnectTimeout in the spec is 20 seconds. 
mct := 1 * time.Minute - conn, err := Dial("passthrough:///foo:80", WithInsecure(), WithConnectParams(ConnectParams{MinConnectTimeout: mct})) + conn, err := Dial("passthrough:///foo:80", WithTransportCredentials(insecure.NewCredentials()), WithConnectParams(ConnectParams{MinConnectTimeout: mct})) if err != nil { t.Fatalf("unexpected error dialing connection: %v", err) } @@ -611,7 +615,7 @@ func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) { func (s) TestResolverServiceConfigBeforeAddressNotPanic(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -628,7 +632,7 @@ func (s) TestResolverServiceConfigWhileClosingNotPanic(t *testing.T) { for i := 0; i < 10; i++ { // Run this multiple times to make sure it doesn't panic. 
r := manual.NewBuilderWithScheme(fmt.Sprintf("whatever-%d", i)) - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -641,7 +645,7 @@ func (s) TestResolverServiceConfigWhileClosingNotPanic(t *testing.T) { func (s) TestResolverEmptyUpdateNotPanic(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -691,7 +695,7 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { addr := lis.Addr().String() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - cc, err := DialContext(ctx, addr, WithBlock(), WithInsecure(), WithKeepaliveParams(keepalive.ClientParameters{ + cc, err := DialContext(ctx, addr, WithBlock(), WithTransportCredentials(insecure.NewCredentials()), WithKeepaliveParams(keepalive.ClientParameters{ Time: 10 * time.Second, Timeout: 100 * time.Millisecond, PermitWithoutStream: true, @@ -720,7 +724,7 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { func (s) TestDisableServiceConfigOption(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") addr := r.Scheme() + ":///non.existent" - cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDisableServiceConfig()) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithDisableServiceConfig()) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -747,7 +751,7 @@ func (s) TestDisableServiceConfigOption(t *testing.T) { func (s) TestMethodConfigDefaultService(t *testing.T) { addr := "nonexist:///non.existent" - cc, err := 
Dial(addr, WithInsecure(), WithDefaultServiceConfig(`{ + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [ { @@ -770,7 +774,7 @@ func (s) TestMethodConfigDefaultService(t *testing.T) { func (s) TestGetClientConnTarget(t *testing.T) { addr := "nonexist:///non.existent" - cc, err := Dial(addr, WithInsecure()) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -796,7 +800,7 @@ func (s) TestResetConnectBackoff(t *testing.T) { dials <- struct{}{} return nil, errors.New("failed to fake dial") } - cc, err := Dial("any", WithInsecure(), WithDialer(dialer), withBackoff(backoffForever{})) + cc, err := Dial("any", WithTransportCredentials(insecure.NewCredentials()), WithDialer(dialer), withBackoff(backoffForever{})) if err != nil { t.Fatalf("Dial() = _, %v; want _, nil", err) } @@ -825,7 +829,7 @@ func (s) TestResetConnectBackoff(t *testing.T) { func (s) TestBackoffCancel(t *testing.T) { dialStrCh := make(chan string) - cc, err := Dial("any", WithInsecure(), WithDialer(func(t string, _ time.Duration) (net.Conn, error) { + cc, err := Dial("any", WithTransportCredentials(insecure.NewCredentials()), WithDialer(func(t string, _ time.Duration) (net.Conn, error) { dialStrCh <- t return nil, fmt.Errorf("test dialer, always error") })) @@ -956,10 +960,10 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { rb.InitialState(resolver.State{Addresses: addrsList}) client, err := Dial("whatever:///this-gets-overwritten", - WithInsecure(), + WithTransportCredentials(insecure.NewCredentials()), WithResolvers(rb), withBackoff(noBackoff{}), - WithBalancerName(stateRecordingBalancerName), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), withMinConnectDeadline(func() time.Duration { return time.Hour })) if err != nil { t.Fatal(err) @@ -1044,14 
+1048,14 @@ func verifyWaitForReadyEqualsTrue(cc *ClientConn) bool { } func testInvalidDefaultServiceConfig(t *testing.T) { - _, err := Dial("fake.com", WithInsecure(), WithDefaultServiceConfig("")) + _, err := Dial("fake.com", WithTransportCredentials(insecure.NewCredentials()), WithDefaultServiceConfig("")) if !strings.Contains(err.Error(), invalidDefaultServiceConfigErrPrefix) { t.Fatalf("Dial got err: %v, want err contains: %v", err, invalidDefaultServiceConfigErrPrefix) } } func testDefaultServiceConfigWhenResolverServiceConfigDisabled(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithDisableServiceConfig(), WithResolvers(r), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithDisableServiceConfig(), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1067,7 +1071,7 @@ func testDefaultServiceConfigWhenResolverServiceConfigDisabled(t *testing.T, r * } func testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1081,7 +1085,7 @@ func testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t *testing.T } func testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } diff --git 
a/dialoptions.go b/dialoptions.go index bdfc200e3bb2..e7ac15ce4c7b 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -20,12 +20,10 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -46,18 +44,16 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool @@ -196,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. 
-func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // diff --git a/test/balancer_test.go b/test/balancer_test.go index 8fe1db32658b..113fbaceafbc 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -153,7 +153,7 @@ func (s) TestCredsBundleFromBalancer(t *testing.T) { te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: ""}) te.tapHandle = authHandle te.customDialOptions = []grpc.DialOption{ - grpc.WithBalancerName(testBalancerName), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName)), } creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) if err != nil { @@ -188,7 +188,7 @@ func testPickExtraMetadata(t *testing.T, e env) { ) te.customDialOptions = []grpc.DialOption{ - grpc.WithBalancerName(testBalancerName), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName)), grpc.WithUserAgent(testUserAgent), } te.startServer(&testServer{security: e.security}) @@ -236,7 +236,7 @@ func testDoneInfo(t *testing.T, e env) { b := &testBalancer{} balancer.Register(b) te.customDialOptions = []grpc.DialOption{ - grpc.WithBalancerName(testBalancerName), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName)), } te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) @@ -315,7 +315,7 @@ func testDoneLoads(t *testing.T) { return &testpb.Empty{}, nil }, } - if err := ss.Start(nil, grpc.WithBalancerName(testBalancerName)); err != nil { + if err := ss.Start(nil, 
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName))); err != nil { t.Fatalf("error starting testing server: %v", err) } defer ss.Stop() @@ -393,8 +393,10 @@ func (s) TestNonGRPCLBBalancerGetsNoGRPCLBAddress(t *testing.T) { b := newTestBalancerKeepAddresses() balancer.Register(b) - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r), - grpc.WithBalancerName(b.Name())) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, b.Name()))) if err != nil { t.Fatalf("failed to dial: %v", err) } diff --git a/test/end2end_test.go b/test/end2end_test.go index cdf0434a1217..f5ca011a39cc 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -812,7 +812,7 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) scheme = te.resolverScheme + ":///" } if te.e.balancer != "" { - opts = append(opts, grpc.WithBalancerName(te.e.balancer)) + opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, te.e.balancer))) } if te.clientInitialWindowSize > 0 { opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize)) diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index 247ffea7c3c1..abff2f56c438 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -49,7 +49,7 @@ func newTestHealthServer() *testHealthServer { return newTestHealthServerWithWatchFunc(defaultWatchFunc) } -func newTestHealthServerWithWatchFunc(f func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error) *testHealthServer { +func newTestHealthServerWithWatchFunc(f healthWatchFunc) *testHealthServer { return &testHealthServer{ watchFunc: f, update: make(chan struct{}, 
1), @@ -83,9 +83,11 @@ func defaultWatchFunc(s *testHealthServer, in *healthpb.HealthCheckRequest, stre return nil } +type healthWatchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error + type testHealthServer struct { healthpb.UnimplementedHealthServer - watchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error + watchFunc healthWatchFunc mu sync.Mutex status map[string]healthpb.HealthCheckResponse_ServingStatus update chan struct{} @@ -125,25 +127,26 @@ func setupHealthCheckWrapper() (hcEnterChan chan struct{}, hcExitChan chan struc return } -type svrConfig struct { - specialWatchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error -} +func setupServer(t *testing.T, watchFunc healthWatchFunc) (*grpc.Server, net.Listener, *testHealthServer) { + t.Helper() -func setupServer(sc *svrConfig) (s *grpc.Server, lis net.Listener, ts *testHealthServer, deferFunc func(), err error) { - s = grpc.NewServer() - lis, err = net.Listen("tcp", "localhost:0") + lis, err := net.Listen("tcp", "localhost:0") if err != nil { - return nil, nil, nil, func() {}, fmt.Errorf("failed to listen due to err %v", err) + t.Fatalf("net.Listen() failed: %v", err) } - if sc.specialWatchFunc != nil { - ts = newTestHealthServerWithWatchFunc(sc.specialWatchFunc) + + var ts *testHealthServer + if watchFunc != nil { + ts = newTestHealthServerWithWatchFunc(watchFunc) } else { ts = newTestHealthServer() } + s := grpc.NewServer() healthgrpc.RegisterHealthServer(s, ts) testpb.RegisterTestServiceServer(s, &testServer{}) go s.Serve(lis) - return s, lis, ts, s.Stop, nil + t.Cleanup(func() { s.Stop() }) + return s, lis, ts } type clientConfig struct { @@ -152,28 +155,34 @@ type clientConfig struct { extraDialOption []grpc.DialOption } -func setupClient(c *clientConfig) (cc *grpc.ClientConn, r *manual.Resolver, deferFunc func(), err error) { - r = 
manual.NewBuilderWithScheme("whatever") - var opts []grpc.DialOption - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r), grpc.WithBalancerName(c.balancerName)) - if c.testHealthCheckFuncWrapper != nil { - opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper)) +func setupClient(t *testing.T, c *clientConfig) (*grpc.ClientConn, *manual.Resolver) { + t.Helper() + + r := manual.NewBuilderWithScheme("whatever") + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + } + if c != nil { + if c.balancerName != "" { + opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, c.balancerName))) + } + if c.testHealthCheckFuncWrapper != nil { + opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper)) + } + opts = append(opts, c.extraDialOption...) } - opts = append(opts, c.extraDialOption...) - cc, err = grpc.Dial(r.Scheme()+":///test.server", opts...) - if err != nil { - return nil, nil, nil, fmt.Errorf("dial failed due to err: %v", err) + cc, err := grpc.Dial(r.Scheme()+":///test.server", opts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) } - return cc, r, func() { cc.Close() }, nil + t.Cleanup(func() { cc.Close() }) + return cc, r } func (s) TestHealthCheckWatchStateChange(t *testing.T) { - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } + _, lis, ts := setupServer(t, nil) // The table below shows the expected series of addrConn connectivity transitions when server // updates its health status. 
As there's only one addrConn corresponds with the ClientConn in this @@ -189,20 +198,17 @@ func (s) TestHealthCheckWatchStateChange(t *testing.T) { //+------------------------------+-------------------------------------------+ ts.SetServingStatus("foo", healthpb.HealthCheckResponse_NOT_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + cc, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok { t.Fatal("ClientConn is still in IDLE state when the context times out.") @@ -258,22 +264,18 @@ func (s) TestHealthCheckHealthServerNotRegistered(t *testing.T) { go s.Serve(lis) defer s.Stop() - cc, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + cc, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok { t.Fatal("ClientConn is still in IDLE state when the context times out.") } @@ -288,37 +290,23 @@ func (s) TestHealthCheckHealthServerNotRegistered(t *testing.T) { // In the case of a goaway received, the health check stream should 
be terminated and health check // function should exit. func (s) TestHealthCheckWithGoAway(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - s, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + s, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // make some rpcs to make sure connection is working. 
if err := verifyResultWithDelay(func() (bool, error) { if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -380,33 +368,19 @@ func (s) TestHealthCheckWithGoAway(t *testing.T) { } func (s) TestHealthCheckWithConnClose(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - s, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + s, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) tc := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -444,30 +418,17 @@ func (s) TestHealthCheckWithConnClose(t *testing.T) { // addrConn drain happens when addrConn gets torn down due to its address being no longer in the // address list returned by the resolver. 
func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + _, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) tc := testpb.NewTestServiceClient(cc) sc := parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, @@ -537,32 +498,19 @@ func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) { // ClientConn close will lead to its addrConns being torn down. 
func (s) TestHealthCheckWithClientConnClose(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + _, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -602,39 +550,25 @@ func (s) TestHealthCheckWithClientConnClose(t *testing.T) { // closes the skipReset channel(since it has not been closed inside health check func) to unblock // onGoAway/onClose goroutine. func (s) TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - _, lis, ts, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - if in.Service != "delay" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be \"delay\"") - } - // Do nothing to mock a delay of health check response from server side. 
- // This case is to help with the test that covers the condition that setConnectivityState is not - // called inside HealthCheckFunc before the func returns. - select { - case <-stream.Context().Done(): - case <-time.After(5 * time.Second): - } - return nil - }, - }) - defer deferFunc() - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "delay" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"delay\"") + } + // Do nothing to mock a delay of health check response from server side. + // This case is to help with the test that covers the condition that setConnectivityState is not + // called inside HealthCheckFunc before the func returns. + select { + case <-stream.Context().Done(): + case <-time.After(5 * time.Second): + } + return nil } - + _, lis, ts := setupServer(t, watchFunc) ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) - _, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + _, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until @@ -642,7 +576,8 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *tes sc := parseCfg(r, `{ "healthCheckConfig": { "serviceName": "delay" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, @@ -678,39 +613,25 @@ func (s) 
TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *tes // closes the allowedToReset channel(since it has not been closed inside health check func) to unblock // onGoAway/onClose goroutine. func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - s, lis, ts, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - if in.Service != "delay" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be \"delay\"") - } - // Do nothing to mock a delay of health check response from server side. - // This case is to help with the test that covers the condition that setConnectivityState is not - // called inside HealthCheckFunc before the func returns. - select { - case <-stream.Context().Done(): - case <-time.After(5 * time.Second): - } - return nil - }, - }) - defer deferFunc() - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "delay" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"delay\"") + } + // Do nothing to mock a delay of health check response from server side. + // This case is to help with the test that covers the condition that setConnectivityState is not + // called inside HealthCheckFunc before the func returns. 
+ select { + case <-stream.Context().Done(): + case <-time.After(5 * time.Second): + } + return nil } - + s, lis, ts := setupServer(t, watchFunc) ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) - _, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + _, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until @@ -720,7 +641,8 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "delay" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) select { @@ -750,25 +672,18 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { func testHealthCheckDisableWithDialOption(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", + cc, r := setupClient(t, &clientConfig{ testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, extraDialOption: []grpc.DialOption{grpc.WithDisableHealthCheck()}, }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - tc := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -792,24 +707,17 @@ func testHealthCheckDisableWithDialOption(t *testing.T, addr string) 
{ func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "pick_first", + cc, r := setupClient(t, &clientConfig{ testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - tc := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"pick_first":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -833,18 +741,8 @@ func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { func testHealthCheckDisableWithServiceConfig(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) tc := testpb.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -867,11 +765,7 @@ func testHealthCheckDisableWithServiceConfig(t *testing.T, addr string) { } func (s) TestHealthCheckDisable(t *testing.T) { - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } + _, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) // test client side disabling configuration. 
@@ -881,32 +775,23 @@ func (s) TestHealthCheckDisable(t *testing.T) { } func (s) TestHealthCheckChannelzCountingCallSuccess(t *testing.T) { - _, lis, _, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - if in.Service != "channelzSuccess" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be \"channelzSuccess\"") - } - return status.Error(codes.OK, "fake success") - }, - }) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - - _, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "channelzSuccess" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"channelzSuccess\"") + } + return status.Error(codes.OK, "fake success") } - defer deferFunc() + _, lis, _ := setupServer(t, watchFunc) + _, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "channelzSuccess" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) if err := verifyResultWithDelay(func() (bool, error) { @@ -937,32 +822,23 @@ func (s) TestHealthCheckChannelzCountingCallSuccess(t *testing.T) { } func (s) TestHealthCheckChannelzCountingCallFailure(t *testing.T) { - _, lis, _, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - if in.Service != "channelzFailure" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be 
\"channelzFailure\"") - } - return status.Error(codes.Internal, "fake failure") - }, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - _, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "channelzFailure" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"channelzFailure\"") + } + return status.Error(codes.Internal, "fake failure") } - defer deferFunc() + _, lis, _ := setupServer(t, watchFunc) + _, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "channelzFailure" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) if err := verifyResultWithDelay(func() (bool, error) { From 722367c4a73781eb511fae0ff6386d3f42aca7ba Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 10 Mar 2022 16:29:55 -0800 Subject: [PATCH 445/998] proto: regenerate proto to pick up latest (#5237) --- .../grpc_service_config/service_config.pb.go | 983 +++++++++--------- 1 file changed, 500 insertions(+), 483 deletions(-) diff --git a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go index 30e16d113285..6a18ea793a7e 100644 --- a/internal/proto/grpc_service_config/service_config.pb.go +++ b/internal/proto/grpc_service_config/service_config.pb.go @@ -546,6 +546,9 @@ type GrpcLbConfig struct { // Optional. If specified, overrides the name of the service to be sent to // the balancer. ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // Optional. The timeout in seconds for receiving the server list from the LB + // server. 
Defaults to 10s. + InitialFallbackTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=initial_fallback_timeout,json=initialFallbackTimeout,proto3" json:"initial_fallback_timeout,omitempty"` } func (x *GrpcLbConfig) Reset() { @@ -594,6 +597,13 @@ func (x *GrpcLbConfig) GetServiceName() string { return "" } +func (x *GrpcLbConfig) GetInitialFallbackTimeout() *durationpb.Duration { + if x != nil { + return x.InitialFallbackTimeout + } + return nil +} + // Configuration for priority LB policy. type PriorityLoadBalancingPolicyConfig struct { state protoimpl.MessageState @@ -2951,436 +2961,442 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x22, 0x7e, 0x0a, - 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, - 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x03, - 0x0a, 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 
0x6c, 0x75, 0x6d, 0x65, 0x22, 0xd3, 0x01, + 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, + 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x53, + 0x0a, 0x18, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, + 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x22, 0xae, 0x03, 0x0a, 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 
- 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, - 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, - 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, - 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x72, 0x65, - 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, - 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, - 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, - 0x02, 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, + 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, + 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x5f, 0x72, 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 
0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, + 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, - 0x6d, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, - 0x0a, 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xf2, 0x02, 0x0a, 0x2a, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, - 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x54, 0x0a, 0x05, 0x43, 0x68, 0x69, - 0x6c, 0x64, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, - 0x82, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xa1, 0x02, 0x0a, 0x09, - 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x12, 0x57, 0x0a, 0x0d, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, - 0x64, 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 
0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x12, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2f, - 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, - 0x9d, 0x07, 0x0a, 0x2b, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x86, 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x02, 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, + 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, - 0x69, 0x73, 0x6d, 0x52, 0x13, 
0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x4c, 0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, - 0x6c, 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x96, 0x05, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, - 0x19, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x52, 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 
0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, - 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, - 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, - 0x6d, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, - 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6e, - 0x73, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x65, 0x0a, 0x11, 0x6f, 0x75, 0x74, - 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 
0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, - 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, - 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x01, 0x12, 0x0f, - 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x22, - 0xa3, 0x05, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, - 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 
0x0a, 0x19, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, - 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, - 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, 0x0a, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, - 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x72, - 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, - 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, - 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x5c, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x43, - 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, - 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, - 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, - 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x69, - 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, 0x0a, 0x1c, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, - 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x53, 
0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x17, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, - 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, - 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x69, 0x6e, 0x67, 0x53, - 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, - 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa6, 0x03, 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 
0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, - 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, - 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, - 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 
0x50, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, - 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, - 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, - 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, - 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x6d, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 
0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, - 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0x86, 0x0c, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, - 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 
0x73, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, - 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, - 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, - 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x68, - 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, - 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, - 0x6c, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, - 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, - 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, - 0x69, 0x74, 0x79, 
0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, - 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, - 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, - 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 
0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, - 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, 0x0a, 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf2, 0x02, 0x0a, 0x2a, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 
0x6e, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, + 0x54, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x82, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, + 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, + 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x22, 0xa1, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, + 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x12, + 0x57, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x12, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9d, 0x07, 0x0a, 0x2b, 0x58, 
0x64, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x86, 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, - 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, - 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, - 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x13, 0x64, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x4c, + 0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, 0x6c, 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 
0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x96, 0x05, 0x0a, + 0x12, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, + 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x64, 0x0a, + 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, + 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, + 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, + 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6e, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x65, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 
0x65, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x45, + 0x44, 0x53, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, + 0x44, 0x4e, 0x53, 0x10, 0x02, 0x22, 0xa3, 0x05, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, - 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, - 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, - 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, - 0x00, 0x52, 0x10, 0x6c, 0x72, 
0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, + 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, + 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x6c, + 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 
0x6f, 0x72, 0x74, 0x69, 0x6e, + 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x16, + 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, 0x0a, 0x0f, + 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, - 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, - 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, - 0x0d, 
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, - 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, - 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, + 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, + 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x5c, 0x0a, + 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, + 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 
0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x50, 0x65, 0x72, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, 0x0a, 0x1c, + 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, + 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 
0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x15, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, + 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x69, + 0x6e, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x61, 0x78, + 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa6, 0x03, + 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x1e, 0x6c, + 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, + 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 
0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, - 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, + 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, + 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 
0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, + 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, + 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x86, 0x0c, 0x0a, 0x13, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, + 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, + 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, + 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 
0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, + 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x68, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, + 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, + 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, + 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, + 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 
0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, + 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, + 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, + 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, + 0x01, 0x0a, 
0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, + 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, + 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, - 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 
0x6e, - 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, - 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, - 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, - 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, + 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, + 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, + 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, + 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 
0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, + 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, + 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, + 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, + 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, + 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3459,77 +3475,78 @@ var file_grpc_service_config_service_config_proto_depIdxs = []int32{ 24, // 12: grpc.service_config.OutlierDetectionLoadBalancingConfig.failure_percentage_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection 18, // 13: grpc.service_config.OutlierDetectionLoadBalancingConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig 18, // 14: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 26, // 15: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 28, // 16: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 30, // 17: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - 31, // 18: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials - 40, // 19: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value - 32, // 20: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - 18, // 21: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig - 41, // 22: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 39, // 24: 
grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 33, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - 18, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 41, // 27: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 18, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 34, // 30: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 18, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 32: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 33: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 41, // 34: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 3, // 35: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig - 4, // 36: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig - 5, // 37: grpc.service_config.LoadBalancingConfig.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig - 6, // 38: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig - 7, // 39: 
grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig - 8, // 40: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 9, // 41: grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig - 10, // 42: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig - 12, // 43: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - 13, // 44: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - 15, // 45: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig - 16, // 46: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig - 14, // 47: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 17, // 48: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 17, // 49: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig - 1, // 50: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 18, // 51: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 52: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 35, // 53: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 36, // 54: 
grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 38, // 55: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 38, // 56: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 42, // 57: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 38, // 58: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 42, // 59: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 39, // 60: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value - 39, // 61: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 39, // 62: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 39, // 63: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> google.protobuf.UInt32Value - 39, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value - 39, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 39, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 39, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value - 18, // 68: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 
25, // 69: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 18, // 70: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 27, // 71: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 18, // 72: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 29, // 73: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - 43, // 74: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct - 41, // 75: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 76: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 39, // 77: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 0, // 78: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type - 5, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig - 41, // 80: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 81, // [81:81] is the sub-list for method output_type - 81, // [81:81] is the sub-list for 
method input_type - 81, // [81:81] is the sub-list for extension type_name - 81, // [81:81] is the sub-list for extension extendee - 0, // [0:81] is the sub-list for field type_name + 38, // 15: grpc.service_config.GrpcLbConfig.initial_fallback_timeout:type_name -> google.protobuf.Duration + 26, // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + 28, // 17: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + 30, // 18: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + 31, // 19: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials + 40, // 20: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value + 32, // 21: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + 18, // 22: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig + 41, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 11, // 24: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 39, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 33, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + 18, // 27: 
grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 41, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 18, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 18, // 30: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 34, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + 18, // 32: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 18, // 33: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 18, // 34: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig + 41, // 35: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 3, // 36: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig + 4, // 37: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig + 5, // 38: grpc.service_config.LoadBalancingConfig.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig + 6, // 39: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig + 7, // 40: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig + 8, // 41: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig + 9, // 42: 
grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig + 10, // 43: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig + 12, // 44: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + 13, // 45: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + 15, // 46: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig + 16, // 47: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig + 14, // 48: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig + 17, // 49: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig + 17, // 50: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig + 1, // 51: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy + 18, // 52: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig + 2, // 53: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig + 35, // 54: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy + 36, // 55: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig + 38, // 56: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration + 38, // 57: 
grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration + 42, // 58: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code + 38, // 59: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration + 42, // 60: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code + 39, // 61: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value + 39, // 62: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 39, // 63: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 39, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> google.protobuf.UInt32Value + 39, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value + 39, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 39, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 39, // 68: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value + 18, // 69: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig + 25, // 70: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + 18, // 71: 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 27, // 72: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + 18, // 73: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 29, // 74: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + 43, // 75: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct + 41, // 76: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 11, // 77: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 39, // 78: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 0, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type + 5, // 80: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig + 41, // 81: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue + 82, // [82:82] is the sub-list for method output_type + 82, // [82:82] is the sub-list for method input_type + 82, // [82:82] is the sub-list for extension type_name + 82, // [82:82] is the sub-list for extension extendee + 0, // [0:82] is the sub-list for field 
type_name } func init() { file_grpc_service_config_service_config_proto_init() } From 84793b56f63c7eec98ab3fd3e16b300d6d96beb2 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 14 Mar 2022 16:04:34 -0700 Subject: [PATCH 446/998] xds/client: reset backoff when a message is received, even when the message is invalid (#5241) --- xds/internal/xdsclient/controller/transport.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/xds/internal/xdsclient/controller/transport.go b/xds/internal/xdsclient/controller/transport.go index 0b982b0d7057..e97717d974c6 100644 --- a/xds/internal/xdsclient/controller/transport.go +++ b/xds/internal/xdsclient/controller/transport.go @@ -188,17 +188,17 @@ func (t *Controller) sendExisting(stream grpc.ClientStream) bool { // recv receives xDS responses on the provided ADS stream and branches out to // message specific handlers. func (t *Controller) recv(stream grpc.ClientStream) bool { - success := false + msgReceived := false for { resp, err := t.vClient.RecvResponse(stream) if err != nil { t.updateHandler.NewConnectionError(err) t.logger.Warningf("ADS stream is closed with error: %v", err) - return success + return msgReceived } + msgReceived = true rType, version, nonce, err := t.handleResponse(resp) - if e, ok := err.(xdsresourceversion.ErrResourceTypeUnsupported); ok { t.logger.Warningf("%s", e.ErrStr) continue @@ -221,7 +221,6 @@ func (t *Controller) recv(stream grpc.ClientStream) bool { stream: stream, }) t.logger.Infof("Sending ACK for response type: %v, version: %v, nonce: %v", rType, version, nonce) - success = true } } From 23cc28fd6367517e5203626202fce8dd07efd267 Mon Sep 17 00:00:00 2001 From: Tristan Swadell Date: Tue, 15 Mar 2022 14:50:46 -0700 Subject: [PATCH 447/998] security/authorization: upgrade cel-v0.10.1 and fix breaking API change. 
(#5243) --- security/authorization/engine/engine_test.go | 5 + security/authorization/go.mod | 12 +- security/authorization/go.sum | 117 +++++++++++++++---- 3 files changed, 106 insertions(+), 28 deletions(-) diff --git a/security/authorization/engine/engine_test.go b/security/authorization/engine/engine_test.go index c159c4bd5c21..1fcff698aa2b 100644 --- a/security/authorization/engine/engine_test.go +++ b/security/authorization/engine/engine_test.go @@ -17,6 +17,7 @@ package engine import ( + "context" "reflect" "sort" "testing" @@ -48,6 +49,10 @@ func (fake fakeProgram) Eval(vars interface{}) (ref.Val, *cel.EvalDetails, error return fake.out, nil, fake.err } +func (fake fakeProgram) ContextEval(ctx context.Context, vars interface{}) (ref.Val, *cel.EvalDetails, error) { + return fake.Eval(vars) +} + type valMock struct { val interface{} } diff --git a/security/authorization/go.mod b/security/authorization/go.mod index ce34742af2c9..fbaa0ada0974 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -3,10 +3,10 @@ module google.golang.org/grpc/security/authorization go 1.14 require ( - github.com/envoyproxy/go-control-plane v0.9.5 - github.com/google/cel-go v0.5.1 - github.com/google/go-cmp v0.5.0 - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/grpc v1.31.0 - google.golang.org/protobuf v1.25.0 + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 + github.com/google/cel-go v0.10.1 + github.com/google/go-cmp v0.5.5 + google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 + google.golang.org/grpc v1.40.0 + google.golang.org/protobuf v1.27.1 ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index 3c7ea6cf47fe..bcd03a229247 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -1,93 +1,166 @@ cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f h1:0cEys61Sr2hUBEXfNV8eyQP01oZuBgoMeHunebPirK8= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= -github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= -github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/google/cel-go v0.5.1 h1:oDsbtAwlwFPEcC8dMoRWNuVzWJUDeDZeHjoet9rXjTs= -github.com/google/cel-go v0.5.1/go.mod h1:9SvtVVTtZV4DTB1/RuAD1D2HhuqEIdmZEE/r/lrFyKE= -github.com/google/cel-spec v0.4.0/go.mod h1:2pBM5cU4UKjbPDXBgwWkiwBsVgnxknuEJ7C5TDWwORQ= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/cel-go v0.10.1 h1:MQBGSZGnDwh7T/un+mzGKOMz3x+4E/GDPprWjDL+1Jg= +github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stoewer/go-strcase 
v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 94ee3865e17c5645148f8d24a32dc50f40810f13 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 15 
Mar 2022 17:01:07 -0700 Subject: [PATCH 448/998] test: cleanup roundrobin tests to use stubserver (#5236) --- balancer/roundrobin/roundrobin_test.go | 591 ------------------------- test/insecure_creds_test.go | 3 - test/roundrobin_test.go | 411 +++++++++++++++++ test/timeouts.go | 29 ++ 4 files changed, 440 insertions(+), 594 deletions(-) delete mode 100644 balancer/roundrobin/roundrobin_test.go create mode 100644 test/roundrobin_test.go create mode 100644 test/timeouts.go diff --git a/balancer/roundrobin/roundrobin_test.go b/balancer/roundrobin/roundrobin_test.go deleted file mode 100644 index 574625642bef..000000000000 --- a/balancer/roundrobin/roundrobin_test.go +++ /dev/null @@ -1,591 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package roundrobin_test - -import ( - "context" - "fmt" - "net" - "strings" - "sync" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpctest" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" -) - -const ( - testMDKey = "test-md" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -type testServer struct { - testpb.UnimplementedTestServiceServer - - testMDChan chan []string -} - -func newTestServer(mdchan bool) *testServer { - t := &testServer{} - if mdchan { - t.testMDChan = make(chan []string, 1) - } - return t -} - -func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - if s.testMDChan == nil { - return &testpb.Empty{}, nil - } - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, status.Errorf(codes.Internal, "no metadata in context") - } - select { - case s.testMDChan <- md[testMDKey]: - case <-ctx.Done(): - return nil, ctx.Err() - } - return &testpb.Empty{}, nil -} - -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { - return nil -} - -type test struct { - servers []*grpc.Server - serverImpls []*testServer - addresses []string -} - -func (t *test) cleanup() { - for _, s := range t.servers { - s.Stop() - } -} - -func startTestServers(count int, mdchan bool) (_ *test, err error) { - t := &test{} - - defer func() { - if err != nil { - t.cleanup() - } - }() - for i := 0; i < count; i++ { - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - return 
nil, fmt.Errorf("failed to listen %v", err) - } - - s := grpc.NewServer() - sImpl := newTestServer(mdchan) - testpb.RegisterTestServiceServer(s, sImpl) - t.servers = append(t.servers, s) - t.serverImpls = append(t.serverImpls, sImpl) - t.addresses = append(t.addresses, lis.Addr().String()) - - go func(s *grpc.Server, l net.Listener) { - s.Serve(l) - }(s, lis) - } - - return t, nil -} - -func (s) TestOneBackend(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1, false) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. 
- if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } -} - -func (s) TestBackendsRoundRobin(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - backendCount := 5 - test, err := startTestServers(backendCount, false) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var resolvedAddrs []resolver.Address - for i := 0; i < backendCount; i++ { - resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) - } - - r.UpdateState(resolver.State{Addresses: resolvedAddrs}) - var p peer.Peer - // Make sure connections to all servers are up. 
- for si := 0; si < backendCount; si++ { - var connected bool - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() == test.addresses[si] { - connected = true - break - } - time.Sleep(time.Millisecond) - } - if !connected { - t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) - } - } - - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } -} - -func (s) TestAddressesRemoved(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1, false) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. 
- if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) - - ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel2() - // Wait for state to change to transient failure. - for src := cc.GetState(); src != connectivity.TransientFailure; src = cc.GetState() { - if !cc.WaitForStateChange(ctx2, src) { - t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.TransientFailure) - } - } - - const msgWant = "produced zero addresses" - if _, err := testc.EmptyCall(ctx2, &testpb.Empty{}); !strings.Contains(status.Convert(err).Message(), msgWant) { - t.Fatalf("EmptyCall() = _, %v, want _, Contains(Message(), %q)", err, msgWant) - } -} - -func (s) TestCloseWithPendingRPC(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1, false) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - testc := testpb.NewTestServiceClient(cc) - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until cc is closed. 
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) == codes.DeadlineExceeded { - t.Errorf("RPC failed because of deadline after cc is closed; want error the client connection is closing") - } - cancel() - }() - } - cc.Close() - wg.Wait() -} - -func (s) TestNewAddressWhileBlocking(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1, false) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. - ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, nil", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until NewAddress is called. 
- testc.EmptyCall(context.Background(), &testpb.Empty{}) - }() - } - time.Sleep(50 * time.Millisecond) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - wg.Wait() -} - -func (s) TestOneServerDown(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - backendCount := 3 - test, err := startTestServers(backendCount, false) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var resolvedAddrs []resolver.Address - for i := 0; i < backendCount; i++ { - resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) - } - - r.UpdateState(resolver.State{Addresses: resolvedAddrs}) - var p peer.Peer - // Make sure connections to all servers are up. 
- for si := 0; si < backendCount; si++ { - var connected bool - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() == test.addresses[si] { - connected = true - break - } - time.Sleep(time.Millisecond) - } - if !connected { - t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) - } - } - - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } - - // Stop one server, RPCs should roundrobin among the remaining servers. - backendCount-- - test.servers[backendCount].Stop() - // Loop until see server[backendCount-1] twice without seeing server[backendCount]. - var targetSeen int - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - targetSeen = 0 - t.Logf("EmptyCall() = _, %v, want _, ", err) - // Due to a race, this RPC could possibly get the connection that - // was closing, and this RPC may fail. Keep trying when this - // happens. - continue - } - switch p.Addr.String() { - case test.addresses[backendCount-1]: - targetSeen++ - case test.addresses[backendCount]: - // Reset targetSeen if peer is server[backendCount]. - targetSeen = 0 - } - // Break to make sure the last picked address is server[-1], so the following for loop won't be flaky. 
- if targetSeen >= 2 { - break - } - } - if targetSeen != 2 { - t.Fatal("Failed to see server[backendCount-1] twice without seeing server[backendCount]") - } - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Errorf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } -} - -func (s) TestAllServersDown(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - backendCount := 3 - test, err := startTestServers(backendCount, false) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var resolvedAddrs []resolver.Address - for i := 0; i < backendCount; i++ { - resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) - } - - r.UpdateState(resolver.State{Addresses: resolvedAddrs}) - var p peer.Peer - // Make sure connections to all servers are up. 
- for si := 0; si < backendCount; si++ { - var connected bool - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() == test.addresses[si] { - connected = true - break - } - time.Sleep(time.Millisecond) - } - if !connected { - t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) - } - } - - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } - - // All servers are stopped, failfast RPC should fail with unavailable. - for i := 0; i < backendCount; i++ { - test.servers[i].Stop() - } - time.Sleep(100 * time.Millisecond) - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.Unavailable { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("Failfast RPCs didn't fail with Unavailable after all servers are stopped") -} - -func (s) TestUpdateAddressAttributes(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1, true) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name))) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // The 
first RPC should fail because there's no address. - ctxShort, cancel2 := context.WithTimeout(ctx, time.Millisecond) - defer cancel2() - if _, err := testc.EmptyCall(ctxShort, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - // The second RPC should not set metadata, so there's no md in the channel. - md1 := <-test.serverImpls[0].testMDChan - if md1 != nil { - t.Fatalf("got md: %v, want empty metadata", md1) - } - - const testMDValue = "test-md-value" - // Update metadata in address. - r.UpdateState(resolver.State{Addresses: []resolver.Address{ - imetadata.Set(resolver.Address{Addr: test.addresses[0]}, metadata.Pairs(testMDKey, testMDValue)), - }}) - - // A future RPC should send metadata with it. The update doesn't - // necessarily happen synchronously, so we wait some time before failing if - // some RPCs do not contain it. 
- for { - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { - if status.Code(err) == codes.DeadlineExceeded { - t.Fatalf("timed out waiting for metadata in response") - } - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - md2 := <-test.serverImpls[0].testMDChan - if len(md2) == 1 && md2[0] == testMDValue { - return - } - time.Sleep(10 * time.Millisecond) - } -} diff --git a/test/insecure_creds_test.go b/test/insecure_creds_test.go index ec1bb41433cf..28e8b7318143 100644 --- a/test/insecure_creds_test.go +++ b/test/insecure_creds_test.go @@ -23,7 +23,6 @@ import ( "net" "strings" "testing" - "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -36,8 +35,6 @@ import ( testpb "google.golang.org/grpc/test/grpc_testing" ) -const defaultTestTimeout = 5 * time.Second - // testLegacyPerRPCCredentials is a PerRPCCredentials that has yet incorporated security level. type testLegacyPerRPCCredentials struct{} diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go new file mode 100644 index 000000000000..557a47f77443 --- /dev/null +++ b/test/roundrobin_test.go @@ -0,0 +1,411 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` + +func statsHandlerDialOption(funcs statsHandlerFuncs) grpc.DialOption { + return grpc.WithStatsHandler(&statsHandler{funcs: funcs}) +} + +type statsHandlerFuncs struct { + TagRPC func(context.Context, *stats.RPCTagInfo) context.Context + HandleRPC func(context.Context, stats.RPCStats) + TagConn func(context.Context, *stats.ConnTagInfo) context.Context + HandleConn func(context.Context, stats.ConnStats) +} + +type statsHandler struct { + funcs statsHandlerFuncs +} + +func (s *statsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + if s.funcs.TagRPC != nil { + return s.funcs.TagRPC(ctx, info) + } + return ctx +} + +func (s *statsHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { + if s.funcs.HandleRPC != nil { + s.funcs.HandleRPC(ctx, stats) + } +} + +func (s *statsHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + if s.funcs.TagConn != nil { + return s.funcs.TagConn(ctx, info) + } + return ctx +} + +func (s *statsHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { + if s.funcs.HandleConn != nil { + s.funcs.HandleConn(ctx, stats) + } +} + +func checkRoundRobin(ctx context.Context, client 
testgrpc.TestServiceClient, addrs []resolver.Address) error { + var peer peer.Peer + // Make sure connections to all backends are up. + backendCount := len(addrs) + for i := 0; i < backendCount; i++ { + for { + time.Sleep(time.Millisecond) + if ctx.Err() != nil { + return fmt.Errorf("timeout waiting for connection to %q to be up", addrs[i].Addr) + } + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + // Some tests remove backends and check if round robin is happening + // across the remaining backends. In such cases, RPCs can initially fail + // on the connection using the removed backend. Just keep retrying and + // eventually the connection using the removed backend will shutdown and + // will be removed. + continue + } + if peer.Addr.String() == addrs[i].Addr { + break + } + } + } + // Make sure RPCs are sent to all backends. + for i := 0; i < 3*backendCount; i++ { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + return fmt.Errorf("EmptyCall() = %v, want ", err) + } + if gotPeer, wantPeer := addrs[i%backendCount].Addr, peer.Addr.String(); gotPeer != wantPeer { + return fmt.Errorf("rpc sent to peer %q, want peer %q", gotPeer, wantPeer) + } + } + return nil +} + +func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer) { + t.Helper() + r := manual.NewBuilderWithScheme("whatever") + + const backendCount = 5 + backends := make([]*stubserver.StubServer, backendCount) + addrs := make([]resolver.Address, backendCount) + for i := 0; i < backendCount; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) 
+ + backends[i] = backend + addrs[i] = resolver.Address{Addr: backend.Address} + } + + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(rrServiceConfig), + } + dopts = append(dopts, opts...) + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + client := testpb.NewTestServiceClient(cc) + + // At this point, the resolver has not returned any addresses to the channel. + // This RPC must block until the context expires. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + r.UpdateState(resolver.State{Addresses: addrs}) + if err := checkRoundRobin(ctx, client, addrs); err != nil { + t.Fatal(err) + } + return cc, r, backends +} + +// TestRoundRobin_Basic tests the most basic scenario for round_robin. It brings +// up a bunch of backends and verifies that RPCs are getting round robin-ed +// across these backends. +func (s) TestRoundRobin_Basic(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testRoundRobinBasic(ctx, t) +} + +// TestRoundRobin_AddressesRemoved tests the scenario where a bunch of backends +// are brought up, and round_robin is configured as the LB policy and RPCs are +// being correctly round robin-ed across these backends. We then send a resolver +// update with no addresses and verify that the channel enters TransientFailure +// and RPCs fail with an expected error message. 
+func (s) TestRoundRobin_AddressesRemoved(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, r, _ := testRoundRobinBasic(ctx, t) + + // Send a resolver update with no addresses. This should push the channel into + // TransientFailure. + r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) + for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timeout waiting for state change. got %v; want %v", state, connectivity.TransientFailure) + } + } + + const msgWant = "produced zero addresses" + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); !strings.Contains(status.Convert(err).Message(), msgWant) { + t.Fatalf("EmptyCall() = %v, want Contains(Message(), %q)", err, msgWant) + } +} + +// TestRoundRobin_NewAddressWhileBlocking tests the case where round_robin is +// configured on a channel, things are working as expected and then a resolver +// updates removes all addresses. An RPC attempted at this point in time will be +// blocked because there are no valid backends. This test verifies that when new +// backends are added, the RPC is able to complete. +func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { + // Register a stats handler which writes to `rpcCh` when an RPC is started. + // The stats handler starts writing to `rpcCh` only after `begin` has fired. + // We are not interested in being notified about initial RPCs which ensure + // that round_robin is working as expected. We are only interested in being + // notified when we have an RPC which is blocked because there are no + // backends, and will become unblocked when the resolver reports new backends. 
+ begin := grpcsync.NewEvent() + rpcCh := make(chan struct{}, 1) + shOption := statsHandlerDialOption(statsHandlerFuncs{ + HandleRPC: func(ctx context.Context, rpcStats stats.RPCStats) { + if !begin.HasFired() { + return + } + select { + case rpcCh <- struct{}{}: + default: + } + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, r, backends := testRoundRobinBasic(ctx, t, shOption) + + // Send a resolver update with no addresses. This should push the channel into + // TransientFailure. + r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) + for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timeout waiting for state change. got %v; want %v", state, connectivity.TransientFailure) + } + } + + begin.Fire() + client := testpb.NewTestServiceClient(cc) + doneCh := make(chan struct{}) + go func() { + // The channel is currently in TransientFailure and this RPC will block + // until the channel becomes Ready, which will only happen when we push a + // resolver update with a valid backend address. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Errorf("EmptyCall() = %v, want ", err) + } + close(doneCh) + }() + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for RPC to start and block") + case <-rpcCh: + } + // Send a resolver update with a valid backend to push the channel to Ready + // and unblock the above RPC. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}}) + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for blocked RPC to complete") + case <-doneCh: + } +} + +// TestRoundRobin_OneServerDown tests the scenario where a channel is configured +// to round robin across a set of backends, and things are working correctly. +// One backend goes down. 
The test verifies that going forward, RPCs are round +// robin-ed across the remaining set of backends. +func (s) TestRoundRobin_OneServerDown(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, _, backends := testRoundRobinBasic(ctx, t) + + // Stop one backend. RPCs should round robin across the remaining backends. + backends[len(backends)-1].Stop() + + addrs := make([]resolver.Address, len(backends)-1) + for i := 0; i < len(backends)-1; i++ { + addrs[i] = resolver.Address{Addr: backends[i].Address} + } + client := testpb.NewTestServiceClient(cc) + if err := checkRoundRobin(ctx, client, addrs); err != nil { + t.Fatalf("RPCs are not being round robined across remaining servers: %v", err) + } +} + +// TestRoundRobin_AllServersDown tests the scenario where a channel is +// configured to round robin across a set of backends, and things are working +// correctly. Then, all backends go down. The test verifies that the channel +// moves to TransientFailure and failfast RPCs fail with Unavailable. +func (s) TestRoundRobin_AllServersDown(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, _, backends := testRoundRobinBasic(ctx, t) + + // Stop all backends. + for _, b := range backends { + b.Stop() + } + + // Wait for TransientFailure. + for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timeout waiting for state change. got %v; want %v", state, connectivity.TransientFailure) + } + } + + // Failfast RPCs should fail with Unavailable. + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.Unavailable { + return + } +} + +// TestRoundRobin_UpdateAddressAttributes tests the scenario where the addresses +// returned by the resolver contain attributes. 
The test verifies that the +// attributes contained in the addresses show up as RPC metadata in the backend. +func (s) TestRoundRobin_UpdateAddressAttributes(t *testing.T) { + const ( + testMDKey = "test-md" + testMDValue = "test-md-value" + ) + r := manual.NewBuilderWithScheme("whatever") + + // Spin up a StubServer to serve as a backend. The implementation verifies + // that the expected metadata is received. + testMDChan := make(chan []string, 1) + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + md, ok := metadata.FromIncomingContext(ctx) + if ok { + select { + case testMDChan <- md[testMDKey]: + case <-ctx.Done(): + return nil, ctx.Err() + } + } + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + + // Dial the backend with round_robin as the LB policy. + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(rrServiceConfig), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Send a resolver update with no address attributes. + addr := resolver.Address{Addr: backend.Address} + r.UpdateState(resolver.State{Addresses: []resolver.Address{addr}}) + + // Make an RPC and ensure it does not contain the metadata we are looking for. 
+ client := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = %v, want ", err) + } + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for metadata received in RPC") + case md := <-testMDChan: + if len(md) != 0 { + t.Fatalf("received metadata %v, want nil", md) + } + } + + // Send a resolver update with address attributes. + addrWithAttributes := imetadata.Set(addr, metadata.Pairs(testMDKey, testMDValue)) + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrWithAttributes}}) + + // Make an RPC and ensure it contains the metadata we are looking for. The + // resolver update isn't processed synchronously, so we wait some time before + // failing if some RPCs do not contain it. +Done: + for { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = %v, want ", err) + } + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for metadata received in RPC") + case md := <-testMDChan: + if len(md) == 1 && md[0] == testMDValue { + break Done + } + } + time.Sleep(defaultTestShortTimeout) + } +} diff --git a/test/timeouts.go b/test/timeouts.go new file mode 100644 index 000000000000..1c0c2123938a --- /dev/null +++ b/test/timeouts.go @@ -0,0 +1,29 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import "time" + +const ( + // Default timeout for tests in this package. + defaultTestTimeout = 10 * time.Second + // Default short timeout, to be used when waiting for events which are not + // expected to happen. + defaultTestShortTimeout = 100 * time.Millisecond +) From c4cabf78f4a24fe377b4a56e0ea7a99e057d2a0d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 16 Mar 2022 12:02:56 -0700 Subject: [PATCH 449/998] grpc: handle invalid service configs by applying the default if available (#5238) --- balancer_conn_wrappers.go | 9 ++++++ clientconn.go | 59 ++++++++++++++++++++++----------------- 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 5eb87a552036..cb4b6728c22b 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -131,11 +131,17 @@ func (ccb *ccBalancerWrapper) watcher() { } func (ccb *ccBalancerWrapper) close() { + if ccb == nil { + return + } ccb.closed.Fire() <-ccb.done.Done() } func (ccb *ccBalancerWrapper) exitIdle() bool { + if ccb == nil { + return true + } if !ccb.hasExitIdle { return false } @@ -168,6 +174,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat } func (ccb *ccBalancerWrapper) resolverError(err error) { + if ccb == nil { + return + } ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() ccb.balancer.ResolverError(err) diff --git a/clientconn.go b/clientconn.go index dd12a14f09a4..e4819ca76b43 100644 --- a/clientconn.go +++ b/clientconn.go @@ -483,9 +483,6 @@ type ClientConn struct { // firstResolveEvent is used to track whether the name resolver sent us at // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - // TODO: Add a goodResolveEvent to track whether the name resolver sent us a - // good update. 
This will be used to determine if a balancer is configured on - // the channel instead of checking for `cc.balancerWrapper != nil`. // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. @@ -542,7 +539,7 @@ func (cc *ClientConn) GetState() connectivity.State { func (cc *ClientConn) Connect() { cc.mu.Lock() defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { + if cc.balancerWrapper.exitIdle() { return } for ac := range cc.conns { @@ -627,9 +624,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -657,18 +652,14 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + if cc.sc == nil { + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, s.Addresses) } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) + cc.applyFailingLB(s.ServiceConfig) + cc.mu.Unlock() + return ret } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) - cc.mu.Unlock() - return ret } } } @@ -700,6 +691,26 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } +// applyFailingLB is akin to 
configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. +// +// Caller must hold cc.mu. +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) + } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) +} + // switchBalancer starts the switching from current balancer to the balancer // with the given name. // @@ -714,14 +725,12 @@ func (cc *ClientConn) switchBalancer(name string) { } channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. - cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } + // Don't hold cc.mu while closing the balancers. The balancers may call + // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex + // would cause a deadlock in that case. 
+ cc.mu.Unlock() + cc.balancerWrapper.close() + cc.mu.Lock() builder := balancer.Get(name) if builder == nil { From fbe4ccbc1ebe0fda37127dc1ad06c8517cbd400b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 16 Mar 2022 16:12:48 -0700 Subject: [PATCH 450/998] xds/client: include Node ID in error messages from the XdsClient (#5223) --- xds/internal/xdsclient/authority.go | 2 +- xds/internal/xdsclient/client_test.go | 9 +++++---- xds/internal/xdsclient/pubsub/pubsub.go | 8 +++++++- xds/internal/xdsclient/pubsub/watch.go | 13 ++++++++++--- 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 1a236849c377..eb1110ecc4c9 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -102,7 +102,7 @@ func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, } // Make a new authority since there's no existing authority for this config. - ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, c.logger)} + ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, c.config.XDSServer.NodeProto, c.logger)} defer func() { if retErr != nil { ret.close() diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index e165f84f78d9..31f6d466fbea 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -21,6 +21,7 @@ package xdsclient import ( "context" "fmt" + "strings" "testing" "time" @@ -197,7 +198,7 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want } gotUpdate := u.(xdsresource.ListenerUpdateErrTuple) if wantErr != nil { - if gotUpdate.Err != wantErr { + if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil @@ -215,7 +216,7 @@ func verifyRouteConfigUpdate(ctx context.Context, updateCh 
*testutils.Channel, w } gotUpdate := u.(xdsresource.RouteConfigUpdateErrTuple) if wantErr != nil { - if gotUpdate.Err != wantErr { + if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil @@ -233,7 +234,7 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU } gotUpdate := u.(xdsresource.ClusterUpdateErrTuple) if wantErr != nil { - if gotUpdate.Err != wantErr { + if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil @@ -251,7 +252,7 @@ func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wan } gotUpdate := u.(xdsresource.EndpointsUpdateErrTuple) if wantErr != nil { - if gotUpdate.Err != wantErr { + if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) } return nil diff --git a/xds/internal/xdsclient/pubsub/pubsub.go b/xds/internal/xdsclient/pubsub/pubsub.go index a843fd5f191f..48b5ce48910e 100644 --- a/xds/internal/xdsclient/pubsub/pubsub.go +++ b/xds/internal/xdsclient/pubsub/pubsub.go @@ -23,9 +23,11 @@ package pubsub import ( + "fmt" "sync" "time" + "github.com/golang/protobuf/proto" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -41,6 +43,7 @@ type Pubsub struct { done *grpcsync.Event logger *grpclog.PrefixLogger watchExpiryTimeout time.Duration + nodeIDJSON string updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate // All the following maps are to keep the updates/metadata in a cache. @@ -60,11 +63,14 @@ type Pubsub struct { } // New creates a new Pubsub. -func New(watchExpiryTimeout time.Duration, logger *grpclog.PrefixLogger) *Pubsub { +// +// The passed in nodeID will be attached to all errors sent to the watchers. 
+func New(watchExpiryTimeout time.Duration, nodeID proto.Message, logger *grpclog.PrefixLogger) *Pubsub { pb := &Pubsub{ done: grpcsync.NewEvent(), logger: logger, watchExpiryTimeout: watchExpiryTimeout, + nodeIDJSON: fmt.Sprint(nodeID), updateCh: buffer.NewUnbounded(), ldsWatchers: make(map[string]map[*watchInfo]bool), diff --git a/xds/internal/xdsclient/pubsub/watch.go b/xds/internal/xdsclient/pubsub/watch.go index 0baa683175dd..2fc6bb2d6030 100644 --- a/xds/internal/xdsclient/pubsub/watch.go +++ b/xds/internal/xdsclient/pubsub/watch.go @@ -100,9 +100,7 @@ func (wi *watchInfo) timeout() { // Caller must hold wi.mu. func (wi *watchInfo) sendErrorLocked(err error) { - var ( - u interface{} - ) + var u interface{} switch wi.rType { case xdsresource.ListenerResource: u = xdsresource.ListenerUpdate{} @@ -113,6 +111,15 @@ func (wi *watchInfo) sendErrorLocked(err error) { case xdsresource.EndpointsResource: u = xdsresource.EndpointsUpdate{} } + + errMsg := err.Error() + errTyp := xdsresource.ErrType(err) + if errTyp == xdsresource.ErrorTypeUnknown { + err = fmt.Errorf("%v, xDS client nodeID: %s", errMsg, wi.c.nodeIDJSON) + } else { + err = xdsresource.NewErrorf(errTyp, "%v, xDS client nodeID: %s", errMsg, wi.c.nodeIDJSON) + } + wi.c.scheduleCallback(wi, u, err) } From 6c3ccbe89a5a73a65f339b1e5d3586c2782a830c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 17 Mar 2022 10:17:03 -0700 Subject: [PATCH 451/998] grpc: remove remaining usages of grpc.WithInsecure() (#5246) --- clientconn_authority_test.go | 17 +++++++++-------- clientconn_parsed_target_test.go | 7 ++++--- dialoptions.go | 4 ++-- pickfirst_test.go | 15 ++++++++------- resolver_conn_wrapper_test.go | 5 +++-- 5 files changed, 26 insertions(+), 22 deletions(-) diff --git a/clientconn_authority_test.go b/clientconn_authority_test.go index 7a77de64c570..3efb2ae8571e 100644 --- a/clientconn_authority_test.go +++ b/clientconn_authority_test.go @@ -24,6 +24,7 @@ import ( "testing" 
"google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/testdata" ) @@ -43,7 +44,7 @@ func (s) TestClientConnAuthority(t *testing.T) { { name: "default", target: "Non-Existent.Server:8080", - opts: []DialOption{WithInsecure()}, + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, wantAuthority: "Non-Existent.Server:8080", }, { @@ -55,7 +56,7 @@ func (s) TestClientConnAuthority(t *testing.T) { { name: "override-via-WithAuthority", target: "Non-Existent.Server:8080", - opts: []DialOption{WithInsecure(), WithAuthority("authority-override")}, + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithAuthority("authority-override")}, wantAuthority: "authority-override", }, { @@ -67,13 +68,13 @@ func (s) TestClientConnAuthority(t *testing.T) { { name: "unix relative", target: "unix:sock.sock", - opts: []DialOption{WithInsecure()}, + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, wantAuthority: "localhost", }, { name: "unix relative with custom dialer", target: "unix:sock.sock", - opts: []DialOption{WithInsecure(), WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return (&net.Dialer{}).DialContext(ctx, "", addr) })}, wantAuthority: "localhost", @@ -81,13 +82,13 @@ func (s) TestClientConnAuthority(t *testing.T) { { name: "unix absolute", target: "unix:/sock.sock", - opts: []DialOption{WithInsecure()}, + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, wantAuthority: "localhost", }, { name: "unix absolute with custom dialer", target: "unix:///sock.sock", - opts: []DialOption{WithInsecure(), WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials()), 
WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return (&net.Dialer{}).DialContext(ctx, "", addr) })}, wantAuthority: "localhost", @@ -95,13 +96,13 @@ func (s) TestClientConnAuthority(t *testing.T) { { name: "localhost colon port", target: "localhost:50051", - opts: []DialOption{WithInsecure()}, + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, wantAuthority: "localhost:50051", }, { name: "colon port", target: ":50051", - opts: []DialOption{WithInsecure()}, + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, wantAuthority: "localhost:50051", }, } diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go index e41fafe09666..71ffb69edab4 100644 --- a/clientconn_parsed_target_test.go +++ b/clientconn_parsed_target_test.go @@ -27,6 +27,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/resolver" ) @@ -83,7 +84,7 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { for _, test := range tests { t.Run(test.target, func(t *testing.T) { - cc, err := Dial(test.target, WithInsecure()) + cc, err := Dial(test.target, WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Dial(%q) failed: %v", test.target, err) } @@ -106,7 +107,7 @@ func (s) TestParsedTarget_Failure_WithoutCustomDialer(t *testing.T) { for _, target := range targets { t.Run(target, func(t *testing.T) { - if cc, err := Dial(target, WithInsecure()); err == nil { + if cc, err := Dial(target, WithTransportCredentials(insecure.NewCredentials())); err == nil { defer cc.Close() t.Fatalf("Dial(%q) succeeded cc.parsedTarget = %+v, expected to fail", target, cc.parsedTarget) } @@ -178,7 +179,7 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { return nil, errors.New("dialer error") } - cc, err := Dial(test.target, WithInsecure(), WithContextDialer(dialer)) + cc, err := 
Dial(test.target, WithTransportCredentials(insecure.NewCredentials()), WithContextDialer(dialer)) if err != nil { t.Fatalf("Dial(%q) failed: %v", test.target, err) } diff --git a/dialoptions.go b/dialoptions.go index e7ac15ce4c7b..f2f605a17c47 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -282,8 +282,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() diff --git a/pickfirst_test.go b/pickfirst_test.go index 9ece7844a355..445026a9a14e 100644 --- a/pickfirst_test.go +++ b/pickfirst_test.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" @@ -46,7 +47,7 @@ func (s) TestOneBackendPickfirst(t *testing.T) { defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", - WithInsecure(), + WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { @@ -80,7 +81,7 @@ func (s) TestBackendsPickfirst(t *testing.T) { servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -112,7 +113,7 @@ func (s) 
TestNewAddressWhileBlockingPickfirst(t *testing.T) { servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -147,7 +148,7 @@ func (s) TestCloseWithPendingRPCPickfirst(t *testing.T) { _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -182,7 +183,7 @@ func (s) TestOneServerDownPickfirst(t *testing.T) { servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -222,7 +223,7 @@ func (s) TestAllServersDownPickfirst(t *testing.T) { servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -264,7 +265,7 @@ func (s) TestAddressesRemovedPickfirst(t *testing.T) { servers, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() - cc, err := 
Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } diff --git a/resolver_conn_wrapper_test.go b/resolver_conn_wrapper_test.go index 1036946ad1e4..f7dcd7eb1978 100644 --- a/resolver_conn_wrapper_test.go +++ b/resolver_conn_wrapper_test.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" @@ -55,7 +56,7 @@ func (s) TestResolverErrorInBuild(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Err: errors.New("resolver build err")}}) - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) } @@ -74,7 +75,7 @@ func (s) TestResolverErrorInBuild(t *testing.T) { func (s) TestServiceConfigErrorRPC(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) } From 97c314341871cd9f4837a6ffb71bfd991a9ba04e Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 17 Mar 2022 10:34:45 -0700 Subject: [PATCH 452/998] xds/client: accept resources wrapped in discoverypb.Resource message (#5242) --- xds/internal/xdsclient/bootstrap/bootstrap.go | 3 +- .../xdsclient/bootstrap/bootstrap_test.go | 6 +- 
xds/internal/xdsclient/xdsresource/type.go | 17 ++++++ .../xdsclient/xdsresource/unmarshal_cds.go | 5 ++ .../xdsresource/unmarshal_cds_test.go | 31 +++++++++++ .../xdsclient/xdsresource/unmarshal_eds.go | 5 ++ .../xdsresource/unmarshal_eds_test.go | 37 +++++++++++++ .../xdsclient/xdsresource/unmarshal_lds.go | 5 ++ .../xdsresource/unmarshal_lds_test.go | 23 ++++++++ .../xdsclient/xdsresource/unmarshal_rds.go | 5 ++ .../xdsresource/unmarshal_rds_test.go | 55 +++++++++++++++++++ .../xdsclient/xdsresource/version/version.go | 2 + 12 files changed, 190 insertions(+), 4 deletions(-) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 4523a6131fd4..97fe4a8b0792 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -53,6 +53,7 @@ const ( gRPCUserAgentName = "gRPC Go" clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" ) func init() { @@ -499,7 +500,7 @@ func (c *Config) updateNodeProto(node *v3corepb.Node) error { } v3.UserAgentName = gRPCUserAgentName v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning) + v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) v3bytes, err := proto.Marshal(v3) if err != nil { diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 36b4302c8fae..6aa047d6ddbc 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -200,14 +200,14 @@ var ( BuildVersion: gRPCVersion, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: 
[]string{clientFeatureNoOverprovisioning}, + ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, } v3NodeProto = &v3corepb.Node{ Id: "ENVOY_NODE_ID", Metadata: metadata, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning}, + ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, } nilCredsConfigV2 = &Config{ XDSServer: &ServerConfig{ @@ -401,7 +401,7 @@ func TestNewConfigV2ProtoSuccess(t *testing.T) { BuildVersion: gRPCVersion, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning}, + ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, }, }, ClientDefaultListenerResourceNameTemplate: "%s", diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go index c64f7c609c62..faf34f98e3c7 100644 --- a/xds/internal/xdsclient/xdsresource/type.go +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -20,6 +20,8 @@ package xdsresource import ( "time" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/golang/protobuf/proto" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) @@ -76,6 +78,21 @@ func IsEndpointsResource(url string) bool { return url == version.V2EndpointsURL || url == version.V3EndpointsURL } +// unwrapResource unwraps and returns the inner resource if it's in a resource +// wrapper. The original resource is returned if it's not wrapped. +func unwrapResource(r *anypb.Any) (*anypb.Any, error) { + url := r.GetTypeUrl() + if url != version.V2ResourceWrapperURL && url != version.V3ResourceWrapperURL { + // Not wrapped. 
+ return r, nil + } + inner := &v3discoverypb.Resource{} + if err := proto.Unmarshal(r.GetValue(), inner); err != nil { + return nil, err + } + return inner.Resource, nil +} + // ServiceStatus is the status of the update. type ServiceStatus int diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index eba78716eebd..572941efb134 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -51,6 +51,11 @@ func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, } func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { + r, err := unwrapResource(r) + if err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + if !IsClusterResource(r.GetTypeUrl()) { return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 096c4fb0e828..4569d135864f 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/envconfig" @@ -1517,6 +1518,21 @@ func (s) TestUnmarshalCluster(t *testing.T) { Version: testVersion, }, }, + { + name: "v2 cluster wrapped", + resources: []*anypb.Any{testutils.MarshalAny(&v2xdspb.Resource{Resource: v2ClusterAny})}, + wantUpdate: map[string]ClusterUpdateErrTuple{ + v2ClusterName: {Update: ClusterUpdate{ + ClusterName: v2ClusterName, + EDSServiceName: v2Service, LRSServerConfig: 
ClusterLRSServerSelf, + Raw: v2ClusterAny, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, { name: "v3 cluster", resources: []*anypb.Any{v3ClusterAny}, @@ -1532,6 +1548,21 @@ func (s) TestUnmarshalCluster(t *testing.T) { Version: testVersion, }, }, + { + name: "v3 cluster wrapped", + resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3ClusterAny})}, + wantUpdate: map[string]ClusterUpdateErrTuple{ + v3ClusterName: {Update: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v3ClusterAny, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, { name: "v3 cluster with EDS config source self", resources: []*anypb.Any{v3ClusterAnyWithEDSConfigSourceSelf}, diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index f1774dedae43..147870cdf6bc 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -42,6 +42,11 @@ func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTu } func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { + r, err := unwrapResource(r) + if err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + if !IsEndpointsResource(r.GetTypeUrl()) { return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index 324d7d250f69..5c6118d4e727 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -25,6 +25,7 @@ import ( v3corepb 
"github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" anypb "github.com/golang/protobuf/ptypes/any" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" @@ -227,6 +228,42 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { Version: testVersion, }, }, + { + name: "v3 endpoints wrapped", + resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3EndpointsAny})}, + wantUpdate: map[string]EndpointsUpdateErrTuple{ + "test": {Update: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + Raw: v3EndpointsAny, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, { // To test that unmarshal keeps processing on errors. 
name: "good and bad endpoints", diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index b259c7b87e4c..2e59c0605c9b 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -46,6 +46,11 @@ func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTupl } func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { + r, err := unwrapResource(r) + if err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + if !IsListenerResource(r.GetTypeUrl()) { return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index 4444421a4929..9150d64dfa00 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -605,6 +606,17 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Version: testVersion, }, }, + { + name: "v2 listener resource wrapped", + resources: []*anypb.Any{testutils.MarshalAny(&v2xdspb.Resource{Resource: v2Lis})}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, { name: "v3 listener resource", resources: []*anypb.Any{v3LisWithFilters()}, @@ -616,6 +628,17 @@ func (s) 
TestUnmarshalListener_ClientSide(t *testing.T) { Version: testVersion, }, }, + { + name: "v3 listener resource wrapped", + resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3LisWithFilters()})}, + wantUpdate: map[string]ListenerUpdateErrTuple{ + v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, // "To allow equating RBAC's direct_remote_ip and // remote_ip...HttpConnectionManager.xff_num_trusted_hops must be unset // or zero and HttpConnectionManager.original_ip_detection_extensions diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index f43b18292f0c..12c3d560fa7d 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -46,6 +46,11 @@ func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateE } func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) { + r, err := unwrapResource(r) + if err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + if !IsRouteConfigResource(r.GetTypeUrl()) { return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index a14d321b8eee..abae9ff09752 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/golang/protobuf/proto" 
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -910,6 +911,33 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Version: testVersion, }, }, + { + name: "v2 routeConfig resource wrapped", + resources: []*anypb.Any{testutils.MarshalAny(&v2xdspb.Resource{Resource: v2RouteConfig})}, + wantUpdate: map[string]RouteConfigUpdateErrTuple{ + v2RouteConfigName: {Update: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + Raw: v2RouteConfig, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + }, { name: "v3 routeConfig resource", resources: []*anypb.Any{v3RouteConfig}, @@ -937,6 +965,33 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { Version: testVersion, }, }, + { + name: "v3 routeConfig resource wrapped", + resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3RouteConfig})}, + wantUpdate: map[string]RouteConfigUpdateErrTuple{ + v3RouteConfigName: {Update: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + Raw: v3RouteConfig, + }}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusACKed, + Version: testVersion, + }, + 
}, { name: "multiple routeConfig resources", resources: []*anypb.Any{v2RouteConfig, v3RouteConfig}, diff --git a/xds/internal/xdsclient/xdsresource/version/version.go b/xds/internal/xdsclient/xdsresource/version/version.go index edfa68762f6e..2c4819abddc0 100644 --- a/xds/internal/xdsclient/xdsresource/version/version.go +++ b/xds/internal/xdsclient/xdsresource/version/version.go @@ -42,6 +42,7 @@ const ( V2ClusterType = "envoy.api.v2.Cluster" V2EndpointsType = "envoy.api.v2.ClusterLoadAssignment" + V2ResourceWrapperURL = googleapiPrefix + "envoy.api.v2.Resource" V2ListenerURL = googleapiPrefix + V2ListenerType V2RouteConfigURL = googleapiPrefix + V2RouteConfigType V2ClusterURL = googleapiPrefix + V2ClusterType @@ -53,6 +54,7 @@ const ( V3ClusterType = "envoy.config.cluster.v3.Cluster" V3EndpointsType = "envoy.config.endpoint.v3.ClusterLoadAssignment" + V3ResourceWrapperURL = googleapiPrefix + "envoy.service.discovery.v3.Resource" V3ListenerURL = googleapiPrefix + V3ListenerType V3RouteConfigURL = googleapiPrefix + V3RouteConfigType V3ClusterURL = googleapiPrefix + V3ClusterType From f95b001a48df30dbbdb89ccb9a4781a242591651 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 17 Mar 2022 14:35:42 -0700 Subject: [PATCH 453/998] xds: avoid log spam during server mode switches (better A36 compliance) (#5215) --- interop/xds/server/server.go | 5 +-- xds/internal/server/listener_wrapper.go | 8 +++- .../test/xds_server_integration_test.go | 14 +++++- .../test/xds_server_serving_mode_test.go | 24 +++++----- xds/server.go | 45 ++++++++++++++----- xds/server_test.go | 13 +++++- 6 files changed, 80 insertions(+), 29 deletions(-) diff --git a/interop/xds/server/server.go b/interop/xds/server/server.go index afbbc56af89e..5932199de6c6 100644 --- a/interop/xds/server/server.go +++ b/interop/xds/server/server.go @@ -100,10 +100,7 @@ func (x *xdsUpdateHealthServiceImpl) SetNotServing(_ context.Context, _ *testpb. 
} func xdsServingModeCallback(addr net.Addr, args xds.ServingModeChangeArgs) { - logger.Infof("Serving mode for xDS server at %s changed to %s", addr.String(), args.Mode) - if args.Err != nil { - logger.Infof("ServingModeCallback returned error: %v", args.Err) - } + logger.Infof("Serving mode callback for xDS server at %q invoked with mode: %q, err: %v", addr.String(), args.Mode, args.Err) } func main() { diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index c90f9672ea32..421ed7533633 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -111,6 +111,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru drainCallback: params.DrainCallback, isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), + mode: connectivity.ServingModeStarting, closed: grpcsync.NewEvent(), goodUpdate: grpcsync.NewEvent(), ldsUpdateCh: make(chan ldsUpdateWithError, 1), @@ -429,14 +430,19 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { } } +// switchMode updates the value of serving mode and filter chains stored in the +// listenerWrapper. And if the serving mode has changed, it invokes the +// registered mode change callback. 
func (l *listenerWrapper) switchMode(fcs *xdsresource.FilterChainManager, newMode connectivity.ServingMode, err error) { l.mu.Lock() defer l.mu.Unlock() l.filterChains = fcs + if l.mode == newMode { + return + } l.mode = newMode if l.modeCallback != nil { l.modeCallback(l.Listener.Addr(), newMode, err) } - l.logger.Warningf("Listener %q entering mode: %q due to error: %v", l.Addr(), newMode, err) } diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go index 6904e0e88d5c..b362926905b6 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/xds/internal/test/xds_server_integration_test.go @@ -80,8 +80,20 @@ func setupGRPCServer(t *testing.T, bootstrapContents []byte) (net.Listener, func t.Fatal(err) } + // Create a server option to get notified about serving mode changes. We don't + // do anything other than throwing a log entry here. But this is required, + // since the server code emits a log entry at the default level (which is + // ERROR) if no callback is registered for serving mode changes. Our + // testLogger fails the test if there is any log entry at ERROR level. It does + // provide an ExpectError() method, but that takes a string and it would be + // painful to construct the exact error message expected here. Instead this + // works just fine. + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + }) + // Initialize an xDS-enabled gRPC server and register the stubServer on it. - server := xds.NewGRPCServer(grpc.Creds(creds), xds.BootstrapContentsForTesting(bootstrapContents)) + server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) testpb.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). 
diff --git a/xds/internal/test/xds_server_serving_mode_test.go b/xds/internal/test/xds_server_serving_mode_test.go index 5fa17546e81d..236a831c642b 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/xds/internal/test/xds_server_serving_mode_test.go @@ -60,8 +60,6 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { updateCh := make(chan connectivity.ServingMode, 1) // Create a server option to get notified about serving mode changes. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { t.Logf("serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) updateCh <- args.Mode @@ -82,6 +80,8 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener}, } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -194,7 +194,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { case lis2.Addr().String(): updateCh2 <- args.Mode default: - t.Logf("serving mode callback invoked for unknown listener address: %q", addr.String()) + t.Errorf("serving mode callback invoked for unknown listener address: %q", addr.String()) } }) @@ -240,7 +240,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh1: if mode != connectivity.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } select { @@ -248,7 +248,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("timed out waiting for a mode change update: %v", 
err) case mode := <-updateCh2: if mode != connectivity.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } @@ -274,7 +274,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1}, }); err != nil { - t.Error(err) + t.Fatal(err) } // Wait for lis2 to move to "not-serving" mode. @@ -283,7 +283,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh2: if mode != connectivity.ServingModeNotServing { - t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) } } @@ -298,7 +298,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { NodeID: nodeID, Listeners: []*v3listenerpb.Listener{}, }); err != nil { - t.Error(err) + t.Fatal(err) } // Wait for lis1 to move to "not-serving" mode. lis2 was already removed @@ -309,7 +309,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh1: if mode != connectivity.ServingModeNotServing { - t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) } } @@ -330,7 +330,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { NodeID: nodeID, Listeners: []*v3listenerpb.Listener{listener1, listener2}, }); err != nil { - t.Error(err) + t.Fatal(err) } // Wait for both listeners to move to "serving" mode. 
@@ -339,7 +339,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh1: if mode != connectivity.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } select { @@ -347,7 +347,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { t.Fatalf("timed out waiting for a mode change update: %v", err) case mode := <-updateCh2: if mode != connectivity.ServingModeServing { - t.Errorf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) } } diff --git a/xds/server.go b/xds/server.go index 0319ddcaf533..5ab8a5a98008 100644 --- a/xds/server.go +++ b/xds/server.go @@ -105,10 +105,10 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { s := &GRPCServer{ gs: newGRPCServer(newOpts...), quit: grpcsync.NewEvent(), - opts: handleServerOptions(opts), } s.logger = prefixLogger(s) s.logger.Infof("Created xds.GRPCServer") + s.handleServerOptions(opts) // We type assert our underlying gRPC server to the real grpc.Server here // before trying to retrieve the configured credentials. This approach @@ -128,14 +128,35 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { // handleServerOptions iterates through the list of server options passed in by // the user, and handles the xDS server specific options. 
-func handleServerOptions(opts []grpc.ServerOption) *serverOptions { - so := &serverOptions{} +func (s *GRPCServer) handleServerOptions(opts []grpc.ServerOption) { + so := s.defaultServerOptions() for _, opt := range opts { if o, ok := opt.(*serverOption); ok { o.apply(so) } } - return so + s.opts = so +} + +func (s *GRPCServer) defaultServerOptions() *serverOptions { + return &serverOptions{ + // A default serving mode change callback which simply logs at the + // default-visible log level. This will be used if the application does not + // register a mode change callback. + // + // Note that this means that `s.opts.modeCallback` will never be nil and can + // safely be invoked directly from `handleServingModeChanges`. + modeCallback: s.loggingServerModeChangeCallback, + } +} + +func (s *GRPCServer) loggingServerModeChangeCallback(addr net.Addr, args ServingModeChangeArgs) { + switch args.Mode { + case connectivity.ServingModeServing: + s.logger.Errorf("Listener %q entering mode: %q", addr.String(), args.Mode) + case connectivity.ServingModeNotServing: + s.logger.Errorf("Listener %q entering mode: %q due to error: %v", addr.String(), args.Mode, args.Err) + } } // RegisterService registers a service and its implementation to the underlying @@ -291,12 +312,16 @@ func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { drainServerTransports(gs, args.addr.String()) } } - if s.opts.modeCallback != nil { - s.opts.modeCallback(args.addr, ServingModeChangeArgs{ - Mode: args.mode, - Err: args.err, - }) - } + + // The XdsServer API will allow applications to register a "serving state" + // callback to be invoked when the server begins serving and when the + // server encounters errors that force it to be "not serving". If "not + // serving", the callback must be provided error information, for + // debugging use by developers - A36. 
+ s.opts.modeCallback(args.addr, ServingModeChangeArgs{ + Mode: args.mode, + Err: args.err, + }) } } } diff --git a/xds/server_test.go b/xds/server_test.go index e307beee754d..ac0c573fdef7 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -711,7 +711,18 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { fs, clientCh, cleanup := setupOverrides() defer cleanup() - server := NewGRPCServer() + // Create a server option to get notified about serving mode changes. We don't + // do anything other than throwing a log entry here. But this is required, + // since the server code emits a log entry at the default level (which is + // ERROR) if no callback is registered for serving mode changes. Our + // testLogger fails the test if there is any log entry at ERROR level. It does + // provide an ExpectError() method, but that takes a string and it would be + // painful to construct the exact error message expected here. Instead this + // works just fine. + modeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) { + t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + }) + server := NewGRPCServer(modeChangeOpt) defer server.Stop() lis, err := testutils.LocalTCPListener() From 6131e9d46ea8c352a52dd213044e3cb344913179 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 21 Mar 2022 10:41:21 -0700 Subject: [PATCH 454/998] examples: regenerate certs to use SHA256 signing algorithm (#5256) --- examples/data/x509/README.md | 6 ++ examples/data/x509/ca_cert.pem | 58 +++++++-------- examples/data/x509/ca_key.pem | 52 +++++++++++++ examples/data/x509/client_ca_cert.pem | 65 ++++++++-------- examples/data/x509/client_ca_key.pem | 52 +++++++++++++ examples/data/x509/client_cert.pem | 54 +++++++------- examples/data/x509/client_key.pem | 98 ++++++++++++------------- examples/data/x509/create.sh | 69 +++++++++++++++++ examples/data/x509/openssl.cnf | 28 +++++++ 
examples/data/x509/server_cert.pem | 54 +++++++------- examples/data/x509/server_key.pem | 98 ++++++++++++------------- testdata/x509/README.md | 102 +------------------------- testdata/x509/create.sh | 25 +++++-- 13 files changed, 438 insertions(+), 323 deletions(-) create mode 100644 examples/data/x509/README.md create mode 100644 examples/data/x509/ca_key.pem create mode 100644 examples/data/x509/client_ca_key.pem create mode 100755 examples/data/x509/create.sh create mode 100644 examples/data/x509/openssl.cnf diff --git a/examples/data/x509/README.md b/examples/data/x509/README.md new file mode 100644 index 000000000000..3b9a05dac364 --- /dev/null +++ b/examples/data/x509/README.md @@ -0,0 +1,6 @@ +This directory contains x509 certificates and associated private keys used in +examples. + +How were these test certs/keys generated ? +------------------------------------------ +Run `./create.sh` diff --git a/examples/data/x509/ca_cert.pem b/examples/data/x509/ca_cert.pem index eee033e8cb05..868a01eb92f9 100644 --- a/examples/data/x509/ca_cert.pem +++ b/examples/data/x509/ca_cert.pem @@ -1,34 +1,34 @@ -----BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJAKnJpgBC9CHNMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +MIIF6jCCA9KgAwIBAgIJANQvyb7tgLDkMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD -MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMDA4MDQwMTU5NTdaFw0zMDA4 -MDIwMTU5NTdaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMjAzMTgyMTQ0NTZaFw0zMjAz +MTUyMTQ0NTZaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMZFKSUi+PlQ6z/aTz1Jp9lqrFAY -38cEIzpxS9ktQiWvLoYICImXRFhCH/h+WjmiyV8zYHcbft63BTUwgXJFuE0cxsJY -mqOUYL2wTD5PzgoN0B9KVgKyyi0SQ6WH9+D2ZvYAolHb1l6pYuxxk1bQL2OA80Cc -K659UioynIQtJ52NRqGRDI2EYsC9XRuhfddnDu/RwBaiv3ix84R3VAqcgRyOeGwH 
-cX2e+aX0m6ULnsiyPXG9y9wQi956CGGZimInV63S+sU3Mc6PuUt8rwFlmSXCZ/07 -D8No5ljNUo6Vt2BpAMQzSz+SU4PUFE7Vxbq4ypI+2ZbkI80YjDwF52/pMauqZFIP -Kjw0b2yyWD/F4hLmR7Rx9d8EFWRLZm2VYSVMiQTwANpb+uL7+kH8UE3QF7tryH8K -G65mMh18XiERgSAWgs5Z8j/B1W5bl17PVx2Ii1dYp0IquyAVjCIKRrFituvoXXZj -FHHpb/aUDpW0SYrT5dmDhAAGFkYfMTFd4EOj6bWepZtRRjPeIHR9B2yx8U0tFSMf -tuHCj95l2izJDUfKhVIkigpbRrElI2QqXAPIyIOqcdzlgtI6DIanCd/CwsfdyaEs -7AnW2mFWarbkxpw92RdGxYy6WXbdM+2EdY+cWKys06upINcnG2zvkCflAE39fg9F -BVCJC71oO3laXnf7AgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUBuToaw2a+AV/vfbooJn3yzwA3lMwgYAGA1UdIwR5MHeAFAbk6GsNmvgFf732 -6KCZ98s8AN5ToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANGmhBQQ5f3n4UhgJLsXHh3CE3ej +Ox36ob+Hnny9Gb/OquA4FMKjTTaSrhKIQapqlCLODai50XKSRBJcgsvsqWk9UdL2 +3zf7CzAPmg5CmzpWWwgpKPTuK5W+gLA1+uMKecBdH5gqSswQ3TD1fMfnJuq9mNfC +GsMkplaqS5VATNFPVnqS7us3OXKEITmBaQP4wOpGP1PgqX7K08aZEeAyQJaTS5um +4MNlBLYa/nQ9Wca0Uk5tzoNjE6mWH7bTuwdoZgOIwKFmBbmsC9y/HzwV/zRsL8Yp ++7FwfIYuZ5j8gBNqSFQjDFkm6Q7RcQ/lyHHj9YduOgTciIFVgx+j8aZvFqH127h8 +WIb7Jppy0DEDJE1hRP6iV2uVoaUxhXWrCWLBUU+naLix7SJ8rqw8gHwRNWfM/Lwg +I3rGXdw5WIHVQcuxevN6qVSZeWVYAlAgfxjKtM5cKZyM+W80CSdVKEku1XA0sq6h +jaiJdo6hpm8BLIB2k7LWafc5MASst7XULk4uDC/OYcEz3+C3Ryn1qBltr1gA3+5K +ANuhjYCZH4P0pX08I1MpeVP6h8XhbBPEZg2txbVGlnDXEFoJN9Eg5iEKRBo/HKhf +lP84ljtBSmCnsF6K/y3vnRiu+BVNP5KMq179DNqEy7tSygzgY41m3pSFojdvA59N +JWJoy9/NZzdlU4nzAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUW5AMXXg/zPSaLHwSO/7LwoBeZYUwgYAGA1UdIwR5MHeAFFuQDF14P8z0mix8 +Ejv+y8KAXmWFoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC -CQCpyaYAQvQhzTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBALUz -P2SiZAXZDwCH8kzHbLqsqacSM81bUSuG153t3fhwZU8hzXgQqifFububLkrLaRCj -VvtIS3XsbHmKYD1TBOOCZy5zE2KdpWYW47LmogBqUllKCSD099UHFB2YUepK9Zci -oxYJMhNWIhkoJ/NJMp70A8PZtxUvZafeUQl6xueo1yPbfQubg0lG9Pp2xkmTypSv -WJkpRyX8GSJYFoFFYdNcvICVw7E/Zg+PGXe8gjpAGWW8KxxaohPsdLid6f3KauJM 
-UCi/WQECzIpNzxQDSqnGeoqbZp+2y6mhgECQ3mG/K75n0fX0aV88DNwTd1o0xOpv -lHJo8VD9mvwnapbm/Bc7NWIzCjL8fo0IviRkmAuoz525eBy6NsUCf1f432auvNbg -OUaGGrY6Kse9sF8Tsc8XMoT9AfGQaR8Ay7oJHjaCZccvuxpB2n//L1UAjMRPYd2y -XAiSN2xz7WauUh4+v48lKbWa+dwn1G0pa6ZGB7IGBUbgva8Fi3iqVh3UZoz+0PFM -qVLG2SzhfMTMHg0kF+rI4eOcEKc1j3A83DmTTPZDz3APn53weJLJhKzrgQiI1JRW -boAJ4VFQF6zjxeecCIIiekH6saYKnol2yL6ksm0jyHoFejkrHWrzoRAwIhTf9avj -G7QS5fiSQk4PXCX42J5aS/zISy85RT120bkBjV/P +CQDUL8m+7YCw5DAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAKTh +Ofg4WospSN7Gg/q3bQqfSMT5XTFC7cj0j3cWDZBnmqb0HAFPmzHT+w3kBVNCyx1r +iatOhaZRH7RA0vacZQT5pD2MGU48/zFfwBV/qHENQWuRLD2WOOEU3cjjoINBclfP +im7ml/xgz0ACOgUyf+/2hkS7VLq4p9QQVGf2TQt65DZA9mUylZTdsBf4AfEg7IXv +gaYpq6tYmNi7fXDzR/LT+fPd4ejQARy9U7uVhecyH9zTUMzm2Fr/p7HhydSXNwhF +JUfPWw7XYO0lyA+8PxUSAKXOfsT44WNtHAeRm/Gkmn8inBdedFia/+M67k45b/wY +RF11QzvaMR33jmrdZWxCc0Xjg8oZIP7T9MfGFULEGCpB3NY4YjnRrid/JZ/edhPR +2iOiEiek4qAaxeIne3CR2dqCM+n+FV1zCs4n3S0os4+kknnS5aNR5wZpqpZfG0Co +FyWE+dE51cGcub1wT1oi5Xrxg/iRteCfd33Ky668FYKA/tHHdqkVfBflATU6iOtw +dIzvFJk1H1mUwpJrH/aNOHzVCQ5KSpcc+kXcOQPafTHFB6zMVJ6O+Vm7SrqiSENM +2b1fBKxHIsxOtwrKuzbRhU5+eAICqwMd6gcIpT/JSR1r+UfHVcrXalbeazmT2DS5 +CFOeinj4WQvtPYOdbYsWg8Y9zGN4L9zH6GovM1wD -----END CERTIFICATE----- diff --git a/examples/data/x509/ca_key.pem b/examples/data/x509/ca_key.pem new file mode 100644 index 000000000000..4dccea1be392 --- /dev/null +++ b/examples/data/x509/ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDRpoQUEOX95+FI +YCS7Fx4dwhN3ozsd+qG/h558vRm/zqrgOBTCo002kq4SiEGqapQizg2oudFykkQS +XILL7KlpPVHS9t83+wswD5oOQps6VlsIKSj07iuVvoCwNfrjCnnAXR+YKkrMEN0w +9XzH5ybqvZjXwhrDJKZWqkuVQEzRT1Z6ku7rNzlyhCE5gWkD+MDqRj9T4Kl+ytPG +mRHgMkCWk0ubpuDDZQS2Gv50PVnGtFJObc6DYxOplh+207sHaGYDiMChZgW5rAvc +vx88Ff80bC/GKfuxcHyGLmeY/IATakhUIwxZJukO0XEP5chx4/WHbjoE3IiBVYMf +o/Gmbxah9du4fFiG+yaactAxAyRNYUT+oldrlaGlMYV1qwliwVFPp2i4se0ifK6s +PIB8ETVnzPy8ICN6xl3cOViB1UHLsXrzeqlUmXllWAJQIH8YyrTOXCmcjPlvNAkn 
+VShJLtVwNLKuoY2oiXaOoaZvASyAdpOy1mn3OTAErLe11C5OLgwvzmHBM9/gt0cp +9agZba9YAN/uSgDboY2AmR+D9KV9PCNTKXlT+ofF4WwTxGYNrcW1RpZw1xBaCTfR +IOYhCkQaPxyoX5T/OJY7QUpgp7Beiv8t750YrvgVTT+SjKte/QzahMu7UsoM4GON +Zt6UhaI3bwOfTSViaMvfzWc3ZVOJ8wIDAQABAoICAQCxi7A9AhaUUWRzE6DnpGtH +zk0IO39cIx4KAsNQZiDBVDdXzYafUwaX2d57KVNbDAlJ9HCS3FKpEX9+gUPviQvr +aRe7boCZewv9dqkDvJqS7AEJxzm9O1pD5WI8WGqRDhUPuI2CIwbXDM0VokA7VuGZ +WFlxFxvs+UO5D10VF7A2blcRVQ/quQj4lzc/6P1TdL2DaVxGH3PLQd/ZR1ZhJI2Y +N0OHnOqp7wnvYqrtK+u0oI83hjym/ifvrYhMH8E7Q8lo4s4noSvmEvK0zlKYYxSO +g7RtwK47lcSPKgtn/yZDyvVX85qIgbBLcUmrqfB3qxMKz2lpJo6f4Rg7mm6SgW+K +zxYnGNCTPfiyPKiufM3rQPfJ4giqQ1XDKiZEKUJBo4mzzV6LcAoDaEqhHBlySpi3 +Z38I0rmAT62PRJ1sMkQl6j1Ben9TpwTzJmLX1sEO1Jsabsk8rRdV+ni5oRRUdW4H ++ratyQ8pmegLYyhAZqkD7FzKBLdznLmWXVTcBQkRoD5lQkCP2OF78TdL4twNvoTH +X4kQ3cNysWFXsm+yf4jSCHl4BEtGA2jOU690T0trtMf13aI3wEULmcBgc2ix+tch +wX79hwBYcjGGDfTMb39r/DrcgWMVFXawru78QFoN9vVxznit9LrOERBm6zN2ok4X +E1kD4YZGr8dxUHax0or4CQKCAQEA7W1Sxeqc0gV0ANQf3eCsFNjvT97z/RSzzUYF +wCe4rpzQ9ZNsY2UYMYmEzUuRBuQxYKCNTWot3hu+6OPMCp4pLuu2l8ha/wCM2TkY +6hceduvXkdUNUG1xZNSR8waw4PTXNeoOD30+GB4OpHdjzsF5pEzx853/Qo/ERJFx +A+aZZJy/Sfw82KTseYTniWYjH4iYUbC8TVLfRjPw6V2VcF78pYkdAQenGglqw/sI +4a3FhJspN9xV/PoPbb7PjBJFHUt7ZRQt+D3WPuhLSjyPxwV+3u2OsQ1/J/sxcih6 +rW2g+OJYrK4YkOqX9tLRB39RjO4H6Eiv5eUAw/+vHHufKRu1HwKCAQEA4gzxZNzm +r1X/5GAwwyBJ4eQUHFvEQsC2L4GTJnNNAvmJzSIWnmxGfFLhfJSabnlCMYelMhKS +Ntxokk5ItOhxlUbA1CucEtQgehJwREpUljlk7cii5MLZEkz11QxIVoAhGlq3svFG +B/gwYWNVWl2CXcK2o6BBD9sIgzgp7qhmdJej16h8YkWn7HibKs+OBcdCu+ri7wU+ +VdLpdhN3uqo1b1tO58Gv+40vuQE3ZKDdMy55V30+0qEqg6dXvDQ9nwYFkw6C31Ad +Wpa9ZB0A0HNSou1xTWyl/hDie6dlN84RHGX8on4sjgPrb8A8WVis+R2abvh9ApZA +fRZ3H/ZYXB1crQKCAQBgjgEHc+3qi0UtwRZkiSXyJHbOKIFY/r5QUJWuG3lDqYph +FF8T3N0F6EMVqhGEl/Bst14/iVq15Nqyo1ErUD63UiyjdVtsMLEW9d1n9ZbyDd9Q +8y/C8X8X3kqsZqAwG+IZjuHA8tH5xN93iwYP4yaw5onO5QYV75mFuRAY4gKnpAc2 +81lbUVbJ5H60pdDK1iX7ssAhQf6C8kSa4vAPDtH4D9a3wID4WbQNl115Sc31q5QL +n5NomdkEbIDDGfr5euTnqlk3hw5F7voPaqmd6mI6Dqnk3vRDMihdoJCjTt4T2Rju 
+wK5E4OKEAh/3yJNFmNemY0kFWSgCjUyNbMjBUv9JAoIBAQCYS9QO+m1JUA2ZVd1E +eWqNkFakTIdL2f5kv03ep+wIxwq6c+79SUGr3UMh5hStvXCFYjhAJhbwc0rY13lQ +uRJdWk/sIn2CifxfgjC1MccPdxeyxGxK56PMGqG9qgrKjITA9sGxA7EFCYe+9We5 +/Coq9VaLoxpyjkWL8rj9m+N7RfcTAubaZseeIBuamj+7UOZ7KOM/2i6HMBQugys1 +Thu2LLRanDnups6yPEmPuHmPVA5YjX9X9VFpZcNMf33MuAflbe9qeNVuBQUQgCHe +TvQr5QFjAoJLTCDq4nrlQCZzFZtB9vQZsjZbEg8WuxG+vN0hSrUemxBTtmEH3bbm +SLn5AoIBABGxznQFXXlF3eLIZqLvItDMSTpFp8YPk8GQWPT2V3pNNjvK/j7eg+tn +VouXv5LjyLTzWLKnPjIU4t+qwu6R9nohZ62OjGl6lssVdjPnf4R6UKzRa0iIZtH4 +BlGncnAbzb6TJuLX7dNwICoUCGyvk9tdnThH1FY3ZAEhOi1G8LEh7aBrj9/vUZ2d +S5jzZ7kLh04AB8OP1MXM3sZE7VlIxUtT/NLlwC8zRsg84pAjg3U7PygIDYQDzCRB +4yIvDziTPqDB/vdCKt7/Xary5Xj4NwqcPCRf6HvdHYCVeW7V+mWcMKZgodQARQhv +qQCK9iiN08MAFNia/0/Bj4D7XKurNRY= +-----END PRIVATE KEY----- diff --git a/examples/data/x509/client_ca_cert.pem b/examples/data/x509/client_ca_cert.pem index 026a4e478412..62a0ce0545a6 100644 --- a/examples/data/x509/client_ca_cert.pem +++ b/examples/data/x509/client_ca_cert.pem @@ -1,35 +1,34 @@ -----BEGIN CERTIFICATE----- -MIIGAjCCA+qgAwIBAgIUZzkKhtgm6Y3RaksChHMIJFKV+U4wDQYJKoZIhvcNAQEL -BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL -BgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4 -NDI1MFoXDTMxMTIyMTE4NDI1MFowUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB -MQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xp -ZW50X2NhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1Rk7zsuwXn8r -KMHk+gmvaftFmlY+NHs1mKJPzyCGFnablnJtHU4hDpSvvNitoQ0OcurOo9V9ALlA -U2uw/1q6Yhg1Am4cXwSWHG0/GwCQAdPTVb7W1MiAd/IB5bx9xrwfjrpGLjVLS3/y -nOKP+kl1bf6WAcLEPClvH+kSG8xMwvg58ot7ipWQcWBTSuZLaz89d2yfxpvtwrvS -YDemY6f8Tkxil+kDjb2Jo/zdRDz8eIEOs1PcdztrdWWeQaYJVX6aEOHCfdVNOHw3 -jNQKyVREUgXjr/pkwo9fTnZjQdBUhZIo7NuPPG25t5qZK3dUDuLcVRQ5Vt0/45pZ -/HkZDCkxmSynZWz2gPClOHVPOG8Eqi0Mbd3XxQSsd1Go667oFotLvTuynbYhdh4s -xAJWXbFV26HgDXI5wXueXrs1n0stUlbD6KahfeoYBu+idX7gB4RftqhqlbIazu3y -hj22k8cMQEPkLhzmUwRt64juLA0+FRG0Hfr8vdZD+f91Qbv86Qw3c1/lckQIOlyI 
-MerljNbCbHJm9KOZGf1zizwvMVtVzuVtr6RY+Loov4gzhJ5kNSk/YDMQC42c2Yhz -Lr5y9EGe/cL8QXdKfjKNeJjCbzxTTFiVBq5XRKUgjz6ga+F7KGO7ayMBrexZ7+ap -z7ydlUYS+xp43hqdisAGmUMJdDVlHCMCAwEAAaOB0zCB0DAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBTq92tDG5TfVvTqbu1bA593K6aAwjCBjQYDVR0jBIGFMIGC -gBTq92tDG5TfVvTqbu1bA593K6aAwqFUpFIwUDELMAkGA1UEBhMCVVMxCzAJBgNV -BAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRl -c3QtY2xpZW50X2NhghRnOQqG2CbpjdFqSwKEcwgkUpX5TjAOBgNVHQ8BAf8EBAMC -AgQwDQYJKoZIhvcNAQELBQADggIBAMHOXRUUq5vf9G2NvnAR1lb0fTKx4/6B9rhU -Nli9uIoWGQyMu8icEMistUp4AdHWdhutKX9NS0Fe3e5ef6qIYCng0gVBE3fTHJd4 -V8MhGtyaK0K/gpTrJdClwK/litRIEjCFwNYEK8vtuqNjR82d8IuFjnbinb+IGCH0 -sLRGvvZch+dwM5N9BVRq20M2FZhyI+fWZmt1ZiBwnfy3xM+enD2I+/LOUFoxAmGS -m2vnS+ULhq7fLaK6vgyUIGqRDQMxYEql9QGzRIspV9vVhRuOCmowlJbgCv++eOUG -FvjlAPlQRGJ+ShpXO5n2pEkdjIJOrLf4kyviLDHffIl5I80fRWzv7GJ1HP+Bb9qO -LZGaiO3SelPhvJGTSV5uSZpgkFsBbgdbbGI60W2QQIHEwG0HdjnNk17+TmVEUoCj -rWK/Kzw5py1Egtibju4CiJ8uIKeew+2pfdnnyHoCVwCfdACc4dwRpet6fQvkRcru -5PR5MzZqUI2+bjg/hJrHj7SVpxpjcr3OZdh05T+heCVuPp+9mHBmcxbeA8rkMZAq -vILLwgwEriSbKy9Y1GLs2oaPNaWEpN9Q6kZPUwtwlzjHG3OOtldeXPpMVpg6Sb0y -3NnRfvfV/g2gm68S21j6qhGM2aeQCdCu5insqnR8GS5/stmuyCNnlst24JBneE0i -louEQ0EV +MIIF6jCCA9KgAwIBAgIJAOhoXtjjP6JdMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD +MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMjAzMTgyMTQ0NThaFw0zMjAz +MTUyMTQ0NThaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAO7fTqeU+8OfKMwXABNF90+RYL4X +YS4ULx4rpf14Ntp1SF6o3itCSM3jJfHzexj2Pm16aL+OQll8ODtvTadqVSMndMCn +UN/jVjxiMmjkSNKpwUGG69CsQzCKoueKBCEy/CZSopQae6Wxn7mqTAzhFlh3idNL +J+12UtdqDxnPDsiG2XBET3UrKyJeBxMgRyPi/g4wHfhH9oJ97jkdacUlLko8l22s +ZiMSSwwOlWxtTY5t0FbHu08ufP4eYTqC0LL3z1Fon4v+4BqUyK7BT3dISwPBmSd1 +uTD7Wbaa/QmfU6Y18dkNlK00GUAcKWgPfLcm7EH/AAz5XkqozVR3z5FLBYFTxVrA +Ly/Gu5HLx/uwoYWeYRWBOSkqvdgf9PT57imO4fOi1CTQuq/1LAdaxGkm7yXaz0YP 
+ySTiT6PvcLWFEbjrbufxdBrF4/ZsQz5vdJiKq2IQmCIKONJOFHWqgoF4AA7Ze1cl +mrK0eLzUlG1WmSy5mpjByRanahQWYvK1s0tc8IwMRRJY4DS6Dp99EVyteKZP/jc0 +x+ILet2ThDhjY3AxtkzlejyylABgl2AyGoGzZzbaf1q/0LfM6SfYBSVZK3TFR3Kt +8lQnG0tztoM+bnM/JZ8UZ61s16jJVxWzlZ+rx8rCpIvh3Cnl52DGo6oA4Kt60uDP +3iiTLGNYqEyHmzgnAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUdOqNqaSjcn7BRN3fLs4eTIp1W9MwgYAGA1UdIwR5MHeAFHTqjamko3J+wUTd +3y7OHkyKdVvToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC +CQDoaF7Y4z+iXTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAOnH +CrwiJd51oBic5PwjQBhQcUtGOfR1BJe/PACpLXTf1Fbo8bLT5GxZLATlw9+EVO9P +JhhH+oiUuvA7dE2SRiZXpY7faqtDgvVfssyCrvACkM7pcP9A5kM4LiunX7dpY2xp +naJAqDV5Av1mOohHuVEZHqV6xQSREQFW2IusfpCsPP+P+RPKM2o571e6oz5RGbuP +dQ39QycBTK8ezccxaDaH614peAnBi4Q1GuxzgNmXq2FPDcf7F1QcWMrW3jUI8npi +Q9rXRwrqUYP7Yzz+dIziGdpOfZd7x/MyCXuqRdFdA+bulGM2Es5lvtguPOFhcWp0 +3hzLJ+yolxyqxnNNdaU0r+TDbgxOBjw0VxahuhzFDeZsP6Civzp+Y6MRdvofNXBm +IBD4uqmQtUUyE2uoznXvZkXaSc+0VIGgs04AMS9irBC2oVEGDp0AbelcIhdgToam +/NTuOmxgadwDuEn3TIFYkzx84J81kL8g0HQ1N09nSXChkSVb+XlxC+Wosxoazydr +M4FOvaa1V4vnmIdA2aF1nWTzJNcc9FC23zTmQkV2YJ1IKNmxGd3xBZzUtUBu5OgZ +vPXECtUjRcraNuXeL6gSX0qBaaVkcdxhp8CpI8k6Qb+mgOaq/ixrVEKtczBVXjHD +pO6QmwMZtqR8JsStbMCYXa2owt4k8F3yMlIKE6qX -----END CERTIFICATE----- diff --git a/examples/data/x509/client_ca_key.pem b/examples/data/x509/client_ca_key.pem new file mode 100644 index 000000000000..77065d5cc8a9 --- /dev/null +++ b/examples/data/x509/client_ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDu306nlPvDnyjM +FwATRfdPkWC+F2EuFC8eK6X9eDbadUheqN4rQkjN4yXx83sY9j5temi/jkJZfDg7 +b02nalUjJ3TAp1Df41Y8YjJo5EjSqcFBhuvQrEMwiqLnigQhMvwmUqKUGnulsZ+5 +qkwM4RZYd4nTSyftdlLXag8Zzw7IhtlwRE91KysiXgcTIEcj4v4OMB34R/aCfe45 +HWnFJS5KPJdtrGYjEksMDpVsbU2ObdBWx7tPLnz+HmE6gtCy989RaJ+L/uAalMiu +wU93SEsDwZkndbkw+1m2mv0Jn1OmNfHZDZStNBlAHCloD3y3JuxB/wAM+V5KqM1U 
+d8+RSwWBU8VawC8vxruRy8f7sKGFnmEVgTkpKr3YH/T0+e4pjuHzotQk0Lqv9SwH +WsRpJu8l2s9GD8kk4k+j73C1hRG4627n8XQaxeP2bEM+b3SYiqtiEJgiCjjSThR1 +qoKBeAAO2XtXJZqytHi81JRtVpksuZqYwckWp2oUFmLytbNLXPCMDEUSWOA0ug6f +fRFcrXimT/43NMfiC3rdk4Q4Y2NwMbZM5Xo8spQAYJdgMhqBs2c22n9av9C3zOkn +2AUlWSt0xUdyrfJUJxtLc7aDPm5zPyWfFGetbNeoyVcVs5Wfq8fKwqSL4dwp5edg +xqOqAOCretLgz94okyxjWKhMh5s4JwIDAQABAoICAAmMq9xPPHFpn3vpP3uFxIlN +yoxO6veonumZ3Rzw/WBmZ+pA3gDkuXxhpFaz4SvyTDScPCvMSCLDsIvPu08CFT0+ +ipBZIAaTVBM96b3/wlmJp8wy1KKXAGikYjbXcarSGvp9OzqohGDvZO9LO5cYOIh4 +3u2vh30ayd0KxGfHu1OQ8IhocrTAcQ0CrU26cJ2iqX1vtwMB/XziA/AMmPnkrqER +IwyjY8HrLUziGF8pT3xuL3IIshhMR3rxQ/nO2QEOnx8mC5rRKaxmXk9+MusV3Mnd +p33IWwr2QXPnZk5ILFPsvCptPJBgENJbTdx3IglAaRmKVDowjfB2Jx9FWur4ENQy ++yCzf0ygRoXnugtwE48/L7P8mlqZlZsxQbUUjXEPtht8rtM4CR5b0v7PHXiLh1oM +igfy1RDAQAZQRGIlWCOeV2soiyKLnCGyAaVXcM2ksDkYOSH4ObE4KwF1Ph87lNaG +ywolsPvQD0ygymXcuStrYHWamTp8qRjNvZBcThs3SaKN+lxXxPng2tBPUwU0S6nj +e0pjWco74elBk+fjjd0wNolKjUD7FhRXlWiXz9BgcCjRD9TLoVk8mp9cFL7OLzJc +735JmNKP8C5Qs91Ugo6Z9tWQQTdGHZe9ElUY0fWP0bs+4iBaadl63R26tchLncZE +LnYsi2AjDdV908cEkAiBAoIBAQD6LbGeyFHZA42nuSw/NFsMVldqU6QwmADQI3Tw +JEdw2thS8VIX2c8aeJkVL++dNmSPcqs4NqhzgJSm9o1xNqGZovAPK/B3NmLl1kzG +JPwSr8QwNxmKwUlbt1K48qIV0JmetOgRG/ll5ux2CxgWHzwgRwtvpbnxDa7Gf7BA +UfH7AfZJ3iV+HlJSxr9XxNgFoNEtpP9sqbOgt10f5JJlIELCTa38iMBojAGxlzyj +7DGYY/diQDr+6mRNnv2pY57dOnmdvN1w+p1W7saaeRCeltva/G+5n5AWMFl5qBjT +LDktBE+okH5wapkUsZzZTByTgFXdBC2wY2qBrOexBAyS8/F3AoIBAQD0bkNBc1ya +KYmWlCsVSUZxUGSOp9g7ZdzlB/1G523s3PltXSphsC4mACs7ZAs5OAO/bu05kurp +dOqEAxsC05IxD2/gGoarC6QfTum9CMNoKrvtczA7Gl+6D5djum17lULY6YSBO75J +L0FQK6nCVGfAbBRAqhiFi+9kXvNThuqjgoiCNwQYxaG8aovoAKTFdkzQjDw2tUgM +jqCM6ifOBJIRolFq2CBom8nB+wpsI1naFLaOdg0Luz/Ds03gD9nWa6a4XIowKCml +Tek1Q+S2hZoTgfOlKRbCcM1KyoaI9LKI/pbKmpNyyrADw/kZKevfsKnYwMpHlaTR +NSuQ2VJKuxrRAoIBAQCBQ3bQ+eQAYyugC7dm+OBKYZpNH+ZoDUHuSUO0iKo5D3pS +cMnf9PRjUwiVv+zoqCARVkhNhUBIXZlxI1c1teqNfXjX/fYDQqCa7L1Ca/2qkhKm +bvHNlc0XjIM7eHJzHxMgw4xcur2D/2sSGu1ZEM56RvsLtu96M32opnUk5rJG5V6i 
+EBwDLBuRFYvsB5MuZUdvdB9dv9lGIzgEsI9LnP2hc42APBBedGizn9b/Q5zkhlJd ++53/9I/a41lhWk3NNNd9vwYTyAnfzwPi8Ma7imsSnPgFSwKh1F2G1GnvQpxQPDgE +epQ59XofDR5j0EW7mMXEqtIIn3V6hyI3fkYY795FAoIBAQCsx7x26YsN1krRzA7g +TxmiQ8exJ2gsJIcOxqT8l98WTeVqry6kOxuD9R6aLs/YNIZBrbG2vuma+PBFPMS9 +LLzsPRNCAL4s7l+nWerTmvw2B+8rm/796Fi+dwL2lfOKJipIllj52TdbGDI874Bi +Q7PLSxrN0u7eh9pCwvORmY8G4eCI20bkE9+OBmq7JqlSg5ss19RAf8hcR/2pXmOg +t45hNLIEqp3OFEF8A26MnjiHdZjN/xidsFEUjwx/U/USIqqJK7Dq9ZjqprYw1rs3 +Yh1VqMiHeRIDhCU5twt+iCojuILy2G1d+XSOVNsiNIXtaz3EYBMcouUMlV8kVtpa +xQPhAoIBAEr8U7ZaAxN2Ptgb6B8M1CVNE6q7S1VuX+T8xkciadW2aRjJ3PufFfsk +Zo12fP9K/NeOPTIz0dQB6Gy/CKzDLb8NnJCJnCUUaO8E45C2L9r6qvIJpXWHp3vo +neGO49y/5st7suOZkWU2B6ZGwNWH90296mfSKcUNxSRMaHCotPdVDyvOgLC24ZWR +6teRaxB2sVZYqmoz+4+G8SOK40bHJKf1kwujbrS3OqzDzEeC/STtqYZWPW03MFkk +MBPQvwCWMJINv4zz4YrnOaA9COc1/fTXCG5kKYyalPD8VKxi1usas1pZwIqZkuwm +D6kBMuZ4gkKW24IYzXzOni0/BOnpOfM= +-----END PRIVATE KEY----- diff --git a/examples/data/x509/client_cert.pem b/examples/data/x509/client_cert.pem index 6f82cc3be84f..e35b94b1f27a 100644 --- a/examples/data/x509/client_cert.pem +++ b/examples/data/x509/client_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4NDI1MVoXDTMxMTIyMTE4NDI1 -MVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIyMDMxODIxNDQ1OVoXDTMyMDMxNTIxNDQ1 +OVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBALUoje/J3uPOJ0dapY2s7mGLVPhYRaHyRnJE2/TY -zFOB0IisAF3R7BIDufQrHhk3fh0JazCw95TDD9rxsKEVs6Z50lmDkrg/bjlsniE/ -n+M1JacaLQW7xfh2L+Ei4jvMr101nAsimd6IxFU9m3+2SFbhPBG/GWWJ2ZKqQblz -DVMpNg9FYNmMe45vLevOhdPQBE4cVoAPhI9Je+P4Koslebhor0koUeQVeYdBbCq3 -3dQJPAHjBST6mD9mJI4yVrE3Xso3LO85WROUPhRYQyXhrgU15W6g9qTpMTfkriUe 
-FYLCtAPU9LBodyvjYLuwoEoyRVsA6Zh/vABteD8Afl552fV9KwN2fRVbTDAxQCp7 -P8gE3/rD1RKv7KBNJ/LrwMu7g4VO+tzYDxWee+eXPQ6M/zRWAb3E0v3UNHsF1ZBl -rlFhEiRShHrXDEKMQwCTSrRjwYajUpZ/Hq2USDgkLepKmTmCaoBfWHPyZwblqSTn -A4DNOh5N23eJyrLnJOPYjzZqEPfX5hDTjFRdVTQxtmYlJ1muwtlNyuwZDImhjO6G -54pPj/bV6gy1+YpIQBemPoXtqqmcRiEVWSV5zAizwRaWf85tqpxb1Tjuj2OpD9le -oO4JX0HLjhyQBoKspNohu2I4+s7ex/w92bf76cTpYTbMJqIp37YZmfPVztHVaMl4 -W0xRAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFMRdhhib+RS6IJpQ -zFsaKH1BNbyZMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD -AjANBgkqhkiG9w0BAQsFAAOCAgEAHyQwLSo/UdSoZCKcdeb0DCkeABRMUPIysroj -gQJCe3cAKOT+TxL6Qnp6jzM2/vt2T/lzlRp7RAB1jH3J4wy5re4cbCn1sbII3Rog -Nm4PKcw6hfUET44t1Gk9DsCjgvxEIirFBWVpxfn+YDI916iH1fkNURaMP+yxpQBL -3K4bmxanBiyBUHC8cyChLMD2NwXjOAA4pZFk0ohpmK0YUk4ra3Z3Q30DCH6NZ1ZP -aOMDHrCXU6MLlmPk8yiOnotgjqiYEgi3Bzxd/OHpR41Xo8k6g3UrN2GEQFs17ibQ -CQasxodOar5Vezu6ZKCYk5TaY4lugT34w+qxi8tVF54WY2jtWY5PUmU6ZT2Dw5cn -CQzlPUdEebOc1hltTvsD049/2lZmGlMXk0dykxy51jYAYznf2rb3cnC1vu1Wgi3w -J28xXBYD8AvME9jaJ6g3L+KR+AFCSLqpUsTxvu9zKf6pLrVtOCl+9G69uOK/wono -yMGNeel8rkzwzzr1LNrhmcKHqipkq83vqxIUT/mbpBUKO1ZXVG/TWKS6bpBTc4Pn -hBCIvGOSyoKuEiXnFr6fqLhLskUNcCNl7iOfA9h/MhS5ZufJXhhXu3Wbo/KC/mNh -y+fr1S9AyA+EJaYtJRKAOeewGvXYb881UNXWGCQU1aVNJnujRKFyhd07sEjxsad9 -Bn/aYes= +AQEBBQADggIPADCCAgoCggIBAL2ec6a93OYIioefCs3KRz752E5VfJPyVuxalBMc +7Dx84NsdwpbUyDT6fO7ePYM8IvYAsLc5coLCP1HKGGRmYm423WZf8Kn93BDl0XcN +4bgtW9ZrekvYcXqSzygz3ifdQeZljZrqW43dkkYR2vWc+uJXs+vrRVZyUSLLbe97 +9zUbWbOfHBc1jK1vTUakl08VhllYbO0m0SYZIni0sioItVdVWTz9XE2COavLqwwL +MIq8N7JXEdYJC49JWfdzvqZYTxOn5FSTCWen7/mcZmuLYPwUCkSu05M5T2o1ygkd +ohA+/X9yjToPJ7NO509lKHWo7+sp9if6jZsiOU45/t84pD6juVZSZ20/A9i6hjtj +C0SqYk2iQEtRp+lT6yYa5ffeNllFUGtM+xq2are2n93PnXwMTUlYGuTtkyRPG717 +ZtQjKQuwfdJNoNbJl2cfQpmtLdm4Jzrg5cWiiFro+aqnZxIfUEEDkIBaUjYmwMkS +Qq+S32L4f4u7rtbnzdo/jVwq0wpSjTGQJEab+v2wZpDhVbQblTyI30A+TvBIzLil +09OX49/teZCp05kOJy0V/yXdQtPwlQGXdsCUmD6dnGav17fB1witXDdG+4SNoyF/ +PN+8wtlMQ8fWvLdxLsd/Rq6CEZQV9mBhrQxXUmFFDhd0O6wfxR/lVFxIWg70Fz7P 
++z7tAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFG0psrHrGny8ziVm +RtulG3f9ROrhMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAtr1dzSQswIOlEGlLtoAwkL7ys/gP2fcdh7Jl +ggiPs266yzZFyGGdd2GKo6tcjdBNjfnO8T5h8eLzj7QlzKPqA/l0BgAW7s7WX9QF +wCivw1DHE815ujlQNo3yve38pd2/I0hdf9GtQLGyOirYpwW5YcHvpmLezrW6J3UU +CWIfYhqO6bSs+HCLkvQdsCG1TpveWYXfC9aXHjw+ZGOjBMEt6AgdWctwzTjQfZub +VjZosBC3ZkDjkA9LTqKP5f8XSWt89J4JCYkiFRiJuYYiNYcZpb0Ug93XjEHIHXMG +N/cD9fCB2HovoVu8YnezpSrqEhqEikHSq80fwbf+NaT0CEbPMx3UMzt8d8gwUiwE +nzzf/o4uOwoofNWfka0J1VPY1AtjUDvz44LyVhp4uvkEJEK1WQ46mM68H/EOUmpd +fHANEbV8HLq2iOjR78n5+MCHRcX7duScp5wT0ajfDg41VrhvV/u7YctFj8ynQJg5 +cqbH+GgTrEfAFFm5mZH1SGqNPyxr1eQFWXMRGE7R/NoyQo2uqrSRmz6JFXlnWtxF +YmLhnOdQaytcpiYN2YVyC/rLK3l3Tbh4u5axvlZP/hi+nQluiZzkH97iUqXcBU/9 +jYNohnJzXMHTIZM8FQY+9uGw9ErdDo7FmX5Xkp4TzEz9k10m1fnt0njSEzITtqpg +MoO9n00= -----END CERTIFICATE----- diff --git a/examples/data/x509/client_key.pem b/examples/data/x509/client_key.pem index 6cd652c55435..d9c4bae3bbb5 100644 --- a/examples/data/x509/client_key.pem +++ b/examples/data/x509/client_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKgIBAAKCAgEAtSiN78ne484nR1qljazuYYtU+FhFofJGckTb9NjMU4HQiKwA -XdHsEgO59CseGTd+HQlrMLD3lMMP2vGwoRWzpnnSWYOSuD9uOWyeIT+f4zUlpxot -BbvF+HYv4SLiO8yvXTWcCyKZ3ojEVT2bf7ZIVuE8Eb8ZZYnZkqpBuXMNUyk2D0Vg -2Yx7jm8t686F09AEThxWgA+Ej0l74/gqiyV5uGivSShR5BV5h0FsKrfd1Ak8AeMF -JPqYP2YkjjJWsTdeyjcs7zlZE5Q+FFhDJeGuBTXlbqD2pOkxN+SuJR4VgsK0A9T0 -sGh3K+Ngu7CgSjJFWwDpmH+8AG14PwB+XnnZ9X0rA3Z9FVtMMDFAKns/yATf+sPV -Eq/soE0n8uvAy7uDhU763NgPFZ5755c9Doz/NFYBvcTS/dQ0ewXVkGWuUWESJFKE -etcMQoxDAJNKtGPBhqNSln8erZRIOCQt6kqZOYJqgF9Yc/JnBuWpJOcDgM06Hk3b -d4nKsuck49iPNmoQ99fmENOMVF1VNDG2ZiUnWa7C2U3K7BkMiaGM7obnik+P9tXq -DLX5ikhAF6Y+he2qqZxGIRVZJXnMCLPBFpZ/zm2qnFvVOO6PY6kP2V6g7glfQcuO -HJAGgqyk2iG7Yjj6zt7H/D3Zt/vpxOlhNswmoinfthmZ89XO0dVoyXhbTFECAwEA -AQKCAgEAjtzrijWVy+sQuMm4k1DUMSKzIKJkT4GDoqvBFoc+I4DVVmLmaxaYZ+B+ 
-bhruwo4rq3R5Ds4QgUWPJGfDllVJ9rhNdYA4XYrQPwL0dV36ljCcf/o5lTLuvbFe -stpStTwG86fKZlGkLIWI53wNPBshUzqOp6QfwB6E8Y/JAxnDYVi3pDVfWlDaQ4pU -GYklqtN6AauBX75dGK6nwDE+Q7uLES2lRjlA03FIBK1IQyv7CTM7GnXQ4cep9x1z -KJx0F4+F9kyq6AE+yRz4FA1C7wXZuYw2YhcYSxcHVH/IAceGyTcIxZjUWqYXjQnk -iD+TONAKN+kxTq01MtUhpfWasqC/i+6QU1eqf5YWpd6GsRKyrGgO02NND/SM6Z3V -+S9og4QAjdUyc8dkN+udd1K1CeYNFbmhrYpF2aS9k/PjDP3L137hDW6Cy+thIjZP -u9OB6ba2yUrbQDlmkCbh0vX+77HKAbT5bj8h9r7MqzNsPsgkaKS8gZ79T/Whr/ft -Xiu+eo/u1jtjwUjNMKGxQ9XiU2UU7QccthHHLcYaiv4eySHXA75h+Sho9cD1Vvs/ -ms1/nbCSuU9TSK0UK/V8YjeDA0eVGtDCX3weIW2ECQ80SoT7uf+fhjaLkvOadb7f -1O9DvYVYZvblxUm8ajOh+/n9VyB/I9R9Q8GdGiauXy16uXLZMdECggEBAPEx+4aR -XfuXmnEoPk3GpvyrRlrMy02aklrATKcxeAYo2uiIcuQv65E3AmE1GHpoxmAKjLty -fuUfGdT7f4uGeF6p+IEkW4jQm56UFbCdun9kduEaN9FRylTBqUKWIY2rtRS6nHZ8 -bAkL/6Uv3g9NWx95rV7HnAfC2n6AIvc8LRfQVVqSvjPbsEPvJAT2353D0Rb7vC2M -1hKeBrSNBiy57EKnrMDOhNpBvSBU0Zc+YsBRNAimKyBz7dt35H+THkFaEk9vGtG0 -QkDvngPzSX99Ojwk2mo9jGrh7LHErWih5C73IfvYUh3kyEwbZ5y25i9Z0F37boIG -jHSVvcPp+9x9PNUCggEBAMBHLyhBUAQVZFXtWysr0BjO34XffgkSt1XQa8cVxif7 -glWauUZtjfC7PT/qgY0mx2dI2bDcKiQQCBlVavP1RLRwj3rZv23eit7z13UgHSa6 -3dnsgpO2Zux6qoV48lO4xbuFqZtW+MP+9jthKwr95r8lmZ4cmGQwXXcqNsR7skFt -30Uhcyn+MTfyLwcqt8g9i98rrJmbPAuIME/Sz9DLIi6UxQLI6MeEn92AzECNDp18 -CypOL+sDrLw/7HNHNoSblgm628BHpBgT2qaOYnawRr0gni7MHXOAbDopKYDAtLuU -ZMFjlILdfiSDouhvKtMlZG9arTB0TasdAQJGPz53H40CggEBAJ4JDvJsOzVHb2Vn -ZfNWD0INA0spVqhheDXIPDFsg2UdzdmA1i7XizUZ4xBIVuKV1i1FnFKRwb1ktGtN -4pNMJ4B3RCFx7hvl+6FbDB8uKe2gqRfzMtGPEtCYF8xOTGvkLwEHCM/F1I/U8cuN -YqWKHQOxmTw58+1N6hXq5X4zSqSI1/RBpCiccJEClwo9q+VWUaEKjpEV74pBSslw -gbQ6mihOby3h40CSxFXz3WSI9vFmA38LScS40Qf1NZ21iqRtXQP5G4x93M9pcZLL -DMRhDBAuYYItE91QbONJqAmf0cBII1c9tQhrSCY96pTPbmFmKtX5kb3Whp85Ih7F -KEafNIUCggEBALMnoIDZmjyz0fFeX3wyLotu9kY+n6jEj56dvE6bsy694grxR4Cf -w4lybPeJAX0LjPBnqK5p9bn0VheEx0rYVVPrLUVCbmNo3+wtN6wiaAcWRnAvNtt7 -MRtWkFwc/W2U1GiNeiMLPm8guT1KpFhxiva/igsQic2QYwYNh0o8FzNvtIEtUajm -9+Uw+zCqVON2tUUT5JabVa9JDfrSamAZZZgRdh/KI1sD8BDrWWUsCVojoiOhBnTr 
-z5730ND4oYudjIc0XF0kY3krxqc6M/Ry+vZt1fW0qhxcpHrsr4cQB1ZgRiELL+1f -g5FyNfBs5HIofRRkYMqtE1FEjRQZcAQ76mECggEAaOUtM9BZuV9gEwmG4hmFfeXq -vJOMvlsDkRRbLuDQ1B8Vw3v7lt1+K+KfBt96MoQe08MyXM7sIMB+hn+zakNaM2W6 -UzTnAPQQAo+wELqj6U3DrV7zw7I1hZTA9G7qxMAQBEmk3u2q4/zWDAcyAx3D9JVj -L3G14pYf0drFLChnknVTPRaF0Q5upLYzCPLMa9w0FLKy6fkfdWdpzyjvW7+JEeFY -koA98hrottqJB2CcqehQDSCUHKKbd5U15y1NV1BQloaPJLwpPAVTkBszQSHanltN -l9POJBJlfQ1eWL88wHdKiLbtOg6PTfAmfghIRxakjHvxBgFO1/xG6Lxm7QwUDQ== +MIIJKAIBAAKCAgEAvZ5zpr3c5giKh58KzcpHPvnYTlV8k/JW7FqUExzsPHzg2x3C +ltTINPp87t49gzwi9gCwtzlygsI/UcoYZGZibjbdZl/wqf3cEOXRdw3huC1b1mt6 +S9hxepLPKDPeJ91B5mWNmupbjd2SRhHa9Zz64lez6+tFVnJRIstt73v3NRtZs58c +FzWMrW9NRqSXTxWGWVhs7SbRJhkieLSyKgi1V1VZPP1cTYI5q8urDAswirw3slcR +1gkLj0lZ93O+plhPE6fkVJMJZ6fv+Zxma4tg/BQKRK7TkzlPajXKCR2iED79f3KN +Og8ns07nT2Uodajv6yn2J/qNmyI5Tjn+3zikPqO5VlJnbT8D2LqGO2MLRKpiTaJA +S1Gn6VPrJhrl9942WUVQa0z7GrZqt7af3c+dfAxNSVga5O2TJE8bvXtm1CMpC7B9 +0k2g1smXZx9Cma0t2bgnOuDlxaKIWuj5qqdnEh9QQQOQgFpSNibAyRJCr5LfYvh/ +i7uu1ufN2j+NXCrTClKNMZAkRpv6/bBmkOFVtBuVPIjfQD5O8EjMuKXT05fj3+15 +kKnTmQ4nLRX/Jd1C0/CVAZd2wJSYPp2cZq/Xt8HXCK1cN0b7hI2jIX8837zC2UxD +x9a8t3Eux39GroIRlBX2YGGtDFdSYUUOF3Q7rB/FH+VUXEhaDvQXPs/7Pu0CAwEA +AQKCAgAtlwQ9adbLo/ASrYV+dwzsMkv0gY9DTvfhOeHyOnj+DhRN+njHpP9B5ZvW +Hq7xd6r8NKxIUVKb57Irqwh0Uz2FPEG9FIIbjQK1OVxEYJ0NmDJFem/b/n1CODwA +cYAPW541k+MZBRHgKQ67NB3OAeE8PFPw/A8euruRPxH+i3KjXSETE8VAO0rIhEMz +Ie2TQRydLKp71mJg45grJ17Sxmc7STT8efoQVKgjCwPkEGiqYpiNk2uhZ2lVGRC9 +cyG6gu74TdyTDQss1e7Xt+fUIZ2+3d6eJt6NvjC+25Ho4SwO9eYjF1qnQ++KqATr +TOoOaADPLLaXZCFZ1D+s9Dq4Vrj+QGk8Fajotj4gBpUtc0JxtvYM9EhlW7DpchYm +Cxe8vmEi/54YErXKawTUXYBB8IeDzwtvi3v3ktmH8BsGJ6Y3RXDI9KIG/6IE5Xeu +hkPCJnB0e3G2nlaffNSrVknxF+z74DB3T2kj0zC/4H4/hHo4W5D/pswcGWlhREWG +E7ViXJjBRkc5tpS9HfNdZ2wHiccioDIdGSHGqGMF4rLCUE2n+zc4m6pvvNCjN5KB +S4+zps50Gqtbp3DH2h1YLtkzuzvDhgpMPyJ1qZsdgelRSi2IaE5oekuBGP2WeXFw +DLI/cijc13cCacH+kpllQL//zBP8mMGmussWGgrVXdm9ZqD+rQKCAQEA6OG+s8sa +QZJ8W1nukcaS5rSvJBeZO6neCd6EB4oew5UGJsSz+x4RtJ7aJhdTGtyCXqiR2uFw 
+SBYdTcOgNbBUXg39vWAv+k2lmxiMGuLnAcNcGYyDLXr1SUJwe4Be984WNFdqzY0z +LCd9NvutWWX0Xd1VBdhlDuu3eBenzPBKIxTk3N2gLvzYxC/62e29Trsm7Sur11ut +Jay/CRdomjaqIiZ8q8qgdSU+pPe2DZYzUOutySJhLUegrrgWvPS/i8FHf7AGRgki +wpFn3gy5zCsFzr6n/TzJ5zQvlz+PcbUHHb06U1cnT45fkFNAJJvBYa4vi/tRx92E +Bi8d4bn40fUo3wKCAQEA0HFDHzhRxN/RbzBkymGlgfrsKcBdaAzgClo5uAXr8sdi +efsgBFo228I5lK6ywfzOfD/UxGB6ucdkZb/tRLtoK0OqOGiNx2Q1yazRVbuhrBrR +Y7DDbh7164o/MAYqPGxTMUxzXia7WBtNm00Tv9pDsw+NTzbrk7OxkLZWbjQEj99T +A9pcqXYA1RJtD/6io/43/oVscWPdRrbrNrJz+27Bsau20MBheVmX5sLTO2iWKTN4 +/ofrvOv0ru0I3ACHiLIaQFXs4snQjlhJm5MJ6kuZVdYKAzyNE+YOPnAxoiQAlHau +E1aV8ON7jmjhwxa2QICCwVcUNmwXU4UztGyGZ5a1swKCAQAi90Ia3LPkhIoHbUlU +uev0l8x0LtbjDm44LSDFwQc9dnKl/4LGgY1HAVLfxUDFF7a7X7QGmTKyoB9mPakg +ZolEVfVzKa4Kdv4We2kN4GOu8BYz/9TyTzPk/ATHhk68BkVvNnDizACS8JrsVn2A +nr5CGalaZ1NFGj9B2MtpCesXuVtjjiMu6ufhDRMtBXUXDSKbGaODglBNB9LnGoyq +GusQlZbCdHoDHMR7IHZFM/ggfkJpoK/WjJqjoSBI3raj1TFXCqbmfRiq/goKXP7I +mO0WTaoLa8Uk4cEDhJeVCwk2feL0AHH2j/npQZav6HLwp6ab7fApgikAhLKH4dRq +MdUhAoIBAQC7svJVf7qqRT3sGTD5yXpnlJPreOzj0IxC5kKJgtOYuJDl9Qw8vxwd +QkXlrHcOFl++JSCsgZCiEHpI4c6AER5Zr0HuL8BUJ9oDtJqA0EhimXeqhLdHR5v9 +sWz7CuInrQgxIX3V75zOVy/IRF0fayWBbeS6y2LRi4O/I2KrNC5TfC/eDVlZxAg1 +1rTdLVg5wqebi3w+k0Xj8r3WcFXeuTq0ikNCsapUwyf1RcU+/wwRJ+exlKXkZrnc +d1h9/AAQSQk4m+eHxWIHfFs0O/E2yULXt7kmdvU3UPfMo+0d67uV9VUF1veIhuBx +OeLqcV5GsTKNdaOe6jELJayMsRlK2LzfAoIBAEoWFSUdf3ruvj+ONju0TDtdvvTb ++i+3ttqMK/duYM2TlD3Lvqyx3kNxlMTAArfvnwtKVSw0ZIGSPc/5KHnxldcdALgT +4Ub1YesUv5585thMw1EWyXAPognLhfTEVSLYKcMPoBNCv7FvAT3Mk5SZPReRkbT9 +oqDAzg7r+0+pjD9LmnIXfCxfbSV6zcBFF8/iGAmzh3CanDqVkUds1+Ia8018cfDS +KW5PQAEnJC/BZAI7SQsxH0J9M7NYxJRN0bua5Be0N+uuYSOa+d9yecugfmvga6jf +9nEcohJShacCSkQvIXlq5Uy/WBb6sbiTmHjjW14FG25B0rrQUjmFAUiYceI= -----END RSA PRIVATE KEY----- diff --git a/examples/data/x509/create.sh b/examples/data/x509/create.sh new file mode 100755 index 000000000000..2b5aa5cffa07 --- /dev/null +++ b/examples/data/x509/create.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Create the server CA certs. 
+openssl req -x509 \ + -newkey rsa:4096 \ + -nodes \ + -days 3650 \ + -keyout ca_key.pem \ + -out ca_cert.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server_ca/ \ + -config ./openssl.cnf \ + -extensions test_ca \ + -sha256 + +# Create the client CA certs. +openssl req -x509 \ + -newkey rsa:4096 \ + -nodes \ + -days 3650 \ + -keyout client_ca_key.pem \ + -out client_ca_cert.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client_ca/ \ + -config ./openssl.cnf \ + -extensions test_ca \ + -sha256 + +# Generate a server cert. +openssl genrsa -out server_key.pem 4096 +openssl req -new \ + -key server_key.pem \ + -days 3650 \ + -out server_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server1/ \ + -config ./openssl.cnf \ + -reqexts test_server +openssl x509 -req \ + -in server_csr.pem \ + -CAkey ca_key.pem \ + -CA ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out server_cert.pem \ + -extfile ./openssl.cnf \ + -extensions test_server \ + -sha256 +openssl verify -verbose -CAfile ca_cert.pem server_cert.pem + +# Generate a client cert. 
+openssl genrsa -out client_key.pem 4096 +openssl req -new \ + -key client_key.pem \ + -days 3650 \ + -out client_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ + -config ./openssl.cnf \ + -reqexts test_client +openssl x509 -req \ + -in client_csr.pem \ + -CAkey client_ca_key.pem \ + -CA client_ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out client_cert.pem \ + -extfile ./openssl.cnf \ + -extensions test_client \ + -sha256 +openssl verify -verbose -CAfile client_ca_cert.pem client_cert.pem + +rm *_csr.pem diff --git a/examples/data/x509/openssl.cnf b/examples/data/x509/openssl.cnf new file mode 100644 index 000000000000..d1034214e1d3 --- /dev/null +++ b/examples/data/x509/openssl.cnf @@ -0,0 +1,28 @@ +[req] +distinguished_name = req_distinguished_name +attributes = req_attributes + +[req_distinguished_name] + +[req_attributes] + +[test_ca] +basicConstraints = critical,CA:TRUE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer:always +keyUsage = critical,keyCertSign + +[test_server] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,digitalSignature,keyEncipherment,keyAgreement +subjectAltName = @server_alt_names + +[server_alt_names] +DNS.1 = *.test.example.com + +[test_client] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,nonRepudiation,digitalSignature,keyEncipherment +extendedKeyUsage = critical,clientAuth diff --git a/examples/data/x509/server_cert.pem b/examples/data/x509/server_cert.pem index 3e48a52fd108..f1a374008342 100644 --- a/examples/data/x509/server_cert.pem +++ b/examples/data/x509/server_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIwMDgwNDAxNTk1OFoXDTMwMDgwMjAxNTk1 
+BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIyMDMxODIxNDQ1OFoXDTMyMDMxNTIxNDQ1 OFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAKonkszKvSg1IUvpfW3PAeDPLgLrXboOWJCXv3RD -5q6vf29+IBCaljSJmU6T7SplokUML5ZkY6adjX6awG+LH3tOMg9zvXpHuSPRpFUk -2oLFtaWuzJ+NC5HIM0wWDvdZ6KQsiPFbNxk2Rhkk+QKsiiptZy2yf/AbDY0sVieZ -BJZJ+os+BdFIk7+XUgDutPdSAutTANhrGycYa4iYAfDGQApz3sndSSsM2KVc0w5F -gW6w2UBC4ggc1ZaWdbVtkYo+0dCsrl1J7WUNsz8v8mjGsvm9eFuJjKFBiDhCF+xg -4Xzu1Wz7zV97994la/xMImQR4QDdky9IgKcJMVUGua6U0GE5lmt2wnd3aAI228Vm -6SnK7kKvnD8vRUyM9ByeRoMlrAuYb0AjnVBr/MTFbOaii6w2v3RjU0j6YFzp8+67 -ihOW9nkb1ayqSXD3T4QUD0p75Ne7/zz1r2amIh9pmSJlugLexVDpb86vXg9RnXjb -Zn2HTEkXsL5eHUIlQzuhK+gdmj+MLGf/Yzp3fdaJsA0cJfMjj5Ubb2gR4VwzrHy9 -AD2Kjjzs06pTtpULChwpr9IBTLEsZfw/4uW4II4pfe6Rwn4bGHFifjx0+3svlsSo -jdHcXEMHvdRPhWGUZ0rne+IK6Qxgb3OMZu7a04vV0RqvgovxM6hre3e0UzBJG45Y -qlQjAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFFL5HUzehgKNfgdz -4nuw5fru5OTPMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh -bXBsZS5jb20wDQYJKoZIhvcNAQEFBQADggIBAHMPYTF4StfSx9869EoitlEi7Oz2 -YTOForDbsY9i0VnIamhIi9CpjekAGLo8SVojeAk7UV3ayiu0hEMAHJWbicgWTwWM -JvZWWfrIk/2WYyBWWTa711DuW26cvtbSebFzXsovNeTqMICiTeYbvOAK826UdH/o -OqNiHL+UO5xR1Xmqa2hKmLSl5J1n+zgm94l6SROzc9c5YDzn03U+8dlhoyXCwlTv -JRprOD+lupccxcKj5Tfh9/G6PjKsgxW+DZ+rvQV5f/l7c4m/bBrgS8tru4t2Xip0 -NhQW4qHnL0wXdTjaOG/1liLppjcp7SsP+vKF4shUvp+P8NQuAswBp/QtqUse5EYl -EUARWrjEpV4OHSKThkMackMg5E32keiOvQE6iICxtU+m2V+C3xXM3G2cGlDDx5Ob -tan0c9fZXoygrN2mc94GPogfwFGxwivajvvJIs/bsB3RkcIuLbi2UB76Wwoq+ZvH -15xxNZI1rpaDhjEuqwbSGPMPVpFtF5VERgYQ9LaDgj7yorwSQ1YLY8R1y0vSiAR2 -2YeOaBH1ZLPF9v9os1iK4TIC8XQfPv7ll2WdDwfbe2ux5GVbDBD4bPhP9s3F4a+f -oPhikWsUY4eN5CfS76x6xL0L60TL1AlWLlwuubTxpvNhv3GSyxjfunjcGiXDml20 -6S80qO4hepxzzjol +AQEBBQADggIPADCCAgoCggIBAL5GBWw+qfXyelelYL/RDA/Fk4GA8DlcBQgBOjBa +XCVDMAJj63sN+ubKBtphWe6Y9SWLJa2mt8a/ZTQZm2R5FPSp9rwdr04UQgmL11wh +DCmO+wkRUeTYwsqcidEHRwOxoctyO+lwgYw983T/fp83qtNS4bw+1kJwrLtFdgok 
+Kd9UGIugs8BTFqE/7CxFRXTYsNy/gj0pp411Dtgknl1UefPdjco2Qon8f3Dm5iDf +AyUM1oL8+fnRQj/r6P3XC4AOiBsF3duxiBzUp87YgmwDOaa8paKOx2UNLA/eP/aP +Uhd7HkygqOX+tc3H8dvYONo6lhwQD1JqyG6IOOWe2uf5YXKK2TphPPRnCW4QIED4 +PuXYHjIvGYA4Kf0Wmb2hPk6bxJidNoLp9lsJyqGfk3QnT5PRJVgO0mlzo/UsZo77 +5j+yq87yLe5OL2HrZd1KTfg7SKOtMJ9N6tm2Hw2jwypKz+x2jlEZOgXHmYb5aUaI ++4xG+9fqc8x3ScoHQGNujF3qHO5SxnXkufNUSVbWbv1Ble8peiKyG6AFQvtcs7KG +pEoFztGSlaABwSvxO8J3aJPAEok4OI5IAGJNy92XaBMLtyt270FC8JtUnL+JEubV +t8tY5cCcGK7EtRHb47mM0K8HEq+IU2nAq6/29Ka0IZlkb5fPoWzQAZEIVKgLNHt4 +96g9AgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNx36JXsCIzVWCOw +1ETtaxlN79XrMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh +bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBAAEEZln7lsS/HIysNPJktc0Gdu3n +X1BcA3wXh95YTugcxSSeLLx2SykXnwX+cJncc1OKbboO9DA5mZ+huCesGIOKeUkg +azQZL6FAdw9PQKdqKg3RgSQ4XhK990fPcmmBhSXY24jNNhRHxGw5lGBrD6X2SdW3 +m66yYzn9hMXL4yrweGO7OC4bdyISDrJiP+St/xeCoIcXP2s07dE6jl2VorJCWn4J +SxKfDhPPohZKl6dL9npkmPcpz2zRAYpo4tsVdAAQDBRui44Vvm1eBPUo7EH2UOEh +/3JtTeDUpldM8fDaKE0kTa1Ttxzs2e0Jm3M4/FMOxqSesyJldw54F4+4m24e/iQU +gceArYMFVFTipgrLfUuRvRxx/7D7V92pqTyuD3T78+KdTqrlxvCTOqSHhFE05jWD +RdynS6Ev/1QZLlnWgMwhQAnjhc1NKkso+namF1ZmHH9owiTRBlWDMNcHMDReaELd +QmFUvutHUpjidt1z+G6lzbP0XB5w+0vW4BsT0FqaYsFbK5ftryj1/K0VctrSd/ke +GI0vxrErAyLG2B8bdK88u2w7DCuXjAOp+CeA7HUmk93TsPEAhrxQ6lR51IC6LcK0 +gACSdnQDPGtkoRX00DTvdcOpzmkSgaGr/mXTqp2lR9IuZIhwKbhS3lDKsAZ/hinB +yaBwLiXfcvZrZOwy -----END CERTIFICATE----- diff --git a/examples/data/x509/server_key.pem b/examples/data/x509/server_key.pem index e71ad0ac9753..1c778db7c491 100644 --- a/examples/data/x509/server_key.pem +++ b/examples/data/x509/server_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAqieSzMq9KDUhS+l9bc8B4M8uAutdug5YkJe/dEPmrq9/b34g -EJqWNImZTpPtKmWiRQwvlmRjpp2NfprAb4sfe04yD3O9eke5I9GkVSTagsW1pa7M -n40LkcgzTBYO91nopCyI8Vs3GTZGGST5AqyKKm1nLbJ/8BsNjSxWJ5kElkn6iz4F -0UiTv5dSAO6091IC61MA2GsbJxhriJgB8MZACnPeyd1JKwzYpVzTDkWBbrDZQELi 
-CBzVlpZ1tW2Rij7R0KyuXUntZQ2zPy/yaMay+b14W4mMoUGIOEIX7GDhfO7VbPvN -X3v33iVr/EwiZBHhAN2TL0iApwkxVQa5rpTQYTmWa3bCd3doAjbbxWbpKcruQq+c -Py9FTIz0HJ5GgyWsC5hvQCOdUGv8xMVs5qKLrDa/dGNTSPpgXOnz7ruKE5b2eRvV -rKpJcPdPhBQPSnvk17v/PPWvZqYiH2mZImW6At7FUOlvzq9eD1GdeNtmfYdMSRew -vl4dQiVDO6Er6B2aP4wsZ/9jOnd91omwDRwl8yOPlRtvaBHhXDOsfL0APYqOPOzT -qlO2lQsKHCmv0gFMsSxl/D/i5bggjil97pHCfhsYcWJ+PHT7ey+WxKiN0dxcQwe9 -1E+FYZRnSud74grpDGBvc4xm7trTi9XRGq+Ci/EzqGt7d7RTMEkbjliqVCMCAwEA -AQKCAgEAjU6UEVMFSBDnd/2OVtUlQCeOlIoWql8jmeEL9Gg3eTbx5AugYWmf+D2V -fbZHrX/+BM2b74+rWkFZspyd14R4PpSv6jk6UASkcmS1zqfud8/tjIzgDli6FPVn -9HYVM8IM+9qoV5hi56M1D8iuq1PS4m081Kx6p1IwLN93JSdksdL6KQz3E9jsKp5m -UbPrwcDv/7JM723zfMJA+40Rf32EzalwicAl9YSTnrC57g428VAY+88Pm6EmmAqX -8nXt+hs1b9EYdQziA5wfEgiljfIFzHVXMN3IVlrv35iz+XBzkqddw0ZSRkvTiz8U -sNAhd22JqIhapVfWz+FIgM43Ag9ABUMNWoQlaT0+2KlhkL+cZ6J1nfpMTBEIatz0 -A/l4TGcvdDhREODrS5jrxwJNx/LMRENtFFnRzAPzX4RdkFvi8SOioAWRBvs1TZFo -ZLq2bzDOzDjs+EPQVx0SmjZEiBRhI6nC8Way00IdQi3T546r6qTKfPmXgjl5/fVO -J4adGVbEUnI/7+fqL2N82WVr+Le585EFP/6IL5FO++sAIGDqAOzEQhyRaLhmnz+D -GboeS/Tac9XdymFbrEvEMB4EFS3nsZHTeahfiqVd/SuXFDTHZ6kiqXweuhfsP1uW -7tGlnqtn+3zmLO6XRENPVvmjn7DhU255yjiKFdUqkajcoOYyWPECggEBANuYk+sr -UTScvJoh/VRHuqd9NkVVIoqfoTN61x6V1OuNNcmjMWsOIsH+n4SifLlUW6xCKaSK -8x8RJYfE9bnObv/NqM4DMhuaNd52bPKFi8IBbHSZpuRE/UEyJhMDpoto04H1GXx4 -1S49tndiNxQOv1/VojB4BH7kapY0yp30drK1CrocGN+YOUddxI9lOQpgt2AyoXVk -ehdyamK4uzQmkMyyGQljrV5EQbmyPCqZ1l/d0MJ9DixOBxnPDR9Ov9qrG4Dy6S/k -cH8PythqHTGTdlXgsBJaWEl2PyQupo3OhfiCV+79B9uxPfKvk5CIMVbnYxKgu+ly -RKSTSX+GHVgNwicCggEBAMZcwQIAA+I39sTRg/Vn/MxmUBAu3h2+oJcuZ3FQh4v5 -SL80BWEsooK9Oe4MzxyWkU+8FieFu5G6iXaSx8f3Wv6j90IzA3g6Xr9M5xBm5qUN -IqzF+hUZuKAEMY1NcPlFTa2NlrkT8JdfQvJ+D5QrcBIMFmg9cKG5x9yD7MfHTJkf -ztMDFOwP3n7ahKRBowfe7/unAEFf6hYFtYjV+bqMDmBFVmk2CIVtjFgO9BNBQ/LB -zGcnwo2VigWBIjRDF5BgV0v+2g0PZGaxJ362RigZjzJojx3gYj6kaZYX8yb6ttGo -RPGt1A9woz6m0G0fLLMlce1dpbBAna14UVY7AEVt56UCggEAVvii/Oz3CINbHyB/ -GLYf8t3gdK03NPfr/FuWf4KQBYqz1txPYjsDARo7S2ifRTdn51186LIvgApmdtNH 
-DwP3alClnpIdclktJKJ6m8LQi1HNBpEkTBwWwY9/DODRQT2PJ1VPdsDUja/baIT5 -k3QTz3zo85FVFnyYyky2QsDjkfup9/PQ1h2P8fftNW29naKYff0PfVMCF+80u0y2 -t/zeNHQE/nb/3unhrg4tTiIHiYhsedrVli6BGXOrms6xpYVHK1cJi/JJq8kxaWz9 -ivkAURrgISSu+sleUJI5XMiCvt3AveJxDk2wX0Gyi/eksuqJjoMiaV7cWOIMpfkT -/h/U2QKCAQAFirvduXBiVpvvXccpCRG4CDe+bADKpfPIpYRAVzaiQ4GzzdlEoMGd -k3nV28fBjbdbme6ohgT6ilKi3HD2dkO1j5Et6Uz0g/T3tUdTXvycqeRJHXLiOgi9 -d8CGqR456KTF74nBe/whzoiJS9pVkm0cI/hQSz8lVZJu58SqxDewo4HcxV5FRiA6 -PRKtoCPU6Xac+kp4iRx6JwiuXQQQIS+ZovZKFDdiuu/L2gcZrp4eXym9zA+UcxQb -GUOCYEl9QCPQPLuM19w/Pj3TPXZyUlx81Q0Cka1NALzuc5bYhPKsot3iPrAJCmWV -L4XtNozCKI6pSg+CABwnp4/mL9nPFsX9AoIBAQDHiDhG9jtBdgtAEog6oL2Z98qR -u5+nONtLQ61I5R22eZYOgWfxnz08fTtpaHaVWNLNzF0ApyxjxD+zkFHcMJDUuHkR -O0yxUbCaof7u8EFtq8P9ux4xjtCnZW+9da0Y07zBrcXTsHYnAOiqNbtvVYd6RPiW -AaE61hgvj1c9/BQh2lUcroQx+yJI8uAAQrfYtXzm90rb6qk6rWy4li2ybMjB+LmP -cIQIXIUzdwE5uhBnwIre74cIZRXFJBqFY01+mT8ShPUWJkpOe0Fojrkl633TUuNf -9thZ++Fjvs4s7alFH5Hc7Ulk4v/O1+owdjqERd8zlu7+568C9s50CGwFnH0d +MIIJKQIBAAKCAgEAvkYFbD6p9fJ6V6Vgv9EMD8WTgYDwOVwFCAE6MFpcJUMwAmPr +ew365soG2mFZ7pj1JYslraa3xr9lNBmbZHkU9Kn2vB2vThRCCYvXXCEMKY77CRFR +5NjCypyJ0QdHA7Ghy3I76XCBjD3zdP9+nzeq01LhvD7WQnCsu0V2CiQp31QYi6Cz +wFMWoT/sLEVFdNiw3L+CPSmnjXUO2CSeXVR5892NyjZCifx/cObmIN8DJQzWgvz5 ++dFCP+vo/dcLgA6IGwXd27GIHNSnztiCbAM5pryloo7HZQ0sD94/9o9SF3seTKCo +5f61zcfx29g42jqWHBAPUmrIbog45Z7a5/lhcorZOmE89GcJbhAgQPg+5dgeMi8Z +gDgp/RaZvaE+TpvEmJ02gun2WwnKoZ+TdCdPk9ElWA7SaXOj9SxmjvvmP7KrzvIt +7k4vYetl3UpN+DtIo60wn03q2bYfDaPDKkrP7HaOURk6BceZhvlpRoj7jEb71+pz +zHdJygdAY26MXeoc7lLGdeS581RJVtZu/UGV7yl6IrIboAVC+1yzsoakSgXO0ZKV +oAHBK/E7wndok8ASiTg4jkgAYk3L3ZdoEwu3K3bvQULwm1Scv4kS5tW3y1jlwJwY +rsS1EdvjuYzQrwcSr4hTacCrr/b0prQhmWRvl8+hbNABkQhUqAs0e3j3qD0CAwEA +AQKCAgBnR3CoGbd9hZl8u4qxc5IdeXwgflFmgRlGCAyCtHlxzG9hzMTD7Ymz/hMM +NG1xQltGfqn8AROd8MPJLOEY/1QtnZgM8fv24K4bqmlCW7nTUQXYHSubkUDiY2e3 +K0ETszaETMRSaLwY2IOujQQ4/ilePY3D9UOtmqVXnVN+G7USwP31xEvtZ+xPqHfU +a+FQlFIj8FuMQXDuKozdK7s+I51yjl7pVNx3M7QlH1/olcSKNta1EQXK4RgZxD6a 
+kkBuyPR93ohXOJ0OMSvI7eKVKIcBh0JM4z0+D5FMJ7IGbjL8Bdsjcs1a0g/y28Xf +NBVf9w8Fun3mmYmj3ZMsqDZgVg/bAfP2z7O9kMzbuqmjelOz8HXxTm/+GIHuseMx +b/nDZgB0ZN+FhATv/onshJcjr2L3SJYzEWqjYiqaCQo5qtib+/kxh6SHPhAY2o8l +zzMhKFsJMhmwW91FXqeDS9FTlcRXtYH1EJxNGa01GpyVa6plvvFTGBNkEUJnVuEp +ULohJw0NJQYQOz5omYaQVJ49lpzVhwLEolgSlIBiM3s9nSDvVBYu+bB1ovw5OTIJ +Wlc9cBrYmdxYdAj5n6JzIC1wixgxrFw1jBm8cL/2FQYtR7daZabTMyZj5vAUqjxr +OV+uvkSFcIyBs1ty9TnnKC3yd5Ma+5chR5u7JPc1lSSor6AwQQKCAQEA4d5XrCq5 +EikGII/unhkVZsh9xmILp/4PRKc+fV7TFEpGyn8HFCBToZk6nXv99roUBdeZFobw +gDuZqBa4ougm2zgBbhdQXGaW4yZdChJlSs9yY7OAVvnG9gjuHGmWsLhvmhaeXSr2 +auxVGRaltr3r8hP9eHhloDM6qdSSAQpsdeTBQD8Ep3//aL/BLqGcF0gLrZLPwo0+ +cku8jQoVXSSOW1+YSaXRGxueuIR8lldU4I3yp2DO++DGLsOZoGFT/+ZXc2B4nE1h +o1hCWt6RKw0q2rCkZ+i6SiPGsVgb9xn6W8wHFIPA/0sOwOdtbKqKd0xwn5DnX+vt +d8shlRRUDF7HDQKCAQEA16gR/2n59HZiQQhHU9BCvGFi4nxlsuij+nqDx9fUerDU +fK79NaOuraWNkCqz+2lqfu5o3e3XNFHlVsj98SyfmTdMZ8Fj19awqN20nCOmfRkk +/MDuEzRzvNlOYBa0PpMkKJn2sahEiXGNVI4g3cGip1c5wJ1HL3jF61io4F/auBLP +grLtw8CoTqc6VpJUvsWFjopTmNdAze8WMf3vK6AKu7PKkXH7mFQZusacpO/E61Ud +euiG9BYDIIkrnWIQdLpODgliLZzPNcJDTKTFJAfIzr3WQvUaFc1+tHyX3XhpicvP +J4zyNfHd2dZMK1csXQJvFSnPgXpy531Wca0riAYZ8QKCAQEAhaVEBxE4dLBlebrw +nAeHjEuxcELvVrWTXzH+XbxP9T+F56eGDriaA5JhBnIpcWXlFxfc82FgyN97KeRX +17y50Riwb+3HlQT23u0CPEVqPfvFWY0KsWwV99qM2a74hRR8pJYhmksjh1zTdYbb +AugZxiFh53iF2Wa2nWq0AX2jc5apalRfcqTgAaEEs4zYiUYN8uRdnmZovsRliqae +wYAx44sK1vkQY5PSNKff+C0wgbY8ECHOF2eGnIEMU8ODKnWm5RP+Ca4Xyckdahsr +lmeyJbhDb2BbaicFGEZkNa/fXZW50r+q4OQOlMHbE2NNjw1hzmi1HyLAXhOJiWZ/ +3NnvuQKCAQEAg04a/zeocBcwhcYjn717FLX6/kmdpkwNo3G7EQ+xmK5YAj6Nf35U +2fel9PR7N4WcyQIiKZYp5PpEOA4SyChSWHiZ9caDIyTd1UOAN11hfmOz6I0Tp+/U +1FQ/azQHtN3kMzBjSxJYAJN56NTM4BiJD3iFemiIsjfH0h7eXBcg1djmLf8B06FX +GOSrGZDpNmqPghVpBvNwyrJbAj9Jw3cjcdvrZ5lOBhaWv+kz8Rzn+h2N4Ir5uF46 +szGxs5bEzD2vTs6Zz4ndhC7uyRi9y81Nj8t4TLZtln7TOdNup/Mr1zGXxM4Fn6DP +YlYfdHgUU+Eqf2lApeZHVfkzi+1TRvPoEQKCAQAELU/d33TNwQ/Ylo2VhwAscY3s +hv31O4tpu5koHHjOo3RDPzjuEfwy006u8NVAoj97LrU2n+XTIlnXf14TKuKWQ+8q 
+ajIVNj+ZAbD3djCmYXbIEL+u6aL4K1ENdjo6DNTGgPMfISE79WrmGBIKtB//uMqy +fGTUSPeo+R5WmTGN29YxAnRE/jtwOgAcicACTc0e9nghHj3c2raI0IazY5XFP0/h +LszTNUQzWx6DjWsbB+Ymuhu4fHZTYftCrIMpjmjC9pkNggeJnkxylQz/pwO73uWg +ycDgJhRyaVhM8sJXiBk+OC/ySP2Lxo60aPa514LEYJKQxUCukCTXth/6p0Qo -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/README.md b/testdata/x509/README.md index e64a385e5f97..661caf4ac858 100644 --- a/testdata/x509/README.md +++ b/testdata/x509/README.md @@ -3,104 +3,4 @@ gRPC-Go tests. How were these test certs/keys generated ? ------------------------------------------ -0. Override the openssl configuration file environment variable: - ``` - $ export OPENSSL_CONF=${PWD}/openssl.cnf - ``` - -1. Generate a self-signed CA certificate along with its private key: - ``` - $ openssl req -x509 \ - -newkey rsa:4096 \ - -nodes \ - -days 3650 \ - -keyout ca_key.pem \ - -out ca_cert.pem \ - -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-ca/ \ - -config ./openssl.cnf \ - -extensions test_ca - ``` - - To view the CA cert: - ``` - $ openssl x509 -text -noout -in ca_cert.pem - ``` - -2.a Generate a private key for the server: - ``` - $ openssl genrsa -out server_key.pem 4096 - ``` - -2.b Generate a private key for the client: - ``` - $ openssl genrsa -out client_key.pem 4096 - ``` - -3.a Generate a CSR for the server: - ``` - $ openssl req -new \ - -key server_key.pem \ - -days 3650 \ - -out server_csr.pem \ - -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server/ \ - -config ./openssl.cnf \ - -reqexts test_server - ``` - - To view the CSR: - ``` - $ openssl req -text -noout -in server_csr.pem - ``` - -3.b Generate a CSR for the client: - ``` - $ openssl req -new \ - -key client_key.pem \ - -days 3650 \ - -out client_csr.pem \ - -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client/ \ - -config ./openssl.cnf \ - -reqexts test_client - ``` - - To view the CSR: - ``` - $ openssl req -text -noout -in client_csr.pem - ``` - -4.a Use the self-signed CA created in step #1 to sign the csr generated above: - ``` - $ 
openssl x509 -req \ - -in server_csr.pem \ - -CAkey ca_key.pem \ - -CA ca_cert.pem \ - -days 3650 \ - -set_serial 1000 \ - -out server_cert.pem \ - -extfile ./openssl.cnf \ - -extensions test_server - ``` - -4.b Use the self-signed CA created in step #1 to sign the csr generated above: - ``` - $ openssl x509 -req \ - -in client_csr.pem \ - -CAkey ca_key.pem \ - -CA ca_cert.pem \ - -days 3650 \ - -set_serial 1000 \ - -out client_cert.pem \ - -extfile ./openssl.cnf \ - -extensions test_client - ``` - -5.a Verify the `server_cert.pem` is trusted by `ca_cert.pem`: - ``` - $ openssl verify -verbose -CAfile ca_cert.pem server_cert.pem - ``` - -5.b Verify the `client_cert.pem` is trusted by `ca_cert.pem`: - ``` - $ openssl verify -verbose -CAfile ca_cert.pem client_cert.pem - ``` - +Run `./create.sh` diff --git a/testdata/x509/create.sh b/testdata/x509/create.sh index 5bd3c5801e96..2cbc971fb8d7 100755 --- a/testdata/x509/create.sh +++ b/testdata/x509/create.sh @@ -9,7 +9,8 @@ openssl req -x509 \ -out server_ca_cert.pem \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server_ca/ \ -config ./openssl.cnf \ - -extensions test_ca + -extensions test_ca \ + -sha256 # Create the client CA certs. openssl req -x509 \ @@ -20,7 +21,8 @@ openssl req -x509 \ -out client_ca_cert.pem \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client_ca/ \ -config ./openssl.cnf \ - -extensions test_ca + -extensions test_ca \ + -sha256 # Generate two server certs. 
openssl genrsa -out server1_key.pem 4096 @@ -39,7 +41,8 @@ openssl x509 -req \ -set_serial 1000 \ -out server1_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_server + -extensions test_server \ + -sha256 openssl verify -verbose -CAfile server_ca_cert.pem server1_cert.pem openssl genrsa -out server2_key.pem 4096 @@ -58,7 +61,8 @@ openssl x509 -req \ -set_serial 1000 \ -out server2_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_server + -extensions test_server \ + -sha256 openssl verify -verbose -CAfile server_ca_cert.pem server2_cert.pem # Generate two client certs. @@ -78,7 +82,8 @@ openssl x509 -req \ -set_serial 1000 \ -out client1_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_client + -extensions test_client \ + -sha256 openssl verify -verbose -CAfile client_ca_cert.pem client1_cert.pem openssl genrsa -out client2_key.pem 4096 @@ -97,7 +102,8 @@ openssl x509 -req \ -set_serial 1000 \ -out client2_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_client + -extensions test_client \ + -sha256 openssl verify -verbose -CAfile client_ca_cert.pem client2_cert.pem # Generate a cert with SPIFFE ID. @@ -108,7 +114,8 @@ openssl req -x509 \ -nodes \ -days 3650 \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ - -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1" + -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1" \ + -sha256 # Generate a cert with SPIFFE ID and another SAN URI field(which doesn't meet SPIFFE specs). openssl req -x509 \ @@ -118,6 +125,8 @@ openssl req -x509 \ -nodes \ -days 3650 \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ - -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1, URI:https://bar.baz.com/client" + -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1, URI:https://bar.baz.com/client" \ + -sha256 + # Cleanup the CSRs. 
rm *_csr.pem From 8078d1d5419bcb8bfc23bd85426651da9050b647 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 21 Mar 2022 16:34:32 -0400 Subject: [PATCH 455/998] balancergroup: Switched subBalancerWrapper to wrap a gracefulswitch.Balancer (#5245) * balancergroup: Switched subBalancerWrapper to wrap a gracefulswitch.Balancer --- internal/balancergroup/balancergroup.go | 21 +++++++++---------- .../balancer/clusterresolver/priority_test.go | 15 ++++--------- 2 files changed, 14 insertions(+), 22 deletions(-) diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index 3142503a0ad5..4f089fdf27e3 100644 --- a/internal/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" @@ -67,7 +68,7 @@ type subBalancerWrapper struct { ccState *balancer.ClientConnState // The dynamic part of sub-balancer. Only used when balancer group is // started. Gets cleared when sub-balancer is closed. - balancer balancer.Balancer + balancer *gracefulswitch.Balancer } // UpdateState overrides balancer.ClientConn, to keep state and picker. 
@@ -93,11 +94,13 @@ func (sbc *subBalancerWrapper) updateBalancerStateWithCachedPicker() { } func (sbc *subBalancerWrapper) startBalancer() { - b := sbc.builder.Build(sbc, sbc.buildOpts) - sbc.group.logger.Infof("Created child policy %p of type %v", b, sbc.builder.Name()) - sbc.balancer = b + if sbc.balancer == nil { + sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) + } + sbc.group.logger.Infof("Creating child policy of type %v", sbc.builder.Name()) + sbc.balancer.SwitchTo(sbc.builder) if sbc.ccState != nil { - b.UpdateClientConnState(*sbc.ccState) + sbc.balancer.UpdateClientConnState(*sbc.ccState) } } @@ -108,11 +111,8 @@ func (sbc *subBalancerWrapper) exitIdle() (complete bool) { if b == nil { return true } - if ei, ok := b.(balancer.ExitIdler); ok { - ei.ExitIdle() - return true - } - return false + b.ExitIdle() + return true } func (sbc *subBalancerWrapper) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { @@ -374,7 +374,6 @@ func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) { // sub-balancers. for sc, b := range bg.scToSubBalancer { if b == config { - bg.cc.RemoveSubConn(sc) delete(bg.scToSubBalancer, sc) } } diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 9f1accd1f2d3..ba497bb7f108 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -289,18 +289,13 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { scToRemove []balancer.SubConn scToRemoveMap = make(map[balancer.SubConn]struct{}) ) - // Each subconn is removed twice. This is OK in production, but it makes - // testing harder. - // + // Each subconn is removed. // The sub-balancer to be closed is priority's child, clusterimpl, who has // weightedtarget as children. - // - // - When clusterimpl is removed from priority's balancergroup, all its - // subconns are removed once. 
// - When clusterimpl is closed, it closes weightedtarget, and this - // weightedtarget's balancer removes all the same subconns again. - for i := 0; i < 4; i++ { - // We expect 2 subconns, so we recv from channel 4 times. + // weightedtarget's balancer removes all the subconns. + for i := 0; i < 2; i++ { + // We expect 2 subconns, so we recv from channel 2 times. scToRemoveMap[<-cc.RemoveSubConnCh] = struct{}{} } for sc := range scToRemoveMap { @@ -503,7 +498,6 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) // p0 subconn should be removed. scToRemove := <-cc.RemoveSubConnCh - <-cc.RemoveSubConnCh // Drain the duplicate subconn removed. if !cmp.Equal(scToRemove, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc0, scToRemove) } @@ -551,7 +545,6 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { // p1 subconn should be removed. scToRemove1 := <-cc.RemoveSubConnCh - <-cc.RemoveSubConnCh // Drain the duplicate subconn removed. 
if !cmp.Equal(scToRemove1, sc11, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc11, scToRemove1) } From 89f31959afbde432826651efabd961e4890c291f Mon Sep 17 00:00:00 2001 From: apolcyn Date: Mon, 21 Mar 2022 13:41:02 -0700 Subject: [PATCH 456/998] interop/client: simplify fallback test client and parameterize fallback deadline (#5248) --- interop/grpclb_fallback/client_linux.go | 74 +++++++------------------ 1 file changed, 21 insertions(+), 53 deletions(-) diff --git a/interop/grpclb_fallback/client_linux.go b/interop/grpclb_fallback/client_linux.go index ed9b8be00ff3..b1cfde71134e 100644 --- a/interop/grpclb_fallback/client_linux.go +++ b/interop/grpclb_fallback/client_linux.go @@ -42,16 +42,14 @@ import ( ) var ( - customCredentialsType = flag.String("custom_credentials_type", "", "Client creds to use") - serverURI = flag.String("server_uri", "dns:///staging-grpc-directpath-fallback-test.googleapis.com:443", "The server host name") - unrouteLBAndBackendAddrsCmd = flag.String("unroute_lb_and_backend_addrs_cmd", "", "Command to make LB and backend address unroutable") - blackholeLBAndBackendAddrsCmd = flag.String("blackhole_lb_and_backend_addrs_cmd", "", "Command to make LB and backend addresses blackholed") - testCase = flag.String("test_case", "", + customCredentialsType = flag.String("custom_credentials_type", "", "Client creds to use") + serverURI = flag.String("server_uri", "dns:///staging-grpc-directpath-fallback-test.googleapis.com:443", "The server host name") + induceFallbackCmd = flag.String("induce_fallback_cmd", "", "Command to induce fallback e.g. by making certain addresses unroutable") + fallbackDeadlineSeconds = flag.Int("fallback_deadline_seconds", 1, "How long to wait for fallback to happen after induce_fallback_cmd") + testCase = flag.String("test_case", "", `Configure different test cases. 
Valid options are: - fast_fallback_before_startup : LB/backend connections fail fast before RPC's have been made; - fast_fallback_after_startup : LB/backend connections fail fast after RPC's have been made; - slow_fallback_before_startup : LB/backend connections black hole before RPC's have been made; - slow_fallback_after_startup : LB/backend connections black hole after RPC's have been made;`) + fallback_before_startup : LB/backend connections fail before RPC's have been made; + fallback_after_startup : LB/backend connections fail after RPC's have been made;`) infoLog = log.New(os.Stderr, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile) errorLog = log.New(os.Stderr, "ERROR: ", log.Ldate|log.Ltime|log.Lshortfile) ) @@ -155,69 +153,39 @@ func waitForFallbackAndDoRPCs(client testgrpc.TestServiceClient, fallbackDeadlin } } -func doFastFallbackBeforeStartup() { - runCmd(*unrouteLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(5 * time.Second) +func doFallbackBeforeStartup() { + runCmd(*induceFallbackCmd) + fallbackDeadline := time.Now().Add(time.Duration(*fallbackDeadlineSeconds) * time.Second) conn := createTestConn() defer conn.Close() client := testgrpc.NewTestServiceClient(conn) waitForFallbackAndDoRPCs(client, fallbackDeadline) } -func doSlowFallbackBeforeStartup() { - runCmd(*blackholeLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(60 * time.Second) - conn := createTestConn() - defer conn.Close() - client := testgrpc.NewTestServiceClient(conn) - waitForFallbackAndDoRPCs(client, fallbackDeadline) -} - -func doFastFallbackAfterStartup() { - conn := createTestConn() - defer conn.Close() - client := testgrpc.NewTestServiceClient(conn) - if g := doRPCAndGetPath(client, 20*time.Second); g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { - errorLog.Fatalf("Expected RPC to take grpclb route type BACKEND. 
Got: %v", g) - } - runCmd(*unrouteLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(40 * time.Second) - waitForFallbackAndDoRPCs(client, fallbackDeadline) -} - -func doSlowFallbackAfterStartup() { +func doFallbackAfterStartup() { conn := createTestConn() defer conn.Close() client := testgrpc.NewTestServiceClient(conn) if g := doRPCAndGetPath(client, 20*time.Second); g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { errorLog.Fatalf("Expected RPC to take grpclb route type BACKEND. Got: %v", g) } - runCmd(*blackholeLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(80 * time.Second) + runCmd(*induceFallbackCmd) + fallbackDeadline := time.Now().Add(time.Duration(*fallbackDeadlineSeconds) * time.Second) waitForFallbackAndDoRPCs(client, fallbackDeadline) } func main() { flag.Parse() - if len(*unrouteLBAndBackendAddrsCmd) == 0 { - errorLog.Fatalf("--unroute_lb_and_backend_addrs_cmd unset") - } - if len(*blackholeLBAndBackendAddrsCmd) == 0 { - errorLog.Fatalf("--blackhole_lb_and_backend_addrs_cmd unset") + if len(*induceFallbackCmd) == 0 { + errorLog.Fatalf("--induce_fallback_cmd unset") } switch *testCase { - case "fast_fallback_before_startup": - doFastFallbackBeforeStartup() - log.Printf("FastFallbackBeforeStartup done!\n") - case "fast_fallback_after_startup": - doFastFallbackAfterStartup() - log.Printf("FastFallbackAfterStartup done!\n") - case "slow_fallback_before_startup": - doSlowFallbackBeforeStartup() - log.Printf("SlowFallbackBeforeStartup done!\n") - case "slow_fallback_after_startup": - doSlowFallbackAfterStartup() - log.Printf("SlowFallbackAfterStartup done!\n") + case "fallback_before_startup": + doFallbackBeforeStartup() + log.Printf("FallbackBeforeStartup done!\n") + case "fallback_after_startup": + doFallbackAfterStartup() + log.Printf("FallbackAfterStartup done!\n") default: errorLog.Fatalf("Unsupported test case: %v", *testCase) } From 63bdcbcce5130749a935600dd009d14482f28a9a Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Mon, 
21 Mar 2022 13:57:45 -0700 Subject: [PATCH 457/998] xds/interop: add xds_k8s_lb test suite (#5250) --- test/kokoro/xds_k8s_lb.cfg | 13 +++ test/kokoro/xds_k8s_lb.sh | 162 +++++++++++++++++++++++++++++++++++++ 2 files changed, 175 insertions(+) create mode 100644 test/kokoro/xds_k8s_lb.cfg create mode 100755 test/kokoro/xds_k8s_lb.sh diff --git a/test/kokoro/xds_k8s_lb.cfg b/test/kokoro/xds_k8s_lb.cfg new file mode 100644 index 000000000000..4e40bb510306 --- /dev/null +++ b/test/kokoro/xds_k8s_lb.cfg @@ -0,0 +1,13 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. +build_file: "grpc-go/test/kokoro/xds_k8s_lb.sh" +timeout_mins: 180 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*sponge_log.log" + strip_prefix: "artifacts" + } +} diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh new file mode 100755 index 000000000000..6e5fd68904cb --- /dev/null +++ b/test/kokoro/xds_k8s_lb.sh @@ -0,0 +1,162 @@ +#!/usr/bin/env bash +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eo pipefail + +# Constants +readonly GITHUB_REPOSITORY_NAME="grpc-go" +readonly TEST_DRIVER_INSTALL_SCRIPT_URL="https://raw.githubusercontent.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc/${TEST_DRIVER_BRANCH:-master}/tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh" +## xDS test server/client Docker images +readonly SERVER_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-server" +readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" +readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" + +####################################### +# Builds test app Docker images and pushes them to GCR +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud builds submit` to stdout, stderr +####################################### +build_test_app_docker_images() { + echo "Building Go xDS interop test app Docker images" + docker build -f "${SRC_DIR}/interop/xds/client/Dockerfile" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + docker build -f "${SRC_DIR}/interop/xds/server/Dockerfile" -t "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + gcloud -q auth configure-docker + docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" + docker push "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" + if [[ -n $KOKORO_JOB_NAME ]]; then + branch_name=$(echo "$KOKORO_JOB_NAME" | sed -E 's|^grpc/go/([^/]+)/.*|\1|') + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" + tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" + fi +} + +####################################### +# Builds test app and its docker images unless they already exist +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# FORCE_IMAGE_BUILD +# Arguments: +# None +# Outputs: +# 
Writes the output to stdout, stderr +####################################### +build_docker_images_if_needed() { + # Check if images already exist + server_tags="$(gcloud_gcr_list_image_tags "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Server image: %s:%s\n" "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${server_tags:-Server image not found}" + + client_tags="$(gcloud_gcr_list_image_tags "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Client image: %s:%s\n" "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${client_tags:-Client image not found}" + + # Build if any of the images are missing, or FORCE_IMAGE_BUILD=1 + if [[ "${FORCE_IMAGE_BUILD}" == "1" || -z "${server_tags}" || -z "${client_tags}" ]]; then + build_test_app_docker_images + else + echo "Skipping Go test app build" + fi +} + +####################################### +# Executes the test case +# Globals: +# TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile +# KUBE_CONTEXT: The name of kubectl context with GKE cluster access +# TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# Arguments: +# Test case name +# Outputs: +# Writes the output of test execution to stdout, stderr +# Test xUnit report to ${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml +####################################### +run_test() { + # Test driver usage: + # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage + local test_name="${1:?Usage: run_test test_name}" + set -x + python -m "tests.${test_name}" \ + --flagfile="${TEST_DRIVER_FLAGFILE}" \ + --kube_context="${KUBE_CONTEXT}" \ + --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ + --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ + --force_cleanup + set +x +} + 
+####################################### +# Main function: provision software necessary to execute tests, and run them +# Globals: +# KOKORO_ARTIFACTS_DIR +# GITHUB_REPOSITORY_NAME +# SRC_DIR: Populated with absolute path to the source repo +# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing +# the test driver +# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code +# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile +# TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report +# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build +# GIT_COMMIT: Populated with the SHA-1 of git commit being built +# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built +# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access +# Arguments: +# None +# Outputs: +# Writes the output of test execution to stdout, stderr +####################################### +main() { + local script_dir + script_dir="$(dirname "$0")" + + # Source the test driver from the master branch. 
+ echo "Sourcing test driver install script from: ${TEST_DRIVER_INSTALL_SCRIPT_URL}" + source /dev/stdin <<< "$(curl -s "${TEST_DRIVER_INSTALL_SCRIPT_URL}")" + + activate_gke_cluster GKE_CLUSTER_PSM_LB + activate_secondary_gke_cluster GKE_CLUSTER_PSM_LB + + set -x + if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then + kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}" + else + local_setup_test_driver "${script_dir}" + fi + build_docker_images_if_needed + # Run tests + cd "${TEST_DRIVER_FULL_DIR}" + local failed_tests=0 + test_suites=("api_listener_test" "change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test" "affinity_test") + for test in "${test_suites[@]}"; do + run_test $test || (( failed_tests++ )) + done + echo "Failed test suites: ${failed_tests}" + if (( failed_tests > 0 )); then + exit 1 + fi +} + +main "$@" From 1ffd63de37de4571028efedb6422e29d08716d0c Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Mon, 21 Mar 2022 14:00:02 -0700 Subject: [PATCH 458/998] binarylog: generalize binarylog's MethodLogger preparing for new observability features (#5244) --- internal/binarylog/binarylog.go | 15 ++++++++++---- internal/binarylog/binarylog_test.go | 4 ++-- internal/binarylog/method_logger.go | 26 ++++++++++++++++-------- internal/binarylog/method_logger_test.go | 8 ++++---- stream.go | 4 ++-- 5 files changed, 37 insertions(+), 20 deletions(-) diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index 5cc3aeddb213..66efc4856fff 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -31,7 +31,7 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. 
One of this should be @@ -49,17 +49,24 @@ func SetLogger(l Logger) { binLogger = l } +// GetLogger gets the binarg logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -148,7 +155,7 @@ func (l *logger) setBlacklist(method string) error { // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) diff --git a/internal/binarylog/binarylog_test.go b/internal/binarylog/binarylog_test.go index cbf2ba0d1bf8..617aeb2e83fb 100644 --- a/internal/binarylog/binarylog_test.go +++ b/internal/binarylog/binarylog_test.go @@ -93,7 +93,7 @@ func (s) TestGetMethodLogger(t *testing.T) { t.Errorf("in: %q, failed to create logger from config string", tc.in) continue } - ml := l.getMethodLogger(tc.method) + ml := l.GetMethodLogger(tc.method).(*methodLogger) if ml == nil { t.Errorf("in: %q, method logger is nil, want non-nil", tc.in) continue @@ -149,7 +149,7 @@ func (s) TestGetMethodLoggerOff(t *testing.T) { t.Errorf("in: %q, failed to create logger from config string", tc.in) continue } - ml := l.getMethodLogger(tc.method) + ml := l.GetMethodLogger(tc.method) if ml != nil { t.Errorf("in: %q, method logger is 
non-nil, want nil", tc.in) } diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index 0cdb41831509..24df0a1a0c4e 100644 --- a/internal/binarylog/method_logger.go +++ b/internal/binarylog/method_logger.go @@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +type methodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +61,8 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +func newMethodLogger(h, m uint64) *methodLogger { + return &methodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in methodLogger as possible. +func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. 
+func (ml *methodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/internal/binarylog/method_logger_test.go b/internal/binarylog/method_logger_test.go index a99360bd92df..31cc076343ff 100644 --- a/internal/binarylog/method_logger_test.go +++ b/internal/binarylog/method_logger_test.go @@ -350,7 +350,7 @@ func (s) TestLog(t *testing.T) { func (s) TestTruncateMetadataNotTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger + ml *methodLogger mpPb *pb.Metadata }{ { @@ -417,7 +417,7 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { func (s) TestTruncateMetadataTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger + ml *methodLogger mpPb *pb.Metadata entryLen int @@ -478,7 +478,7 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { func (s) TestTruncateMessageNotTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger + ml *methodLogger msgPb *pb.Message }{ { @@ -511,7 +511,7 @@ func (s) TestTruncateMessageNotTruncated(t *testing.T) { func (s) TestTruncateMessageTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger + ml *methodLogger msgPb *pb.Message oldLength uint32 diff --git a/stream.go b/stream.go index acb3185f03ab..e0b30b46fb11 100644 --- a/stream.go +++ b/stream.go @@ -462,7 +462,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. 
- binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlog binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -1434,7 +1434,7 @@ type serverStream struct { statsHandler stats.Handler - binlog *binarylog.MethodLogger + binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). From 7ea4af98f12a000c483418dc883fa637c1631e01 Mon Sep 17 00:00:00 2001 From: Benny Siegert Date: Mon, 21 Mar 2022 22:37:55 +0100 Subject: [PATCH 459/998] internal/googlecloud: refactor OS-dependent code (#5239) --- internal/googlecloud/googlecloud.go | 76 +++----------------- internal/googlecloud/googlecloud_test.go | 64 ++++------------- internal/googlecloud/manufacturer.go | 26 +++++++ internal/googlecloud/manufacturer_linux.go | 27 +++++++ internal/googlecloud/manufacturer_windows.go | 50 +++++++++++++ 5 files changed, 125 insertions(+), 118 deletions(-) create mode 100644 internal/googlecloud/manufacturer.go create mode 100644 internal/googlecloud/manufacturer_linux.go create mode 100644 internal/googlecloud/manufacturer_windows.go diff --git a/internal/googlecloud/googlecloud.go b/internal/googlecloud/googlecloud.go index d6c9e03fc4c8..6717b757f80d 100644 --- a/internal/googlecloud/googlecloud.go +++ b/internal/googlecloud/googlecloud.go @@ -20,13 +20,6 @@ package googlecloud import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" "runtime" "strings" "sync" @@ -35,43 +28,9 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - 
powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" - - logPrefix = "[googlecloud]" -) +const logPrefix = "[googlecloud]" var ( - // The following two variables will be reassigned in tests. - runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, fmt.Errorf("%s is not supported", runningOS) - } - } - vmOnGCEOnce sync.Once vmOnGCE bool @@ -84,21 +43,21 @@ var ( // package. We keep this to avoid depending on the cloud library module. func OnGCE() bool { vmOnGCEOnce.Do(func() { - vmOnGCE = isRunningOnGCE() + mf, err := manufacturer() + if err != nil { + logger.Infof("failed to read manufacturer, setting onGCE=false: %v") + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) }) return vmOnGCE } -// isRunningOnGCE checks whether the local system, without doing a network request is +// isRunningOnGCE checks whether the local system, without doing a network request, is // running on GCP. 
-func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/internal/googlecloud/googlecloud_test.go b/internal/googlecloud/googlecloud_test.go index bd5a42ffab97..69ab2fd4c5f2 100644 --- a/internal/googlecloud/googlecloud_test.go +++ b/internal/googlecloud/googlecloud_test.go @@ -19,68 +19,28 @@ package googlecloud import ( - "io" - "os" - "strings" "testing" ) -func setupManufacturerReader(testOS string, reader func() (io.Reader, error)) func() { - tmpOS := runningOS - tmpReader := manufacturerReader - - // Set test OS and reader function. 
- runningOS = testOS - manufacturerReader = reader - return func() { - runningOS = tmpOS - manufacturerReader = tmpReader - } -} - -func setup(testOS string, testReader io.Reader) func() { - reader := func() (io.Reader, error) { - return testReader, nil - } - return setupManufacturerReader(testOS, reader) -} - -func setupError(testOS string, err error) func() { - reader := func() (io.Reader, error) { - return nil, err - } - return setupManufacturerReader(testOS, reader) -} - func TestIsRunningOnGCE(t *testing.T) { for _, tc := range []struct { - description string - testOS string - testReader io.Reader - out bool + description string + testOS string + testManufacturer string + out bool }{ // Linux tests. - {"linux: not a GCP platform", "linux", strings.NewReader("not GCP"), false}, - {"Linux: GCP platform (Google)", "linux", strings.NewReader("Google"), true}, - {"Linux: GCP platform (Google Compute Engine)", "linux", strings.NewReader("Google Compute Engine"), true}, - {"Linux: GCP platform (Google Compute Engine) with extra spaces", "linux", strings.NewReader(" Google Compute Engine "), true}, + {"linux: not a GCP platform", "linux", "not GCP", false}, + {"Linux: GCP platform (Google)", "linux", "Google", true}, + {"Linux: GCP platform (Google Compute Engine)", "linux", "Google Compute Engine", true}, + {"Linux: GCP platform (Google Compute Engine) with extra spaces", "linux", " Google Compute Engine ", true}, // Windows tests. 
- {"windows: not a GCP platform", "windows", strings.NewReader("not GCP"), false}, - {"windows: GCP platform (Google)", "windows", strings.NewReader("Google"), true}, - {"windows: GCP platform (Google) with extra spaces", "windows", strings.NewReader(" Google "), true}, + {"windows: not a GCP platform", "windows", "not GCP", false}, + {"windows: GCP platform (Google)", "windows", "Google", true}, + {"windows: GCP platform (Google) with extra spaces", "windows", " Google ", true}, } { - reverseFunc := setup(tc.testOS, tc.testReader) - if got, want := isRunningOnGCE(), tc.out; got != want { + if got, want := isRunningOnGCE([]byte(tc.testManufacturer), tc.testOS), tc.out; got != want { t.Errorf("%v: isRunningOnGCE()=%v, want %v", tc.description, got, want) } - reverseFunc() - } -} - -func TestIsRunningOnGCENoProductNameFile(t *testing.T) { - reverseFunc := setupError("linux", os.ErrNotExist) - if isRunningOnGCE() { - t.Errorf("ErrNotExist: isRunningOnGCE()=true, want false") } - reverseFunc() } diff --git a/internal/googlecloud/manufacturer.go b/internal/googlecloud/manufacturer.go new file mode 100644 index 000000000000..ffa0f1ddee5d --- /dev/null +++ b/internal/googlecloud/manufacturer.go @@ -0,0 +1,26 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package googlecloud + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/internal/googlecloud/manufacturer_linux.go b/internal/googlecloud/manufacturer_linux.go new file mode 100644 index 000000000000..e53b8ffc837f --- /dev/null +++ b/internal/googlecloud/manufacturer_linux.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import "io/ioutil" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return ioutil.ReadFile(linuxProductNameFile) +} diff --git a/internal/googlecloud/manufacturer_windows.go b/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 000000000000..2d7aaaaa70fe --- /dev/null +++ b/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} From 7840bd63dea893764c348d1a2cacc3dbfc766a56 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 21 Mar 2022 14:51:11 -0700 Subject: [PATCH 460/998] grpc: add a comment explaining why updateClientConnState is handled inline (#5261) --- balancer_conn_wrappers.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index cb4b6728c22b..84934bc0e670 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -167,6 +167,15 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } +// updateClientConnState forwards the clientConn update to the wrapped balancer +// synchronously. +// +// Other calls from the channel like exitIdle() and handleSubConnStateChange() +// are handled asynchronously by pushing the update onto a channel, which is +// picked up by the watcher() goroutine and forwarded to the wrapped balancer. +// That approach cannot be taken here because the corresponding API on the +// balancer returns an error which needs to be sent back to the channel to be +// forward to the resolver. 
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() From 50d0d0a51a8121297665d05eb204c10f7f2358a3 Mon Sep 17 00:00:00 2001 From: Anthonin Bonnefoy Date: Mon, 21 Mar 2022 23:07:27 +0100 Subject: [PATCH 461/998] server: set TCP_USER_TIMEOUT socket option for linux (#5219) --- internal/transport/http2_server.go | 6 ++++ internal/transport/keepalive_test.go | 53 +++++++++++++++++++++++----- 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 227608c7f21e..0956b500c18e 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -231,6 +232,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index 4f5a2bed6225..41395216fe43 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -645,19 +645,28 @@ func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T // the keepalive timeout, as detailed in proposal A18. 
func (s) TestTCPUserTimeout(t *testing.T) { tests := []struct { - time time.Duration - timeout time.Duration - wantTimeout time.Duration + time time.Duration + timeout time.Duration + clientWantTimeout time.Duration + serverWantTimeout time.Duration }{ { 10 * time.Second, 10 * time.Second, 10 * 1000 * time.Millisecond, + 10 * 1000 * time.Millisecond, }, { 0, 0, 0, + 20 * 1000 * time.Millisecond, + }, + { + infinity, + infinity, + 0, + 0, }, } for _, tt := range tests { @@ -666,7 +675,7 @@ func (s) TestTCPUserTimeout(t *testing.T) { 0, &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - Time: tt.timeout, + Time: tt.time, Timeout: tt.timeout, }, }, @@ -684,6 +693,26 @@ func (s) TestTCPUserTimeout(t *testing.T) { cancel() }() + var sc *http2Server + // Wait until the server transport is setup. + for { + server.mu.Lock() + if len(server.conns) == 0 { + server.mu.Unlock() + time.Sleep(time.Millisecond) + continue + } + for k := range server.conns { + var ok bool + sc, ok = k.(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", k) + } + } + server.mu.Unlock() + break + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() stream, err := client.NewStream(ctx, &CallHdr{}) @@ -692,15 +721,23 @@ func (s) TestTCPUserTimeout(t *testing.T) { } client.CloseStream(stream, io.EOF) - opt, err := syscall.GetTCPUserTimeout(client.conn) + cltOpt, err := syscall.GetTCPUserTimeout(client.conn) if err != nil { t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err) } - if opt < 0 { + if cltOpt < 0 { t.Skipf("skipping test on unsupported environment") } - if gotTimeout := time.Duration(opt) * time.Millisecond; gotTimeout != tt.wantTimeout { - t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.wantTimeout) + if gotTimeout := time.Duration(cltOpt) * time.Millisecond; gotTimeout != tt.clientWantTimeout { + t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.clientWantTimeout) + } + 
+ srvOpt, err := syscall.GetTCPUserTimeout(sc.conn) + if err != nil { + t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err) + } + if gotTimeout := time.Duration(srvOpt) * time.Millisecond; gotTimeout != tt.serverWantTimeout { + t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.serverWantTimeout) } } } From 4635bf287fc1f7a26aabfacc146a3d9c21976d8d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 21 Mar 2022 15:47:31 -0700 Subject: [PATCH 462/998] make the RLS LB policy name available in the internal package (#5259) --- balancer/rls/balancer.go | 6 +++++- internal/internal.go | 6 ++++++ xds/internal/clusterspecifier/rls/rls.go | 11 ++++------- xds/internal/clusterspecifier/rls/rls_test.go | 5 +++-- xds/xds.go | 1 + 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index 3b9d610f36d1..cde95d992d63 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/buffer" @@ -40,7 +41,10 @@ import ( const ( // Name is the name of the RLS LB policy. - Name = "rls_experimental" + // + // It currently has an experimental suffix which would be removed once + // end-to-end testing of the policy is completed. + Name = internal.RLSLoadBalancingPolicyName // Default frequency for data cache purging. periodicCachePurgeFreq = time.Minute ) diff --git a/internal/internal.go b/internal/internal.go index 20fb880f344f..6d355b0b0134 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -85,3 +85,9 @@ const ( // that supports backend returned by grpclb balancer. 
CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go index 6aca9715fc2a..037795834c9b 100644 --- a/xds/internal/clusterspecifier/rls/rls.go +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -26,14 +26,12 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/anypb" - - // Never remove this import as the RLS LB policy is registered in its init(). 
- rlslb "google.golang.org/grpc/balancer/rls" ) func init() { @@ -89,14 +87,13 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala return nil, fmt.Errorf("rls_csp: error marshaling load balancing config %v: %v", lbCfgJSON, err) } - rlsBB := balancer.Get(rlslb.Name) + rlsBB := balancer.Get(internal.RLSLoadBalancingPolicyName) if rlsBB == nil { return nil, fmt.Errorf("RLS LB policy not registered") } - _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON) - if err != nil { + if _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON); err != nil { return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing %v", err) } - return clusterspecifier.BalancerConfig{{rlslb.Name: lbCfgJSON}}, nil + return clusterspecifier.BalancerConfig{{internal.RLSLoadBalancingPolicyName: lbCfgJSON}}, nil } diff --git a/xds/internal/clusterspecifier/rls/rls_test.go b/xds/internal/clusterspecifier/rls/rls_test.go index 36ca9b764bfe..9e0a10b648e6 100644 --- a/xds/internal/clusterspecifier/rls/rls_test.go +++ b/xds/internal/clusterspecifier/rls/rls_test.go @@ -25,13 +25,14 @@ import ( "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - _ "google.golang.org/grpc/balancer/rls" "google.golang.org/grpc/internal/grpctest" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/internal/testutils" - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/protobuf/types/known/durationpb" + + _ "google.golang.org/grpc/balancer/rls" // Register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS LB policy. 
) func init() { diff --git a/xds/xds.go b/xds/xds.go index 744f3f139645..1b2b0c5793f8 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -38,6 +38,7 @@ import ( _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. From 51ddcbad169405559640e761c2943086f9d748fe Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 21 Mar 2022 15:47:58 -0700 Subject: [PATCH 463/998] xds: suppress redundant updates only when we are SERVING (#5258) --- xds/internal/server/listener_wrapper.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 421ed7533633..c6ab885fcf90 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -438,7 +438,12 @@ func (l *listenerWrapper) switchMode(fcs *xdsresource.FilterChainManager, newMod defer l.mu.Unlock() l.filterChains = fcs - if l.mode == newMode { + if l.mode == newMode && l.mode == connectivity.ServingModeServing { + // Redundant updates are suppressed only when we are SERVING and the new + // mode is also SERVING. In the other case (where we are NOT_SERVING and the + // new mode is also NOT_SERVING), the update is not suppressed as: + // 1. the error may have change + // 2. 
it provides a timestamp of the last backoff attempt return } l.mode = newMode From 597e5d1b1a8757c0804e44d52eb73a8f35e9700d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Mar 2022 08:50:50 -0700 Subject: [PATCH 464/998] don't apply defaultSC upon receipt of invalid service config (#5257) --- clientconn.go | 12 +- resolver_conn_wrapper_test.go | 94 ------ service_config.go | 3 + ...solver_test.go => config_selector_test.go} | 1 - test/resolver_update_test.go | 280 ++++++++++++++++++ 5 files changed, 288 insertions(+), 102 deletions(-) delete mode 100644 resolver_conn_wrapper_test.go rename test/{resolver_test.go => config_selector_test.go} (99%) create mode 100644 test/resolver_update_test.go diff --git a/clientconn.go b/clientconn.go index e4819ca76b43..565aa247473d 100644 --- a/clientconn.go +++ b/clientconn.go @@ -653,13 +653,11 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } else { ret = balancer.ErrBadResolverState if cc.sc == nil { - if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, s.Addresses) - } else { - cc.applyFailingLB(s.ServiceConfig) - cc.mu.Unlock() - return ret - } + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) + cc.mu.Unlock() + return ret } } } diff --git a/resolver_conn_wrapper_test.go b/resolver_conn_wrapper_test.go deleted file mode 100644 index f7dcd7eb1978..000000000000 --- a/resolver_conn_wrapper_test.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "errors" - "strings" - "testing" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/status" -) - -const happyBalancerName = "happy balancer" - -func init() { - // Register a balancer that never returns an error from - // UpdateClientConnState, and doesn't do anything else either. - bf := stub.BalancerFuncs{ - UpdateClientConnState: func(*stub.BalancerData, balancer.ClientConnState) error { - return nil - }, - } - stub.Register(happyBalancerName, bf) -} - -// TestResolverErrorInBuild makes the resolver.Builder call into the ClientConn -// during the Build call. We use two separate mutexes in the code which make -// sure there is no data race in this code path, and also that there is no -// deadlock. 
-func (s) TestResolverErrorInBuild(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Err: errors.New("resolver build err")}}) - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) - } - defer cc.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - var dummy int - const wantMsg = "error parsing service config" - const wantCode = codes.Unavailable - if err := cc.Invoke(ctx, "/foo/bar", &dummy, &dummy); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { - t.Fatalf("cc.Invoke(_, _, _, _) = %v; want status.Code()==%v, status.Message() contains %q", err, wantCode, wantMsg) - } -} - -func (s) TestServiceConfigErrorRPC(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) - } - defer cc.Close() - badsc := r.CC.ParseServiceConfig("bad config") - r.UpdateState(resolver.State{ServiceConfig: badsc}) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - var dummy int - const wantMsg = "error parsing service config" - const wantCode = codes.Unavailable - if err := cc.Invoke(ctx, "/foo/bar", &dummy, &dummy); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { - t.Fatalf("cc.Invoke(_, _, _, _) = %v; want status.Code()==%v, status.Message() contains %q", err, wantCode, wantMsg) - } -} diff --git a/service_config.go b/service_config.go index 6926a06dc523..b01c548bb9a9 100644 --- a/service_config.go +++ b/service_config.go @@ -381,6 +381,9 @@ func init() { // // If any of them is NOT *ServiceConfig, 
return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/test/resolver_test.go b/test/config_selector_test.go similarity index 99% rename from test/resolver_test.go rename to test/config_selector_test.go index 648245aef9c3..6b0bb2e1ed8b 100644 --- a/test/resolver_test.go +++ b/test/config_selector_test.go @@ -211,5 +211,4 @@ func (s) TestConfigSelector(t *testing.T) { } }) } - } diff --git a/test/resolver_update_test.go b/test/resolver_update_test.go new file mode 100644 index 000000000000..b2443fdd8110 --- /dev/null +++ b/test/resolver_update_test.go @@ -0,0 +1,280 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +// TestResolverUpdateDuringBuild_ServiceConfigParseError makes the +// resolver.Builder call into the ClientConn, during the Build call, with a +// service config parsing error. +// +// We use two separate mutexes in the code which make sure there is no data race +// in this code path, and also that there is no deadlock. +func (s) TestResolverUpdateDuringBuild_ServiceConfigParseError(t *testing.T) { + // Setting InitialState on the manual resolver makes it call into the + // ClientConn during the Build call. 
+ r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Err: errors.New("resolver build err")}}) + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testpb.NewTestServiceClient(cc) + const wantMsg = "error parsing service config" + const wantCode = codes.Unavailable + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { + t.Fatalf("EmptyCall RPC failed: %v; want code: %v, want message: %q", err, wantCode, wantMsg) + } +} + +type fakeConfig struct { + serviceconfig.Config +} + +// TestResolverUpdateDuringBuild_ServiceConfigInvalidTypeError makes the +// resolver.Builder call into the ClientConn, during the Build call, with an +// invalid service config type. +// +// We use two separate mutexes in the code which make sure there is no data race +// in this code path, and also that there is no deadlock. +func (s) TestResolverUpdateDuringBuild_ServiceConfigInvalidTypeError(t *testing.T) { + // Setting InitialState on the manual resolver makes it call into the + // ClientConn during the Build call. 
+ r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Config: fakeConfig{}}}) + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testpb.NewTestServiceClient(cc) + const wantMsg = "illegal service config type" + const wantCode = codes.Unavailable + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { + t.Fatalf("EmptyCall RPC failed: %v; want code: %v, want message: %q", err, wantCode, wantMsg) + } +} + +// TestResolverUpdate_InvalidServiceConfigAsFirstUpdate makes the resolver send +// an update with an invalid service config as its first update. This should +// make the ClientConn apply the failing LB policy, and should result in RPC +// errors indicating the failing service config. 
+func (s) TestResolverUpdate_InvalidServiceConfigAsFirstUpdate(t *testing.T) { + r := manual.NewBuilderWithScheme("whatever") + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + scpr := r.CC.ParseServiceConfig("bad json service config") + r.UpdateState(resolver.State{ServiceConfig: scpr}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testpb.NewTestServiceClient(cc) + const wantMsg = "error parsing service config" + const wantCode = codes.Unavailable + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { + t.Fatalf("EmptyCall RPC failed: %v; want code: %v, want message: %q", err, wantCode, wantMsg) + } +} + +// The wrappingBalancer wraps a pick_first balancer and writes to a channel when +// it receives a ClientConn update. This is different to a stub balancer which +// only notifies of updates from grpc, but does not contain a real balancer. +// +// The wrappingBalancer allows us to write tests with a real backend and make +// real RPCs. 
+type wrappingBalancerBuilder struct { + name string + updateCh *testutils.Channel +} + +func (bb wrappingBalancerBuilder) Name() string { return bb.name } + +func (bb wrappingBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + pf := balancer.Get(grpc.PickFirstBalancerName) + b := &wrappingBalancer{ + Balancer: pf.Build(cc, opts), + updateCh: bb.updateCh, + } + return b +} + +func (bb wrappingBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &wrappingBalancerConfig{} + if err := json.Unmarshal(c, cfg); err != nil { + return nil, err + } + return cfg, nil +} + +type wrappingBalancer struct { + balancer.Balancer + updateCh *testutils.Channel +} + +func (b *wrappingBalancer) UpdateClientConnState(c balancer.ClientConnState) error { + if _, ok := c.BalancerConfig.(*wrappingBalancerConfig); !ok { + return fmt.Errorf("received balancer config of unsupported type %T", c.BalancerConfig) + } + b.updateCh.Send(c) + return b.Balancer.UpdateClientConnState(c) +} + +type wrappingBalancerConfig struct { + serviceconfig.LoadBalancingConfig + Config string `json:"config,omitempty"` +} + +func verifyClientConnStateUpdate(got, want balancer.ClientConnState) error { + if got, want := got.ResolverState.Addresses, want.ResolverState.Addresses; !cmp.Equal(got, want) { + return fmt.Errorf("update got unexpected addresses: %v, want %v", got, want) + } + if got, want := got.ResolverState.ServiceConfig.Config, want.ResolverState.ServiceConfig.Config; !internal.EqualServiceConfigForTesting(got, want) { + return fmt.Errorf("received unexpected service config: \ngot: %v \nwant: %v", got, want) + } + if got, want := got.BalancerConfig, want.BalancerConfig; !cmp.Equal(got, want) { + return fmt.Errorf("received unexpected balancer config: \ngot: %v \nwant: %v", cmp.Diff(nil, got), cmp.Diff(nil, want)) + } + return nil +} + +// TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate tests the scenario 
+// where the resolver sends an update with an invalid service config after +// having sent a good update. This should result in the ClientConn discarding +// the new invalid service config, and continuing to use the old good config. +func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { + // Register a wrapper balancer to get notified of ClientConn updates. + ccsCh := testutils.NewChannel() + balancer.Register(wrappingBalancerBuilder{ + name: t.Name(), + updateCh: ccsCh, + }) + + // Start a backend exposing the test service. + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + defer backend.Stop() + + r := manual.NewBuilderWithScheme("whatever") + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + // Push a resolver update and verify that our balancer receives the update. 
+ addrs := []resolver.Address{{Addr: backend.Address}} + const lbCfg = "wrapping balancer LB policy config" + goodSC := r.CC.ParseServiceConfig(fmt.Sprintf(` +{ + "loadBalancingConfig": [ + { + "%v": { + "config": "%s" + } + } + ] +}`, t.Name(), lbCfg)) + r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: goodSC}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantCCS := balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addrs, + ServiceConfig: goodSC, + }, + BalancerConfig: &wrappingBalancerConfig{Config: lbCfg}, + } + ccs, err := ccsCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for ClientConnState update from grpc") + } + gotCCS := ccs.(balancer.ClientConnState) + if err := verifyClientConnStateUpdate(gotCCS, wantCCS); err != nil { + t.Fatal(err) + } + + // Ensure RPCs are successful. + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall RPC failed: %v", err) + } + + // Push a bad resolver update and ensure that the update is propagated to our + // stub balancer. But since the pushed update contains an invalid service + // config, our balancer should continue to see the old loadBalancingConfig. + badSC := r.CC.ParseServiceConfig("bad json service config") + wantCCS.ResolverState.ServiceConfig = badSC + r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: badSC}) + ccs, err = ccsCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for ClientConnState update from grpc") + } + gotCCS = ccs.(balancer.ClientConnState) + if err := verifyClientConnStateUpdate(gotCCS, wantCCS); err != nil { + t.Fatal(err) + } + + // RPCs should continue to be successful since the ClientConn is using the old + // good service config. 
+ if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall RPC failed: %v", err) + } +} From 9d088969a8a7e49bf17018dfc65b11998c677ce7 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 22 Mar 2022 11:28:17 -0700 Subject: [PATCH 465/998] github: update to test with Go 1.18 (#5262) --- .github/workflows/testing.yml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index b687fdb3dc39..ac2bcdfae75a 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -24,7 +24,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: 1.15 + go-version: 1.18 - name: Checkout repo uses: actions/checkout@v2 @@ -43,23 +43,26 @@ jobs: matrix: include: - type: vet+tests - goversion: 1.17 + goversion: 1.18 - type: tests - goversion: 1.17 + goversion: 1.18 testflags: -race - type: extras - goversion: 1.17 + goversion: 1.18 - type: tests - goversion: 1.17 + goversion: 1.18 goarch: 386 - type: tests - goversion: 1.17 + goversion: 1.18 goarch: arm64 + - type: tests + goversion: 1.17 + - type: tests goversion: 1.16 From 2fccb822f142f24e18db9b720f2094d891a6a9f5 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 22 Mar 2022 14:15:22 -0700 Subject: [PATCH 466/998] metadata: copy slices in FromContext() functions (#5267) --- metadata/metadata.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/metadata/metadata.go b/metadata/metadata.go index 3604c7819fdc..8e0f6abe89d7 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. 
key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } return out, true } @@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } for _, added := range raw.added { if len(added)%2 == 1 { From e49486ded2ab4ae2029e4a7358f2a85db7aff5ac Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Mar 2022 14:32:53 -0700 Subject: [PATCH 467/998] test: add a test for clientConn close with pending RPC (#5264) --- test/clientconn_test.go | 89 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 test/clientconn_test.go diff --git a/test/clientconn_test.go b/test/clientconn_test.go new file mode 100644 index 000000000000..58d0d54fe581 --- /dev/null +++ b/test/clientconn_test.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +// TestClientConnClose_WithPendingRPC tests the scenario where the channel has +// not yet received any update from the name resolver and hence RPCs are +// blocking. The test verifies that closing the ClientConn unblocks the RPC with +// the expected error code. +func (s) TestClientConnClose_WithPendingRPC(t *testing.T) { + // Initialize channelz. Used to determine pending RPC count. + czCleanup := channelz.NewChannelzStorageForTesting() + defer czCleanupWrapper(czCleanup, t) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + client := testgrpc.NewTestServiceClient(cc) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + doneErrCh := make(chan error, 1) + go func() { + // This RPC would block until the ClientConn is closed, because the + // resolver has not provided its first update yet. + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if status.Code(err) != codes.Canceled || !strings.Contains(err.Error(), "client connection is closing") { + doneErrCh <- fmt.Errorf("EmptyCall() = %v, want %s", err, codes.Canceled) + } + doneErrCh <- nil + }() + + // Make sure that there is one pending RPC on the ClientConn before attempting + // to close it. If we don't do this, cc.Close() can happen before the above + // goroutine gets to make the RPC. 
+ for { + if err := ctx.Err(); err != nil { + t.Fatal(err) + } + tcs, _ := channelz.GetTopChannels(0, 0) + if len(tcs) != 1 { + t.Fatalf("there should only be one top channel, not %d", len(tcs)) + } + started := tcs[0].ChannelData.CallsStarted + completed := tcs[0].ChannelData.CallsSucceeded + tcs[0].ChannelData.CallsFailed + if (started - completed) == 1 { + break + } + time.Sleep(defaultTestShortTimeout) + } + cc.Close() + if err := <-doneErrCh; err != nil { + t.Fatal(err) + } +} From 96bdede4dfd5176348876f829f39a8f6f740ee4d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Mar 2022 14:33:41 -0700 Subject: [PATCH 468/998] pickfirst: cleanup tests (#5263) --- balancer_switching_test.go | 8 + pickfirst_test.go | 349 ------------------------------------- test/pickfirst_test.go | 260 +++++++++++++++++++++++++++ 3 files changed, 268 insertions(+), 349 deletions(-) delete mode 100644 pickfirst_test.go create mode 100644 test/pickfirst_test.go diff --git a/balancer_switching_test.go b/balancer_switching_test.go index 3c12dd2e4eec..3812bbdc990e 100644 --- a/balancer_switching_test.go +++ b/balancer_switching_test.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" ) var _ balancer.Builder = &magicalLB{} @@ -79,6 +80,13 @@ func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, f } } +func errorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + func checkPickFirst(cc *ClientConn, servers []*server) error { var ( req = "port" diff --git a/pickfirst_test.go b/pickfirst_test.go deleted file mode 100644 index 445026a9a14e..000000000000 --- a/pickfirst_test.go +++ /dev/null @@ -1,349 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "math" - "sync" - "testing" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/status" -) - -func errorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} - -func (s) TestOneBackendPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 1 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", - WithTransportCredentials(insecure.NewCredentials()), - WithResolvers(r), - WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) - // The second RPC should succeed. 
- for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) -} - -func (s) TestBackendsPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) - // The second RPC should succeed with the first server. 
- for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) -} - -func (s) TestNewAddressWhileBlockingPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 1 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until NewAddress is called. - cc.Invoke(context.Background(), "/foo/bar", &req, &reply) - }() - } - time.Sleep(50 * time.Millisecond) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) - wg.Wait() -} - -func (s) TestCloseWithPendingRPCPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 1 - _, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until NewAddress is called. - cc.Invoke(context.Background(), "/foo/bar", &req, &reply) - }() - } - time.Sleep(50 * time.Millisecond) - cc.Close() - wg.Wait() -} - -func (s) TestOneServerDownPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) - // The second RPC should succeed with the first server. 
- for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - - servers[0].stop() - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) -} - -func (s) TestAllServersDownPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) - // The second RPC should succeed with the first server. 
- for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - - for i := 0; i < numServers; i++ { - servers[i].stop() - } - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); status.Code(err) == codes.Unavailable { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, error with code unavailable", err) -} - -func (s) TestAddressesRemovedPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 3 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}, {Addr: servers[2].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Remove server[0]. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Append server[0], nothing should change. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}, {Addr: servers[0].addr}}}) - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Remove server[1]. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[2].addr}, {Addr: servers[0].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[2].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[2].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Remove server[2]. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) - } - time.Sleep(10 * time.Millisecond) - } -} diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go new file mode 100644 index 000000000000..7379ce286910 --- /dev/null +++ b/test/pickfirst_test.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +const pickFirstServiceConfig = `{"loadBalancingConfig": [{"pick_first":{}}]}` + +// setupPickFirst performs steps required for pick_first tests. 
It starts a +// bunch of backends exporting the TestService, creates a ClientConn to them +// with service config specifying the use of the pick_first LB policy. +func setupPickFirst(t *testing.T, backendCount int, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer) { + t.Helper() + r := manual.NewBuilderWithScheme("whatever") + + backends := make([]*stubserver.StubServer, backendCount) + addrs := make([]resolver.Address, backendCount) + for i := 0; i < backendCount; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + + backends[i] = backend + addrs[i] = resolver.Address{Addr: backend.Address} + } + + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(pickFirstServiceConfig), + } + dopts = append(dopts, opts...) + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // At this point, the resolver has not returned any addresses to the channel. + // This RPC must block until the context expires. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + return cc, r, backends +} + +// checkPickFirst makes a bunch of RPCs on the given ClientConn and verifies if +// the RPCs are routed to a peer matching wantAddr. 
+func checkPickFirst(ctx context.Context, cc *grpc.ClientConn, wantAddr string) error { + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + // Make sure the RPC reaches the expected backend once. + for { + time.Sleep(time.Millisecond) + if ctx.Err() != nil { + return fmt.Errorf("timeout waiting for RPC to be routed to %q", wantAddr) + } + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + // Some tests remove backends and check if pick_first is happening across + // the remaining backends. In such cases, RPCs can initially fail on the + // connection using the removed backend. Just keep retrying and eventually + // the connection using the removed backend will shutdown and will be + // removed. + continue + } + if peer.Addr.String() == wantAddr { + break + } + } + // Make sure subsequent RPCs are all routed to the same backend. + for i := 0; i < 10; i++ { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + return fmt.Errorf("EmptyCall() = %v, want ", err) + } + if gotAddr := peer.Addr.String(); gotAddr != wantAddr { + return fmt.Errorf("rpc sent to peer %q, want peer %q", gotAddr, wantAddr) + } + } + return nil +} + +// backendsToAddrs is a helper routine to convert from a set of backends to +// resolver addresses. Useful when pushing addresses to the manual resolver. +func backendsToAddrs(backends []*stubserver.StubServer) []resolver.Address { + addrs := make([]resolver.Address, len(backends)) + for i, backend := range backends { + addrs[i] = resolver.Address{Addr: backend.Address} + } + return addrs +} + +// TestPickFirst_OneBackend tests the most basic scenario for pick_first. It +// brings up a single backend and verifies that all RPCs get routed to it. 
+func (s) TestPickFirst_OneBackend(t *testing.T) { + cc, r, backends := setupPickFirst(t, 1) + + addrs := backendsToAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } +} + +// TestPickFirst_MultipleBackends tests the scenario with multiple backends and +// verifies that all RPCs get routed to the first one. +func (s) TestPickFirst_MultipleBackends(t *testing.T) { + cc, r, backends := setupPickFirst(t, 2) + + addrs := backendsToAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } +} + +// TestPickFirst_OneServerDown tests the scenario where we have multiple +// backends and pick_first is working as expected. Verifies that RPCs get routed +// to the next backend in the list when the first one goes down. +func (s) TestPickFirst_OneServerDown(t *testing.T) { + cc, r, backends := setupPickFirst(t, 2) + + addrs := backendsToAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + + // Stop the backend which is currently being used. RPCs should get routed to + // the next backend in the list. + backends[0].Stop() + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + t.Fatal(err) + } +} + +// TestPickFirst_AllServersDown tests the scenario where we have multiple +// backends and pick_first is working as expected. When all backends go down, +// the test verifies that RPCs fail with appropriate status code. 
+func (s) TestPickFirst_AllServersDown(t *testing.T) { + cc, r, backends := setupPickFirst(t, 2) + + addrs := backendsToAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + + for _, b := range backends { + b.Stop() + } + + client := testgrpc.NewTestServiceClient(cc) + for { + if ctx.Err() != nil { + t.Fatalf("channel failed to move to Unavailable after all backends were stopped: %v", ctx.Err()) + } + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) == codes.Unavailable { + return + } + time.Sleep(defaultTestShortTimeout) + } +} + +// TestPickFirst_AddressesRemoved tests the scenario where we have multiple +// backends and pick_first is working as expected. It then verifies that when +// addresses are removed by the name resolver, RPCs get routed appropriately. +func (s) TestPickFirst_AddressesRemoved(t *testing.T) { + cc, r, backends := setupPickFirst(t, 3) + + addrs := backendsToAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + + // Remove the first backend from the list of addresses originally pushed. + // RPCs should get routed to the first backend in the new list. + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[1], addrs[2]}}) + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + t.Fatal(err) + } + + // Append the backend that we just removed to the end of the list. + // Nothing should change. 
+ r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[1], addrs[2], addrs[0]}}) + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + t.Fatal(err) + } + + // Remove the first backend from the existing list of addresses. + // RPCs should get routed to the first backend in the new list. + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[2], addrs[0]}}) + if err := checkPickFirst(ctx, cc, addrs[2].Addr); err != nil { + t.Fatal(err) + } + + // Remove the first backend from the existing list of addresses. + // RPCs should get routed to the first backend in the new list. + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0]}}) + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } +} From 34660d102927805627609272ca420d301cd71b0d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Mar 2022 14:37:12 -0700 Subject: [PATCH 469/998] test: use `t.Fatal` instead of `panic` for service config parsing errors (#5266) --- test/channelz_test.go | 27 +++++++++++++++++++------ test/config_selector_test.go | 2 +- test/end2end_test.go | 27 +++++++++---------------- test/healthcheck_test.go | 24 +++++++++++------------ test/parse_config.go | 38 ++++++++++++++++++++++++++++++++++++ 5 files changed, 81 insertions(+), 37 deletions(-) create mode 100644 test/parse_config.go diff --git a/test/channelz_test.go b/test/channelz_test.go index ecf5412a1f1a..ed8569bcd1a1 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -231,7 +231,10 @@ func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // wait for the shutdown of grpclb 
balancer if err := verifyResultWithDelay(func() (bool, error) { @@ -1443,7 +1446,10 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // wait for the shutdown of grpclb balancer if err := verifyResultWithDelay(func() (bool, error) { @@ -1603,7 +1609,10 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { }); err != nil { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(cid) @@ -1620,7 +1629,7 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { t.Fatal(err) } - newSC := parseCfg(r, `{ + newSC := parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1937,7 +1946,10 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // wait for the shutdown of grpclb balancer if err := verifyResultWithDelay(func() (bool, error) { @@ -1955,7 +1967,10 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { // If nested channel deletion is last trace event before the next validation, it will fail, as the top channel will hold a 
reference to it. // This line forces a trace event on the top channel in that case. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // verify that the nested channel no longer exist due to trace referencing it got overwritten. if err := verifyResultWithDelay(func() (bool, error) { diff --git a/test/config_selector_test.go b/test/config_selector_test.go index 6b0bb2e1ed8b..677a24839a1d 100644 --- a/test/config_selector_test.go +++ b/test/config_selector_test.go @@ -145,7 +145,7 @@ func (s) TestConfigSelector(t *testing.T) { var gotInfo *iresolver.RPCInfo state := iresolver.SetConfigSelector(resolver.State{ Addresses: []resolver.Address{{Addr: ss.Address}}, - ServiceConfig: parseCfg(ss.R, "{}"), + ServiceConfig: parseServiceConfig(t, ss.R, "{}"), }, funcConfigSelector{ f: func(i iresolver.RPCInfo) (*iresolver.RPCConfig, error) { gotInfo = &i diff --git a/test/end2end_test.go b/test/end2end_test.go index f5ca011a39cc..5fa36b1a2b11 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -68,7 +68,6 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -1574,7 +1573,7 @@ func (s) TestGetMethodConfig(t *testing.T) { addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1615,7 +1614,7 @@ func (s) TestGetMethodConfig(t *testing.T) { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } - 
r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{ + r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1662,7 +1661,7 @@ func (s) TestServiceConfigWaitForReady(t *testing.T) { addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1705,7 +1704,7 @@ func (s) TestServiceConfigWaitForReady(t *testing.T) { // Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1751,7 +1750,7 @@ func (s) TestServiceConfigTimeout(t *testing.T) { addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1798,7 +1797,7 @@ func (s) TestServiceConfigTimeout(t *testing.T) { // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. 
r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1869,7 +1868,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { cc1 := te1.clientConn(grpc.WithResolvers(r)) addrs := []resolver.Address{{Addr: te1.srvAddr}} - sc := parseCfg(r, `{ + sc := parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -2108,7 +2107,7 @@ func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) { r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: te.srvAddr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -7186,7 +7185,7 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { time.Sleep(time.Second) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: te.srvAddr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -7501,14 +7500,6 @@ func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string } } -func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { - g := r.CC.ParseServiceConfig(s) - if g.Err != nil { - panic(fmt.Sprintf("Error parsing config %q: %v", s, g.Err)) - } - return g -} - func (s) TestClientCancellationPropagatesUnary(t *testing.T) { wg := &sync.WaitGroup{} called, done := make(chan struct{}), make(chan struct{}) diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index abff2f56c438..0dba4d7f495f 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -201,7 +201,7 @@ func (s) TestHealthCheckWatchStateChange(t *testing.T) { cc, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -267,7 +267,7 @@ func (s) TestHealthCheckHealthServerNotRegistered(t 
*testing.T) { cc, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -298,7 +298,7 @@ func (s) TestHealthCheckWithGoAway(t *testing.T) { tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -376,7 +376,7 @@ func (s) TestHealthCheckWithConnClose(t *testing.T) { tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -424,7 +424,7 @@ func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) { hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) tc := testpb.NewTestServiceClient(cc) - sc := parseCfg(r, `{ + sc := parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -506,7 +506,7 @@ func (s) TestHealthCheckWithClientConnClose(t *testing.T) { tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -573,7 +573,7 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *tes // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until // test ends). 
- sc := parseCfg(r, `{ + sc := parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "delay" }, @@ -638,7 +638,7 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { // test ends). r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "delay" }, @@ -679,7 +679,7 @@ func testHealthCheckDisableWithDialOption(t *testing.T, addr string) { tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -713,7 +713,7 @@ func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" }, @@ -787,7 +787,7 @@ func (s) TestHealthCheckChannelzCountingCallSuccess(t *testing.T) { _, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "channelzSuccess" }, @@ -834,7 +834,7 @@ func (s) TestHealthCheckChannelzCountingCallFailure(t *testing.T) { _, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "channelzFailure" }, diff --git a/test/parse_config.go b/test/parse_config.go new file mode 100644 index 000000000000..f375a3aa8a18 --- /dev/null +++ b/test/parse_config.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "testing" + + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" +) + +// parseServiceConfig is a test helper which uses the manual resolver to parse +// the given service config. It calls t.Fatal() if service config parsing fails. +func parseServiceConfig(t *testing.T, r *manual.Resolver, sc string) *serviceconfig.ParseResult { + t.Helper() + + scpr := r.CC.ParseServiceConfig(sc) + if scpr.Err != nil { + t.Fatalf("Failed to parse service config %q: %v", sc, scpr.Err) + } + return scpr +} From 3a74cd52c862325614b7456e1cb22be1ae4d4b8c Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 22 Mar 2022 18:58:13 -0400 Subject: [PATCH 470/998] balancergroup: Add trigger point to gracefully switch a child (#5251) * balancergroup: Add trigger point to gracefully switch a child --- internal/balancergroup/balancergroup.go | 31 ++++ internal/balancergroup/balancergroup_test.go | 142 +++++++++++++++++++ 2 files changed, 173 insertions(+) diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index 4f089fdf27e3..3daad14473ee 100644 --- a/internal/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -160,6 +160,20 @@ func (sbc *subBalancerWrapper) resolverError(err error) { b.ResolverError(err) } +func (sbc *subBalancerWrapper) 
gracefulSwitch(builder balancer.Builder) { + sbc.builder = builder + b := sbc.balancer + // Even if you get an add and it persists builder but doesn't start + // balancer, this would leave graceful switch being nil, in which we are + // correctly overwriting with the recent builder here as well to use later. + // The graceful switch balancer's presence is an invariant of whether the + // balancer group is closed or not (if closed, nil, if started, present). + if sbc.balancer != nil { + sbc.group.logger.Infof("Switching child policy %v to type %v", sbc.id, sbc.builder.Name()) + b.SwitchTo(sbc.builder) + } +} + func (sbc *subBalancerWrapper) stopBalancer() { sbc.balancer.Close() sbc.balancer = nil @@ -332,6 +346,23 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { bg.outgoingMu.Unlock() } +// UpdateBuilder updates the builder for a current child, starting the Graceful +// Switch process for that child. +func (bg *BalancerGroup) UpdateBuilder(id string, builder balancer.Builder) { + bg.outgoingMu.Lock() + // This does not deal with the balancer cache because this call should come + // after an Add call for a given child balancer. If the child is removed, + // the caller will call Add if the child balancer comes back which would + // then deal with the balancer cache. + sbc := bg.idToBalancerConfig[id] + if sbc == nil { + // simply ignore it if not present, don't error + return + } + sbc.gracefulSwitch(builder) + bg.outgoingMu.Unlock() +} + // Remove removes the balancer with id from the group. // // But doesn't close the balancer. 
The balancer is kept in a cache, and will be diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index d8a5a1c19b86..d962faa0ab82 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -17,11 +17,13 @@ package balancergroup import ( + "context" "fmt" "testing" "time" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" @@ -34,6 +36,11 @@ import ( "google.golang.org/grpc/resolver" ) +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + var ( rrBuilder = balancer.Get(roundrobin.Name) testBalancerIDs = []string{"b1", "b2", "b3"} @@ -534,3 +541,138 @@ func (s) TestBalancerExitIdleOne(t *testing.T) { case <-exitIdleCh: } } + +// TestBalancerGracefulSwitch tests the graceful switch functionality for a +// child of the balancer group. At first, the child is configured as a round +// robin load balancer, and thus should behave accordingly. The test then +// gracefully switches this child to a custom type which only creates a SubConn +// for the second passed in address and also only picks that created SubConn. +// The new aggregated picker should reflect this change for the child. 
+func (s) TestBalancerGracefulSwitch(t *testing.T) { + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + gator.Add(testBalancerIDs[0], 1) + bg.Add(testBalancerIDs[0], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + + bg.Start() + + m1 := make(map[resolver.Address]balancer.SubConn) + scs := make(map[balancer.SubConn]bool) + for i := 0; i < 2; i++ { + addrs := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + m1[addrs[0]] = sc + scs[sc] = true + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + + p1 := <-cc.NewPickerCh + want := []balancer.SubConn{ + m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], + } + if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // The balancer type for testBalancersIDs[0] is currently Round Robin. Now, + // change it to a balancer that has separate behavior logically (creating + // SubConn for second address in address list and always picking that + // SubConn), and see if the downstream behavior reflects that change. + bg.UpdateBuilder(testBalancerIDs[0], wrappedPickFirstBalancerBuilder{}) + if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}); err != nil { + t.Fatalf("error updating ClientConn state: %v", err) + } + + addrs := <-cc.NewSubConnAddrsCh + if addrs[0].Addr != testBackendAddrs[3].Addr { + // Verifies forwarded to new created balancer, as the wrapped pick first + // balancer will delete first address. 
+ t.Fatalf("newSubConn called with wrong address, want: %v, got : %v", testBackendAddrs[3].Addr, addrs[0].Addr) + } + sc := <-cc.NewSubConnCh + + // Update the pick first balancers SubConn as CONNECTING. This will cause + // the pick first balancer to UpdateState() with CONNECTING, which shouldn't send + // a Picker update back, as the Graceful Switch process is not complete. + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-cc.NewPickerCh: + t.Fatalf("No new picker should have been sent due to the Graceful Switch process not completing") + case <-ctx.Done(): + } + + // Update the pick first balancers SubConn as READY. This will cause + // the pick first balancer to UpdateState() with READY, which should send a + // Picker update back, as the Graceful Switch process is complete. This + // Picker should always pick the pick first's created SubConn which + // corresponds to address 3. + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p2 := <-cc.NewPickerCh + pr, err := p2.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("error picking: %v", err) + } + if pr.SubConn != sc { + t.Fatalf("picker.Pick(), want %v, got %v", sc, pr.SubConn) + } + + // The Graceful Switch process completing for the child should cause the + // SubConns for the balancer being gracefully switched from to get deleted. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for i := 0; i < 2; i++ { + select { + case <-ctx.Done(): + t.Fatalf("error waiting for RemoveSubConn()") + case sc := <-cc.RemoveSubConnCh: + // The SubConn removed should have been one of the two created + // SubConns, and both should be deleted. 
+ if ok := scs[sc]; ok { + delete(scs, sc) + continue + } else { + t.Fatalf("RemoveSubConn called for wrong SubConn %v, want in %v", sc, scs) + } + } + } +} + +type wrappedPickFirstBalancerBuilder struct{} + +func (wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, + } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb +} + +func (wrappedPickFirstBalancerBuilder) Name() string { + return "wrappedPickFirstBalancer" +} + +type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + s.ResolverState.Addresses = s.ResolverState.Addresses[1:] + return wb.Balancer.UpdateClientConnState(s) +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + // Eat it if IDLE - allows it to switch over only on a READY SubConn. + if state.ConnectivityState == connectivity.Idle { + return + } + wb.ClientConn.UpdateState(state) +} From e63e1230fd01bc4390afdeb27a42c8e631ee9026 Mon Sep 17 00:00:00 2001 From: ZhenLian Date: Fri, 25 Mar 2022 09:58:06 -0700 Subject: [PATCH 471/998] advancedtls: remove the usage of CDP in CRL enforcement (#5218) --- security/advancedtls/crl.go | 49 +++++++++++++------------------- security/advancedtls/crl_test.go | 4 +-- 2 files changed, 22 insertions(+), 31 deletions(-) diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go index 7988cb27176d..b54c1c571e6a 100644 --- a/security/advancedtls/crl.go +++ b/security/advancedtls/crl.go @@ -229,20 +229,20 @@ func cachedCrl(rawIssuer []byte, cache Cache) (*certificateListExt, bool) { } // fetchIssuerCRL fetches and verifies the CRL for rawIssuer from disk or cache if configured in cfg. 
-func fetchIssuerCRL(crlDistributionPoint string, rawIssuer []byte, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) (*certificateListExt, error) { +func fetchIssuerCRL(rawIssuer []byte, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) (*certificateListExt, error) { if cfg.Cache != nil { if crl, ok := cachedCrl(rawIssuer, cfg.Cache); ok { return crl, nil } } - crl, err := fetchCRL(crlDistributionPoint, rawIssuer, cfg) + crl, err := fetchCRL(rawIssuer, cfg) if err != nil { - return nil, fmt.Errorf("fetchCRL(%v) failed err = %v", crlDistributionPoint, err) + return nil, fmt.Errorf("fetchCRL() failed err = %v", err) } if err := verifyCRL(crl, rawIssuer, crlVerifyCrt); err != nil { - return nil, fmt.Errorf("verifyCRL(%v) failed err = %v", crlDistributionPoint, err) + return nil, fmt.Errorf("verifyCRL() failed err = %v", err) } if cfg.Cache != nil { cfg.Cache.Add(hex.EncodeToString(rawIssuer), crl) @@ -251,36 +251,27 @@ func fetchIssuerCRL(crlDistributionPoint string, rawIssuer []byte, crlVerifyCrt } // checkCert checks a single certificate against the CRL defined in the certificate. -// It will fetch and verify the CRL(s) defined by CRLDistributionPoints. +// It will fetch and verify the CRL(s) defined in the root directory specified by cfg. // If we can't load any authoritative CRL files, the status is RevocationUndetermined. // c is the certificate to check. // crlVerifyCrt is the group of possible certificates to verify the crl. func checkCert(c *x509.Certificate, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) RevocationStatus { - if len(c.CRLDistributionPoints) == 0 { - return RevocationUnrevoked + crl, err := fetchIssuerCRL(c.RawIssuer, crlVerifyCrt, cfg) + if err != nil { + // We couldn't load any CRL files for the certificate, so we don't know if it's RevocationUnrevoked or not. 
+ grpclogLogger.Warningf("getIssuerCRL(%v) err = %v", c.Issuer, err) + return RevocationUndetermined } - // Iterate through CRL distribution points to check for status - for _, dp := range c.CRLDistributionPoints { - crl, err := fetchIssuerCRL(dp, c.RawIssuer, crlVerifyCrt, cfg) - if err != nil { - grpclogLogger.Warningf("getIssuerCRL(%v) err = %v", c.Issuer, err) - continue - } - revocation, err := checkCertRevocation(c, crl) - if err != nil { - grpclogLogger.Warningf("checkCertRevocation(CRL %v) failed %v", crl.CertList.TBSCertList.Issuer, err) - // We couldn't check the CRL file for some reason, so continue - // to the next file - continue - } - // Here we've gotten a CRL that loads and verifies. - // We only handle all-reasons CRL files, so this file - // is authoritative for the certificate. - return revocation - + revocation, err := checkCertRevocation(c, crl) + if err != nil { + grpclogLogger.Warningf("checkCertRevocation(CRL %v) failed %v", crl.CertList.TBSCertList.Issuer, err) + // We couldn't check the CRL file for some reason, so we don't know if it's RevocationUnrevoked or not. + return RevocationUndetermined } - // We couldn't load any CRL files for the certificate, so we don't know if it's RevocationUnrevoked or not. - return RevocationUndetermined + // Here we've gotten a CRL that loads and verifies. + // We only handle all-reasons CRL files, so this file + // is authoritative for the certificate. 
+ return revocation } func checkCertRevocation(c *x509.Certificate, crl *certificateListExt) (RevocationStatus, error) { @@ -430,7 +421,7 @@ func parseCRLExtensions(c *pkix.CertificateList) (*certificateListExt, error) { return certList, nil } -func fetchCRL(loc string, rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, error) { +func fetchCRL(rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, error) { var parsedCRL *certificateListExt // 6.3.3 (a) (1) (ii) // According to X509_LOOKUP_hash_dir the format is issuer_hash.rN where N is an increasing number. diff --git a/security/advancedtls/crl_test.go b/security/advancedtls/crl_test.go index ef3eb85da1ef..1f026b1d2885 100644 --- a/security/advancedtls/crl_test.go +++ b/security/advancedtls/crl_test.go @@ -441,7 +441,7 @@ func TestGetIssuerCRLCache(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { cache.Purge() - _, err := fetchIssuerCRL("test", tt.rawIssuer, tt.certs, RevocationConfig{ + _, err := fetchIssuerCRL(tt.rawIssuer, tt.certs, RevocationConfig{ RootDir: testdata.Path("."), Cache: cache, }) @@ -726,7 +726,7 @@ func TestIssuerNonPrintableString(t *testing.T) { if err != nil { t.Fatalf("failed to decode issuer: %s", err) } - _, err = fetchCRL("", rawIssuer, RevocationConfig{RootDir: testdata.Path("crl")}) + _, err = fetchCRL(rawIssuer, RevocationConfig{RootDir: testdata.Path("crl")}) if err != nil { t.Fatalf("fetchCRL failed: %s", err) } From 562e12f07b7faa6a330bf48db1b0036b06cecca5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 28 Mar 2022 10:47:08 -0700 Subject: [PATCH 472/998] test: use channelz instead of stats handler to determine RPC count (#5275) --- test/roundrobin_test.go | 94 +++++++++++------------------------------ 1 file changed, 25 insertions(+), 69 deletions(-) diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index 557a47f77443..a7b94197ffd5 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -29,14 
+29,13 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" @@ -44,47 +43,6 @@ import ( const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` -func statsHandlerDialOption(funcs statsHandlerFuncs) grpc.DialOption { - return grpc.WithStatsHandler(&statsHandler{funcs: funcs}) -} - -type statsHandlerFuncs struct { - TagRPC func(context.Context, *stats.RPCTagInfo) context.Context - HandleRPC func(context.Context, stats.RPCStats) - TagConn func(context.Context, *stats.ConnTagInfo) context.Context - HandleConn func(context.Context, stats.ConnStats) -} - -type statsHandler struct { - funcs statsHandlerFuncs -} - -func (s *statsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - if s.funcs.TagRPC != nil { - return s.funcs.TagRPC(ctx, info) - } - return ctx -} - -func (s *statsHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { - if s.funcs.HandleRPC != nil { - s.funcs.HandleRPC(ctx, stats) - } -} - -func (s *statsHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { - if s.funcs.TagConn != nil { - return s.funcs.TagConn(ctx, info) - } - return ctx -} - -func (s *statsHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { - if s.funcs.HandleConn != nil { - s.funcs.HandleConn(ctx, stats) - } -} - func checkRoundRobin(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { var peer peer.Peer 
// Make sure connections to all backends are up. @@ -122,6 +80,11 @@ func checkRoundRobin(ctx context.Context, client testgrpc.TestServiceClient, add func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer) { t.Helper() + + // Initialize channelz. Used to determine pending RPC count. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + r := manual.NewBuilderWithScheme("whatever") const backendCount = 5 @@ -210,29 +173,9 @@ func (s) TestRoundRobin_AddressesRemoved(t *testing.T) { // blocked because there are no valid backends. This test verifies that when new // backends are added, the RPC is able to complete. func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { - // Register a stats handler which writes to `rpcCh` when an RPC is started. - // The stats handler starts writing to `rpcCh` only after `begin` has fired. - // We are not interested in being notified about initial RPCs which ensure - // that round_robin is working as expected. We are only interested in being - // notified when we have an RPC which is blocked because there are no - // backends, and will become unblocked when the resolver reports new backends. - begin := grpcsync.NewEvent() - rpcCh := make(chan struct{}, 1) - shOption := statsHandlerDialOption(statsHandlerFuncs{ - HandleRPC: func(ctx context.Context, rpcStats stats.RPCStats) { - if !begin.HasFired() { - return - } - select { - case rpcCh <- struct{}{}: - default: - } - }, - }) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, r, backends := testRoundRobinBasic(ctx, t, shOption) + cc, r, backends := testRoundRobinBasic(ctx, t) // Send a resolver update with no addresses. This should push the channel into // TransientFailure. 
@@ -243,7 +186,6 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { } } - begin.Fire() client := testpb.NewTestServiceClient(cc) doneCh := make(chan struct{}) go func() { @@ -256,11 +198,25 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { close(doneCh) }() - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for RPC to start and block") - case <-rpcCh: + // Make sure that there is one pending RPC on the ClientConn before attempting + // to push new addresses through the name resolver. If we don't do this, the + // resolver update can happen before the above goroutine gets to make the RPC. + for { + if err := ctx.Err(); err != nil { + t.Fatal(err) + } + tcs, _ := channelz.GetTopChannels(0, 0) + if len(tcs) != 1 { + t.Fatalf("there should only be one top channel, not %d", len(tcs)) + } + started := tcs[0].ChannelData.CallsStarted + completed := tcs[0].ChannelData.CallsSucceeded + tcs[0].ChannelData.CallsFailed + if (started - completed) == 1 { + break + } + time.Sleep(defaultTestShortTimeout) } + // Send a resolver update with a valid backend to push the channel to Ready // and unblock the above RPC. 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}}) From c57d2b133cbee696909eb90cca04c8236c90b9fa Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 29 Mar 2022 14:04:20 -0700 Subject: [PATCH 473/998] clientconn: fix target parsing tests to be tolerant to new url.URL field OmitHost (#5279) --- clientconn_parsed_target_test.go | 113 +++++++++++++++++++------------ 1 file changed, 69 insertions(+), 44 deletions(-) diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go index 71ffb69edab4..0993998efc9d 100644 --- a/clientconn_parsed_target_test.go +++ b/clientconn_parsed_target_test.go @@ -36,54 +36,65 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { defScheme := resolver.GetDefaultScheme() tests := []struct { target string + badScheme bool wantParsed resolver.Target }{ // No scheme is specified. - {target: "", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "", URL: url.URL{Scheme: defScheme, Path: "/"}}}, - {target: "://", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://", URL: url.URL{Scheme: defScheme, Path: "/://"}}}, - {target: ":///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///", URL: url.URL{Scheme: defScheme, Path: "/:///"}}}, - {target: "://a/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/", URL: url.URL{Scheme: defScheme, Path: "/://a/"}}}, - {target: ":///a", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///a", URL: url.URL{Scheme: defScheme, Path: "/:///a"}}}, - {target: "://a/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/b", URL: url.URL{Scheme: defScheme, Path: "/://a/b"}}}, - {target: "/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/", URL: url.URL{Scheme: defScheme, Path: "//"}}}, - {target: "a/b", wantParsed: resolver.Target{Scheme: defScheme, 
Authority: "", Endpoint: "a/b", URL: url.URL{Scheme: defScheme, Path: "/a/b"}}}, - {target: "a//b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a//b", URL: url.URL{Scheme: defScheme, Path: "/a//b"}}}, - {target: "google.com", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com", URL: url.URL{Scheme: defScheme, Path: "/google.com"}}}, - {target: "google.com/?a=b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com/", URL: url.URL{Scheme: defScheme, Path: "/google.com/", RawQuery: "a=b"}}}, - {target: "/unix/socket/address", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address", URL: url.URL{Scheme: defScheme, Path: "//unix/socket/address"}}}, + {target: "", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ""}}, + {target: "://", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://"}}, + {target: ":///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///"}}, + {target: "://a/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/"}}, + {target: ":///a", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///a"}}, + {target: "://a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/b"}}, + {target: "/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/"}}, + {target: "a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a/b"}}, + {target: "a//b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a//b"}}, + {target: "google.com", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com"}}, + {target: "google.com/?a=b", 
badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com/"}}, + {target: "/unix/socket/address", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address"}}, // An unregistered scheme is specified. - {target: "a:///", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///", URL: url.URL{Scheme: defScheme, Path: "/a:///"}}}, - {target: "a://b/", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/", URL: url.URL{Scheme: defScheme, Path: "/a://b/"}}}, - {target: "a:///b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///b", URL: url.URL{Scheme: defScheme, Path: "/a:///b"}}}, - {target: "a://b/c", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/c", URL: url.URL{Scheme: defScheme, Path: "/a://b/c"}}}, - {target: "a:b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:b", URL: url.URL{Scheme: defScheme, Path: "/a:b"}}}, - {target: "a:/b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:/b", URL: url.URL{Scheme: defScheme, Path: "/a:/b"}}}, - {target: "a://b", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b", URL: url.URL{Scheme: defScheme, Path: "/a://b"}}}, + {target: "a:///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///"}}, + {target: "a://b/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/"}}, + {target: "a:///b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///b"}}, + {target: "a://b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/c"}}, + {target: "a:b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:b"}}, + {target: "a:/b", badScheme: 
true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:/b"}}, + {target: "a://b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b"}}, // A registered scheme is specified. - {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com", URL: url.URL{Scheme: "dns", Path: "/google.com"}}}, - {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com", URL: url.URL{Scheme: "dns", Host: "a.server.com", Path: "/google.com"}}}, - {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/", URL: url.URL{Scheme: "dns", Host: "a.server.com", Path: "/google.com/", RawQuery: "a=b"}}}, - {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Path: "/a/b/c"}}}, - {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a/b/c"}}}, - {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a b"}}}, - {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a:b"}}}, - {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a-b", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "a-b"}}}, - {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: " a///://::!@", URL: url.URL{Scheme: "unix-abstract", Path: "/ a///://::!@", RawPath: "/ a///://::!@", Fragment: 
"$%^&*()b", RawFragment: "$%25^&*()b"}}}, - {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "passthrough:abc", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "passthrough:abc"}}}, - {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc", URL: url.URL{Scheme: "unix-abstract", Path: "", Opaque: "unix:///abc"}}}, - {target: "unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix-abstract", Path: "/a/b/c"}}}, - {target: "unix-abstract:///", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "", URL: url.URL{Scheme: "unix-abstract", Path: "/"}}}, - {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "", Endpoint: "unix:///a/b/c", URL: url.URL{Scheme: "passthrough", Path: "/unix:///a/b/c"}}}, + {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com"}}, + {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}}, + {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/"}}, + {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}}, + {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, + {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b"}}, + {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b"}}, + {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: 
"a-b"}}, + {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: " a///://::!@"}}, + {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "passthrough:abc"}}, + {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc"}}, + {target: "unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, + {target: "unix-abstract:///", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: ""}}, + {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "", Endpoint: "unix:///a/b/c"}}, // Cases for `scheme:absolute-path`. - {target: "dns:/a/b/c", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "dns", Path: "/a/b/c"}}}, - {target: "unregistered:/a/b/c", wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unregistered:/a/b/c", URL: url.URL{Scheme: defScheme, Path: "/unregistered:/a/b/c"}}}, + {target: "dns:/a/b/c", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "a/b/c"}}, + {target: "unregistered:/a/b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unregistered:/a/b/c"}}, } for _, test := range tests { t.Run(test.target, func(t *testing.T) { + target := test.target + if test.badScheme { + target = defScheme + ":///" + target + } + url, err := url.Parse(target) + if err != nil { + t.Fatalf("Unexpected error parsing URL: %v", err) + } + test.wantParsed.URL = *url + cc, err := Dial(test.target, WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Dial(%q) failed: %v", test.target, err) @@ -119,6 +130,7 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { defScheme := 
resolver.GetDefaultScheme() tests := []struct { target string + badScheme bool wantParsed resolver.Target wantDialerAddress string }{ @@ -126,53 +138,66 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { // different behaviors with a custom dialer. { target: "unix:a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Opaque: "a/b/c"}}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, wantDialerAddress: "unix:a/b/c", }, { target: "unix:/a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Path: "/a/b/c"}}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, wantDialerAddress: "unix:///a/b/c", }, { target: "unix:///a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c", URL: url.URL{Scheme: "unix", Path: "/a/b/c"}}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, wantDialerAddress: "unix:///a/b/c", }, { target: "dns:///127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "127.0.0.1:50051", URL: url.URL{Scheme: "dns", Path: "/127.0.0.1:50051"}}, + wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "127.0.0.1:50051"}, wantDialerAddress: "127.0.0.1:50051", }, { target: ":///127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///127.0.0.1:50051", URL: url.URL{Scheme: defScheme, Path: "/:///127.0.0.1:50051"}}, + badScheme: true, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///127.0.0.1:50051"}, wantDialerAddress: ":///127.0.0.1:50051", }, { target: "dns://authority/127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "authority", Endpoint: "127.0.0.1:50051", URL: url.URL{Scheme: "dns", Host: "authority", Path: "/127.0.0.1:50051"}}, + wantParsed: resolver.Target{Scheme: 
"dns", Authority: "authority", Endpoint: "127.0.0.1:50051"}, wantDialerAddress: "127.0.0.1:50051", }, { target: "://authority/127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://authority/127.0.0.1:50051", URL: url.URL{Scheme: defScheme, Path: "/://authority/127.0.0.1:50051"}}, + badScheme: true, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://authority/127.0.0.1:50051"}, wantDialerAddress: "://authority/127.0.0.1:50051", }, { target: "/unix/socket/address", - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address", URL: url.URL{Scheme: defScheme, Path: "//unix/socket/address"}}, + badScheme: true, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address"}, wantDialerAddress: "/unix/socket/address", }, { target: "passthrough://a.server.com/google.com", - wantParsed: resolver.Target{Scheme: "passthrough", Authority: "a.server.com", Endpoint: "google.com", URL: url.URL{Scheme: "passthrough", Host: "a.server.com", Path: "/google.com"}}, + wantParsed: resolver.Target{Scheme: "passthrough", Authority: "a.server.com", Endpoint: "google.com"}, wantDialerAddress: "google.com", }, } for _, test := range tests { t.Run(test.target, func(t *testing.T) { + target := test.target + if test.badScheme { + target = defScheme + ":///" + target + } + url, err := url.Parse(target) + if err != nil { + t.Fatalf("Unexpected error parsing URL: %v", err) + } + test.wantParsed.URL = *url + addrCh := make(chan string, 1) dialer := func(ctx context.Context, address string) (net.Conn, error) { addrCh <- address From 474948a978aad4f30667632290f5d2ec4c2d000d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 29 Mar 2022 14:28:47 -0700 Subject: [PATCH 474/998] github: run all testing jobs to completion even if one fails (#5282) --- .github/workflows/testing.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/testing.yml 
b/.github/workflows/testing.yml index ac2bcdfae75a..cef842601f3b 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -40,6 +40,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 20 strategy: + fail-fast: false matrix: include: - type: vet+tests From b6873c006da794d53fbee52aa463e8c21e9fc958 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 29 Mar 2022 15:06:28 -0700 Subject: [PATCH 475/998] grpc: move to `TransientFailure` in `pick_first` LB policy when all addresses are removed (#5274) --- picker_wrapper_test.go | 4 +- pickfirst.go | 126 +++++++++++++++++++++++++---------------- test/balancer_test.go | 82 --------------------------- test/pickfirst_test.go | 74 ++++++++++++++++++++++++ 4 files changed, 153 insertions(+), 133 deletions(-) diff --git a/picker_wrapper_test.go b/picker_wrapper_test.go index 5f786b28580e..a4fae85d3975 100644 --- a/picker_wrapper_test.go +++ b/picker_wrapper_test.go @@ -97,7 +97,7 @@ func (s) TestBlockingPickNoSubAvailable(t *testing.T) { bp := newPickerWrapper() var finishedCount uint64 bp.updatePicker(&testingPicker{err: balancer.ErrNoSubConnAvailable, maxCalled: goroutineCount}) - // All goroutines should block because picker returns no sc available. + // All goroutines should block because picker returns no subConn available. for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), true, balancer.PickInfo{}); err != nil || tr != testT { @@ -138,7 +138,7 @@ func (s) TestBlockingPickSCNotReady(t *testing.T) { bp := newPickerWrapper() bp.updatePicker(&testingPicker{sc: testSCNotReady, maxCalled: goroutineCount}) var finishedCount uint64 - // All goroutines should block because sc is not ready. + // All goroutines should block because subConn is not ready. 
for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), true, balancer.PickInfo{}); err != nil || tr != testT { diff --git a/pickfirst.go b/pickfirst.go index 5168b62b078a..fb7a99e0a273 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + 
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 
+163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/test/balancer_test.go b/test/balancer_test.go index 113fbaceafbc..c919f1e0f7c4 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -32,7 +32,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" @@ -683,87 +682,6 @@ func (s) TestServersSwap(t *testing.T) { } } -// TestEmptyAddrs verifies client behavior when a working connection is -// removed. In pick first and round-robin, both will continue using the old -// connections. -func (s) TestEmptyAddrs(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // Initialize server - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error while listening. 
Err: %v", err) - } - s := grpc.NewServer() - defer s.Stop() - const one = "1" - ts := &funcServer{ - unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{Username: one}, nil - }, - } - testpb.RegisterTestServiceServer(s, ts) - go s.Serve(lis) - - // Initialize pickfirst client - pfr := manual.NewBuilderWithScheme("whatever") - - pfr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - - pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(pfr)) - if err != nil { - t.Fatalf("Error creating client: %v", err) - } - defer pfcc.Close() - pfclient := testpb.NewTestServiceClient(pfcc) - - // Confirm we are connected to the server - if res, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one { - t.Fatalf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one) - } - - // Remove all addresses. - pfr.UpdateState(resolver.State{}) - - // Initialize roundrobin client - rrr := manual.NewBuilderWithScheme("whatever") - - rrr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - - rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(rrr), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name))) - if err != nil { - t.Fatalf("Error creating client: %v", err) - } - defer rrcc.Close() - rrclient := testpb.NewTestServiceClient(rrcc) - - // Confirm we are connected to the server - if res, err := rrclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one { - t.Fatalf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one) - } - - // Remove all addresses. - rrr.UpdateState(resolver.State{}) - - // Confirm several new RPCs succeed on pick first. 
- for i := 0; i < 10; i++ { - if _, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { - t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) - } - time.Sleep(5 * time.Millisecond) - } - - // Confirm several new RPCs succeed on round robin. - for i := 0; i < 10; i++ { - if _, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { - t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) - } - time.Sleep(5 * time.Millisecond) - } -} - func (s) TestWaitForReady(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index 7379ce286910..c88fdac1e728 100644 --- a/test/pickfirst_test.go +++ b/test/pickfirst_test.go @@ -26,7 +26,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -43,6 +45,11 @@ const pickFirstServiceConfig = `{"loadBalancingConfig": [{"pick_first":{}}]}` // with service config specifying the use of the pick_first LB policy. func setupPickFirst(t *testing.T, backendCount int, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer) { t.Helper() + + // Initialize channelz. Used to determine pending RPC count. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + r := manual.NewBuilderWithScheme("whatever") backends := make([]*stubserver.StubServer, backendCount) @@ -258,3 +265,70 @@ func (s) TestPickFirst_AddressesRemoved(t *testing.T) { t.Fatal(err) } } + +// TestPickFirst_NewAddressWhileBlocking tests the case where pick_first is +// configured on a channel, things are working as expected and then a resolver +// updates removes all addresses. 
An RPC attempted at this point in time will be +// blocked because there are no valid backends. This test verifies that when new +// backends are added, the RPC is able to complete. +func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) { + cc, r, backends := setupPickFirst(t, 2) + addrs := backendsToAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + + // Send a resolver update with no addresses. This should push the channel into + // TransientFailure. + r.UpdateState(resolver.State{}) + for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timeout waiting for state change. got %v; want %v", state, connectivity.TransientFailure) + } + } + + doneCh := make(chan struct{}) + client := testpb.NewTestServiceClient(cc) + go func() { + // The channel is currently in TransientFailure and this RPC will block + // until the channel becomes Ready, which will only happen when we push a + // resolver update with a valid backend address. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Errorf("EmptyCall() = %v, want ", err) + } + close(doneCh) + }() + + // Make sure that there is one pending RPC on the ClientConn before attempting + // to push new addresses through the name resolver. If we don't do this, the + // resolver update can happen before the above goroutine gets to make the RPC. 
+ for { + if err := ctx.Err(); err != nil { + t.Fatal(err) + } + tcs, _ := channelz.GetTopChannels(0, 0) + if len(tcs) != 1 { + t.Fatalf("there should only be one top channel, not %d", len(tcs)) + } + started := tcs[0].ChannelData.CallsStarted + completed := tcs[0].ChannelData.CallsSucceeded + tcs[0].ChannelData.CallsFailed + if (started - completed) == 1 { + break + } + time.Sleep(defaultTestShortTimeout) + } + + // Send a resolver update with a valid backend to push the channel to Ready + // and unblock the above RPC. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}}) + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for blocked RPC to complete") + case <-doneCh: + } +} From 42cadc171d4f126ee02e3a395f3e8c04affbbf04 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 30 Mar 2022 12:58:41 -0700 Subject: [PATCH 476/998] test: cleanup balancer switching tests (#5271) --- balancer_switching_test.go | 569 --------------------- call_test.go | 211 -------- clientconn.go | 10 +- clientconn_state_transition_test.go | 5 +- clientconn_test.go | 9 + internal/testutils/fakegrpclb/server.go | 249 ++++++++++ interop/fake_grpclb/fake_grpclb.go | 127 +---- reflection/serverreflection_test.go | 2 +- server_test.go | 8 + test/balancer_switching_test.go | 636 ++++++++++++++++++++++++ test/pickfirst_test.go | 16 +- test/resolver_update_test.go | 2 +- test/roundrobin_test.go | 8 +- 13 files changed, 944 insertions(+), 908 deletions(-) delete mode 100644 balancer_switching_test.go delete mode 100644 call_test.go create mode 100644 internal/testutils/fakegrpclb/server.go create mode 100644 test/balancer_switching_test.go diff --git a/balancer_switching_test.go b/balancer_switching_test.go deleted file mode 100644 index 3812bbdc990e..000000000000 --- a/balancer_switching_test.go +++ /dev/null @@ -1,569 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "math" - "testing" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/status" -) - -var _ balancer.Builder = &magicalLB{} -var _ balancer.Balancer = &magicalLB{} - -// magicalLB is a ringer for grpclb. 
It is used to avoid circular dependencies on the grpclb package -type magicalLB struct{} - -func (b *magicalLB) Name() string { - return "grpclb" -} - -func (b *magicalLB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return b -} - -func (b *magicalLB) ResolverError(error) {} - -func (b *magicalLB) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {} - -func (b *magicalLB) UpdateClientConnState(balancer.ClientConnState) error { - return nil -} - -func (b *magicalLB) Close() {} - -func (b *magicalLB) ExitIdle() {} - -func init() { - balancer.Register(&magicalLB{}) -} - -func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, func()) { - var servers []*server - for i := 0; i < numServers; i++ { - s := newTestServer() - servers = append(servers, s) - go s.start(t, 0, maxStreams) - s.wait(t, 2*time.Second) - } - return servers, func() { - for i := 0; i < numServers; i++ { - servers[i].stop() - } - } -} - -func errorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} - -func checkPickFirst(cc *ClientConn, servers []*server) error { - var ( - req = "port" - reply string - err error - ) - connected := false - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - for i := 0; i < 5000; i++ { - if err = cc.Invoke(ctx, "/foo/bar", &req, &reply); errorDesc(err) == servers[0].port { - if connected { - // connected is set to false if peer is not server[0]. So if - // connected is true here, this is the second time we saw - // server[0] in a row. Break because pickfirst is in effect. - break - } - connected = true - } else { - connected = false - } - time.Sleep(time.Millisecond) - } - if !connected { - return fmt.Errorf("pickfirst is not in effect after 5 second, EmptyCall() = _, %v, want _, %v", err, servers[0].port) - } - - // The following RPCs should all succeed with the first server. 
- for i := 0; i < 3; i++ { - err = cc.Invoke(ctx, "/foo/bar", &req, &reply) - if errorDesc(err) != servers[0].port { - return fmt.Errorf("index %d: want peer %v, got peer %v", i, servers[0].port, err) - } - } - return nil -} - -func checkRoundRobin(cc *ClientConn, servers []*server) error { - var ( - req = "port" - reply string - err error - ) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure connections to all servers are up. - for i := 0; i < 2; i++ { - // Do this check twice, otherwise the first RPC's transport may still be - // picked by the closing pickfirst balancer, and the test becomes flaky. - for _, s := range servers { - var up bool - for i := 0; i < 5000; i++ { - if err = cc.Invoke(ctx, "/foo/bar", &req, &reply); errorDesc(err) == s.port { - up = true - break - } - time.Sleep(time.Millisecond) - } - if !up { - return fmt.Errorf("server %v is not up within 5 second", s.port) - } - } - } - - serverCount := len(servers) - for i := 0; i < 3*serverCount; i++ { - err = cc.Invoke(ctx, "/foo/bar", &req, &reply) - if errorDesc(err) != servers[i%serverCount].port { - return fmt.Errorf("index %d: want peer %v, got peer %v", i, servers[i%serverCount].port, err) - } - } - return nil -} - -func (s) TestSwitchBalancer(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - const numServers = 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - addrs := []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}} - r.UpdateState(resolver.State{Addresses: addrs}) - // The default balancer is pickfirst. 
- if err := checkPickFirst(cc, servers); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } - // Switch to roundrobin. - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`), Addresses: addrs}, nil) - if err := checkRoundRobin(cc, servers); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } - // Switch to pickfirst. - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`), Addresses: addrs}, nil) - if err := checkPickFirst(cc, servers); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } -} - -// First addr update contains grpclb. -func (s) TestSwitchBalancerGRPCLBFirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}, {Addr: "grpclb", Type: resolver.GRPCLB}}}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - // New update containing new backend and new grpclb. Should not switch - // balancer. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend2"}, {Addr: "grpclb2", Type: resolver.GRPCLB}}}) - for i := 0; i < 200; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if !isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("within 200 ms, cc.balancer switched to !grpclb, want grpclb") - } - - var isPickFirst bool - // Switch balancer to pickfirst. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } -} - -// First addr update does not contain grpclb. -func (s) TestSwitchBalancerGRPCLBSecond(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - var isPickFirst bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}, {Addr: "grpclb", Type: resolver.GRPCLB}}}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - // New update containing new backend and new grpclb. Should not switch - // balancer. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend2"}, {Addr: "grpclb2", Type: resolver.GRPCLB}}}) - for i := 0; i < 200; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if !isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("within 200 ms, cc.balancer switched to !grpclb, want grpclb") - } - - // Switch balancer back. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } -} - -// Test that if the current balancer is roundrobin, after switching to grpclb, -// when the resolved address doesn't contain grpclb addresses, balancer will be -// switched back to roundrobin. 
-func (s) TestSwitchBalancerGRPCLBRoundRobin(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - sc := parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`) - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) - var isRoundRobin bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - if !isRoundRobin { - t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) - } - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "grpclb", Type: resolver.GRPCLB}}, ServiceConfig: sc}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - // Switch balancer back. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - if !isRoundRobin { - t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) - } -} - -// Test that if resolved address list contains grpclb, the balancer option in -// service config won't take effect. 
But when there's no grpclb address in a new -// resolved address list, balancer will be switched to the new one. -func (s) TestSwitchBalancerGRPCLBServiceConfig(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - var isPickFirst bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. - addrs := []resolver.Address{{Addr: "grpclb", Type: resolver.GRPCLB}} - r.UpdateState(resolver.State{Addresses: addrs}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - sc := parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`) - r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc}) - var isRoundRobin bool - for i := 0; i < 200; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - // Balancer should NOT switch to round_robin because resolved list contains - // grpclb. - if isRoundRobin { - t.Fatalf("within 200 ms, cc.balancer switched to round_robin, want grpclb") - } - - // Switch balancer back. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - if !isRoundRobin { - t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) - } -} - -// Test that when switching to grpclb fails because grpclb is not registered, -// the fallback balancer will only get backend addresses, not the grpclb server -// address. -// -// The tests sends 3 server addresses (all backends) as resolved addresses, but -// claim the first one is grpclb server. The all RPCs should all be send to the -// other addresses, not the first one. -func (s) TestSwitchBalancerGRPCLBWithGRPCLBNotRegistered(t *testing.T) { - internal.BalancerUnregister("grpclb") - defer balancer.Register(&magicalLB{}) - - r := manual.NewBuilderWithScheme("whatever") - - const numServers = 3 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}}) - // The default balancer is pickfirst. - if err := checkPickFirst(cc, servers[1:]); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } - // Try switching to grpclb by sending servers[0] as grpclb address. It's - // expected that servers[0] will be filtered out, so it will not be used by - // the balancer. - // - // If the filtering failed, servers[0] will be used for RPCs and the RPCs - // will succeed. The following checks will catch this and fail. 
- addrs := []resolver.Address{ - {Addr: servers[0].addr, Type: resolver.GRPCLB}, - {Addr: servers[1].addr}, {Addr: servers[2].addr}} - r.UpdateState(resolver.State{Addresses: addrs}) - // Still check for pickfirst, but only with server[1] and server[2]. - if err := checkPickFirst(cc, servers[1:]); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } - // Switch to roundrobin, and check against server[1] and server[2]. - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`), Addresses: addrs}, nil) - if err := checkRoundRobin(cc, servers[1:]); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } -} - -const inlineRemoveSubConnBalancerName = "test-inline-remove-subconn-balancer" - -func init() { - stub.Register(inlineRemoveSubConnBalancerName, stub.BalancerFuncs{ - Close: func(data *stub.BalancerData) { - data.ClientConn.RemoveSubConn(&acBalancerWrapper{}) - }, - }) -} - -// Test that when switching to balancers, the old balancer calls RemoveSubConn -// in Close. -// -// This test is to make sure this close doesn't cause a deadlock. -func (s) TestSwitchBalancerOldRemoveSubConn(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, fmt.Sprintf(`{"loadBalancingPolicy": "%v"}`, inlineRemoveSubConnBalancerName))}, nil) - // This service config update will switch balancer from - // "test-inline-remove-subconn-balancer" to "pick_first". The test balancer - // will be closed, which will call cc.RemoveSubConn() inline (this - // RemoveSubConn is not required by the API, but some balancers might do - // it). 
- // - // This is to make sure the cc.RemoveSubConn() from Close() doesn't cause a - // deadlock (e.g. trying to grab a mutex while it's already locked). - // - // Do it in a goroutine so this test will fail with a helpful message - // (though the goroutine will still leak). - done := make(chan struct{}) - go func() { - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`)}, nil) - close(done) - }() - select { - case <-time.After(defaultTestTimeout): - t.Fatalf("timeout waiting for updateResolverState to finish") - case <-done: - } -} - -func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { - scpr := r.CC.ParseServiceConfig(s) - if scpr.Err != nil { - panic(fmt.Sprintf("Error parsing config %q: %v", s, scpr.Err)) - } - return scpr -} diff --git a/call_test.go b/call_test.go deleted file mode 100644 index 3280109f4fbb..000000000000 --- a/call_test.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "context" - "fmt" - "io" - "math" - "net" - "strconv" - "strings" - "sync" - "testing" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/status" -) - -var ( - expectedRequest = "ping" - expectedResponse = "pong" - weirdError = "format verbs: %v%s" - sizeLargeErr = 1024 * 1024 - canceled = 0 -) - -const defaultTestTimeout = 10 * time.Second - -type testCodec struct { -} - -func (testCodec) Marshal(v interface{}) ([]byte, error) { - return []byte(*(v.(*string))), nil -} - -func (testCodec) Unmarshal(data []byte, v interface{}) error { - *(v.(*string)) = string(data) - return nil -} - -func (testCodec) String() string { - return "test" -} - -type testStreamHandler struct { - port string - t transport.ServerTransport -} - -func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) { - p := &parser{r: s} - for { - pf, req, err := p.recvMsg(math.MaxInt32) - if err == io.EOF { - break - } - if err != nil { - return - } - if pf != compressionNone { - t.Errorf("Received the mistaken message format %d, want %d", pf, compressionNone) - return - } - var v string - codec := testCodec{} - if err := codec.Unmarshal(req, &v); err != nil { - t.Errorf("Failed to unmarshal the received message: %v", err) - return - } - if v == "weird error" { - h.t.WriteStatus(s, status.New(codes.Internal, weirdError)) - return - } - if v == "canceled" { - canceled++ - h.t.WriteStatus(s, status.New(codes.Internal, "")) - return - } - if v == "port" { - h.t.WriteStatus(s, status.New(codes.Internal, h.port)) - return - } - - if v != expectedRequest { - h.t.WriteStatus(s, status.New(codes.Internal, strings.Repeat("A", sizeLargeErr))) - return - } - } - // send a response back to end the stream. 
- data, err := encode(testCodec{}, &expectedResponse) - if err != nil { - t.Errorf("Failed to encode the response: %v", err) - return - } - hdr, payload := msgHeader(data, nil) - h.t.Write(s, hdr, payload, &transport.Options{}) - h.t.WriteStatus(s, status.New(codes.OK, "")) -} - -type server struct { - lis net.Listener - port string - addr string - startedErr chan error // sent nil or an error after server starts - mu sync.Mutex - conns map[transport.ServerTransport]bool - channelzID *channelz.Identifier -} - -func newTestServer() *server { - return &server{ - startedErr: make(chan error, 1), - channelzID: channelz.NewIdentifierForTesting(channelz.RefServer, time.Now().Unix(), nil), - } -} - -// start starts server. Other goroutines should block on s.startedErr for further operations. -func (s *server) start(t *testing.T, port int, maxStreams uint32) { - var err error - if port == 0 { - s.lis, err = net.Listen("tcp", "localhost:0") - } else { - s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) - } - if err != nil { - s.startedErr <- fmt.Errorf("failed to listen: %v", err) - return - } - s.addr = s.lis.Addr().String() - _, p, err := net.SplitHostPort(s.addr) - if err != nil { - s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) - return - } - s.port = p - s.conns = make(map[transport.ServerTransport]bool) - s.startedErr <- nil - for { - conn, err := s.lis.Accept() - if err != nil { - return - } - config := &transport.ServerConfig{ - MaxStreams: maxStreams, - ChannelzParentID: s.channelzID, - } - st, err := transport.NewServerTransport(conn, config) - if err != nil { - t.Errorf("failed to create server transport: %v", err) - continue - } - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - st.Close() - return - } - s.conns[st] = true - s.mu.Unlock() - h := &testStreamHandler{ - port: s.port, - t: st, - } - go st.HandleStreams(func(s *transport.Stream) { - go h.handleStream(t, s) - }, func(ctx context.Context, method string) 
context.Context { - return ctx - }) - } -} - -func (s *server) wait(t *testing.T, timeout time.Duration) { - select { - case err := <-s.startedErr: - if err != nil { - t.Fatal(err) - } - case <-time.After(timeout): - t.Fatalf("Timed out after %v waiting for server to be ready", timeout) - } -} - -func (s *server) stop() { - s.lis.Close() - s.mu.Lock() - for c := range s.conns { - c.Close() - } - s.conns = nil - s.mu.Unlock() -} diff --git a/clientconn.go b/clientconn.go index 565aa247473d..86275dca4de7 100644 --- a/clientconn.go +++ b/clientconn.go @@ -672,14 +672,14 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.mu.Unlock() if cbn != grpclbName { // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] + var addrs []resolver.Address + for _, addr := range s.Addresses { + if addr.Type == resolver.GRPCLB { continue } - i++ + addrs = append(addrs, addr) } + s.Addresses = addrs } uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { diff --git a/clientconn_state_transition_test.go b/clientconn_state_transition_test.go index 0944e8434d79..d1c1321b33b1 100644 --- a/clientconn_state_transition_test.go +++ b/clientconn_state_transition_test.go @@ -35,7 +35,10 @@ import ( "google.golang.org/grpc/resolver/manual" ) -const stateRecordingBalancerName = "state_recoding_balancer" +const ( + stateRecordingBalancerName = "state_recoding_balancer" + defaultTestTimeout = 10 * time.Second +) var testBalancerBuilder = newStateRecordingBalancerBuilder() diff --git a/clientconn_test.go b/clientconn_test.go index 80547f51037a..353d8fb325d4 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -40,9 +40,18 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" 
"google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/testdata" ) +func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { + scpr := r.CC.ParseServiceConfig(s) + if scpr.Err != nil { + panic(fmt.Sprintf("Error parsing config %q: %v", s, scpr.Err)) + } + return scpr +} + func (s) TestDialWithTimeout(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { diff --git a/internal/testutils/fakegrpclb/server.go b/internal/testutils/fakegrpclb/server.go new file mode 100644 index 000000000000..8e33340484e9 --- /dev/null +++ b/internal/testutils/fakegrpclb/server.go @@ -0,0 +1,249 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package fakegrpclb provides a fake implementation of the grpclb server. +package fakegrpclb + +import ( + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "time" + + "google.golang.org/grpc" + lbgrpc "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/status" +) + +var logger = grpclog.Component("fake_grpclb") + +// ServerParams wraps options passed while creating a Server. +type ServerParams struct { + ListenPort int // Listening port for the balancer server. 
+ ServerOptions []grpc.ServerOption // gRPC options for the balancer server. + + LoadBalancedServiceName string // Service name being load balanced for. + LoadBalancedServicePort int // Service port being load balanced for. + BackendAddresses []string // Service backends to balance load across. + ShortStream bool // End balancer stream after sending server list. +} + +// Server is a fake implementation of the grpclb LoadBalancer service. It does +// not support stats reporting from clients, and always sends back a static list +// of backends to the client to balance load across. +// +// It is safe for concurrent access. +type Server struct { + lbgrpc.UnimplementedLoadBalancerServer + + // Options copied over from ServerParams passed to NewServer. + sOpts []grpc.ServerOption // gRPC server options. + serviceName string // Service name being load balanced for. + servicePort int // Service port being load balanced for. + shortStream bool // End balancer stream after sending server list. + + // Values initialized using ServerParams passed to NewServer. + backends []*lbpb.Server // Service backends to balance load across. + lis net.Listener // Listener for grpc connections to the LoadBalancer service. + + // mu guards access to below fields. + mu sync.Mutex + grpcServer *grpc.Server // Underlying grpc server. + address string // Actual listening address. + + stopped chan struct{} // Closed when Stop() is called. +} + +// NewServer creates a new Server with passed in params. Returns a non-nil error +// if the params are invalid. 
+func NewServer(params ServerParams) (*Server, error) { + var servers []*lbpb.Server + for _, addr := range params.BackendAddresses { + ipStr, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse list of backend address %q: %v", addr, err) + } + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("failed to parse ip: %q", ipStr) + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("failed to convert port %q to int", portStr) + } + logger.Infof("Adding backend ip: %q, port: %d to server list", ip.String(), port) + servers = append(servers, &lbpb.Server{ + IpAddress: ip, + Port: int32(port), + }) + } + + lis, err := net.Listen("tcp", ":"+strconv.Itoa(params.ListenPort)) + if err != nil { + return nil, fmt.Errorf("failed to listen on port %q: %v", params.ListenPort, err) + } + + return &Server{ + sOpts: params.ServerOptions, + serviceName: params.LoadBalancedServiceName, + servicePort: params.LoadBalancedServicePort, + shortStream: params.ShortStream, + backends: servers, + lis: lis, + address: lis.Addr().String(), + stopped: make(chan struct{}), + }, nil +} + +// Serve starts serving the LoadBalancer service on a gRPC server. +// +// It returns early with a non-nil error if it is unable to start serving. +// Otherwise, it blocks until Stop() is called, at which point it returns the +// error returned by the underlying grpc.Server's Serve() method. +func (s *Server) Serve() error { + s.mu.Lock() + if s.grpcServer != nil { + s.mu.Unlock() + return errors.New("Serve() called multiple times") + } + + server := grpc.NewServer(s.sOpts...) + s.grpcServer = server + s.mu.Unlock() + + logger.Infof("Begin listening on %s", s.lis.Addr().String()) + lbgrpc.RegisterLoadBalancerServer(server, s) + return server.Serve(s.lis) // This call will block. +} + +// Stop stops serving the LoadBalancer service and unblocks the preceding call +// to Serve(). 
+func (s *Server) Stop() { + defer close(s.stopped) + s.mu.Lock() + if s.grpcServer != nil { + s.grpcServer.Stop() + s.grpcServer = nil + } + s.mu.Unlock() +} + +// Address returns the host:port on which the LoadBalancer service is serving. +func (s *Server) Address() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.address +} + +// BalanceLoad provides a fake implementation of the LoadBalancer service. +func (s *Server) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServer) error { + logger.Info("New BalancerLoad stream started") + + req, err := stream.Recv() + if err == io.EOF { + logger.Warning("Received EOF when reading from the stream") + return nil + } + if err != nil { + logger.Warning("Failed to read LoadBalanceRequest from stream: %v", err) + return err + } + logger.Infof("Received LoadBalancerRequest:\n%s", pretty.ToJSON(req)) + + // Initial request contains the service being load balanced for. + initialReq := req.GetInitialRequest() + if initialReq == nil { + logger.Info("First message on the stream does not contain an InitialLoadBalanceRequest") + return status.Error(codes.Unknown, "First request not an InitialLoadBalanceRequest") + } + + // Basic validation of the service name and port from the incoming request. + // + // Clients targeting service:port can sometimes include the ":port" suffix in + // their requested names; handle this case. + serviceName, port, err := net.SplitHostPort(initialReq.Name) + if err != nil { + // Requested name did not contain a port. So, use the name as is. 
+ serviceName = initialReq.Name + } else { + p, err := strconv.Atoi(port) + if err != nil { + logger.Info("Failed to parse requested service port %q to integer", port) + return status.Error(codes.Unknown, "Bad requested service port number") + } + if p != s.servicePort { + logger.Info("Requested service port number %q does not match expected", port, s.servicePort) + return status.Error(codes.Unknown, "Bad requested service port number") + } + } + if serviceName != s.serviceName { + logger.Info("Requested service name %q does not match expected %q", serviceName, s.serviceName) + return status.Error(codes.NotFound, "Bad requested service name") + } + + // Empty initial response disables stats reporting from the client. Stats + // reporting from the client is used to determine backend load and is not + // required for the purposes of this fake. + initResp := &lbpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ + InitialResponse: &lbpb.InitialLoadBalanceResponse{}, + }, + } + if err := stream.Send(initResp); err != nil { + logger.Warningf("Failed to send InitialLoadBalanceResponse on the stream: %v", err) + return err + } + + resp := &lbpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{ + ServerList: &lbpb.ServerList{Servers: s.backends}, + }, + } + logger.Infof("Sending response with server list: %s", pretty.ToJSON(resp)) + if err := stream.Send(resp); err != nil { + logger.Warningf("Failed to send InitialLoadBalanceResponse on the stream: %v", err) + return err + } + + if s.shortStream { + logger.Info("Ending stream early as the short stream option was set") + return nil + } + + for { + select { + case <-stream.Context().Done(): + return nil + case <-s.stopped: + return nil + case <-time.After(10 * time.Second): + logger.Infof("Sending response with server list: %s", pretty.ToJSON(resp)) + if err := stream.Send(resp); err != nil { + logger.Warningf("Failed to send 
InitialLoadBalanceResponse on the stream: %v", err) + return err + } + } + } +} diff --git a/interop/fake_grpclb/fake_grpclb.go b/interop/fake_grpclb/fake_grpclb.go index 6804235486ba..e29d2f439fa9 100644 --- a/interop/fake_grpclb/fake_grpclb.go +++ b/interop/fake_grpclb/fake_grpclb.go @@ -23,18 +23,13 @@ package main import ( "flag" - "net" - "strconv" "strings" - "time" "google.golang.org/grpc" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" + "google.golang.org/grpc/internal/testutils/fakegrpclb" "google.golang.org/grpc/testdata" ) @@ -49,69 +44,9 @@ var ( logger = grpclog.Component("interop") ) -type loadBalancerServer struct { - lbpb.UnimplementedLoadBalancerServer - serverListResponse *lbpb.LoadBalanceResponse -} - -func (l *loadBalancerServer) BalanceLoad(stream lbpb.LoadBalancer_BalanceLoadServer) error { - logger.Info("Begin handling new BalancerLoad request.") - var lbReq *lbpb.LoadBalanceRequest - var err error - if lbReq, err = stream.Recv(); err != nil { - logger.Errorf("Error receiving LoadBalanceRequest: %v", err) - return err - } - logger.Info("LoadBalancerRequest received.") - initialReq := lbReq.GetInitialRequest() - if initialReq == nil { - logger.Info("Expected first request to be an InitialRequest. Got: %v", lbReq) - return status.Error(codes.Unknown, "First request not an InitialRequest") - } - // gRPC clients targeting foo.bar.com:443 can sometimes include the ":443" suffix in - // their requested names; handle this case. TODO: make 443 configurable? 
- var cleanedName string - var requestedNamePortNumber string - if cleanedName, requestedNamePortNumber, err = net.SplitHostPort(initialReq.Name); err != nil { - cleanedName = initialReq.Name - } else { - if requestedNamePortNumber != "443" { - logger.Info("Bad requested service name port number: %v.", requestedNamePortNumber) - return status.Error(codes.Unknown, "Bad requested service name port number") - } - } - if cleanedName != *serviceName { - logger.Info("Expected requested service name: %v. Got: %v", *serviceName, initialReq.Name) - return status.Error(codes.NotFound, "Bad requested service name") - } - if err := stream.Send(&lbpb.LoadBalanceResponse{ - LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ - InitialResponse: &lbpb.InitialLoadBalanceResponse{}, - }, - }); err != nil { - logger.Errorf("Error sending initial LB response: %v", err) - return status.Error(codes.Unknown, "Error sending initial response") - } - logger.Info("Send LoadBalanceResponse: %v", l.serverListResponse) - if err := stream.Send(l.serverListResponse); err != nil { - logger.Errorf("Error sending LB response: %v", err) - return status.Error(codes.Unknown, "Error sending response") - } - if *shortStream { - return nil - } - for { - logger.Info("Send LoadBalanceResponse: %v", l.serverListResponse) - if err := stream.Send(l.serverListResponse); err != nil { - logger.Errorf("Error sending LB response: %v", err) - return status.Error(codes.Unknown, "Error sending response") - } - time.Sleep(10 * time.Second) - } -} - func main() { flag.Parse() + var opts []grpc.ServerOption if *useTLS { certFile := testdata.Path("server1.pem") @@ -126,47 +61,23 @@ func main() { altsTC := alts.NewServerCreds(altsOpts) opts = append(opts, grpc.Creds(altsTC)) } - var serverList []*lbpb.Server - if len(*backendAddrs) == 0 { - serverList = make([]*lbpb.Server, 0) - } else { - rawBackendAddrs := strings.Split(*backendAddrs, ",") - serverList = make([]*lbpb.Server, len(rawBackendAddrs)) - for i 
:= range rawBackendAddrs { - rawIP, rawPort, err := net.SplitHostPort(rawBackendAddrs[i]) - if err != nil { - logger.Fatalf("Failed to parse --backend_addrs[%d]=%v, error: %v", i, rawBackendAddrs[i], err) - } - ip := net.ParseIP(rawIP) - if ip == nil { - logger.Fatalf("Failed to parse ip: %v", rawIP) - } - numericPort, err := strconv.Atoi(rawPort) - if err != nil { - logger.Fatalf("Failed to convert port %v to int", rawPort) - } - logger.Infof("Adding backend ip: %v, port: %d", ip.String(), numericPort) - serverList[i] = &lbpb.Server{ - IpAddress: ip, - Port: int32(numericPort), - } - } - } - serverListResponse := &lbpb.LoadBalanceResponse{ - LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{ - ServerList: &lbpb.ServerList{ - Servers: serverList, - }, - }, - } - server := grpc.NewServer(opts...) - logger.Infof("Begin listening on %d.", *port) - lis, err := net.Listen("tcp", ":"+strconv.Itoa(*port)) + + rawBackendAddrs := strings.Split(*backendAddrs, ",") + server, err := fakegrpclb.NewServer(fakegrpclb.ServerParams{ + ListenPort: *port, + ServerOptions: opts, + LoadBalancedServiceName: *serviceName, + LoadBalancedServicePort: 443, // TODO: make this configurable? + BackendAddresses: rawBackendAddrs, + ShortStream: *shortStream, + }) if err != nil { - logger.Fatalf("Failed to listen on port %v: %v", *port, err) + logger.Fatalf("Failed to create balancer server: %v", err) + } + + // Serve() starts serving and blocks until Stop() is called. We don't need to + // call Stop() here since we want the server to run until we are killed. 
+ if err := server.Serve(); err != nil { + logger.Fatalf("Failed to start balancer server: %v", err) } - lbpb.RegisterLoadBalancerServer(server, &loadBalancerServer{ - serverListResponse: serverListResponse, - }) - server.Serve(lis) } diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index b35f674d926e..3aeac2be0722 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -87,7 +87,7 @@ func loadFileDesc(filename string) (*descriptorpb.FileDescriptorProto, []byte) { func loadFileDescDynamic(b []byte) (*descriptorpb.FileDescriptorProto, protoreflect.FileDescriptor, []byte) { m := new(descriptorpb.FileDescriptorProto) if err := proto.Unmarshal(b, m); err != nil { - panic(fmt.Sprintf("failed to unmarshal dynamic proto raw descriptor")) + panic("failed to unmarshal dynamic proto raw descriptor") } fd, err := protodesc.NewFile(m, nil) diff --git a/server_test.go b/server_test.go index b15939160144..7d4cf7bfc21e 100644 --- a/server_test.go +++ b/server_test.go @@ -28,12 +28,20 @@ import ( "time" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" ) type emptyServiceServer interface{} type testServer struct{} +func errorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + func (s) TestStopBeforeServe(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go new file mode 100644 index 000000000000..2453738bfa47 --- /dev/null +++ b/test/balancer_switching_test.go @@ -0,0 +1,636 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/fakegrpclb" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +const ( + loadBalancedServiceName = "foo.bar.service" + loadBalancedServicePort = 443 + wantGRPCLBTraceDesc = `Channel switches to new LB policy "grpclb"` + wantRoundRobinTraceDesc = `Channel switches to new LB policy "round_robin"` + wantPickFirstTraceDesc = `Channel switches to new LB policy "pick_first"` +) + +// setupBackendsAndFakeGRPCLB sets up the stub server backends and a fake grpclb +// server for tests which exercise balancer switch scenarios involving grpclb. +// Returns a cleanup function to be invoked by the caller. 
+func setupBackendsAndFakeGRPCLB(t *testing.T) ([]*stubserver.StubServer, *fakegrpclb.Server, func()) { + czCleanup := channelz.NewChannelzStorageForTesting() + backends, backendsCleanup := startBackendsForBalancerSwitch(t) + rawAddrs := stubBackendsToRawAddrs(backends) + + lbServer, err := fakegrpclb.NewServer(fakegrpclb.ServerParams{ + LoadBalancedServiceName: loadBalancedServiceName, + LoadBalancedServicePort: loadBalancedServicePort, + BackendAddresses: rawAddrs, + }) + if err != nil { + t.Fatalf("failed to create fake grpclb server: %v", err) + } + go func() { + if err := lbServer.Serve(); err != nil { + t.Errorf("fake grpclb Serve() failed: %v", err) + } + }() + + return backends, lbServer, func() { + backendsCleanup() + lbServer.Stop() + czCleanupWrapper(czCleanup, t) + } +} + +// startBackendsForBalancerSwitch spins up a bunch of stub server backends +// exposing the TestService. Returns a cleanup function to be invoked by the +// caller. +func startBackendsForBalancerSwitch(t *testing.T) ([]*stubserver.StubServer, func()) { + t.Helper() + + const backendCount = 3 + backends := make([]*stubserver.StubServer, backendCount) + for i := 0; i < backendCount; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + backends[i] = backend + } + return backends, func() { + for _, b := range backends { + b.Stop() + } + } +} + +// stubBackendsToRawAddrs converts from a set of stub server backends to raw +// address strings. Useful when pushing addresses to the fake grpclb server. 
+func stubBackendsToRawAddrs(backends []*stubserver.StubServer) []string { + addrs := make([]string, len(backends)) + for i, backend := range backends { + addrs[i] = backend.Address + } + return addrs +} + +// checkForTraceEvent looks for a trace event in the top level channel matching +// the given description. Events before since are ignored. Returns nil error if +// such an event is found. +func checkForTraceEvent(ctx context.Context, wantDesc string, since time.Time) error { + for { + if err := ctx.Err(); err != nil { + return err + } + tcs, _ := channelz.GetTopChannels(0, 0) + if len(tcs) != 1 { + return fmt.Errorf("channelz returned %d top channels, want 1", len(tcs)) + } + for _, event := range tcs[0].Trace.Events { + if event.Timestamp.Before(since) { + continue + } + if strings.Contains(event.Desc, wantDesc) { + return nil + } + } + time.Sleep(defaultTestShortTimeout) + } +} + +// TestBalancerSwitch_Basic tests the basic scenario of switching from one LB +// policy to another, as specified in the service config. +func (s) TestBalancerSwitch_Basic(t *testing.T) { + backends, cleanup := startBackendsForBalancerSwitch(t) + defer cleanup() + addrs := stubBackendsToResolverAddrs(backends) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update without an LB policy in the service config. The + // channel should pick the default LB policy, which is pick_first. + r.UpdateState(resolver.State{Addresses: addrs}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + + // Push a resolver update with the service config specifying "round_robin". 
+ r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + + // Push a resolver update with the service config specifying "pick_first". + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, pickFirstServiceConfig), + }) + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_grpclbToPickFirst tests the scenario where the channel +// starts off "grpclb", switches to "pick_first" and back. +func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. This will cause the channel to + // switch to the "grpclb" balancer, and will equally distribute RPCs across + // the backends as the fake grpclb server does not support load reporting from + // the clients. 
+ now := time.Now() + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) + } + + // Push a resolver update containing a non-existent grpclb server address. + // This should not lead to a balancer switch. + now = time.Now() + const nonExistentServer = "non-existent-grpclb-server-address" + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: nonExistentServer, Type: resolver.GRPCLB}}}) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + wantDesc := fmt.Sprintf("Channel switches to new LB policy %q", nonExistentServer) + if err := checkForTraceEvent(sCtx, wantDesc, now); err == nil { + t.Fatal("channel switched balancers when expected not to") + } + + // Push a resolver update containing no grpclb server address. This should + // lead to the channel using the default LB policy which is pick_first. + now = time.Now() + r.UpdateState(resolver.State{Addresses: addrs}) + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) + } +} + +// TestBalancerSwitch_pickFirstToGRPCLB tests the scenario where the channel +// starts off with "pick_first", switches to "grpclb" and back. 
+func (s) TestBalancerSwitch_pickFirstToGRPCLB(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update containing no grpclb server address. This should + // lead to the channel using the default LB policy which is pick_first. + now := time.Now() + r.UpdateState(resolver.State{Addresses: addrs}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) + } + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. This will cause the channel to + // switch to the "grpclb" balancer, and will equally distribute RPCs across + // the backends as the fake grpclb server does not support load reporting from + // the clients. + now = time.Now() + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) + if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + + // Push a resolver update containing a non-existent grpclb server address. + // This should not lead to a balancer switch. 
+ now = time.Now() + const nonExistentServer = "non-existent-grpclb-server-address" + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: nonExistentServer, Type: resolver.GRPCLB}}}) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + wantDesc := fmt.Sprintf("Channel switches to new LB policy %q", nonExistentServer) + if err := checkForTraceEvent(sCtx, wantDesc, now); err == nil { + t.Fatal("channel switched balancers when expected not to") + } + + // Switch to "pick_first" again by sending no grpclb server addresses. + now = time.Now() + r.UpdateState(resolver.State{Addresses: addrs}) + if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) + } + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_RoundRobinToGRPCLB tests the scenario where the channel +// starts off with "round_robin", switches to "grpclb" and back. +// +// Note that this test uses the deprecated `loadBalancingPolicy` field in the +// service config. +func (s) TestBalancerSwitch_RoundRobinToGRPCLB(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Note the use of the deprecated `loadBalancingPolicy` field here instead + // of the now recommended `loadBalancingConfig` field. 
The logic in the + // ClientConn which decides which balancer to switch to looks at the + // following places in the given order of preference: + // - `loadBalancingConfig` field + // - addresses of type grpclb + // - `loadBalancingPolicy` field + // If we use the `loadBalancingPolicy` field, the switch to "grpclb" later on + // in the test will not happen as the ClientConn will continue to use the LB + // policy received in the first update. + scpr := parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`) + + // Push a resolver update with the service config specifying "round_robin". + now := time.Now() + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: scpr, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. This will cause the channel to + // switch to the "grpclb" balancer, and will equally distribute RPCs across + // the backends as the fake grpclb server does not support load reporting from + // the clients. + now = time.Now() + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}, + ServiceConfig: scpr, + }) + if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + + // Switch back to "round_robin". 
+ now = time.Now() + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: scpr, + }) + if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_grpclbNotRegistered tests the scenario where the grpclb +// balancer is not registered. Verifies that the ClientConn fallbacks to the +// default LB policy or the LB policy specified in the service config, and that +// addresses of type "grpclb" are filtered out. +func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) { + // Unregister the grpclb balancer builder for the duration of this test. + grpclbBuilder := balancer.Get("grpclb") + internal.BalancerUnregister(grpclbBuilder.Name()) + defer balancer.Register(grpclbBuilder) + + backends, cleanup := startBackendsForBalancerSwitch(t) + defer cleanup() + addrs := stubBackendsToResolverAddrs(backends) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update which contains a bunch of stub server backends and a + // grpclb server address. The latter should get the ClientConn to try and + // apply the grpclb policy. But since grpclb is not registered, it should + // fallback to the default LB policy which is pick_first. The ClientConn is + // also expected to filter out the grpclb address when sending the addresses + // list fo pick_first. + grpclbAddr := []resolver.Address{{Addr: "non-existent-grpclb-server-address", Type: resolver.GRPCLB}} + addrs = append(grpclbAddr, addrs...) 
+ now := time.Now() + r.UpdateState(resolver.State{Addresses: addrs}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + t.Fatal(err) + } + if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) + } + + // Push a resolver update with the same addresses, but with a service config + // specifying "round_robin". The ClientConn is expected to filter out the + // grpclb address when sending the addresses list to round_robin. + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy verifies that +// if the resolver update contains any addresses of type "grpclb", it overrides +// the LB policy specifies in the deprecated `loadBalancingPolicy` field of the +// service config. +func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update containing no grpclb server address. This should + // lead to the channel using the default LB policy which is pick_first. 
+ now := time.Now() + r.UpdateState(resolver.State{Addresses: addrs}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) + } + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config. The addresses list contains + // the stub backend addresses and a single address pointing to the grpclb + // server we created above. This will cause the channel to switch to the + // "grpclb" balancer, and will equally distribute RPCs across the backends. + now = time.Now() + r.UpdateState(resolver.State{ + Addresses: append(addrs, resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), + }) + if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + + // Push a resolver update with a service config using the deprecated + // `loadBalancingPolicy` field pointing to round_robin. The addresses list + // contains an address of type "grpclb". This should be preferred and hence + // there should be no balancer switch. 
+ scpr := parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`) + now = time.Now() + r.UpdateState(resolver.State{ + Addresses: append(addrs, resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), + ServiceConfig: scpr, + }) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if err := checkForTraceEvent(sCtx, wantRoundRobinTraceDesc, now); err == nil { + t.Fatal("channel switched balancers when expected not to") + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + + // Switch to "round_robin" by removing the address of type "grpclb". + now = time.Now() + r.UpdateState(resolver.State{Addresses: addrs}) + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) + } +} + +// TestBalancerSwitch_LoadBalancingConfigTrumps verifies that the +// `loadBalancingConfig` field in the service config trumps over addresses of +// type "grpclb" when it comes to deciding which LB policy is applied on the +// channel. +func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. 
This will cause the channel to + // switch to the "grpclb" balancer, and will equally distribute RPCs across + // the backends as the fake grpclb server does not support load reporting from + // the clients. + now := time.Now() + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) + } + + // Push a resolver update with the service config specifying "round_robin" + // through the recommended `loadBalancingConfig` field. + now = time.Now() + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { + t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config and an address of type + // "grpclb". The ClientConn should continue to use the service config received + // earlier, which specified the use of "round_robin" through the + // `loadBalancingConfig` field, and therefore the balancer should not be + // switched. And because the `loadBalancingConfig` field trumps everything + // else, the address of type "grpclb" should be ignored. 
+ grpclbAddr := resolver.Address{Addr: "non-existent-grpclb-server-address", Type: resolver.GRPCLB} + now = time.Now() + r.UpdateState(resolver.State{Addresses: append(addrs, grpclbAddr)}) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if err := checkForTraceEvent(sCtx, wantRoundRobinTraceDesc, now); err == nil { + t.Fatal("channel switched balancers when expected not to") + } + if err := checkRoundRobin(ctx, cc, addrs); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_OldBalancerCallsRemoveSubConnInClose tests the scenario +// where the balancer being switched out calls RemoveSubConn() in its Close() +// method. Verifies that this sequence of calls doesn't lead to a deadlock. +func (s) TestBalancerSwitch_OldBalancerCallsRemoveSubConnInClose(t *testing.T) { + // Register a stub balancer which calls RemoveSubConn() from its Close(). + scChan := make(chan balancer.SubConn, 1) + uccsCalled := make(chan struct{}, 1) + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(data *stub.BalancerData, ccs balancer.ClientConnState) error { + sc, err := data.ClientConn.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("failed to create subConn: %v", err) + } + scChan <- sc + close(uccsCalled) + return nil + }, + Close: func(data *stub.BalancerData) { + data.ClientConn.RemoveSubConn(<-scChan) + }, + }) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update specifying our stub balancer as the LB policy. 
+ scpr := parseServiceConfig(t, r, fmt.Sprintf(`{"loadBalancingPolicy": "%v"}`, t.Name())) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "dummy-address"}}, + ServiceConfig: scpr, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for UpdateClientConnState to be called: %v", ctx.Err()) + case <-uccsCalled: + } + + // The following service config update will switch balancer from our stub + // balancer to pick_first. The former will be closed, which will call + // cc.RemoveSubConn() inline (this RemoveSubConn is not required by the API, + // but some balancers might do it). + // + // This is to make sure the cc.RemoveSubConn() from Close() doesn't cause a + // deadlock (e.g. trying to grab a mutex while it's already locked). + // + // Do it in a goroutine so this test will fail with a helpful message + // (though the goroutine will still leak). + done := make(chan struct{}) + go func() { + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "dummy-address"}}, + ServiceConfig: parseServiceConfig(t, r, pickFirstServiceConfig), + }) + close(done) + }() + + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for resolver.UpdateState to finish: %v", ctx.Err()) + case <-done: + } +} diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index c88fdac1e728..00a40055ea77 100644 --- a/test/pickfirst_test.go +++ b/test/pickfirst_test.go @@ -128,9 +128,9 @@ func checkPickFirst(ctx context.Context, cc *grpc.ClientConn, wantAddr string) e return nil } -// backendsToAddrs is a helper routine to convert from a set of backends to +// stubBackendsToResolverAddrs converts from a set of stub server backends to // resolver addresses. Useful when pushing addresses to the manual resolver. 
-func backendsToAddrs(backends []*stubserver.StubServer) []resolver.Address { +func stubBackendsToResolverAddrs(backends []*stubserver.StubServer) []resolver.Address { addrs := make([]resolver.Address, len(backends)) for i, backend := range backends { addrs[i] = resolver.Address{Addr: backend.Address} @@ -143,7 +143,7 @@ func backendsToAddrs(backends []*stubserver.StubServer) []resolver.Address { func (s) TestPickFirst_OneBackend(t *testing.T) { cc, r, backends := setupPickFirst(t, 1) - addrs := backendsToAddrs(backends) + addrs := stubBackendsToResolverAddrs(backends) r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -158,7 +158,7 @@ func (s) TestPickFirst_OneBackend(t *testing.T) { func (s) TestPickFirst_MultipleBackends(t *testing.T) { cc, r, backends := setupPickFirst(t, 2) - addrs := backendsToAddrs(backends) + addrs := stubBackendsToResolverAddrs(backends) r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -174,7 +174,7 @@ func (s) TestPickFirst_MultipleBackends(t *testing.T) { func (s) TestPickFirst_OneServerDown(t *testing.T) { cc, r, backends := setupPickFirst(t, 2) - addrs := backendsToAddrs(backends) + addrs := stubBackendsToResolverAddrs(backends) r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -197,7 +197,7 @@ func (s) TestPickFirst_OneServerDown(t *testing.T) { func (s) TestPickFirst_AllServersDown(t *testing.T) { cc, r, backends := setupPickFirst(t, 2) - addrs := backendsToAddrs(backends) + addrs := stubBackendsToResolverAddrs(backends) r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -228,7 +228,7 @@ func (s) TestPickFirst_AllServersDown(t *testing.T) { func (s) TestPickFirst_AddressesRemoved(t *testing.T) { cc, r, backends := 
setupPickFirst(t, 3) - addrs := backendsToAddrs(backends) + addrs := stubBackendsToResolverAddrs(backends) r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -273,7 +273,7 @@ func (s) TestPickFirst_AddressesRemoved(t *testing.T) { // backends are added, the RPC is able to complete. func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) { cc, r, backends := setupPickFirst(t, 2) - addrs := backendsToAddrs(backends) + addrs := stubBackendsToResolverAddrs(backends) r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/test/resolver_update_test.go b/test/resolver_update_test.go index b2443fdd8110..bf7f0d2c2ed5 100644 --- a/test/resolver_update_test.go +++ b/test/resolver_update_test.go @@ -129,7 +129,7 @@ func (s) TestResolverUpdate_InvalidServiceConfigAsFirstUpdate(t *testing.T) { // The wrappingBalancer wraps a pick_first balancer and writes to a channel when // it receives a ClientConn update. This is different to a stub balancer which -// only notifies of updates from grpc, but does not contain a real balanacer. +// only notifies of updates from grpc, but does not contain a real balancer. // // The wrappingBalancer allows us to write tests with a real backend and make // real RPCs. diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index a7b94197ffd5..7f16aa2cb3ce 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -43,7 +43,8 @@ import ( const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` -func checkRoundRobin(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { +func checkRoundRobin(ctx context.Context, cc *grpc.ClientConn, addrs []resolver.Address) error { + client := testgrpc.NewTestServiceClient(cc) var peer peer.Peer // Make sure connections to all backends are up. 
backendCount := len(addrs) @@ -126,7 +127,7 @@ func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOpt } r.UpdateState(resolver.State{Addresses: addrs}) - if err := checkRoundRobin(ctx, client, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs); err != nil { t.Fatal(err) } return cc, r, backends @@ -244,8 +245,7 @@ func (s) TestRoundRobin_OneServerDown(t *testing.T) { for i := 0; i < len(backends)-1; i++ { addrs[i] = resolver.Address{Addr: backends[i].Address} } - client := testpb.NewTestServiceClient(cc) - if err := checkRoundRobin(ctx, client, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs); err != nil { t.Fatalf("RPCs are not being round robined across remaining servers: %v", err) } } From 99aae3442dbe8495247dda07a6b1c8cda98accca Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 30 Mar 2022 18:01:37 -0400 Subject: [PATCH 477/998] cluster manager: Add Graceful Switch functionality to Cluster Manager (#5265) * cluster manager: Add Graceful Switch functionality to Cluster Manager --- .../balancer/clustermanager/clustermanager.go | 5 + .../clustermanager/clustermanager_test.go | 131 ++++++++++++++++++ 2 files changed, 136 insertions(+) diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index 8d71200d8c61..930427df1b72 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -93,6 +93,11 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) { b.stateAggregator.add(name) // Then add to the balancer group. b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + } else { + // Already present, check for type change and if so send down a new builder. 
+ if newT.ChildPolicy.Name != b.children[name].ChildPolicy.Name { + b.bg.UpdateBuilder(name, balancer.Get(newT.ChildPolicy.Name)) + } } // TODO: handle error? How to aggregate errors and return? _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index 771152b7bb97..e8e551a0ca17 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -25,6 +25,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" @@ -48,6 +49,11 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + var ( rtBuilder balancer.Builder rtParser balancer.ConfigParser @@ -102,6 +108,7 @@ func init() { rtParser = rtBuilder.(balancer.ConfigParser) balancer.Register(&ignoreAttrsRRBuilder{balancer.Get(roundrobin.Name)}) + balancer.Register(wrappedPickFirstBalancerBuilder{}) balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond } @@ -632,3 +639,127 @@ func TestInitialIdle(t *testing.T) { t.Fatalf("Received aggregated state: %v, want Idle", state1) } } + +// TestClusterGracefulSwitch tests the graceful switch functionality for a child +// of the cluster manager. At first, the child is configured as a round robin +// load balancer, and thus should behave accordingly. The test then gracefully +// switches this child to a pick first load balancer. Once that balancer updates +// it's state and completes the graceful switch process the new picker should +// reflect this change. 
+func TestClusterGracefulSwitch(t *testing.T) { + cc := testutils.NewTestClientConn(t) + rtb := rtBuilder.Build(cc, balancer.BuildOptions{}) + + configJSON1 := `{ +"children": { + "csp:cluster":{ "childPolicy": [{"ignore_attrs_round_robin":""}] } +} +}` + config1, err := rtParser.ParseConfig([]byte(configJSON1)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[0], []string{"csp:cluster"}), + }}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + sc1 := <-cc.NewSubConnCh + rtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + rtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p1 := <-cc.NewPickerCh + pi := balancer.PickInfo{ + Ctx: SetPickedCluster(context.Background(), "csp:cluster"), + } + testPick(t, p1, pi, sc1, nil) + + // Same cluster, different balancer type. + configJSON2 := `{ +"children": { + "csp:cluster":{ "childPolicy": [{"wrappedPickFirstBalancer":""}] } +} +}` + config2, err := rtParser.ParseConfig([]byte(configJSON2)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[1], []string{"csp:cluster"}), + }}, + BalancerConfig: config2, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + sc2 := <-cc.NewSubConnCh + // Update the pick first balancers SubConn as CONNECTING. 
This will cause + // the pick first balancer to UpdateState() with CONNECTING, which shouldn't send + // a Picker update back, as the Graceful Switch process is not complete. + rtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-cc.NewPickerCh: + t.Fatalf("No new picker should have been sent due to the Graceful Switch process not completing") + case <-ctx.Done(): + } + + // Update the pick first balancers SubConn as READY. This will cause + // the pick first balancer to UpdateState() with READY, which should send a + // Picker update back, as the Graceful Switch process is complete. This + // Picker should always pick the pick first's created SubConn. + rtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p2 := <-cc.NewPickerCh + testPick(t, p2, pi, sc2, nil) + // The Graceful Switch process completing for the child should cause the + // SubConns for the balancer being gracefully switched from to get deleted. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("error waiting for RemoveSubConn()") + case rsc := <-cc.RemoveSubConnCh: + // The SubConn removed should have been the created SubConn + // from the child before switching. 
+ if rsc != sc1 { + t.Fatalf("RemoveSubConn() got: %v, want %v", rsc, sc1) + } + } +} + +type wrappedPickFirstBalancerBuilder struct{} + +func (wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, + } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb +} + +func (wrappedPickFirstBalancerBuilder) Name() string { + return "wrappedPickFirstBalancer" +} + +type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + // Eat it if IDLE - allows it to switch over only on a READY SubConn. + if state.ConnectivityState == connectivity.Idle { + return + } + wb.ClientConn.UpdateState(state) +} From 4e780933f85a4d0f6903d1a5bb57bb7c882e611d Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 31 Mar 2022 14:02:26 -0400 Subject: [PATCH 478/998] xds: ignore routes with unsupported cluster specifiers (#5269) --- examples/go.mod | 2 +- examples/go.sum | 23 +++-- go.mod | 10 +-- go.sum | 23 +++-- security/advancedtls/examples/go.sum | 21 +++-- security/advancedtls/go.sum | 21 +++-- .../clusterspecifier/cluster_specifier.go | 2 +- .../xdsclient/xdsresource/unmarshal_rds.go | 15 +++- .../xdsresource/unmarshal_rds_test.go | 90 ++++++++++++++++--- 9 files changed, 157 insertions(+), 50 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index d5b7d122d0fc..e168fbdf2150 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -7,7 +7,7 @@ require ( golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 google.golang.org/grpc v1.36.0 - google.golang.org/protobuf v1.26.0 + google.golang.org/protobuf v1.27.1 ) replace google.golang.org/grpc => ../ diff --git a/examples/go.sum 
b/examples/go.sum index 3a7309c5328e..8508780ea80f 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -7,14 +7,14 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= 
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -36,8 +36,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -56,8 +57,9 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net 
v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -65,10 +67,14 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -91,8 +97,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/go.mod b/go.mod index 0e7dc351ae14..6a760ed7435b 100644 --- a/go.mod +++ b/go.mod @@ -6,14 +6,14 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 - github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.5.2 - 
github.com/google/go-cmp v0.5.5 + github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20200822124328-c89045814202 + golang.org/x/net v0.0.0-20201021035429-f5854403a974 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd + golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.26.0 + google.golang.org/protobuf v1.27.1 ) diff --git a/go.sum b/go.sum index 03be9ef71ec7..5f418dba1b41 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= @@ -22,8 +22,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF 
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -49,8 +49,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 
h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -75,8 +76,9 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -87,10 +89,14 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -122,8 +128,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= 
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index f79d9c9392e3..2918eee66e0f 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -3,11 +3,11 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -28,8 +28,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -48,18 +49,23 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= 
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -81,8 +87,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index f79d9c9392e3..2918eee66e0f 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -3,11 +3,11 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go 
v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -28,8 +28,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -48,18 +49,23 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -81,8 +87,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/xds/internal/clusterspecifier/cluster_specifier.go b/xds/internal/clusterspecifier/cluster_specifier.go index 54776f20cf0b..9bb30f16589f 100644 --- a/xds/internal/clusterspecifier/cluster_specifier.go +++ b/xds/internal/clusterspecifier/cluster_specifier.go @@ -37,7 +37,7 @@ type ClusterSpecifier interface { // ParseClusterSpecifierConfig parses the provided configuration // proto.Message from the top level RDS configuration. The resulting // BalancerConfig will be used as configuration for a child LB Policy of the - // Cluster Manager LB Policy. + // Cluster Manager LB Policy. A nil BalancerConfig is invalid. ParseClusterSpecifierConfig(proto.Message) (BalancerConfig, error) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 12c3d560fa7d..a8fd65d974bb 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -147,6 +147,12 @@ func processClusterSpecifierPlugins(csps []*v3routepb.ClusterSpecifierPlugin) (m for _, csp := range csps { cs := clusterspecifier.Get(csp.GetExtension().GetTypedConfig().GetTypeUrl()) if cs == nil { + if csp.GetIsOptional() { + // "If a plugin is not supported but has is_optional set, then + // we will ignore any routes that point to that plugin" + cspCfgs[csp.GetExtension().GetName()] = nil + continue + } // "If no plugin is registered for it, the resource will be NACKed." 
// - RLS in xDS design return nil, fmt.Errorf("cluster specifier %q of type %q was not found", csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) @@ -354,8 +360,6 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif if totalWeight == 0 { return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) } - case *v3routepb.RouteAction_ClusterHeader: - continue case *v3routepb.RouteAction_ClusterSpecifierPlugin: if !envconfig.XDSRLS { return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) @@ -367,10 +371,15 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif // resource will be NACKed." - RLS in xDS design return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) } + if csps[a.ClusterSpecifierPlugin] == nil { + logger.Infof("route %+v references optional and unsupported cluster specifier plugin %v, the route will be ignored", r, a.ClusterSpecifierPlugin) + continue + } cspNames[a.ClusterSpecifierPlugin] = true route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin default: - return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) + logger.Infof("route %+v references unknown ClusterSpecifier %+v, the route will be ignored", r, a) + continue } msd := action.GetMaxStreamDuration() diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index abae9ff09752..b6034c72e3b8 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -98,6 +98,42 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { return rc } + goodRouteConfigWithClusterSpecifierPluginsAndNormalRoute = func(csps []*v3routepb.ClusterSpecifierPlugin, 
cspReferences []string) *v3routepb.RouteConfiguration { + rs := goodRouteConfigWithClusterSpecifierPlugins(csps, cspReferences) + rs.VirtualHosts[0].Routes = append(rs.VirtualHosts[0].Routes, &v3routepb.Route{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + CaseSensitive: &wrapperspb.BoolValue{Value: false}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}) + return rs + } + goodRouteConfigWithUnsupportedClusterSpecifier = &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + CaseSensitive: &wrapperspb.BoolValue{Value: false}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, + }}, + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "|"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_ClusterHeader{}}, + }}, + }, + }, + }, + } + goodUpdateWithFilterConfigs = func(cfgs map[string]httpfilter.FilterConfig) RouteConfigUpdate { return RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ @@ -117,6 +153,17 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { Routes: nil, }}, } + goodUpdateWithNormalRoute = RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP("/"), + CaseInsensitive: true, + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + } goodUpdateWithClusterSpecifierPluginA = RouteConfigUpdate{ VirtualHosts: []*VirtualHost{{ Domains: []string{ldsTarget}, @@ -130,12 +177,13 @@ func 
(s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { "cspA": nil, }, } - clusterSpecifierPlugin = func(name string, config *anypb.Any) *v3routepb.ClusterSpecifierPlugin { + clusterSpecifierPlugin = func(name string, config *anypb.Any, isOptional bool) *v3routepb.ClusterSpecifierPlugin { return &v3routepb.ClusterSpecifierPlugin{ Extension: &v3corepb.TypedExtensionConfig{ Name: name, TypedConfig: config, }, + IsOptional: isOptional, } } goodRouteConfigWithRetryPolicy = func(vhrp *v3routepb.RetryPolicy, rrp *v3routepb.RetryPolicy) *v3routepb.RouteConfiguration { @@ -651,7 +699,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { { name: "cluster-specifier-declared-which-not-registered", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ - clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist), + clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, false), }, []string{"cspA"}), wantError: true, rlsEnabled: true, @@ -659,7 +707,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { { name: "error-in-cluster-specifier-plugin-conversion-method", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ - clusterSpecifierPlugin("cspA", errorClusterSpecifierConfig), + clusterSpecifierPlugin("cspA", errorClusterSpecifierConfig, false), }, []string{"cspA"}), wantError: true, rlsEnabled: true, @@ -667,7 +715,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { { name: "route-action-that-references-undeclared-cluster-specifier-plugin", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ - clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), }, []string{"cspA", "cspB"}), wantError: true, rlsEnabled: true, @@ -675,7 +723,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { { name: 
"emitted-cluster-specifier-plugins", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ - clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), }, []string{"cspA"}), wantUpdate: goodUpdateWithClusterSpecifierPluginA, rlsEnabled: true, @@ -683,8 +731,8 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { { name: "deleted-cluster-specifier-plugins-not-referenced", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ - clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), - clusterSpecifierPlugin("cspB", mockClusterSpecifierConfig), + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), + clusterSpecifierPlugin("cspB", mockClusterSpecifierConfig, false), }, []string{"cspA"}), wantUpdate: goodUpdateWithClusterSpecifierPluginA, rlsEnabled: true, @@ -692,17 +740,39 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { { name: "ignore-error-in-cluster-specifier-plugin", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ - clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist), + clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, false), }, []string{}), wantUpdate: goodUpdate, }, { name: "cluster-specifier-plugin-referenced-env-var-off", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ - clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig), + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), }, []string{"cspA"}), wantError: true, }, + // This tests a scenario where a cluster specifier plugin is not found + // and is optional. Any routes referencing that not found optional + // cluster specifier plugin should be ignored. The config has two + // routes, and only one of them should be present in the update. 
+ { + name: "cluster-specifier-plugin-not-found-and-optional-route-should-ignore", + rc: goodRouteConfigWithClusterSpecifierPluginsAndNormalRoute([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, true), + }, []string{"cspA"}), + wantUpdate: goodUpdateWithNormalRoute, + rlsEnabled: true, + }, + // This tests a scenario where a route has an unsupported cluster + // specifier. Any routes with an unsupported cluster specifier should be + // ignored. The config has two routes, and only one of them should be + // present in the update. + { + name: "unsupported-cluster-specifier-route-should-ignore", + rc: goodRouteConfigWithUnsupportedClusterSpecifier, + wantUpdate: goodUpdateWithNormalRoute, + rlsEnabled: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -747,7 +817,7 @@ func (mockClusterSpecifierPlugin) TypeURLs() []string { } func (mockClusterSpecifierPlugin) ParseClusterSpecifierConfig(proto.Message) (clusterspecifier.BalancerConfig, error) { - return nil, nil + return []map[string]interface{}{}, nil } type errorClusterSpecifierPlugin struct{} From 3cccf6a43b55b4a089e832213b0e34df859e1978 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 1 Apr 2022 10:20:47 -0700 Subject: [PATCH 479/998] xdsclient: always backoff between new streams even after successful stream (#5280) --- xds/internal/xdsclient/authority.go | 2 +- xds/internal/xdsclient/client_test.go | 2 +- xds/internal/xdsclient/controller.go | 6 ++-- .../xdsclient/controller/controller.go | 7 ++-- .../xdsclient/controller/controller_test.go | 6 ++-- .../xdsclient/controller/transport.go | 36 ++++++++----------- .../xdsclient/controller/v2_testutils_test.go | 5 +-- 7 files changed, 29 insertions(+), 35 deletions(-) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index eb1110ecc4c9..cfe6fc865aff 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go 
@@ -108,7 +108,7 @@ func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, ret.close() } }() - ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger) + ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger, nil) if err != nil { return nil, err } diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 31f6d466fbea..d496aa59adc3 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -102,7 +102,7 @@ type testController struct { func overrideNewController(t *testing.T) *testutils.Channel { origNewController := newController ch := testutils.NewChannel() - newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) { + newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, _ func(int) time.Duration) (controllerInterface, error) { ret := newTestController(config) ch.Send(ret) return ret, nil diff --git a/xds/internal/xdsclient/controller.go b/xds/internal/xdsclient/controller.go index 431a14498e1f..a99f4b164949 100644 --- a/xds/internal/xdsclient/controller.go +++ b/xds/internal/xdsclient/controller.go @@ -18,6 +18,8 @@ package xdsclient import ( + "time" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/controller" @@ -33,6 +35,6 @@ type controllerInterface interface { Close() } -var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) { - return controller.New(config, pubsub, validator, logger) +var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator 
xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (controllerInterface, error) { + return controller.New(config, pubsub, validator, logger, boff) } diff --git a/xds/internal/xdsclient/controller/controller.go b/xds/internal/xdsclient/controller/controller.go index 3f7371ae63c7..d48297145472 100644 --- a/xds/internal/xdsclient/controller/controller.go +++ b/xds/internal/xdsclient/controller/controller.go @@ -100,7 +100,7 @@ func SetGRPCDial(dialer func(target string, opts ...grpc.DialOption) (*grpc.Clie } // New creates a new controller. -func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (_ *Controller, retErr error) { +func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (_ *Controller, retErr error) { switch { case config == nil: return nil, errors.New("xds: no xds_server provided") @@ -120,12 +120,15 @@ func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, val }), } + if boff == nil { + boff = backoff.DefaultExponential.Backoff + } ret := &Controller{ config: config, updateValidator: validator, updateHandler: updateHandler, - backoff: backoff.DefaultExponential.Backoff, // TODO: should this be configurable? 
+ backoff: boff, streamCh: make(chan grpc.ClientStream, 1), sendCh: buffer.NewUnbounded(), watchMap: make(map[xdsresource.ResourceType]map[string]bool), diff --git a/xds/internal/xdsclient/controller/controller_test.go b/xds/internal/xdsclient/controller/controller_test.go index 644698cc26f7..599afb3a3e94 100644 --- a/xds/internal/xdsclient/controller/controller_test.go +++ b/xds/internal/xdsclient/controller/controller_test.go @@ -95,7 +95,7 @@ func (s) TestNew(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - c, err := New(test.config, noopUpdateHandler, nil, nil) // Only testing the config, other inputs are left as nil. + c, err := New(test.config, noopUpdateHandler, nil, nil, nil) // Only testing the config, other inputs are left as nil. defer func() { if c != nil { c.Close() @@ -123,7 +123,7 @@ func (s) TestNewWithGRPCDial(t *testing.T) { // Set the dialer and make sure it is called. SetGRPCDial(customDialer) - c, err := New(config, noopUpdateHandler, nil, nil) + c, err := New(config, noopUpdateHandler, nil, nil, nil) if err != nil { t.Fatalf("New(%+v) = %v, want no error", config, err) } @@ -138,7 +138,7 @@ func (s) TestNewWithGRPCDial(t *testing.T) { // Reset the dialer and make sure it is not called. SetGRPCDial(grpc.Dial) - c, err = New(config, noopUpdateHandler, nil, nil) + c, err = New(config, noopUpdateHandler, nil, nil, nil) defer func() { if c != nil { c.Close() diff --git a/xds/internal/xdsclient/controller/transport.go b/xds/internal/xdsclient/controller/transport.go index e97717d974c6..9e9836512725 100644 --- a/xds/internal/xdsclient/controller/transport.go +++ b/xds/internal/xdsclient/controller/transport.go @@ -59,26 +59,21 @@ func (t *Controller) run(ctx context.Context) { // report error (and log) when stats is transient failure. 
retries := 0 - for { - select { - case <-ctx.Done(): - return - default: - } - - if retries != 0 { - timer := time.NewTimer(t.backoff(retries)) + lastStreamStartTime := time.Time{} + for ctx.Err() == nil { + dur := time.Until(lastStreamStartTime.Add(t.backoff(retries))) + if dur > 0 { + timer := time.NewTimer(dur) select { case <-timer.C: case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } + timer.Stop() return } } retries++ + lastStreamStartTime = time.Now() stream, err := t.vClient.NewStream(ctx, t.cc) if err != nil { t.updateHandler.NewConnectionError(err) @@ -370,24 +365,21 @@ func (t *Controller) processAckInfo(ack *ackAction, stream grpc.ClientStream) (t // It blocks until the context is cancelled. func (t *Controller) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts controllerversion.LoadReportingOptions) { retries := 0 - for { - if ctx.Err() != nil { - return - } - - if retries != 0 { - timer := time.NewTimer(t.backoff(retries)) + lastStreamStartTime := time.Time{} + for ctx.Err() == nil { + dur := time.Until(lastStreamStartTime.Add(t.backoff(retries))) + if dur > 0 { + timer := time.NewTimer(dur) select { case <-timer.C: case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } + timer.Stop() return } } retries++ + lastStreamStartTime = time.Now() stream, err := t.vClient.NewLoadStatsStream(ctx, cc) if err != nil { t.logger.Warningf("lrs: failed to create stream: %v", err) diff --git a/xds/internal/xdsclient/controller/v2_testutils_test.go b/xds/internal/xdsclient/controller/v2_testutils_test.go index dfd195827d46..de147d480e54 100644 --- a/xds/internal/xdsclient/controller/v2_testutils_test.go +++ b/xds/internal/xdsclient/controller/v2_testutils_test.go @@ -458,13 +458,10 @@ func newTestController(p pubsub.UpdateHandler, controlPlanAddr string, n *basepb Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), TransportAPI: version.TransportV2, NodeProto: n, - }, p, nil, l) + }, p, nil, l, b) if err != nil { return nil, err } - // This 
direct setting backoff seems a bit hacky, but should be OK for the - // tests. Or we need to make it configurable in New(). - c.backoff = b return c, nil } From 0066bf69deb33b0e5bee4de69090c3ef8f6991aa Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 1 Apr 2022 13:14:35 -0700 Subject: [PATCH 480/998] grpc: perform graceful switching of LB policies in the `ClientConn` by default (#5285) --- balancer_conn_wrappers.go | 331 ++++++++++++------ clientconn.go | 93 +---- clientconn_test.go | 10 +- .../balancer/gracefulswitch/gracefulswitch.go | 7 + internal/balancer/stub/stub.go | 16 +- test/balancer_switching_test.go | 309 ++++++++-------- test/resolver_update_test.go | 90 ++--- test/roundrobin_test.go | 5 +- 8 files changed, 438 insertions(+), 423 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 84934bc0e670..b1c23eaae0db 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -20,136 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. 
It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + cc *ClientConn -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. 
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. 
func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. - ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } case <-ccb.closed.Done(): } if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. 
- for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - if ccb == nil { - return +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil } - ccb.closed.Fire() - <-ccb.done.Done() + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) exitIdle() bool { - if ccb == nil { - return true - } - if !ccb.hasExitIdle { - return false +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. 
+ var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -167,39 +209,97 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -// updateClientConnState forwards the clientConn update to the wrapped balancer -// synchronously. -// -// Other calls from the channel like exitIdle() and handleSubConnStateChange() -// are handled asynchronously by pushing the update onto a channel, which is -// picked up by the watcher() goroutine and forwarded to the wrapped balancer. -// That approach cannot be taken here because the corresponding API on the -// balancer returns an error which needs to be sent back to the channel to be -// forward to the resolver. -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. 
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) } -func (ccb *ccBalancerWrapper) resolverError(err error) { - if ccb == nil { +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { return } - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.balancer.ExitIdle() +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. 
+ if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. 
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) @@ -209,15 +309,25 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. 
+ acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -229,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is diff --git a/clientconn.go b/clientconn.go index 86275dca4de7..3ed6eb8e75e3 100644 --- a/clientconn.go +++ b/clientconn.go @@ -278,7 +278,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -286,7 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -465,12 +465,12 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - balancerBuildOpts balancer.BuildOptions // TODO: delete once we move to the gracefulswitch balancer. 
- channelzID *channelz.Identifier // Channelz identifier for the channel. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -491,8 +491,6 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - curBalancerName string // TODO: delete as part of https://github.com/grpc/grpc-go/issues/5229. - balancerWrapper *ccBalancerWrapper // TODO: Use gracefulswitch balancer to be able to initialize this once and never rewrite. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -537,14 +535,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -666,21 +657,9 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. 
- var addrs []resolver.Address - for _, addr := range s.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - s.Addresses = addrs - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -709,50 +688,8 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. -// -// Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. 
- cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) - } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) -} - func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -1002,8 +939,6 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
var newBalancerName string if cc.sc != nil && cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name @@ -1023,7 +958,7 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel newBalancerName = PickFirstBalancerName } } - cc.switchBalancer(newBalancerName) + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1074,11 +1009,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } diff --git a/clientconn_test.go b/clientconn_test.go index 353d8fb325d4..21f70c8f2514 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -845,9 +845,13 @@ func (s) TestBackoffCancel(t *testing.T) { if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } - <-dialStrCh - cc.Close() - // Should not leak. May need -count 5000 to exercise. + defer cc.Close() + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for custom dialer to be invoked during Dial") + case <-dialStrCh: + } } // UpdateAddresses should cause the next reconnect to begin from the top of the diff --git a/internal/balancer/gracefulswitch/gracefulswitch.go b/internal/balancer/gracefulswitch/gracefulswitch.go index af6cc46189f4..7ba8f4d18319 100644 --- a/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/internal/balancer/gracefulswitch/gracefulswitch.go @@ -178,6 +178,9 @@ func (gsb *Balancer) ResolverError(err error) { } // ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. 
func (gsb *Balancer) ExitIdle() { balToUpdate := gsb.latestBalancer() if balToUpdate == nil { @@ -188,6 +191,10 @@ func (gsb *Balancer) ExitIdle() { // called. if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { ei.ExitIdle() + return + } + for sc := range balToUpdate.subconns { + sc.Connect() } } diff --git a/internal/balancer/stub/stub.go b/internal/balancer/stub/stub.go index 950eaaa0278a..9fe6d93c9db5 100644 --- a/internal/balancer/stub/stub.go +++ b/internal/balancer/stub/stub.go @@ -19,7 +19,12 @@ // Package stub implements a balancer for testing purposes. package stub -import "google.golang.org/grpc/balancer" +import ( + "encoding/json" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) // BalancerFuncs contains all balancer.Balancer functions with a preceding // *BalancerData parameter for passing additional instance information. Any @@ -28,6 +33,8 @@ type BalancerFuncs struct { // Init is called after ClientConn and BuildOptions are set in // BalancerData. It may be used to initialize BalancerData.Data. Init func(*BalancerData) + // ParseConfig is used for parsing LB configs, if specified. + ParseConfig func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) UpdateClientConnState func(*BalancerData, balancer.ClientConnState) error ResolverError func(*BalancerData, error) @@ -97,6 +104,13 @@ func (bb bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. func (bb bb) Name() string { return bb.name } +func (bb bb) ParseConfig(lbCfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + if bb.bf.ParseConfig != nil { + return bb.bf.ParseConfig(lbCfg) + } + return nil, nil +} + // Register registers a stub balancer builder which will call the provided // functions. The name used should be unique. 
func Register(name string, bf BalancerFuncs) { diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go index 2453738bfa47..ede88fda572b 100644 --- a/test/balancer_switching_test.go +++ b/test/balancer_switching_test.go @@ -21,9 +21,7 @@ package test import ( "context" "fmt" - "strings" "testing" - "time" "google.golang.org/grpc" "google.golang.org/grpc/balancer" @@ -43,21 +41,30 @@ const ( loadBalancedServicePort = 443 wantGRPCLBTraceDesc = `Channel switches to new LB policy "grpclb"` wantRoundRobinTraceDesc = `Channel switches to new LB policy "round_robin"` - wantPickFirstTraceDesc = `Channel switches to new LB policy "pick_first"` + + // This is the number of stub backends set up at the start of each test. The + // first backend is used for the "grpclb" policy and the rest are used for + // other LB policies to test balancer switching. + backendCount = 3 ) -// setupBackendsAndFakeGRPCLB sets up the stub server backends and a fake grpclb -// server for tests which exercise balancer switch scenarios involving grpclb. +// setupBackendsAndFakeGRPCLB sets up backendCount number of stub server +// backends and a fake grpclb server for tests which exercise balancer switch +// scenarios involving grpclb. +// +// The fake grpclb server always returns the first of the configured stub +// backends as backend addresses. So, the tests are free to use the other +// backends with other LB policies to verify balancer switching scenarios. +// // Returns a cleanup function to be invoked by the caller. 
func setupBackendsAndFakeGRPCLB(t *testing.T) ([]*stubserver.StubServer, *fakegrpclb.Server, func()) { czCleanup := channelz.NewChannelzStorageForTesting() backends, backendsCleanup := startBackendsForBalancerSwitch(t) - rawAddrs := stubBackendsToRawAddrs(backends) lbServer, err := fakegrpclb.NewServer(fakegrpclb.ServerParams{ LoadBalancedServiceName: loadBalancedServiceName, LoadBalancedServicePort: loadBalancedServicePort, - BackendAddresses: rawAddrs, + BackendAddresses: []string{backends[0].Address}, }) if err != nil { t.Fatalf("failed to create fake grpclb server: %v", err) @@ -81,7 +88,6 @@ func setupBackendsAndFakeGRPCLB(t *testing.T) ([]*stubserver.StubServer, *fakegr func startBackendsForBalancerSwitch(t *testing.T) ([]*stubserver.StubServer, func()) { t.Helper() - const backendCount = 3 backends := make([]*stubserver.StubServer, backendCount) for i := 0; i < backendCount; i++ { backend := &stubserver.StubServer{ @@ -100,40 +106,6 @@ func startBackendsForBalancerSwitch(t *testing.T) ([]*stubserver.StubServer, fun } } -// stubBackendsToRawAddrs converts from a set of stub server backends to raw -// address strings. Useful when pushing addresses to the fake grpclb server. -func stubBackendsToRawAddrs(backends []*stubserver.StubServer) []string { - addrs := make([]string, len(backends)) - for i, backend := range backends { - addrs[i] = backend.Address - } - return addrs -} - -// checkForTraceEvent looks for a trace event in the top level channel matching -// the given description. Events before since are ignored. Returns nil error if -// such an event is found. 
-func checkForTraceEvent(ctx context.Context, wantDesc string, since time.Time) error { - for { - if err := ctx.Err(); err != nil { - return err - } - tcs, _ := channelz.GetTopChannels(0, 0) - if len(tcs) != 1 { - return fmt.Errorf("channelz returned %d top channels, want 1", len(tcs)) - } - for _, event := range tcs[0].Trace.Events { - if event.Timestamp.Before(since) { - continue - } - if strings.Contains(event.Desc, wantDesc) { - return nil - } - } - time.Sleep(defaultTestShortTimeout) - } -} - // TestBalancerSwitch_Basic tests the basic scenario of switching from one LB // policy to another, as specified in the service config. func (s) TestBalancerSwitch_Basic(t *testing.T) { @@ -193,42 +165,31 @@ func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) { // Push a resolver update with no service config and a single address pointing // to the grpclb server we created above. This will cause the channel to - // switch to the "grpclb" balancer, and will equally distribute RPCs across - // the backends as the fake grpclb server does not support load reporting from - // the clients. - now := time.Now() + // switch to the "grpclb" balancer, which returns a single backend address. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[0:1]); err != nil { t.Fatal(err) } - if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) - } // Push a resolver update containing a non-existent grpclb server address. // This should not lead to a balancer switch. 
- now = time.Now() const nonExistentServer = "non-existent-grpclb-server-address" r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: nonExistentServer, Type: resolver.GRPCLB}}}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - wantDesc := fmt.Sprintf("Channel switches to new LB policy %q", nonExistentServer) - if err := checkForTraceEvent(sCtx, wantDesc, now); err == nil { - t.Fatal("channel switched balancers when expected not to") + if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + t.Fatal(err) } // Push a resolver update containing no grpclb server address. This should - // lead to the channel using the default LB policy which is pick_first. - now = time.Now() - r.UpdateState(resolver.State{Addresses: addrs}) - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + // lead to the channel using the default LB policy which is pick_first. The + // list of addresses pushed as part of this update is different from the one + // returned by the "grpclb" balancer. So, we should see RPCs going to the + // newly configured backends, as part of the balancer switch. + r.UpdateState(resolver.State{Addresses: addrs[1:]}) + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { t.Fatal(err) } - if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) - } } // TestBalancerSwitch_pickFirstToGRPCLB tests the scenario where the channel @@ -248,50 +209,31 @@ func (s) TestBalancerSwitch_pickFirstToGRPCLB(t *testing.T) { // Push a resolver update containing no grpclb server address. This should // lead to the channel using the default LB policy which is pick_first. 
- now := time.Now() - r.UpdateState(resolver.State{Addresses: addrs}) + r.UpdateState(resolver.State{Addresses: addrs[1:]}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) - } - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { t.Fatal(err) } // Push a resolver update with no service config and a single address pointing // to the grpclb server we created above. This will cause the channel to - // switch to the "grpclb" balancer, and will equally distribute RPCs across - // the backends as the fake grpclb server does not support load reporting from - // the clients. - now = time.Now() + // switch to the "grpclb" balancer, which returns a single backend address. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) - if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) - } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { t.Fatal(err) } // Push a resolver update containing a non-existent grpclb server address. // This should not lead to a balancer switch. 
- now = time.Now() - const nonExistentServer = "non-existent-grpclb-server-address" - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: nonExistentServer, Type: resolver.GRPCLB}}}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - wantDesc := fmt.Sprintf("Channel switches to new LB policy %q", nonExistentServer) - if err := checkForTraceEvent(sCtx, wantDesc, now); err == nil { - t.Fatal("channel switched balancers when expected not to") + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "nonExistentServer", Type: resolver.GRPCLB}}}) + if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + t.Fatal(err) } // Switch to "pick_first" again by sending no grpclb server addresses. - now = time.Now() - r.UpdateState(resolver.State{Addresses: addrs}) - if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) - } - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + r.UpdateState(resolver.State{Addresses: addrs[1:]}) + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { t.Fatal(err) } } @@ -327,47 +269,27 @@ func (s) TestBalancerSwitch_RoundRobinToGRPCLB(t *testing.T) { scpr := parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`) // Push a resolver update with the service config specifying "round_robin". 
- now := time.Now() - r.UpdateState(resolver.State{ - Addresses: addrs, - ServiceConfig: scpr, - }) + r.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) - } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { t.Fatal(err) } // Push a resolver update with no service config and a single address pointing // to the grpclb server we created above. This will cause the channel to - // switch to the "grpclb" balancer, and will equally distribute RPCs across - // the backends as the fake grpclb server does not support load reporting from - // the clients. - now = time.Now() + // switch to the "grpclb" balancer, which returns a single backend address. r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}, ServiceConfig: scpr, }) - if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) - } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { t.Fatal(err) } // Switch back to "round_robin". 
- now = time.Now() - r.UpdateState(resolver.State{ - Addresses: addrs, - ServiceConfig: scpr, - }) - if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) - } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + r.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr}) + if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { t.Fatal(err) } } @@ -401,16 +323,12 @@ func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) { // list fo pick_first. grpclbAddr := []resolver.Address{{Addr: "non-existent-grpclb-server-address", Type: resolver.GRPCLB}} addrs = append(grpclbAddr, addrs...) - now := time.Now() r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { t.Fatal(err) } - if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) - } // Push a resolver update with the same addresses, but with a service config // specifying "round_robin". The ClientConn is expected to filter out the @@ -443,29 +361,21 @@ func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing // Push a resolver update containing no grpclb server address. This should // lead to the channel using the default LB policy which is pick_first. 
- now := time.Now() - r.UpdateState(resolver.State{Addresses: addrs}) + r.UpdateState(resolver.State{Addresses: addrs[1:]}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkForTraceEvent(ctx, wantPickFirstTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantPickFirstTraceDesc, err) - } - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { t.Fatal(err) } // Push a resolver update with no service config. The addresses list contains // the stub backend addresses and a single address pointing to the grpclb // server we created above. This will cause the channel to switch to the - // "grpclb" balancer, and will equally distribute RPCs across the backends. - now = time.Now() + // "grpclb" balancer, which returns a single backend address. r.UpdateState(resolver.State{ - Addresses: append(addrs, resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), + Addresses: append(addrs[1:], resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), }) - if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) - } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { t.Fatal(err) } @@ -474,29 +384,19 @@ func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing // contains an address of type "grpclb". This should be preferred and hence // there should be no balancer switch. 
scpr := parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`) - now = time.Now() r.UpdateState(resolver.State{ - Addresses: append(addrs, resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), + Addresses: append(addrs[1:], resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), ServiceConfig: scpr, }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if err := checkForTraceEvent(sCtx, wantRoundRobinTraceDesc, now); err == nil { - t.Fatal("channel switched balancers when expected not to") - } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { t.Fatal(err) } // Switch to "round_robin" by removing the address of type "grpclb". - now = time.Now() - r.UpdateState(resolver.State{Addresses: addrs}) - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + r.UpdateState(resolver.State{Addresses: addrs[1:]}) + if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { t.Fatal(err) } - if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) - } } // TestBalancerSwitch_LoadBalancingConfigTrumps verifies that the @@ -518,31 +418,21 @@ func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { // Push a resolver update with no service config and a single address pointing // to the grpclb server we created above. This will cause the channel to - // switch to the "grpclb" balancer, and will equally distribute RPCs across - // the backends as the fake grpclb server does not support load reporting from - // the clients. - now := time.Now() + // switch to the "grpclb" balancer, which returns a single backend address. 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { t.Fatal(err) } - if err := checkForTraceEvent(ctx, wantGRPCLBTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantGRPCLBTraceDesc, err) - } // Push a resolver update with the service config specifying "round_robin" // through the recommended `loadBalancingConfig` field. - now = time.Now() r.UpdateState(resolver.State{ - Addresses: addrs, + Addresses: addrs[1:], ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - if err := checkForTraceEvent(ctx, wantRoundRobinTraceDesc, now); err != nil { - t.Fatalf("timeout when waiting for a trace event: %s, err: %v", wantRoundRobinTraceDesc, err) - } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { t.Fatal(err) } @@ -553,14 +443,8 @@ func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { // switched. And because the `loadBalancingConfig` field trumps everything // else, the address of type "grpclb" should be ignored. 
 grpclbAddr := resolver.Address{Addr: "non-existent-grpclb-server-address", Type: resolver.GRPCLB}
-	now = time.Now()
-	r.UpdateState(resolver.State{Addresses: append(addrs, grpclbAddr)})
-	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
-	defer sCancel()
-	if err := checkForTraceEvent(sCtx, wantRoundRobinTraceDesc, now); err == nil {
-		t.Fatal("channel switched balancers when expected not to")
-	}
-	if err := checkRoundRobin(ctx, cc, addrs); err != nil {
+	r.UpdateState(resolver.State{Addresses: append(addrs[1:], grpclbAddr)})
+	if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -634,3 +518,88 @@ func (s) TestBalancerSwitch_OldBalancerCallsRemoveSubConnInClose(t *testing.T) {
 	case <-done:
 	}
 }
+
+// TestBalancerSwitch_Graceful tests the graceful switching of LB policies. It
+// starts off by configuring "round_robin" on the channel and ensures that RPCs
+// are successful. Then, it switches to a stub balancer which does not report a
+// picker until instructed by the test to do so. At this point, the test
+// verifies that RPCs are still successful using the old balancer. Then the test
+// asks the new balancer to report a healthy picker and the test verifies that
+// the RPCs get routed using the picker reported by the new balancer.
+func (s) TestBalancerSwitch_Graceful(t *testing.T) {
+	backends, cleanup := startBackendsForBalancerSwitch(t)
+	defer cleanup()
+	addrs := stubBackendsToResolverAddrs(backends)
+
+	r := manual.NewBuilderWithScheme("whatever")
+	cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))
+	if err != nil {
+		t.Fatalf("grpc.Dial() failed: %v", err)
+	}
+	defer cc.Close()
+
+	// Push a resolver update with the service config specifying "round_robin".
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r.UpdateState(resolver.State{ + Addresses: addrs[1:], + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + t.Fatal(err) + } + + // Register a stub balancer which uses a "pick_first" balancer underneath and + // signals on a channel when it receives ClientConn updates. But it does not + // forward the ccUpdate to the underlying "pick_first" balancer until the test + // asks it to do so. This allows us to test the graceful switch functionality. + // Until the test asks the stub balancer to forward the ccUpdate, RPCs should + // get routed to the old balancer. And once the test gives the go ahead, RPCs + // should get routed to the new balancer. + ccUpdateCh := make(chan struct{}) + waitToProceed := make(chan struct{}) + stub.Register(t.Name(), stub.BalancerFuncs{ + Init: func(bd *stub.BalancerData) { + pf := balancer.Get(grpc.PickFirstBalancerName) + bd.Data = pf.Build(bd.ClientConn, bd.BuildOptions) + }, + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + bal := bd.Data.(balancer.Balancer) + close(ccUpdateCh) + go func() { + <-waitToProceed + bal.UpdateClientConnState(ccs) + }() + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + bal := bd.Data.(balancer.Balancer) + bal.UpdateSubConnState(sc, state) + }, + }) + + // Push a resolver update with the service config specifying our stub + // balancer. We should see a trace event for this balancer switch. But RPCs + // should still be routed to the old balancer since our stub balancer does not + // report a ready picker until we ask it to do so. 
+ r.UpdateState(resolver.State{ + Addresses: addrs[:1], + ServiceConfig: r.CC.ParseServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%v": {}}]}`, t.Name())), + }) + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for a ClientConnState update on the new balancer") + case <-ccUpdateCh: + } + if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + t.Fatal(err) + } + + // Ask our stub balancer to forward the earlier received ccUpdate to the + // underlying "pick_first" balancer which will result in a healthy picker + // being reported to the channel. RPCs should start using the new balancer. + close(waitToProceed) + if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + t.Fatal(err) + } +} diff --git a/test/resolver_update_test.go b/test/resolver_update_test.go index bf7f0d2c2ed5..6b568a227aa4 100644 --- a/test/resolver_update_test.go +++ b/test/resolver_update_test.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" @@ -127,54 +128,6 @@ func (s) TestResolverUpdate_InvalidServiceConfigAsFirstUpdate(t *testing.T) { } } -// The wrappingBalancer wraps a pick_first balancer and writes to a channel when -// it receives a ClientConn update. This is different to a stub balancer which -// only notifies of updates from grpc, but does not contain a real balancer. -// -// The wrappingBalancer allows us to write tests with a real backend and make -// real RPCs. 
-type wrappingBalancerBuilder struct { - name string - updateCh *testutils.Channel -} - -func (bb wrappingBalancerBuilder) Name() string { return bb.name } - -func (bb wrappingBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - pf := balancer.Get(grpc.PickFirstBalancerName) - b := &wrappingBalancer{ - Balancer: pf.Build(cc, opts), - updateCh: bb.updateCh, - } - return b -} - -func (bb wrappingBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &wrappingBalancerConfig{} - if err := json.Unmarshal(c, cfg); err != nil { - return nil, err - } - return cfg, nil -} - -type wrappingBalancer struct { - balancer.Balancer - updateCh *testutils.Channel -} - -func (b *wrappingBalancer) UpdateClientConnState(c balancer.ClientConnState) error { - if _, ok := c.BalancerConfig.(*wrappingBalancerConfig); !ok { - return fmt.Errorf("received balancer config of unsupported type %T", c.BalancerConfig) - } - b.updateCh.Send(c) - return b.Balancer.UpdateClientConnState(c) -} - -type wrappingBalancerConfig struct { - serviceconfig.LoadBalancingConfig - Config string `json:"config,omitempty"` -} - func verifyClientConnStateUpdate(got, want balancer.ClientConnState) error { if got, want := got.ResolverState.Addresses, want.ResolverState.Addresses; !cmp.Equal(got, want) { return fmt.Errorf("update got unexpected addresses: %v, want %v", got, want) @@ -193,11 +146,38 @@ func verifyClientConnStateUpdate(got, want balancer.ClientConnState) error { // having sent a good update. This should result in the ClientConn discarding // the new invalid service config, and continuing to use the old good config. func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { - // Register a wrapper balancer to get notified of ClientConn updates. 
- ccsCh := testutils.NewChannel() - balancer.Register(wrappingBalancerBuilder{ - name: t.Name(), - updateCh: ccsCh, + type wrappingBalancerConfig struct { + serviceconfig.LoadBalancingConfig + Config string `json:"config,omitempty"` + } + + // Register a stub balancer which uses a "pick_first" balancer underneath and + // signals on a channel when it receives ClientConn updates. + ccUpdateCh := testutils.NewChannel() + stub.Register(t.Name(), stub.BalancerFuncs{ + Init: func(bd *stub.BalancerData) { + pf := balancer.Get(grpc.PickFirstBalancerName) + bd.Data = pf.Build(bd.ClientConn, bd.BuildOptions) + }, + ParseConfig: func(lbCfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &wrappingBalancerConfig{} + if err := json.Unmarshal(lbCfg, cfg); err != nil { + return nil, err + } + return cfg, nil + }, + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + if _, ok := ccs.BalancerConfig.(*wrappingBalancerConfig); !ok { + return fmt.Errorf("received balancer config of unsupported type %T", ccs.BalancerConfig) + } + bal := bd.Data.(balancer.Balancer) + ccUpdateCh.Send(ccs) + return bal.UpdateClientConnState(ccs) + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + bal := bd.Data.(balancer.Balancer) + bal.UpdateSubConnState(sc, state) + }, }) // Start a backend exposing the test service. 
@@ -242,7 +222,7 @@ func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { }, BalancerConfig: &wrappingBalancerConfig{Config: lbCfg}, } - ccs, err := ccsCh.Receive(ctx) + ccs, err := ccUpdateCh.Receive(ctx) if err != nil { t.Fatalf("Timeout when waiting for ClientConnState update from grpc") } @@ -263,7 +243,7 @@ func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { badSC := r.CC.ParseServiceConfig("bad json service config") wantCCS.ResolverState.ServiceConfig = badSC r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: badSC}) - ccs, err = ccsCh.Receive(ctx) + ccs, err = ccUpdateCh.Receive(ctx) if err != nil { t.Fatalf("Timeout when waiting for ClientConnState update from grpc") } diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index 7f16aa2cb3ce..3724743ec7cf 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -45,7 +45,6 @@ const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` func checkRoundRobin(ctx context.Context, cc *grpc.ClientConn, addrs []resolver.Address) error { client := testgrpc.NewTestServiceClient(cc) - var peer peer.Peer // Make sure connections to all backends are up. backendCount := len(addrs) for i := 0; i < backendCount; i++ { @@ -54,6 +53,7 @@ func checkRoundRobin(ctx context.Context, cc *grpc.ClientConn, addrs []resolver. if ctx.Err() != nil { return fmt.Errorf("timeout waiting for connection to %q to be up", addrs[i].Addr) } + var peer peer.Peer if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { // Some tests remove backends and check if round robin is happening // across the remaining backends. In such cases, RPCs can initially fail @@ -69,10 +69,11 @@ func checkRoundRobin(ctx context.Context, cc *grpc.ClientConn, addrs []resolver. } // Make sure RPCs are sent to all backends. 
for i := 0; i < 3*backendCount; i++ { + var peer peer.Peer if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { return fmt.Errorf("EmptyCall() = %v, want ", err) } - if gotPeer, wantPeer := addrs[i%backendCount].Addr, peer.Addr.String(); gotPeer != wantPeer { + if gotPeer, wantPeer := peer.Addr.String(), addrs[i%backendCount].Addr; gotPeer != wantPeer { return fmt.Errorf("rpc sent to peer %q, want peer %q", gotPeer, wantPeer) } } From e583b196ce4717fdc024dd9432f8e4b229cc76e7 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 4 Apr 2022 17:38:37 -0400 Subject: [PATCH 481/998] xds: Add RLS in xDS e2e test (#5281) --- balancer/rls/balancer_test.go | 85 +++++------ balancer/rls/control_channel_test.go | 22 +-- balancer/rls/helpers_test.go | 71 --------- balancer/rls/picker_test.go | 50 +++---- .../testutils/rls/fake_rls_server.go | 28 +++- internal/testutils/wrappers.go | 74 ++++++++++ .../clusterspecifier/cluster_specifier.go | 5 + xds/internal/clusterspecifier/rls/rls.go | 22 +++ xds/internal/httpfilter/rbac/rbac.go | 17 ++- .../test/xds_client_integration_test.go | 136 ++++++++++++++++++ 10 files changed, 355 insertions(+), 155 deletions(-) rename balancer/rls/internal/test/e2e/rls_fakeserver.go => internal/testutils/rls/fake_rls_server.go (77%) create mode 100644 internal/testutils/wrappers.go diff --git a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go index 45d95b2ddeca..1c2f9fca32a5 100644 --- a/balancer/rls/balancer_test.go +++ b/balancer/rls/balancer_test.go @@ -36,6 +36,7 @@ import ( rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" + rlstest "google.golang.org/grpc/internal/testutils/rls" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -48,10 +49,10 @@ import ( // and the 
old one is closed. func (s) TestConfigUpdate_ControlChannel(t *testing.T) { // Start two RLS servers. - lis1 := newListenerWrapper(t, nil) - rlsServer1, rlsReqCh1 := setupFakeRLSServer(t, lis1) - lis2 := newListenerWrapper(t, nil) - rlsServer2, rlsReqCh2 := setupFakeRLSServer(t, lis2) + lis1 := testutils.NewListenerWrapper(t, nil) + rlsServer1, rlsReqCh1 := rlstest.SetupFakeRLSServer(t, lis1) + lis2 := testutils.NewListenerWrapper(t, nil) + rlsServer2, rlsReqCh2 := rlstest.SetupFakeRLSServer(t, lis2) // Build RLS service config with the RLS server pointing to the first one. // Set a very low value for maxAge to ensure that the entry expires soon. @@ -61,12 +62,12 @@ func (s) TestConfigUpdate_ControlChannel(t *testing.T) { // Start a couple of test backends, and set up the fake RLS servers to return // these as a target in the RLS response. backendCh1, backendAddress1 := startBackend(t) - rlsServer1.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} + rlsServer1.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} }) backendCh2, backendAddress2 := startBackend(t) - rlsServer2.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} + rlsServer2.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} }) // Register a manual resolver and push the RLS service config through it. 
@@ -84,11 +85,11 @@ func (s) TestConfigUpdate_ControlChannel(t *testing.T) { makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh1) // Ensure a connection is established to the first RLS server. - val, err := lis1.newConnCh.Receive(ctx) + val, err := lis1.NewConnCh.Receive(ctx) if err != nil { t.Fatal("Timeout expired when waiting for LB policy to create control channel") } - conn1 := val.(*connWrapper) + conn1 := val.(*testutils.ConnWrapper) // Make sure an RLS request is sent out. verifyRLSRequest(t, rlsReqCh1, true) @@ -105,12 +106,12 @@ func (s) TestConfigUpdate_ControlChannel(t *testing.T) { r.UpdateState(resolver.State{ServiceConfig: sc}) // Ensure a connection is established to the second RLS server. - if _, err := lis2.newConnCh.Receive(ctx); err != nil { + if _, err := lis2.NewConnCh.Receive(ctx); err != nil { t.Fatal("Timeout expired when waiting for LB policy to create control channel") } // Ensure the connection to the old one is closed. - if _, err := conn1.closeCh.Receive(ctx); err != nil { + if _, err := conn1.CloseCh.Receive(ctx); err != nil { t.Fatal("Timeout expired when waiting for LB policy to close control channel") } @@ -136,8 +137,8 @@ func (s) TestConfigUpdate_ControlChannelWithCreds(t *testing.T) { } // Start an RLS server with the wrapped listener and credentials. - lis := newListenerWrapper(t, nil) - rlsServer, rlsReqCh := setupFakeRLSServer(t, lis, grpc.Creds(serverCreds)) + lis := testutils.NewListenerWrapper(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, lis, grpc.Creds(serverCreds)) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Build RLS service config. @@ -147,8 +148,8 @@ func (s) TestConfigUpdate_ControlChannelWithCreds(t *testing.T) { // and set up the fake RLS server to return this as the target in the RLS // response. 
backendCh, backendAddress := startBackend(t, grpc.Creds(serverCreds)) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} }) // Register a manual resolver and push the RLS service config through it. @@ -173,7 +174,7 @@ func (s) TestConfigUpdate_ControlChannelWithCreds(t *testing.T) { verifyRLSRequest(t, rlsReqCh, true) // Ensure a connection is established to the first RLS server. - if _, err := lis.newConnCh.Receive(ctx); err != nil { + if _, err := lis.NewConnCh.Receive(ctx); err != nil { t.Fatal("Timeout expired when waiting for LB policy to create control channel") } } @@ -184,7 +185,7 @@ func (s) TestConfigUpdate_ControlChannelWithCreds(t *testing.T) { // provided service config is applied for the control channel. func (s) TestConfigUpdate_ControlChannelServiceConfig(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Register a balancer to be used for the control channel, and set up a @@ -211,8 +212,8 @@ func (s) TestConfigUpdate_ControlChannelServiceConfig(t *testing.T) { // Start a test backend, and set up the fake RLS server to return this as a // target in the RLS response. 
backendCh, backendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} }) // Register a manual resolver and push the RLS service config through it. @@ -244,7 +245,7 @@ func (s) TestConfigUpdate_ControlChannelServiceConfig(t *testing.T) { // target after the config has been applied. func (s) TestConfigUpdate_DefaultTarget(t *testing.T) { // Start an RLS server and set the throttler to always throttle requests. - rlsServer, _ := setupFakeRLSServer(t, nil) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) // Build RLS service config with a default target. @@ -284,7 +285,7 @@ func (s) TestConfigUpdate_DefaultTarget(t *testing.T) { // child policy configuration are propagated correctly. func (s) TestConfigUpdate_ChildPolicyConfigs(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Start a default backend and a test backend. @@ -292,8 +293,8 @@ func (s) TestConfigUpdate_ChildPolicyConfigs(t *testing.T) { testBackendCh, testBackendAddress := startBackend(t) // Set up the RLS server to respond with the test backend. 
- rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} }) // Set up a test balancer callback to push configs received by child policies. @@ -411,7 +412,7 @@ func (s) TestConfigUpdate_ChildPolicyConfigs(t *testing.T) { // handled by closing the old balancer and creating a new one. func (s) TestConfigUpdate_ChildPolicyChange(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, _ := setupFakeRLSServer(t, nil) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Set up balancer callbacks. @@ -507,14 +508,14 @@ func (s) TestConfigUpdate_ChildPolicyChange(t *testing.T) { // the caller of the RPC. func (s) TestConfigUpdate_BadChildPolicyConfigs(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Set up the RLS server to respond with a bad target field which is expected // to cause the child policy's ParseTarget to fail and should result in the LB // policy creating a lame child policy wrapper. 
- rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{e2e.RLSChildPolicyBadTarget}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{e2e.RLSChildPolicyBadTarget}}} }) // Build RLS service config with a default target. This default backend is @@ -567,7 +568,7 @@ func (s) TestConfigUpdate_DataCacheSizeDecrease(t *testing.T) { defer func() { minEvictDuration = origMinEvictDuration }() // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Register an LB policy to act as the child policy for RLS LB policy. @@ -582,14 +583,14 @@ func (s) TestConfigUpdate_DataCacheSizeDecrease(t *testing.T) { // these as targets in the RLS response, based on request keys. 
backendCh1, backendAddress1 := startBackend(t) backendCh2, backendAddress2 := startBackend(t) - rlsServer.SetResponseCallback(func(ctx context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + rlsServer.SetResponseCallback(func(ctx context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { if req.KeyMap["k1"] == "v1" { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} } if req.KeyMap["k2"] == "v2" { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} } - return &e2e.RouteLookupResponse{Err: errors.New("no keys in request metadata")} + return &rlstest.RouteLookupResponse{Err: errors.New("no keys in request metadata")} }) // Register a manual resolver and push the RLS service config through it. @@ -661,7 +662,7 @@ func (s) TestDataCachePurging(t *testing.T) { defer func() { dataCachePurgeHook = origDataCachePurgeHook }() // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Register an LB policy to act as the child policy for RLS LB policy. @@ -678,8 +679,8 @@ func (s) TestDataCachePurging(t *testing.T) { // Start a test backend, and set up the fake RLS server to return this as a // target in the RLS response. 
backendCh, backendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} }) // Register a manual resolver and push the RLS service config through it. @@ -740,7 +741,7 @@ func (s) TestControlChannelConnectivityStateMonitoring(t *testing.T) { // Start an RLS server with the restartable listener and set the throttler to // never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, lis) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, lis) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Override the reset backoff hook to get notified. @@ -769,8 +770,8 @@ func (s) TestControlChannelConnectivityStateMonitoring(t *testing.T) { // Start a test backend, and set up the fake RLS server to return this as a // target in the RLS response. backendCh, backendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} }) // Register a manual resolver and push the RLS service config through it. 
@@ -818,7 +819,11 @@ func (s) TestControlChannelConnectivityStateMonitoring(t *testing.T) { ctxOutgoing := metadata.AppendToOutgoingContext(ctx, "n1", "v1") makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) - <-resetBackoffDone + select { + case <-ctx.Done(): + t.Fatalf("Timed out waiting for resetBackoffDone") + case <-resetBackoffDone: + } // The fact that the above RPC succeeded indicates that the control channel // has moved back to READY. The connectivity state monitoring code should have diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go index 7ecdc6bec5fd..ccba8e36f976 100644 --- a/balancer/rls/control_channel_test.go +++ b/balancer/rls/control_channel_test.go @@ -32,11 +32,11 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/test/e2e" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlstest "google.golang.org/grpc/internal/testutils/rls" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" @@ -47,7 +47,7 @@ import ( // indicates that the control channel needs to be throttled. func (s) TestControlChannelThrottled(t *testing.T) { // Start an RLS server and set the throttler to always throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) // Create a control channel to the fake RLS server. @@ -70,12 +70,12 @@ func (s) TestControlChannelThrottled(t *testing.T) { // TestLookupFailure tests the case where the RLS server responds with an error. func (s) TestLookupFailure(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. 
- rlsServer, _ := setupFakeRLSServer(t, nil) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Setup the RLS server to respond with errors. - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Err: errors.New("rls failure")} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Err: errors.New("rls failure")} }) // Create a control channel to the fake RLS server. @@ -114,7 +114,7 @@ func (s) TestLookupDeadlineExceeded(t *testing.T) { } // Start an RLS server and set the throttler to never throttle. - rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Create a control channel with a small deadline. @@ -246,7 +246,7 @@ var ( Reason: rlspb.RouteLookupRequest_REASON_MISS, StaleHeaderData: staleHeaderData, } - lookupResponse = &e2e.RouteLookupResponse{ + lookupResponse = &rlstest.RouteLookupResponse{ Resp: &rlspb.RouteLookupResponse{ Targets: wantTargets, HeaderData: wantHeaderData, @@ -256,11 +256,11 @@ var ( func testControlChannelCredsSuccess(t *testing.T, sopts []grpc.ServerOption, bopts balancer.BuildOptions) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, _ := setupFakeRLSServer(t, nil, sopts...) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, sopts...) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Setup the RLS server to respond with a valid response. 
- rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { return lookupResponse }) @@ -356,7 +356,7 @@ func testControlChannelCredsFailure(t *testing.T, sopts []grpc.ServerOption, bop // Start an RLS server and set the throttler to never throttle requests. The // creds failures happen before the RPC handler on the server is invoked. // So, there is need to setup the request and responses on the fake server. - rlsServer, _ := setupFakeRLSServer(t, nil, sopts...) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, sopts...) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Create the control channel to the fake server. @@ -454,7 +454,7 @@ func (*unsupportedCredsBundle) NewWithMode(mode string) (credentials.Bundle, err // TestNewControlChannelUnsupportedCredsBundle tests the case where the control // channel is configured with a bundle which does not support the mode we use. func (s) TestNewControlChannelUnsupportedCredsBundle(t *testing.T) { - rlsServer, _ := setupFakeRLSServer(t, nil) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) // Create the control channel to the fake server. 
ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, balancer.BuildOptions{CredsBundle: &unsupportedCredsBundle{}}, nil) diff --git a/balancer/rls/helpers_test.go b/balancer/rls/helpers_test.go index 26123f8ce855..5fca54a63ace 100644 --- a/balancer/rls/helpers_test.go +++ b/balancer/rls/helpers_test.go @@ -20,7 +20,6 @@ package rls import ( "context" - "net" "strings" "sync" "testing" @@ -35,7 +34,6 @@ import ( rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/stubserver" - "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" @@ -62,52 +60,6 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -// connWrapper wraps a net.Conn and pushes on a channel when closed. -type connWrapper struct { - net.Conn - closeCh *testutils.Channel -} - -func (cw *connWrapper) Close() error { - err := cw.Conn.Close() - cw.closeCh.Replace(nil) - return err -} - -// listenerWrapper wraps a net.Listener and the returned net.Conn. -// -// It pushes on a channel whenever it accepts a new connection. -type listenerWrapper struct { - net.Listener - newConnCh *testutils.Channel -} - -func (l *listenerWrapper) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err - } - closeCh := testutils.NewChannel() - conn := &connWrapper{Conn: c, closeCh: closeCh} - l.newConnCh.Send(conn) - return conn, nil -} - -func newListenerWrapper(t *testing.T, lis net.Listener) *listenerWrapper { - if lis == nil { - var err error - lis, err = testutils.LocalTCPListener() - if err != nil { - t.Fatal(err) - } - } - - return &listenerWrapper{ - Listener: lis, - newConnCh: testutils.NewChannel(), - } -} - // fakeBackoffStrategy is a fake implementation of the backoff.Strategy // interface, for tests to inject the backoff duration. 
type fakeBackoffStrategy struct { @@ -173,29 +125,6 @@ func overrideAdaptiveThrottler(t *testing.T, f *fakeThrottler) { t.Cleanup(func() { newAdaptiveThrottler = origAdaptiveThrottler }) } -// setupFakeRLSServer starts and returns a fake RouteLookupService server -// listening on the given listener or on a random local port. Also returns a -// channel for tests to get notified whenever the RouteLookup RPC is invoked on -// the fake server. -// -// This function sets up the fake server to respond with an empty response for -// the RouteLookup RPCs. Tests can override this by calling the -// SetResponseCallback() method on the returned fake server. -func setupFakeRLSServer(t *testing.T, lis net.Listener, opts ...grpc.ServerOption) (*e2e.FakeRouteLookupServer, chan struct{}) { - s, cancel := e2e.StartFakeRouteLookupServer(t, lis, opts...) - t.Logf("Started fake RLS server at %q", s.Address) - - ch := make(chan struct{}, 1) - s.SetRequestCallback(func(request *rlspb.RouteLookupRequest) { - select { - case ch <- struct{}{}: - default: - } - }) - t.Cleanup(cancel) - return s, ch -} - // buildBasicRLSConfig constructs a basic service config for the RLS LB policy // with header matching rules. This expects the passed child policy name to // have been registered by the caller. diff --git a/balancer/rls/picker_test.go b/balancer/rls/picker_test.go index cfe45477f0bc..a52aaa5e563d 100644 --- a/balancer/rls/picker_test.go +++ b/balancer/rls/picker_test.go @@ -25,10 +25,10 @@ import ( "time" "google.golang.org/grpc" - "google.golang.org/grpc/balancer/rls/internal/test/e2e" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlstest "google.golang.org/grpc/internal/testutils/rls" "google.golang.org/protobuf/types/known/durationpb" ) @@ -36,7 +36,7 @@ import ( // and no pending request either, and the ensuing RLS request is throttled. 
func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithDefaultTarget(t *testing.T) { // Start an RLS server and set the throttler to always throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) // Build RLS service config with a default target. @@ -68,7 +68,7 @@ func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithDefaultTarget(t *tes // expected to fail with an RLS throttled error. func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithoutDefaultTarget(t *testing.T) { // Start an RLS server and set the throttler to always throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) // Build an RLS config without a default target. @@ -99,7 +99,7 @@ func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithoutDefaultTarget(t * // deadline exceeded error. func (s) TestPick_DataCacheMiss_NoPendingEntry_NotThrottled(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Build an RLS config without a default target. @@ -158,7 +158,7 @@ func (s) TestPick_DataCacheMiss_PendingEntryExists(t *testing.T) { } // Start an RLS server and set the throttler to never throttle. - rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Build RLS service config with an optional default target. @@ -203,7 +203,7 @@ func (s) TestPick_DataCacheMiss_PendingEntryExists(t *testing.T) { // delegated to the child policy. 
func (s) TestPick_DataCacheHit_NoPendingEntry_ValidEntry(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Build the RLS config without a default target. @@ -212,8 +212,8 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ValidEntry(t *testing.T) { // Start a test backend, and setup the fake RLS server to return this as a // target in the RLS response. testBackendCh, testBackendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} }) // Register a manual resolver and push the RLS service config through it. @@ -264,7 +264,7 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_StaleEntry(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // Start an RLS server and setup the throttler appropriately. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) var throttler *fakeThrottler if test.throttled { throttler = oneTimeAllowingThrottler() @@ -283,8 +283,8 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_StaleEntry(t *testing.T) { // Start a test backend, and setup the fake RLS server to return // this as a target in the RLS response. 
testBackendCh, testBackendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} }) // Register a manual resolver and push the RLS service config @@ -364,7 +364,7 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntry(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // Start an RLS server and setup the throttler appropriately. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) var throttler *fakeThrottler if test.throttled { throttler = oneTimeAllowingThrottler() @@ -390,8 +390,8 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntry(t *testing.T) { // Start a test backend, and setup the fake RLS server to return // this as a target in the RLS response. 
testBackendCh, testBackendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} }) // Register a manual resolver and push the RLS service config @@ -462,7 +462,7 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntryInBackoff(t *testing.T for _, test := range tests { t.Run(test.name, func(t *testing.T) { // Start an RLS server and set the throttler to never throttle requests. - rlsServer, rlsReqCh := setupFakeRLSServer(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Override the backoff strategy to return a large backoff which @@ -488,8 +488,8 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntryInBackoff(t *testing.T // Start a test backend, and set up the fake RLS server to return this as // a target in the RLS response. testBackendCh, testBackendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} }) // Register a manual resolver and push the RLS service config through it. @@ -513,8 +513,8 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntryInBackoff(t *testing.T // Set up the fake RLS server to return errors. 
This will push the cache // entry into backoff. var rlsLastErr = errors.New("last RLS request failed") - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Err: rlsLastErr} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Err: rlsLastErr} }) // Since the RLS server is now configured to return errors, this will push @@ -567,7 +567,7 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_StaleEntry(t *testing.T) { } // Start an RLS server and set the throttler to never throttle. - rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Build RLS service config with an optional default target. @@ -584,8 +584,8 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_StaleEntry(t *testing.T) { // Start a test backend, and setup the fake RLS server to return // this as a target in the RLS response. testBackendCh, testBackendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} }) // Register a manual resolver and push the RLS service config @@ -662,7 +662,7 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_ExpiredEntry(t *testing.T) { } // Start an RLS server and set the throttler to never throttle. 
- rlsServer, _ := setupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) overrideAdaptiveThrottler(t, neverThrottlingThrottler()) // Build RLS service config with an optional default target. @@ -677,8 +677,8 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_ExpiredEntry(t *testing.T) { // Start a test backend, and setup the fake RLS server to return // this as a target in the RLS response. testBackendCh, testBackendAddress := startBackend(t) - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *e2e.RouteLookupResponse { - return &e2e.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} }) // Register a manual resolver and push the RLS service config diff --git a/balancer/rls/internal/test/e2e/rls_fakeserver.go b/internal/testutils/rls/fake_rls_server.go similarity index 77% rename from balancer/rls/internal/test/e2e/rls_fakeserver.go rename to internal/testutils/rls/fake_rls_server.go index 521985412822..e64c9de3ae7f 100644 --- a/balancer/rls/internal/test/e2e/rls_fakeserver.go +++ b/internal/testutils/rls/fake_rls_server.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,8 @@ * */ -package e2e +// Package rls contains utilities for RouteLookupService e2e tests. 
+package rls import ( "context" @@ -39,6 +40,29 @@ type RouteLookupResponse struct { Err error } +// SetupFakeRLSServer starts and returns a fake RouteLookupService server +// listening on the given listener or on a random local port. Also returns a +// channel for tests to get notified whenever the RouteLookup RPC is invoked on +// the fake server. +// +// This function sets up the fake server to respond with an empty response for +// the RouteLookup RPCs. Tests can override this by calling the +// SetResponseCallback() method on the returned fake server. +func SetupFakeRLSServer(t *testing.T, lis net.Listener, opts ...grpc.ServerOption) (*FakeRouteLookupServer, chan struct{}) { + s, cancel := StartFakeRouteLookupServer(t, lis, opts...) + t.Logf("Started fake RLS server at %q", s.Address) + + ch := make(chan struct{}, 1) + s.SetRequestCallback(func(request *rlspb.RouteLookupRequest) { + select { + case ch <- struct{}{}: + default: + } + }) + t.Cleanup(cancel) + return s, ch +} + // FakeRouteLookupServer is a fake implementation of the RouteLookupService. // // It is safe for concurrent use. diff --git a/internal/testutils/wrappers.go b/internal/testutils/wrappers.go new file mode 100644 index 000000000000..c9b596d8851c --- /dev/null +++ b/internal/testutils/wrappers.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package testutils + +import ( + "net" + "testing" +) + +// ConnWrapper wraps a net.Conn and pushes on a channel when closed. +type ConnWrapper struct { + net.Conn + CloseCh *Channel +} + +// Close closes the connection and sends a value on the close channel. +func (cw *ConnWrapper) Close() error { + err := cw.Conn.Close() + cw.CloseCh.Replace(nil) + return err +} + +// ListenerWrapper wraps a net.Listener and the returned net.Conn. +// +// It pushes on a channel whenever it accepts a new connection. +type ListenerWrapper struct { + net.Listener + NewConnCh *Channel +} + +// Accept wraps the Listener Accept and sends the accepted connection on a +// channel. +func (l *ListenerWrapper) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + closeCh := NewChannel() + conn := &ConnWrapper{Conn: c, CloseCh: closeCh} + l.NewConnCh.Send(conn) + return conn, nil +} + +// NewListenerWrapper returns a ListenerWrapper. +func NewListenerWrapper(t *testing.T, lis net.Listener) *ListenerWrapper { + if lis == nil { + var err error + lis, err = LocalTCPListener() + if err != nil { + t.Fatal(err) + } + } + + return &ListenerWrapper{ + Listener: lis, + NewConnCh: NewChannel(), + } +} diff --git a/xds/internal/clusterspecifier/cluster_specifier.go b/xds/internal/clusterspecifier/cluster_specifier.go index 9bb30f16589f..b95a101116ed 100644 --- a/xds/internal/clusterspecifier/cluster_specifier.go +++ b/xds/internal/clusterspecifier/cluster_specifier.go @@ -65,3 +65,8 @@ func Register(cs ClusterSpecifier) { func Get(typeURL string) ClusterSpecifier { return m[typeURL] } + +// UnregisterForTesting unregisters the ClusterSpecifier for testing purposes. 
+func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go index 037795834c9b..69fb7f4a9098 100644 --- a/xds/internal/clusterspecifier/rls/rls.go +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -40,6 +40,28 @@ func init() { } } +// RegisterForTesting registers the RLS Cluster Specifier Plugin for testing +// purposes, regardless of the XDSRLS environment variable. This is needed +// because there is no way to set the XDSRLS environment variable to true in a +// test before init() in this package is run. +// +// TODO: Remove this function once the RLS env var is removed. +func RegisterForTesting() { + clusterspecifier.Register(rls{}) +} + +// UnregisterForTesting unregisters the RLS Cluster Specifier Plugin for testing +// purposes. This is needed because there is no way to unregister the RLS +// Cluster Specifier Plugin after registering it solely for testing purposes +// using rls.RegisterForTesting(). +// +// TODO: Remove this function once the RLS env var is removed. +func UnregisterForTesting() { + for _, typeURL := range rls.TypeURLs(rls{}) { + clusterspecifier.UnregisterForTesting(typeURL) + } +} + type rls struct{} func (rls) TypeURLs() []string { diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index bb85dc80d460..3dc4b56826e6 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -43,16 +43,21 @@ func init() { } } -// RegisterForTesting registers the RBAC HTTP Filter for testing purposes, regardless -// of the RBAC environment variable. This is needed because there is no way to set the RBAC -// environment variable to true in a test before init() in this package is run. +// RegisterForTesting registers the RBAC HTTP Filter for testing purposes, +// regardless of the RBAC environment variable. 
This is needed because there is +// no way to set the RBAC environment variable to true in a test before init() +// in this package is run. +// +// TODO: Remove this function once the RBAC env var is removed. func RegisterForTesting() { httpfilter.Register(builder{}) } -// UnregisterForTesting unregisters the RBAC HTTP Filter for testing purposes. This is needed because -// there is no way to unregister the HTTP Filter after registering it solely for testing purposes using -// rbac.RegisterForTesting() +// UnregisterForTesting unregisters the RBAC HTTP Filter for testing purposes. +// This is needed because there is no way to unregister the HTTP Filter after +// registering it solely for testing purposes using rbac.RegisterForTesting(). +// +// TODO: Remove this function once the RBAC env var is removed. func UnregisterForTesting() { for _, typeURL := range builder.TypeURLs(builder{}) { httpfilter.UnregisterForTesting(typeURL) diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go index 6a9a8c9688f0..e9e3fd584bf3 100644 --- a/xds/internal/test/xds_client_integration_test.go +++ b/xds/internal/test/xds_client_integration_test.go @@ -30,11 +30,23 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" + rlstest "google.golang.org/grpc/internal/testutils/rls" "google.golang.org/grpc/status" + rlscsp "google.golang.org/grpc/xds/internal/clusterspecifier/rls" "google.golang.org/grpc/xds/internal/testutils/e2e" + "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/grpc/balancer/rls" // Register the RLS Load Balancing policy. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS Cluster Specifier Plugin. 
+ + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" testpb "google.golang.org/grpc/test/grpc_testing" @@ -246,3 +258,127 @@ func (s) TestClientSideRetry(t *testing.T) { }) } } + +// defaultClientResourcesWithRLSCSP returns a set of resources (LDS, RDS, CDS, EDS) for a +// client to connect to a server with a RLS Load Balancer as a child of Cluster Manager. +func defaultClientResourcesWithRLSCSP(params e2e.ResourceParams, rlsProto *rlspb.RouteLookupConfig) e2e.UpdateOptions { + routeConfigName := "route-" + params.DialTarget + clusterName := "cluster-" + params.DialTarget + endpointsName := "endpoints-" + params.DialTarget + return e2e.UpdateOptions{ + NodeID: params.NodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{defaultRouteConfigWithRLSCSP(routeConfigName, params.DialTarget, rlsProto)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, endpointsName, params.SecLevel)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, []uint32{params.Port})}, + } +} + +// defaultRouteConfigWithRLSCSP returns a basic xds RouteConfig resource with an +// RLS Cluster Specifier Plugin configured as the route. 
+func defaultRouteConfigWithRLSCSP(routeName, ldsTarget string, rlsProto *rlspb.RouteLookupConfig) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + ClusterSpecifierPlugins: []*v3routepb.ClusterSpecifierPlugin{ + { + Extension: &v3corepb.TypedExtensionConfig{ + Name: "rls-csp", + TypedConfig: testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: rlsProto, + }), + }, + }, + }, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: "rls-csp"}, + }}, + }}, + }}, + } +} + +// TestRLSinxDS tests an xDS configured system with a RLS Balancer present. +// This test sets up the RLS Balancer using the RLS Cluster Specifier Plugin, +// spins up a test service and has a fake RLS Server correctly respond with a target +// corresponding to this test service. This test asserts an RPC proceeds as normal +// with the RLS Balancer as part of system. +func (s) TestRLSinxDS(t *testing.T) { + oldRLS := envconfig.XDSRLS + envconfig.XDSRLS = true + rlscsp.RegisterForTesting() + defer func() { + envconfig.XDSRLS = oldRLS + rlscsp.UnregisterForTesting() + }() + + // Set up all components and configuration necessary - management server, + // xDS resolver, fake RLS Server, and xDS configuration which specifies a + // RLS Balancer that communicates to this set up fake RLS Server. 
+ managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + defer cleanup1() + port, cleanup2 := clientSetup(t, &testService{}) + defer cleanup2() + + lis := testutils.NewListenerWrapper(t, nil) + rlsServer, rlsRequestCh := rlstest.SetupFakeRLSServer(t, lis) + rlsProto := &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{{Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}}}, + LookupService: rlsServer.Address, + LookupServiceTimeout: durationpb.New(defaultTestTimeout), + CacheSizeBytes: 1024, + } + + const serviceName = "my-service-client-side-xds" + resources := defaultClientResourcesWithRLSCSP(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }, rlsProto) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Configure the fake RLS Server to set the RLS Balancers child CDS + // Cluster's name as the target for the RPC to use. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{"cluster-" + serviceName}}} + }) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + // Successfully sending the RPC will require the RLS Load Balancer to + // communicate with the fake RLS Server for information about the target. 
+ if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // These RLS Verifications makes sure the RLS Load Balancer is actually part + // of the xDS Configured system that correctly sends out RPC. + + // Verify connection is established to RLS Server. + if _, err = lis.NewConnCh.Receive(ctx); err != nil { + t.Fatal("Timeout when waiting for RLS LB policy to create control channel") + } + + // Verify an rls request is sent out to fake RLS Server. + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for an RLS request to be sent out") + case <-rlsRequestCh: + } +} From 337b815c4150a12d15349cb9be5f22fb391918de Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 5 Apr 2022 15:00:06 -0700 Subject: [PATCH 482/998] interop: build client without timeout; add logs to help debug failures (#5294) --- .github/workflows/testing.yml | 11 ++++++++--- interop/client/client.go | 1 + interop/interop_test.sh | 21 +++++++++++++++------ interop/server/server.go | 1 + 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index cef842601f3b..58bcb160e33c 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -50,9 +50,6 @@ jobs: goversion: 1.18 testflags: -race - - type: extras - goversion: 1.18 - - type: tests goversion: 1.18 goarch: 386 @@ -70,6 +67,9 @@ jobs: - type: tests goversion: 1.15 + - type: extras + goversion: 1.18 + steps: # Setup the environment. 
- name: Setup GOARCH @@ -114,8 +114,13 @@ jobs: - name: Run extras tests if: matrix.type == 'extras' run: | + export TERM=${TERM:-xterm} go version + echo -e "\n-- Running Examples --" examples/examples_test.sh + echo -e "\n-- Running AdvancedTLS Examples --" security/advancedtls/examples/examples_test.sh + echo -e "\n-- Running Interop Test --" interop/interop_test.sh + echo -e "\n-- Running xDS E2E Test --" xds/internal/test/e2e/run.sh diff --git a/interop/client/client.go b/interop/client/client.go index ba1598db0ee6..a1538b0c6921 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -107,6 +107,7 @@ const ( func main() { flag.Parse() + logger.Infof("Client running with test case %q", *testCase) var useGDC bool // use google default creds var useCEC bool // use compute engine creds if *customCredentialsType != "" { diff --git a/interop/interop_test.sh b/interop/interop_test.sh index 5aeaa2aa10a8..99e12c3c3829 100755 --- a/interop/interop_test.sh +++ b/interop/interop_test.sh @@ -36,13 +36,13 @@ clean () { } fail () { - echo "$(tput setaf 1) $1 $(tput sgr 0)" + echo "$(tput setaf 1) $(date): $1 $(tput sgr 0)" clean exit 1 } pass () { - echo "$(tput setaf 2) $1 $(tput sgr 0)" + echo "$(tput setaf 2) $(date): $1 $(tput sgr 0)" } # Don't run some tests that need a special environment: @@ -73,21 +73,30 @@ CASES=( ) # Build server +echo "$(tput setaf 4) $(date): building server $(tput sgr 0)" if ! go build -o /dev/null ./interop/server; then fail "failed to build server" else pass "successfully built server" fi +# Build client +echo "$(tput setaf 4) $(date): building client $(tput sgr 0)" +if ! 
go build -o /dev/null ./interop/client; then + fail "failed to build client" +else + pass "successfully built client" +fi + # Start server SERVER_LOG="$(mktemp)" -go run ./interop/server --use_tls &> $SERVER_LOG & +GRPC_GO_LOG_SEVERITY_LEVEL=info go run ./interop/server --use_tls &> $SERVER_LOG & for case in ${CASES[@]}; do - echo "$(tput setaf 4) testing: ${case} $(tput sgr 0)" + echo "$(tput setaf 4) $(date): testing: ${case} $(tput sgr 0)" CLIENT_LOG="$(mktemp)" - if ! timeout 20 go run ./interop/client --use_tls --server_host_override=foo.test.google.fr --use_test_ca --test_case="${case}" &> $CLIENT_LOG; then + if ! GRPC_GO_LOG_SEVERITY_LEVEL=info timeout 20 go run ./interop/client --use_tls --server_host_override=foo.test.google.fr --use_test_ca --test_case="${case}" &> $CLIENT_LOG; then fail "FAIL: test case ${case} got server log: $(cat $SERVER_LOG) @@ -95,7 +104,7 @@ for case in ${CASES[@]}; do $(cat $CLIENT_LOG) " else - pass "PASS: test case ${case}" + pass "PASS: test case ${case}" fi done diff --git a/interop/server/server.go b/interop/server/server.go index 16360abe9e7b..3d27ded80b68 100644 --- a/interop/server/server.go +++ b/interop/server/server.go @@ -55,6 +55,7 @@ func main() { if err != nil { logger.Fatalf("failed to listen: %v", err) } + logger.Infof("interop server listening on %v", lis.Addr()) var opts []grpc.ServerOption if *useTLS { if *certFile == "" { From 18fdf542fab01f909e600763267ef13770a222d5 Mon Sep 17 00:00:00 2001 From: Zhouyihai Ding Date: Wed, 6 Apr 2022 09:40:02 -0700 Subject: [PATCH 483/998] cmd/protoc-gen-go-grpc: allow hooks to modify client structs and service handlers (#5240) --- cmd/protoc-gen-go-grpc/grpc.go | 122 ++++++++++++++++++--------------- 1 file changed, 68 insertions(+), 54 deletions(-) diff --git a/cmd/protoc-gen-go-grpc/grpc.go b/cmd/protoc-gen-go-grpc/grpc.go index 66e26afc7d48..a21a97ac1be4 100644 --- a/cmd/protoc-gen-go-grpc/grpc.go +++ b/cmd/protoc-gen-go-grpc/grpc.go @@ -36,9 +36,11 @@ const ( type 
serviceGenerateHelperInterface interface { formatFullMethodName(service *protogen.Service, method *protogen.Method) string + generateClientStruct(g *protogen.GeneratedFile, clientName string) generateNewClientDefinitions(g *protogen.GeneratedFile, service *protogen.Service, clientName string) generateUnimplementedServerType(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) generateServerFunctions(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service, serverType string, serviceDescVar string) + formatHandlerFuncName(service *protogen.Service, hname string) string } type serviceGenerateHelper struct{} @@ -47,7 +49,15 @@ func (serviceGenerateHelper) formatFullMethodName(service *protogen.Service, met return fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) } +func (serviceGenerateHelper) generateClientStruct(g *protogen.GeneratedFile, clientName string) { + g.P("type ", unexport(clientName), " struct {") + g.P("cc ", grpcPackage.Ident("ClientConnInterface")) + g.P("}") + g.P() +} + func (serviceGenerateHelper) generateNewClientDefinitions(g *protogen.GeneratedFile, service *protogen.Service, clientName string) { + g.P("return &", unexport(clientName), "{cc}") } func (serviceGenerateHelper) generateUnimplementedServerType(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { @@ -77,6 +87,19 @@ func (serviceGenerateHelper) generateUnimplementedServerType(gen *protogen.Plugi } func (serviceGenerateHelper) generateServerFunctions(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service, serverType string, serviceDescVar string) { + // Server handler implementations. 
+ handlerNames := make([]string, 0, len(service.Methods)) + for _, method := range service.Methods { + hname := genServerMethod(gen, file, g, method, func(hname string) string { + return hname + }) + handlerNames = append(handlerNames, hname) + } + genServiceDesc(file, g, serviceDescVar, serverType, service, handlerNames) +} + +func (serviceGenerateHelper) formatHandlerFuncName(service *protogen.Service, hname string) string { + return hname } var helper serviceGenerateHelperInterface = serviceGenerateHelper{} @@ -158,10 +181,7 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P() // Client structure. - g.P("type ", unexport(clientName), " struct {") - g.P("cc ", grpcPackage.Ident("ClientConnInterface")) - g.P("}") - g.P() + helper.generateClientStruct(g, clientName) // NewClient factory. if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { @@ -169,7 +189,6 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated } g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {") helper.generateNewClientDefinitions(g, service, clientName) - g.P("return &", unexport(clientName), "{cc}") g.P("}") g.P() @@ -239,52 +258,6 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P() helper.generateServerFunctions(gen, file, g, service, serverType, serviceDescVar) - - // Server handler implementations. - handlerNames := make([]string, 0, len(service.Methods)) - for _, method := range service.Methods { - hname := genServerMethod(gen, file, g, method) - handlerNames = append(handlerNames, hname) - } - - // Service descriptor. 
- g.P("// ", serviceDescVar, " is the ", grpcPackage.Ident("ServiceDesc"), " for ", service.GoName, " service.") - g.P("// It's only intended for direct use with ", grpcPackage.Ident("RegisterService"), ",") - g.P("// and not to be introspected or modified (even as a copy)") - g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") - g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") - g.P("HandlerType: (*", serverType, ")(nil),") - g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") - for i, method := range service.Methods { - if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { - continue - } - g.P("{") - g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") - g.P("Handler: ", handlerNames[i], ",") - g.P("},") - } - g.P("},") - g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") - for i, method := range service.Methods { - if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { - continue - } - g.P("{") - g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") - g.P("Handler: ", handlerNames[i], ",") - if method.Desc.IsStreamingServer() { - g.P("ServerStreams: true,") - } - if method.Desc.IsStreamingClient() { - g.P("ClientStreams: true,") - } - g.P("},") - } - g.P("},") - g.P("Metadata: \"", file.Desc.Path(), "\",") - g.P("}") - g.P() } func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string { @@ -397,12 +370,53 @@ func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret } -func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string { +func genServiceDesc(file *protogen.File, g *protogen.GeneratedFile, serviceDescVar string, serverType string, service *protogen.Service, handlerNames []string) { + // Service descriptor. 
+ g.P("// ", serviceDescVar, " is the ", grpcPackage.Ident("ServiceDesc"), " for ", service.GoName, " service.") + g.P("// It's only intended for direct use with ", grpcPackage.Ident("RegisterService"), ",") + g.P("// and not to be introspected or modified (even as a copy)") + g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") + g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") + for i, method := range service.Methods { + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") + for i, method := range service.Methods { + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.Desc.IsStreamingServer() { + g.P("ServerStreams: true,") + } + if method.Desc.IsStreamingClient() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.Desc.Path(), "\",") + g.P("}") + g.P() +} + +func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, hnameFuncNameFormatter func(string) string) string { service := method.Parent hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName) if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { - g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") + g.P("func ", hnameFuncNameFormatter(hname), "(srv interface{}, ctx ", 
contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") g.P("in := new(", method.Input.GoIdent, ")") g.P("if err := dec(in); err != nil { return nil, err }") g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") @@ -420,7 +434,7 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene return hname } streamType := unexport(service.GoName) + method.GoName + "Server" - g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") + g.P("func ", hnameFuncNameFormatter(hname), "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") if !method.Desc.IsStreamingClient() { g.P("m := new(", method.Input.GoIdent, ")") g.P("if err := stream.RecvMsg(m); err != nil { return err }") From 4467a29dbb390764f9792ce61af53017d621eb7c Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Wed, 6 Apr 2022 09:40:41 -0700 Subject: [PATCH 484/998] gcp/observability: implement logging via binarylog (#5196) --- gcp/observability/config.go | 102 ++ gcp/observability/exporting.go | 128 +++ gcp/observability/go.mod | 14 + gcp/observability/go.sum | 462 +++++++++ .../internal/config/config.pb.go | 325 +++++++ .../internal/config/config.proto | 78 ++ .../internal/logging/logging.pb.go | 914 ++++++++++++++++++ .../internal/logging/logging.proto | 153 +++ gcp/observability/logging.go | 347 +++++++ gcp/observability/observability.go | 80 ++ gcp/observability/observability_test.go | 642 ++++++++++++ gcp/observability/tags.go | 46 + gcp/observability/tags_test.go | 64 ++ internal/binarylog/binarylog.go | 76 +- internal/binarylog/env_config.go | 6 +- internal/binarylog/env_config_test.go | 46 +- 16 files changed, 3425 insertions(+), 58 deletions(-) create mode 100644 gcp/observability/config.go create mode 100644 gcp/observability/exporting.go create mode 100644 gcp/observability/go.mod 
create mode 100644 gcp/observability/go.sum create mode 100644 gcp/observability/internal/config/config.pb.go create mode 100644 gcp/observability/internal/config/config.proto create mode 100644 gcp/observability/internal/logging/logging.pb.go create mode 100644 gcp/observability/internal/logging/logging.proto create mode 100644 gcp/observability/logging.go create mode 100644 gcp/observability/observability.go create mode 100644 gcp/observability/observability_test.go create mode 100644 gcp/observability/tags.go create mode 100644 gcp/observability/tags_test.go diff --git a/gcp/observability/config.go b/gcp/observability/config.go new file mode 100644 index 000000000000..aea2a2db3298 --- /dev/null +++ b/gcp/observability/config.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "context" + "fmt" + "os" + "regexp" + + gcplogging "cloud.google.com/go/logging" + "golang.org/x/oauth2/google" + configpb "google.golang.org/grpc/observability/internal/config" + "google.golang.org/protobuf/encoding/protojson" +) + +const ( + envObservabilityConfig = "GRPC_CONFIG_OBSERVABILITY" + envProjectID = "GOOGLE_CLOUD_PROJECT" + logFilterPatternRegexpStr = `^([\w./]+)/((?:\w+)|[*])$` +) + +var logFilterPatternRegexp = regexp.MustCompile(logFilterPatternRegexpStr) + +// fetchDefaultProjectID fetches the default GCP project id from environment. 
+func fetchDefaultProjectID(ctx context.Context) string { + // Step 1: Check ENV var + if s := os.Getenv(envProjectID); s != "" { + logger.Infof("Found project ID from env %v: %v", envProjectID, s) + return s + } + // Step 2: Check default credential + credentials, err := google.FindDefaultCredentials(ctx, gcplogging.WriteScope) + if err != nil { + logger.Infof("Failed to locate Google Default Credential: %v", err) + return "" + } + if credentials.ProjectID == "" { + logger.Infof("Failed to find project ID in default credential: %v", err) + return "" + } + logger.Infof("Found project ID from Google Default Credential: %v", credentials.ProjectID) + return credentials.ProjectID +} + +func validateFilters(config *configpb.ObservabilityConfig) error { + for _, filter := range config.GetLogFilters() { + if filter.Pattern == "*" { + continue + } + match := logFilterPatternRegexp.FindStringSubmatch(filter.Pattern) + if match == nil { + return fmt.Errorf("invalid log filter pattern: %v", filter.Pattern) + } + } + return nil +} + +func parseObservabilityConfig() (*configpb.ObservabilityConfig, error) { + // Parse the config from ENV var + if content := os.Getenv(envObservabilityConfig); content != "" { + var config configpb.ObservabilityConfig + if err := protojson.Unmarshal([]byte(content), &config); err != nil { + return nil, fmt.Errorf("error parsing observability config from env %v: %v", envObservabilityConfig, err) + } + if err := validateFilters(&config); err != nil { + return nil, fmt.Errorf("error parsing observability config: %v", err) + } + logger.Infof("Parsed ObservabilityConfig: %+v", &config) + return &config, nil + } + // If the ENV var doesn't exist, do nothing + return nil, nil +} + +func ensureProjectIDInObservabilityConfig(ctx context.Context, config *configpb.ObservabilityConfig) error { + if config.GetDestinationProjectId() == "" { + // Try to fetch the GCP project id + projectID := fetchDefaultProjectID(ctx) + if projectID == "" { + return 
fmt.Errorf("empty destination project ID") + } + config.DestinationProjectId = projectID + } + return nil +} diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go new file mode 100644 index 000000000000..898f35963ee5 --- /dev/null +++ b/gcp/observability/exporting.go @@ -0,0 +1,128 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "context" + "encoding/json" + "fmt" + "os" + + gcplogging "cloud.google.com/go/logging" + grpclogrecordpb "google.golang.org/grpc/observability/internal/logging" + "google.golang.org/protobuf/encoding/protojson" +) + +// loggingExporter is the interface of logging exporter for gRPC Observability. +// In future, we might expose this to allow users provide custom exporters. But +// now, it exists for testing purposes. +type loggingExporter interface { + // EmitGrpcLogRecord writes a gRPC LogRecord to cache without blocking. + EmitGrpcLogRecord(*grpclogrecordpb.GrpcLogRecord) + // Close flushes all pending data and closes the exporter. 
+ Close() error +} + +type cloudLoggingExporter struct { + projectID string + client *gcplogging.Client + logger *gcplogging.Logger +} + +func newCloudLoggingExporter(ctx context.Context, projectID string) (*cloudLoggingExporter, error) { + c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", projectID)) + if err != nil { + return nil, fmt.Errorf("failed to create cloudLoggingExporter: %v", err) + } + defer logger.Infof("Successfully created cloudLoggingExporter") + customTags := getCustomTags(os.Environ()) + if len(customTags) != 0 { + logger.Infof("Adding custom tags: %+v", customTags) + } + return &cloudLoggingExporter{ + projectID: projectID, + client: c, + logger: c.Logger("grpc", gcplogging.CommonLabels(customTags)), + }, nil +} + +// mapLogLevelToSeverity maps the gRPC defined log level to Cloud Logging's +// Severity. The canonical definition can be found at +// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#LogSeverity. +var logLevelToSeverity = map[grpclogrecordpb.GrpcLogRecord_LogLevel]gcplogging.Severity{ + grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_UNKNOWN: 0, + grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_TRACE: 100, // Cloud Logging doesn't have a trace level, treated as DEBUG. + grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_DEBUG: 100, + grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_INFO: 200, + grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_WARN: 400, + grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_ERROR: 500, + grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_CRITICAL: 600, +} + +var protoToJSONOptions = &protojson.MarshalOptions{ + UseProtoNames: true, + UseEnumNumbers: false, +} + +func (cle *cloudLoggingExporter) EmitGrpcLogRecord(l *grpclogrecordpb.GrpcLogRecord) { + // Converts the log record content to a more readable format via protojson. 
+ jsonBytes, err := protoToJSONOptions.Marshal(l) + if err != nil { + logger.Infof("Unable to marshal log record: %v", l) + return + } + var payload map[string]interface{} + err = json.Unmarshal(jsonBytes, &payload) + if err != nil { + logger.Infof("Unable to unmarshal bytes to JSON: %v", jsonBytes) + return + } + entry := gcplogging.Entry{ + Timestamp: l.Timestamp.AsTime(), + Severity: logLevelToSeverity[l.LogLevel], + Payload: payload, + } + cle.logger.Log(entry) + if logger.V(2) { + logger.Infof("Uploading event to CloudLogging: %+v", entry) + } +} + +func (cle *cloudLoggingExporter) Close() error { + var errFlush, errClose error + if cle.logger != nil { + errFlush = cle.logger.Flush() + } + if cle.client != nil { + errClose = cle.client.Close() + } + if errFlush != nil && errClose != nil { + return fmt.Errorf("failed to close exporter. Flush failed: %v; Close failed: %v", errFlush, errClose) + } + if errFlush != nil { + return errFlush + } + if errClose != nil { + return errClose + } + cle.logger = nil + cle.client = nil + logger.Infof("Closed CloudLogging exporter") + return nil +} diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod new file mode 100644 index 000000000000..d622a879e36b --- /dev/null +++ b/gcp/observability/go.mod @@ -0,0 +1,14 @@ +module google.golang.org/grpc/observability + +go 1.14 + +require ( + cloud.google.com/go/logging v1.4.2 + github.com/golang/protobuf v1.5.2 + github.com/google/uuid v1.3.0 + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 + google.golang.org/grpc v1.43.0 + google.golang.org/protobuf v1.27.1 +) + +replace google.golang.org/grpc => ../../ diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum new file mode 100644 index 000000000000..0f46213edea2 --- /dev/null +++ b/gcp/observability/go.sum @@ -0,0 +1,462 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod 
h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod 
h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/logging v1.4.2 h1:Mu2Q75VBDQlW1HlBMjTX4X84UFR73G1TiLlRYc/b7tA= +cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp 
v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp 
v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324 h1:pAwJxDByZctfPwzlNGrDN2BQLsdPb9NkhoTJtUkAO28= +golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text 
v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= 
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU= +google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= 
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a h1:VA0wtJaR+W1I11P2f535J7D/YxyvEFMTMvcmyeZ9FBE= +google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/gcp/observability/internal/config/config.pb.go b/gcp/observability/internal/config/config.pb.go new file mode 100644 index 000000000000..f41ddbf24db2 --- /dev/null +++ b/gcp/observability/internal/config/config.pb.go @@ -0,0 +1,325 @@ +// Copyright 2022 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Observability Config is used by gRPC Observability plugin to control provided +// observability features. It contains parameters to enable/disable certain +// features, or fine tune the verbosity. +// +// Note that gRPC may use this config in JSON form, not in protobuf form. 
This +// proto definition is intended to help document the schema but might not +// actually be used directly by gRPC. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: gcp/observability/internal/config/config.proto + +package config + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Configuration for observability behaviors. By default, no configuration is +// required for tracing/metrics/logging to function. This config captures the +// most common knobs for gRPC users. It's always possible to override with +// explicit config in code. +type ObservabilityConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether the logging data uploading to CloudLogging should be enabled or + // not. The default value is true. + EnableCloudLogging bool `protobuf:"varint,1,opt,name=enable_cloud_logging,json=enableCloudLogging,proto3" json:"enable_cloud_logging,omitempty"` + // The destination GCP project identifier for the uploading log entries. If + // empty, the gRPC Observability plugin will attempt to fetch the project_id + // from the GCP environment variables, or from the default credentials. 
+ DestinationProjectId string `protobuf:"bytes,2,opt,name=destination_project_id,json=destinationProjectId,proto3" json:"destination_project_id,omitempty"` + // A list of method config. The order matters here - the first pattern which + // matches the current method will apply the associated config options in + // the LogFilter. Any other LogFilter that also matches that comes later + // will be ignored. So a LogFilter of "*/*" should appear last in this list. + LogFilters []*ObservabilityConfig_LogFilter `protobuf:"bytes,3,rep,name=log_filters,json=logFilters,proto3" json:"log_filters,omitempty"` +} + +func (x *ObservabilityConfig) Reset() { + *x = ObservabilityConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_gcp_observability_internal_config_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObservabilityConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObservabilityConfig) ProtoMessage() {} + +func (x *ObservabilityConfig) ProtoReflect() protoreflect.Message { + mi := &file_gcp_observability_internal_config_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObservabilityConfig.ProtoReflect.Descriptor instead. 
+func (*ObservabilityConfig) Descriptor() ([]byte, []int) { + return file_gcp_observability_internal_config_config_proto_rawDescGZIP(), []int{0} +} + +func (x *ObservabilityConfig) GetEnableCloudLogging() bool { + if x != nil { + return x.EnableCloudLogging + } + return false +} + +func (x *ObservabilityConfig) GetDestinationProjectId() string { + if x != nil { + return x.DestinationProjectId + } + return "" +} + +func (x *ObservabilityConfig) GetLogFilters() []*ObservabilityConfig_LogFilter { + if x != nil { + return x.LogFilters + } + return nil +} + +type ObservabilityConfig_LogFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Pattern is a string which can select a group of method names. By + // default, the pattern is an empty string, matching no methods. + // + // Only "*" Wildcard is accepted for pattern. A pattern is in the form + // of / or just a character "*" . + // + // If the pattern is "*", it specifies the defaults for all the + // services; If the pattern is /*, it specifies the defaults + // for all methods in the specified service ; If the pattern is + // */, this is not supported. + // + // Examples: + // - "Foo/Bar" selects only the method "Bar" from service "Foo" + // - "Foo/*" selects all methods from service "Foo" + // - "*" selects all methods from all services. + Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"` + // Number of bytes of each header to log. If the size of the header is + // greater than the defined limit, content pass the limit will be + // truncated. The default value is 0. + HeaderBytes int32 `protobuf:"varint,2,opt,name=header_bytes,json=headerBytes,proto3" json:"header_bytes,omitempty"` + // Number of bytes of each message to log. If the size of the message is + // greater than the defined limit, content pass the limit will be + // truncated. The default value is 0. 
+ MessageBytes int32 `protobuf:"varint,3,opt,name=message_bytes,json=messageBytes,proto3" json:"message_bytes,omitempty"` +} + +func (x *ObservabilityConfig_LogFilter) Reset() { + *x = ObservabilityConfig_LogFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_gcp_observability_internal_config_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObservabilityConfig_LogFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObservabilityConfig_LogFilter) ProtoMessage() {} + +func (x *ObservabilityConfig_LogFilter) ProtoReflect() protoreflect.Message { + mi := &file_gcp_observability_internal_config_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObservabilityConfig_LogFilter.ProtoReflect.Descriptor instead. 
+func (*ObservabilityConfig_LogFilter) Descriptor() ([]byte, []int) { + return file_gcp_observability_internal_config_config_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ObservabilityConfig_LogFilter) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *ObservabilityConfig_LogFilter) GetHeaderBytes() int32 { + if x != nil { + return x.HeaderBytes + } + return 0 +} + +func (x *ObservabilityConfig_LogFilter) GetMessageBytes() int32 { + if x != nil { + return x.MessageBytes + } + return 0 +} + +var File_gcp_observability_internal_config_config_proto protoreflect.FileDescriptor + +var file_gcp_observability_internal_config_config_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x21, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xcf, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, + 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x64, 0x12, 
0x61, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x6d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x21, 0x0a, + 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x74, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x18, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_gcp_observability_internal_config_config_proto_rawDescOnce sync.Once + file_gcp_observability_internal_config_config_proto_rawDescData = file_gcp_observability_internal_config_config_proto_rawDesc +) + +func file_gcp_observability_internal_config_config_proto_rawDescGZIP() []byte { + file_gcp_observability_internal_config_config_proto_rawDescOnce.Do(func() { + file_gcp_observability_internal_config_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_gcp_observability_internal_config_config_proto_rawDescData) + }) + return file_gcp_observability_internal_config_config_proto_rawDescData +} + +var file_gcp_observability_internal_config_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_gcp_observability_internal_config_config_proto_goTypes = []interface{}{ + (*ObservabilityConfig)(nil), // 0: grpc.observability.config.v1alpha.ObservabilityConfig + (*ObservabilityConfig_LogFilter)(nil), // 1: grpc.observability.config.v1alpha.ObservabilityConfig.LogFilter +} +var file_gcp_observability_internal_config_config_proto_depIdxs = []int32{ + 1, // 0: grpc.observability.config.v1alpha.ObservabilityConfig.log_filters:type_name -> grpc.observability.config.v1alpha.ObservabilityConfig.LogFilter + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_gcp_observability_internal_config_config_proto_init() } +func file_gcp_observability_internal_config_config_proto_init() { + if File_gcp_observability_internal_config_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_gcp_observability_internal_config_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObservabilityConfig); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gcp_observability_internal_config_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObservabilityConfig_LogFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_gcp_observability_internal_config_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_gcp_observability_internal_config_config_proto_goTypes, + DependencyIndexes: file_gcp_observability_internal_config_config_proto_depIdxs, + MessageInfos: file_gcp_observability_internal_config_config_proto_msgTypes, + }.Build() + File_gcp_observability_internal_config_config_proto = out.File + file_gcp_observability_internal_config_config_proto_rawDesc = nil + file_gcp_observability_internal_config_config_proto_goTypes = nil + file_gcp_observability_internal_config_config_proto_depIdxs = nil +} diff --git a/gcp/observability/internal/config/config.proto b/gcp/observability/internal/config/config.proto new file mode 100644 index 000000000000..116300dcd25c --- /dev/null +++ b/gcp/observability/internal/config/config.proto @@ -0,0 +1,78 @@ +// Copyright 2022 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Observability Config is used by gRPC Observability plugin to control provided +// observability features. It contains parameters to enable/disable certain +// features, or fine tune the verbosity. +// +// Note that gRPC may use this config in JSON form, not in protobuf form. This +// proto definition is intended to help document the schema but might not +// actually be used directly by gRPC. + +syntax = "proto3"; + +package grpc.observability.config.v1alpha; + +option java_package = "io.grpc.observability.config"; +option java_multiple_files = true; +option java_outer_classname = "ObservabilityConfigProto"; +option go_package = "google.golang.org/grpc/gcp/observability/internal/config"; + +// Configuration for observability behaviors. By default, no configuration is +// required for tracing/metrics/logging to function. This config captures the +// most common knobs for gRPC users. It's always possible to override with +// explicit config in code. +message ObservabilityConfig { + // Whether the logging data uploading to CloudLogging should be enabled or + // not. The default value is true. + bool enable_cloud_logging = 1; + + // The destination GCP project identifier for the uploading log entries. If + // empty, the gRPC Observability plugin will attempt to fetch the project_id + // from the GCP environment variables, or from the default credentials. + string destination_project_id = 2; + + message LogFilter { + // Pattern is a string which can select a group of method names. By + // default, the pattern is an empty string, matching no methods. + // + // Only "*" Wildcard is accepted for pattern. A pattern is in the form + // of / or just a character "*" . 
+ // + // If the pattern is "*", it specifies the defaults for all the + // services; If the pattern is /*, it specifies the defaults + // for all methods in the specified service ; If the pattern is + // */, this is not supported. + // + // Examples: + // - "Foo/Bar" selects only the method "Bar" from service "Foo" + // - "Foo/*" selects all methods from service "Foo" + // - "*" selects all methods from all services. + string pattern = 1; + // Number of bytes of each header to log. If the size of the header is + // greater than the defined limit, content pass the limit will be + // truncated. The default value is 0. + int32 header_bytes = 2; + // Number of bytes of each message to log. If the size of the message is + // greater than the defined limit, content pass the limit will be + // truncated. The default value is 0. + int32 message_bytes = 3; + } + + // A list of method config. The order matters here - the first pattern which + // matches the current method will apply the associated config options in + // the LogFilter. Any other LogFilter that also matches that comes later + // will be ignored. So a LogFilter of "*/*" should appear last in this list. + repeated LogFilter log_filters = 3; +} diff --git a/gcp/observability/internal/logging/logging.pb.go b/gcp/observability/internal/logging/logging.pb.go new file mode 100644 index 000000000000..a47c405759b6 --- /dev/null +++ b/gcp/observability/internal/logging/logging.pb.go @@ -0,0 +1,914 @@ +// Copyright 2022 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: gcp/observability/internal/logging/logging.proto + +package logging + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 + +// List of event types +type GrpcLogRecord_EventType int32 + +const ( + // Unknown event type + GrpcLogRecord_GRPC_CALL_UNKNOWN GrpcLogRecord_EventType = 0 + // Header sent from client to server + GrpcLogRecord_GRPC_CALL_REQUEST_HEADER GrpcLogRecord_EventType = 1 + // Header sent from server to client + GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER GrpcLogRecord_EventType = 2 + // Message sent from client to server + GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE GrpcLogRecord_EventType = 3 + // Message sent from server to client + GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE GrpcLogRecord_EventType = 4 + // Trailer indicates the end of the gRPC call + GrpcLogRecord_GRPC_CALL_TRAILER GrpcLogRecord_EventType = 5 + // A signal that client is done sending + GrpcLogRecord_GRPC_CALL_HALF_CLOSE GrpcLogRecord_EventType = 6 + // A signal that the rpc is canceled + GrpcLogRecord_GRPC_CALL_CANCEL GrpcLogRecord_EventType = 7 +) + +// Enum value maps for GrpcLogRecord_EventType. 
+var ( + GrpcLogRecord_EventType_name = map[int32]string{ + 0: "GRPC_CALL_UNKNOWN", + 1: "GRPC_CALL_REQUEST_HEADER", + 2: "GRPC_CALL_RESPONSE_HEADER", + 3: "GRPC_CALL_REQUEST_MESSAGE", + 4: "GRPC_CALL_RESPONSE_MESSAGE", + 5: "GRPC_CALL_TRAILER", + 6: "GRPC_CALL_HALF_CLOSE", + 7: "GRPC_CALL_CANCEL", + } + GrpcLogRecord_EventType_value = map[string]int32{ + "GRPC_CALL_UNKNOWN": 0, + "GRPC_CALL_REQUEST_HEADER": 1, + "GRPC_CALL_RESPONSE_HEADER": 2, + "GRPC_CALL_REQUEST_MESSAGE": 3, + "GRPC_CALL_RESPONSE_MESSAGE": 4, + "GRPC_CALL_TRAILER": 5, + "GRPC_CALL_HALF_CLOSE": 6, + "GRPC_CALL_CANCEL": 7, + } +) + +func (x GrpcLogRecord_EventType) Enum() *GrpcLogRecord_EventType { + p := new(GrpcLogRecord_EventType) + *p = x + return p +} + +func (x GrpcLogRecord_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogRecord_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_gcp_observability_internal_logging_logging_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogRecord_EventType) Type() protoreflect.EnumType { + return &file_gcp_observability_internal_logging_logging_proto_enumTypes[0] +} + +func (x GrpcLogRecord_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogRecord_EventType.Descriptor instead. +func (GrpcLogRecord_EventType) EnumDescriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 0} +} + +// The entity that generates the log entry +type GrpcLogRecord_EventLogger int32 + +const ( + GrpcLogRecord_LOGGER_UNKNOWN GrpcLogRecord_EventLogger = 0 + GrpcLogRecord_LOGGER_CLIENT GrpcLogRecord_EventLogger = 1 + GrpcLogRecord_LOGGER_SERVER GrpcLogRecord_EventLogger = 2 +) + +// Enum value maps for GrpcLogRecord_EventLogger. 
+var ( + GrpcLogRecord_EventLogger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogRecord_EventLogger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogRecord_EventLogger) Enum() *GrpcLogRecord_EventLogger { + p := new(GrpcLogRecord_EventLogger) + *p = x + return p +} + +func (x GrpcLogRecord_EventLogger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogRecord_EventLogger) Descriptor() protoreflect.EnumDescriptor { + return file_gcp_observability_internal_logging_logging_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogRecord_EventLogger) Type() protoreflect.EnumType { + return &file_gcp_observability_internal_logging_logging_proto_enumTypes[1] +} + +func (x GrpcLogRecord_EventLogger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogRecord_EventLogger.Descriptor instead. +func (GrpcLogRecord_EventLogger) EnumDescriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 1} +} + +// The log severity level of the log entry +type GrpcLogRecord_LogLevel int32 + +const ( + GrpcLogRecord_LOG_LEVEL_UNKNOWN GrpcLogRecord_LogLevel = 0 + GrpcLogRecord_LOG_LEVEL_TRACE GrpcLogRecord_LogLevel = 1 + GrpcLogRecord_LOG_LEVEL_DEBUG GrpcLogRecord_LogLevel = 2 + GrpcLogRecord_LOG_LEVEL_INFO GrpcLogRecord_LogLevel = 3 + GrpcLogRecord_LOG_LEVEL_WARN GrpcLogRecord_LogLevel = 4 + GrpcLogRecord_LOG_LEVEL_ERROR GrpcLogRecord_LogLevel = 5 + GrpcLogRecord_LOG_LEVEL_CRITICAL GrpcLogRecord_LogLevel = 6 +) + +// Enum value maps for GrpcLogRecord_LogLevel. 
+var ( + GrpcLogRecord_LogLevel_name = map[int32]string{ + 0: "LOG_LEVEL_UNKNOWN", + 1: "LOG_LEVEL_TRACE", + 2: "LOG_LEVEL_DEBUG", + 3: "LOG_LEVEL_INFO", + 4: "LOG_LEVEL_WARN", + 5: "LOG_LEVEL_ERROR", + 6: "LOG_LEVEL_CRITICAL", + } + GrpcLogRecord_LogLevel_value = map[string]int32{ + "LOG_LEVEL_UNKNOWN": 0, + "LOG_LEVEL_TRACE": 1, + "LOG_LEVEL_DEBUG": 2, + "LOG_LEVEL_INFO": 3, + "LOG_LEVEL_WARN": 4, + "LOG_LEVEL_ERROR": 5, + "LOG_LEVEL_CRITICAL": 6, + } +) + +func (x GrpcLogRecord_LogLevel) Enum() *GrpcLogRecord_LogLevel { + p := new(GrpcLogRecord_LogLevel) + *p = x + return p +} + +func (x GrpcLogRecord_LogLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogRecord_LogLevel) Descriptor() protoreflect.EnumDescriptor { + return file_gcp_observability_internal_logging_logging_proto_enumTypes[2].Descriptor() +} + +func (GrpcLogRecord_LogLevel) Type() protoreflect.EnumType { + return &file_gcp_observability_internal_logging_logging_proto_enumTypes[2] +} + +func (x GrpcLogRecord_LogLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogRecord_LogLevel.Descriptor instead. +func (GrpcLogRecord_LogLevel) EnumDescriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 2} +} + +type GrpcLogRecord_Address_Type int32 + +const ( + GrpcLogRecord_Address_TYPE_UNKNOWN GrpcLogRecord_Address_Type = 0 + GrpcLogRecord_Address_TYPE_IPV4 GrpcLogRecord_Address_Type = 1 // in 1.2.3.4 form + GrpcLogRecord_Address_TYPE_IPV6 GrpcLogRecord_Address_Type = 2 // IPv6 canonical form (RFC5952 section 4) + GrpcLogRecord_Address_TYPE_UNIX GrpcLogRecord_Address_Type = 3 // UDS string +) + +// Enum value maps for GrpcLogRecord_Address_Type. 
+var ( + GrpcLogRecord_Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + GrpcLogRecord_Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x GrpcLogRecord_Address_Type) Enum() *GrpcLogRecord_Address_Type { + p := new(GrpcLogRecord_Address_Type) + *p = x + return p +} + +func (x GrpcLogRecord_Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogRecord_Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_gcp_observability_internal_logging_logging_proto_enumTypes[3].Descriptor() +} + +func (GrpcLogRecord_Address_Type) Type() protoreflect.EnumType { + return &file_gcp_observability_internal_logging_logging_proto_enumTypes[3] +} + +func (x GrpcLogRecord_Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogRecord_Address_Type.Descriptor instead. +func (GrpcLogRecord_Address_Type) EnumDescriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 2, 0} +} + +type GrpcLogRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the log event + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries. They will all have the same rpc_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. 
+ RpcId string `protobuf:"bytes,2,opt,name=rpc_id,json=rpcId,proto3" json:"rpc_id,omitempty"` + EventType GrpcLogRecord_EventType `protobuf:"varint,3,opt,name=event_type,json=eventType,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_EventType" json:"event_type,omitempty"` // one of the above EventType enum + EventLogger GrpcLogRecord_EventLogger `protobuf:"varint,4,opt,name=event_logger,json=eventLogger,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_EventLogger" json:"event_logger,omitempty"` // one of the above EventLogger enum + // the name of the service + ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // the name of the RPC method + MethodName string `protobuf:"bytes,6,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + LogLevel GrpcLogRecord_LogLevel `protobuf:"varint,7,opt,name=log_level,json=logLevel,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_LogLevel" json:"log_level,omitempty"` // one of the above LogLevel enum + // Peer address information. On client side, peer is logged on server + // header event or trailer event (if trailer-only). On server side, peer + // is always logged on the client header event. + PeerAddress *GrpcLogRecord_Address `protobuf:"bytes,8,opt,name=peer_address,json=peerAddress,proto3" json:"peer_address,omitempty"` + // the RPC timeout value + Timeout *durationpb.Duration `protobuf:"bytes,11,opt,name=timeout,proto3" json:"timeout,omitempty"` + // A single process may be used to run multiple virtual servers with + // different identities. + // The authority is the name of such a server identify. It is typically a + // portion of the URI in the form of or :. + Authority string `protobuf:"bytes,12,opt,name=authority,proto3" json:"authority,omitempty"` + // Size of the message or metadata, depending on the event type, + // regardless of whether the full message or metadata is being logged + // (i.e. 
could be truncated or omitted). + PayloadSize uint32 `protobuf:"varint,13,opt,name=payload_size,json=payloadSize,proto3" json:"payload_size,omitempty"` + // true if message or metadata field is either truncated or omitted due + // to config options + PayloadTruncated bool `protobuf:"varint,14,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Used by header event or trailer event + Metadata *GrpcLogRecord_Metadata `protobuf:"bytes,15,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The entry sequence ID for this call. The first message has a value of 1, + // to disambiguate from an unset value. The purpose of this field is to + // detect missing entries in environments where durability or ordering is + // not guaranteed. + SequenceId uint64 `protobuf:"varint,16,opt,name=sequence_id,json=sequenceId,proto3" json:"sequence_id,omitempty"` + // Used by message event + Message []byte `protobuf:"bytes,17,opt,name=message,proto3" json:"message,omitempty"` + // The gRPC status code + StatusCode uint32 `protobuf:"varint,18,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // The gRPC status message + StatusMessage string `protobuf:"bytes,19,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the grpc-status-details-bin metadata key, if any. 
+ // This is always an encoded google.rpc.Status message + StatusDetails []byte `protobuf:"bytes,20,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *GrpcLogRecord) Reset() { + *x = GrpcLogRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogRecord) ProtoMessage() {} + +func (x *GrpcLogRecord) ProtoReflect() protoreflect.Message { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogRecord.ProtoReflect.Descriptor instead. +func (*GrpcLogRecord) Descriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0} +} + +func (x *GrpcLogRecord) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *GrpcLogRecord) GetRpcId() string { + if x != nil { + return x.RpcId + } + return "" +} + +func (x *GrpcLogRecord) GetEventType() GrpcLogRecord_EventType { + if x != nil { + return x.EventType + } + return GrpcLogRecord_GRPC_CALL_UNKNOWN +} + +func (x *GrpcLogRecord) GetEventLogger() GrpcLogRecord_EventLogger { + if x != nil { + return x.EventLogger + } + return GrpcLogRecord_LOGGER_UNKNOWN +} + +func (x *GrpcLogRecord) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *GrpcLogRecord) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *GrpcLogRecord) GetLogLevel() GrpcLogRecord_LogLevel { + if x != nil { + return x.LogLevel + } + 
return GrpcLogRecord_LOG_LEVEL_UNKNOWN +} + +func (x *GrpcLogRecord) GetPeerAddress() *GrpcLogRecord_Address { + if x != nil { + return x.PeerAddress + } + return nil +} + +func (x *GrpcLogRecord) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *GrpcLogRecord) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *GrpcLogRecord) GetPayloadSize() uint32 { + if x != nil { + return x.PayloadSize + } + return 0 +} + +func (x *GrpcLogRecord) GetPayloadTruncated() bool { + if x != nil { + return x.PayloadTruncated + } + return false +} + +func (x *GrpcLogRecord) GetMetadata() *GrpcLogRecord_Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *GrpcLogRecord) GetSequenceId() uint64 { + if x != nil { + return x.SequenceId + } + return 0 +} + +func (x *GrpcLogRecord) GetMessage() []byte { + if x != nil { + return x.Message + } + return nil +} + +func (x *GrpcLogRecord) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *GrpcLogRecord) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *GrpcLogRecord) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// A list of metadata pairs +type GrpcLogRecord_Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*GrpcLogRecord_MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *GrpcLogRecord_Metadata) Reset() { + *x = GrpcLogRecord_Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogRecord_Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogRecord_Metadata) 
ProtoMessage() {} + +func (x *GrpcLogRecord_Metadata) ProtoReflect() protoreflect.Message { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogRecord_Metadata.ProtoReflect.Descriptor instead. +func (*GrpcLogRecord_Metadata) Descriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *GrpcLogRecord_Metadata) GetEntry() []*GrpcLogRecord_MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// One metadata key value pair +type GrpcLogRecord_MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *GrpcLogRecord_MetadataEntry) Reset() { + *x = GrpcLogRecord_MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogRecord_MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogRecord_MetadataEntry) ProtoMessage() {} + +func (x *GrpcLogRecord_MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogRecord_MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogRecord_MetadataEntry) Descriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *GrpcLogRecord_MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *GrpcLogRecord_MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type GrpcLogRecord_Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type GrpcLogRecord_Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *GrpcLogRecord_Address) Reset() { + *x = GrpcLogRecord_Address{} + if protoimpl.UnsafeEnabled { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogRecord_Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogRecord_Address) ProtoMessage() {} + +func (x *GrpcLogRecord_Address) ProtoReflect() protoreflect.Message { + mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogRecord_Address.ProtoReflect.Descriptor instead. 
+func (*GrpcLogRecord_Address) Descriptor() ([]byte, []int) { + return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 2} +} + +func (x *GrpcLogRecord_Address) GetType() GrpcLogRecord_Address_Type { + if x != nil { + return x.Type + } + return GrpcLogRecord_Address_TYPE_UNKNOWN +} + +func (x *GrpcLogRecord_Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *GrpcLogRecord_Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_gcp_observability_internal_logging_logging_proto protoreflect.FileDescriptor + +var file_gcp_observability_internal_logging_logging_proto_rawDesc = []byte{ + 0x0a, 0x30, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x2f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x1d, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xe5, 0x0d, 0x0a, 0x0d, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 
0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, + 0x0a, 0x06, 0x72, 0x70, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x72, 0x70, 0x63, 0x49, 0x64, 0x12, 0x55, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, + 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, + 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x5b, 0x0a, 0x0c, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x0b, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, + 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 
0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4c, + 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x57, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0b, 0x70, + 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x21, 0x0a, + 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x51, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, + 
0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x49, + 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x13, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x1a, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x1a, 0xd2, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4d, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, + 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, + 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x22, 0xe5, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, + 0x4c, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x47, + 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, + 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1d, 0x0a, 
0x19, 0x47, 0x52, 0x50, + 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, + 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, + 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x4d, 0x45, + 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x5f, + 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x4d, 0x45, + 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x50, 0x43, 0x5f, + 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x05, 0x12, 0x18, + 0x0a, 0x14, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x48, 0x41, 0x4c, 0x46, + 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x52, 0x50, 0x43, + 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x10, 0x07, 0x22, 0x47, + 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x22, 0xa0, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x15, 0x0a, 0x11, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, + 0x4c, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, + 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x01, + 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x44, 0x45, + 0x42, 0x55, 0x47, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, + 0x45, 0x4c, 
0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x4f, 0x47, + 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, + 0x0f, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, + 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x06, 0x42, 0x77, 0x0a, 0x1d, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x42, 0x19, 0x4f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_gcp_observability_internal_logging_logging_proto_rawDescOnce sync.Once + file_gcp_observability_internal_logging_logging_proto_rawDescData = file_gcp_observability_internal_logging_logging_proto_rawDesc +) + +func file_gcp_observability_internal_logging_logging_proto_rawDescGZIP() []byte { + file_gcp_observability_internal_logging_logging_proto_rawDescOnce.Do(func() { + file_gcp_observability_internal_logging_logging_proto_rawDescData = protoimpl.X.CompressGZIP(file_gcp_observability_internal_logging_logging_proto_rawDescData) + }) + return file_gcp_observability_internal_logging_logging_proto_rawDescData +} + +var file_gcp_observability_internal_logging_logging_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var 
file_gcp_observability_internal_logging_logging_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_gcp_observability_internal_logging_logging_proto_goTypes = []interface{}{ + (GrpcLogRecord_EventType)(0), // 0: grpc.observability.logging.v1.GrpcLogRecord.EventType + (GrpcLogRecord_EventLogger)(0), // 1: grpc.observability.logging.v1.GrpcLogRecord.EventLogger + (GrpcLogRecord_LogLevel)(0), // 2: grpc.observability.logging.v1.GrpcLogRecord.LogLevel + (GrpcLogRecord_Address_Type)(0), // 3: grpc.observability.logging.v1.GrpcLogRecord.Address.Type + (*GrpcLogRecord)(nil), // 4: grpc.observability.logging.v1.GrpcLogRecord + (*GrpcLogRecord_Metadata)(nil), // 5: grpc.observability.logging.v1.GrpcLogRecord.Metadata + (*GrpcLogRecord_MetadataEntry)(nil), // 6: grpc.observability.logging.v1.GrpcLogRecord.MetadataEntry + (*GrpcLogRecord_Address)(nil), // 7: grpc.observability.logging.v1.GrpcLogRecord.Address + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_gcp_observability_internal_logging_logging_proto_depIdxs = []int32{ + 8, // 0: grpc.observability.logging.v1.GrpcLogRecord.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.observability.logging.v1.GrpcLogRecord.event_type:type_name -> grpc.observability.logging.v1.GrpcLogRecord.EventType + 1, // 2: grpc.observability.logging.v1.GrpcLogRecord.event_logger:type_name -> grpc.observability.logging.v1.GrpcLogRecord.EventLogger + 2, // 3: grpc.observability.logging.v1.GrpcLogRecord.log_level:type_name -> grpc.observability.logging.v1.GrpcLogRecord.LogLevel + 7, // 4: grpc.observability.logging.v1.GrpcLogRecord.peer_address:type_name -> grpc.observability.logging.v1.GrpcLogRecord.Address + 9, // 5: grpc.observability.logging.v1.GrpcLogRecord.timeout:type_name -> google.protobuf.Duration + 5, // 6: grpc.observability.logging.v1.GrpcLogRecord.metadata:type_name -> grpc.observability.logging.v1.GrpcLogRecord.Metadata + 
6, // 7: grpc.observability.logging.v1.GrpcLogRecord.Metadata.entry:type_name -> grpc.observability.logging.v1.GrpcLogRecord.MetadataEntry + 3, // 8: grpc.observability.logging.v1.GrpcLogRecord.Address.type:type_name -> grpc.observability.logging.v1.GrpcLogRecord.Address.Type + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_gcp_observability_internal_logging_logging_proto_init() } +func file_gcp_observability_internal_logging_logging_proto_init() { + if File_gcp_observability_internal_logging_logging_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_gcp_observability_internal_logging_logging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcLogRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gcp_observability_internal_logging_logging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcLogRecord_Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gcp_observability_internal_logging_logging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcLogRecord_MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_gcp_observability_internal_logging_logging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcLogRecord_Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x 
struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_gcp_observability_internal_logging_logging_proto_rawDesc, + NumEnums: 4, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_gcp_observability_internal_logging_logging_proto_goTypes, + DependencyIndexes: file_gcp_observability_internal_logging_logging_proto_depIdxs, + EnumInfos: file_gcp_observability_internal_logging_logging_proto_enumTypes, + MessageInfos: file_gcp_observability_internal_logging_logging_proto_msgTypes, + }.Build() + File_gcp_observability_internal_logging_logging_proto = out.File + file_gcp_observability_internal_logging_logging_proto_rawDesc = nil + file_gcp_observability_internal_logging_logging_proto_goTypes = nil + file_gcp_observability_internal_logging_logging_proto_depIdxs = nil +} diff --git a/gcp/observability/internal/logging/logging.proto b/gcp/observability/internal/logging/logging.proto new file mode 100644 index 000000000000..206d953a9ca8 --- /dev/null +++ b/gcp/observability/internal/logging/logging.proto @@ -0,0 +1,153 @@ +// Copyright 2022 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package grpc.observability.logging.v1; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option java_package = "io.grpc.observability.logging"; +option java_multiple_files = true; +option java_outer_classname = "ObservabilityLoggingProto"; +option go_package = "google.golang.org/grpc/gcp/observability/internal/logging"; + +message GrpcLogRecord { + // List of event types + enum EventType { + // Unknown event type + GRPC_CALL_UNKNOWN = 0; + // Header sent from client to server + GRPC_CALL_REQUEST_HEADER = 1; + // Header sent from server to client + GRPC_CALL_RESPONSE_HEADER = 2; + // Message sent from client to server + GRPC_CALL_REQUEST_MESSAGE = 3; + // Message sent from server to client + GRPC_CALL_RESPONSE_MESSAGE = 4; + // Trailer indicates the end of the gRPC call + GRPC_CALL_TRAILER = 5; + // A signal that client is done sending + GRPC_CALL_HALF_CLOSE = 6; + // A signal that the rpc is canceled + GRPC_CALL_CANCEL = 7; + } + // The entity that generates the log entry + enum EventLogger { + LOGGER_UNKNOWN = 0; + LOGGER_CLIENT = 1; + LOGGER_SERVER = 2; + } + // The log severity level of the log entry + enum LogLevel { + LOG_LEVEL_UNKNOWN = 0; + LOG_LEVEL_TRACE = 1; + LOG_LEVEL_DEBUG = 2; + LOG_LEVEL_INFO = 3; + LOG_LEVEL_WARN = 4; + LOG_LEVEL_ERROR = 5; + LOG_LEVEL_CRITICAL = 6; + } + + // The timestamp of the log event + google.protobuf.Timestamp timestamp = 1; + + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries. They will all have the same rpc_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. 
+ string rpc_id = 2; + + EventType event_type = 3; // one of the above EventType enum + EventLogger event_logger = 4; // one of the above EventLogger enum + + // the name of the service + string service_name = 5; + // the name of the RPC method + string method_name = 6; + + LogLevel log_level = 7; // one of the above LogLevel enum + + // Peer address information. On client side, peer is logged on server + // header event or trailer event (if trailer-only). On server side, peer + // is always logged on the client header event. + Address peer_address = 8; + + // the RPC timeout value + google.protobuf.Duration timeout = 11; + + // A single process may be used to run multiple virtual servers with + // different identities. + // The authority is the name of such a server identify. It is typically a + // portion of the URI in the form of or :. + string authority = 12; + + // Size of the message or metadata, depending on the event type, + // regardless of whether the full message or metadata is being logged + // (i.e. could be truncated or omitted). + uint32 payload_size = 13; + + // true if message or metadata field is either truncated or omitted due + // to config options + bool payload_truncated = 14; + + // Used by header event or trailer event + Metadata metadata = 15; + + // The entry sequence ID for this call. The first message has a value of 1, + // to disambiguate from an unset value. The purpose of this field is to + // detect missing entries in environments where durability or ordering is + // not guaranteed. + uint64 sequence_id = 16; + + // Used by message event + bytes message = 17; + + // The gRPC status code + uint32 status_code = 18; + + // The gRPC status message + string status_message = 19; + + // The value of the grpc-status-details-bin metadata key, if any. 
+ // This is always an encoded google.rpc.Status message + bytes status_details = 20; + + // A list of metadata pairs + message Metadata { + repeated MetadataEntry entry = 1; + } + + // One metadata key value pair + message MetadataEntry { + string key = 1; + bytes value = 2; + } + + // Address information + message Address { + enum Type { + TYPE_UNKNOWN = 0; + TYPE_IPV4 = 1; // in 1.2.3.4 form + TYPE_IPV6 = 2; // IPv6 canonical form (RFC5952 section 4) + TYPE_UNIX = 3; // UDS string + } + Type type = 1; + string address = 2; + // only for TYPE_IPV4 and TYPE_IPV6 + uint32 ip_port = 3; + } +} diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go new file mode 100644 index 000000000000..fc9366440e38 --- /dev/null +++ b/gcp/observability/logging.go @@ -0,0 +1,347 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + "unsafe" + + "github.com/google/uuid" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + iblog "google.golang.org/grpc/internal/binarylog" + configpb "google.golang.org/grpc/observability/internal/config" + grpclogrecordpb "google.golang.org/grpc/observability/internal/logging" +) + +// translateMetadata translates the metadata from Binary Logging format to +// its GrpcLogRecord equivalent. 
+func translateMetadata(m *binlogpb.Metadata) *grpclogrecordpb.GrpcLogRecord_Metadata { + var res grpclogrecordpb.GrpcLogRecord_Metadata + res.Entry = make([]*grpclogrecordpb.GrpcLogRecord_MetadataEntry, len(m.Entry)) + for i, e := range m.Entry { + res.Entry[i] = &grpclogrecordpb.GrpcLogRecord_MetadataEntry{ + Key: e.Key, + Value: e.Value, + } + } + return &res +} + +func setPeerIfPresent(binlogEntry *binlogpb.GrpcLogEntry, grpcLogRecord *grpclogrecordpb.GrpcLogRecord) { + if binlogEntry.GetPeer() != nil { + grpcLogRecord.PeerAddress = &grpclogrecordpb.GrpcLogRecord_Address{ + Type: grpclogrecordpb.GrpcLogRecord_Address_Type(binlogEntry.Peer.Type), + Address: binlogEntry.Peer.Address, + IpPort: binlogEntry.Peer.IpPort, + } + } +} + +var loggerTypeToEventLogger = map[binlogpb.GrpcLogEntry_Logger]grpclogrecordpb.GrpcLogRecord_EventLogger{ + binlogpb.GrpcLogEntry_LOGGER_UNKNOWN: grpclogrecordpb.GrpcLogRecord_LOGGER_UNKNOWN, + binlogpb.GrpcLogEntry_LOGGER_CLIENT: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + binlogpb.GrpcLogEntry_LOGGER_SERVER: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, +} + +type binaryMethodLogger struct { + rpcID, serviceName, methodName string + originalMethodLogger iblog.MethodLogger + childMethodLogger iblog.MethodLogger + exporter loggingExporter +} + +func (ml *binaryMethodLogger) Log(c iblog.LogEntryConfig) { + // Invoke the original MethodLogger to maintain backward compatibility + if ml.originalMethodLogger != nil { + ml.originalMethodLogger.Log(c) + } + + // Fetch the compiled binary logging log entry + if ml.childMethodLogger == nil { + logger.Info("No wrapped method logger found") + return + } + var binlogEntry *binlogpb.GrpcLogEntry + o, ok := ml.childMethodLogger.(interface { + Build(iblog.LogEntryConfig) *binlogpb.GrpcLogEntry + }) + if !ok { + logger.Error("Failed to locate the Build method in wrapped method logger") + return + } + binlogEntry = o.Build(c) + + // Translate to GrpcLogRecord + grpcLogRecord := 
&grpclogrecordpb.GrpcLogRecord{ + Timestamp: binlogEntry.GetTimestamp(), + RpcId: ml.rpcID, + SequenceId: binlogEntry.GetSequenceIdWithinCall(), + EventLogger: loggerTypeToEventLogger[binlogEntry.Logger], + // Making DEBUG the default LogLevel + LogLevel: grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_DEBUG, + } + + switch binlogEntry.GetType() { + case binlogpb.GrpcLogEntry_EVENT_TYPE_UNKNOWN: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_UNKNOWN + case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_HEADER + if binlogEntry.GetClientHeader() != nil { + methodName := binlogEntry.GetClientHeader().MethodName + // Example method name: /grpc.testing.TestService/UnaryCall + if strings.Contains(methodName, "/") { + tokens := strings.Split(methodName, "/") + if len(tokens) == 3 { + // Record service name and method name for all events + ml.serviceName = tokens[1] + ml.methodName = tokens[2] + } else { + logger.Infof("Malformed method name: %v", methodName) + } + } + grpcLogRecord.Timeout = binlogEntry.GetClientHeader().Timeout + grpcLogRecord.Authority = binlogEntry.GetClientHeader().Authority + grpcLogRecord.Metadata = translateMetadata(binlogEntry.GetClientHeader().Metadata) + } + grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() + setPeerIfPresent(binlogEntry, grpcLogRecord) + case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER + grpcLogRecord.Metadata = translateMetadata(binlogEntry.GetServerHeader().Metadata) + grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() + setPeerIfPresent(binlogEntry, grpcLogRecord) + case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE + grpcLogRecord.Message = binlogEntry.GetMessage().GetData() + grpcLogRecord.PayloadSize = 
binlogEntry.GetMessage().GetLength() + grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() + case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE + grpcLogRecord.Message = binlogEntry.GetMessage().GetData() + grpcLogRecord.PayloadSize = binlogEntry.GetMessage().GetLength() + grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() + case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_HALF_CLOSE + case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_TRAILER + grpcLogRecord.Metadata = translateMetadata(binlogEntry.GetTrailer().Metadata) + grpcLogRecord.StatusCode = binlogEntry.GetTrailer().GetStatusCode() + grpcLogRecord.StatusMessage = binlogEntry.GetTrailer().GetStatusMessage() + grpcLogRecord.StatusDetails = binlogEntry.GetTrailer().GetStatusDetails() + grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() + setPeerIfPresent(binlogEntry, grpcLogRecord) + case binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL: + grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_CANCEL + default: + logger.Infof("Unknown event type: %v", binlogEntry.Type) + return + } + grpcLogRecord.ServiceName = ml.serviceName + grpcLogRecord.MethodName = ml.methodName + ml.exporter.EmitGrpcLogRecord(grpcLogRecord) +} + +type binaryLogger struct { + // originalLogger is needed to ensure binary logging users won't be impacted + // by this plugin. Users are allowed to subscribe to a completely different + // set of methods. + originalLogger iblog.Logger + // exporter is a loggingExporter and the handle for uploading collected data + // to backends. + exporter unsafe.Pointer // loggingExporter + // logger is a iblog.Logger wrapped for reusing the pattern matching logic + // and the method logger creating logic. 
+ logger unsafe.Pointer // iblog.Logger +} + +func (l *binaryLogger) loadExporter() loggingExporter { + ptrPtr := atomic.LoadPointer(&l.exporter) + if ptrPtr == nil { + return nil + } + exporterPtr := (*loggingExporter)(ptrPtr) + return *exporterPtr +} + +func (l *binaryLogger) loadLogger() iblog.Logger { + ptrPtr := atomic.LoadPointer(&l.logger) + if ptrPtr == nil { + return nil + } + loggerPtr := (*iblog.Logger)(ptrPtr) + return *loggerPtr +} + +func (l *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { + var ol iblog.MethodLogger + + if l.originalLogger != nil { + ol = l.originalLogger.GetMethodLogger(methodName) + } + + // If user specify a "*" pattern, binarylog will log every single call and + // content. This means the exporting RPC's events will be captured. Even if + // we batch up the uploads in the exporting RPC, the message content of that + // RPC will be logged. Without this exclusion, we may end up with an ever + // expanding message field in log entries, and crash the process with OOM. + if methodName == "google.logging.v2.LoggingServiceV2/WriteLogEntries" { + return ol + } + + // If no exporter is specified, there is no point creating a method + // logger. We don't have any chance to inject exporter after its + // creation. + exporter := l.loadExporter() + if exporter == nil { + return ol + } + + // If no logger is specified, e.g., during init period, do nothing. + binLogger := l.loadLogger() + if binLogger == nil { + return ol + } + + // If this method is not picked by LoggerConfig, do nothing. 
+ ml := binLogger.GetMethodLogger(methodName) + if ml == nil { + return ol + } + + return &binaryMethodLogger{ + originalMethodLogger: ol, + childMethodLogger: ml, + rpcID: uuid.NewString(), + exporter: exporter, + } +} + +func (l *binaryLogger) Close() { + if l == nil { + return + } + ePtr := atomic.LoadPointer(&l.exporter) + if ePtr != nil { + exporter := (*loggingExporter)(ePtr) + if err := (*exporter).Close(); err != nil { + logger.Infof("Failed to close logging exporter: %v", err) + } + } +} + +func validateExistingMethodLoggerConfig(existing *iblog.MethodLoggerConfig, filter *configpb.ObservabilityConfig_LogFilter) bool { + // In future, we could add more validations. Currently, we only check if the + // new filter configs are different than the existing one, if so, we log a + // warning. + if existing != nil && (existing.Header != uint64(filter.HeaderBytes) || existing.Message != uint64(filter.MessageBytes)) { + logger.Warningf("Ignored log_filter config: %+v", filter) + } + return existing == nil +} + +func createBinaryLoggerConfig(filters []*configpb.ObservabilityConfig_LogFilter) iblog.LoggerConfig { + config := iblog.LoggerConfig{ + Services: make(map[string]*iblog.MethodLoggerConfig), + Methods: make(map[string]*iblog.MethodLoggerConfig), + } + // Try matching the filters one by one, pick the first match. The + // correctness of the log filter pattern is ensured by config.go. 
+ for _, filter := range filters { + if filter.Pattern == "*" { + // Match a "*" + if !validateExistingMethodLoggerConfig(config.All, filter) { + continue + } + config.All = &iblog.MethodLoggerConfig{Header: uint64(filter.HeaderBytes), Message: uint64(filter.MessageBytes)} + continue + } + tokens := strings.SplitN(filter.Pattern, "/", 2) + filterService := tokens[0] + filterMethod := tokens[1] + if filterMethod == "*" { + // Handle "p.s/*" case + if !validateExistingMethodLoggerConfig(config.Services[filterService], filter) { + continue + } + config.Services[filterService] = &iblog.MethodLoggerConfig{Header: uint64(filter.HeaderBytes), Message: uint64(filter.MessageBytes)} + continue + } + // Exact match like "p.s/m" + if !validateExistingMethodLoggerConfig(config.Methods[filter.Pattern], filter) { + continue + } + config.Methods[filter.Pattern] = &iblog.MethodLoggerConfig{Header: uint64(filter.HeaderBytes), Message: uint64(filter.MessageBytes)} + } + return config +} + +// start is the core logic for setting up the custom binary logging logger, and +// it's also useful for testing. +func (l *binaryLogger) start(config *configpb.ObservabilityConfig, exporter loggingExporter) error { + filters := config.GetLogFilters() + if len(filters) == 0 || exporter == nil { + // Doing nothing is allowed + if exporter != nil { + // The exporter is owned by binaryLogger, so we should close it if + // we are not planning to use it. 
+ exporter.Close() + } + logger.Info("Skipping gRPC Observability logger: no config") + return nil + } + + binLogger := iblog.NewLoggerFromConfig(createBinaryLoggerConfig(filters)) + if binLogger != nil { + atomic.StorePointer(&l.logger, unsafe.Pointer(&binLogger)) + } + atomic.StorePointer(&l.exporter, unsafe.Pointer(&exporter)) + logger.Info("Start gRPC Observability logger") + return nil +} + +func (l *binaryLogger) Start(ctx context.Context, config *configpb.ObservabilityConfig) error { + if config == nil || !config.GetEnableCloudLogging() { + return nil + } + if config.GetDestinationProjectId() == "" { + return fmt.Errorf("failed to enable CloudLogging: empty destination_project_id") + } + exporter, err := newCloudLoggingExporter(ctx, config.DestinationProjectId) + if err != nil { + return fmt.Errorf("unable to create CloudLogging exporter: %v", err) + } + l.start(config, exporter) + return nil +} + +func newBinaryLogger(iblogger iblog.Logger) *binaryLogger { + return &binaryLogger{ + originalLogger: iblogger, + } +} + +var defaultLogger *binaryLogger + +func prepareLogging() { + defaultLogger = newBinaryLogger(iblog.GetLogger()) + iblog.SetLogger(defaultLogger) +} diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go new file mode 100644 index 000000000000..b0269249b380 --- /dev/null +++ b/gcp/observability/observability.go @@ -0,0 +1,80 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package observability implements the tracing, metrics, and logging data +// collection, and provides controlling knobs via a config file. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package observability + +import ( + "context" + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("observability") + +func init() { + prepareLogging() +} + +// Start is the opt-in API for gRPC Observability plugin. This function should +// be invoked in the main function, and before creating any gRPC clients or +// servers, otherwise, they might not be instrumented. At high-level, this +// module does the following: +// +// - it loads observability config from environment; +// - it registers default exporters if not disabled by the config; +// - it sets up binary logging sink against the logging exporter. +// +// Note: this method should only be invoked once. +// Note: currently, the binarylog module only supports one sink, so using the +// "observability" module will conflict with existing binarylog usage. +// Note: handle the error +func Start(ctx context.Context) error { + config, err := parseObservabilityConfig() + if err != nil { + return err + } + if config == nil { + return fmt.Errorf("no ObservabilityConfig found, it can be set via env %s", envObservabilityConfig) + } + + // Set the project ID if it isn't configured manually. + if err := ensureProjectIDInObservabilityConfig(ctx, config); err != nil { + return err + } + + // Logging is controlled by the config at methods level. + return defaultLogger.Start(ctx, config) +} + +// End is the clean-up API for gRPC Observability plugin. It is expected to be +// invoked in the main function of the application. The suggested usage is +// "defer observability.End()". 
This function also flushes data to upstream, and +// cleanup resources. +// +// Note: this method should only be invoked once. +func End() { + defaultLogger.Close() +} diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go new file mode 100644 index 000000000000..1785dcd2fa55 --- /dev/null +++ b/gcp/observability/observability_test.go @@ -0,0 +1,642 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package observability + +import ( + "bytes" + "context" + "fmt" + "net" + "os" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + iblog "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/leakcheck" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/metadata" + configpb "google.golang.org/grpc/observability/internal/config" + grpclogrecordpb "google.golang.org/grpc/observability/internal/logging" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func init() { + // OpenCensus, once included in binary, will spawn a global goroutine + // recorder that is not controllable by application. + // https://github.com/census-instrumentation/opencensus-go/issues/1191 + leakcheck.RegisterIgnoreGoroutine("go.opencensus.io/stats/view.(*worker).start") + // google-cloud-go leaks HTTP client. 
They are aware of this: + // https://github.com/googleapis/google-cloud-go/issues/1183 + leakcheck.RegisterIgnoreGoroutine("internal/poll.runtime_pollWait") +} + +var ( + defaultTestTimeout = 10 * time.Second + testHeaderMetadata = metadata.MD{"header": []string{"HeADer"}} + testTrailerMetadata = metadata.MD{"trailer": []string{"TrAileR"}} + testOkPayload = []byte{72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100} + testErrorPayload = []byte{77, 97, 114, 116, 104, 97} + testErrorMessage = "test case injected error" + infinitySizeBytes int32 = 1024 * 1024 * 1024 +) + +type testServer struct { + testgrpc.UnimplementedTestServiceServer +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + if err := grpc.SendHeader(ctx, testHeaderMetadata); err != nil { + return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want ", testHeaderMetadata, err) + } + if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil { + return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata, err) + } + + if bytes.Equal(in.Payload.Body, testErrorPayload) { + return nil, fmt.Errorf(testErrorMessage) + } + + return &testpb.SimpleResponse{Payload: in.Payload}, nil +} + +type fakeLoggingExporter struct { + t *testing.T + clientEvents []*grpclogrecordpb.GrpcLogRecord + serverEvents []*grpclogrecordpb.GrpcLogRecord + isClosed bool + mu sync.Mutex +} + +func (fle *fakeLoggingExporter) EmitGrpcLogRecord(l *grpclogrecordpb.GrpcLogRecord) { + fle.mu.Lock() + defer fle.mu.Unlock() + switch l.EventLogger { + case grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT: + fle.clientEvents = append(fle.clientEvents, l) + case grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER: + fle.serverEvents = append(fle.serverEvents, l) + default: + fle.t.Fatalf("unexpected event logger: %v", l.EventLogger) + } + eventJSON, _ := protojson.Marshal(l) + fle.t.Logf("fakeLoggingExporter Emit: 
%s", eventJSON) +} + +func (fle *fakeLoggingExporter) Close() error { + fle.isClosed = true + return nil +} + +// test is an end-to-end test. It should be created with the newTest +// func, modified as needed, and then started with its startServer method. +// It should be cleaned up with the tearDown method. +type test struct { + t *testing.T + fle *fakeLoggingExporter + + testServer testgrpc.TestServiceServer // nil means none + // srv and srvAddr are set once startServer is called. + srv *grpc.Server + srvAddr string + + cc *grpc.ClientConn // nil until requested via clientConn +} + +func (te *test) tearDown() { + if te.cc != nil { + te.cc.Close() + te.cc = nil + } + te.srv.Stop() + End() + + if !te.fle.isClosed { + te.t.Fatalf("fakeLoggingExporter not closed!") + } +} + +// newTest returns a new test using the provided testing.T and +// environment. It is returned with default values. Tests should +// modify it before calling its startServer and clientConn methods. +func newTest(t *testing.T) *test { + return &test{ + t: t, + fle: &fakeLoggingExporter{t: t}, + } +} + +// startServer starts a gRPC server listening. Callers should defer a +// call to te.tearDown to clean up. +func (te *test) startServer(ts testgrpc.TestServiceServer) { + te.testServer = ts + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + te.t.Fatalf("Failed to listen: %v", err) + } + var opts []grpc.ServerOption + s := grpc.NewServer(opts...) + te.srv = s + if te.testServer != nil { + testgrpc.RegisterTestServiceServer(s, te.testServer) + } + + go s.Serve(lis) + te.srvAddr = lis.Addr().String() +} + +func (te *test) clientConn() *grpc.ClientConn { + if te.cc != nil { + return te.cc + } + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock(), + grpc.WithUserAgent("test/0.0.1"), + } + var err error + te.cc, err = grpc.Dial(te.srvAddr, opts...) 
+ if err != nil { + te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err) + } + return te.cc +} + +func (te *test) enablePluginWithConfig(config *configpb.ObservabilityConfig) { + // Injects the fake exporter for testing purposes + defaultLogger = newBinaryLogger(nil) + iblog.SetLogger(defaultLogger) + if err := defaultLogger.start(config, te.fle); err != nil { + te.t.Fatalf("Failed to start plugin: %v", err) + } +} + +func (te *test) enablePluginWithCaptureAll() { + te.enablePluginWithConfig(&configpb.ObservabilityConfig{ + EnableCloudLogging: true, + DestinationProjectId: "fake", + LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + { + Pattern: "*", + HeaderBytes: infinitySizeBytes, + MessageBytes: infinitySizeBytes, + }, + }, + }) +} + +func checkEventCommon(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord) { + if seen.RpcId == "" { + t.Fatalf("expect non-empty RpcId") + } + if seen.SequenceId == 0 { + t.Fatalf("expect non-zero SequenceId") + } +} + +func checkEventRequestHeader(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord) { + checkEventCommon(t, seen) + if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_HEADER { + t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_REQUEST_HEADER", seen.EventType.String()) + } + if seen.EventLogger != want.EventLogger { + t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) + } + if want.Authority != "" && seen.Authority != want.Authority { + t.Fatalf("l.Authority = %v, want %v", seen.Authority, want.Authority) + } + if want.ServiceName != "" && seen.ServiceName != want.ServiceName { + t.Fatalf("l.ServiceName = %v, want %v", seen.ServiceName, want.ServiceName) + } + if want.MethodName != "" && seen.MethodName != want.MethodName { + t.Fatalf("l.MethodName = %v, want %v", seen.MethodName, want.MethodName) + } + if len(seen.Metadata.Entry) != 1 { + t.Fatalf("unexpected header size: %v != 1", len(seen.Metadata.Entry)) + } + if seen.Metadata.Entry[0].Key 
!= "header" { + t.Fatalf("unexpected header: %v", seen.Metadata.Entry[0].Key) + } + if !bytes.Equal(seen.Metadata.Entry[0].Value, []byte(testHeaderMetadata["header"][0])) { + t.Fatalf("unexpected header value: %v", seen.Metadata.Entry[0].Value) + } +} + +func checkEventResponseHeader(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord) { + checkEventCommon(t, seen) + if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER { + t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER", seen.EventType.String()) + } + if seen.EventLogger != want.EventLogger { + t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) + } + if len(seen.Metadata.Entry) != 1 { + t.Fatalf("unexpected header size: %v != 1", len(seen.Metadata.Entry)) + } + if seen.Metadata.Entry[0].Key != "header" { + t.Fatalf("unexpected header: %v", seen.Metadata.Entry[0].Key) + } + if !bytes.Equal(seen.Metadata.Entry[0].Value, []byte(testHeaderMetadata["header"][0])) { + t.Fatalf("unexpected header value: %v", seen.Metadata.Entry[0].Value) + } +} + +func checkEventRequestMessage(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord, payload []byte) { + checkEventCommon(t, seen) + if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE { + t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE", seen.EventType.String()) + } + if seen.EventLogger != want.EventLogger { + t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) + } + msg := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: payload}} + msgBytes, _ := proto.Marshal(msg) + if !bytes.Equal(seen.Message, msgBytes) { + t.Fatalf("unexpected payload: %v != %v", seen.Message, payload) + } +} + +func checkEventResponseMessage(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord, payload []byte) { + checkEventCommon(t, seen) + if seen.EventType != 
grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE { + t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE", seen.EventType.String()) + } + if seen.EventLogger != want.EventLogger { + t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) + } + msg := &testpb.SimpleResponse{Payload: &testpb.Payload{Body: payload}} + msgBytes, _ := proto.Marshal(msg) + if !bytes.Equal(seen.Message, msgBytes) { + t.Fatalf("unexpected payload: %v != %v", seen.Message, payload) + } +} + +func checkEventTrailer(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord) { + checkEventCommon(t, seen) + if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_TRAILER { + t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_TRAILER", seen.EventType.String()) + } + if seen.EventLogger != want.EventLogger { + t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) + } + if seen.StatusCode != want.StatusCode { + t.Fatalf("l.StatusCode = %v, want %v", seen.StatusCode, want.StatusCode) + } + if seen.StatusMessage != want.StatusMessage { + t.Fatalf("l.StatusMessage = %v, want %v", seen.StatusMessage, want.StatusMessage) + } + if !bytes.Equal(seen.StatusDetails, want.StatusDetails) { + t.Fatalf("l.StatusDetails = %v, want %v", seen.StatusDetails, want.StatusDetails) + } + if len(seen.Metadata.Entry) != 1 { + t.Fatalf("unexpected trailer size: %v != 1", len(seen.Metadata.Entry)) + } + if seen.Metadata.Entry[0].Key != "trailer" { + t.Fatalf("unexpected trailer: %v", seen.Metadata.Entry[0].Key) + } + if !bytes.Equal(seen.Metadata.Entry[0].Value, []byte(testTrailerMetadata["trailer"][0])) { + t.Fatalf("unexpected trailer value: %v", seen.Metadata.Entry[0].Value) + } +} + +func (s) TestLoggingForOkCall(t *testing.T) { + te := newTest(t) + defer te.tearDown() + te.enablePluginWithCaptureAll() + te.startServer(&testServer{}) + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + var ( + resp *testpb.SimpleResponse + 
req *testpb.SimpleRequest + err error + ) + req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} + tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + if err != nil { + t.Fatalf("unary call failed: %v", err) + } + t.Logf("unary call passed: %v", resp) + + // Wait for the gRPC transport to gracefully close to ensure no lost event. + te.cc.Close() + te.srv.GracefulStop() + // Check size of events + if len(te.fle.clientEvents) != 5 { + t.Fatalf("expects 5 client events, got %d", len(te.fle.clientEvents)) + } + if len(te.fle.serverEvents) != 5 { + t.Fatalf("expects 5 server events, got %d", len(te.fle.serverEvents)) + } + // Client events + checkEventRequestHeader(te.t, te.fle.clientEvents[0], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + Authority: te.srvAddr, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + }) + checkEventRequestMessage(te.t, te.fle.clientEvents[1], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + }, testOkPayload) + checkEventResponseHeader(te.t, te.fle.clientEvents[2], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + }) + checkEventResponseMessage(te.t, te.fle.clientEvents[3], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + }, testOkPayload) + checkEventTrailer(te.t, te.fle.clientEvents[4], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + StatusCode: 0, + }) + // Server events + checkEventRequestHeader(te.t, te.fle.serverEvents[0], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }) + checkEventRequestMessage(te.t, te.fle.serverEvents[1], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: 
grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }, testOkPayload) + checkEventResponseHeader(te.t, te.fle.serverEvents[2], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }) + checkEventResponseMessage(te.t, te.fle.serverEvents[3], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }, testOkPayload) + checkEventTrailer(te.t, te.fle.serverEvents[4], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + StatusCode: 0, + }) +} + +func (s) TestLoggingForErrorCall(t *testing.T) { + te := newTest(t) + defer te.tearDown() + te.enablePluginWithCaptureAll() + te.startServer(&testServer{}) + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + var ( + req *testpb.SimpleRequest + err error + ) + req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testErrorPayload}} + tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + if err == nil { + t.Fatalf("unary call expected to fail, but passed") + } + + // Wait for the gRPC transport to gracefully close to ensure no lost event. 
+ te.cc.Close() + te.srv.GracefulStop() + // Check size of events + if len(te.fle.clientEvents) != 4 { + t.Fatalf("expects 4 client events, got %d", len(te.fle.clientEvents)) + } + if len(te.fle.serverEvents) != 4 { + t.Fatalf("expects 4 server events, got %d", len(te.fle.serverEvents)) + } + // Client events + checkEventRequestHeader(te.t, te.fle.clientEvents[0], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + Authority: te.srvAddr, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + }) + checkEventRequestMessage(te.t, te.fle.clientEvents[1], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + }, testErrorPayload) + checkEventResponseHeader(te.t, te.fle.clientEvents[2], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + }) + checkEventTrailer(te.t, te.fle.clientEvents[3], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + StatusCode: 2, + StatusMessage: testErrorMessage, + }) + // Server events + checkEventRequestHeader(te.t, te.fle.serverEvents[0], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }) + checkEventRequestMessage(te.t, te.fle.serverEvents[1], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }, testErrorPayload) + checkEventResponseHeader(te.t, te.fle.serverEvents[2], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }) + checkEventTrailer(te.t, te.fle.serverEvents[3], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + StatusCode: 2, + StatusMessage: testErrorMessage, + }) +} + +func (s) TestEmptyConfig(t *testing.T) { + te := newTest(t) + defer te.tearDown() + te.enablePluginWithConfig(&configpb.ObservabilityConfig{}) + te.startServer(&testServer{}) + tc := 
testgrpc.NewTestServiceClient(te.clientConn()) + + var ( + resp *testpb.SimpleResponse + req *testpb.SimpleRequest + err error + ) + req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} + tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + if err != nil { + t.Fatalf("unary call failed: %v", err) + } + t.Logf("unary call passed: %v", resp) + + // Wait for the gRPC transport to gracefully close to ensure no lost event. + te.cc.Close() + te.srv.GracefulStop() + // Check size of events + if len(te.fle.clientEvents) != 0 { + t.Fatalf("expects 0 client events, got %d", len(te.fle.clientEvents)) + } + if len(te.fle.serverEvents) != 0 { + t.Fatalf("expects 0 server events, got %d", len(te.fle.serverEvents)) + } +} + +func (s) TestOverrideConfig(t *testing.T) { + te := newTest(t) + defer te.tearDown() + // Setting 3 filters, expected to use the third filter, because it's the + // most specific one. The third filter allows message payload logging, and + // others disabling the message payload logging. We should observe this + // behavior latter. 
+ te.enablePluginWithConfig(&configpb.ObservabilityConfig{ + EnableCloudLogging: true, + DestinationProjectId: "fake", + LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + { + Pattern: "wont/match", + MessageBytes: 0, + }, + { + Pattern: "*", + MessageBytes: 0, + }, + { + Pattern: "grpc.testing.TestService/*", + MessageBytes: 4096, + }, + }, + }) + te.startServer(&testServer{}) + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + var ( + resp *testpb.SimpleResponse + req *testpb.SimpleRequest + err error + ) + req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} + tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + if err != nil { + t.Fatalf("unary call failed: %v", err) + } + t.Logf("unary call passed: %v", resp) + + // Wait for the gRPC transport to gracefully close to ensure no lost event. + te.cc.Close() + te.srv.GracefulStop() + // Check size of events + if len(te.fle.clientEvents) != 5 { + t.Fatalf("expects 5 client events, got %d", len(te.fle.clientEvents)) + } + if len(te.fle.serverEvents) != 5 { + t.Fatalf("expects 5 server events, got %d", len(te.fle.serverEvents)) + } + // Check Client message payloads + checkEventRequestMessage(te.t, te.fle.clientEvents[1], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + }, testOkPayload) + checkEventResponseMessage(te.t, te.fle.clientEvents[3], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, + }, testOkPayload) + // Check Server message payloads + checkEventRequestMessage(te.t, te.fle.serverEvents[1], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }, testOkPayload) + checkEventResponseMessage(te.t, te.fle.serverEvents[3], &grpclogrecordpb.GrpcLogRecord{ + EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, + }, 
testOkPayload) +} + +func (s) TestNoMatch(t *testing.T) { + te := newTest(t) + defer te.tearDown() + // Setting 3 filters, expected to use the second filter. The second filter + // allows message payload logging, and others disabling the message payload + // logging. We should observe this behavior latter. + te.enablePluginWithConfig(&configpb.ObservabilityConfig{ + EnableCloudLogging: true, + DestinationProjectId: "fake", + LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + { + Pattern: "wont/match", + MessageBytes: 0, + }, + }, + }) + te.startServer(&testServer{}) + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + var ( + resp *testpb.SimpleResponse + req *testpb.SimpleRequest + err error + ) + req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} + tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + if err != nil { + t.Fatalf("unary call failed: %v", err) + } + t.Logf("unary call passed: %v", resp) + + // Wait for the gRPC transport to gracefully close to ensure no lost event. 
+ te.cc.Close() + te.srv.GracefulStop() + // Check size of events + if len(te.fle.clientEvents) != 0 { + t.Fatalf("expects 0 client events, got %d", len(te.fle.clientEvents)) + } + if len(te.fle.serverEvents) != 0 { + t.Fatalf("expects 0 server events, got %d", len(te.fle.serverEvents)) + } +} + +func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { + config := &configpb.ObservabilityConfig{ + EnableCloudLogging: true, + DestinationProjectId: "fake", + LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + { + Pattern: ":-)", + }, + { + Pattern: "*", + }, + }, + } + configJSON, err := protojson.Marshal(config) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + os.Setenv(envObservabilityConfig, string(configJSON)) + // If there is at least one invalid pattern, which should not be silently tolerated. + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } +} + +func (s) TestNoEnvSet(t *testing.T) { + os.Setenv(envObservabilityConfig, "") + // If there is no observability config set at all, the Start should return an error. + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } +} diff --git a/gcp/observability/tags.go b/gcp/observability/tags.go new file mode 100644 index 000000000000..c9a900970ea9 --- /dev/null +++ b/gcp/observability/tags.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "strings" +) + +const ( + envPrefixCustomTags = "GRPC_OBSERVABILITY_" + envPrefixLen = len(envPrefixCustomTags) +) + +func getCustomTags(envs []string) map[string]string { + m := make(map[string]string) + for _, e := range envs { + if !strings.HasPrefix(e, envPrefixCustomTags) { + continue + } + tokens := strings.SplitN(e, "=", 2) + if len(tokens) == 2 { + if len(tokens[0]) == envPrefixLen { + // Empty key is not allowed + continue + } + m[tokens[0][envPrefixLen:]] = tokens[1] + } + } + return m +} diff --git a/gcp/observability/tags_test.go b/gcp/observability/tags_test.go new file mode 100644 index 000000000000..5a0353a03087 --- /dev/null +++ b/gcp/observability/tags_test.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package observability + +import ( + "reflect" + "testing" +) + +// TestGetCustomTags tests the normal tags parsing +func (s) TestGetCustomTags(t *testing.T) { + var ( + input = []string{ + "GRPC_OBSERVABILITY_APP_NAME=app1", + "GRPC_OBSERVABILITY_DATACENTER=us-west1-a", + "GRPC_OBSERVABILITY_smallcase=OK", + } + expect = map[string]string{ + "APP_NAME": "app1", + "DATACENTER": "us-west1-a", + "smallcase": "OK", + } + ) + result := getCustomTags(input) + if !reflect.DeepEqual(result, expect) { + t.Errorf("result [%+v] != expect [%+v]", result, expect) + } +} + +// TestGetCustomTagsInvalid tests the invalid cases of tags parsing +func (s) TestGetCustomTagsInvalid(t *testing.T) { + var ( + input = []string{ + "GRPC_OBSERVABILITY_APP_NAME=app1", + "GRPC_OBSERVABILITY=foo", + "GRPC_OBSERVABILITY_=foo", // Users should not set "" as key name + "GRPC_STUFF=foo", + "STUFF_GRPC_OBSERVABILITY_=foo", + } + expect = map[string]string{ + "APP_NAME": "app1", + } + ) + result := getCustomTags(input) + if !reflect.DeepEqual(result, expect) { + t.Errorf("result [%+v] != expect [%+v]", result, expect) + } +} diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index 66efc4856fff..0a25ce43f3f0 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -75,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. 
+type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -95,57 +107,57 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. 
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } @@ -161,17 +173,17 @@ func (l *logger) GetMethodLogger(methodName string) MethodLogger { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return 
newMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return newMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/internal/binarylog/env_config.go b/internal/binarylog/env_config.go index d8f4e7602fde..ab589a76bf96 100644 --- a/internal/binarylog/env_config.go +++ b/internal/binarylog/env_config.go @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/internal/binarylog/env_config_test.go b/internal/binarylog/env_config_test.go index f67b4fd60326..9f888ad870ea 100644 --- a/internal/binarylog/env_config_test.go +++ b/internal/binarylog/env_config_test.go @@ -36,29 +36,29 @@ func (s) TestNewLoggerFromConfigString(t *testing.T) { c := fmt.Sprintf("*{h:1;m:2},%s{h},%s{m},%s{h;m}", s1+"/*", fullM1, fullM2) l := NewLoggerFromConfigString(c).(*logger) - if l.all.hdr != 1 || l.all.msg != 2 { - 
t.Errorf("l.all = %#v, want headerLen: 1, messageLen: 2", l.all) + if l.config.All.Header != 1 || l.config.All.Message != 2 { + t.Errorf("l.config.All = %#v, want headerLen: 1, messageLen: 2", l.config.All) } - if ml, ok := l.services[s1]; ok { - if ml.hdr != maxUInt || ml.msg != 0 { - t.Errorf("want maxUInt header, 0 message, got header: %v, message: %v", ml.hdr, ml.msg) + if ml, ok := l.config.Services[s1]; ok { + if ml.Header != maxUInt || ml.Message != 0 { + t.Errorf("want maxUInt header, 0 message, got header: %v, message: %v", ml.Header, ml.Message) } } else { t.Errorf("service/* is not set") } - if ml, ok := l.methods[fullM1]; ok { - if ml.hdr != 0 || ml.msg != maxUInt { - t.Errorf("want 0 header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg) + if ml, ok := l.config.Methods[fullM1]; ok { + if ml.Header != 0 || ml.Message != maxUInt { + t.Errorf("want 0 header, maxUInt message, got header: %v, message: %v", ml.Header, ml.Message) } } else { t.Errorf("service/method{h} is not set") } - if ml, ok := l.methods[fullM2]; ok { - if ml.hdr != maxUInt || ml.msg != maxUInt { - t.Errorf("want maxUInt header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg) + if ml, ok := l.config.Methods[fullM2]; ok { + if ml.Header != maxUInt || ml.Message != maxUInt { + t.Errorf("want maxUInt header, maxUInt message, got header: %v, message: %v", ml.Header, ml.Message) } } else { t.Errorf("service/method{h;m} is not set") @@ -249,7 +249,7 @@ func (s) TestFillMethodLoggerWithConfigStringBlacklist(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue } - _, ok := l.blacklist[tc] + _, ok := l.config.Blacklist[tc] if !ok { t.Errorf("blacklist[%q] is not set", tc) } @@ -306,15 +306,15 @@ func (s) TestFillMethodLoggerWithConfigStringGlobal(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue } - if l.all == nil { - t.Errorf("l.all is not set") + if l.config.All == nil { + t.Errorf("l.config.All is not set") continue } - if hdr := 
l.all.hdr; hdr != tc.hdr { + if hdr := l.config.All.Header; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } - if msg := l.all.msg; msg != tc.msg { + if msg := l.config.All.Message; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } @@ -371,16 +371,16 @@ func (s) TestFillMethodLoggerWithConfigStringPerService(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue } - ml, ok := l.services[serviceName] + ml, ok := l.config.Services[serviceName] if !ok { t.Errorf("l.service[%q] is not set", serviceName) continue } - if hdr := ml.hdr; hdr != tc.hdr { + if hdr := ml.Header; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } - if msg := ml.msg; msg != tc.msg { + if msg := ml.Message; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } @@ -441,16 +441,16 @@ func (s) TestFillMethodLoggerWithConfigStringPerMethod(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue } - ml, ok := l.methods[fullMethodName] + ml, ok := l.config.Methods[fullMethodName] if !ok { - t.Errorf("l.methods[%q] is not set", fullMethodName) + t.Errorf("l.config.Methods[%q] is not set", fullMethodName) continue } - if hdr := ml.hdr; hdr != tc.hdr { + if hdr := ml.Header; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } - if msg := ml.msg; msg != tc.msg { + if msg := ml.Message; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } From 5682cc6a321507ce2a865beb8dd4bed9a5db0e06 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 6 Apr 2022 10:56:53 -0700 Subject: [PATCH 485/998] Change version to 1.47.0-dev (#5298) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index c09089618a94..ff0d6c265146 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.46.0-dev" +const Version = "1.47.0-dev" From 924e4849c540a5b344735938f454db03decad030 Mon Sep 17 00:00:00 2001 From: idiamond-stripe <37758547+idiamond-stripe@users.noreply.github.com> Date: Fri, 8 Apr 2022 13:10:16 -0700 Subject: [PATCH 486/998] server: return better status for context err when writing header (#5292) --- internal/transport/http2_server.go | 59 ++++++++++++++++-------------- test/end2end_test.go | 4 +- 2 files changed, 33 insertions(+), 30 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 0956b500c18e..4969102f4af9 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -53,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -931,11 +930,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. 
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } + + if s.getState() == streamDone { + return t.streamContextErr(s) + } + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { @@ -946,7 +959,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } s.hdrMu.Unlock() return nil @@ -1062,23 +1075,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1088,12 +1090,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1229,10 +1226,6 @@ func (t *http2Server) Close() { // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1254,6 +1247,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1273,6 +1271,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) diff --git a/test/end2end_test.go b/test/end2end_test.go index 5fa36b1a2b11..766d568b9277 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -4964,8 +4964,8 @@ func testClientSendDataAfterCloseSend(t *testing.T, e env) { } if err := stream.SendMsg(nil); err == nil { t.Error("expected error sending message on stream after stream closed due to illegal data") - } else if status.Code(err) != codes.Internal { - t.Errorf("expected internal error, instead received '%v'", err) + } else if status.Code(err) != codes.Canceled { + t.Errorf("expected cancel error, instead received '%v'", err) } return nil }} From 9711b148c4a11c291160b9114468012a8961c615 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 8 Apr 2022 13:11:40 -0700 Subject: [PATCH 487/998] server: clarify documentation around setting and sending headers and ServerStream errors (#5302) --- server.go | 40 ++++++++++++++++++++++++++++++++-------- stream.go | 6 ++++-- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/server.go b/server.go index 96431a058bf8..65de84b30074 100644 --- a/server.go +++ b/server.go @@ -1801,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. 
All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1818,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1833,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. 
However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/stream.go b/stream.go index e0b30b46fb11..c72aac5877ce 100644 --- a/stream.go +++ b/stream.go @@ -1370,8 +1370,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. 
From ebd098392a8ba5a611762fbfd06de8fa06bca8ae Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 8 Apr 2022 15:41:56 -0700 Subject: [PATCH 488/998] xds/eds: reject EDS resources with multiple instances of the same locality in the same priority (#5303) --- .../xdsclient/xdsresource/unmarshal_eds.go | 13 ++++- .../xdsresource/unmarshal_eds_test.go | 53 +++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index 147870cdf6bc..9eb7117d9a22 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -107,7 +107,7 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) } - priorities := make(map[uint32]struct{}) + priorities := make(map[uint32]map[string]bool) for _, locality := range m.Endpoints { l := locality.GetLocality() if l == nil { @@ -119,7 +119,16 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, SubZone: l.SubZone, } priority := locality.GetPriority() - priorities[priority] = struct{}{} + localitiesWithPriority := priorities[priority] + if localitiesWithPriority == nil { + localitiesWithPriority = make(map[string]bool) + priorities[priority] = localitiesWithPriority + } + lidStr, _ := lid.ToString() + if localitiesWithPriority[lidStr] { + return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) + } + localitiesWithPriority[lidStr] = true ret.Localities = append(ret.Localities, Locality{ ID: lid, Endpoints: parseEndpoints(locality.GetLbEndpoints()), diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index 5c6118d4e727..db9d4f52896b 100644 --- 
a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -64,6 +64,17 @@ func (s) TestEDSParseRespProto(t *testing.T) { want: EndpointsUpdate{}, wantErr: true, }, + { + name: "duplicate-locality-in-the-same-priority", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-0", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-0", 1, 0, []string{"addr1:314"}, nil) // Duplicate locality with the same priority. + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, { name: "good", m: func() *v3endpointpb.ClusterLoadAssignment { @@ -105,6 +116,48 @@ func (s) TestEDSParseRespProto(t *testing.T) { }, wantErr: false, }, + { + name: "good duplicate locality with different priority", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, + Weight: []uint32{271}, + }) + // Same locality name, but with different priority. 
+ clab0.addLocality("locality-1", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, + Weight: []uint32{828}, + }) + return clab0.Build() + }(), + want: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 0, + Weight: 1, + }, + }, + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 8d68434c48b31d0073d1eb8155dacf06e5b4dcee Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Tue, 12 Apr 2022 13:39:41 -0700 Subject: [PATCH 489/998] gcp/observability: correctly test this module in presubmit tests (#5300) --- .github/workflows/testing.yml | 9 ++++++--- gcp/observability/config.go | 2 +- gcp/observability/exporting.go | 2 +- gcp/observability/go.mod | 6 ++++-- gcp/observability/logging.go | 6 +++--- gcp/observability/observability_test.go | 4 ++-- 6 files changed, 17 insertions(+), 12 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 58bcb160e33c..84c2907bbb6d 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -106,9 +106,12 @@ jobs: run: | go version go test ${{ matrix.testflags }} -cpu 1,4 -timeout 7m google.golang.org/grpc/... - cd ${GITHUB_WORKSPACE}/security/advancedtls && go test ${{ matrix.testflags }} -timeout 2m google.golang.org/grpc/security/advancedtls/... - cd ${GITHUB_WORKSPACE}/security/authorization && go test ${{ matrix.testflags }} -timeout 2m google.golang.org/grpc/security/authorization/... - + cd "${GITHUB_WORKSPACE}" + for MOD_FILE in $(find . 
-name 'go.mod' | grep -Ev '^\./go\.mod'); do + pushd "$(dirname ${MOD_FILE})" + go test ${{ matrix.testflags }} -timeout 2m ./... + popd + done # Non-core gRPC tests (examples, interop, etc) - name: Run extras tests diff --git a/gcp/observability/config.go b/gcp/observability/config.go index aea2a2db3298..fd0fc1485f26 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -26,7 +26,7 @@ import ( gcplogging "cloud.google.com/go/logging" "golang.org/x/oauth2/google" - configpb "google.golang.org/grpc/observability/internal/config" + configpb "google.golang.org/grpc/gcp/observability/internal/config" "google.golang.org/protobuf/encoding/protojson" ) diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go index 898f35963ee5..79576eb999f2 100644 --- a/gcp/observability/exporting.go +++ b/gcp/observability/exporting.go @@ -25,7 +25,7 @@ import ( "os" gcplogging "cloud.google.com/go/logging" - grpclogrecordpb "google.golang.org/grpc/observability/internal/logging" + grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" "google.golang.org/protobuf/encoding/protojson" ) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index d622a879e36b..6f4cdd48b231 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -1,4 +1,4 @@ -module google.golang.org/grpc/observability +module google.golang.org/grpc/gcp/observability go 1.14 @@ -7,8 +7,10 @@ require ( github.com/golang/protobuf v1.5.2 github.com/google/uuid v1.3.0 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 - google.golang.org/grpc v1.43.0 + google.golang.org/grpc v1.46.0 google.golang.org/protobuf v1.27.1 ) +// TODO(lidiz) remove the following line when we have a release containing the +// necessary internal binary logging changes replace google.golang.org/grpc => ../../ diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index fc9366440e38..ed7e76d74c04 100644 --- 
a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -27,9 +27,9 @@ import ( "github.com/google/uuid" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + configpb "google.golang.org/grpc/gcp/observability/internal/config" + grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" iblog "google.golang.org/grpc/internal/binarylog" - configpb "google.golang.org/grpc/observability/internal/config" - grpclogrecordpb "google.golang.org/grpc/observability/internal/logging" ) // translateMetadata translates the metadata from Binary Logging format to @@ -203,7 +203,7 @@ func (l *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { // we batch up the uploads in the exporting RPC, the message content of that // RPC will be logged. Without this exclusion, we may end up with an ever // expanding message field in log entries, and crash the process with OOM. - if methodName == "google.logging.v2.LoggingServiceV2/WriteLogEntries" { + if methodName == "/google.logging.v2.LoggingServiceV2/WriteLogEntries" { return ol } diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 1785dcd2fa55..16a3f935a68b 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -30,14 +30,14 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + configpb "google.golang.org/grpc/gcp/observability/internal/config" + grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" iblog "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/metadata" - configpb "google.golang.org/grpc/observability/internal/config" - grpclogrecordpb "google.golang.org/grpc/observability/internal/logging" 
"google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" From 3bf6719fc8ab5dac43b8494fcdc7e892efde6ea1 Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Tue, 12 Apr 2022 14:22:46 -0700 Subject: [PATCH 490/998] test/interop: register RLS to the interop test client (#5272) --- interop/client/client.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/interop/client/client.go b/interop/client/client.go index a1538b0c6921..3cfedfcb6542 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -29,7 +29,6 @@ import ( "time" "google.golang.org/grpc" - _ "google.golang.org/grpc/balancer/grpclb" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/google" @@ -39,7 +38,10 @@ import ( "google.golang.org/grpc/interop" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" - _ "google.golang.org/grpc/xds/googledirectpath" + + _ "google.golang.org/grpc/balancer/grpclb" // Register the grpclb load balancing policy. + _ "google.golang.org/grpc/balancer/rls" // Register the RLS load balancing policy. + _ "google.golang.org/grpc/xds/googledirectpath" // Register xDS resolver required for c2p directpath. testgrpc "google.golang.org/grpc/interop/grpc_testing" ) From 7567a5d96538a01715827a3eba57e70d8eedec37 Mon Sep 17 00:00:00 2001 From: Robin Liu <41276823+RobinLG@users.noreply.github.com> Date: Thu, 14 Apr 2022 01:15:49 +0800 Subject: [PATCH 491/998] documentation: fix typo in RegisterCodec godoc (#5306) --- encoding/encoding.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/encoding/encoding.go b/encoding/encoding.go index 6d84f74c7d08..18e530fc9024 100644 --- a/encoding/encoding.go +++ b/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. 
// // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { From dc86d5de854f99270c1343cdc8d22e7d0cbf0c59 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Apr 2022 13:13:23 -0700 Subject: [PATCH 492/998] internal/proto: update generated code (#5332) --- .../grpc_service_config/service_config.pb.go | 773 ++++++++++-------- 1 file changed, 431 insertions(+), 342 deletions(-) diff --git a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go index 6a18ea793a7e..d39f777577f9 100644 --- a/internal/proto/grpc_service_config/service_config.pb.go +++ b/internal/proto/grpc_service_config/service_config.pb.go @@ -167,7 +167,7 @@ func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConfig_LoadBalancingPolicy.Descriptor instead. func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18, 0} } // Configuration for a method. @@ -883,12 +883,7 @@ type XdsClusterResolverLoadBalancingPolicyConfig struct { // Results from each discovery mechanism are concatenated together in // successive priorities. DiscoveryMechanisms []*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism `protobuf:"bytes,1,rep,name=discovery_mechanisms,json=discoveryMechanisms,proto3" json:"discovery_mechanisms,omitempty"` - // xDS LB policy. - // This represents the xDS LB policy, which does not necessarily map - // one-to-one to a gRPC LB policy. 
Currently, the following policies - // are supported: - // - "ROUND_ROBIN" (config is empty) - // - "RING_HASH" (config is a RingHashLoadBalancingConfig) + // xDS LB policy. Will be used as the child config of the xds_cluster_impl LB policy. XdsLbPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=xds_lb_policy,json=xdsLbPolicy,proto3" json:"xds_lb_policy,omitempty"` } @@ -1292,6 +1287,54 @@ func (x *LrsLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { return nil } +// Configuration for the xds_wrr_locality load balancing policy. +type XdsWrrLocalityLoadBalancingPolicyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` +} + +func (x *XdsWrrLocalityLoadBalancingPolicyConfig) Reset() { + *x = XdsWrrLocalityLoadBalancingPolicyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XdsWrrLocalityLoadBalancingPolicyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XdsWrrLocalityLoadBalancingPolicyConfig) ProtoMessage() {} + +func (x *XdsWrrLocalityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XdsWrrLocalityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
+func (*XdsWrrLocalityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{15} +} + +func (x *XdsWrrLocalityLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { + if x != nil { + return x.ChildPolicy + } + return nil +} + // Configuration for xds LB policy. type XdsConfig struct { state protoimpl.MessageState @@ -1325,7 +1368,7 @@ type XdsConfig struct { func (x *XdsConfig) Reset() { *x = XdsConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1338,7 +1381,7 @@ func (x *XdsConfig) String() string { func (*XdsConfig) ProtoMessage() {} func (x *XdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1351,7 +1394,7 @@ func (x *XdsConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. func (*XdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{15} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} } // Deprecated: Do not use. 
@@ -1424,13 +1467,14 @@ type LoadBalancingConfig struct { // *LoadBalancingConfig_EdsExperimental // *LoadBalancingConfig_Xds // *LoadBalancingConfig_XdsExperimental + // *LoadBalancingConfig_XdsWrrLocalityExperimental Policy isLoadBalancingConfig_Policy `protobuf_oneof:"policy"` } func (x *LoadBalancingConfig) Reset() { *x = LoadBalancingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1443,7 +1487,7 @@ func (x *LoadBalancingConfig) String() string { func (*LoadBalancingConfig) ProtoMessage() {} func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1456,7 +1500,7 @@ func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LoadBalancingConfig.ProtoReflect.Descriptor instead. 
func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17} } func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { @@ -1575,6 +1619,13 @@ func (x *LoadBalancingConfig) GetXdsExperimental() *XdsConfig { return nil } +func (x *LoadBalancingConfig) GetXdsWrrLocalityExperimental() *XdsWrrLocalityLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsWrrLocalityExperimental); ok { + return x.XdsWrrLocalityExperimental + } + return nil +} + type isLoadBalancingConfig_Policy interface { isLoadBalancingConfig_Policy() } @@ -1649,6 +1700,10 @@ type LoadBalancingConfig_XdsExperimental struct { XdsExperimental *XdsConfig `protobuf:"bytes,5,opt,name=xds_experimental,proto3,oneof"` } +type LoadBalancingConfig_XdsWrrLocalityExperimental struct { + XdsWrrLocalityExperimental *XdsWrrLocalityLoadBalancingPolicyConfig `protobuf:"bytes,16,opt,name=xds_wrr_locality_experimental,proto3,oneof"` +} + func (*LoadBalancingConfig_PickFirst) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_RoundRobin) isLoadBalancingConfig_Policy() {} @@ -1679,6 +1734,8 @@ func (*LoadBalancingConfig_Xds) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_XdsExperimental) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_XdsWrrLocalityExperimental) isLoadBalancingConfig_Policy() {} + // A ServiceConfig represents information about a service but is not specific to // any name resolver. 
type ServiceConfig struct { @@ -1701,7 +1758,7 @@ type ServiceConfig struct { func (x *ServiceConfig) Reset() { *x = ServiceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1714,7 +1771,7 @@ func (x *ServiceConfig) String() string { func (*ServiceConfig) ProtoMessage() {} func (x *ServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1727,7 +1784,7 @@ func (x *ServiceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig.ProtoReflect.Descriptor instead. func (*ServiceConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18} } // Deprecated: Do not use. 
@@ -1805,7 +1862,7 @@ type MethodConfig_Name struct { func (x *MethodConfig_Name) Reset() { *x = MethodConfig_Name{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1818,7 +1875,7 @@ func (x *MethodConfig_Name) String() string { func (*MethodConfig_Name) ProtoMessage() {} func (x *MethodConfig_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1877,7 +1934,7 @@ type MethodConfig_RetryPolicy struct { func (x *MethodConfig_RetryPolicy) Reset() { *x = MethodConfig_RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1890,7 +1947,7 @@ func (x *MethodConfig_RetryPolicy) String() string { func (*MethodConfig_RetryPolicy) ProtoMessage() {} func (x *MethodConfig_RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1972,7 +2029,7 @@ type MethodConfig_HedgingPolicy struct { func (x *MethodConfig_HedgingPolicy) Reset() { *x = MethodConfig_HedgingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[21] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1985,7 +2042,7 @@ func (x *MethodConfig_HedgingPolicy) String() string { func (*MethodConfig_HedgingPolicy) ProtoMessage() {} func (x *MethodConfig_HedgingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2058,7 +2115,7 @@ type OutlierDetectionLoadBalancingConfig_SuccessRateEjection struct { func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) Reset() { *x = OutlierDetectionLoadBalancingConfig_SuccessRateEjection{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2071,7 +2128,7 @@ func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) String() strin func (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoMessage() {} func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2145,7 +2202,7 @@ type OutlierDetectionLoadBalancingConfig_FailurePercentageEjection struct { func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) Reset() { *x = OutlierDetectionLoadBalancingConfig_FailurePercentageEjection{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2158,7 +2215,7 @@ func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) String() func (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoMessage() {} func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2219,7 +2276,7 @@ type PriorityLoadBalancingPolicyConfig_Child struct { func (x *PriorityLoadBalancingPolicyConfig_Child) Reset() { *x = PriorityLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2232,7 +2289,7 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) String() string { func (*PriorityLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2274,7 +2331,7 @@ type WeightedTargetLoadBalancingPolicyConfig_Target struct { func (x *WeightedTargetLoadBalancingPolicyConfig_Target) Reset() { *x = WeightedTargetLoadBalancingPolicyConfig_Target{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[25] + mi := &file_grpc_service_config_service_config_proto_msgTypes[26] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2287,7 +2344,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) String() string { func (*WeightedTargetLoadBalancingPolicyConfig_Target) ProtoMessage() {} func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[25] + mi := &file_grpc_service_config_service_config_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2328,7 +2385,7 @@ type XdsClusterManagerLoadBalancingPolicyConfig_Child struct { func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) Reset() { *x = XdsClusterManagerLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[27] + mi := &file_grpc_service_config_service_config_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2341,7 +2398,7 @@ func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) String() string { func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[27] + mi := &file_grpc_service_config_service_config_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2376,7 +2433,7 @@ type XdsServer_ChannelCredentials struct { func (x *XdsServer_ChannelCredentials) Reset() { *x = XdsServer_ChannelCredentials{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[29] + mi := &file_grpc_service_config_service_config_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ 
-2389,7 +2446,7 @@ func (x *XdsServer_ChannelCredentials) String() string { func (*XdsServer_ChannelCredentials) ProtoMessage() {} func (x *XdsServer_ChannelCredentials) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[29] + mi := &file_grpc_service_config_service_config_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2463,7 +2520,7 @@ type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism struct { func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Reset() { *x = XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[30] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2476,7 +2533,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) String( func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoMessage() {} func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[30] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2562,7 +2619,7 @@ type XdsClusterImplLoadBalancingPolicyConfig_DropCategory struct { func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Reset() { *x = XdsClusterImplLoadBalancingPolicyConfig_DropCategory{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] + mi := &file_grpc_service_config_service_config_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
ms.StoreMessageInfo(mi) } @@ -2575,7 +2632,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) String() string { func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoMessage() {} func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] + mi := &file_grpc_service_config_service_config_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2619,7 +2676,7 @@ type LrsLoadBalancingPolicyConfig_Locality struct { func (x *LrsLoadBalancingPolicyConfig_Locality) Reset() { *x = LrsLoadBalancingPolicyConfig_Locality{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[32] + mi := &file_grpc_service_config_service_config_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2632,7 +2689,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) String() string { func (*LrsLoadBalancingPolicyConfig_Locality) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[32] + mi := &file_grpc_service_config_service_config_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2703,7 +2760,7 @@ type ServiceConfig_RetryThrottlingPolicy struct { func (x *ServiceConfig_RetryThrottlingPolicy) Reset() { *x = ServiceConfig_RetryThrottlingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[33] + mi := &file_grpc_service_config_service_config_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2716,7 +2773,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) String() 
string { func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[33] + mi := &file_grpc_service_config_service_config_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2729,7 +2786,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Messag // Deprecated: Use ServiceConfig_RetryThrottlingPolicy.ProtoReflect.Descriptor instead. func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18, 0} } func (x *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { @@ -2758,7 +2815,7 @@ type ServiceConfig_HealthCheckConfig struct { func (x *ServiceConfig_HealthCheckConfig) Reset() { *x = ServiceConfig_HealthCheckConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[34] + mi := &file_grpc_service_config_service_config_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2771,7 +2828,7 @@ func (x *ServiceConfig_HealthCheckConfig) String() string { func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[34] + mi := &file_grpc_service_config_service_config_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2784,7 +2841,7 @@ func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig_HealthCheckConfig.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17, 1} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18, 1} } func (x *ServiceConfig_HealthCheckConfig) GetServiceName() *wrapperspb.StringValue { @@ -3228,175 +3285,191 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, - 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, - 0x61, 0x6c, 0x6c, 
0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, - 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, - 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x86, 0x0c, 0x0a, 0x13, 0x4c, 0x6f, + 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0x76, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x57, 0x72, 0x72, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, - 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, - 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, - 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, - 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x68, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, - 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, - 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, - 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, - 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 
0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, - 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, - 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, - 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, - 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, + 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe0, + 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, + 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, - 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, - 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, - 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, - 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, + 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 
0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x22, 0x8d, 0x0d, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, + 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, + 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x68, 0x0a, 0x11, + 0x6f, 0x75, 0x74, 
0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, + 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, + 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, - 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, - 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, - 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 
0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, - 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, - 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, - 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, - 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, + 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, 0x64, 0x73, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 
0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 
0x5f, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, + 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, + 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, + 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, + 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 
0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, - 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, + 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, + 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, + 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, + 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, + 0x64, 0x73, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x77, 0x72, + 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 
0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, + 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x13, 0x6c, 0x6f, 0x61, 0x64, 
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, - 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, - 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, - 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, - 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, - 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, - 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, - 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, - 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, + 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, + 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 
0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -3412,7 +3485,7 @@ func file_grpc_service_config_service_config_proto_rawDescGZIP() []byte { } var file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 36) var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type)(0), // 0: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type (ServiceConfig_LoadBalancingPolicy)(0), // 1: grpc.service_config.ServiceConfig.LoadBalancingPolicy @@ -3431,122 +3504,125 @@ var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ (*EdsLoadBalancingPolicyConfig)(nil), // 14: grpc.service_config.EdsLoadBalancingPolicyConfig (*RingHashLoadBalancingConfig)(nil), // 15: grpc.service_config.RingHashLoadBalancingConfig (*LrsLoadBalancingPolicyConfig)(nil), // 16: grpc.service_config.LrsLoadBalancingPolicyConfig - (*XdsConfig)(nil), // 17: grpc.service_config.XdsConfig - (*LoadBalancingConfig)(nil), // 18: 
grpc.service_config.LoadBalancingConfig - (*ServiceConfig)(nil), // 19: grpc.service_config.ServiceConfig - (*MethodConfig_Name)(nil), // 20: grpc.service_config.MethodConfig.Name - (*MethodConfig_RetryPolicy)(nil), // 21: grpc.service_config.MethodConfig.RetryPolicy - (*MethodConfig_HedgingPolicy)(nil), // 22: grpc.service_config.MethodConfig.HedgingPolicy - (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection)(nil), // 23: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection - (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection)(nil), // 24: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection - (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 25: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - nil, // 26: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 27: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - nil, // 28: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 29: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - nil, // 30: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - (*XdsServer_ChannelCredentials)(nil), // 31: grpc.service_config.XdsServer.ChannelCredentials - (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 32: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 33: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 34: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - (*ServiceConfig_RetryThrottlingPolicy)(nil), // 35: grpc.service_config.ServiceConfig.RetryThrottlingPolicy - (*ServiceConfig_HealthCheckConfig)(nil), // 36: 
grpc.service_config.ServiceConfig.HealthCheckConfig - (*wrapperspb.BoolValue)(nil), // 37: google.protobuf.BoolValue - (*durationpb.Duration)(nil), // 38: google.protobuf.Duration - (*wrapperspb.UInt32Value)(nil), // 39: google.protobuf.UInt32Value - (*structpb.Value)(nil), // 40: google.protobuf.Value - (*wrapperspb.StringValue)(nil), // 41: google.protobuf.StringValue - (code.Code)(0), // 42: google.rpc.Code - (*structpb.Struct)(nil), // 43: google.protobuf.Struct + (*XdsWrrLocalityLoadBalancingPolicyConfig)(nil), // 17: grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig + (*XdsConfig)(nil), // 18: grpc.service_config.XdsConfig + (*LoadBalancingConfig)(nil), // 19: grpc.service_config.LoadBalancingConfig + (*ServiceConfig)(nil), // 20: grpc.service_config.ServiceConfig + (*MethodConfig_Name)(nil), // 21: grpc.service_config.MethodConfig.Name + (*MethodConfig_RetryPolicy)(nil), // 22: grpc.service_config.MethodConfig.RetryPolicy + (*MethodConfig_HedgingPolicy)(nil), // 23: grpc.service_config.MethodConfig.HedgingPolicy + (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection)(nil), // 24: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection + (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection)(nil), // 25: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection + (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 26: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + nil, // 27: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 28: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + nil, // 29: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 30: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + nil, // 31: 
grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + (*XdsServer_ChannelCredentials)(nil), // 32: grpc.service_config.XdsServer.ChannelCredentials + (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 33: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 34: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 35: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + (*ServiceConfig_RetryThrottlingPolicy)(nil), // 36: grpc.service_config.ServiceConfig.RetryThrottlingPolicy + (*ServiceConfig_HealthCheckConfig)(nil), // 37: grpc.service_config.ServiceConfig.HealthCheckConfig + (*wrapperspb.BoolValue)(nil), // 38: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 39: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 40: google.protobuf.UInt32Value + (*structpb.Value)(nil), // 41: google.protobuf.Value + (*wrapperspb.StringValue)(nil), // 42: google.protobuf.StringValue + (code.Code)(0), // 43: google.rpc.Code + (*structpb.Struct)(nil), // 44: google.protobuf.Struct } var file_grpc_service_config_service_config_proto_depIdxs = []int32{ - 20, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name - 37, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue - 38, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration - 39, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value - 39, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value - 21, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy - 22, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name 
-> grpc.service_config.MethodConfig.HedgingPolicy - 38, // 7: grpc.service_config.OutlierDetectionLoadBalancingConfig.interval:type_name -> google.protobuf.Duration - 38, // 8: grpc.service_config.OutlierDetectionLoadBalancingConfig.base_ejection_time:type_name -> google.protobuf.Duration - 38, // 9: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_time:type_name -> google.protobuf.Duration - 39, // 10: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_percent:type_name -> google.protobuf.UInt32Value - 23, // 11: grpc.service_config.OutlierDetectionLoadBalancingConfig.success_rate_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection - 24, // 12: grpc.service_config.OutlierDetectionLoadBalancingConfig.failure_percentage_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection - 18, // 13: grpc.service_config.OutlierDetectionLoadBalancingConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 14: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 38, // 15: grpc.service_config.GrpcLbConfig.initial_fallback_timeout:type_name -> google.protobuf.Duration - 26, // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 28, // 17: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 30, // 18: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - 31, // 19: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials - 40, // 20: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value - 32, // 21: 
grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - 18, // 22: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig - 41, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 21, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name + 38, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue + 39, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration + 40, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value + 40, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value + 22, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy + 23, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy + 39, // 7: grpc.service_config.OutlierDetectionLoadBalancingConfig.interval:type_name -> google.protobuf.Duration + 39, // 8: grpc.service_config.OutlierDetectionLoadBalancingConfig.base_ejection_time:type_name -> google.protobuf.Duration + 39, // 9: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_time:type_name -> google.protobuf.Duration + 40, // 10: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_percent:type_name -> google.protobuf.UInt32Value + 24, // 11: grpc.service_config.OutlierDetectionLoadBalancingConfig.success_rate_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection + 25, // 12: 
grpc.service_config.OutlierDetectionLoadBalancingConfig.failure_percentage_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection + 19, // 13: grpc.service_config.OutlierDetectionLoadBalancingConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 19, // 14: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 39, // 15: grpc.service_config.GrpcLbConfig.initial_fallback_timeout:type_name -> google.protobuf.Duration + 27, // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + 29, // 17: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + 31, // 18: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + 32, // 19: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials + 41, // 20: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value + 33, // 21: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + 19, // 22: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig + 42, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue 11, // 24: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 39, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> 
google.protobuf.UInt32Value - 33, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - 18, // 27: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 41, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 18, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 30: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 34, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 18, // 32: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 33: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 34: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 41, // 35: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 3, // 36: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig - 4, // 37: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig - 5, // 38: grpc.service_config.LoadBalancingConfig.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig - 6, // 39: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig - 7, // 40: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig - 8, // 41: 
grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 9, // 42: grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig - 10, // 43: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig - 12, // 44: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - 13, // 45: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - 15, // 46: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig - 16, // 47: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig - 14, // 48: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 17, // 49: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 17, // 50: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig - 1, // 51: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 18, // 52: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 53: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 35, // 54: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 36, // 55: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 38, // 56: 
grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 38, // 57: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 42, // 58: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 38, // 59: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 42, // 60: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 39, // 61: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value - 39, // 62: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 39, // 63: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 39, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> google.protobuf.UInt32Value - 39, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value - 39, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 39, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 39, // 68: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value - 18, // 69: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 25, // 70: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> 
grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 18, // 71: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 27, // 72: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 18, // 73: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 29, // 74: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - 43, // 75: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct - 41, // 76: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 77: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 39, // 78: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 0, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type - 5, // 80: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig - 41, // 81: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 82, // [82:82] is the sub-list for method output_type - 82, // [82:82] is the sub-list for method input_type - 82, // [82:82] is the sub-list for extension type_name - 82, // [82:82] is the 
sub-list for extension extendee - 0, // [0:82] is the sub-list for field type_name + 40, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 34, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + 19, // 27: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 42, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 19, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 19, // 30: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 35, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + 19, // 32: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 19, // 33: grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 19, // 34: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 19, // 35: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig + 42, // 36: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 3, // 37: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig + 4, // 38: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig + 5, // 39: grpc.service_config.LoadBalancingConfig.outlier_detection:type_name 
-> grpc.service_config.OutlierDetectionLoadBalancingConfig + 6, // 40: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig + 7, // 41: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig + 8, // 42: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig + 9, // 43: grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig + 10, // 44: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig + 12, // 45: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig + 13, // 46: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig + 15, // 47: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig + 16, // 48: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig + 14, // 49: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig + 18, // 50: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig + 18, // 51: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig + 17, // 52: grpc.service_config.LoadBalancingConfig.xds_wrr_locality_experimental:type_name -> grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig + 1, // 53: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy + 19, // 54: 
grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig + 2, // 55: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig + 36, // 56: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy + 37, // 57: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig + 39, // 58: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration + 39, // 59: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration + 43, // 60: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code + 39, // 61: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration + 43, // 62: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code + 40, // 63: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value + 40, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 40, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 40, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> google.protobuf.UInt32Value + 40, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value + 40, // 68: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 40, // 69: 
grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 40, // 70: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value + 19, // 71: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig + 26, // 72: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + 19, // 73: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 28, // 74: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + 19, // 75: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 30, // 76: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + 44, // 77: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct + 42, // 78: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 11, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 40, // 80: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 0, // 81: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> 
grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type + 5, // 82: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig + 42, // 83: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue + 84, // [84:84] is the sub-list for method output_type + 84, // [84:84] is the sub-list for method input_type + 84, // [84:84] is the sub-list for extension type_name + 84, // [84:84] is the sub-list for extension extendee + 0, // [0:84] is the sub-list for field type_name } func init() { file_grpc_service_config_service_config_proto_init() } @@ -3736,7 +3812,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsConfig); i { + switch v := v.(*XdsWrrLocalityLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3748,7 +3824,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancingConfig); i { + switch v := v.(*XdsConfig); i { case 0: return &v.state case 1: @@ -3760,7 +3836,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig); i { + switch v := v.(*LoadBalancingConfig); i { case 0: return &v.state case 1: @@ -3772,7 +3848,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_Name); i { + switch v := v.(*ServiceConfig); i { case 0: return &v.state case 1: @@ -3784,7 
+3860,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_RetryPolicy); i { + switch v := v.(*MethodConfig_Name); i { case 0: return &v.state case 1: @@ -3796,7 +3872,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_HedgingPolicy); i { + switch v := v.(*MethodConfig_RetryPolicy); i { case 0: return &v.state case 1: @@ -3808,7 +3884,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutlierDetectionLoadBalancingConfig_SuccessRateEjection); i { + switch v := v.(*MethodConfig_HedgingPolicy); i { case 0: return &v.state case 1: @@ -3820,7 +3896,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection); i { + switch v := v.(*OutlierDetectionLoadBalancingConfig_SuccessRateEjection); i { case 0: return &v.state case 1: @@ -3832,6 +3908,18 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { case 0: return 
&v.state @@ -3843,7 +3931,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { case 0: return &v.state @@ -3855,7 +3943,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig_Child); i { case 0: return &v.state @@ -3867,7 +3955,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsServer_ChannelCredentials); i { case 0: return &v.state @@ -3879,7 +3967,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism); i { case 0: return &v.state @@ -3891,7 +3979,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { 
switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig_DropCategory); i { case 0: return &v.state @@ -3903,7 +3991,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LrsLoadBalancingPolicyConfig_Locality); i { case 0: return &v.state @@ -3915,7 +4003,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_RetryThrottlingPolicy); i { case 0: return &v.state @@ -3927,7 +4015,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_HealthCheckConfig); i { case 0: return &v.state @@ -3944,7 +4032,7 @@ func file_grpc_service_config_service_config_proto_init() { (*MethodConfig_RetryPolicy_)(nil), (*MethodConfig_HedgingPolicy_)(nil), } - file_grpc_service_config_service_config_proto_msgTypes[16].OneofWrappers = []interface{}{ + file_grpc_service_config_service_config_proto_msgTypes[17].OneofWrappers = []interface{}{ (*LoadBalancingConfig_PickFirst)(nil), (*LoadBalancingConfig_RoundRobin)(nil), (*LoadBalancingConfig_OutlierDetection)(nil), @@ -3960,6 +4048,7 @@ func file_grpc_service_config_service_config_proto_init() { (*LoadBalancingConfig_EdsExperimental)(nil), (*LoadBalancingConfig_Xds)(nil), (*LoadBalancingConfig_XdsExperimental)(nil), + 
(*LoadBalancingConfig_XdsWrrLocalityExperimental)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -3967,7 +4056,7 @@ func file_grpc_service_config_service_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_service_config_service_config_proto_rawDesc, NumEnums: 2, - NumMessages: 35, + NumMessages: 36, NumExtensions: 0, NumServices: 0, }, From de73b2b645559c1030954e4fb836148ac7b13f82 Mon Sep 17 00:00:00 2001 From: stepbystep2 <770954908@qq.com> Date: Tue, 3 May 2022 03:02:47 +0800 Subject: [PATCH 493/998] examples: improve error messages (#5329) --- examples/route_guide/client/client.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/route_guide/client/client.go b/examples/route_guide/client/client.go index a96e32b5619d..7d24b88b1ced 100644 --- a/examples/route_guide/client/client.go +++ b/examples/route_guide/client/client.go @@ -51,7 +51,7 @@ func printFeature(client pb.RouteGuideClient, point *pb.Point) { defer cancel() feature, err := client.GetFeature(ctx, point) if err != nil { - log.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) + log.Fatalf("client.GetFeature failed: %v", err) } log.Println(feature) } @@ -63,7 +63,7 @@ func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { defer cancel() stream, err := client.ListFeatures(ctx, rect) if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + log.Fatalf("client.ListFeatures failed: %v", err) } for { feature, err := stream.Recv() @@ -71,7 +71,7 @@ func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { break } if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + log.Fatalf("client.ListFeatures failed: %v", err) } log.Printf("Feature: name: %q, point:(%v, %v)", feature.GetName(), feature.GetLocation().GetLatitude(), feature.GetLocation().GetLongitude()) @@ -92,16 +92,16 @@ func runRecordRoute(client pb.RouteGuideClient) { defer cancel() stream, err := 
client.RecordRoute(ctx) if err != nil { - log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) + log.Fatalf("client.RecordRoute failed: %v", err) } for _, point := range points { if err := stream.Send(point); err != nil { - log.Fatalf("%v.Send(%v) = %v", stream, point, err) + log.Fatalf("client.RecordRoute: stream.Send(%v) failed: %v", point, err) } } reply, err := stream.CloseAndRecv() if err != nil { - log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + log.Fatalf("client.RecordRoute failed: %v", err) } log.Printf("Route summary: %v", reply) } @@ -120,7 +120,7 @@ func runRouteChat(client pb.RouteGuideClient) { defer cancel() stream, err := client.RouteChat(ctx) if err != nil { - log.Fatalf("%v.RouteChat(_) = _, %v", client, err) + log.Fatalf("client.RouteChat failed: %v", err) } waitc := make(chan struct{}) go func() { @@ -132,14 +132,14 @@ func runRouteChat(client pb.RouteGuideClient) { return } if err != nil { - log.Fatalf("Failed to receive a note : %v", err) + log.Fatalf("client.RouteChat failed: %v", err) } log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { - log.Fatalf("Failed to send a note: %v", err) + log.Fatalf("client.RouteChat: stream.Send(%v) failed: %v", note, err) } } stream.CloseSend() From 78b13f27dedb6f51c78b5bf40cd0b16086af4038 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 May 2022 13:39:18 -0700 Subject: [PATCH 494/998] xds/client: hold authority mutex before making a new authority (#5331) --- xds/internal/xdsclient/authority.go | 6 +++--- xds/internal/xdsclient/loadreport.go | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index cfe6fc865aff..9bc4588c14e0 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -55,7 +55,7 @@ func (c *clientImpl) 
findAuthority(n *xdsresource.Name) (_ *authority, unref fun config = cfg.XDSServer } - a, err := c.newAuthority(config) + a, err := c.newAuthorityLocked(config) if err != nil { return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) } @@ -73,14 +73,14 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun return a, func() { c.unrefAuthority(a) }, nil } -// newAuthority creates a new authority for the config. But before that, it +// newAuthorityLocked creates a new authority for the config. But before that, it // checks the cache to see if an authority for this config already exists. // // The caller must take a reference of the returned authority before using, and // unref afterwards. // // caller must hold c.authorityMu -func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, retErr error) { +func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { // First check if there's already an authority for this config. If found, it // means this authority is used by other watches (could be the same // authority name, or a different authority name but the same server diff --git a/xds/internal/xdsclient/loadreport.go b/xds/internal/xdsclient/loadreport.go index 32c7e9c9d791..cba5afd454a7 100644 --- a/xds/internal/xdsclient/loadreport.go +++ b/xds/internal/xdsclient/loadreport.go @@ -28,7 +28,9 @@ import ( // It returns a Store for the user to report loads, a function to cancel the // load reporting stream. 
func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) { - a, err := c.newAuthority(server) + c.authorityMu.Lock() + a, err := c.newAuthorityLocked(server) + c.authorityMu.Unlock() if err != nil { c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err) return nil, func() {} From 799605c22887b6c9b28fb2aab7c53ad624c45b25 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 4 May 2022 10:06:12 -0700 Subject: [PATCH 495/998] client: fix potential panic during RPC retries (#5323) --- clientconn.go | 6 +- internal/transport/http2_client.go | 20 +-- picker_wrapper.go | 8 +- stream.go | 195 +++++++++++++++-------------- test/end2end_test.go | 7 +- test/retry_test.go | 143 +++++++++++++++------ 6 files changed, 228 insertions(+), 151 deletions(-) diff --git a/clientconn.go b/clientconn.go index 3ed6eb8e75e3..ea9836d28b3c 100644 --- a/clientconn.go +++ b/clientconn.go @@ -907,14 +907,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 38ed3d566fff..24ca59084b43 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -631,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // the wire. However, there are two notable exceptions: // // 1. 
If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. // 2. If the credentials errored when requesting their headers. In this case, // it's possible a retry can fix the problem, but indefinitely transparently // retrying is not appropriate as it is likely the credentials, if they can @@ -640,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -650,11 +649,11 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -754,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. 
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -768,9 +768,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } if t.statsHandler != nil { diff --git a/picker_wrapper.go b/picker_wrapper.go index e8367cb8993b..843633c910a1 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. 
+type dropError struct { + error +} diff --git a/stream.go b/stream.go index c72aac5877ce..236fc17ec3c4 100644 --- a/stream.go +++ b/stream.go @@ -303,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */) + if err != nil { cs.finish(err) return nil, err } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } @@ -349,9 +363,15 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. 
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method sh := cs.cc.dopts.copts.StatsHandler @@ -385,44 +405,39 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + + return &csAttempt{ ctx: ctx, beginTime: beginTime, cs: cs, dc: cs.cc.dopts.dc, statsHandler: sh, trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() + }, nil +} - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } +func (a *csAttempt) getTransport() error { + cs := a.cs - if cs.cc.parsedTarget.Scheme == "xds" { - // Add extra metadata (metadata that will be added by transport) to context - // so the balancer can see them. 
- ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( - "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), - )) - } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -431,12 +446,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -514,6 +538,11 @@ type csAttempt struct { statsHandler stats.Handler beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -533,41 +562,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. 
- nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -579,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. 
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -603,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -651,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -673,7 +687,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. 
- return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -703,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -734,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -752,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -803,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. 
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, + Message: data, }) } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { diff --git a/test/end2end_test.go b/test/end2end_test.go index 766d568b9277..3c38e00006af 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -1508,7 +1508,7 @@ func testFailFast(t *testing.T, e env) { cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) @@ -1517,9 +1517,10 @@ func testFailFast(t *testing.T, e env) { te.srv.Stop() // Loop until the server teardown is propagated to the client. 
for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + if err := ctx.Err(); err != nil { + t.Fatalf("EmptyCall did not return UNAVAILABLE before timeout") + } _, err := tc.EmptyCall(ctx, &testpb.Empty{}) - cancel() if status.Code(err) == codes.Unavailable { break } diff --git a/test/retry_test.go b/test/retry_test.go index 1013e54ce051..01b074e4015c 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" @@ -44,7 +45,8 @@ import ( func (s) TestRetryUnary(t *testing.T) { i := -1 ss := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + EmptyCallF: func(context.Context, *testpb.Empty) (r *testpb.Empty, err error) { + defer func() { t.Logf("server call %v returning err %v", i, err) }() i++ switch i { case 0, 2, 5: @@ -55,11 +57,8 @@ func (s) TestRetryUnary(t *testing.T) { return nil, status.New(codes.AlreadyExists, "retryable error").Err() }, } - if err := ss.Start([]grpc.ServerOption{}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ + if err := ss.Start([]grpc.ServerOption{}, + grpc.WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, @@ -70,18 +69,10 @@ func (s) TestRetryUnary(t *testing.T) { "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "ALREADY_EXISTS" ] } - }]}`) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - for { - if ctx.Err() != nil { - t.Fatalf("Timed out waiting for service config update") - } - if ss.CC.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { - break - } - time.Sleep(time.Millisecond) + }]}`)); 
err != nil { + t.Fatalf("Error starting endpoint server: %v", err) } - cancel() + defer ss.Stop() testCases := []struct { code codes.Code @@ -95,7 +86,8 @@ func (s) TestRetryUnary(t *testing.T) { {codes.Internal, 11}, {codes.AlreadyExists, 15}, } - for _, tc := range testCases { + for num, tc := range testCases { + t.Log("Case", num) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) cancel() @@ -120,11 +112,8 @@ func (s) TestRetryThrottling(t *testing.T) { return nil, status.New(codes.Unavailable, "retryable error").Err() }, } - if err := ss.Start([]grpc.ServerOption{}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ + if err := ss.Start([]grpc.ServerOption{}, + grpc.WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, @@ -140,18 +129,10 @@ func (s) TestRetryThrottling(t *testing.T) { "maxTokens": 10, "tokenRatio": 0.5 } - }`) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - for { - if ctx.Err() != nil { - t.Fatalf("Timed out waiting for service config update") - } - if ss.CC.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { - break - } - time.Sleep(time.Millisecond) + }`)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) } - cancel() + defer ss.Stop() testCases := []struct { code codes.Code @@ -430,11 +411,8 @@ func (s) TestRetryStreaming(t *testing.T) { return nil }, } - if err := ss.Start([]grpc.ServerOption{}, grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(200))); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ + if err := ss.Start([]grpc.ServerOption{}, grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(200)), + grpc.WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [{"service": 
"grpc.testing.TestService"}], "waitForReady": true, @@ -445,7 +423,10 @@ func (s) TestRetryStreaming(t *testing.T) { "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "UNAVAILABLE" ] } - }]}`) + }]}`)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) for { if ctx.Err() != nil { @@ -644,3 +625,85 @@ func (s) TestRetryStats(t *testing.T) { t.Fatalf("pushback time before final attempt = %v; want ~10ms", diff) } } + +func (s) TestRetryTransparentWhenCommitted(t *testing.T) { + // With MaxConcurrentStreams=1: + // + // 1. Create stream 1 that is retriable. + // 2. Stream 1 is created and fails with a retriable code. + // 3. Create dummy stream 2, blocking indefinitely. + // 4. Stream 1 retries (and blocks until stream 2 finishes) + // 5. Stream 1 is canceled manually. + // + // If there is no bug, the stream is done and errors with CANCELED. With a bug: + // + // 6. Stream 1 has a nil stream (attempt.s). Operations like CloseSend will panic. + + first := grpcsync.NewEvent() + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + // signal? 
+ if !first.HasFired() { + first.Fire() + t.Log("returned first error") + return status.Error(codes.AlreadyExists, "first attempt fails and is retriable") + } + t.Log("blocking") + <-stream.Context().Done() + return stream.Context().Err() + }, + } + + if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, + grpc.WithDefaultServiceConfig(`{ + "methodConfig": [{ + "name": [{"service": "grpc.testing.TestService"}], + "waitForReady": true, + "retryPolicy": { + "MaxAttempts": 2, + "InitialBackoff": ".1s", + "MaxBackoff": ".1s", + "BackoffMultiplier": 1.0, + "RetryableStatusCodes": [ "ALREADY_EXISTS" ] + } + }]}`)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx1, cancel1 := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel1() + ctx2, cancel2 := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel2() + + stream1, err := ss.Client.FullDuplexCall(ctx1) + if err != nil { + t.Fatalf("Error creating stream 1: %v", err) + } + + // Create dummy stream to block indefinitely. + _, err = ss.Client.FullDuplexCall(ctx2) + if err != nil { + t.Errorf("Error creating stream 2: %v", err) + } + + stream1Closed := grpcsync.NewEvent() + go func() { + _, err := stream1.Recv() + // Will trigger a retry when it sees the ALREADY_EXISTS error + if status.Code(err) != codes.Canceled { + t.Errorf("Expected stream1 to be canceled; got error: %v", err) + } + stream1Closed.Fire() + }() + + // Wait longer than the retry backoff timer. + time.Sleep(200 * time.Millisecond) + cancel1() + + // Operations on the stream should not panic. 
+ <-stream1Closed.Done() + stream1.CloseSend() + stream1.Recv() + stream1.Send(&testpb.StreamingOutputCallRequest{}) +} From ee67b3d8e9521dd9f45b25ba1b09095e10b0db05 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 5 May 2022 16:08:24 -0400 Subject: [PATCH 496/998] xds: Handle loops and ignore duplicates in aggregated cluster handling (#5317) xds: Handle loops and ignore duplicates in aggregated cluster handling --- .../balancer/cdsbalancer/cluster_handler.go | 163 ++++--- .../cdsbalancer/cluster_handler_test.go | 424 ++++++++++++++++++ 2 files changed, 531 insertions(+), 56 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index a10d8d772f2b..234511a45dcf 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -24,7 +24,12 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") +const maxDepth = 16 + +var ( + errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") + errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth") +) // clusterHandlerUpdate wraps the information received from the registered CDS // watcher. A non-nil error is propagated to the underlying cluster_resolver @@ -54,9 +59,10 @@ type clusterHandler struct { // A mutex to protect entire tree of clusters. clusterMutex sync.Mutex - root *clusterNode rootClusterName string + createdClusters map[string]*clusterNode + // A way to ping CDS Balancer about any updates or errors to a Node in the // tree. This will either get called from this handler constructing an // update or from a child with an error. 
Capacity of one as the only update @@ -66,39 +72,48 @@ type clusterHandler struct { func newClusterHandler(parent *cdsBalancer) *clusterHandler { return &clusterHandler{ - parent: parent, - updateChannel: make(chan clusterHandlerUpdate, 1), + parent: parent, + updateChannel: make(chan clusterHandlerUpdate, 1), + createdClusters: make(map[string]*clusterNode), } } func (ch *clusterHandler) updateRootCluster(rootClusterName string) { ch.clusterMutex.Lock() defer ch.clusterMutex.Unlock() - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { // Construct a root node on first update. - ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) ch.rootClusterName = rootClusterName return } // Check if root cluster was changed. If it was, delete old one and start // new one, if not do nothing. if rootClusterName != ch.rootClusterName { - ch.root.delete() - ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + ch.createdClusters[ch.rootClusterName].delete() + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) ch.rootClusterName = rootClusterName } } // This function tries to construct a cluster update to send to CDS. func (ch *clusterHandler) constructClusterUpdate() { - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { // If root is nil, this handler is closed, ignore the update. return } - clusterUpdate, err := ch.root.constructClusterUpdate() + clusterUpdate, err := ch.createdClusters[ch.rootClusterName].constructClusterUpdate(make(map[string]bool)) if err != nil { - // If there was an error received no op, as this simply means one of the - // children hasn't received an update yet. + // If there was an error received no op, as this can mean one of the + // children hasn't received an update yet, or the graph continued to + // stay in an error state. 
If the graph continues to stay in an error + // state, no new error needs to be written to the update buffer as that + // would be redundant information. + return + } + if clusterUpdate == nil { + // This means that there was an aggregated cluster with no EDS or DNS as + // leaf nodes. No update to be written. return } // For a ClusterUpdate, the only update CDS cares about is the most @@ -109,8 +124,8 @@ func (ch *clusterHandler) constructClusterUpdate() { default: } ch.updateChannel <- clusterHandlerUpdate{ - securityCfg: ch.root.clusterUpdate.SecurityCfg, - lbPolicy: ch.root.clusterUpdate.LBPolicy, + securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, + lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, updates: clusterUpdate, } } @@ -120,11 +135,10 @@ func (ch *clusterHandler) constructClusterUpdate() { func (ch *clusterHandler) close() { ch.clusterMutex.Lock() defer ch.clusterMutex.Unlock() - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { return } - ch.root.delete() - ch.root = nil + ch.createdClusters[ch.rootClusterName].delete() ch.rootClusterName = "" } @@ -136,7 +150,7 @@ type clusterNode struct { cancelFunc func() // A list of children, as the Node can be an aggregate Cluster. - children []*clusterNode + children []string // A ClusterUpdate in order to build a list of cluster updates for CDS to // send down to child XdsClusterResolverLoadBalancingPolicy. @@ -149,13 +163,30 @@ type clusterNode struct { receivedUpdate bool clusterHandler *clusterHandler + + depth int32 + refCount int32 + + // maxDepthErr is set if this cluster node is an aggregate cluster and has a + // child that causes the graph to exceed the maximum depth allowed. This is + // used to show a cluster graph as being in an error state when it constructs + // a cluster update. + maxDepthErr error } // CreateClusterNode creates a cluster node from a given clusterName. This will // also start the watch for that cluster. 
-func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler) *clusterNode { +func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler, depth int32) { + // If the cluster has already been created, simply return, which ignores + // duplicates. + if topLevelHandler.createdClusters[clusterName] != nil { + topLevelHandler.createdClusters[clusterName].refCount++ + return + } c := &clusterNode{ clusterHandler: topLevelHandler, + depth: depth, + refCount: 1, } // Communicate with the xds client here. topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) @@ -164,25 +195,43 @@ func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLev topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) cancel() } - return c + topLevelHandler.createdClusters[clusterName] = c } // This function cancels the cluster watch on the cluster and all of it's // children. func (c *clusterNode) delete() { - c.cancelFunc() - for _, child := range c.children { - child.delete() + c.refCount-- + if c.refCount == 0 { + c.cancelFunc() + delete(c.clusterHandler.createdClusters, c.clusterUpdate.ClusterName) + for _, child := range c.children { + if c.clusterHandler.createdClusters[child] != nil { + c.clusterHandler.createdClusters[child].delete() + } + } } } // Construct cluster update (potentially a list of ClusterUpdates) for a node. -func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, error) { +func (c *clusterNode) constructClusterUpdate(clustersSeen map[string]bool) ([]xdsresource.ClusterUpdate, error) { // If the cluster has not yet received an update, the cluster update is not // yet ready. if !c.receivedUpdate { return nil, errNotReceivedUpdate } + if c.maxDepthErr != nil { + return nil, c.maxDepthErr + } + // Ignore duplicates. 
It's ok to ignore duplicates because the second + // occurrence of a cluster will never be used. I.e. in [C, D, C], the second + // C will never be used (the only way to fall back to lower priority D is if + // C is down, which means second C will never be chosen). Thus, [C, D, C] is + // logically equivalent to [C, D]. + if clustersSeen[c.clusterUpdate.ClusterName] { + return []xdsresource.ClusterUpdate{}, nil + } + clustersSeen[c.clusterUpdate.ClusterName] = true // Base case - LogicalDNS or EDS. Both of these cluster types will be tied // to a single ClusterUpdate. @@ -194,7 +243,7 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, err // it's children. var childrenUpdates []xdsresource.ClusterUpdate for _, child := range c.children { - childUpdateList, err := child.constructClusterUpdate() + childUpdateList, err := c.clusterHandler.createdClusters[child].constructClusterUpdate(clustersSeen) if err != nil { return nil, err } @@ -219,6 +268,8 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er default: } c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} + c.receivedUpdate = false + c.maxDepthErr = nil return } @@ -233,9 +284,10 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er // cluster. if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { for _, child := range c.children { - child.delete() + c.clusterHandler.createdClusters[child].delete() } c.children = nil + c.maxDepthErr = nil // This is an update in the one leaf node, should try to send an update // to the parent CDS balancer. // @@ -248,6 +300,22 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er } // Aggregate cluster handling. 
+ if len(clusterUpdate.PrioritizedClusterNames) >= 1 { + if c.depth == maxDepth-1 { + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-c.clusterHandler.updateChannel: + default: + } + c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: errExceedsMaxDepth} + c.children = []string{} + c.maxDepthErr = errExceedsMaxDepth + return + } + } + newChildren := make(map[string]bool) for _, childName := range clusterUpdate.PrioritizedClusterNames { newChildren[childName] = true @@ -261,59 +329,42 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er // the update to build (ex. if a child is created and a watch is started, // that child hasn't received an update yet due to the mutex lock on this // callback). - var createdChild, deletedChild bool + var createdChild bool // This map will represent the current children of the cluster. It will be // first added to in order to represent the new children. It will then have - // any children deleted that are no longer present. Then, from the cluster - // update received, will be used to construct the new child list. - mapCurrentChildren := make(map[string]*clusterNode) + // any children deleted that are no longer present. + mapCurrentChildren := make(map[string]bool) for _, child := range c.children { - mapCurrentChildren[child.clusterUpdate.ClusterName] = child + mapCurrentChildren[child] = true } // Add and construct any new child nodes. for child := range newChildren { if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { - createdChild = true - mapCurrentChildren[child] = createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler) + createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler, c.depth+1) } } // Delete any child nodes no longer in the aggregate cluster's children. 
for child := range mapCurrentChildren { if _, stillAChild := newChildren[child]; !stillAChild { - deletedChild = true - mapCurrentChildren[child].delete() + c.clusterHandler.createdClusters[child].delete() delete(mapCurrentChildren, child) } } - // The order of the children list matters, so use the clusterUpdate from - // xdsclient as the ordering, and use that logical ordering for the new - // children list. This will be a mixture of child nodes which are all - // already constructed in the mapCurrentChildrenMap. - var children = make([]*clusterNode, 0, len(clusterUpdate.PrioritizedClusterNames)) - - for _, orderedChild := range clusterUpdate.PrioritizedClusterNames { - // The cluster's already have watches started for them in xds client, so - // you can use these pointers to construct the new children list, you - // just have to put them in the correct order using the original cluster - // update. - currentChild := mapCurrentChildren[orderedChild] - children = append(children, currentChild) - } - - c.children = children + c.children = clusterUpdate.PrioritizedClusterNames + c.maxDepthErr = nil // If the cluster is an aggregate cluster, if this callback created any new // child cluster nodes, then there's no possibility for a full cluster // update to successfully build, as those created children will not have - // received an update yet. However, if there was simply a child deleted, - // then there is a possibility that it will have a full cluster update to - // build and also will have a changed overall cluster update from the - // deleted child. - if deletedChild && !createdChild { + // received an update yet. Even if this update did not delete a child, there + // is still a possibility for the cluster update to build, as the aggregate + // cluster can ignore duplicated children and thus the update can fill out + // the full cluster update tree. 
+ if !createdChild { c.clusterHandler.constructClusterUpdate() } } diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index b58f06d0eb27..caf10955014f 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -19,6 +19,7 @@ package cdsbalancer import ( "context" "errors" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -683,3 +684,426 @@ func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { t.Fatal("Timed out waiting for update from update channel.") } } + +// TestExceedsMaxStackDepth tests the scenario where an aggregate cluster +// exceeds the maximum depth, which is 16. This should cause an error to be +// written to the update buffer. +func (s) TestExceedsMaxStackDepth(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("cluster0") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + for i := 0; i <= 15; i++ { + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster" + fmt.Sprint(i), + PrioritizedClusterNames: []string{"cluster" + fmt.Sprint(i+1)}, + }, nil) + if i == 15 { + // The 16th iteration will try and create a cluster which exceeds + // max stack depth and will thus error, so no CDS Watch will be + // started for the child. 
+ continue + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + } + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "aggregate cluster graph exceeds max depth" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for an error to be written to update channel.") + } +} + +// TestDiamondDependency tests a diamond shaped aggregate cluster (A->[B,C]; +// B->D; C->D). Due to both B and C pointing to D as it's child, it should be +// ignored for C. Once all 4 clusters have received a CDS update, an update +// should be then written to the update buffer, specifying a single Cluster D. +func (s) TestDiamondDependency(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("clusterA") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB", "clusterC"}, + }, nil) + // Two watches should be started for both child clusters. + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + // B -> D. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterB", + PrioritizedClusterNames: []string{"clusterD"}, + }, nil) + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + // This shouldn't cause an update to be written to the update buffer, + // as cluster C has not received a cluster update yet. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }, nil) + + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("an update should not have been written to the update buffer") + case <-sCtx.Done(): + } + + // This update for C should cause an update to be written to the update + // buffer. When you search this aggregated cluster graph, each node has + // received an update. This update should only contain one clusterD, as + // clusterC does not add a clusterD child update due to the clusterD update + // already having been added as a child of clusterB. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterC", + PrioritizedClusterNames: []string{"clusterD"}, + }, nil) + + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestIgnoreDups tests the cluster (A->[B, C]; B->[C, D]). Only one watch +// should be started for cluster C. 
The update written to the update buffer +// should only contain one instance of cluster C correctly as a higher priority +// than D. +func (s) TestIgnoreDups(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("clusterA") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB", "clusterC"}, + }, nil) + // Two watches should be started, one for each child cluster. + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + // The child cluster C should not have a watch started for it, as it is + // already part of the aggregate cluster graph as the child of the root + // cluster clusterA and has already had a watch started for it. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterB", + PrioritizedClusterNames: []string{"clusterC", "clusterD"}, + }, nil) + // Only one watch should be started, which should be for clusterD. 
+ name, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if name != "clusterD" { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: clusterD", name) + } + + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err = fakeClient.WaitForWatchCluster(sCtx); err == nil { + t.Fatalf("only one watch should have been started for the children of clusterB") + } + + // This update should not cause an update to be written to the update + // buffer, as each cluster in the tree has not yet received a cluster + // update. With cluster B ignoring cluster C, the system should function as + // if cluster C was not a child of cluster B (meaning all 4 clusters should + // be required to get an update). + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterC", + }, nil) + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("an update should not have been written to the update buffer") + case <-sCtx.Done(): + } + + // This update causes all 4 clusters in the aggregated cluster graph to have + // received an update, so an update should be written to the update buffer + // with only a single occurrence of cluster C as a higher priority than + // cluster D. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }, nil) + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterC", + }, { + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } + + // Delete A's ref to C by updating A with only child B. Since B still has a + // reference to C, C's watch should not be canceled, and also an update + // should correctly be built. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB"}, + }, nil) + + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterC", + }, { + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestErrorStateWholeTree tests the scenario where the aggregate cluster graph +// exceeds max depth. An error should be written to the update channel. +// Afterward, if a valid response comes in for another cluster, no update should +// be written to the update channel, as the aggregate cluster graph is still in +// the same error state. 
+func (s) TestErrorStateWholeTree(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("cluster0") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + for i := 0; i <= 15; i++ { + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster" + fmt.Sprint(i), + PrioritizedClusterNames: []string{"cluster" + fmt.Sprint(i+1)}, + }, nil) + if i == 15 { + // The 16th iteration will try and create a cluster which exceeds + // max stack depth and will thus error, so no CDS Watch will be + // started for the child. + continue + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + } + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "aggregate cluster graph exceeds max depth" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for an error to be written to update channel.") + } + + // Invoke a cluster callback for a node in the graph that rests within the + // allowed depth. This will cause the system to try and construct a cluster + // update, and it shouldn't write an update as the aggregate cluster graph + // is still in an error state. Since the graph continues to stay in an error + // state, no new error needs to be written to the update buffer as that + // would be redundant information. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster3", + PrioritizedClusterNames: []string{"cluster4"}, + }, nil) + + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("an update should not have been written to the update buffer") + case <-sCtx.Done(): + } + + // Invoke the same cluster update for cluster 15, specifying it has a child + // cluster16. This should cause an error to be written to the update buffer, + // as it still exceeds the max depth. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster15", + PrioritizedClusterNames: []string{"cluster16"}, + }, nil) + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "aggregate cluster graph exceeds max depth" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for an error to be written to update channel.") + } + + // When you remove the child of cluster15 that causes the graph to be in the + // error state of exceeding max depth, the update should successfully + // construct and be written to the update buffer. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "cluster15", + }, nil) + + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "cluster15", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestNodeChildOfItself tests the scenario where the aggregate cluster graph +// has a node that has child node of itself. The case for this is A -> A, and +// since there is no base cluster (EDS or Logical DNS), no update should be +// written if it tries to build a cluster update. +func (s) TestNodeChildOfItself(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("clusterA") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + // Invoke the callback informing the cluster handler that clusterA has a + // child that it is itself. Due to this child cluster being a duplicate, no + // watch should be started. Since there are no leaf nodes (i.e. EDS or + // Logical DNS), no update should be written to the update buffer. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterA"}, + }, nil) + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := fakeClient.WaitForWatchCluster(sCtx); err == nil { + t.Fatal("Watch should not have been started for clusterA") + } + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("update should not have been written to update buffer") + case <-sCtx.Done(): + } + + // Invoke the callback again informing the cluster handler that clusterA has + // a child that it is itself. Due to this child cluster being a duplicate, + // no watch should be started. Since there are no leaf nodes (i.e. EDS or + // Logical DNS), no update should be written to the update buffer. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterA"}, + }, nil) + + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := fakeClient.WaitForWatchCluster(sCtx); err == nil { + t.Fatal("Watch should not have been started for clusterA") + } + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("update should not have been written to update buffer, as clusterB has not received an update yet") + case <-sCtx.Done(): + } + + // Inform the cluster handler that clusterA now has clusterB as a child. 
+ // This should not cancel the watch for A, as it is still the root cluster + // and still has a ref count, not write an update to update buffer as + // cluster B has not received an update yet, and start a new watch for + // cluster B as it is not a duplicate. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB"}, + }, nil) + + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := fakeClient.WaitForCancelClusterWatch(sCtx); err == nil { + t.Fatal("clusterA should not have been canceled, as it is still the root cluster") + } + + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("update should not have been written to update buffer, as clusterB has not received an update yet") + case <-sCtx.Done(): + } + + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != "clusterB" { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, "clusterB") + } +} From 5c46f1aa4953a7c68fe445dd739b2db9d69a9890 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 9 May 2022 11:06:30 -0700 Subject: [PATCH 497/998] xdsclient/csds: fix leaked metadata (#5339) --- xds/internal/xdsclient/pubsub/update.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/pubsub/update.go b/xds/internal/xdsclient/pubsub/update.go index 371405b67972..9ae6ae976712 100644 --- a/xds/internal/xdsclient/pubsub/update.go +++ b/xds/internal/xdsclient/pubsub/update.go @@ -232,7 +232,7 @@ func (pb *Pubsub) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTup // from cache, and also send an resource not found error to indicate // resource removed. 
delete(pb.cdsCache, name) - pb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + pb.cdsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for wi := range pb.cdsWatchers[name] { wi.resourceNotFound() } From 462d8676960963b161face2f81ed9a48ecf67f9e Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 9 May 2022 16:13:07 -0400 Subject: [PATCH 498/998] xds: Add Outlier Detection configuration and CDS handling (#5299) xds: Add Outlier Detection configuration and CDS handling --- .../balancer/cdsbalancer/cdsbalancer.go | 51 +++++ .../cdsbalancer/cdsbalancer_security_test.go | 14 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 199 ++++++++++++++++-- .../balancer/clusterresolver/config.go | 6 + .../balancer/outlierdetection/config.go | 184 ++++++++++++++++ .../xdsclient/xdsresource/type_cds.go | 2 +- 6 files changed, 435 insertions(+), 21 deletions(-) create mode 100644 xds/internal/balancer/outlierdetection/config.go diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 0be796c47bad..d057ed66a53c 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -36,6 +37,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" 
"google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -270,6 +272,52 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } +func outlierDetectionToConfig(od *xdsresource.OutlierDetection) *outlierdetection.LBConfig { // Already validated - no need to return error + if od == nil { + // "If the outlier_detection field is not set in the Cluster message, a + // "no-op" outlier_detection config will be generated, with interval set + // to the maximum possible value and all other fields unset." - A50 + return &outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + } + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *outlierdetection.SuccessRateEjection + if od.EnforcingSuccessRate != 0 { + sre = &outlierdetection.SuccessRateEjection{ + StdevFactor: od.SuccessRateStdevFactor, + EnforcementPercentage: od.EnforcingSuccessRate, + MinimumHosts: od.SuccessRateMinimumHosts, + RequestVolume: od.SuccessRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." 
- A50 + var fpe *outlierdetection.FailurePercentageEjection + if od.EnforcingFailurePercentage != 0 { + fpe = &outlierdetection.FailurePercentageEjection{ + Threshold: od.FailurePercentageThreshold, + EnforcementPercentage: od.EnforcingFailurePercentage, + MinimumHosts: od.FailurePercentageMinimumHosts, + RequestVolume: od.FailurePercentageRequestVolume, + } + } + + return &outlierdetection.LBConfig{ + Interval: od.Interval, + BaseEjectionTime: od.BaseEjectionTime, + MaxEjectionTime: od.MaxEjectionTime, + MaxEjectionPercent: od.MaxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } +} + // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying cluster_resolver balancer. func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { @@ -342,6 +390,9 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { default: b.logger.Infof("unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) } + if envconfig.XDSOutlierDetection { + dms[i].OutlierDetection = outlierDetectionToConfig(cu.OutlierDetection) + } } lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: dms, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index cd93dd0ecd84..c58990ab34d1 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -250,7 +250,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -306,7 +306,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -462,7 +462,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -496,7 +496,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -549,7 +549,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -599,7 +599,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -677,7 +677,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { SubjectAltNameMatchers: testSANMatchers, }, } - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 112d25df3332..b15481f318b2 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -29,12 +29,14 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" @@ -49,10 +51,15 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. 
) -var defaultTestAuthorityServerConfig = &bootstrap.ServerConfig{ - ServerURI: "self_server", - CredsType: "self_creds", -} +var ( + defaultTestAuthorityServerConfig = &bootstrap.ServerConfig{ + ServerURI: "self_server", + CredsType: "self_creds", + } + noopODLBCfg = &outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + } +) type s struct { grpctest.Tester @@ -208,11 +215,12 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { // edsCCS is a helper function to construct a good update passed from the // cdsBalancer to the edsBalancer. -func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *internalserviceconfig.BalancerConfig) balancer.ClientConnState { +func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *internalserviceconfig.BalancerConfig, odConfig *outlierdetection.LBConfig) balancer.ClientConnState { discoveryMechanism := clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: service, MaxConcurrentRequests: countMax, + OutlierDetection: odConfig, } if enableLRS { discoveryMechanism.LoadReportingServer = defaultTestAuthorityServerConfig @@ -358,11 +366,14 @@ func (s) TestUpdateClientConnStateWithSameState(t *testing.T) { // different updates and verifies that the expect ClientConnState is propagated // to the edsBalancer. 
func (s) TestHandleClusterUpdate(t *testing.T) { + oldOutlierDetection := envconfig.XDSOutlierDetection + envconfig.XDSOutlierDetection = true xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) xdsC.SetBootstrapConfig(&bootstrap.Config{ XDSServer: defaultTestAuthorityServerConfig, }) defer func() { + envconfig.XDSOutlierDetection = oldOutlierDetection cancel() cdsB.Close() }() @@ -376,12 +387,12 @@ func (s) TestHandleClusterUpdate(t *testing.T) { { name: "happy-case-with-lrs", cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf}, - wantCCS: edsCCS(serviceName, nil, true, nil), + wantCCS: edsCCS(serviceName, nil, true, nil, noopODLBCfg), }, { name: "happy-case-without-lrs", cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName}, - wantCCS: edsCCS(serviceName, nil, false, nil), + wantCCS: edsCCS(serviceName, nil, false, nil, noopODLBCfg), }, { name: "happy-case-with-ring-hash-lb-policy", @@ -392,6 +403,41 @@ func (s) TestHandleClusterUpdate(t *testing.T) { wantCCS: edsCCS(serviceName, nil, false, &internalserviceconfig.BalancerConfig{ Name: ringhash.Name, Config: &ringhash.LBConfig{MinRingSize: 10, MaxRingSize: 100}, + }, noopODLBCfg), + }, + { + name: "happy-case-outlier-detection", + cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName, OutlierDetection: &xdsresource.OutlierDetection{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateStdevFactor: 1900, + EnforcingSuccessRate: 100, + SuccessRateMinimumHosts: 5, + SuccessRateRequestVolume: 100, + FailurePercentageThreshold: 85, + EnforcingFailurePercentage: 5, + FailurePercentageMinimumHosts: 5, + FailurePercentageRequestVolume: 50, + }}, + wantCCS: edsCCS(serviceName, nil, false, nil, &outlierdetection.LBConfig{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + 
SuccessRateEjection: &outlierdetection.SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + FailurePercentageEjection: &outlierdetection.FailurePercentageEjection{ + Threshold: 85, + EnforcementPercentage: 5, + MinimumHosts: 5, + RequestVolume: 50, + }, }), }, } @@ -460,7 +506,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -545,7 +591,7 @@ func (s) TestResolverError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -594,7 +640,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -629,7 +675,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // the service's counter with the new max requests. 
var maxRequests uint32 = 1 cdsUpdate := xdsresource.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} - wantCCS := edsCCS(clusterName, &maxRequests, false, nil) + wantCCS := edsCCS(clusterName, &maxRequests, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -662,7 +708,7 @@ func (s) TestClose(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -733,7 +779,7 @@ func (s) TestExitIdle(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, nil) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -795,3 +841,130 @@ func (s) TestParseConfig(t *testing.T) { }) } } + +func (s) TestOutlierDetectionToConfig(t *testing.T) { + tests := []struct { + name string + od *xdsresource.OutlierDetection + odLBCfgWant *outlierdetection.LBConfig + }{ + // "if the outlier_detection field is not set in the Cluster resource, + // a "no-op" outlier_detection config will be generated in the + // corresponding DiscoveryMechanism config, with interval set to the + // maximum possible value and all other fields unset." - A50 + { + name: "no-op-outlier-detection-config", + od: nil, + odLBCfgWant: noopODLBCfg, + }, + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* + // fields will be ignored." 
- A50 + { + name: "enforcing-success-rate-zero", + od: &xdsresource.OutlierDetection{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateStdevFactor: 1900, + EnforcingSuccessRate: 0, + SuccessRateMinimumHosts: 5, + SuccessRateRequestVolume: 100, + FailurePercentageThreshold: 85, + EnforcingFailurePercentage: 5, + FailurePercentageMinimumHosts: 5, + FailurePercentageRequestVolume: 50, + }, + odLBCfgWant: &outlierdetection.LBConfig{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateEjection: nil, + FailurePercentageEjection: &outlierdetection.FailurePercentageEjection{ + Threshold: 85, + EnforcementPercentage: 5, + MinimumHosts: 5, + RequestVolume: 50, + }, + }, + }, + // "If the enforcing_failure_percent field is set to 0 or null, the + // config failure_percent_ejection field will be null and all + // failure_percent_* fields will be ignored." 
- A50 + { + name: "enforcing-failure-percentage-zero", + od: &xdsresource.OutlierDetection{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateStdevFactor: 1900, + EnforcingSuccessRate: 100, + SuccessRateMinimumHosts: 5, + SuccessRateRequestVolume: 100, + FailurePercentageThreshold: 85, + EnforcingFailurePercentage: 0, + FailurePercentageMinimumHosts: 5, + FailurePercentageRequestVolume: 50, + }, + odLBCfgWant: &outlierdetection.LBConfig{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateEjection: &outlierdetection.SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + FailurePercentageEjection: nil, + }, + }, + { + name: "normal-conversion", + od: &xdsresource.OutlierDetection{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateStdevFactor: 1900, + EnforcingSuccessRate: 100, + SuccessRateMinimumHosts: 5, + SuccessRateRequestVolume: 100, + FailurePercentageThreshold: 85, + EnforcingFailurePercentage: 5, + FailurePercentageMinimumHosts: 5, + FailurePercentageRequestVolume: 50, + }, + odLBCfgWant: &outlierdetection.LBConfig{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateEjection: &outlierdetection.SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + FailurePercentageEjection: &outlierdetection.FailurePercentageEjection{ + Threshold: 85, + EnforcementPercentage: 5, + MinimumHosts: 5, + RequestVolume: 50, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + odLBCfgGot := outlierDetectionToConfig(test.od) + if diff := 
cmp.Diff(odLBCfgGot, test.odLBCfgWant); diff != "" { + t.Fatalf("outlierDetectionToConfig(%v) (-want, +got):\n%s", test.od, diff) + } + }) + } +} diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 363afd03ab2c..1cbffdfa52fd 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer/roundrobin" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -102,6 +103,9 @@ type DiscoveryMechanism struct { // DNSHostname is the DNS name to resolve in "host:port" form. For type // LOGICAL_DNS only. DNSHostname string `json:"dnsHostname,omitempty"` + // OutlierDetection is the Outlier Detection LB configuration for this + // priority. + OutlierDetection *outlierdetection.LBConfig `json:"outlierDetection,omitempty"` } // Equal returns whether the DiscoveryMechanism is the same with the parameter. @@ -117,6 +121,8 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { return false case dm.DNSHostname != b.DNSHostname: return false + case !dm.OutlierDetection.Equal(b.OutlierDetection): + return false } if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { diff --git a/xds/internal/balancer/outlierdetection/config.go b/xds/internal/balancer/outlierdetection/config.go new file mode 100644 index 000000000000..8b0cdcab4065 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/config.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package outlierdetection implements a balancer that implements +// Outlier Detection. +package outlierdetection + +import ( + "time" + + "github.com/google/go-cmp/cmp" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// SuccessRateEjection is parameters for the success rate ejection algorithm. +// This algorithm monitors the request success rate for all endpoints and ejects +// individual endpoints whose success rates are statistical outliers. +type SuccessRateEjection struct { + // StddevFactor is used to determine the ejection threshold for + // success rate outlier ejection. The ejection threshold is the difference + // between the mean success rate, and the product of this factor and the + // standard deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + StdevFactor uint32 `json:"stdevFactor,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually ejected + // when an outlier status is detected through success rate statistics. This + // setting can be used to disable ejection or to ramp it up slowly. Defaults + // to 100. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the number of hosts in a cluster that must have enough + // request volume to detect success rate outliers. 
If the number of hosts is + // less than this setting, outlier detection via success rate statistics is + // not performed for any host in the cluster. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // include this host in success rate based outlier detection. If the volume + // is lower than this setting, outlier detection via success rate statistics + // is not performed for that host. Defaults to 100. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// Equal returns whether the SuccessRateEjection is the same with the parameter. +func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { + if sre == nil && sre2 == nil { + return true + } + if (sre != nil) != (sre2 != nil) { + return false + } + if sre.StdevFactor != sre2.StdevFactor { + return false + } + if sre.EnforcementPercentage != sre2.EnforcementPercentage { + return false + } + if sre.MinimumHosts != sre2.MinimumHosts { + return false + } + return sre.RequestVolume == sre2.RequestVolume +} + +// FailurePercentageEjection is parameters for the failure percentage algorithm. +// This algorithm ejects individual endpoints whose failure rate is greater than +// some threshold, independently of any other endpoint. +type FailurePercentageEjection struct { + // Threshold is the failure percentage to use when determining failure + // percentage-based outlier detection. If the failure percentage of a given + // host is greater than or equal to this value, it will be ejected. Defaults + // to 85. + Threshold uint32 `json:"threshold,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually + // ejected when an outlier status is detected through failure percentage + // statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. 
+ EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the minimum number of hosts in a cluster in order to + // perform failure percentage-based ejection. If the total number of hosts + // in the cluster is less than this value, failure percentage-based ejection + // will not be performed. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // perform failure percentage-based ejection for this host. If the volume is + // lower than this setting, failure percentage-based ejection will not be + // performed for this host. Defaults to 50. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// Equal returns whether the FailurePercentageEjection is the same with the +// parameter. +func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { + if fpe == nil && fpe2 == nil { + return true + } + if (fpe != nil) != (fpe2 != nil) { + return false + } + if fpe.Threshold != fpe2.Threshold { + return false + } + if fpe.EnforcementPercentage != fpe2.EnforcementPercentage { + return false + } + if fpe.MinimumHosts != fpe2.MinimumHosts { + return false + } + return fpe.RequestVolume == fpe2.RequestVolume +} + +// LBConfig is the config for the outlier detection balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // Interval is the time interval between ejection analysis sweeps. This can + // result in both new ejections as well as addresses being returned to + // service. Defaults to 10s. + Interval time.Duration `json:"interval,omitempty"` + // BaseEjectionTime is the base time that a host is ejected for. The real + // time is equal to the base time multiplied by the number of times the host + // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. 
+ BaseEjectionTime time.Duration `json:"baseEjectionTime,omitempty"` + // MaxEjectionTime is the maximum time that an address is ejected for. If + // not specified, the default value (300s) or the BaseEjectionTime value is + // applied, whichever is larger. + MaxEjectionTime time.Duration `json:"maxEjectionTime,omitempty"` + // MaxEjectionPercent is the maximum % of an upstream cluster that can be + // ejected due to outlier detection. Defaults to 10% but will eject at least + // one host regardless of the value. + MaxEjectionPercent uint32 `json:"maxEjectionPercent,omitempty"` + // SuccessRateEjection is the parameters for the success rate ejection + // algorithm. If set, success rate ejections will be performed. + SuccessRateEjection *SuccessRateEjection `json:"successRateEjection,omitempty"` + // FailurePercentageEjection is the parameters for the failure percentage + // algorithm. If set, failure rate ejections will be performed. + FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` + // ChildPolicy is the config for the child policy. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// Equal returns whether the LBConfig is the same with the parameter. 
+func (lbc *LBConfig) Equal(lbc2 *LBConfig) bool { + if lbc == nil && lbc2 == nil { + return true + } + if (lbc != nil) != (lbc2 != nil) { + return false + } + if lbc.Interval != lbc2.Interval { + return false + } + if lbc.BaseEjectionTime != lbc2.BaseEjectionTime { + return false + } + if lbc.MaxEjectionTime != lbc2.MaxEjectionTime { + return false + } + if lbc.MaxEjectionPercent != lbc2.MaxEjectionPercent { + return false + } + if !lbc.SuccessRateEjection.Equal(lbc2.SuccessRateEjection) { + return false + } + if !lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) { + return false + } + return cmp.Equal(lbc.ChildPolicy, lbc2.ChildPolicy) +} diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go index b61a80b429c4..d459717acd23 100644 --- a/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/xds/internal/xdsclient/xdsresource/type_cds.go @@ -76,7 +76,7 @@ type OutlierDetection struct { // ejected due to outlier detection. Defaults to 10% but will eject at least // one host regardless of the value. MaxEjectionPercent uint32 - // SuccessRateStddevFactor is used to determine the ejection threshold for + // SuccessRateStdevFactor is used to determine the ejection threshold for // success rate outlier ejection. 
The ejection threshold is the difference // between the mean success rate, and the product of this factor and the // standard deviation of the mean success rate: mean - (stdev * From db79903af928ca8307700d83123a7702881c4e3c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 10 May 2022 16:56:41 -0700 Subject: [PATCH 499/998] xds/priority: start the init timer when a child switch to Connecting from non-transient-failure state (#5334) --- xds/internal/balancer/priority/balancer.go | 26 +-- .../balancer/priority/balancer_child.go | 50 ++++- .../balancer/priority/balancer_priority.go | 212 +++++------------- .../balancer/priority/balancer_test.go | 25 ++- 4 files changed, 128 insertions(+), 185 deletions(-) diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 98fd0672af42..d82bce761751 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -100,12 +100,6 @@ type priorityBalancer struct { childToPriority map[string]int // children is a map from child name to sub-balancers. children map[string]*childBalancer - // The timer to give a priority some time to connect. And if the priority - // doesn't go into Ready/Failure, the next priority will be started. - // - // One timer is enough because there can be at most one priority in init - // state. - priorityInitTimer *timerWrapper } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { @@ -198,27 +192,17 @@ func (b *priorityBalancer) Close() { // Clear states of the current child in use, so if there's a race in picker // update, it will be dropped. b.childInUse = "" - b.stopPriorityInitTimer() + // Stop the child policies, this is necessary to stop the init timers in the + // children. 
+ for _, child := range b.children { + child.stop() + } } func (b *priorityBalancer) ExitIdle() { b.bg.ExitIdle() } -// stopPriorityInitTimer stops the priorityInitTimer if it's not nil, and set it -// to nil. -// -// Caller must hold b.mu. -func (b *priorityBalancer) stopPriorityInitTimer() { - timerW := b.priorityInitTimer - if timerW == nil { - return - } - b.priorityInitTimer = nil - timerW.stopped = true - timerW.timer.Stop() -} - // UpdateState implements balancergroup.BalancerStateAggregator interface. The // balancer group sends new connectivity state and picker here. func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { diff --git a/xds/internal/balancer/priority/balancer_child.go b/xds/internal/balancer/priority/balancer_child.go index 600705da01af..95bb34f26252 100644 --- a/xds/internal/balancer/priority/balancer_child.go +++ b/xds/internal/balancer/priority/balancer_child.go @@ -19,6 +19,8 @@ package priority import ( + "time" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" @@ -36,7 +38,16 @@ type childBalancer struct { rState resolver.State started bool - state balancer.State + // This is set when the child reports TransientFailure, and unset when it + // reports Ready or Idle. It is used to decide whether the failover timer + // should start when the child is transitioning into Connecting. The timer + // will be restarted if the child has not reported TF more recently than it + // reported Ready or Idle. + reportedTF bool + state balancer.State + // The timer to give a priority some time to connect. And if the priority + // doesn't go into Ready/Failure, the next priority will be started. 
+ initTimer *timerWrapper } // newChildBalancer creates a child balancer place holder, but doesn't @@ -79,6 +90,7 @@ func (cb *childBalancer) start() { } cb.started = true cb.parent.bg.Add(cb.name, cb.bb) + cb.startInitTimer() } // sendUpdate sends the addresses and config to the child balancer. @@ -103,10 +115,46 @@ func (cb *childBalancer) stop() { if !cb.started { return } + cb.stopInitTimer() cb.parent.bg.Remove(cb.name) cb.started = false cb.state = balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), } + // Clear child.reportedTF, so that if this child is started later, it will + // be given time to connect. + cb.reportedTF = false +} + +func (cb *childBalancer) startInitTimer() { + if cb.initTimer != nil { + return + } + // Need this local variable to capture timerW in the AfterFunc closure + // to check the stopped boolean. + timerW := &timerWrapper{} + cb.initTimer = timerW + timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { + cb.parent.mu.Lock() + defer cb.parent.mu.Unlock() + if timerW.stopped { + return + } + cb.initTimer = nil + // Re-sync the priority. This will switch to the next priority if + // there's any. Note that it's important sync() is called after setting + // initTimer to nil. + cb.parent.syncPriority() + }) +} + +func (cb *childBalancer) stopInitTimer() { + timerW := cb.initTimer + if timerW == nil { + return + } + cb.initTimer = nil + timerW.stopped = true + timerW.timer.Stop() } diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 3a18f6e10d83..829e51f1141c 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -36,9 +36,10 @@ var ( DefaultPriorityInitTimeout = 10 * time.Second ) -// syncPriority handles priority after a config update. 
It makes sure the -// balancer state (started or not) is in sync with the priorities (even in -// tricky cases where a child is moved from a priority to another). +// syncPriority handles priority after a config update or a child balancer +// connectivity state update. It makes sure the balancer state (started or not) +// is in sync with the priorities (even in tricky cases where a child is moved +// from a priority to another). // // It's guaranteed that after this function returns: // - If some child is READY, it is childInUse, and all lower priorities are @@ -53,10 +54,13 @@ var ( // set parent ClientConn to TransientFailure // - Otherwise, Scan all children from p0, and check balancer stats: // - For any of the following cases: -// - If balancer is not started (not built), this is either a new child -// with high priority, or a new builder for an existing child. -// - If balancer is READY -// - If this is the lowest priority +// - If balancer is not started (not built), this is either a new child with +// high priority, or a new builder for an existing child. +// - If balancer is Connecting and has non-nil initTimer (meaning it +// transitioned from Ready or Idle to connecting, not from TF, so we +// should give it init-time to connect). +// - If balancer is READY +// - If this is the lowest priority // - do the following: // - if this is not the old childInUse, override picker so old picker is no // longer used. @@ -69,9 +73,6 @@ func (b *priorityBalancer) syncPriority() { if len(b.priorities) == 0 { b.childInUse = "" b.priorityInUse = 0 - // Stop the init timer. This can happen if the only priority is removed - // shortly after it's added. 
- b.stopPriorityInitTimer() b.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), @@ -89,6 +90,7 @@ func (b *priorityBalancer) syncPriority() { if !child.started || child.state.ConnectivityState == connectivity.Ready || child.state.ConnectivityState == connectivity.Idle || + (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { if b.childInUse != "" && b.childInUse != child.name { // childInUse was set and is different from this child, will @@ -123,8 +125,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { // - stop all child with lower priorities // - if childInUse is not this child // - set childInUse to this child -// - stops init timer -// - if this child is not started, start it, and start a init timer +// - if this child is not started, start it // // Note that it does NOT send the current child state (picker) to the parent // ClientConn. The caller needs to send it if necessary. @@ -156,33 +157,8 @@ func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { b.childInUse = child.name b.priorityInUse = priority - // Init timer is always for childInUse. Since we are switching to a - // different child, we will stop the init timer no matter what. If this - // child is not started, we will start the init timer later. - b.stopPriorityInitTimer() - if !child.started { child.start() - // Need this local variable to capture timerW in the AfterFunc closure - // to check the stopped boolean. - timerW := &timerWrapper{} - b.priorityInitTimer = timerW - timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { - b.mu.Lock() - defer b.mu.Unlock() - if timerW.stopped { - return - } - b.priorityInitTimer = nil - // Switch to the next priority if there's any. 
- if pNext := priority + 1; pNext < len(b.priorities) { - nameNext := b.priorities[pNext] - if childNext, ok := b.children[nameNext]; ok { - b.switchToChild(childNext, pNext) - childNext.sendUpdate() - } - } - }) } } @@ -222,141 +198,57 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S b.logger.Warningf("priority: child balancer not found for child %v, priority %v", childName, priority) return } - oldState := child.state.ConnectivityState + oldChildState := child.state child.state = s + // We start/stop the init timer of this child based on the new connectivity + // state. syncPriority() later will need the init timer (to check if it's + // nil or not) to decide which child to switch to. switch s.ConnectivityState { case connectivity.Ready, connectivity.Idle: - // Note that idle is also handled as if it's Ready. It will close the - // lower priorities (which will be kept in a cache, not deleted), and - // new picks will use the Idle picker. - b.handlePriorityWithNewStateReady(child, priority) + child.reportedTF = false + child.stopInitTimer() case connectivity.TransientFailure: - b.handlePriorityWithNewStateTransientFailure(child, priority) + child.reportedTF = true + child.stopInitTimer() case connectivity.Connecting: - b.handlePriorityWithNewStateConnecting(child, priority, oldState) + if !child.reportedTF { + child.startInitTimer() + } default: // New state is Shutdown, should never happen. Don't forward. } -} - -// handlePriorityWithNewStateReady handles state Ready from a higher or equal -// priority. -// -// An update with state Ready: -// - If it's from higher priority: -// - Switch to this priority -// - Forward the update -// - If it's from priorityInUse: -// - Forward only -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. 
-func (b *priorityBalancer) handlePriorityWithNewStateReady(child *childBalancer, priority int) { - // If one priority higher or equal to priorityInUse goes Ready, stop the - // init timer. If update is from higher than priorityInUse, priorityInUse - // will be closed, and the init timer will become useless. - b.stopPriorityInitTimer() - - // priorityInUse is lower than this priority, switch to this. - if b.priorityInUse > priority { - b.logger.Infof("Switching priority from %v to %v, because latter became Ready", b.priorityInUse, priority) - b.switchToChild(child, priority) - } - // Forward the update since it's READY. - b.cc.UpdateState(child.state) -} - -// handlePriorityWithNewStateTransientFailure handles state TransientFailure -// from a higher or equal priority. -// -// An update with state TransientFailure: -// - If it's from a higher priority: -// - Do not forward, and do nothing -// - If it's from priorityInUse: -// - If there's no lower: -// - Forward and do nothing else -// - If there's a lower priority: -// - Switch to the lower -// - Forward the lower child's state -// - Do NOT forward this update -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateTransientFailure(child *childBalancer, priority int) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - // priorityInUse sends a failure. Stop its init timer. - b.stopPriorityInitTimer() - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. 
- b.cc.UpdateState(child.state) - return - } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() -} -// handlePriorityWithNewStateConnecting handles state Connecting from a higher -// than or equal priority. -// -// An update with state Connecting: -// - If it's from a higher priority -// - Do nothing -// - If it's from priorityInUse, the behavior depends on previous state. -// -// When new state is Connecting, the behavior depends on previous state. If the -// previous state was Ready, this is a transition out from Ready to Connecting. -// Assuming there are multiple backends in the same priority, this mean we are -// in a bad situation and we should failover to the next priority (Side note: -// the current connectivity state aggregating algorithm (e.g. round-robin) is -// not handling this right, because if many backends all go from Ready to -// Connecting, the overall situation is more like TransientFailure, not -// Connecting). -// -// If the previous state was Idle, we don't do anything special with failure, -// and simply forward the update. The init timer should be in process, will -// handle failover if it timeouts. If the previous state was TransientFailure, -// we do not forward, because the lower priority is in use. -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateConnecting(child *childBalancer, priority int, oldState connectivity.State) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - - switch oldState { - case connectivity.Ready: - // Handling transition from Ready to Connecting, is same as handling - // TransientFailure. 
There's no need to stop the init timer, because it - // should have been stopped when state turned Ready. - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. - b.cc.UpdateState(child.state) + oldPriorityInUse := b.priorityInUse + child.parent.syncPriority() + // If child is switched by syncPriority(), it also sends the update from the + // new child to overwrite the old picker used by the parent. + // + // But no update is sent if the child is not switches. That means if this + // update is from childInUse, and this child is still childInUse after + // syncing, the update being handled here is not sent to the parent. In that + // case, we need to do an explicit check here to forward the update. + if b.priorityInUse == oldPriorityInUse && b.priorityInUse == priority { + // Special handling for Connecting. If child was not switched, and this + // is a Connecting->Connecting transition, do not send the redundant + // update, since all Connecting pickers are the same (they tell the RPCs + // to repick). + // + // This can happen because the initial state of a child (before any + // update is received) is Connecting. When the child is started, it's + // picker is sent to the parent by syncPriority (to overwrite the old + // picker if there's any). When it reports Connecting after being + // started, it will send a Connecting update (handled here), causing a + // Connecting->Connecting transition. 
+ if oldChildState.ConnectivityState == connectivity.Connecting && s.ConnectivityState == connectivity.Connecting { return } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() - case connectivity.Idle: + // Only forward this update if sync() didn't switch child, and this + // child is in use. + // + // sync() forwards the update if the child was switched, so there's no + // need to forward again. b.cc.UpdateState(child.state) - default: - // Old state is Connecting, TransientFailure or Shutdown. Don't forward. } + } diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index e8963898727c..1683cafc5379 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -414,9 +414,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { t.Fatalf("want %v, got %v", want, err) } - // Turn 0 to Connecting, will start and use 1. Because 0 changing from Ready - // to Connecting is a failure. - pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // Turn 0 to TransientFailure, will start and use 1. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. @@ -741,6 +740,9 @@ func (s) TestPriority_InitTimeout(t *testing.T) { } sc1 := <-cc.NewSubConnCh + // After the init timer of p0, when switching to p1, a connecting picker + // will be sent to the parent. Clear it here. 
+ <-cc.NewPickerCh pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -859,6 +861,7 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // Don't send any update to p0, so to not override the old state of p0. // Later, connect to p1 and then remove p1. This will fallback to p0, and // will send p0's old picker if they are not correctly removed. + <-cc.NewPickerCh // Clear the picker from old p0. // p1 will be used after priority init timeout. addrs11 := <-cc.NewSubConnAddrsCh @@ -1851,6 +1854,22 @@ func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[0]) } + // The ClientConn state update triggers a priority switch, from p0 -> p0 + // (since p0 is still in use). Along with this the update, p0 also gets a + // ClientConn state update, with the addresses, which didn't change in this + // test (this update to the child is necessary in case the addresses are + // different). + // + // The test child policy, initIdleBalancer, blindly calls NewSubConn with + // all the addresses it receives, so this will trigger a NewSubConn with the + // old p0 addresses. (Note that in a real balancer, like roundrobin, no new + // SubConn will be created because the addresses didn't change). + // + // Clear those from the channel so the rest of the test can get the expected + // behavior. + <-cc.NewSubConnAddrsCh + <-cc.NewSubConnCh + // Turn p0 down, to start p1. 
pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs From d9b952b1706072a434320115194b98687bfeaffe Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 16 May 2022 14:15:22 -0700 Subject: [PATCH 500/998] xds/resolver: use correct resource name in log message (#5357) --- xds/internal/resolver/watch_service.go | 4 +++ xds/internal/resolver/xds_resolver.go | 36 ++++++++++++-------------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 3db9be1cac07..000927c541f9 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -58,6 +58,10 @@ type ldsConfig struct { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. +// +// TODO(easwars): Make this function a method on the xdsResolver type. +// Currently, there is a single call site for this function, and all arguments +// passed to it are fields of the xdsResolver type. func watchService(c xdsclient.XDSClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { w := &serviceUpdateWatcher{ logger: logger, diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index c4b147d21efb..8a613c4c44f6 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -63,9 +63,8 @@ type xdsResolverBuilder struct { // // The xds bootstrap process is performed (and a new xds client is built) every // time an xds resolver is built. 
-func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { +func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { r := &xdsResolver{ - target: t, cc: cc, closed: grpcsync.NewEvent(), updateCh: make(chan suWithError, 1), @@ -77,7 +76,7 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op } }() r.logger = prefixLogger(r) - r.logger.Infof("Creating resolver for target: %+v", t) + r.logger.Infof("Creating resolver for target: %+v", target) newXDSClient := newXDSClient if b.newXDSClient != nil { @@ -115,7 +114,7 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op // - If authority is not set in the target, use the top level template // - If authority is set, use the template from the authority map. template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate - if authority := r.target.URL.Host; authority != "" { + if authority := target.URL.Host; authority != "" { a := bootstrapConfig.Authorities[authority] if a == nil { return nil, fmt.Errorf("xds: authority %q is not found in the bootstrap file", authority) @@ -127,19 +126,19 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op template = a.ClientListenerResourceNameTemplate } } - endpoint := r.target.URL.Path + endpoint := target.URL.Path if endpoint == "" { - endpoint = r.target.URL.Opaque + endpoint = target.URL.Opaque } endpoint = strings.TrimPrefix(endpoint, "/") - resourceName := bootstrap.PopulateResourceTemplate(template, endpoint) + r.ldsResourceName = bootstrap.PopulateResourceTemplate(template, endpoint) - // Register a watch on the xdsClient for the user's dial target. 
- cancelWatch := watchService(r.client, resourceName, r.handleServiceUpdate, r.logger) - r.logger.Infof("Watch started on resource name %v with xds-client %p", r.target.Endpoint, r.client) + // Register a watch on the xdsClient for the resource name determined above. + cancelWatch := watchService(r.client, r.ldsResourceName, r.handleServiceUpdate, r.logger) + r.logger.Infof("Watch started on resource name %v with xds-client %p", r.ldsResourceName, r.client) r.cancelWatch = func() { cancelWatch() - r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.target.Endpoint, r.client) + r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.ldsResourceName, r.client) } go r.run() @@ -165,11 +164,10 @@ type suWithError struct { // (which performs LDS/RDS queries for the same), and passes the received // updates to the ClientConn. type xdsResolver struct { - target resolver.Target - cc resolver.ClientConn - closed *grpcsync.Event - - logger *grpclog.PrefixLogger + cc resolver.ClientConn + closed *grpcsync.Event + logger *grpclog.PrefixLogger + ldsResourceName string // The underlying xdsClient which performs all xDS requests and responses. client xdsclient.XDSClient @@ -212,7 +210,7 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { r.cc.ReportError(err) return false } - r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, pretty.FormatJSON(sc)) + r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.ldsResourceName, r.client, pretty.FormatJSON(sc)) // Send the update to the ClientConn. 
state := iresolver.SetConfigSelector(resolver.State{ @@ -231,7 +229,7 @@ func (r *xdsResolver) run() { return case update := <-r.updateCh: if update.err != nil { - r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.target.Endpoint, r.client, update.err) + r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.ldsResourceName, r.client, update.err) if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { // If error is resource-not-found, it means the LDS // resource was removed. Ultimately send an empty service @@ -259,7 +257,7 @@ func (r *xdsResolver) run() { // Create the config selector for this update. cs, err := r.newConfigSelector(update.su) if err != nil { - r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.target.Endpoint, r.client, err) + r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.ldsResourceName, r.client, err) r.cc.ReportError(err) continue } From e23132c6575e004c7ae416186d97d40057f0b928 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 17 May 2022 17:03:18 -0400 Subject: [PATCH 501/998] Added support for metadata matcher invert (#5345) Added support for metadata matcher invert --- internal/xds/rbac/matchers.go | 7 ++- internal/xds/rbac/rbac_engine_test.go | 72 ++++++++++++++++++++++++++- 2 files changed, 76 insertions(+), 3 deletions(-) diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 6f30c8016e2b..9873da268db6 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -122,8 +122,11 @@ func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, err } matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) case *v3rbacpb.Permission_Metadata: - // Not supported in gRPC RBAC currently - a permission typed as - // Metadata in the initial config will be a no-op. 
+ // Never matches - so no-op if not inverted, always match if + // inverted. + if permission.GetMetadata().GetInvert() { // Test metadata being no-op and also metadata with invert always matching + matchers = append(matchers, &alwaysMatcher{}) + } case *v3rbacpb.Permission_RequestedServerName: // Not supported in gRPC RBAC currently - a permission typed as // requested server name in the initial config will be a no-op. diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go index e2e5d98c2a88..19bc4e8ca891 100644 --- a/internal/xds/rbac/rbac_engine_test.go +++ b/internal/xds/rbac/rbac_engine_test.go @@ -936,8 +936,78 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, + // This test tests that an RBAC policy configured with a metadata + // matcher as a permission doesn't match with any incoming RPC. + { + name: "metadata-never-matches", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "metadata-never-matches": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Metadata{ + Metadata: &v3matcherpb.MetadataMatcher{}, + }}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *rpcData + wantStatusCode codes.Code + }{ + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // This test tests that an RBAC policy configured with a metadata + // matcher with invert set to true as a permission always matches with + // any incoming RPC. 
+ { + name: "metadata-invert-always-matches", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "metadata-invert-always-matches": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Metadata{ + Metadata: &v3matcherpb.MetadataMatcher{Invert: true}, + }}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []struct { + rpcData *rpcData + wantStatusCode codes.Code + }{ + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + }, + }, } - for _, test := range tests { t.Run(test.name, func(t *testing.T) { // Instantiate the chainedRBACEngine with different configurations that are From 333a441e27395b34f666e7d8f0ba871fae2ed53b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 18 May 2022 10:58:39 -0700 Subject: [PATCH 502/998] xds/ringhash: update connectivity state aggregation, and make sure at least one SubConn is connecting in TF (#5338) --- xds/internal/balancer/ringhash/picker.go | 27 ++++++++ xds/internal/balancer/ringhash/ringhash.go | 67 +++++++++++++++++-- .../balancer/ringhash/ringhash_test.go | 60 ++++++++++++----- 3 files changed, 130 insertions(+), 24 deletions(-) diff --git a/xds/internal/balancer/ringhash/picker.go b/xds/internal/balancer/ringhash/picker.go index dcea6d46e517..ec3b5605690d 100644 --- a/xds/internal/balancer/ringhash/picker.go +++ b/xds/internal/balancer/ringhash/picker.go @@ -143,6 +143,8 @@ func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, erro return balancer.PickResult{}, fmt.Errorf("no connection is Ready") } +// nextSkippingDuplicates finds the next entry in the ring, with a different +// subconn from the given entry. 
func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { for next := ring.next(entry); next != entry; next = ring.next(next) { if next.sc != entry.sc { @@ -152,3 +154,28 @@ func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { // There's no qualifying next entry. return nil } + +// nextSkippingDuplicatesSubConn finds the next subconn in the ring, that's +// different from the given subconn. +func nextSkippingDuplicatesSubConn(ring *ring, sc *subConn) *subConn { + var entry *ringEntry + for _, it := range ring.items { + if it.sc == sc { + entry = it + break + } + } + if entry == nil { + // If the given subconn is not in the ring (e.g. it was deleted), return + // the first one. + if len(ring.items) > 0 { + return ring.items[0].sc + } + return nil + } + ee := nextSkippingDuplicates(ring, entry) + if ee == nil { + return nil + } + return ee.sc +} diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index f8a47f165bdf..4e9c1772b166 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -98,6 +98,10 @@ type subConn struct { // When connectivity state is updated to Idle for this SubConn, if // connectQueued is true, Connect() will be called on the SubConn. connectQueued bool + // attemptingToConnect indicates if this subconn is attempting to connect. + // It's set when queueConnect is called. It's unset when the state is + // changed to Ready/Shutdown, or Idle (and if connectQueued is false). + attemptingToConnect bool } // setState updates the state of this SubConn. @@ -113,6 +117,8 @@ func (sc *subConn) setState(s connectivity.State) { if sc.connectQueued { sc.connectQueued = false sc.sc.Connect() + } else { + sc.attemptingToConnect = false } case connectivity.Connecting: // Clear connectQueued if the SubConn isn't failing. 
This state @@ -122,11 +128,14 @@ func (sc *subConn) setState(s connectivity.State) { // Clear connectQueued if the SubConn isn't failing. This state // transition is unlikely to happen, but handle this just in case. sc.connectQueued = false + sc.attemptingToConnect = false // Set to a non-failing state. sc.failing = false case connectivity.TransientFailure: // Set to a failing state. sc.failing = true + case connectivity.Shutdown: + sc.attemptingToConnect = false } sc.state = s } @@ -149,6 +158,7 @@ func (sc *subConn) effectiveState() connectivity.State { func (sc *subConn) queueConnect() { sc.mu.Lock() defer sc.mu.Unlock() + sc.attemptingToConnect = true if sc.state == connectivity.Idle { sc.sc.Connect() return @@ -158,6 +168,12 @@ func (sc *subConn) queueConnect() { sc.connectQueued = true } +func (sc *subConn) isAttemptingToConnect() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.attemptingToConnect +} + type ringhashBalancer struct { cc balancer.ClientConn logger *grpclog.PrefixLogger @@ -268,7 +284,8 @@ func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) err var err error b.ring, err = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) if err != nil { - panic(err) + b.ResolverError(fmt.Errorf("ringhash failed to make a new ring: %v", err)) + return balancer.ErrBadResolverState } b.regeneratePicker() b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) @@ -334,12 +351,6 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance switch s { case connectivity.Idle: - // When the overall state is TransientFailure, this will never get picks - // if there's a lower priority. Need to keep the SubConns connecting so - // there's a chance it will recover. - if b.state == connectivity.TransientFailure { - scs.queueConnect() - } // No need to send an update. No queued RPC can be unblocked. If the // overall state changed because of this, sendUpdate is already true. 
case connectivity.Connecting: @@ -364,6 +375,35 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance if sendUpdate { b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } + + switch b.state { + case connectivity.Connecting, connectivity.TransientFailure: + // When overall state is TransientFailure, we need to make sure at least + // one SubConn is attempting to connect, otherwise this balancer may + // never get picks if the parent is priority. + // + // Because we report Connecting as the overall state when only one + // SubConn is in TransientFailure, we do the same check for Connecting + // here. + // + // Note that this check also covers deleting SubConns due to address + // change. E.g. if the SubConn attempting to connect is deleted, and the + // overall state is TF. Since there must be at least one SubConn + // attempting to connect, we need to trigger one. But since the deleted + // SubConn will eventually send a shutdown update, this code will run + // and trigger the next SubConn to connect. + for _, sc := range b.subConns { + if sc.isAttemptingToConnect() { + return + } + } + // Trigger a SubConn (this updated SubConn's next SubConn in the ring) + // to connect if nobody is attempting to connect. + sc := nextSkippingDuplicatesSubConn(b.ring, scs) + if sc != nil { + sc.queueConnect() + } + } } // mergeErrors builds an error from the last connection error and the last @@ -395,6 +435,7 @@ func (b *ringhashBalancer) Close() {} // // It's not thread safe. type connectivityStateEvaluator struct { + sum uint64 nums [5]uint64 } @@ -404,6 +445,7 @@ type connectivityStateEvaluator struct { // - If there is at least one subchannel in READY state, report READY. // - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE. // - If there is at least one subchannel in CONNECTING state, report CONNECTING. 
+// - If there is one subchannel in TRANSIENT_FAILURE and there is more than one subchannel, report state CONNECTING. // - If there is at least one subchannel in Idle state, report Idle. // - Otherwise, report TRANSIENT_FAILURE. // @@ -417,6 +459,14 @@ func (cse *connectivityStateEvaluator) recordTransition(oldState, newState conne updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. cse.nums[state] += updateVal } + if oldState == connectivity.Shutdown { + // There's technically no transition from Shutdown. But we record a + // Shutdown->Idle transition when a new SubConn is created. + cse.sum++ + } + if newState == connectivity.Shutdown { + cse.sum-- + } if cse.nums[connectivity.Ready] > 0 { return connectivity.Ready @@ -427,6 +477,9 @@ func (cse *connectivityStateEvaluator) recordTransition(oldState, newState conne if cse.nums[connectivity.Connecting] > 0 { return connectivity.Connecting } + if cse.nums[connectivity.TransientFailure] > 0 && cse.sum > 1 { + return connectivity.Connecting + } if cse.nums[connectivity.Idle] > 0 { return connectivity.Idle } diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go index 015424cdafed..22586c60b154 100644 --- a/xds/internal/balancer/ringhash/ringhash_test.go +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -365,8 +365,8 @@ func TestAddrWeightChange(t *testing.T) { } // TestSubConnToConnectWhenOverallTransientFailure covers the situation when the -// overall state is TransientFailure, the SubConns turning Idle will be -// triggered to Connect(). But not when the overall state is not +// overall state is TransientFailure, the SubConns turning Idle will trigger the +// next SubConn in the ring to Connect(). But not when the overall state is not // TransientFailure. 
func TestSubConnToConnectWhenOverallTransientFailure(t *testing.T) { wantAddrs := []resolver.Address{ @@ -377,30 +377,56 @@ func TestSubConnToConnectWhenOverallTransientFailure(t *testing.T) { _, b, p0 := setupTest(t, wantAddrs) ring0 := p0.(*picker).ring - // Turn all SubConns to TransientFailure. - for _, it := range ring0.items { - b.UpdateSubConnState(it.sc.sc, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - } - - // The next one turning Idle should Connect(). + // Turn the first subconn to transient failure. sc0 := ring0.items[0].sc.sc + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + + // It will trigger the second subconn to connect (because overall state is + // Connect (when one subconn is TF)). + sc1 := ring0.items[1].sc.sc + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Fatalf("timeout waiting for Connect() from SubConn %v", sc1) + } + + // Turn the second subconn to TF. This will set the overall state to TF. + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + + // It will trigger the third subconn to connect. + sc2 := ring0.items[2].sc.sc + select { + case <-sc2.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Fatalf("timeout waiting for Connect() from SubConn %v", sc2) + } + + // Turn the third subconn to TF. This will set the overall state to TF. + b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + + // It will trigger the first subconn to connect. 
select { case <-sc0.(*testutils.TestSubConn).ConnectCh: - case <-time.After(defaultTestTimeout): - t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + case <-time.After(defaultTestShortTimeout): + t.Fatalf("timeout waiting for Connect() from SubConn %v", sc0) } - // If this SubConn is ready. Other SubConns turning Idle will not Connect(). - b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Turn the third subconn to TF again. + b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Idle}) - // The third SubConn in the ring should connect. - sc1 := ring0.items[1].sc.sc - b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + // This will not trigger any new Connect() on the SubConns, because sc0 is + // still attempting to connect, and we only need one SubConn to connect. 
select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + t.Fatalf("unexpected Connect() from SubConn %v", sc0) case <-sc1.(*testutils.TestSubConn).ConnectCh: - t.Errorf("unexpected Connect() from SubConn %v", sc1) + t.Fatalf("unexpected Connect() from SubConn %v", sc1) + case <-sc2.(*testutils.TestSubConn).ConnectCh: + t.Fatalf("unexpected Connect() from SubConn %v", sc2) case <-time.After(defaultTestShortTimeout): } } From 9f4b31a11cc4deba7f5c542399d5ec71fab3a053 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 19 May 2022 14:48:44 -0400 Subject: [PATCH 503/998] Added HTTP status and grpc status to POST check (#5364) * Added HTTP status and grpc status to POST check --- internal/transport/controlbuf.go | 6 +++ internal/transport/http2_server.go | 16 ++++--- internal/transport/transport_test.go | 70 +++++++++++++++++----------- 3 files changed, 58 insertions(+), 34 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 8394d252df03..244f4b081d52 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 4969102f4af9..45d7bd145e3e 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -448,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: 
streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) return false } @@ -521,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -549,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index d129dbf0a3bd..c1f9664ada67 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1696,21 +1696,6 @@ func (s) TestHeadersCausingStreamError(t *testing.T) { values []string } }{ - // If the client sends an HTTP/2 request with a :method header with a - // value other than POST, as specified in the gRPC over HTTP/2 - // specification, the server should close the stream. 
- { - name: "Client Sending Wrong Method", - headers: []struct { - name string - values []string - }{ - {name: ":method", values: []string{"PUT"}}, - {name: ":path", values: []string{"foo"}}, - {name: ":authority", values: []string{"localhost"}}, - {name: "content-type", values: []string{"application/grpc"}}, - }, - }, // "Transports must consider requests containing the Connection header // as malformed" - A41 Malformed requests map to a stream error of type // PROTOCOL_ERROR. @@ -1827,25 +1812,30 @@ func (s) TestHeadersCausingStreamError(t *testing.T) { } } -// TestHeadersMultipleHosts tests that a request with multiple hosts gets -// rejected with HTTP Status 400 and gRPC status Internal, regardless of whether -// the client is speaking gRPC or not. -func (s) TestHeadersMultipleHosts(t *testing.T) { +// TestHeadersHTTPStatusGRPCStatus tests requests with certain headers get a +// certain HTTP and gRPC status back. +func (s) TestHeadersHTTPStatusGRPCStatus(t *testing.T) { tests := []struct { name string headers []struct { name string values []string } + httpStatusWant string + grpcStatusWant string + grpcMessageWant string }{ // Note: multiple authority headers are handled by the framer itself, // which will cause a stream error. Thus, it will never get to - // operateHeaders with the check in operateHeaders for possible grpc-status sent back. + // operateHeaders with the check in operateHeaders for possible + // grpc-status sent back. // multiple :authority or multiple Host headers would make the eventual // :authority ambiguous as per A41. This takes precedence even over the // fact a request is non grpc. All of these requests should be rejected - // with grpc-status Internal. + // with grpc-status Internal. Thus, requests with multiple hosts should + // get rejected with HTTP Status 400 and gRPC status Internal, + // regardless of whether the client is speaking gRPC or not. 
{ name: "Multiple host headers non grpc", headers: []struct { @@ -1857,6 +1847,9 @@ func (s) TestHeadersMultipleHosts(t *testing.T) { {name: ":authority", values: []string{"localhost"}}, {name: "host", values: []string{"localhost", "localhost2"}}, }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: "both must only have 1 value as per HTTP/2 spec", }, { name: "Multiple host headers grpc", @@ -1870,6 +1863,27 @@ func (s) TestHeadersMultipleHosts(t *testing.T) { {name: "content-type", values: []string{"application/grpc"}}, {name: "host", values: []string{"localhost", "localhost2"}}, }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: "both must only have 1 value as per HTTP/2 spec", + }, + // If the client sends an HTTP/2 request with a :method header with a + // value other than POST, as specified in the gRPC over HTTP/2 + // specification, the server should fail the RPC. + { + name: "Client Sending Wrong Method", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"PUT"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + }, + httpStatusWant: "405", + grpcStatusWant: "13", + grpcMessageWant: "which should be POST", }, } for _, test := range tests { @@ -1909,10 +1923,10 @@ func (s) TestHeadersMultipleHosts(t *testing.T) { case *http2.SettingsFrame: // Do nothing. A settings frame is expected from server preface. 
case *http2.MetaHeadersFrame: - var status, grpcStatus, grpcMessage string + var httpStatus, grpcStatus, grpcMessage string for _, header := range frame.Fields { if header.Name == ":status" { - status = header.Value + httpStatus = header.Value } if header.Name == "grpc-status" { grpcStatus = header.Value @@ -1921,15 +1935,15 @@ func (s) TestHeadersMultipleHosts(t *testing.T) { grpcMessage = header.Value } } - if status != "400" { - result.Send(fmt.Errorf("incorrect HTTP Status got %v, want 200", status)) + if httpStatus != test.httpStatusWant { + result.Send(fmt.Errorf("incorrect HTTP Status got %v, want %v", httpStatus, test.httpStatusWant)) return } - if grpcStatus != "13" { // grpc status code internal - result.Send(fmt.Errorf("incorrect gRPC Status got %v, want 13", grpcStatus)) + if grpcStatus != test.grpcStatusWant { // grpc status code internal + result.Send(fmt.Errorf("incorrect gRPC Status got %v, want %v", grpcStatus, test.grpcStatusWant)) return } - if !strings.Contains(grpcMessage, "both must only have 1 value as per HTTP/2 spec") { + if !strings.Contains(grpcMessage, test.grpcMessageWant) { result.Send(fmt.Errorf("incorrect gRPC message")) return } From 459729d6672eb555ada1060ceddf470ff8b1ec82 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 20 May 2022 15:17:13 -0700 Subject: [PATCH 504/998] xds/priority: avoid sending duplicate updates to children (#5374) --- xds/internal/balancer/priority/balancer.go | 2 +- xds/internal/balancer/priority/balancer_child.go | 2 +- .../balancer/priority/balancer_priority.go | 14 +++++++++++--- xds/internal/balancer/priority/balancer_test.go | 16 ---------------- 4 files changed, 13 insertions(+), 21 deletions(-) diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index d82bce761751..672f10122ffe 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -170,7 +170,7 @@ func (b *priorityBalancer) 
UpdateClientConnState(s balancer.ClientConnState) err } // Sync the states of all children to the new updated priorities. This // include starting/stopping child balancers when necessary. - b.syncPriority() + b.syncPriority(true) return nil } diff --git a/xds/internal/balancer/priority/balancer_child.go b/xds/internal/balancer/priority/balancer_child.go index 95bb34f26252..c00a56b8f9ee 100644 --- a/xds/internal/balancer/priority/balancer_child.go +++ b/xds/internal/balancer/priority/balancer_child.go @@ -145,7 +145,7 @@ func (cb *childBalancer) startInitTimer() { // Re-sync the priority. This will switch to the next priority if // there's any. Note that it's important sync() is called after setting // initTimer to nil. - cb.parent.syncPriority() + cb.parent.syncPriority(false) }) } diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 829e51f1141c..2487c2626041 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -68,7 +68,7 @@ var ( // - forward the new addresses and config // // Caller must hold b.mu. -func (b *priorityBalancer) syncPriority() { +func (b *priorityBalancer) syncPriority(forceUpdate bool) { // Everything was removed by the update. if len(b.priorities) == 0 { b.childInUse = "" @@ -99,8 +99,16 @@ func (b *priorityBalancer) syncPriority() { b.cc.UpdateState(child.state) } b.logger.Infof("switching to (%q, %v) in syncPriority", child.name, p) + oldChildInUse := b.childInUse b.switchToChild(child, p) - child.sendUpdate() + if b.childInUse != oldChildInUse || forceUpdate { + // If child is switched, send the update to the new child. + // + // Or if forceUpdate is true (when this is triggered by a + // ClientConn update), because the ClientConn update might + // contain changes for this child. 
+ child.sendUpdate() + } break } } @@ -220,7 +228,7 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S } oldPriorityInUse := b.priorityInUse - child.parent.syncPriority() + child.parent.syncPriority(false) // If child is switched by syncPriority(), it also sends the update from the // new child to overwrite the old picker used by the parent. // diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 1683cafc5379..98bbb5b8f525 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -1854,22 +1854,6 @@ func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[0]) } - // The ClientConn state update triggers a priority switch, from p0 -> p0 - // (since p0 is still in use). Along with this the update, p0 also gets a - // ClientConn state update, with the addresses, which didn't change in this - // test (this update to the child is necessary in case the addresses are - // different). - // - // The test child policy, initIdleBalancer, blindly calls NewSubConn with - // all the addresses it receives, so this will trigger a NewSubConn with the - // old p0 addresses. (Note that in a real balancer, like roundrobin, no new - // SubConn will be created because the addresses didn't change). - // - // Clear those from the channel so the rest of the test can get the expected - // behavior. - <-cc.NewSubConnAddrsCh - <-cc.NewSubConnCh - // Turn p0 down, to start p1. 
pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs From 30b9d59a766858a4b51148e47edb3af2766ab617 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 20 May 2022 15:17:29 -0700 Subject: [PATCH 505/998] client/SubConn: do not recreate addrConn if UpdateAddresses is called with the same addresses (#5373) --- clientconn.go | 25 ++++++++++++++++++++++--- clientconn_test.go | 16 +++++++++------- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/clientconn.go b/clientconn.go index ea9836d28b3c..3fe28732b809 100644 --- a/clientconn.go +++ b/clientconn.go @@ -801,16 +801,31 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses is the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. 
The ac will keep using @@ -827,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } diff --git a/clientconn_test.go b/clientconn_test.go index 21f70c8f2514..9f32999709f4 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -854,9 +854,10 @@ func (s) TestBackoffCancel(t *testing.T) { } } -// UpdateAddresses should cause the next reconnect to begin from the top of the -// list if the connection is not READY. -func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { +// TestUpdateAddresses_NoopIfCalledWithSameAddresses tests that UpdateAddresses +// should be noop if UpdateAddresses is called with the same list of addresses, +// even when the SubConn is in Connecting and doesn't have a current address. +func (s) TestUpdateAddresses_NoopIfCalledWithSameAddresses(t *testing.T) { lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) @@ -1008,19 +1009,20 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { } client.mu.Unlock() + // Call UpdateAddresses with the same list of addresses, it should be a noop + // (even when the SubConn is Connecting, and doesn't have a curAddr). ac.acbw.UpdateAddresses(addrsList) // We've called tryUpdateAddrs - now let's make server2 close the - // connection and check that it goes back to server1 instead of continuing - // to server3 or trying server2 again. + // connection and check that it continues to server3. 
close(closeServer2) select { case <-server1ContactedSecondTime: + t.Fatal("server1 was contacted a second time, but it should have continued to server 3") case <-server2ContactedSecondTime: - t.Fatal("server2 was contacted a second time, but it after tryUpdateAddrs it should have re-started the list and tried server1") + t.Fatal("server2 was contacted a second time, but it should have continued to server 3") case <-server3Contacted: - t.Fatal("server3 was contacted, but after tryUpdateAddrs it should have re-started the list and tried server1") case <-timeout: t.Fatal("timed out waiting for any server to be contacted after tryUpdateAddrs") } From 081c688437c76dc60b2f6aec68c1804de5f32945 Mon Sep 17 00:00:00 2001 From: Alexander Andreev Date: Mon, 23 May 2022 20:23:54 +0400 Subject: [PATCH 506/998] client: fix hctx leakage in addrConn.createTransport (#5337) --- clientconn.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clientconn.go b/clientconn.go index 3fe28732b809..de6d41c23841 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1238,6 +1238,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() + defer hcancel() if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. @@ -1248,7 +1249,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state, since there may be a new transport in this addrConn. return } - hcancel() ac.transport = nil // Refresh the name resolver ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1271,6 +1271,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. 
+ hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } From ed7522591b3485c919a04205094a07c2d96456ce Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 23 May 2022 15:14:50 -0400 Subject: [PATCH 507/998] Don't call cmp in non testing file (#5370) * Don't call cmp in non testing file --- .../balancer/clusterresolver/config.go | 2 +- .../balancer/outlierdetection/config.go | 12 ++-- .../balancer/outlierdetection/config_test.go | 72 +++++++++++++++++++ 3 files changed, 78 insertions(+), 8 deletions(-) create mode 100644 xds/internal/balancer/outlierdetection/config_test.go diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 1cbffdfa52fd..26e2812d2f62 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -121,7 +121,7 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { return false case dm.DNSHostname != b.DNSHostname: return false - case !dm.OutlierDetection.Equal(b.OutlierDetection): + case !dm.OutlierDetection.EqualIgnoringChildPolicy(b.OutlierDetection): return false } diff --git a/xds/internal/balancer/outlierdetection/config.go b/xds/internal/balancer/outlierdetection/config.go index 8b0cdcab4065..da8311263150 100644 --- a/xds/internal/balancer/outlierdetection/config.go +++ b/xds/internal/balancer/outlierdetection/config.go @@ -22,7 +22,6 @@ package outlierdetection import ( "time" - "github.com/google/go-cmp/cmp" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -154,8 +153,10 @@ type LBConfig struct { ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } -// Equal returns whether the LBConfig is the same with the parameter. 
-func (lbc *LBConfig) Equal(lbc2 *LBConfig) bool { +// EqualIgnoringChildPolicy returns whether the LBConfig is same with the +// parameter outside of the child policy, only comparing the Outlier Detection +// specific configuration. +func (lbc *LBConfig) EqualIgnoringChildPolicy(lbc2 *LBConfig) bool { if lbc == nil && lbc2 == nil { return true } @@ -177,8 +178,5 @@ func (lbc *LBConfig) Equal(lbc2 *LBConfig) bool { if !lbc.SuccessRateEjection.Equal(lbc2.SuccessRateEjection) { return false } - if !lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) { - return false - } - return cmp.Equal(lbc.ChildPolicy, lbc2.ChildPolicy) + return lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) } diff --git a/xds/internal/balancer/outlierdetection/config_test.go b/xds/internal/balancer/outlierdetection/config_test.go new file mode 100644 index 000000000000..ce924dca1bc6 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/config_test.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package outlierdetection + +import ( + "reflect" + "testing" +) + +func TestSuccessRateEjection(t *testing.T) { + fields := map[string]bool{ + "StdevFactor": true, + "EnforcementPercentage": true, + "MinimumHosts": true, + "RequestVolume": true, + } + typ := reflect.TypeOf(SuccessRateEjection{}) + for i := 0; i < typ.NumField(); i++ { + if n := typ.Field(i).Name; !fields[n] { + t.Errorf("New field in SuccessRateEjection %q, update this test and Equal", n) + } + } +} + +func TestEqualFieldsFailurePercentageEjection(t *testing.T) { + fields := map[string]bool{ + "Threshold": true, + "EnforcementPercentage": true, + "MinimumHosts": true, + "RequestVolume": true, + } + typ := reflect.TypeOf(FailurePercentageEjection{}) + for i := 0; i < typ.NumField(); i++ { + if n := typ.Field(i).Name; !fields[n] { + t.Errorf("New field in FailurePercentageEjection %q, update this test and Equal", n) + } + } +} + +func TestEqualFieldsLBConfig(t *testing.T) { + fields := map[string]bool{ + "LoadBalancingConfig": true, + "Interval": true, + "BaseEjectionTime": true, + "MaxEjectionTime": true, + "MaxEjectionPercent": true, + "SuccessRateEjection": true, + "FailurePercentageEjection": true, + "ChildPolicy": true, + } + typ := reflect.TypeOf(LBConfig{}) + for i := 0; i < typ.NumField(); i++ { + if n := typ.Field(i).Name; !fields[n] { + t.Errorf("New field in LBConfig %q, update this test and EqualIgnoringChildPolicy", n) + } + } +} From c6c0a06d47f0e71d8465f53f1fb78d1bbcd7f5cc Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 23 May 2022 16:25:24 -0400 Subject: [PATCH 508/998] Change version to 1.48.0-dev (#5379) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index ff0d6c265146..1d73c214920e 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.47.0-dev" +const Version = "1.48.0-dev" From c0e35731fa5cb0c1b681a9340e07a22b2f95a102 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 May 2022 11:13:30 -0700 Subject: [PATCH 509/998] xds: move e2e tests into grpc/test/xds directory (#5363) --- internal/internal.go | 40 ++ .../testutils/xds}/e2e/bootstrap.go | 0 .../testutils/xds}/e2e/clientresources.go | 0 .../testutils/xds}/e2e/server.go | 8 +- internal/testutils/xds/e2e/setup_certs.go | 98 +++++ .../xds/e2e/setup_management_server.go | 100 +++++ .../xds}/xds_client_affinity_test.go | 18 +- .../xds}/xds_client_federation_test.go | 24 +- test/xds/xds_client_integration_test.go | 108 +++++ test/xds/xds_client_retry_test.go | 181 +++++++++ .../xds_rls_clusterspecifier_plugin_test.go | 170 ++++++++ .../xds}/xds_security_config_nack_test.go | 20 +- test/xds/xds_server_integration_test.go | 370 +++++++++++++++++ .../xds/xds_server_rbac_test.go | 361 +--------------- .../xds}/xds_server_serving_mode_test.go | 19 +- xds/csds/csds_test.go | 2 +- xds/internal/clusterspecifier/rls/rls.go | 28 +- xds/internal/httpfilter/fault/fault_test.go | 2 +- xds/internal/httpfilter/rbac/rbac.go | 28 +- xds/internal/resolver/xds_resolver.go | 6 +- xds/internal/server/listener_wrapper_test.go | 14 +- xds/internal/test/e2e/controlplane.go | 2 +- xds/internal/test/e2e/e2e_test.go | 3 +- .../test/xds_client_integration_test.go | 384 ------------------ xds/internal/test/xds_integration_test.go | 200 --------- .../xdsresource/filter_chain_test.go | 2 +- .../xdsresource/unmarshal_lds_test.go | 7 +- xds/server_test.go | 2 +- xds/xds.go | 24 +- 29 files changed, 1163 insertions(+), 1058 deletions(-) rename {xds/internal/testutils => internal/testutils/xds}/e2e/bootstrap.go (100%) rename {xds/internal/testutils => internal/testutils/xds}/e2e/clientresources.go (100%) rename {xds/internal/testutils => internal/testutils/xds}/e2e/server.go (100%) create mode 100644 internal/testutils/xds/e2e/setup_certs.go create 
mode 100644 internal/testutils/xds/e2e/setup_management_server.go rename {xds/internal/test => test/xds}/xds_client_affinity_test.go (92%) rename {xds/internal/test => test/xds}/xds_client_federation_test.go (87%) create mode 100644 test/xds/xds_client_integration_test.go create mode 100644 test/xds/xds_client_retry_test.go create mode 100644 test/xds/xds_rls_clusterspecifier_plugin_test.go rename {xds/internal/test => test/xds}/xds_security_config_nack_test.go (96%) create mode 100644 test/xds/xds_server_integration_test.go rename xds/internal/test/xds_server_integration_test.go => test/xds/xds_server_rbac_test.go (70%) rename {xds/internal/test => test/xds}/xds_server_serving_mode_test.go (96%) delete mode 100644 xds/internal/test/xds_client_integration_test.go delete mode 100644 xds/internal/test/xds_integration_test.go diff --git a/internal/internal.go b/internal/internal.go index 6d355b0b0134..0f4512248174 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -63,6 +63,46 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. 
+ RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
diff --git a/xds/internal/testutils/e2e/bootstrap.go b/internal/testutils/xds/e2e/bootstrap.go similarity index 100% rename from xds/internal/testutils/e2e/bootstrap.go rename to internal/testutils/xds/e2e/bootstrap.go diff --git a/xds/internal/testutils/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go similarity index 100% rename from xds/internal/testutils/e2e/clientresources.go rename to internal/testutils/xds/e2e/clientresources.go diff --git a/xds/internal/testutils/e2e/server.go b/internal/testutils/xds/e2e/server.go similarity index 100% rename from xds/internal/testutils/e2e/server.go rename to internal/testutils/xds/e2e/server.go index e47dcc5213c2..e611c56c673c 100644 --- a/xds/internal/testutils/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -26,18 +26,18 @@ import ( "reflect" "strconv" + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/envoyproxy/go-control-plane/pkg/cache/types" v3cache "github.com/envoyproxy/go-control-plane/pkg/cache/v3" v3resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" v3server "github.com/envoyproxy/go-control-plane/pkg/server/v3" - - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" ) var logger = grpclog.Component("xds-e2e") diff --git a/internal/testutils/xds/e2e/setup_certs.go b/internal/testutils/xds/e2e/setup_certs.go new file mode 100644 index 
000000000000..62ea51d04d7f --- /dev/null +++ b/internal/testutils/xds/e2e/setup_certs.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/testdata" +) + +const ( + // Names of files inside tempdir, for certprovider plugin to watch. + certFile = "cert.pem" + keyFile = "key.pem" + rootFile = "ca.pem" +) + +func createTmpFile(src, dst string) error { + data, err := ioutil.ReadFile(src) + if err != nil { + return fmt.Errorf("ioutil.ReadFile(%q) failed: %v", src, err) + } + if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { + return fmt.Errorf("ioutil.WriteFile(%q) failed: %v", dst, err) + } + return nil +} + +// createTempDirWithFiles creates a temporary directory under the system default +// tempDir with the given dirSuffix. It also reads from certSrc, keySrc and +// rootSrc files are creates appropriate files under the newly create tempDir. +// Returns the name of the created tempDir. +func createTmpDirWithFiles(dirSuffix, certSrc, keySrc, rootSrc string) (string, error) { + // Create a temp directory. Passing an empty string for the first argument + // uses the system temp directory. 
+ dir, err := ioutil.TempDir("", dirSuffix) + if err != nil { + return "", fmt.Errorf("ioutil.TempDir() failed: %v", err) + } + + if err := createTmpFile(testdata.Path(certSrc), path.Join(dir, certFile)); err != nil { + return "", err + } + if err := createTmpFile(testdata.Path(keySrc), path.Join(dir, keyFile)); err != nil { + return "", err + } + if err := createTmpFile(testdata.Path(rootSrc), path.Join(dir, rootFile)); err != nil { + return "", err + } + return dir, nil +} + +// CreateClientTLSCredentials creates client-side TLS transport credentials +// using certificate and key files from testdata/x509 directory. +func CreateClientTLSCredentials(t *testing.T) credentials.TransportCredentials { + t.Helper() + + cert, err := tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) + } + b, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + if err != nil { + t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + t.Fatal("failed to append certificates") + } + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + ServerName: "x.test.example.com", + }) +} diff --git a/internal/testutils/xds/e2e/setup_management_server.go b/internal/testutils/xds/e2e/setup_management_server.go new file mode 100644 index 000000000000..ca45363d6e0b --- /dev/null +++ b/internal/testutils/xds/e2e/setup_management_server.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "encoding/json" + "path" + "testing" + + "github.com/google/uuid" + "google.golang.org/grpc/internal" + xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/resolver" +) + +// SetupManagementServer performs the following: +// - spin up an xDS management server on a local port +// - set up certificates for consumption by the file_watcher plugin +// - creates a bootstrap file in a temporary location +// - creates an xDS resolver using the above bootstrap contents +// +// Returns the following: +// - management server +// - nodeID to be used by the client when connecting to the management server +// - bootstrap contents to be used by the client +// - xDS resolver builder to be used by the client +// - a cleanup function to be invoked at the end of the test +func SetupManagementServer(t *testing.T) (*ManagementServer, string, []byte, resolver.Builder, func()) { + t.Helper() + + // Spin up an xDS management server on a local port. + server, err := StartManagementServer() + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer func() { + if err != nil { + server.Stop() + } + }() + + // Create a directory to hold certs and key files used on the server side. + serverDir, err := createTmpDirWithFiles("testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + if err != nil { + server.Stop() + t.Fatal(err) + } + + // Create a directory to hold certs and key files used on the client side. 
+ clientDir, err := createTmpDirWithFiles("testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + if err != nil { + server.Stop() + t.Fatal(err) + } + + // Create certificate providers section of the bootstrap config with entries + // for both the client and server sides. + cpc := map[string]json.RawMessage{ + ServerSideCertProviderInstance: DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), + ClientSideCertProviderInstance: DefaultFileWatcherConfig(path.Join(clientDir, certFile), path.Join(clientDir, keyFile), path.Join(clientDir, rootFile)), + } + + // Create a bootstrap file in a temporary directory. + nodeID := uuid.New().String() + bootstrapContents, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ + Version: xdsinternal.TransportV3, + NodeID: nodeID, + ServerURI: server.Address, + CertificateProviders: cpc, + ServerListenerResourceNameTemplate: ServerListenerResourceNameTemplate, + }) + if err != nil { + server.Stop() + t.Fatalf("Failed to create bootstrap file: %v", err) + } + resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error)) + resolver, err := resolverBuilder(bootstrapContents) + if err != nil { + server.Stop() + t.Fatalf("Failed to create xDS resolver for testing: %v", err) + } + + return server, nodeID, bootstrapContents, resolver, func() { server.Stop() } +} diff --git a/xds/internal/test/xds_client_affinity_test.go b/test/xds/xds_client_affinity_test.go similarity index 92% rename from xds/internal/test/xds_client_affinity_test.go rename to test/xds/xds_client_affinity_test.go index 55d98459251a..91ca697071c8 100644 --- a/xds/internal/test/xds_client_affinity_test.go +++ b/test/xds/xds_client_affinity_test.go @@ -1,6 +1,3 @@ -//go:build !386 -// +build !386 - /* * * Copyright 2021 gRPC authors. 
@@ -19,7 +16,7 @@ * */ -package xds_test +package xds import ( "context" @@ -29,16 +26,15 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/xds/internal/testutils/e2e" + "google.golang.org/grpc/internal/testutils/xds/e2e" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" ) -const hashHeaderName = "session_id" - // hashRouteConfig returns a RouteConfig resource with hash policy set to // header "session_id". func hashRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.RouteConfiguration { @@ -53,7 +49,7 @@ func hashRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.RouteC HashPolicy: []*v3routepb.RouteAction_HashPolicy{{ PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ Header: &v3routepb.RouteAction_HashPolicy_Header{ - HeaderName: hashHeaderName, + HeaderName: "session_id", }, }, Terminal: true, @@ -92,10 +88,10 @@ func (s) TestClientSideAffinitySanityCheck(t *testing.T) { return func() { envconfig.XDSRingHash = old } }()() - managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) defer cleanup1() - port, cleanup2 := clientSetup(t, &testService{}) + port, cleanup2 := startTestService(t, nil) defer cleanup2() const serviceName = "my-service-client-side-xds" @@ -130,7 +126,7 @@ func (s) TestClientSideAffinitySanityCheck(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}, 
grpc.WaitForReady(true)); err != nil { t.Fatalf("rpc EmptyCall() failed: %v", err) } diff --git a/xds/internal/test/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go similarity index 87% rename from xds/internal/test/xds_client_federation_test.go rename to test/xds/xds_client_federation_test.go index 09db314b726a..595e2272ffe4 100644 --- a/xds/internal/test/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -1,6 +1,3 @@ -//go:build !386 -// +build !386 - /* * * Copyright 2021 gRPC authors. @@ -18,7 +15,7 @@ * limitations under the License. */ -package xds_test +package xds import ( "context" @@ -32,13 +29,13 @@ import ( "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/testutils/xds/e2e" xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/resolver" + testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" - "google.golang.org/grpc/xds" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/e2e" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // TestClientSideFederation tests that federation is supported. 
@@ -83,22 +80,23 @@ func (s) TestClientSideFederation(t *testing.T) { t.Fatalf("Failed to create bootstrap file: %v", err) } - resolver, err := xds.NewXDSResolverWithConfigForTesting(bootstrapContents) + resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error)) + resolver, err := resolverBuilder(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS resolver for testing: %v", err) } - port, cleanup := clientSetup(t, &testService{}) + port, cleanup := startTestService(t, nil) defer cleanup() const serviceName = "my-service-client-side-xds" // LDS is old style name. ldsName := serviceName // RDS is new style, with the non default authority. - rdsName := testutils.BuildResourceName(xdsresource.RouteConfigResource, nonDefaultAuth, "route-"+serviceName, nil) + rdsName := fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/%s", nonDefaultAuth, "route-"+serviceName) // CDS is old style name. cdsName := "cluster-" + serviceName // EDS is new style, with the non default authority. - edsName := testutils.BuildResourceName(xdsresource.EndpointsResource, nonDefaultAuth, "endpoints-"+serviceName, nil) + edsName := fmt.Sprintf("xdstp://%s/envoy.config.route.v3.ClusterLoadAssignment/%s", nonDefaultAuth, "endpoints-"+serviceName) // Split resources, put LDS/CDS in the default authority, and put RDS/EDS in // the other authority. @@ -135,7 +133,7 @@ func (s) TestClientSideFederation(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("rpc EmptyCall() failed: %v", err) } diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go new file mode 100644 index 000000000000..399a0042a01a --- /dev/null +++ b/test/xds/xds_client_integration_test.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "context" + "fmt" + "net" + "strconv" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/xds/e2e" + + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. +) + +// startTestService spins up a server exposing the TestService on a local port. 
+// +// Returns the following: +// - the port the server is listening on +// - cleanup function to be invoked by the tests when done +func startTestService(t *testing.T, server *stubserver.StubServer) (uint32, func()) { + if server == nil { + server = &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + } + server.StartServer() + + _, p, err := net.SplitHostPort(server.Address) + if err != nil { + t.Fatalf("invalid serving address for stub server: %v", err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("invalid serving port for stub server: %v", err) + } + return uint32(port), server.Stop +} + +func (s) TestClientSideXDS(t *testing.T) { + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + defer cleanup1() + + port, cleanup2 := startTestService(t, nil) + defer cleanup2() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. 
+ cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/test/xds/xds_client_retry_test.go b/test/xds/xds_client_retry_test.go new file mode 100644 index 000000000000..31968f885bff --- /dev/null +++ b/test/xds/xds_client_retry_test.go @@ -0,0 +1,181 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +func (s) TestClientSideRetry(t *testing.T) { + ctr := 0 + errs := []codes.Code{codes.ResourceExhausted} + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + defer func() { ctr++ }() + if ctr < len(errs) { + return nil, status.Errorf(errs[ctr], "this should be retried") + } + return &testpb.Empty{}, nil + }, + } + + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + defer cleanup1() + + port, cleanup2 := startTestService(t, ss) + defer cleanup2() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. 
+ cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + defer cancel() + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.ResourceExhausted { + t.Fatalf("rpc EmptyCall() = _, %v; want _, ResourceExhausted", err) + } + + testCases := []struct { + name string + vhPolicy *v3routepb.RetryPolicy + routePolicy *v3routepb.RetryPolicy + errs []codes.Code // the errors returned by the server for each RPC + tryAgainErr codes.Code // the error that would be returned if we are still using the old retry policies. + errWant codes.Code + }{{ + name: "virtualHost only, fail", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted,unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 1}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + routePolicy: nil, + tryAgainErr: codes.ResourceExhausted, + errWant: codes.Unavailable, + }, { + name: "virtualHost only", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted, unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + routePolicy: nil, + tryAgainErr: codes.Unavailable, + errWant: codes.OK, + }, { + name: "virtualHost+route, fail", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted,unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + tryAgainErr: codes.OK, + errWant: codes.Unavailable, + }, { + name: "virtualHost+route", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted", + 
NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.Unavailable}, + tryAgainErr: codes.Unavailable, + errWant: codes.OK, + }, { + name: "virtualHost+route, not enough attempts", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 1}, + }, + errs: []codes.Code{codes.Unavailable, codes.Unavailable}, + tryAgainErr: codes.OK, + errWant: codes.Unavailable, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + errs = tc.errs + + // Confirm tryAgainErr is correct before updating resources. + ctr = 0 + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code != tc.tryAgainErr { + t.Fatalf("with old retry policy: EmptyCall() = _, %v; want _, %v", err, tc.tryAgainErr) + } + + resources.Routes[0].VirtualHosts[0].RetryPolicy = tc.vhPolicy + resources.Routes[0].VirtualHosts[0].Routes[0].GetRoute().RetryPolicy = tc.routePolicy + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + for { + ctr = 0 + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code == tc.tryAgainErr { + continue + } else if code != tc.errWant { + t.Fatalf("rpc EmptyCall() = _, %v; want _, %v", err, tc.errWant) + } + break + } + }) + } +} diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go new file mode 100644 index 000000000000..392894017ec1 --- /dev/null +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/rls" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/protobuf/types/known/durationpb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" + + _ "google.golang.org/grpc/balancer/rls" // Register the RLS Load Balancing policy. +) + +// defaultClientResourcesWithRLSCSP returns a set of resources (LDS, RDS, CDS, EDS) for a +// client to connect to a server with a RLS Load Balancer as a child of Cluster Manager. 
+func defaultClientResourcesWithRLSCSP(params e2e.ResourceParams, rlsProto *rlspb.RouteLookupConfig) e2e.UpdateOptions { + routeConfigName := "route-" + params.DialTarget + clusterName := "cluster-" + params.DialTarget + endpointsName := "endpoints-" + params.DialTarget + return e2e.UpdateOptions{ + NodeID: params.NodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{defaultRouteConfigWithRLSCSP(routeConfigName, params.DialTarget, rlsProto)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, endpointsName, params.SecLevel)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, []uint32{params.Port})}, + } +} + +// defaultRouteConfigWithRLSCSP returns a basic xds RouteConfig resource with an +// RLS Cluster Specifier Plugin configured as the route. +func defaultRouteConfigWithRLSCSP(routeName, ldsTarget string, rlsProto *rlspb.RouteLookupConfig) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + ClusterSpecifierPlugins: []*v3routepb.ClusterSpecifierPlugin{ + { + Extension: &v3corepb.TypedExtensionConfig{ + Name: "rls-csp", + TypedConfig: testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: rlsProto, + }), + }, + }, + }, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: "rls-csp"}, + }}, + }}, + }}, + } +} + +// TestRLSinxDS tests an xDS configured system with an RLS Balancer present. 
+// +// This test sets up the RLS Balancer using the RLS Cluster Specifier Plugin, +// spins up a test service and has a fake RLS Server correctly respond with a +// target corresponding to this test service. This test asserts an RPC proceeds +// as normal with the RLS Balancer as part of system. +func (s) TestRLSinxDS(t *testing.T) { + oldRLS := envconfig.XDSRLS + envconfig.XDSRLS = true + internal.RegisterRLSClusterSpecifierPluginForTesting() + defer func() { + envconfig.XDSRLS = oldRLS + internal.UnregisterRLSClusterSpecifierPluginForTesting() + }() + + // Set up all components and configuration necessary - management server, + // xDS resolver, fake RLS Server, and xDS configuration which specifies an + // RLS Balancer that communicates to this set up fake RLS Server. + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + defer cleanup1() + port, cleanup2 := startTestService(t, nil) + defer cleanup2() + + lis := testutils.NewListenerWrapper(t, nil) + rlsServer, rlsRequestCh := rls.SetupFakeRLSServer(t, lis) + rlsProto := &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{{Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}}}, + LookupService: rlsServer.Address, + LookupServiceTimeout: durationpb.New(defaultTestTimeout), + CacheSizeBytes: 1024, + } + + const serviceName = "my-service-client-side-xds" + resources := defaultClientResourcesWithRLSCSP(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }, rlsProto) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Configure the fake RLS Server to set the RLS Balancers child CDS + // Cluster's name as the target for the RPC to use. 
+ rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rls.RouteLookupResponse { + return &rls.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{"cluster-" + serviceName}}} + }) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + // Successfully sending the RPC will require the RLS Load Balancer to + // communicate with the fake RLS Server for information about the target. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // These RLS Verifications makes sure the RLS Load Balancer is actually part + // of the xDS Configured system that correctly sends out RPC. + + // Verify connection is established to RLS Server. + if _, err = lis.NewConnCh.Receive(ctx); err != nil { + t.Fatal("Timeout when waiting for RLS LB policy to create control channel") + } + + // Verify an rls request is sent out to fake RLS Server. + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for an RLS request to be sent out") + case <-rlsRequestCh: + } +} diff --git a/xds/internal/test/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go similarity index 96% rename from xds/internal/test/xds_security_config_nack_test.go rename to test/xds/xds_security_config_nack_test.go index 7b8e36c3f3a4..4fe469ed3ebe 100644 --- a/xds/internal/test/xds_security_config_nack_test.go +++ b/test/xds/xds_security_config_nack_test.go @@ -1,6 +1,3 @@ -//go:build !386 -// +build !386 - /* * * Copyright 2021 gRPC authors. 
@@ -19,7 +16,7 @@ * */ -package xds_test +package xds import ( "context" @@ -30,10 +27,11 @@ import ( "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/e2e" + "google.golang.org/grpc/internal/testutils/xds/e2e" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -43,7 +41,7 @@ func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { missingIdentityProviderInstance = "missing-identity-provider-instance" missingRootProviderInstance = "missing-root-provider-instance" ) - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -207,7 +205,7 @@ func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { } ctx2, cancel2 := context.WithTimeout(ctx, timeout) defer cancel2() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx2, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { t.Fatalf("EmptyCall() returned err: %v, wantErr %v", err, test.wantErr) } @@ -323,13 +321,13 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // setupManagementServer() sets up a bootstrap file with certificate + // SetupManagementServer() sets up a bootstrap file with certificate // provider instance names: `e2e.ServerSideCertProviderInstance` and // `e2e.ClientSideCertProviderInstance`. 
- managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) defer cleanup1() - port, cleanup2 := clientSetup(t, &testService{}) + port, cleanup2 := startTestService(t, nil) defer cleanup2() // This creates a `Cluster` resource with a security config which @@ -363,7 +361,7 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { } ctx2, cancel2 := context.WithTimeout(ctx, timeout) defer cancel2() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx2, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { t.Fatalf("EmptyCall() returned err: %v, wantErr %v", err, test.wantErr) } diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go new file mode 100644 index 000000000000..f3057aa0eafb --- /dev/null +++ b/test/xds/xds_server_integration_test.go @@ -0,0 +1,370 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds + +import ( + "context" + "fmt" + "net" + "strconv" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds" + + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +type testService struct { + testpb.TestServiceServer +} + +func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil +} + +func (*testService) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil +} + +// setupGRPCServer performs the following: +// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and +// register the test service on it +// - create a local TCP listener and start serving on it +// +// Returns the following: +// - local listener on which the xDS-enabled gRPC server is serving on +// - cleanup function to be invoked by the tests when done +func setupGRPCServer(t *testing.T, bootstrapContents []byte) (net.Listener, func()) { + t.Helper() + + // Configure xDS credentials to be used on the server-side. + creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a server option to get notified about serving mode changes. We don't + // do anything other than throwing a log entry here. But this is required, + // since the server code emits a log entry at the default level (which is + // ERROR) if no callback is registered for serving mode changes. Our + // testLogger fails the test if there is any log entry at ERROR level. 
It does + // provide an ExpectError() method, but that takes a string and it would be + // painful to construct the exact error message expected here. Instead this + // works just fine. + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + }) + + // Initialize an xDS-enabled gRPC server and register the stubServer on it. + server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) + testgrpc.RegisterTestServiceServer(server, &testService{}) + + // Create a local listener and pass it to Serve(). + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + return lis, func() { + server.Stop() + } +} + +func hostPortFromListener(lis net.Listener) (string, uint32, error) { + host, p, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + return "", 0, fmt.Errorf("net.SplitHostPort(%s) failed: %v", lis.Addr().String(), err) + } + port, err := strconv.ParseInt(p, 10, 32) + if err != nil { + return "", 0, fmt.Errorf("strconv.ParseInt(%s, 10, 32) failed: %v", p, err) + } + return host, uint32(port), nil +} + +// TestServerSideXDS_Fallback is an e2e test which verifies xDS credentials +// fallback functionality. +// +// The following sequence of events happen as part of this test: +// - An xDS-enabled gRPC server is created and xDS credentials are configured. +// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. +// - Control plane is configured to not send any security configuration to both +// the client and the server. This results in both of them using the +// configured fallback credentials (which is insecure creds in this case). 
+func (s) TestServerSideXDS_Fallback(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS resources + // corresponding to it. This contains default resources with no security + // configuration in the Cluster resources. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + + // Create an inbound xDS listener resource for the server side that does not + // contain any security configuration. This should force the server-side + // xdsCredentials to use fallback. + inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server-side resources. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make a successful RPC. 
+ cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Errorf("rpc EmptyCall() failed: %v", err) + } +} + +// TestServerSideXDS_FileWatcherCerts is an e2e test which verifies xDS +// credentials with file watcher certificate provider. +// +// The following sequence of events happen as part of this test: +// - An xDS-enabled gRPC server is created and xDS credentials are configured. +// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. +// - Control plane is configured to send security configuration to both the +// client and the server, pointing to the file watcher certificate provider. +// We verify both TLS and mTLS scenarios. +func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { + tests := []struct { + name string + secLevel e2e.SecurityLevel + }{ + { + name: "tls", + secLevel: e2e.SecurityLevelTLS, + }, + { + name: "mtls", + secLevel: e2e.SecurityLevelMTLS, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS + // resources corresponding to it. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + + // Create xDS resources to be consumed on the client side. This + // includes the listener, route configuration, cluster (with + // security configuration) and endpoint resources. 
+ serviceName := "my-service-file-watcher-certs-" + test.name + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: test.secLevel, + }) + + // Create an inbound xDS listener resource for the server side that + // contains security configuration pointing to the file watcher + // plugin. + inboundLis := e2e.DefaultServerListener(host, port, test.secLevel) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server resources. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make an RPC. + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + }) + } +} + +// TestServerSideXDS_SecurityConfigChange is an e2e test where xDS is enabled on +// the server-side and xdsCredentials are configured for security. The control +// plane initially does not any security configuration. This forces the +// xdsCredentials to use fallback creds, which is this case is insecure creds. +// We verify that a client connecting with TLS creds is not able to successfully +// make an RPC. 
The control plane then sends a listener resource with security +// configuration pointing to the use of the file_watcher plugin and we verify +// that the same client is now able to successfully make an RPC. +func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS resources + // corresponding to it. This contains default resources with no security + // configuration in the Cluster resource. This should force the xDS + // credentials on the client to use its fallback. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-security-config-change" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + + // Create an inbound xDS listener resource for the server side that does not + // contain any security configuration. This should force the xDS credentials + // on server to use its fallback. + inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server-side resources. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. 
+ xdsCreds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make a successful RPC. + xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer xdsCC.Close() + + client := testgrpc.NewTestServiceClient(xdsCC) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Create a ClientConn with TLS creds. This should fail since the server is + // using fallback credentials which in this case in insecure creds. + tlsCreds := e2e.CreateClientTLSCredentials(t) + tlsCC, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(tlsCreds)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer tlsCC.Close() + + // We don't set 'waitForReady` here since we want this call to failfast. + client = testgrpc.NewTestServiceClient(tlsCC) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + t.Fatal("rpc EmptyCall() succeeded when expected to fail") + } + + // Switch server and client side resources with ones that contain required + // security configuration for mTLS with a file watcher certificate provider. + resources = e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) + inboundLis = e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) + resources.Listeners = append(resources.Listeners, inboundLis) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Make another RPC with `waitForReady` set and expect this to succeed. 
+ if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/xds/internal/test/xds_server_integration_test.go b/test/xds/xds_server_rbac_test.go similarity index 70% rename from xds/internal/test/xds_server_integration_test.go rename to test/xds/xds_server_rbac_test.go index b362926905b6..216653a8d1f7 100644 --- a/xds/internal/test/xds_server_integration_test.go +++ b/test/xds/xds_server_rbac_test.go @@ -1,9 +1,6 @@ -//go:build !386 -// +build !386 - /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +16,7 @@ * */ -// Package xds_test contains e2e tests for xDS use. -package xds_test +package xds import ( "context" @@ -30,356 +26,29 @@ import ( "strings" "testing" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds" - "google.golang.org/grpc/xds/internal/httpfilter/rbac" - "google.golang.org/grpc/xds/internal/testutils/e2e" + "google.golang.org/protobuf/types/known/anypb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" rpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" - 
v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" - anypb "github.com/golang/protobuf/ptypes/any" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - xdscreds "google.golang.org/grpc/credentials/xds" + testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" ) -const ( - // Names of files inside tempdir, for certprovider plugin to watch. - certFile = "cert.pem" - keyFile = "key.pem" - rootFile = "ca.pem" -) - -// setupGRPCServer performs the following: -// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and -// register the test service on it -// - create a local TCP listener and start serving on it -// -// Returns the following: -// - local listener on which the xDS-enabled gRPC server is serving on -// - cleanup function to be invoked by the tests when done -func setupGRPCServer(t *testing.T, bootstrapContents []byte) (net.Listener, func()) { - t.Helper() - - // Configure xDS credentials to be used on the server-side. - creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ - FallbackCreds: insecure.NewCredentials(), - }) - if err != nil { - t.Fatal(err) - } - - // Create a server option to get notified about serving mode changes. We don't - // do anything other than throwing a log entry here. But this is required, - // since the server code emits a log entry at the default level (which is - // ERROR) if no callback is registered for serving mode changes. Our - // testLogger fails the test if there is any log entry at ERROR level. It does - // provide an ExpectError() method, but that takes a string and it would be - // painful to construct the exact error message expected here. 
Instead this - // works just fine. - modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { - t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) - }) - - // Initialize an xDS-enabled gRPC server and register the stubServer on it. - server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) - testpb.RegisterTestServiceServer(server, &testService{}) - - // Create a local listener and pass it to Serve(). - lis, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - - go func() { - if err := server.Serve(lis); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - - return lis, func() { - server.Stop() - } -} - -func hostPortFromListener(lis net.Listener) (string, uint32, error) { - host, p, err := net.SplitHostPort(lis.Addr().String()) - if err != nil { - return "", 0, fmt.Errorf("net.SplitHostPort(%s) failed: %v", lis.Addr().String(), err) - } - port, err := strconv.ParseInt(p, 10, 32) - if err != nil { - return "", 0, fmt.Errorf("strconv.ParseInt(%s, 10, 32) failed: %v", p, err) - } - return host, uint32(port), nil -} - -// TestServerSideXDS_Fallback is an e2e test which verifies xDS credentials -// fallback functionality. -// -// The following sequence of events happen as part of this test: -// - An xDS-enabled gRPC server is created and xDS credentials are configured. -// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS -// credentials are configured. -// - Control plane is configured to not send any security configuration to both -// the client and the server. This results in both of them using the -// configured fallback credentials (which is insecure creds in this case). 
-func (s) TestServerSideXDS_Fallback(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) - defer cleanup1() - - lis, cleanup2 := setupGRPCServer(t, bootstrapContents) - defer cleanup2() - - // Grab the host and port of the server and create client side xDS resources - // corresponding to it. This contains default resources with no security - // configuration in the Cluster resources. - host, port, err := hostPortFromListener(lis) - if err != nil { - t.Fatalf("failed to retrieve host and port of server: %v", err) - } - const serviceName = "my-service-fallback" - resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: host, - Port: port, - SecLevel: e2e.SecurityLevelNone, - }) - - // Create an inbound xDS listener resource for the server side that does not - // contain any security configuration. This should force the server-side - // xdsCredentials to use fallback. - inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) - resources.Listeners = append(resources.Listeners, inboundLis) - - // Setup the management server with client and server-side resources. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create client-side xDS credentials with an insecure fallback. - creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ - FallbackCreds: insecure.NewCredentials(), - }) - if err != nil { - t.Fatal(err) - } - - // Create a ClientConn with the xds scheme and make a successful RPC. 
- cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Errorf("rpc EmptyCall() failed: %v", err) - } -} - -// TestServerSideXDS_FileWatcherCerts is an e2e test which verifies xDS -// credentials with file watcher certificate provider. -// -// The following sequence of events happen as part of this test: -// - An xDS-enabled gRPC server is created and xDS credentials are configured. -// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS -// credentials are configured. -// - Control plane is configured to send security configuration to both the -// client and the server, pointing to the file watcher certificate provider. -// We verify both TLS and mTLS scenarios. -func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { - tests := []struct { - name string - secLevel e2e.SecurityLevel - }{ - { - name: "tls", - secLevel: e2e.SecurityLevelTLS, - }, - { - name: "mtls", - secLevel: e2e.SecurityLevelMTLS, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) - defer cleanup1() - - lis, cleanup2 := setupGRPCServer(t, bootstrapContents) - defer cleanup2() - - // Grab the host and port of the server and create client side xDS - // resources corresponding to it. - host, port, err := hostPortFromListener(lis) - if err != nil { - t.Fatalf("failed to retrieve host and port of server: %v", err) - } - - // Create xDS resources to be consumed on the client side. This - // includes the listener, route configuration, cluster (with - // security configuration) and endpoint resources. 
- serviceName := "my-service-file-watcher-certs-" + test.name - resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: host, - Port: port, - SecLevel: test.secLevel, - }) - - // Create an inbound xDS listener resource for the server side that - // contains security configuration pointing to the file watcher - // plugin. - inboundLis := e2e.DefaultServerListener(host, port, test.secLevel) - resources.Listeners = append(resources.Listeners, inboundLis) - - // Setup the management server with client and server resources. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create client-side xDS credentials with an insecure fallback. - creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ - FallbackCreds: insecure.NewCredentials(), - }) - if err != nil { - t.Fatal(err) - } - - // Create a ClientConn with the xds scheme and make an RPC. - cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } - }) - } -} - -// TestServerSideXDS_SecurityConfigChange is an e2e test where xDS is enabled on -// the server-side and xdsCredentials are configured for security. The control -// plane initially does not any security configuration. This forces the -// xdsCredentials to use fallback creds, which is this case is insecure creds. -// We verify that a client connecting with TLS creds is not able to successfully -// make an RPC. 
The control plane then sends a listener resource with security -// configuration pointing to the use of the file_watcher plugin and we verify -// that the same client is now able to successfully make an RPC. -func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) - defer cleanup1() - - lis, cleanup2 := setupGRPCServer(t, bootstrapContents) - defer cleanup2() - - // Grab the host and port of the server and create client side xDS resources - // corresponding to it. This contains default resources with no security - // configuration in the Cluster resource. This should force the xDS - // credentials on the client to use its fallback. - host, port, err := hostPortFromListener(lis) - if err != nil { - t.Fatalf("failed to retrieve host and port of server: %v", err) - } - const serviceName = "my-service-security-config-change" - resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: host, - Port: port, - SecLevel: e2e.SecurityLevelNone, - }) - - // Create an inbound xDS listener resource for the server side that does not - // contain any security configuration. This should force the xDS credentials - // on server to use its fallback. - inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) - resources.Listeners = append(resources.Listeners, inboundLis) - - // Setup the management server with client and server-side resources. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create client-side xDS credentials with an insecure fallback. 
- xdsCreds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ - FallbackCreds: insecure.NewCredentials(), - }) - if err != nil { - t.Fatal(err) - } - - // Create a ClientConn with the xds scheme and make a successful RPC. - xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds), grpc.WithResolvers(resolver)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer xdsCC.Close() - - client := testpb.NewTestServiceClient(xdsCC) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } - - // Create a ClientConn with TLS creds. This should fail since the server is - // using fallback credentials which in this case in insecure creds. - tlsCreds := createClientTLSCredentials(t) - tlsCC, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(tlsCreds)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer tlsCC.Close() - - // We don't set 'waitForReady` here since we want this call to failfast. - client = testpb.NewTestServiceClient(tlsCC) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { - t.Fatal("rpc EmptyCall() succeeded when expected to fail") - } - - // Switch server and client side resources with ones that contain required - // security configuration for mTLS with a file watcher certificate provider. - resources = e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: host, - Port: port, - SecLevel: e2e.SecurityLevelMTLS, - }) - inboundLis = e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) - resources.Listeners = append(resources.Listeners, inboundLis) - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Make another RPC with `waitForReady` set and expect this to succeed. 
- if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } -} - // TestServerSideXDS_RouteConfiguration is an e2e test which verifies routing // functionality. The xDS enabled server will be set up with route configuration // where the route configuration has routes with the correct routing actions @@ -391,7 +60,7 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { defer func() { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -583,7 +252,7 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) // This Empty Call should match to a route with a correct action // (NonForwardingAction). Thus, this RPC should proceed as normal. 
There is @@ -739,8 +408,8 @@ func (s) TestRBACHTTPFilter(t *testing.T) { defer func() { envconfig.XDSRBAC = oldRBAC }() - rbac.RegisterForTesting() - defer rbac.UnregisterForTesting() + internal.RegisterRBACHTTPFilterForTesting() + defer internal.UnregisterRBACHTTPFilterForTesting() tests := []struct { name string rbacCfg *rpb.RBAC @@ -936,7 +605,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { func() { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -970,7 +639,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != test.wantStatusEmptyCall { t.Fatalf("EmptyCall() returned err with status: %v, wantStatusEmptyCall: %v", status.Code(err), test.wantStatusEmptyCall) @@ -1121,7 +790,7 @@ func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -1178,7 +847,7 @@ func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := setupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) diff --git a/xds/internal/test/xds_server_serving_mode_test.go 
b/test/xds/xds_server_serving_mode_test.go similarity index 96% rename from xds/internal/test/xds_server_serving_mode_test.go rename to test/xds/xds_server_serving_mode_test.go index 236a831c642b..fe3a5612d9f1 100644 --- a/xds/internal/test/xds_server_serving_mode_test.go +++ b/test/xds/xds_server_serving_mode_test.go @@ -1,6 +1,3 @@ -//go:build !386 -// +build !386 - /* * * Copyright 2021 gRPC authors. @@ -19,8 +16,7 @@ * */ -// Package xds_test contains e2e tests for xDS use. -package xds_test +package xds import ( "context" @@ -34,10 +30,11 @@ import ( "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds" - "google.golang.org/grpc/xds/internal/testutils/e2e" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -46,7 +43,7 @@ import ( // change callback is not invoked and client connections to the server are not // recycled. func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { - managementServer, nodeID, bootstrapContents, _, cleanup := setupManagementServer(t) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t) defer cleanup() creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) @@ -68,7 +65,7 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { // Initialize an xDS-enabled gRPC server and register the stubServer on it. 
server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) defer server.Stop() - testpb.RegisterTestServiceServer(server, &testService{}) + testgrpc.RegisterTestServiceServer(server, &testService{}) // Setup the management server to respond with the listener resources. host, port, err := hostPortFromListener(lis) @@ -157,7 +154,7 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { // xDS enabled gRPC servers. It verifies that appropriate mode changes happen in // the server, and also verifies behavior of clientConns under these modes. func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { - managementServer, nodeID, bootstrapContents, _, cleanup := setupManagementServer(t) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t) defer cleanup() // Configure xDS credentials to be used on the server-side. @@ -359,7 +356,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { func waitForSuccessfulRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { t.Helper() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) if _, err := c.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("rpc EmptyCall() failed: %v", err) } @@ -369,7 +366,7 @@ func waitForFailedRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { t.Helper() // Attempt one RPC before waiting for the ticker to expire. 
- c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { return } diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 1d772b67f376..d8dcdcdfbd0f 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -33,9 +33,9 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/xds" _ "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go index 69fb7f4a9098..a167cc5fa2c9 100644 --- a/xds/internal/clusterspecifier/rls/rls.go +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -38,27 +38,15 @@ func init() { if envconfig.XDSRLS { clusterspecifier.Register(rls{}) } -} - -// RegisterForTesting registers the RLS Cluster Specifier Plugin for testing -// purposes, regardless of the XDSRLS environment variable. This is needed -// because there is no way to set the XDSRLS environment variable to true in a -// test before init() in this package is run. -// -// TODO: Remove this function once the RLS env var is removed. -func RegisterForTesting() { - clusterspecifier.Register(rls{}) -} -// UnregisterForTesting unregisters the RLS Cluster Specifier Plugin for testing -// purposes. This is needed because there is no way to unregister the RLS -// Cluster Specifier Plugin after registering it solely for testing purposes -// using rls.RegisterForTesting(). -// -// TODO: Remove this function once the RLS env var is removed. 
-func UnregisterForTesting() { - for _, typeURL := range rls.TypeURLs(rls{}) { - clusterspecifier.UnregisterForTesting(typeURL) + // TODO: Remove these once the RLS env var is removed. + internal.RegisterRLSClusterSpecifierPluginForTesting = func() { + clusterspecifier.Register(rls{}) + } + internal.UnregisterRLSClusterSpecifierPluginForTesting = func() { + for _, typeURL := range rls.TypeURLs(rls{}) { + clusterspecifier.UnregisterForTesting(typeURL) + } } } diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 6ee5e654c7dc..e44f91a55588 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -39,10 +39,10 @@ import ( "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/protobuf/types/known/wrapperspb" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index 3dc4b56826e6..209283c3bf59 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -27,6 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/rbac" @@ -41,26 +42,15 @@ func init() { if envconfig.XDSRBAC { httpfilter.Register(builder{}) } -} - -// RegisterForTesting registers the RBAC HTTP Filter for testing purposes, -// regardless of the RBAC environment variable. 
This is needed because there is -// no way to set the RBAC environment variable to true in a test before init() -// in this package is run. -// -// TODO: Remove this function once the RBAC env var is removed. -func RegisterForTesting() { - httpfilter.Register(builder{}) -} -// UnregisterForTesting unregisters the RBAC HTTP Filter for testing purposes. -// This is needed because there is no way to unregister the HTTP Filter after -// registering it solely for testing purposes using rbac.RegisterForTesting(). -// -// TODO: Remove this function once the RBAC env var is removed. -func UnregisterForTesting() { - for _, typeURL := range builder.TypeURLs(builder{}) { - httpfilter.UnregisterForTesting(typeURL) + // TODO: Remove these once the RBAC env var is removed. + internal.RegisterRBACHTTPFilterForTesting = func() { + httpfilter.Register(builder{}) + } + internal.UnregisterRBACHTTPFilterForTesting = func() { + for _, typeURL := range builder.TypeURLs(builder{}) { + httpfilter.UnregisterForTesting(typeURL) + } } } diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 8a613c4c44f6..4f31d9c44c38 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -25,6 +25,7 @@ import ( "strings" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -37,10 +38,10 @@ import ( const xdsScheme = "xds" -// NewBuilderForTesting creates a new xds resolver builder using a specific xds +// newBuilderForTesting creates a new xds resolver builder using a specific xds // bootstrap config, so tests can use multiple xds clients in different // ClientConns at the same time. 
-func NewBuilderForTesting(config []byte) (resolver.Builder, error) { +func newBuilderForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ newXDSClient: func() (xdsclient.XDSClient, error) { return xdsclient.NewWithBootstrapContentsForTesting(config) @@ -53,6 +54,7 @@ var newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.New() func init() { resolver.Register(&xdsResolverBuilder{}) + internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting } type xdsResolverBuilder struct { diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 85ac93c2ed44..2c1e8b75b8fe 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -26,19 +26,21 @@ import ( "testing" "time" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" + _ "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/testutils/e2e" - 
"google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go index b663cb31f7e3..8f27ff053d75 100644 --- a/xds/internal/test/e2e/controlplane.go +++ b/xds/internal/test/e2e/controlplane.go @@ -21,8 +21,8 @@ import ( "fmt" "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils/xds/e2e" xdsinternal "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/xds/internal/testutils/e2e" ) type controlPlane struct { diff --git a/xds/internal/test/e2e/e2e_test.go b/xds/internal/test/e2e/e2e_test.go index ca547a522f24..309c58010cf3 100644 --- a/xds/internal/test/e2e/e2e_test.go +++ b/xds/internal/test/e2e/e2e_test.go @@ -27,11 +27,12 @@ import ( "testing" "time" + "google.golang.org/grpc/internal/testutils/xds/e2e" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" testpb "google.golang.org/grpc/interop/grpc_testing" - "google.golang.org/grpc/xds/internal/testutils/e2e" ) var ( diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go deleted file mode 100644 index e9e3fd584bf3..000000000000 --- a/xds/internal/test/xds_client_integration_test.go +++ /dev/null @@ -1,384 +0,0 @@ -//go:build !386 -// +build !386 - -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xds_test - -import ( - "context" - "fmt" - "net" - "testing" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/envconfig" - rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/stubserver" - "google.golang.org/grpc/internal/testutils" - rlstest "google.golang.org/grpc/internal/testutils/rls" - "google.golang.org/grpc/status" - rlscsp "google.golang.org/grpc/xds/internal/clusterspecifier/rls" - "google.golang.org/grpc/xds/internal/testutils/e2e" - "google.golang.org/protobuf/types/known/durationpb" - - _ "google.golang.org/grpc/balancer/rls" // Register the RLS Load Balancing policy. - _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS Cluster Specifier Plugin. 
- - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - testpb "google.golang.org/grpc/test/grpc_testing" -) - -// clientSetup performs a bunch of steps common to all xDS client tests here: -// - spin up a gRPC server and register the test service on it -// - create a local TCP listener and start serving on it -// -// Returns the following: -// - the port the server is listening on -// - cleanup function to be invoked by the tests when done -func clientSetup(t *testing.T, tss testpb.TestServiceServer) (uint32, func()) { - // Initialize a gRPC server and register the stubServer on it. - server := grpc.NewServer() - testpb.RegisterTestServiceServer(server, tss) - - // Create a local listener and pass it to Serve(). 
- lis, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - - go func() { - if err := server.Serve(lis); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - - return uint32(lis.Addr().(*net.TCPAddr).Port), func() { - server.Stop() - } -} - -func (s) TestClientSideXDS(t *testing.T) { - managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) - defer cleanup1() - - port, cleanup2 := clientSetup(t, &testService{}) - defer cleanup2() - - const serviceName = "my-service-client-side-xds" - resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: "localhost", - Port: port, - SecLevel: e2e.SecurityLevelNone, - }) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create a ClientConn and make a successful RPC. - cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } -} - -func (s) TestClientSideRetry(t *testing.T) { - ctr := 0 - errs := []codes.Code{codes.ResourceExhausted} - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - defer func() { ctr++ }() - if ctr < len(errs) { - return nil, status.Errorf(errs[ctr], "this should be retried") - } - return &testpb.Empty{}, nil - }, - } - - managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) - defer cleanup1() - - port, cleanup2 := clientSetup(t, ss) - defer cleanup2() - - const serviceName 
= "my-service-client-side-xds" - resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: "localhost", - Port: port, - SecLevel: e2e.SecurityLevelNone, - }) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create a ClientConn and make a successful RPC. - cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - defer cancel() - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.ResourceExhausted { - t.Fatalf("rpc EmptyCall() = _, %v; want _, ResourceExhausted", err) - } - - testCases := []struct { - name string - vhPolicy *v3routepb.RetryPolicy - routePolicy *v3routepb.RetryPolicy - errs []codes.Code // the errors returned by the server for each RPC - tryAgainErr codes.Code // the error that would be returned if we are still using the old retry policies. 
- errWant codes.Code - }{{ - name: "virtualHost only, fail", - vhPolicy: &v3routepb.RetryPolicy{ - RetryOn: "resource-exhausted,unavailable", - NumRetries: &wrapperspb.UInt32Value{Value: 1}, - }, - errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, - routePolicy: nil, - tryAgainErr: codes.ResourceExhausted, - errWant: codes.Unavailable, - }, { - name: "virtualHost only", - vhPolicy: &v3routepb.RetryPolicy{ - RetryOn: "resource-exhausted, unavailable", - NumRetries: &wrapperspb.UInt32Value{Value: 2}, - }, - errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, - routePolicy: nil, - tryAgainErr: codes.Unavailable, - errWant: codes.OK, - }, { - name: "virtualHost+route, fail", - vhPolicy: &v3routepb.RetryPolicy{ - RetryOn: "resource-exhausted,unavailable", - NumRetries: &wrapperspb.UInt32Value{Value: 2}, - }, - routePolicy: &v3routepb.RetryPolicy{ - RetryOn: "resource-exhausted", - NumRetries: &wrapperspb.UInt32Value{Value: 2}, - }, - errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, - tryAgainErr: codes.OK, - errWant: codes.Unavailable, - }, { - name: "virtualHost+route", - vhPolicy: &v3routepb.RetryPolicy{ - RetryOn: "resource-exhausted", - NumRetries: &wrapperspb.UInt32Value{Value: 2}, - }, - routePolicy: &v3routepb.RetryPolicy{ - RetryOn: "unavailable", - NumRetries: &wrapperspb.UInt32Value{Value: 2}, - }, - errs: []codes.Code{codes.Unavailable}, - tryAgainErr: codes.Unavailable, - errWant: codes.OK, - }, { - name: "virtualHost+route, not enough attempts", - vhPolicy: &v3routepb.RetryPolicy{ - RetryOn: "unavailable", - NumRetries: &wrapperspb.UInt32Value{Value: 2}, - }, - routePolicy: &v3routepb.RetryPolicy{ - RetryOn: "unavailable", - NumRetries: &wrapperspb.UInt32Value{Value: 1}, - }, - errs: []codes.Code{codes.Unavailable, codes.Unavailable}, - tryAgainErr: codes.OK, - errWant: codes.Unavailable, - }} - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - errs = tc.errs - - // Confirm tryAgainErr is 
correct before updating resources. - ctr = 0 - _, err := client.EmptyCall(ctx, &testpb.Empty{}) - if code := status.Code(err); code != tc.tryAgainErr { - t.Fatalf("with old retry policy: EmptyCall() = _, %v; want _, %v", err, tc.tryAgainErr) - } - - resources.Routes[0].VirtualHosts[0].RetryPolicy = tc.vhPolicy - resources.Routes[0].VirtualHosts[0].Routes[0].GetRoute().RetryPolicy = tc.routePolicy - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - for { - ctr = 0 - _, err := client.EmptyCall(ctx, &testpb.Empty{}) - if code := status.Code(err); code == tc.tryAgainErr { - continue - } else if code != tc.errWant { - t.Fatalf("rpc EmptyCall() = _, %v; want _, %v", err, tc.errWant) - } - break - } - }) - } -} - -// defaultClientResourcesWithRLSCSP returns a set of resources (LDS, RDS, CDS, EDS) for a -// client to connect to a server with a RLS Load Balancer as a child of Cluster Manager. -func defaultClientResourcesWithRLSCSP(params e2e.ResourceParams, rlsProto *rlspb.RouteLookupConfig) e2e.UpdateOptions { - routeConfigName := "route-" + params.DialTarget - clusterName := "cluster-" + params.DialTarget - endpointsName := "endpoints-" + params.DialTarget - return e2e.UpdateOptions{ - NodeID: params.NodeID, - Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, - Routes: []*v3routepb.RouteConfiguration{defaultRouteConfigWithRLSCSP(routeConfigName, params.DialTarget, rlsProto)}, - Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, endpointsName, params.SecLevel)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, []uint32{params.Port})}, - } -} - -// defaultRouteConfigWithRLSCSP returns a basic xds RouteConfig resource with an -// RLS Cluster Specifier Plugin configured as the route. 
-func defaultRouteConfigWithRLSCSP(routeName, ldsTarget string, rlsProto *rlspb.RouteLookupConfig) *v3routepb.RouteConfiguration { - return &v3routepb.RouteConfiguration{ - Name: routeName, - ClusterSpecifierPlugins: []*v3routepb.ClusterSpecifierPlugin{ - { - Extension: &v3corepb.TypedExtensionConfig{ - Name: "rls-csp", - TypedConfig: testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ - RouteLookupConfig: rlsProto, - }), - }, - }, - }, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: "rls-csp"}, - }}, - }}, - }}, - } -} - -// TestRLSinxDS tests an xDS configured system with a RLS Balancer present. -// This test sets up the RLS Balancer using the RLS Cluster Specifier Plugin, -// spins up a test service and has a fake RLS Server correctly respond with a target -// corresponding to this test service. This test asserts an RPC proceeds as normal -// with the RLS Balancer as part of system. -func (s) TestRLSinxDS(t *testing.T) { - oldRLS := envconfig.XDSRLS - envconfig.XDSRLS = true - rlscsp.RegisterForTesting() - defer func() { - envconfig.XDSRLS = oldRLS - rlscsp.UnregisterForTesting() - }() - - // Set up all components and configuration necessary - management server, - // xDS resolver, fake RLS Server, and xDS configuration which specifies a - // RLS Balancer that communicates to this set up fake RLS Server. 
- managementServer, nodeID, _, resolver, cleanup1 := setupManagementServer(t) - defer cleanup1() - port, cleanup2 := clientSetup(t, &testService{}) - defer cleanup2() - - lis := testutils.NewListenerWrapper(t, nil) - rlsServer, rlsRequestCh := rlstest.SetupFakeRLSServer(t, lis) - rlsProto := &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{{Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}}}, - LookupService: rlsServer.Address, - LookupServiceTimeout: durationpb.New(defaultTestTimeout), - CacheSizeBytes: 1024, - } - - const serviceName = "my-service-client-side-xds" - resources := defaultClientResourcesWithRLSCSP(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: "localhost", - Port: port, - SecLevel: e2e.SecurityLevelNone, - }, rlsProto) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Configure the fake RLS Server to set the RLS Balancers child CDS - // Cluster's name as the target for the RPC to use. - rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { - return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{"cluster-" + serviceName}}} - }) - - // Create a ClientConn and make a successful RPC. - cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - // Successfully sending the RPC will require the RLS Load Balancer to - // communicate with the fake RLS Server for information about the target. 
- if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } - - // These RLS Verifications makes sure the RLS Load Balancer is actually part - // of the xDS Configured system that correctly sends out RPC. - - // Verify connection is established to RLS Server. - if _, err = lis.NewConnCh.Receive(ctx); err != nil { - t.Fatal("Timeout when waiting for RLS LB policy to create control channel") - } - - // Verify an rls request is sent out to fake RLS Server. - select { - case <-ctx.Done(): - t.Fatalf("Timeout when waiting for an RLS request to be sent out") - case <-rlsRequestCh: - } -} diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/test/xds_integration_test.go deleted file mode 100644 index 4b7cca3b828f..000000000000 --- a/xds/internal/test/xds_integration_test.go +++ /dev/null @@ -1,200 +0,0 @@ -//go:build !386 -// +build !386 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package xds_test contains e2e tests for xDS use. 
-package xds_test - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - "testing" - "time" - - "github.com/google/uuid" - - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/testdata" - "google.golang.org/grpc/xds" - "google.golang.org/grpc/xds/internal/testutils/e2e" - - xdsinternal "google.golang.org/grpc/internal/xds" - testpb "google.golang.org/grpc/test/grpc_testing" -) - -const ( - defaultTestTimeout = 10 * time.Second - defaultTestShortTimeout = 100 * time.Millisecond -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -type testService struct { - testpb.TestServiceServer -} - -func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil -} - -func (*testService) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil -} - -func createTmpFile(src, dst string) error { - data, err := ioutil.ReadFile(src) - if err != nil { - return fmt.Errorf("ioutil.ReadFile(%q) failed: %v", src, err) - } - if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { - return fmt.Errorf("ioutil.WriteFile(%q) failed: %v", dst, err) - } - return nil -} - -// createTempDirWithFiles creates a temporary directory under the system default -// tempDir with the given dirSuffix. It also reads from certSrc, keySrc and -// rootSrc files are creates appropriate files under the newly create tempDir. -// Returns the name of the created tempDir. -func createTmpDirWithFiles(dirSuffix, certSrc, keySrc, rootSrc string) (string, error) { - // Create a temp directory. Passing an empty string for the first argument - // uses the system temp directory. 
- dir, err := ioutil.TempDir("", dirSuffix) - if err != nil { - return "", fmt.Errorf("ioutil.TempDir() failed: %v", err) - } - - if err := createTmpFile(testdata.Path(certSrc), path.Join(dir, certFile)); err != nil { - return "", err - } - if err := createTmpFile(testdata.Path(keySrc), path.Join(dir, keyFile)); err != nil { - return "", err - } - if err := createTmpFile(testdata.Path(rootSrc), path.Join(dir, rootFile)); err != nil { - return "", err - } - return dir, nil -} - -// createClientTLSCredentials creates client-side TLS transport credentials. -func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { - t.Helper() - - cert, err := tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) - if err != nil { - t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) - } - b, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) - if err != nil { - t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) - } - roots := x509.NewCertPool() - if !roots.AppendCertsFromPEM(b) { - t.Fatal("failed to append certificates") - } - return credentials.NewTLS(&tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: roots, - ServerName: "x.test.example.com", - }) -} - -// setupManagement server performs the following: -// - spin up an xDS management server on a local port -// - set up certificates for consumption by the file_watcher plugin -// - creates a bootstrap file in a temporary location -// - creates an xDS resolver using the above bootstrap contents -// -// Returns the following: -// - management server -// - nodeID to be used by the client when connecting to the management server -// - bootstrap contents to be used by the client -// - xDS resolver builder to be used by the client -// - a cleanup function to be invoked at the end of the test -func setupManagementServer(t *testing.T) (*e2e.ManagementServer, string, []byte, 
resolver.Builder, func()) { - t.Helper() - - // Spin up an xDS management server on a local port. - server, err := e2e.StartManagementServer() - if err != nil { - t.Fatalf("Failed to spin up the xDS management server: %v", err) - } - defer func() { - if err != nil { - server.Stop() - } - }() - - // Create a directory to hold certs and key files used on the server side. - serverDir, err := createTmpDirWithFiles("testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") - if err != nil { - server.Stop() - t.Fatal(err) - } - - // Create a directory to hold certs and key files used on the client side. - clientDir, err := createTmpDirWithFiles("testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") - if err != nil { - server.Stop() - t.Fatal(err) - } - - // Create certificate providers section of the bootstrap config with entries - // for both the client and server sides. - cpc := map[string]json.RawMessage{ - e2e.ServerSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), - e2e.ClientSideCertProviderInstance: e2e.DefaultFileWatcherConfig(path.Join(clientDir, certFile), path.Join(clientDir, keyFile), path.Join(clientDir, rootFile)), - } - - // Create a bootstrap file in a temporary directory. 
- nodeID := uuid.New().String() - bootstrapContents, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ - Version: xdsinternal.TransportV3, - NodeID: nodeID, - ServerURI: server.Address, - CertificateProviders: cpc, - ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, - }) - if err != nil { - server.Stop() - t.Fatalf("Failed to create bootstrap file: %v", err) - } - resolver, err := xds.NewXDSResolverWithConfigForTesting(bootstrapContents) - if err != nil { - server.Stop() - t.Fatalf("Failed to create xDS resolver for testing: %v", err) - } - - return server, nodeID, bootstrapContents, resolver, func() { server.Stop() } -} diff --git a/xds/internal/xdsclient/xdsresource/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go index 71e537f29b74..c141619c5a5a 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain_test.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go @@ -40,9 +40,9 @@ import ( "google.golang.org/grpc/internal/envconfig" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index 9150d64dfa00..f46cf3801aca 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -29,10 +29,8 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal/httpfilter" - _ 
"google.golang.org/grpc/xds/internal/httpfilter/rbac" - _ "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/durationpb" @@ -53,6 +51,9 @@ import ( anypb "github.com/golang/protobuf/ptypes/any" spb "github.com/golang/protobuf/ptypes/struct" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC HTTP filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. ) func (s) TestUnmarshalListener_ClientSide(t *testing.T) { diff --git a/xds/server_test.go b/xds/server_test.go index ac0c573fdef7..4ad86879df07 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -35,9 +35,9 @@ import ( "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" _ "google.golang.org/grpc/xds/internal/httpfilter/router" xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" diff --git a/xds/xds.go b/xds/xds.go index 1b2b0c5793f8..3ff3c76bce4b 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -32,17 +32,15 @@ import ( v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "google.golang.org/grpc" + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. 
internaladmin "google.golang.org/grpc/internal/admin" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" - - _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. - xdsresolver "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client. _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. ) @@ -75,21 +73,3 @@ func init() { return csdss.Close, nil }) } - -// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using -// the provided xds bootstrap config instead of the global configuration from -// the supported environment variables. The resolver.Builder is meant to be -// used in conjunction with the grpc.WithResolvers DialOption. -// -// Testing Only -// -// This function should ONLY be used for testing and may not work with some -// other features, including the CSDS service. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
-func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { - return xdsresolver.NewBuilderForTesting(bootstrapConfig) -} From a45cd25f599fe66fe3a533186453f4d2261d075d Mon Sep 17 00:00:00 2001 From: apolcyn Date: Wed, 25 May 2022 09:49:02 -0700 Subject: [PATCH 510/998] xds: Enable aggregate and logical dns clusters by default (#5380) --- internal/envconfig/xds.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 7d996e51b5c1..55aaeea8b455 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -77,7 +77,7 @@ var ( // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable From 6e253e8afaa1a8073867fca7563fb468337cb12c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 25 May 2022 10:06:34 -0700 Subject: [PATCH 511/998] interop: update proto by running regenerate.sh (#5381) --- interop/grpc_testing/messages.pb.go | 677 +++++++++++++++++----------- 1 file changed, 411 insertions(+), 266 deletions(-) diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index 1bf3d756717e..b39bf4964ade 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -387,6 +387,10 @@ type SimpleRequest struct { FillServerId bool `protobuf:"varint,9,opt,name=fill_server_id,json=fillServerId,proto3" json:"fill_server_id,omitempty"` // Whether SimpleResponse should include grpclb_route_type. 
FillGrpclbRouteType bool `protobuf:"varint,10,opt,name=fill_grpclb_route_type,json=fillGrpclbRouteType,proto3" json:"fill_grpclb_route_type,omitempty"` + // If set the server should record this metrics report data for the current RPC. + OrcaPerQueryReport *TestOrcaReport `protobuf:"bytes,11,opt,name=orca_per_query_report,json=orcaPerQueryReport,proto3" json:"orca_per_query_report,omitempty"` + // If set the server should update this metrics report data at the OOB server. + OrcaOobReport *TestOrcaReport `protobuf:"bytes,12,opt,name=orca_oob_report,json=orcaOobReport,proto3" json:"orca_oob_report,omitempty"` } func (x *SimpleRequest) Reset() { @@ -491,6 +495,20 @@ func (x *SimpleRequest) GetFillGrpclbRouteType() bool { return false } +func (x *SimpleRequest) GetOrcaPerQueryReport() *TestOrcaReport { + if x != nil { + return x.OrcaPerQueryReport + } + return nil +} + +func (x *SimpleRequest) GetOrcaOobReport() *TestOrcaReport { + if x != nil { + return x.OrcaOobReport + } + return nil +} + // Unary response, as configured by the request. type SimpleResponse struct { state protoimpl.MessageState @@ -1360,6 +1378,80 @@ func (*ClientConfigureResponse) Descriptor() ([]byte, []int) { return file_grpc_testing_messages_proto_rawDescGZIP(), []int{17} } +// Metrics data the server will update and send to the client. It mirrors orca load report +// https://github.com/cncf/xds/blob/eded343319d09f30032952beda9840bbd3dcf7ac/xds/data/orca/v3/orca_load_report.proto#L15, +// but avoids orca dependency. Used by both per-query and out-of-band reporting tests. 
+type TestOrcaReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` + MemoryUtilization float64 `protobuf:"fixed64,2,opt,name=memory_utilization,json=memoryUtilization,proto3" json:"memory_utilization,omitempty"` + RequestCost map[string]float64 `protobuf:"bytes,3,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + Utilization map[string]float64 `protobuf:"bytes,4,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` +} + +func (x *TestOrcaReport) Reset() { + *x = TestOrcaReport{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_testing_messages_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TestOrcaReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestOrcaReport) ProtoMessage() {} + +func (x *TestOrcaReport) ProtoReflect() protoreflect.Message { + mi := &file_grpc_testing_messages_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestOrcaReport.ProtoReflect.Descriptor instead. 
+func (*TestOrcaReport) Descriptor() ([]byte, []int) { + return file_grpc_testing_messages_proto_rawDescGZIP(), []int{18} +} + +func (x *TestOrcaReport) GetCpuUtilization() float64 { + if x != nil { + return x.CpuUtilization + } + return 0 +} + +func (x *TestOrcaReport) GetMemoryUtilization() float64 { + if x != nil { + return x.MemoryUtilization + } + return 0 +} + +func (x *TestOrcaReport) GetRequestCost() map[string]float64 { + if x != nil { + return x.RequestCost + } + return nil +} + +func (x *TestOrcaReport) GetUtilization() map[string]float64 { + if x != nil { + return x.Utilization + } + return nil +} + type LoadBalancerStatsResponse_RpcsByPeer struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1372,7 +1464,7 @@ type LoadBalancerStatsResponse_RpcsByPeer struct { func (x *LoadBalancerStatsResponse_RpcsByPeer) Reset() { *x = LoadBalancerStatsResponse_RpcsByPeer{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[18] + mi := &file_grpc_testing_messages_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1385,7 +1477,7 @@ func (x *LoadBalancerStatsResponse_RpcsByPeer) String() string { func (*LoadBalancerStatsResponse_RpcsByPeer) ProtoMessage() {} func (x *LoadBalancerStatsResponse_RpcsByPeer) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[18] + mi := &file_grpc_testing_messages_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1423,7 +1515,7 @@ type LoadBalancerAccumulatedStatsResponse_MethodStats struct { func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) Reset() { *x = LoadBalancerAccumulatedStatsResponse_MethodStats{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[25] + mi := &file_grpc_testing_messages_proto_msgTypes[26] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1436,7 +1528,7 @@ func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) String() string { func (*LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoMessage() {} func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[25] + mi := &file_grpc_testing_messages_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1480,7 +1572,7 @@ type ClientConfigureRequest_Metadata struct { func (x *ClientConfigureRequest_Metadata) Reset() { *x = ClientConfigureRequest_Metadata{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[28] + mi := &file_grpc_testing_messages_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1493,7 +1585,7 @@ func (x *ClientConfigureRequest_Metadata) String() string { func (*ClientConfigureRequest_Metadata) ProtoMessage() {} func (x *ClientConfigureRequest_Metadata) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[28] + mi := &file_grpc_testing_messages_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1546,7 +1638,7 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x45, 0x63, 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xa2, 0x04, 0x0a, 0x0d, 0x53, 0x69, 0x6d, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xb9, 0x05, 0x0a, 0x0d, 0x53, 0x69, 0x6d, 0x70, 0x6c, 
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, @@ -1580,234 +1672,268 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x6c, 0x47, 0x72, - 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x82, 0x02, - 0x0a, 0x0e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x67, - 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 
0x6f, 0x75, 0x74, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x82, 0x01, - 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 
0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x55, 0x73, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x64, 0x22, 0xa3, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x41, 0x0a, 0x0f, 
0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x63, - 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, + 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4f, 0x0a, + 0x15, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x12, 0x6f, 0x72, 0x63, 0x61, + 0x50, 0x65, 0x72, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x44, + 0x0a, 0x0f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0d, 0x6f, 0x72, 0x63, 0x61, 0x4f, 0x6f, 0x62, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x22, 0x82, 0x02, 0x0a, 0x0e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, - 
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6d, - 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, - 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, - 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, - 0x66, 0x66, 0x4d, 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x61, 0x75, 0x74, 0x68, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x72, 0x70, + 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x67, 0x72, + 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 
0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, + 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x54, + 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x53, 0x69, 0x7a, 0x65, 0x22, 0x82, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 
0x61, 0x6c, 0x55, 0x73, + 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, + 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0xa3, 0x02, 0x0a, 0x1a, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, + 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x41, 0x0a, 0x0f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 
0x6e, 0x67, 0x2e, 0x45, 0x63, 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, + 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 
0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, - 0x72, 0x70, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, - 0x70, 0x63, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, - 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x53, 0x65, 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, - 0x0c, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x5f, 0x0a, 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, - 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x1a, 0xb1, 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, - 0x12, 0x64, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, - 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, - 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, - 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, - 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, - 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x86, 0x09, 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, - 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, - 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, - 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, - 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 
0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, - 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, - 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x8b, 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, + 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, + 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, + 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x39, 
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xb1, 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, + 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, + 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, + 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, + 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, + 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, + 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 
0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, + 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, + 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, - 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, - 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 
0x64, 0x12, - 0x70, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x1a, 0x49, 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, - 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, - 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, - 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, + 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, + 0x63, 0x73, 0x53, 0x74, 
0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, + 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, + 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, + 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 
0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, + 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, + 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, - 0x73, 
0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, - 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, + 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 
0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, + 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, + 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, + 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, - 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x53, 0x65, 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, - 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, - 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, - 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2a, 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x10, 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, - 0x00, 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, - 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, - 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, - 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, - 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, - 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 
0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, + 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, + 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x0e, 0x54, 0x65, 0x73, 0x74, + 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 
0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x70, + 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x74, + 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, + 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, + 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, 0x74, 0x69, 0x6c, 
0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, + 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, + 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, + 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, + 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, + 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1823,7 +1949,7 @@ func file_grpc_testing_messages_proto_rawDescGZIP() []byte { } var file_grpc_testing_messages_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_grpc_testing_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_grpc_testing_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_grpc_testing_messages_proto_goTypes = []interface{}{ (PayloadType)(0), // 0: grpc.testing.PayloadType (GrpclbRouteType)(0), // 1: grpc.testing.GrpclbRouteType @@ -1846,17 
+1972,20 @@ var file_grpc_testing_messages_proto_goTypes = []interface{}{ (*LoadBalancerAccumulatedStatsResponse)(nil), // 18: grpc.testing.LoadBalancerAccumulatedStatsResponse (*ClientConfigureRequest)(nil), // 19: grpc.testing.ClientConfigureRequest (*ClientConfigureResponse)(nil), // 20: grpc.testing.ClientConfigureResponse - (*LoadBalancerStatsResponse_RpcsByPeer)(nil), // 21: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - nil, // 22: grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - nil, // 23: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - nil, // 24: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - nil, // 25: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - nil, // 26: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - nil, // 27: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - (*LoadBalancerAccumulatedStatsResponse_MethodStats)(nil), // 28: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - nil, // 29: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - nil, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - (*ClientConfigureRequest_Metadata)(nil), // 31: grpc.testing.ClientConfigureRequest.Metadata + (*TestOrcaReport)(nil), // 21: grpc.testing.TestOrcaReport + (*LoadBalancerStatsResponse_RpcsByPeer)(nil), // 22: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + nil, // 23: grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + nil, // 24: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + nil, // 25: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + nil, // 26: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + nil, // 27: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + nil, // 28: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + 
(*LoadBalancerAccumulatedStatsResponse_MethodStats)(nil), // 29: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + nil, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + nil, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + (*ClientConfigureRequest_Metadata)(nil), // 32: grpc.testing.ClientConfigureRequest.Metadata + nil, // 33: grpc.testing.TestOrcaReport.RequestCostEntry + nil, // 34: grpc.testing.TestOrcaReport.UtilizationEntry } var file_grpc_testing_messages_proto_depIdxs = []int32{ 0, // 0: grpc.testing.Payload.type:type_name -> grpc.testing.PayloadType @@ -1865,34 +1994,38 @@ var file_grpc_testing_messages_proto_depIdxs = []int32{ 3, // 3: grpc.testing.SimpleRequest.response_compressed:type_name -> grpc.testing.BoolValue 5, // 4: grpc.testing.SimpleRequest.response_status:type_name -> grpc.testing.EchoStatus 3, // 5: grpc.testing.SimpleRequest.expect_compressed:type_name -> grpc.testing.BoolValue - 4, // 6: grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload - 1, // 7: grpc.testing.SimpleResponse.grpclb_route_type:type_name -> grpc.testing.GrpclbRouteType - 4, // 8: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload - 3, // 9: grpc.testing.StreamingInputCallRequest.expect_compressed:type_name -> grpc.testing.BoolValue - 3, // 10: grpc.testing.ResponseParameters.compressed:type_name -> grpc.testing.BoolValue - 0, // 11: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType - 10, // 12: grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters - 4, // 13: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload - 5, // 14: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus - 4, // 15: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload - 22, // 16: 
grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - 23, // 17: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - 25, // 18: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - 26, // 19: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - 27, // 20: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - 29, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - 2, // 22: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 31, // 23: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata - 24, // 24: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - 21, // 25: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - 30, // 26: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - 28, // 27: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - 2, // 28: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 29, // [29:29] is the sub-list for method output_type - 29, 
// [29:29] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 29, // [29:29] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 21, // 6: grpc.testing.SimpleRequest.orca_per_query_report:type_name -> grpc.testing.TestOrcaReport + 21, // 7: grpc.testing.SimpleRequest.orca_oob_report:type_name -> grpc.testing.TestOrcaReport + 4, // 8: grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload + 1, // 9: grpc.testing.SimpleResponse.grpclb_route_type:type_name -> grpc.testing.GrpclbRouteType + 4, // 10: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload + 3, // 11: grpc.testing.StreamingInputCallRequest.expect_compressed:type_name -> grpc.testing.BoolValue + 3, // 12: grpc.testing.ResponseParameters.compressed:type_name -> grpc.testing.BoolValue + 0, // 13: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType + 10, // 14: grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters + 4, // 15: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload + 5, // 16: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus + 4, // 17: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload + 23, // 18: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + 24, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + 26, // 20: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + 27, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> 
grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + 28, // 22: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + 30, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + 2, // 24: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 32, // 25: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata + 33, // 26: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry + 34, // 27: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry + 25, // 28: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + 22, // 29: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + 31, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + 29, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + 2, // 32: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name } func init() { file_grpc_testing_messages_proto_init() } @@ -2118,6 +2251,18 @@ func 
file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestOrcaReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_testing_messages_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoadBalancerStatsResponse_RpcsByPeer); i { case 0: return &v.state @@ -2129,7 +2274,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoadBalancerAccumulatedStatsResponse_MethodStats); i { case 0: return &v.state @@ -2141,7 +2286,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClientConfigureRequest_Metadata); i { case 0: return &v.state @@ -2160,7 +2305,7 @@ func file_grpc_testing_messages_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_testing_messages_proto_rawDesc, NumEnums: 3, - NumMessages: 29, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, From da6ef0055c12a6e385d6e920bdaa06b5e9d3ab67 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 25 May 2022 17:03:33 -0700 Subject: [PATCH 512/998] xds/clusterresolver: reuse child policy names for the same locality (#5367) --- .../balancer/clusterresolver/configbuilder.go | 53 ++++----- .../configbuilder_childname.go | 88 ++++++++++++++ .../configbuilder_childname_test.go | 111 ++++++++++++++++++ .../clusterresolver/configbuilder_test.go | 63 +++++----- .../clusterresolver/resource_resolver.go | 
80 +++++++++---- .../clusterresolver/resource_resolver_test.go | 75 +++++++----- 6 files changed, 355 insertions(+), 115 deletions(-) create mode 100644 xds/internal/balancer/clusterresolver/configbuilder_childname.go create mode 100644 xds/internal/balancer/clusterresolver/configbuilder_childname_test.go diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 4cce16ff9a3d..7eb76dd51d7f 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -51,6 +51,9 @@ type priorityConfig struct { edsResp xdsresource.EndpointsUpdate // addresses is set only if type is DNS. addresses []string + // Each discovery mechanism has a name generator so that the child policies + // can reuse names between updates (EDS updates for example). + childNameGen *nameGenerator } // buildPriorityConfigJSON builds balancer config for the passed in @@ -118,10 +121,10 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} retAddrs []resolver.Address ) - for i, p := range priorities { + for _, p := range priorities { switch p.mechanism.Type { case DiscoveryMechanismTypeEDS: - names, configs, addrs, err := buildClusterImplConfigForEDS(i, p.edsResp, p.mechanism, xdsLBPolicy) + names, configs, addrs, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) if err != nil { return nil, nil, err } @@ -135,7 +138,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi } retAddrs = append(retAddrs, addrs...) 
case DiscoveryMechanismTypeLogicalDNS: - name, config, addrs := buildClusterImplConfigForDNS(i, p.addresses, p.mechanism) + name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) retConfig.Priorities = append(retConfig.Priorities, name) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config}, @@ -149,11 +152,11 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi return retConfig, retAddrs, nil } -func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { +func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { // Endpoint picking policy for DNS is hardcoded to pick_first. const childPolicy = "pick_first" retAddrs := make([]resolver.Address, 0, len(addrStrs)) - pName := fmt.Sprintf("priority-%v", parentPriority) + pName := fmt.Sprintf("priority-%v", g.prefix) for _, addrStr := range addrStrs { retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) } @@ -172,7 +175,7 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string, mechani // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { +func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { drops := 
make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -181,15 +184,12 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.Endpoi }) } - priorityChildNames, priorities := groupLocalitiesByPriority(edsResp.Localities) - retNames := make([]string, 0, len(priorityChildNames)) - retAddrs := make([]resolver.Address, 0, len(priorityChildNames)) - retConfigs := make(map[string]*clusterimpl.LBConfig, len(priorityChildNames)) - for _, priorityName := range priorityChildNames { - priorityLocalities := priorities[priorityName] - // Prepend parent priority to the priority names, to avoid duplicates. - pName := fmt.Sprintf("priority-%v-%v", parentPriority, priorityName) - retNames = append(retNames, pName) + priorities := groupLocalitiesByPriority(edsResp.Localities) + retNames := g.generate(priorities) + retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) + var retAddrs []resolver.Address + for i, pName := range retNames { + priorityLocalities := priorities[i] cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) if err != nil { return nil, nil, nil, err @@ -202,33 +202,32 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.Endpoi // groupLocalitiesByPriority returns the localities grouped by priority. // -// It also returns a list of strings where each string represents a priority, -// and the list is sorted from higher priority to lower priority. +// The returned list is sorted from higher priority to lower. Each item in the +// list is a group of localities. 
// // For example, for L0-p0, L1-p0, L2-p1, results will be -// - ["p0", "p1"] -// - map{"p0":[L0, L1], "p1":[L2]} -func groupLocalitiesByPriority(localities []xdsresource.Locality) ([]string, map[string][]xdsresource.Locality) { +// - [[L0, L1], [L2]] +func groupLocalitiesByPriority(localities []xdsresource.Locality) [][]xdsresource.Locality { var priorityIntSlice []int - priorities := make(map[string][]xdsresource.Locality) + priorities := make(map[int][]xdsresource.Locality) for _, locality := range localities { if locality.Weight == 0 { continue } - priorityName := fmt.Sprintf("%v", locality.Priority) - priorities[priorityName] = append(priorities[priorityName], locality) - priorityIntSlice = append(priorityIntSlice, int(locality.Priority)) + priority := int(locality.Priority) + priorities[priority] = append(priorities[priority], locality) + priorityIntSlice = append(priorityIntSlice, priority) } // Sort the priorities based on the int value, deduplicate, and then turn // the sorted list into a string list. This will be child names, in priority // order. sort.Ints(priorityIntSlice) priorityIntSliceDeduped := dedupSortedIntSlice(priorityIntSlice) - priorityNameSlice := make([]string, 0, len(priorityIntSliceDeduped)) + ret := make([][]xdsresource.Locality, 0, len(priorityIntSliceDeduped)) for _, p := range priorityIntSliceDeduped { - priorityNameSlice = append(priorityNameSlice, fmt.Sprintf("%v", p)) + ret = append(ret, priorities[p]) } - return priorityNameSlice, priorities + return ret } func dedupSortedIntSlice(a []int) []int { diff --git a/xds/internal/balancer/clusterresolver/configbuilder_childname.go b/xds/internal/balancer/clusterresolver/configbuilder_childname.go new file mode 100644 index 000000000000..119f4c474752 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/configbuilder_childname.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// nameGenerator generates a child name for a list of priorities (each priority +// is a list of localities). +// +// The purpose of this generator is to reuse names between updates. So the +// struct keeps state between generate() calls, and a later generate() might +// return names returned by the previous call. +type nameGenerator struct { + existingNames map[internal.LocalityID]string + prefix uint64 + nextID uint64 +} + +func newNameGenerator(prefix uint64) *nameGenerator { + return &nameGenerator{prefix: prefix} +} + +// generate returns a list of names for the given list of priorities. +// +// Each priority is a list of localities. 
The name for the priority is picked as +// - for each locality in this priority, if it exists in the existing names, +// this priority will reuse the name +// - if no reusable name is found for this priority, a new name is generated +// +// For example: +// - update 1: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 2: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 3: [[L1, L2], [L3]] --> ["0", "2"] (Two priorities were merged) +// - update 4: [[L1], [L4]] --> ["0", "3",] (A priority was split, and a new priority was added) +func (ng *nameGenerator) generate(priorities [][]xdsresource.Locality) []string { + var ret []string + usedNames := make(map[string]bool) + newNames := make(map[internal.LocalityID]string) + for _, priority := range priorities { + var nameFound string + for _, locality := range priority { + if name, ok := ng.existingNames[locality.ID]; ok { + if !usedNames[name] { + nameFound = name + // Found a name to use. No need to process the remaining + // localities. + break + } + } + } + + if nameFound == "" { + // No appropriate used name is found. Make a new name. + nameFound = fmt.Sprintf("priority-%d-%d", ng.prefix, ng.nextID) + ng.nextID++ + } + + ret = append(ret, nameFound) + // All localities in this priority share the same name. Add them all to + // the new map. + for _, l := range priority { + newNames[l.ID] = nameFound + } + usedNames[nameFound] = true + } + ng.existingNames = newNames + return ret +} diff --git a/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go b/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go new file mode 100644 index 000000000000..6a3dbba83a4b --- /dev/null +++ b/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package clusterresolver + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +func Test_nameGenerator_generate(t *testing.T) { + tests := []struct { + name string + prefix uint64 + input1 [][]xdsresource.Locality + input2 [][]xdsresource.Locality + want []string + }{ + { + name: "init, two new priorities", + prefix: 3, + input1: nil, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + }, + want: []string{"priority-3-0", "priority-3-1"}, + }, + { + name: "one new priority", + prefix: 1, + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + }, + want: []string{"priority-1-0", "priority-1-1"}, + }, + { + name: "merge two priorities", + prefix: 4, + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}, {ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + want: []string{"priority-4-0", "priority-4-2"}, + }, + { + name: "swap two priorities", + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: 
internal.LocalityID{Zone: "L2"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + want: []string{"priority-0-1", "priority-0-0", "priority-0-2"}, + }, + { + name: "split priority", + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}, {ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, // This gets a newly generated name, sice "0-0" was already picked. + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + want: []string{"priority-0-0", "priority-0-2", "priority-0-1"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ng := newNameGenerator(tt.prefix) + got1 := ng.generate(tt.input1) + t.Logf("%v", got1) + got := ng.generate(tt.input2) + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("generate() = got: %v, want: %v, diff (-got +want): %s", got, tt.want, diff) + } + }) + } +} diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 607f7b222419..80d46fa0119d 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -146,12 +146,14 @@ func TestBuildPriorityConfigJSON(t *testing.T) { testLocalitiesP1[1], }, }, + childNameGen: newNameGenerator(0), }, { mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeLogicalDNS, }, - addresses: testAddressStrs[4], + addresses: testAddressStrs[4], + childNameGen: newNameGenerator(1), }, }, nil) if err != nil { @@ -196,13 +198,15 @@ func TestBuildPriorityConfig(t *testing.T) { testLocalitiesP1[1], }, }, + childNameGen: newNameGenerator(0), }, { mechanism: DiscoveryMechanism{ Cluster: testClusterName2, Type: 
DiscoveryMechanismTypeLogicalDNS, }, - addresses: testAddressStrs[4], + addresses: testAddressStrs[4], + childNameGen: newNameGenerator(1), }, }, nil) @@ -309,7 +313,7 @@ func TestBuildPriorityConfig(t *testing.T) { } func TestBuildClusterImplConfigForDNS(t *testing.T) { - gotName, gotConfig, gotAddrs := buildClusterImplConfigForDNS(3, testAddressStrs[0], DiscoveryMechanism{Cluster: testClusterName2, Type: DiscoveryMechanismTypeLogicalDNS}) + gotName, gotConfig, gotAddrs := buildClusterImplConfigForDNS(newNameGenerator(3), testAddressStrs[0], DiscoveryMechanism{Cluster: testClusterName2, Type: DiscoveryMechanismTypeLogicalDNS}) wantName := "priority-3" wantConfig := &clusterimpl.LBConfig{ Cluster: testClusterName2, @@ -335,7 +339,7 @@ func TestBuildClusterImplConfigForDNS(t *testing.T) { func TestBuildClusterImplConfigForEDS(t *testing.T) { gotNames, gotConfigs, gotAddrs, _ := buildClusterImplConfigForEDS( - 2, + newNameGenerator(2), xdsresource.EndpointsUpdate{ Drops: []xdsresource.OverloadDropConfig{ { @@ -465,32 +469,28 @@ func TestGroupLocalitiesByPriority(t *testing.T) { tests := []struct { name string localities []xdsresource.Locality - wantPriorities []string - wantLocalities map[string][]xdsresource.Locality + wantLocalities [][]xdsresource.Locality }{ { - name: "1 locality 1 priority", - localities: []xdsresource.Locality{testLocalitiesP0[0]}, - wantPriorities: []string{"0"}, - wantLocalities: map[string][]xdsresource.Locality{ - "0": {testLocalitiesP0[0]}, + name: "1 locality 1 priority", + localities: []xdsresource.Locality{testLocalitiesP0[0]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0]}, }, }, { - name: "2 locality 1 priority", - localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, - wantPriorities: []string{"0"}, - wantLocalities: map[string][]xdsresource.Locality{ - "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, + name: "2 locality 1 priority", + localities: 
[]xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0], testLocalitiesP0[1]}, }, }, { - name: "1 locality in each", - localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, - wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsresource.Locality{ - "0": {testLocalitiesP0[0]}, - "1": {testLocalitiesP1[0]}, + name: "1 locality in each", + localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0]}, + {testLocalitiesP1[0]}, }, }, { @@ -498,10 +498,9 @@ func TestGroupLocalitiesByPriority(t *testing.T) { localities: []xdsresource.Locality{ testLocalitiesP0[0], testLocalitiesP0[1], testLocalitiesP1[0], testLocalitiesP1[1]}, - wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsresource.Locality{ - "0": {testLocalitiesP0[0], testLocalitiesP0[1]}, - "1": {testLocalitiesP1[0], testLocalitiesP1[1]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0], testLocalitiesP0[1]}, + {testLocalitiesP1[0], testLocalitiesP1[1]}, }, }, { @@ -512,19 +511,15 @@ func TestGroupLocalitiesByPriority(t *testing.T) { localities: []xdsresource.Locality{ testLocalitiesP1[1], testLocalitiesP0[1], testLocalitiesP1[0], testLocalitiesP0[0]}, - wantPriorities: []string{"0", "1"}, - wantLocalities: map[string][]xdsresource.Locality{ - "0": {testLocalitiesP0[1], testLocalitiesP0[0]}, - "1": {testLocalitiesP1[1], testLocalitiesP1[0]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[1], testLocalitiesP0[0]}, + {testLocalitiesP1[1], testLocalitiesP1[0]}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotPriorities, gotLocalities := groupLocalitiesByPriority(tt.localities) - if diff := cmp.Diff(gotPriorities, tt.wantPriorities); diff != "" { - t.Errorf("groupLocalitiesByPriority() diff(-got +want) %v", diff) - } + gotLocalities := 
groupLocalitiesByPriority(tt.localities) if diff := cmp.Diff(gotLocalities, tt.wantLocalities); diff != "" { t.Errorf("groupLocalitiesByPriority() diff(-got +want) %v", diff) } diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 9d7db26ad14a..3e4e7a7af412 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -55,6 +55,8 @@ type resolverMechanismTuple struct { dm DiscoveryMechanism dmKey discoveryMechanismKey r discoveryMechanism + + childNameGen *nameGenerator } type resourceResolver struct { @@ -62,17 +64,28 @@ type resourceResolver struct { updateChannel chan *resourceUpdate // mu protects the slice and map, and content of the resolvers in the slice. - mu sync.Mutex - mechanisms []DiscoveryMechanism - children []resolverMechanismTuple - childrenMap map[discoveryMechanismKey]discoveryMechanism + mu sync.Mutex + mechanisms []DiscoveryMechanism + children []resolverMechanismTuple + // childrenMap's value only needs the resolver implementation (type + // discoveryMechanism) and the childNameGen. The other two fields are not + // used. + // + // TODO(cleanup): maybe we can make a new type with just the necessary + // fields, and use it here instead. + childrenMap map[discoveryMechanismKey]resolverMechanismTuple + // Each new discovery mechanism needs a child name generator to reuse child + // policy names. But to make sure the names across discover mechanism + // doesn't conflict, we need a seq ID. This ID is incremented for each new + // discover mechanism. 
+ childNameGeneratorSeqID uint64 } func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver { return &resourceResolver{ parent: parent, updateChannel: make(chan *resourceUpdate, 1), - childrenMap: make(map[discoveryMechanismKey]discoveryMechanism), + childrenMap: make(map[discoveryMechanismKey]resolverMechanismTuple), } } @@ -112,31 +125,54 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { dmKey := discoveryMechanismKey{typ: dm.Type, name: nameToWatch} newDMs[dmKey] = true - r := rr.childrenMap[dmKey] - if r == nil { - r = newEDSResolver(nameToWatch, rr.parent.xdsClient, rr) + r, ok := rr.childrenMap[dmKey] + if !ok { + r = resolverMechanismTuple{ + dm: dm, + dmKey: dmKey, + r: newEDSResolver(nameToWatch, rr.parent.xdsClient, rr), + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), + } rr.childrenMap[dmKey] = r + rr.childNameGeneratorSeqID++ + } else { + // If this is not new, keep the fields (especially + // childNameGen), and only update the DiscoveryMechanism. + // + // Note that the same dmKey doesn't mean the same + // DiscoveryMechanism. There are fields (e.g. + // MaxConcurrentRequests) in DiscoveryMechanism that are not + // copied to dmKey, we need to keep those updated. + r.dm = dm } - rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r} + rr.children[i] = r case DiscoveryMechanismTypeLogicalDNS: // Name to resolve in DNS is the hostname, not the ClientConn // target. 
dmKey := discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} newDMs[dmKey] = true - r := rr.childrenMap[dmKey] - if r == nil { - r = newDNSResolver(dm.DNSHostname, rr) + r, ok := rr.childrenMap[dmKey] + if !ok { + r = resolverMechanismTuple{ + dm: dm, + dmKey: dmKey, + r: newDNSResolver(dm.DNSHostname, rr), + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), + } rr.childrenMap[dmKey] = r + rr.childNameGeneratorSeqID++ + } else { + r.dm = dm } - rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r} + rr.children[i] = r } } // Stop the resources that were removed. for dm, r := range rr.childrenMap { if !newDMs[dm] { delete(rr.childrenMap, dm) - r.stop() + r.r.stop() } } // Regenerate even if there's no change in discovery mechanism, in case @@ -150,7 +186,7 @@ func (rr *resourceResolver) resolveNow() { rr.mu.Lock() defer rr.mu.Unlock() for _, r := range rr.childrenMap { - r.resolveNow() + r.r.resolveNow() } } @@ -159,7 +195,7 @@ func (rr *resourceResolver) stop() { defer rr.mu.Unlock() for dm, r := range rr.childrenMap { delete(rr.childrenMap, dm) - r.stop() + r.r.stop() } rr.mechanisms = nil rr.children = nil @@ -174,13 +210,7 @@ func (rr *resourceResolver) stop() { func (rr *resourceResolver) generate() { var ret []priorityConfig for _, rDM := range rr.children { - r, ok := rr.childrenMap[rDM.dmKey] - if !ok { - rr.parent.logger.Infof("resolver for %+v not found, should never happen", rDM.dmKey) - continue - } - - u, ok := r.lastUpdate() + u, ok := rDM.r.lastUpdate() if !ok { // Don't send updates to parent until all resolvers have update to // send. 
@@ -188,9 +218,9 @@ func (rr *resourceResolver) generate() { } switch uu := u.(type) { case xdsresource.EndpointsUpdate: - ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu}) + ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) case []string: - ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu}) + ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu, childNameGen: rDM.childNameGen}) } } select { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index 432fdd9ceb65..8c90ed0e1cd4 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -68,7 +68,8 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { Cluster: testClusterName, EDSServiceName: testEDSServcie, }, - edsResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], + childNameGen: newNameGenerator(0), }}, }, { @@ -81,7 +82,8 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - edsResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], + childNameGen: newNameGenerator(0), }}, }, } { @@ -107,7 +109,7 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { fakeClient.InvokeWatchEDSCallback("", test.edsUpdate, nil) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{})); diff != "" { + if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -161,7 +163,8 @@ func (s) TestResourceResolverOneDNSResource(t *testing.T) { Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, + 
addresses: []string{"1.1.1.1", "2.2.2.2"}, + childNameGen: newNameGenerator(0), }}, }, } { @@ -189,7 +192,7 @@ func (s) TestResourceResolverOneDNSResource(t *testing.T) { dnsR.UpdateState(resolver.State{Addresses: test.addrs}) select { case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{})); diff != "" { + if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -243,8 +246,9 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { Cluster: testClusterName, EDSServiceName: testEDSServcie, }, - edsResp: testEDSUpdates[0], - }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { + edsResp: testEDSUpdates[0], + childNameGen: newNameGenerator(0), + }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -289,8 +293,9 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - edsResp: testEDSUpdates[1], - }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { + edsResp: testEDSUpdates[1], + childNameGen: newNameGenerator(1), + }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -317,8 +322,9 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { Cluster: testClusterName, MaxConcurrentRequests: newUint32(123), }, - edsResp: testEDSUpdates[1], - }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { + edsResp: testEDSUpdates[1], + childNameGen: newNameGenerator(1), + }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -388,7 +394,8 
@@ func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, - edsResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], + childNameGen: newNameGenerator(0), }, { mechanism: DiscoveryMechanism{ @@ -396,9 +403,10 @@ func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { Cluster: testClusterNames[1], MaxConcurrentRequests: newUint32(100), }, - edsResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], + childNameGen: newNameGenerator(1), }, - }, cmp.AllowUnexported(priorityConfig{})); diff != "" { + }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -500,16 +508,18 @@ func (s) TestResourceResolverChangePriority(t *testing.T) { Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, - edsResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], + childNameGen: newNameGenerator(0), }, { mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], }, - edsResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], + childNameGen: newNameGenerator(1), }, - }, cmp.AllowUnexported(priorityConfig{})); diff != "" { + }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -541,16 +551,18 @@ func (s) TestResourceResolverChangePriority(t *testing.T) { Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[1], }, - edsResp: testEDSUpdates[1], + edsResp: testEDSUpdates[1], + childNameGen: newNameGenerator(1), }, { mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeEDS, Cluster: testClusterNames[0], }, - edsResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], + childNameGen: newNameGenerator(0), }, - }, cmp.AllowUnexported(priorityConfig{})); diff != "" { + }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { 
t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -628,16 +640,18 @@ func (s) TestResourceResolverEDSAndDNS(t *testing.T) { Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - edsResp: testEDSUpdates[0], + edsResp: testEDSUpdates[0], + childNameGen: newNameGenerator(0), }, { mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, + addresses: []string{"1.1.1.1", "2.2.2.2"}, + childNameGen: newNameGenerator(1), }, - }, cmp.AllowUnexported(priorityConfig{})); diff != "" { + }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -689,8 +703,9 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, }, - edsResp: testEDSUpdates[0], - }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { + edsResp: testEDSUpdates[0], + childNameGen: newNameGenerator(0), + }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -726,8 +741,9 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, - }}, cmp.AllowUnexported(priorityConfig{})); diff != "" { + addresses: []string{"1.1.1.1", "2.2.2.2"}, + childNameGen: newNameGenerator(1), + }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -847,8 +863,9 @@ func (s) TestResourceResolverDNSResolveNow(t *testing.T) { Type: DiscoveryMechanismTypeLogicalDNS, DNSHostname: testDNSTarget, }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, - }}, 
cmp.AllowUnexported(priorityConfig{})); diff != "" { + addresses: []string{"1.1.1.1", "2.2.2.2"}, + childNameGen: newNameGenerator(0), + }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) } case <-ctx.Done(): From e41f868588c99493b35169e258a11b2ce128e139 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 25 May 2022 17:03:52 -0700 Subject: [PATCH 513/998] test/xds: move tests to a package with _test suffix (#5382) --- test/xds/xds_client_affinity_test.go | 2 +- test/xds/xds_client_federation_test.go | 3 ++- test/xds/xds_client_integration_test.go | 2 +- test/xds/xds_client_retry_test.go | 2 +- test/xds/xds_rls_clusterspecifier_plugin_test.go | 2 +- test/xds/xds_security_config_nack_test.go | 2 +- test/xds/xds_server_integration_test.go | 2 +- test/xds/xds_server_rbac_test.go | 2 +- test/xds/xds_server_serving_mode_test.go | 2 +- 9 files changed, 10 insertions(+), 9 deletions(-) diff --git a/test/xds/xds_client_affinity_test.go b/test/xds/xds_client_affinity_test.go index 91ca697071c8..58b7fca03f52 100644 --- a/test/xds/xds_client_affinity_test.go +++ b/test/xds/xds_client_affinity_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index 595e2272ffe4..26f4c6b29963 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -13,9 +13,10 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
+ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go index 399a0042a01a..b2c3d2f8354e 100644 --- a/test/xds/xds_client_integration_test.go +++ b/test/xds/xds_client_integration_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_client_retry_test.go b/test/xds/xds_client_retry_test.go index 31968f885bff..646b66be67af 100644 --- a/test/xds/xds_client_retry_test.go +++ b/test/xds/xds_client_retry_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index 392894017ec1..68d9fd99a7b2 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go index 4fe469ed3ebe..c5ec1196bbb7 100644 --- a/test/xds/xds_security_config_nack_test.go +++ b/test/xds/xds_security_config_nack_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go index f3057aa0eafb..cafea3064d23 100644 --- a/test/xds/xds_server_integration_test.go +++ b/test/xds/xds_server_integration_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_server_rbac_test.go b/test/xds/xds_server_rbac_test.go index 216653a8d1f7..c48e2039c767 100644 --- a/test/xds/xds_server_rbac_test.go +++ b/test/xds/xds_server_rbac_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" diff --git a/test/xds/xds_server_serving_mode_test.go b/test/xds/xds_server_serving_mode_test.go index fe3a5612d9f1..03a62ed6c784 100644 --- 
a/test/xds/xds_server_serving_mode_test.go +++ b/test/xds/xds_server_serving_mode_test.go @@ -16,7 +16,7 @@ * */ -package xds +package xds_test import ( "context" From 70a80552d51f74bd2fb313cff7fea68cde4c2612 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 31 May 2022 22:44:42 +0000 Subject: [PATCH 514/998] xds/priority: clean up tests (#5387) --- internal/testutils/balancer.go | 77 ++++ .../balancer/priority/balancer_test.go | 416 ++++++++---------- 2 files changed, 249 insertions(+), 244 deletions(-) diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index ff43fb7340c5..e8c485e85566 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -187,6 +187,83 @@ func (tcc *TestClientConn) WaitForErrPicker(ctx context.Context) error { return nil } +// WaitForPickerWithErr waits until an error picker is pushed to this +// ClientConn with the error matching the wanted error. Also drains the +// matching entry from the state channel. Returns an error if the provided +// context expires, including the last received picker error (if any). +func (tcc *TestClientConn) WaitForPickerWithErr(ctx context.Context, want error) error { + lastErr := errors.New("received no picker") + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for an error picker with %v; last picker error: %v", want, lastErr) + case picker := <-tcc.NewPickerCh: + <-tcc.NewStateCh + for i := 0; i < 5; i++ { + if _, lastErr = picker.Pick(balancer.PickInfo{}); lastErr == nil || lastErr.Error() != want.Error() { + break + } + return nil + } + } + } +} + +// WaitForConnectivityState waits until the state pushed to this ClientConn +// matches the wanted state. Also drains the matching entry from the picker +// channel. Returns an error if the provided context expires, including the +// last received state (if any). 
+func (tcc *TestClientConn) WaitForConnectivityState(ctx context.Context, want connectivity.State) error { + var lastState connectivity.State = -1 + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for state to be %s; last state: %s", want, lastState) + case s := <-tcc.NewStateCh: + <-tcc.NewPickerCh + if s == want { + return nil + } + lastState = s + } + } +} + +// WaitForRoundRobinPicker waits for a picker that passes IsRoundRobin. Also +// drains the matching state channel and requires it to be READY to be +// considered. Returns an error if the provided context expires, including the +// last received error from IsRoundRobin or the picker (if any). +func (tcc *TestClientConn) WaitForRoundRobinPicker(ctx context.Context, want ...balancer.SubConn) error { + lastErr := errors.New("received no picker") + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for round robin picker with %v; last error: %v", want, lastErr) + case s := <-tcc.NewStateCh: + p := <-tcc.NewPickerCh + if s != connectivity.Ready { + lastErr = fmt.Errorf("received state %v instead of ready", s) + break + } + var pickerErr error + if err := IsRoundRobin(want, func() balancer.SubConn { + sc, err := p.Pick(balancer.PickInfo{}) + if err != nil { + pickerErr = err + } + return sc.SubConn + }); pickerErr != nil { + lastErr = pickerErr + continue + } else if err != nil { + lastErr = err + continue + } + return nil + } + } +} + // IsRoundRobin checks whether f's return value is roundrobin of elements from // want. But it doesn't check for the order. Note that want can contain // duplicate items, which makes it weight-round-robin. 
diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 98bbb5b8f525..5b96d7101f3b 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +const defaultTestTimeout = 5 * time.Second + type s struct { grpctest.Tester } @@ -83,6 +85,9 @@ func subConnFromPicker(t *testing.T, p balancer.Picker) func() balancer.SubConn // // Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0. func (s) TestPriority_HighPriorityReady(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -118,10 +123,8 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Add p2, it shouldn't cause any updates. @@ -190,12 +193,15 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { // Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is // down, use 2; remove 2, use 1. func (s) TestPriority_SwitchPriority(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() - // Two localities, with priorities [0, 1], each with one backend. 
+ t.Log("Two localities, with priorities [0, 1], each with one backend.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -220,30 +226,24 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { } sc0 := <-cc.NewSubConnCh - // p0 is ready. + t.Log("Make p0 ready.") pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } - // Turn down 0, will start and use 1. + t.Log("Turn down 0, will start and use 1.") pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } - // Handle SubConn creation from 1. + t.Log("Handle SubConn creation from 1.") addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -253,15 +253,11 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. 
- p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } - // Add p2, it shouldn't cause any udpates. + t.Log("Add p2, it shouldn't cause any updates.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -292,16 +288,13 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { case <-time.After(time.Millisecond * 100): } - // Turn down 1, use 2 + t.Log("Turn down 1, use 2.") pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 2 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs2 := <-cc.NewSubConnAddrsCh @@ -313,15 +306,11 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - p4 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p4.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } - // Remove 2, use 1. 
+ t.Log("Remove 2, use 1.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -348,22 +337,18 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { // Should get an update with 1's old transient failure picker, to override // 2's old picker. - p5 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p5.Pick(balancer.PickInfo{}); err == nil { - t.Fatalf("want pick error non-nil, got nil") - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } + <-cc.NewStateCh // Drain to match picker pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // Does not change the aggregate state, because round robin does not leave + // TRANSIENT_FAILURE if a subconn goes CONNECTING. pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - p6 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p6.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } } @@ -373,6 +358,9 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { // Init 0 and 1; 0 is up, use 0; 0 is connecting, 1 is up, use 1; 0 is ready, // use 0. func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -408,10 +396,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } // Turn 0 to TransientFailure, will start and use 1. @@ -419,11 +405,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // Handle SubConn creation from 1. @@ -436,12 +419,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Turn 0 back to Ready. 
@@ -454,12 +433,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { t.Fatalf("RemoveSubConn, want %v, got %v", sc0, scToRemove) } - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p3.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } } @@ -467,6 +442,9 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { // // Init 0 and 1; 0 and 1 both down; add 2, use 2. func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -497,16 +475,13 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { } sc0 := <-cc.NewSubConnCh - // Turn down 0, 1 is used. + t.Log("Turn down 0, 1 is used.") pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -514,18 +489,17 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh - // Turn down 1, pick should error. 
+ + t.Log("Turn down 1, pick should error.") pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Test pick failure. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail1.Pick(balancer.PickInfo{}); err == nil { - t.Fatalf("want pick error non-nil, got nil") - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } + <-cc.NewStateCh // Drain to match picker - // Add p2, it should create a new SubConn. + t.Log("Add p2, it should create a new SubConn.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -547,11 +521,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { } // A new connecting picker should be updated for the new priority. - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs2 := <-cc.NewSubConnAddrsCh @@ -563,12 +534,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } @@ -576,7 +543,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { // // Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2. 
func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { - // defer time.Sleep(10 * time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) @@ -614,11 +582,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -631,11 +596,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 2 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs2 := <-cc.NewSubConnAddrsCh @@ -647,12 +609,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. 
- p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } // When 0 becomes ready, 0 should be used, 1 and 2 should all be closed. @@ -671,12 +629,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { } // Test pick with 0. - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p0.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } } @@ -685,7 +639,10 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { // // Init 0,1; 0 is not ready (in connecting), after timeout, use 1. func (s) TestPriority_InitTimeout(t *testing.T) { - const testPriorityInitTimeout = time.Second + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + const testPriorityInitTimeout = 200 * time.Millisecond defer func() func() { old := DefaultPriorityInitTimeout DefaultPriorityInitTimeout = testPriorityInitTimeout @@ -742,23 +699,21 @@ func (s) TestPriority_InitTimeout(t *testing.T) { // After the init timer of p0, when switching to p1, a connecting picker // will be sent to the parent. Clear it here. - <-cc.NewPickerCh pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. 
- p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } } // EDS removes all priorities, and re-adds them. func (s) TestPriority_RemovesAllPriorities(t *testing.T) { - const testPriorityInitTimeout = time.Second + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + const testPriorityInitTimeout = 200 * time.Millisecond defer func() func() { old := DefaultPriorityInitTimeout DefaultPriorityInitTimeout = testPriorityInitTimeout @@ -800,10 +755,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } // Remove all priorities. @@ -826,11 +779,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { } // Test pick return TransientFailure. - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != ErrAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", ErrAllPrioritiesRemoved, err) - } + if err := cc.WaitForPickerWithErr(ctx, ErrAllPrioritiesRemoved); err != nil { + t.Fatal(err.Error()) } // Re-add two localities, with previous priorities, but different backends. @@ -861,7 +811,6 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // Don't send any update to p0, so to not override the old state of p0. // Later, connect to p1 and then remove p1. 
This will fallback to p0, and // will send p0's old picker if they are not correctly removed. - <-cc.NewPickerCh // Clear the picker from old p0. // p1 will be used after priority init timeout. addrs11 := <-cc.NewSubConnAddrsCh @@ -873,10 +822,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { pb.UpdateSubConnState(sc11, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p1 := <-cc.NewPickerCh - want = []balancer.SubConn{sc11} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc11); err != nil { + t.Fatal(err.Error()) } // Remove p1, to fallback to p0. @@ -903,11 +850,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { } // Test pick return NoSubConn. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if scst, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error _, %v, got %v, _ ,%v", balancer.ErrNoSubConnAvailable, scst, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // Send an ready update for the p0 sc that was received when re-adding @@ -916,10 +860,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { pb.UpdateSubConnState(sc01, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc01} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc01); err != nil { + t.Fatal(err.Error()) } select { @@ -936,6 +878,9 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // Test the case where the high priority contains no backends. The low priority // will be used. 
func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -971,10 +916,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Remove addresses from priority 0, should use p1. @@ -1008,11 +951,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // p1 is ready. @@ -1020,16 +960,14 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } // Test the case where the first and only priority is removed. 
func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { - const testPriorityInitTimeout = time.Second + const testPriorityInitTimeout = 200 * time.Millisecond defer func(t time.Duration) { DefaultPriorityInitTimeout = t }(DefaultPriorityInitTimeout) @@ -1078,6 +1016,9 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { // // Init a(p0) and b(p1); a(p0) is up, use a; move b to p0, a to p1, use b. func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1113,10 +1054,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Swap child with p0 and p1, the child at lower priority should now be the @@ -1143,11 +1082,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { // balancer should immediately update the picker so the picker from old // child is not used. In this case, the picker becomes a // no-subconn-available picker because this child is just started. - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // Old subconn should be removed. 
@@ -1167,10 +1103,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only new subconns. - p2 := <-cc.NewPickerCh - want2 := []balancer.SubConn{sc2} - if err := testutils.IsRoundRobin(want2, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want2, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } @@ -1179,6 +1113,9 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { // // Init a(p0) and b(p1); a(p0) is down, use b; move b to p0, a to p1, use b. func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1213,11 +1150,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -1229,10 +1163,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. 
- p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Swap child with p0 and p1, the child at lower priority should now be the @@ -1279,6 +1211,9 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { // // Init a(p0) and b(p1); a(p0) is down, use b; move b to p0, a to p1, use b. func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1313,11 +1248,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -1329,10 +1261,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Remove child with p1, the child at higher priority should now be used. 
@@ -1358,12 +1288,10 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) } - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err == nil { - t.Fatalf("want pick error , got %v", err) - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } + <-cc.NewStateCh // Drain to match picker // Because there was no new child, no new subconn should be created. select { @@ -1378,6 +1306,9 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { // Init 0; 0 is up, use 0; remove 0, only picker is updated, no subconn is // removed; re-add 0, picker is updated. func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + const testChildCacheTimeout = time.Second defer func() func() { old := balancergroup.DefaultSubBalancerCloseTimeout @@ -1420,10 +1351,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Remove the child, it shouldn't cause any conn changed, but picker should @@ -1435,11 +1364,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { t.Fatalf("failed to update ClientConn state: %v", err) } - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != ErrAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", ErrAllPrioritiesRemoved, err) - } + if err := cc.WaitForPickerWithErr(ctx, ErrAllPrioritiesRemoved); err != nil { + t.Fatal(err.Error()) } // But no conn changes should happen. Child balancer is in cache. @@ -1469,10 +1395,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { } // Test roundrobin with only p0 subconns. - p2 := <-cc.NewPickerCh - want2 := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want2, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want2, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // But no conn changes should happen. Child balancer is just taken out from @@ -1490,6 +1414,9 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { // // Init 0; 0 is up, use 0; change 0's policy, 0 is used. func (s) TestPriority_ChildPolicyChange(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1523,10 +1450,8 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Change the policy for the child (still roundrobin, but with a different @@ -1563,10 +1488,8 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pickfirst with the new subconns. - p2 := <-cc.NewPickerCh - want2 := []balancer.SubConn{sc2} - if err := testutils.IsRoundRobin(want2, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want2, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } @@ -1590,6 +1513,9 @@ func init() { // (e.g., roundrobin handling empty addresses). There could be deadlock caused // by acquiring a locked mutex. func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1612,18 +1538,17 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { t.Fatalf("failed to update ClientConn state: %v", err) } - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != errTestInlineStateUpdate { - t.Fatalf("picker.Pick, got err %q, want err %q", err, errTestInlineStateUpdate) - } + if err := cc.WaitForPickerWithErr(ctx, errTestInlineStateUpdate); err != nil { + t.Fatal(err.Error()) } } // When the child policy's configured to ignore reresolution requests, the // ResolveNow() calls from this child should be all ignored. 
func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1650,8 +1575,6 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { t.Fatalf("failed to update ClientConn state: %v", err) } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() // This is the balancer.ClientConn that the inner resolverNowBalancer is // built with. balancerCCI, err := resolveNowBalancerCCCh.Receive(ctx) @@ -1705,6 +1628,9 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { // ResolveNow() calls from this child should be all ignored, from the other // children are forwarded. func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1735,8 +1661,6 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { t.Fatalf("failed to update ClientConn state: %v", err) } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() // This is the balancer.ClientConn from p0. 
balancerCCI0, err := resolveNowBalancerCCCh.Receive(ctx) if err != nil { @@ -1795,6 +1719,10 @@ func init() { stub.Register(fmt.Sprintf("%s-%d", initIdleBalancerName, ii), stub.BalancerFuncs{ UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &testutils.TestConstPicker{Err: balancer.ErrNoSubConnAvailable}, + }) return nil }, UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { @@ -1817,6 +1745,9 @@ func init() { // // Init 0, 1; 0 is Idle, use 0; 0 is down, start 1; 1 is Idle, use 1. func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1849,20 +1780,16 @@ func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { // Send an Idle state update to trigger an Idle picker update. pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) - p0 := <-cc.NewPickerCh - if pr, err := p0.Pick(balancer.PickInfo{}); err != errsTestInitIdle[0] { - t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[0]) + if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[0]); err != nil { + t.Fatal(err.Error()) } // Turn p0 down, to start p1. pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. 
- p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -1872,9 +1799,8 @@ func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { sc1 := <-cc.NewSubConnCh // Idle picker from p1 should also be forwarded. pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Idle}) - p2 := <-cc.NewPickerCh - if pr, err := p2.Pick(balancer.PickInfo{}); err != errsTestInitIdle[1] { - t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[1]) + if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[1]); err != nil { + t.Fatal(err.Error()) } } @@ -1885,6 +1811,9 @@ func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { // // Init 0; 0 is Idle, use 0; add 1, use 0. func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) @@ -1915,9 +1844,8 @@ func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) { // Send an Idle state update to trigger an Idle picker update. pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) - p0 := <-cc.NewPickerCh - if pr, err := p0.Pick(balancer.PickInfo{}); err != errsTestInitIdle[0] { - t.Fatalf("pick returned %v, %v, want _, %v", pr, err, errsTestInitIdle[0]) + if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[0]); err != nil { + t.Fatal(err.Error()) } // Add 1, should keep using 0. 
From 13b378bc458535e6da1693fc67e09a4951cd3259 Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Thu, 2 Jun 2022 16:17:01 -0700 Subject: [PATCH 515/998] internal: add global DialOptions and ServerOptions for all clients and servers (#5352) --- clientconn.go | 4 ++ default_dial_option_server_option_test.go | 82 +++++++++++++++++++++++ dialoptions.go | 11 +++ internal/internal.go | 14 ++++ server.go | 10 +++ 5 files changed, 121 insertions(+) create mode 100644 default_dial_option_server_option_test.go diff --git a/clientconn.go b/clientconn.go index de6d41c23841..0d21f2210b68 100644 --- a/clientconn.go +++ b/clientconn.go @@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + for _, opt := range opts { opt.apply(&cc.dopts) } diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go new file mode 100644 index 000000000000..952b1c4a2b17 --- /dev/null +++ b/default_dial_option_server_option_test.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "strings" + "testing" + + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" +) + +func (s) TestAddExtraDialOptions(t *testing.T) { + // Ensure the Dial fails without credentials + if _, err := Dial("fake"); err == nil { + t.Fatalf("Dialing without a credential did not fail") + } else { + if !strings.Contains(err.Error(), "no transport security set") { + t.Fatalf("Dialing failed with unexpected error: %v", err) + } + } + + // Set and check the DialOptions + opts := []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials())} + internal.AddExtraDialOptions.(func(opt ...DialOption))(opts...) + for i, opt := range opts { + if extraDialOptions[i] != opt { + t.Fatalf("Unexpected extra dial option at index %d: %v != %v", i, extraDialOptions[i], opt) + } + } + + // Ensure the Dial passes with the extra dial options + if cc, err := Dial("fake"); err != nil { + t.Fatalf("Dialing with insecure credential failed: %v", err) + } else { + cc.Close() + } + + internal.ClearExtraDialOptions() + if len(extraDialOptions) != 0 { + t.Fatalf("Unexpected len of extraDialOptions: %d != 0", len(extraDialOptions)) + } +} + +func (s) TestAddExtraServerOptions(t *testing.T) { + const maxRecvSize = 998765 + // Set and check the ServerOptions + opts := []ServerOption{StatsHandler(nil), Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize)} + internal.AddExtraServerOptions.(func(opt ...ServerOption))(opts...) 
+ for i, opt := range opts { + if extraServerOptions[i] != opt { + t.Fatalf("Unexpected extra server option at index %d: %v != %v", i, extraServerOptions[i], opt) + } + } + + // Ensure the extra server options applies to new servers + s := NewServer() + if s.opts.maxReceiveMessageSize != maxRecvSize { + t.Fatalf("Unexpected s.opts.maxReceiveMessageSize: %d != %d", s.opts.maxReceiveMessageSize, maxRecvSize) + } + + internal.ClearExtraServerOptions() + if len(extraServerOptions) != 0 { + t.Fatalf("Unexpected len of extraServerOptions: %d != 0", len(extraServerOptions)) + } +} diff --git a/dialoptions.go b/dialoptions.go index f2f605a17c47..4cccebe8fcc3 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -35,6 +35,15 @@ import ( "google.golang.org/grpc/stats" ) +func init() { + internal.AddExtraDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) + } + internal.ClearExtraDialOptions = func() { + extraDialOptions = nil + } +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { @@ -70,6 +79,8 @@ type DialOption interface { apply(*dialOptions) } +var extraDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // diff --git a/internal/internal.go b/internal/internal.go index 0f4512248174..59352cc958e8 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -63,6 +63,20 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) + // AddExtraServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. 
+ AddExtraServerOptions interface{} // func(opt ...ServerOption) + // ClearExtraServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + ClearExtraServerOptions func() + // AddExtraDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddExtraDialOptions interface{} // func(opt ...DialOption) + // ClearExtraDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + ClearExtraDialOptions func() // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from diff --git a/server.go b/server.go index 65de84b30074..7dc6a7b24776 100644 --- a/server.go +++ b/server.go @@ -73,6 +73,12 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddExtraServerOptions = func(opt ...ServerOption) { + extraServerOptions = opt + } + internal.ClearExtraServerOptions = func() { + extraServerOptions = nil + } } var statusOK = status.New(codes.OK, "") @@ -174,6 +180,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -560,6 +567,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. 
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } From ea86bf74972b469b49cfbf6ab10258f478358036 Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Fri, 3 Jun 2022 09:15:50 -0700 Subject: [PATCH 516/998] stats: add support for multiple stats handlers in a single client or server (#5347) --- dialoptions.go | 2 +- internal/transport/handler_server.go | 22 ++--- internal/transport/http2_client.go | 45 +++++----- internal/transport/http2_server.go | 28 +++--- internal/transport/transport.go | 6 +- server.go | 44 +++++----- stats/stats_test.go | 122 +++++++++++++++++++++++---- stream.go | 64 +++++++------- 8 files changed, 219 insertions(+), 114 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 4cccebe8fcc3..75d01ba777c8 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -391,7 +391,7 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) }) } diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index 1c3459c2b4c5..090120925bb4 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -49,7 +49,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. 
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } @@ -138,7 +138,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler } func (ht *serverHandlerTransport) Close() { @@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. 
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 24ca59084b43..be371c6e0f73 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -90,7 +90,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -311,7 +311,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, onPrefaceReceipt: onPrefaceReceipt, nextID: 1, @@ -341,15 +341,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - 
t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) if err != nil { @@ -773,24 +773,27 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. 
+ outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) } return s, nil } @@ -916,11 +919,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -1432,7 +1435,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { if isHeader { inHeader := &stats.InHeader{ Client: true, @@ -1440,14 +1443,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 45d7bd145e3e..2b0fde334ce0 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -82,7 +82,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. 
@@ -257,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -272,13 +272,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) if err != nil { @@ -570,8 +570,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, @@ -580,7 +580,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -996,14 +996,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. 
// No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -1064,10 +1064,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1222,9 +1222,9 @@ func (t *http2Server) Close() { for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } diff --git a/internal/transport/transport.go b/internal/transport/transport.go index a9ce717f1605..6c3ba8515940 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -523,7 +523,7 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 @@ -553,8 +553,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. 
InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. diff --git a/server.go b/server.go index 7dc6a7b24776..b54f5bb572a7 100644 --- a/server.go +++ b/server.go @@ -156,7 +156,7 @@ type serverOptions struct { chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -442,7 +442,7 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + o.statsHandlers = append(o.statsHandlers, h) }) } @@ -877,7 +877,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -973,7 +973,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1086,8 +1086,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1134,13 +1136,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1171,7 +1173,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1253,7 +1255,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || binlog != nil { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1270,7 +1272,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1428,16 +1430,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1449,10 +1453,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
defer func() { if trInfo != nil { @@ -1466,7 +1470,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1474,7 +1478,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { diff --git a/stats/stats_test.go b/stats/stats_test.go index 1b08568b906b..9a1a6c11253c 100644 --- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -176,10 +176,10 @@ func (s *testServer) StreamingOutputCall(in *testpb.StreamingOutputCallRequest, // func, modified as needed, and then started with its startServer method. // It should be cleaned up with the tearDown method. type test struct { - t *testing.T - compress string - clientStatsHandler stats.Handler - serverStatsHandler stats.Handler + t *testing.T + compress string + clientStatsHandlers []stats.Handler + serverStatsHandlers []stats.Handler testServer testgrpc.TestServiceServer // nil means none // srv and srvAddr are set once startServer is called. @@ -204,12 +204,12 @@ type testConfig struct { // newTest returns a new test using the provided testing.T and // environment. It is returned with default values. Tests should // modify it before calling its startServer and clientConn methods. 
-func newTest(t *testing.T, tc *testConfig, ch stats.Handler, sh stats.Handler) *test { +func newTest(t *testing.T, tc *testConfig, chs []stats.Handler, shs []stats.Handler) *test { te := &test{ - t: t, - compress: tc.compress, - clientStatsHandler: ch, - serverStatsHandler: sh, + t: t, + compress: tc.compress, + clientStatsHandlers: chs, + serverStatsHandlers: shs, } return te } @@ -229,8 +229,8 @@ func (te *test) startServer(ts testgrpc.TestServiceServer) { grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), ) } - if te.serverStatsHandler != nil { - opts = append(opts, grpc.StatsHandler(te.serverStatsHandler)) + for _, sh := range te.serverStatsHandlers { + opts = append(opts, grpc.StatsHandler(sh)) } s := grpc.NewServer(opts...) te.srv = s @@ -257,8 +257,8 @@ func (te *test) clientConn() *grpc.ClientConn { grpc.WithDecompressor(grpc.NewGZIPDecompressor()), ) } - if te.clientStatsHandler != nil { - opts = append(opts, grpc.WithStatsHandler(te.clientStatsHandler)) + for _, sh := range te.clientStatsHandlers { + opts = append(opts, grpc.WithStatsHandler(sh)) } var err error @@ -846,7 +846,7 @@ func checkServerStats(t *testing.T, got []*gotData, expect *expectedData, checkF func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { h := &statshandler{} - te := newTest(t, tc, nil, h) + te := newTest(t, tc, nil, []stats.Handler{h}) te.startServer(&testServer{}) defer te.tearDown() @@ -1146,7 +1146,7 @@ func checkClientStats(t *testing.T, got []*gotData, expect *expectedData, checkF func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map[int]*checkFuncWithCount) { h := &statshandler{} - te := newTest(t, tc, h, nil) + te := newTest(t, tc, []stats.Handler{h}, nil) te.startServer(&testServer{}) defer te.tearDown() @@ -1375,3 +1375,95 @@ func (s) TestTrace(t *testing.T) { t.Errorf("OutgoingTrace(%v) = %v; want nil", ctx, tr) } } + +func (s) TestMultipleClientStatsHandler(t 
*testing.T) { + h := &statshandler{} + tc := &testConfig{compress: ""} + te := newTest(t, tc, []stats.Handler{h, h}, nil) + te.startServer(&testServer{}) + defer te.tearDown() + + cc := &rpcConfig{success: false, failfast: false, callType: unaryRPC} + _, _, err := te.doUnaryCall(cc) + if cc.success != (err == nil) { + t.Fatalf("cc.success: %v, got error: %v", cc.success, err) + } + te.cc.Close() + te.srv.GracefulStop() // Wait for the server to stop. + + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotRPC[len(h.gotRPC)-1].s.(*stats.End); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + // Each RPC generates 6 stats events on the client-side, times 2 StatsHandler + if len(h.gotRPC) != 12 { + t.Fatalf("h.gotRPC: unexpected amount of RPCStats: %v != %v", len(h.gotRPC), 12) + } + + // Each connection generates 4 conn events on the client-side, times 2 StatsHandler + if len(h.gotConn) != 4 { + t.Fatalf("h.gotConn: unexpected amount of ConnStats: %v != %v", len(h.gotConn), 4) + } +} + +func (s) TestMultipleServerStatsHandler(t *testing.T) { + h := &statshandler{} + tc := &testConfig{compress: ""} + te := newTest(t, tc, nil, []stats.Handler{h, h}) + te.startServer(&testServer{}) + defer te.tearDown() + + cc := &rpcConfig{success: false, failfast: false, callType: unaryRPC} + _, _, err := te.doUnaryCall(cc) + if cc.success != (err == nil) { + t.Fatalf("cc.success: %v, got error: %v", cc.success, err) + } + te.cc.Close() + te.srv.GracefulStop() // Wait for the server to stop. 
+ + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotRPC[len(h.gotRPC)-1].s.(*stats.End); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + // Each RPC generates 6 stats events on the server-side, times 2 StatsHandler + if len(h.gotRPC) != 12 { + t.Fatalf("h.gotRPC: unexpected amount of RPCStats: %v != %v", len(h.gotRPC), 12) + } + + // Each connection generates 4 conn events on the server-side, times 2 StatsHandler + if len(h.gotConn) != 4 { + t.Fatalf("h.gotConn: unexpected amount of ConnStats: %v != %v", len(h.gotConn), 4) + } +} diff --git a/stream.go b/stream.go index 236fc17ec3c4..6d82e0d7cca3 100644 --- a/stream.go +++ b/stream.go @@ -374,9 +374,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -414,12 +414,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) } return &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, }, nil } @@ -536,8 +536,8 @@ type csAttempt struct { // and cleared when the finish method is called. 
trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time // set for newStream errors that may be transparently retried allowTransparentRetry bool @@ -960,8 +960,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -971,7 +971,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -1008,8 +1008,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1068,7 +1068,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1076,7 +1076,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1445,7 +1445,7 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. 
It @@ -1555,8 +1555,10 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { Message: data, }) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1590,7 +1592,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || ss.binlog != nil { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { @@ -1605,15 +1607,17 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. 
+ Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } } if ss.binlog != nil { ss.binlog.Log(&binarylog.ClientMessage{ From cbcceaf7678d2ea8a333e7db1d12ee5deb287dd0 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 6 Jun 2022 15:25:05 -0400 Subject: [PATCH 517/998] gracefulswitch: fix exit idle race (#5384) * gracefulswitch: fix exit idle race --- .../balancer/gracefulswitch/gracefulswitch.go | 2 + .../gracefulswitch/gracefulswitch_test.go | 50 +++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/internal/balancer/gracefulswitch/gracefulswitch.go b/internal/balancer/gracefulswitch/gracefulswitch.go index 7ba8f4d18319..08666f62a7cb 100644 --- a/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/internal/balancer/gracefulswitch/gracefulswitch.go @@ -193,6 +193,8 @@ func (gsb *Balancer) ExitIdle() { ei.ExitIdle() return } + gsb.mu.Lock() + defer gsb.mu.Unlock() for sc := range balToUpdate.subconns { sc.Connect() } diff --git a/internal/balancer/gracefulswitch/gracefulswitch_test.go b/internal/balancer/gracefulswitch/gracefulswitch_test.go index 02018f068ed0..265e1f78e12d 100644 --- a/internal/balancer/gracefulswitch/gracefulswitch_test.go +++ b/internal/balancer/gracefulswitch/gracefulswitch_test.go @@ -826,6 +826,40 @@ func (s) TestInlineCallbackInBuild(t *testing.T) { } } +// TestExitIdle tests the ExitIdle operation on the Graceful Switch Balancer for +// both possible codepaths, one where the child implements ExitIdler interface +// and one where the child doesn't implement ExitIdler interface. +func (s) TestExitIdle(t *testing.T) { + _, gsb := setup(t) + // switch to a balancer that implements ExitIdle{} (will populate current). 
+ gsb.SwitchTo(mockBalancerBuilder1{}) + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // exitIdle on the Graceful Switch Balancer should get forwarded to the + // current child as it implements exitIdle. + gsb.ExitIdle() + if err := currBal.waitForExitIdle(ctx); err != nil { + t.Fatal(err) + } + + // switch to a balancer that doesn't implement ExitIdle{} (will populate + // pending). + gsb.SwitchTo(verifyBalancerBuilder{}) + // call exitIdle concurrently with newSubConn to make sure there is not a + // data race. + done := make(chan struct{}) + go func() { + gsb.ExitIdle() + close(done) + }() + pendBal := gsb.balancerPending.Balancer.(*verifyBalancer) + for i := 0; i < 10; i++ { + pendBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + } + <-done +} + const balancerName1 = "mock_balancer_1" const balancerName2 = "mock_balancer_2" const verifyBalName = "verifyNoSubConnUpdateAfterCloseBalancer" @@ -839,6 +873,7 @@ func (mockBalancerBuilder1) Build(cc balancer.ClientConn, opts balancer.BuildOpt scStateCh: testutils.NewChannel(), resolverErrCh: testutils.NewChannel(), closeCh: testutils.NewChannel(), + exitIdleCh: testutils.NewChannel(), cc: cc, } } @@ -863,6 +898,8 @@ type mockBalancer struct { resolverErrCh *testutils.Channel // closeCh is a channel used to signal the closing of this balancer. closeCh *testutils.Channel + // exitIdleCh is a channel used to signal the receipt of an ExitIdle call. + exitIdleCh *testutils.Channel // Hold onto ClientConn wrapper to communicate with it cc balancer.ClientConn } @@ -890,6 +927,10 @@ func (mb1 *mockBalancer) Close() { mb1.closeCh.Send(struct{}{}) } +func (mb1 *mockBalancer) ExitIdle() { + mb1.exitIdleCh.Send(struct{}{}) +} + // waitForClientConnUpdate verifies if the mockBalancer receives the // provided ClientConnState within a reasonable amount of time. 
func (mb1 *mockBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS balancer.ClientConnState) error { @@ -940,6 +981,15 @@ func (mb1 *mockBalancer) waitForClose(ctx context.Context) error { return nil } +// waitForExitIdle verifies that ExitIdle gets called on the mockBalancer before +// the context expires. +func (mb1 *mockBalancer) waitForExitIdle(ctx context.Context) error { + if _, err := mb1.exitIdleCh.Receive(ctx); err != nil { + return fmt.Errorf("error waiting for ExitIdle(): %v", err) + } + return nil +} + func (mb1 *mockBalancer) updateState(state balancer.State) { mb1.cc.UpdateState(state) } From ca5cc0bcad6e0270c565f8b08907212fc1c4a3e5 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 6 Jun 2022 20:20:12 +0000 Subject: [PATCH 518/998] credentials/google: support new-style xDS cluster names (#5399) --- credentials/google/google_test.go | 16 +++++++++ credentials/google/xds.go | 54 +++++++++++++++++++++++----- credentials/google/xds_test.go | 58 +++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+), 9 deletions(-) create mode 100644 credentials/google/xds_test.go diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index efebb3efab75..1809d545d0ec 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -122,6 +122,22 @@ func (s) TestClientHandshakeBasedOnClusterName(t *testing.T) { // CFE should use tls. wantTyp: "tls", }, + { + name: "with xdstp CFE cluster name", + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_bigtable.googleapis.com").Attributes, + }), + // CFE should use tls. 
+ wantTyp: "tls", + }, + { + name: "with xdstp non-CFE cluster name", + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "xdstp://other.com/envoy.config.cluster.v3.Cluster/google_cfe_bigtable.googleapis.com").Attributes, + }), + // non-CFE should use atls. + wantTyp: "alts", + }, } for _, tt := range tests { t.Run(bundleTyp+" "+tt.name, func(t *testing.T) { diff --git a/credentials/google/xds.go b/credentials/google/xds.go index b8c2e8f9204c..e32edc0421c3 100644 --- a/credentials/google/xds.go +++ b/credentials/google/xds.go @@ -21,6 +21,7 @@ package google import ( "context" "net" + "net/url" "strings" "google.golang.org/grpc/credentials" @@ -28,12 +29,16 @@ import ( ) const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. // - if attributes has cluster name -// - if cluster name has prefix "google_cfe_", use TLS +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS // - otherwise, use ALTS // - else, do TLS // @@ -50,18 +55,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust } } -func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { +// clusterName returns the xDS cluster name stored in the attributes in the +// context. 
+func clusterName(ctx context.Context) string { chi := credentials.ClientHandshakeInfoFromContext(ctx) if chi.Attributes == nil { - return c.tls.ClientHandshake(ctx, authority, rawConn) + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. + return false } - cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { - return c.tls.ClientHandshake(ctx, authority, rawConn) + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) } - // If attributes have cluster name, and cluster name is not cfe, it's a - // backend address, use ALTS. 
- return c.alts.ClientHandshake(ctx, authority, rawConn) + return c.tls.ClientHandshake(ctx, authority, rawConn) } func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { diff --git a/credentials/google/xds_test.go b/credentials/google/xds_test.go new file mode 100644 index 000000000000..8aeba396a518 --- /dev/null +++ b/credentials/google/xds_test.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package google + +import ( + "context" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/resolver" +) + +func (s) TestIsDirectPathCluster(t *testing.T) { + c := func(cluster string) context.Context { + return icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, cluster).Attributes, + }) + } + + testCases := []struct { + name string + ctx context.Context + want bool + }{ + {"not an xDS cluster", context.Background(), false}, + {"cfe", c("google_cfe_bigtable.googleapis.com"), false}, + {"non-cfe", c("google_bigtable.googleapis.com"), true}, + {"starts with xdstp but not cfe format", c("xdstp:google_cfe_bigtable.googleapis.com"), true}, + {"no authority", c("xdstp:///envoy.config.cluster.v3.Cluster/google_cfe_"), true}, + {"wrong authority", c("xdstp://foo.bar/envoy.config.cluster.v3.Cluster/google_cfe_"), true}, + {"xdstp CFE", c("xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_"), false}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if got := isDirectPathCluster(tc.ctx); got != tc.want { + t.Errorf("isDirectPathCluster(_) = %v; want %v", got, tc.want) + } + }) + } +} From a0d5484ee35d7302b08ebc8fd4ac5268c71da528 Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Mon, 6 Jun 2022 15:42:49 -0700 Subject: [PATCH 519/998] interop: remove duplicated xDS tests in GCE framework (#5395) --- test/kokoro/xds.sh | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index 0c12f0088b58..ca676f9d58ed 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -7,12 +7,6 @@ cd github export GOPATH="${HOME}/gopath" pushd grpc-go/interop/xds/client -branch=$(git branch --all --no-color --contains 
"${KOKORO_GITHUB_COMMIT}" \ - | grep -v HEAD | head -1) -shopt -s extglob -branch="${branch//[[:space:]]}" -branch="${branch##remotes/origin/}" -shopt -u extglob # Install a version of Go supported by gRPC for the new features, e.g. # errors.Is() curl --retry 3 -O -L https://go.dev/dl/go1.17.3.linux-amd64.tar.gz @@ -22,7 +16,7 @@ sudo ln -s /usr/local/go/bin/go /usr/bin/go for i in 1 2 3; do go build && break || sleep 5; done popd -git clone -b "${branch}" --single-branch --depth=1 https://github.com/grpc/grpc.git +git clone -b master --single-branch --depth=1 https://github.com/grpc/grpc.git grpc/tools/run_tests/helper_scripts/prep_xds.sh @@ -33,7 +27,7 @@ grpc/tools/run_tests/helper_scripts/prep_xds.sh # they are added into "all". GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ python3 grpc/tools/run_tests/run_xds_tests.py \ - --test_case="all,circuit_breaking,timeout,fault_injection,csds" \ + --test_case="ping_pong,circuit_breaking" \ --project_id=grpc-testing \ --project_num=830293263384 \ --source_image=projects/grpc-testing/global/images/xds-test-server-5 \ @@ -48,4 +42,3 @@ GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ {fail_on_failed_rpc} \ {rpcs_to_send} \ {metadata_to_send}" - From 34e4fc3bb588713db92eb4009522f82aa938213b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 6 Jun 2022 16:07:02 -0700 Subject: [PATCH 520/998] rls: use UNAVAILABLE instead of status from control plane (#5400) --- balancer/rls/picker.go | 19 ++++++++++++++++--- balancer/rls/picker_test.go | 6 +++--- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go index 9b40ccb4dfe1..ece27f0fc2ed 100644 --- a/balancer/rls/picker.go +++ b/balancer/rls/picker.go @@ -27,10 +27,12 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" internalgrpclog 
"google.golang.org/grpc/internal/grpclog" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) var ( @@ -129,9 +131,15 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // We get here only if the data cache entry has expired. If entry is in // backoff, delegate to default target or fail the pick. if dcEntry.backoffState != nil && dcEntry.backoffTime.After(now) { - status := dcEntry.status + st := dcEntry.status p.lb.cacheMu.RUnlock() - return p.useDefaultPickIfPossible(info, status) + + // Avoid propagating the status code received on control plane RPCs to the + // data plane which can lead to unexpected outcomes as we do not control + // the status code sent by the control plane. Propagating the status + // message received from the control plane is still fine, as it could be + // useful for debugging purposes. + return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) } // We get here only if the entry has expired and is not in backoff. @@ -220,7 +228,12 @@ func (p *rlsPicker) sendRequestAndReturnPick(cacheKey cacheKey, bs *backoffState // Entry is in backoff. Delegate to default target or fail the pick. case dcEntry.backoffState != nil && dcEntry.backoffTime.After(now): - return p.useDefaultPickIfPossible(info, dcEntry.status) + // Avoid propagating the status code received on control plane RPCs to the + // data plane which can lead to unexpected outcomes as we do not control + // the status code sent by the control plane. Propagating the status + // message received from the control plane is still fine, as it could be + // useful for debugging purposes. + return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", dcEntry.status.Error()))) // Entry has expired, but is not in backoff. 
Send request and queue pick. default: diff --git a/balancer/rls/picker_test.go b/balancer/rls/picker_test.go index a52aaa5e563d..11c91055d68c 100644 --- a/balancer/rls/picker_test.go +++ b/balancer/rls/picker_test.go @@ -20,7 +20,6 @@ package rls import ( "context" - "errors" "testing" "time" @@ -29,6 +28,7 @@ import ( "google.golang.org/grpc/credentials/insecure" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" rlstest "google.golang.org/grpc/internal/testutils/rls" + "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" ) @@ -512,7 +512,7 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntryInBackoff(t *testing.T // Set up the fake RLS server to return errors. This will push the cache // entry into backoff. - var rlsLastErr = errors.New("last RLS request failed") + var rlsLastErr = status.Error(codes.DeadlineExceeded, "last RLS request failed") rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { return &rlstest.RouteLookupResponse{Err: rlsLastErr} }) @@ -524,7 +524,7 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntryInBackoff(t *testing.T if test.withDefaultTarget { makeTestRPCAndExpectItToReachBackend(ctx, t, cc, defBackendCh) } else { - makeTestRPCAndVerifyError(ctx, t, cc, codes.Unknown, rlsLastErr) + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, rlsLastErr) } }) } From 9ee2f146d7004b586a37291873e27294358f80ba Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Tue, 7 Jun 2022 15:56:38 -0700 Subject: [PATCH 521/998] gcp/observability: Implement tracing/metrics via OpenCensus (#5372) --- gcp/observability/config.go | 3 + gcp/observability/go.mod | 2 + gcp/observability/go.sum | 111 +++++++++++++-- .../internal/config/config.pb.go | 117 ++++++++++------ .../internal/config/config.proto | 29 ++-- gcp/observability/observability.go | 9 +- gcp/observability/observability_test.go | 129 +++++++++++++++--- gcp/observability/opencensus.go 
| 88 ++++++++++++ 8 files changed, 411 insertions(+), 77 deletions(-) create mode 100644 gcp/observability/opencensus.go diff --git a/gcp/observability/config.go b/gcp/observability/config.go index fd0fc1485f26..428d527a30c6 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -82,6 +82,9 @@ func parseObservabilityConfig() (*configpb.ObservabilityConfig, error) { if err := validateFilters(&config); err != nil { return nil, fmt.Errorf("error parsing observability config: %v", err) } + if config.GlobalTraceSamplingRate > 1 || config.GlobalTraceSamplingRate < 0 { + return nil, fmt.Errorf("error parsing observability config: invalid global trace sampling rate %v", config.GlobalTraceSamplingRate) + } logger.Infof("Parsed ObservabilityConfig: %+v", &config) return &config, nil } diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 6f4cdd48b231..dcbfdaebb6e3 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -4,8 +4,10 @@ go 1.14 require ( cloud.google.com/go/logging v1.4.2 + contrib.go.opencensus.io/exporter/stackdriver v0.13.12 github.com/golang/protobuf v1.5.2 github.com/google/uuid v1.3.0 + go.opencensus.io v0.23.0 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 google.golang.org/grpc v1.46.0 google.golang.org/protobuf v1.27.1 diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 0f46213edea2..ff0b941c7c48 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -16,8 +16,15 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod 
h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -28,6 +35,8 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/logging v1.4.2 h1:Mu2Q75VBDQlW1HlBMjTX4X84UFR73G1TiLlRYc/b7tA= cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo= +cloud.google.com/go/monitoring v1.1.0 h1:ZnyNdf/XRcynMmKzRSNTOdOyYPs6G7do1l2D2hIvIKo= +cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -38,11 +47,19 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= 
cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/trace v1.0.0 h1:laKx2y7IWMjguCe5zZx6n7qLtREk4kyE69SXVC0VSN8= +cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -51,6 +68,7 @@ github.com/cncf/udpa/go 
v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -70,6 +88,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -88,6 +107,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -106,8 +127,9 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -119,27 +141,41 @@ github.com/google/pprof 
v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -150,6 +186,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -185,8 +222,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -196,8 +233,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -230,6 +267,7 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -244,6 +282,10 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -291,9 +333,20 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324 h1:pAwJxDByZctfPwzlNGrDN2BQLsdPb9NkhoTJtUkAO28= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -351,8 +404,12 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -379,8 +436,18 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU= google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= 
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -428,8 +495,29 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a h1:VA0wtJaR+W1I11P2f535J7D/YxyvEFMTMvcmyeZ9FBE= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2 h1:CUp93KYgL06Y/PdI8aRJaFiAHevPIGWQmijSqaUhue8= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -445,10 +533,13 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/gcp/observability/internal/config/config.pb.go b/gcp/observability/internal/config/config.pb.go index f41ddbf24db2..a60269c984d3 100644 --- a/gcp/observability/internal/config/config.pb.go +++ b/gcp/observability/internal/config/config.pb.go @@ -1,16 +1,16 @@ // Copyright 2022 The gRPC Authors // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. // Observability Config is used by gRPC Observability plugin to control provided // observability features. It contains parameters to enable/disable certain @@ -56,8 +56,12 @@ type ObservabilityConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Whether the logging data uploading to CloudLogging should be enabled or - // not. The default value is true. + // Whether the tracing data upload to CloudTrace should be enabled or not. 
+ EnableCloudTrace bool `protobuf:"varint,4,opt,name=enable_cloud_trace,json=enableCloudTrace,proto3" json:"enable_cloud_trace,omitempty"` + // Whether the metrics data upload to CloudMonitoring should be enabled or + // not. + EnableCloudMonitoring bool `protobuf:"varint,5,opt,name=enable_cloud_monitoring,json=enableCloudMonitoring,proto3" json:"enable_cloud_monitoring,omitempty"` + // Whether the logging data upload to CloudLogging should be enabled or not. EnableCloudLogging bool `protobuf:"varint,1,opt,name=enable_cloud_logging,json=enableCloudLogging,proto3" json:"enable_cloud_logging,omitempty"` // The destination GCP project identifier for the uploading log entries. If // empty, the gRPC Observability plugin will attempt to fetch the project_id @@ -68,6 +72,10 @@ type ObservabilityConfig struct { // the LogFilter. Any other LogFilter that also matches that comes later // will be ignored. So a LogFilter of "*/*" should appear last in this list. LogFilters []*ObservabilityConfig_LogFilter `protobuf:"bytes,3,rep,name=log_filters,json=logFilters,proto3" json:"log_filters,omitempty"` + // The global setting that controls the probability of a RPC being traced. + // For example, 0.05 means there is a 5% chance for a RPC to be traced, 1.0 + // means trace every call, 0 means don’t start new traces. 
+ GlobalTraceSamplingRate float64 `protobuf:"fixed64,6,opt,name=global_trace_sampling_rate,json=globalTraceSamplingRate,proto3" json:"global_trace_sampling_rate,omitempty"` } func (x *ObservabilityConfig) Reset() { @@ -102,6 +110,20 @@ func (*ObservabilityConfig) Descriptor() ([]byte, []int) { return file_gcp_observability_internal_config_config_proto_rawDescGZIP(), []int{0} } +func (x *ObservabilityConfig) GetEnableCloudTrace() bool { + if x != nil { + return x.EnableCloudTrace + } + return false +} + +func (x *ObservabilityConfig) GetEnableCloudMonitoring() bool { + if x != nil { + return x.EnableCloudMonitoring + } + return false +} + func (x *ObservabilityConfig) GetEnableCloudLogging() bool { if x != nil { return x.EnableCloudLogging @@ -123,6 +145,13 @@ func (x *ObservabilityConfig) GetLogFilters() []*ObservabilityConfig_LogFilter { return nil } +func (x *ObservabilityConfig) GetGlobalTraceSamplingRate() float64 { + if x != nil { + return x.GlobalTraceSamplingRate + } + return 0 +} + type ObservabilityConfig_LogFilter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -215,36 +244,46 @@ var file_gcp_observability_internal_config_config_proto_rawDesc = []byte{ 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x22, 0xcf, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x67, - 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, - 0x16, 0x64, 
0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x6d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x21, 0x0a, - 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x74, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x18, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x70, 0x68, 0x61, 0x22, 0xf2, 0x03, 0x0a, 0x13, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 
0x76, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x1a, + 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x17, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, + 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x6d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x74, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x18, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, + 0x2f, 
0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/gcp/observability/internal/config/config.proto b/gcp/observability/internal/config/config.proto index 116300dcd25c..2c108bfa2abf 100644 --- a/gcp/observability/internal/config/config.proto +++ b/gcp/observability/internal/config/config.proto @@ -1,16 +1,16 @@ // Copyright 2022 The gRPC Authors // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. // Observability Config is used by gRPC Observability plugin to control provided // observability features. It contains parameters to enable/disable certain @@ -34,8 +34,14 @@ option go_package = "google.golang.org/grpc/gcp/observability/internal/config"; // most common knobs for gRPC users. It's always possible to override with // explicit config in code. 
message ObservabilityConfig { - // Whether the logging data uploading to CloudLogging should be enabled or - // not. The default value is true. + // Whether the tracing data upload to CloudTrace should be enabled or not. + bool enable_cloud_trace = 4; + + // Whether the metrics data upload to CloudMonitoring should be enabled or + // not. + bool enable_cloud_monitoring = 5; + + // Whether the logging data upload to CloudLogging should be enabled or not. bool enable_cloud_logging = 1; // The destination GCP project identifier for the uploading log entries. If @@ -75,4 +81,9 @@ message ObservabilityConfig { // the LogFilter. Any other LogFilter that also matches that comes later // will be ignored. So a LogFilter of "*/*" should appear last in this list. repeated LogFilter log_filters = 3; + + // The global setting that controls the probability of a RPC being traced. + // For example, 0.05 means there is a 5% chance for a RPC to be traced, 1.0 + // means trace every call, 0 means don’t start new traces. + double global_trace_sampling_rate = 6; } diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go index b0269249b380..40242692e8d2 100644 --- a/gcp/observability/observability.go +++ b/gcp/observability/observability.go @@ -45,11 +45,9 @@ func init() { // // - it loads observability config from environment; // - it registers default exporters if not disabled by the config; -// - it sets up binary logging sink against the logging exporter. +// - it sets up telemetry collectors (binary logging sink or StatsHandlers). // // Note: this method should only be invoked once. -// Note: currently, the binarylog module only supports one sink, so using the -// "observability" module will conflict with existing binarylog usage. 
// Note: handle the error func Start(ctx context.Context) error { config, err := parseObservabilityConfig() @@ -65,6 +63,11 @@ func Start(ctx context.Context) error { return err } + // Enabling tracing and metrics via OpenCensus + if err := startOpenCensus(config, nil); err != nil { + return fmt.Errorf("failed to instrument OpenCensus: %v", err) + } + // Logging is controlled by the config at methods level. return defaultLogger.Start(ctx, config) } diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 16a3f935a68b..62936ccec109 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -28,6 +28,8 @@ import ( "testing" "time" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" configpb "google.golang.org/grpc/gcp/observability/internal/config" @@ -69,6 +71,7 @@ var ( testErrorPayload = []byte{77, 97, 114, 116, 104, 97} testErrorMessage = "test case injected error" infinitySizeBytes int32 = 1024 * 1024 * 1024 + defaultRequestCount = 24 ) type testServer struct { @@ -124,6 +127,7 @@ func (fle *fakeLoggingExporter) Close() error { type test struct { t *testing.T fle *fakeLoggingExporter + fe *fakeOpenCensusExporter testServer testgrpc.TestServiceServer // nil means none // srv and srvAddr are set once startServer is called. @@ -141,7 +145,7 @@ func (te *test) tearDown() { te.srv.Stop() End() - if !te.fle.isClosed { + if te.fle != nil && !te.fle.isClosed { te.t.Fatalf("fakeLoggingExporter not closed!") } } @@ -151,8 +155,7 @@ func (te *test) tearDown() { // modify it before calling its startServer and clientConn methods. 
func newTest(t *testing.T) *test { return &test{ - t: t, - fle: &fakeLoggingExporter{t: t}, + t: t, } } @@ -194,6 +197,7 @@ func (te *test) clientConn() *grpc.ClientConn { func (te *test) enablePluginWithConfig(config *configpb.ObservabilityConfig) { // Injects the fake exporter for testing purposes + te.fle = &fakeLoggingExporter{t: te.t} defaultLogger = newBinaryLogger(nil) iblog.SetLogger(defaultLogger) if err := defaultLogger.start(config, te.fle); err != nil { @@ -215,6 +219,18 @@ func (te *test) enablePluginWithCaptureAll() { }) } +func (te *test) enableOpenCensus() { + defaultMetricsReportingInterval = time.Millisecond * 100 + config := &configpb.ObservabilityConfig{ + EnableCloudLogging: true, + EnableCloudTrace: true, + EnableCloudMonitoring: true, + GlobalTraceSamplingRate: 1.0, + } + te.fe = &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: te.t} + startOpenCensus(config, te.fe) +} + func checkEventCommon(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord) { if seen.RpcId == "" { t.Fatalf("expect non-empty RpcId") @@ -329,6 +345,48 @@ func checkEventTrailer(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want * } } +const ( + TypeOpenCensusViewDistribution string = "distribution" + TypeOpenCensusViewCount = "count" + TypeOpenCensusViewSum = "sum" + TypeOpenCensusViewLastValue = "last_value" +) + +type fakeOpenCensusExporter struct { + // The map of the observed View name and type + SeenViews map[string]string + // Number of spans + SeenSpans int + + t *testing.T + mu sync.RWMutex +} + +func (fe *fakeOpenCensusExporter) ExportView(vd *view.Data) { + fe.mu.Lock() + defer fe.mu.Unlock() + for _, row := range vd.Rows { + fe.t.Logf("Metrics[%s]", vd.View.Name) + switch row.Data.(type) { + case *view.DistributionData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewDistribution + case *view.CountData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewCount + case *view.SumData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewSum + case 
*view.LastValueData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewLastValue + } + } +} + +func (fe *fakeOpenCensusExporter) ExportSpan(vd *trace.SpanData) { + fe.mu.Lock() + defer fe.mu.Unlock() + fe.SeenSpans++ + fe.t.Logf("Span[%v]", vd.Name) +} + func (s) TestLoggingForOkCall(t *testing.T) { te := newTest(t) defer te.tearDown() @@ -406,14 +464,10 @@ func (s) TestLoggingForErrorCall(t *testing.T) { te.startServer(&testServer{}) tc := testgrpc.NewTestServiceClient(te.clientConn()) - var ( - req *testpb.SimpleRequest - err error - ) - req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testErrorPayload}} + req := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testErrorPayload}} tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - _, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + _, err := tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) if err == nil { t.Fatalf("unary call expected to fail, but passed") } @@ -470,15 +524,10 @@ func (s) TestEmptyConfig(t *testing.T) { te.startServer(&testServer{}) tc := testgrpc.NewTestServiceClient(te.clientConn()) - var ( - resp *testpb.SimpleResponse - req *testpb.SimpleRequest - err error - ) - req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} + req := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + resp, err := tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) if err != nil { t.Fatalf("unary call failed: %v", err) } @@ -640,3 +689,51 @@ func (s) TestNoEnvSet(t *testing.T) { t.Fatalf("Invalid patterns not triggering error") } } + +func (s) TestOpenCensusIntegration(t *testing.T) { + te := newTest(t) + defer te.tearDown() + te.enableOpenCensus() + 
te.startServer(&testServer{}) + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + for i := 0; i < defaultRequestCount; i++ { + req := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} + tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) + if err != nil { + t.Fatalf("unary call failed: %v", err) + } + } + t.Logf("unary call passed count=%v", defaultRequestCount) + + // Wait for the gRPC transport to gracefully close to ensure no lost event. + te.cc.Close() + te.srv.GracefulStop() + + var errs []error + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ctx.Err() == nil { + errs = nil + te.fe.mu.RLock() + if value := te.fe.SeenViews["grpc.io/client/completed_rpcs"]; value != TypeOpenCensusViewCount { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + if value := te.fe.SeenViews["grpc.io/server/completed_rpcs"]; value != TypeOpenCensusViewCount { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + if te.fe.SeenSpans <= 0 { + errs = append(errs, fmt.Errorf("unexpected number of seen spans: %v <= 0", te.fe.SeenSpans)) + } + te.fe.mu.RUnlock() + if len(errs) == 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + if len(errs) != 0 { + t.Fatalf("Invalid OpenCensus export data: %v", errs) + } +} diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go new file mode 100644 index 000000000000..c303eb87f76b --- /dev/null +++ b/gcp/observability/opencensus.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "fmt" + "time" + + "contrib.go.opencensus.io/exporter/stackdriver" + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "google.golang.org/grpc" + configpb "google.golang.org/grpc/gcp/observability/internal/config" + "google.golang.org/grpc/internal" +) + +var ( + // It's a variable instead of const to speed up testing + defaultMetricsReportingInterval = time.Second * 30 +) + +// This method accepts config and exporter; the exporter argument is exposed to +// assist unit testing of the OpenCensus behavior. +func startOpenCensus(config *configpb.ObservabilityConfig, exporter interface{}) error { + // If both tracing and metrics are disabled, there's no point inject default + // StatsHandler. 
+ if config == nil || (!config.EnableCloudTrace && !config.EnableCloudMonitoring) { + return nil + } + + if exporter == nil { + // Create the Stackdriver exporter, which is shared between tracing and stats + mr := monitoredresource.Autodetect() + logger.Infof("Detected MonitoredResource:: %+v", mr) + var err error + if exporter, err = stackdriver.NewExporter(stackdriver.Options{ + ProjectID: config.DestinationProjectId, + MonitoredResource: mr, + }); err != nil { + return fmt.Errorf("failed to create Stackdriver exporter: %v", err) + } + } + + var so trace.StartOptions + if config.EnableCloudTrace { + so.Sampler = trace.ProbabilitySampler(config.GlobalTraceSamplingRate) + trace.RegisterExporter(exporter.(trace.Exporter)) + logger.Infof("Start collecting and exporting trace spans with global_trace_sampling_rate=%.2f", config.GlobalTraceSamplingRate) + } + + if config.EnableCloudMonitoring { + if err := view.Register(ocgrpc.DefaultClientViews...); err != nil { + return fmt.Errorf("failed to register default client views: %v", err) + } + if err := view.Register(ocgrpc.DefaultServerViews...); err != nil { + return fmt.Errorf("failed to register default server views: %v", err) + } + view.SetReportingPeriod(defaultMetricsReportingInterval) + view.RegisterExporter(exporter.(view.Exporter)) + logger.Infof("Start collecting and exporting metrics") + } + + // Only register default StatsHandlers if other things are setup correctly. 
+ internal.AddExtraServerOptions.(func(opt ...grpc.ServerOption))(grpc.StatsHandler(&ocgrpc.ServerHandler{StartOptions: so})) + internal.AddExtraDialOptions.(func(opt ...grpc.DialOption))(grpc.WithStatsHandler(&ocgrpc.ClientHandler{StartOptions: so})) + logger.Infof("Enabled OpenCensus StatsHandlers for clients and servers") + + return nil +} From 584d9cd11a1da55e3558041c9f88f22ca2255f4e Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Wed, 8 Jun 2022 08:25:36 -0700 Subject: [PATCH 522/998] gcp/observability: update log name (#5414) --- gcp/observability/exporting.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go index 79576eb999f2..cf95383726c3 100644 --- a/gcp/observability/exporting.go +++ b/gcp/observability/exporting.go @@ -58,7 +58,7 @@ func newCloudLoggingExporter(ctx context.Context, projectID string) (*cloudLoggi return &cloudLoggingExporter{ projectID: projectID, client: c, - logger: c.Logger("grpc", gcplogging.CommonLabels(customTags)), + logger: c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(customTags)), }, nil } From 71f16a1d4afab21dea1732c89401d2e7b9949d61 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 14 Jun 2022 15:32:36 -0700 Subject: [PATCH 523/998] internal/proto: pull in recent changes to service config proto (#5424) --- .../grpc_service_config/service_config.pb.go | 758 ++++++++++-------- 1 file changed, 424 insertions(+), 334 deletions(-) diff --git a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go index d39f777577f9..0037d2214f35 100644 --- a/internal/proto/grpc_service_config/service_config.pb.go +++ b/internal/proto/grpc_service_config/service_config.pb.go @@ -167,7 +167,7 @@ func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConfig_LoadBalancingPolicy.Descriptor instead. 
func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19, 0} } // Configuration for a method. @@ -1335,6 +1335,54 @@ func (x *XdsWrrLocalityLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalanc return nil } +// Configuration for the least_request LB policy. +type LeastRequestLocalityLoadBalancingPolicyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChoiceCount uint64 `protobuf:"varint,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` +} + +func (x *LeastRequestLocalityLoadBalancingPolicyConfig) Reset() { + *x = LeastRequestLocalityLoadBalancingPolicyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeastRequestLocalityLoadBalancingPolicyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeastRequestLocalityLoadBalancingPolicyConfig) ProtoMessage() {} + +func (x *LeastRequestLocalityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeastRequestLocalityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
+func (*LeastRequestLocalityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} +} + +func (x *LeastRequestLocalityLoadBalancingPolicyConfig) GetChoiceCount() uint64 { + if x != nil { + return x.ChoiceCount + } + return 0 +} + // Configuration for xds LB policy. type XdsConfig struct { state protoimpl.MessageState @@ -1368,7 +1416,7 @@ type XdsConfig struct { func (x *XdsConfig) Reset() { *x = XdsConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1381,7 +1429,7 @@ func (x *XdsConfig) String() string { func (*XdsConfig) ProtoMessage() {} func (x *XdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] + mi := &file_grpc_service_config_service_config_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1394,7 +1442,7 @@ func (x *XdsConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. func (*XdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17} } // Deprecated: Do not use. 
@@ -1468,13 +1516,14 @@ type LoadBalancingConfig struct { // *LoadBalancingConfig_Xds // *LoadBalancingConfig_XdsExperimental // *LoadBalancingConfig_XdsWrrLocalityExperimental + // *LoadBalancingConfig_LeastRequestExperimental Policy isLoadBalancingConfig_Policy `protobuf_oneof:"policy"` } func (x *LoadBalancingConfig) Reset() { *x = LoadBalancingConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1487,7 +1536,7 @@ func (x *LoadBalancingConfig) String() string { func (*LoadBalancingConfig) ProtoMessage() {} func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] + mi := &file_grpc_service_config_service_config_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1500,7 +1549,7 @@ func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use LoadBalancingConfig.ProtoReflect.Descriptor instead. 
func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18} } func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { @@ -1626,6 +1675,13 @@ func (x *LoadBalancingConfig) GetXdsWrrLocalityExperimental() *XdsWrrLocalityLoa return nil } +func (x *LoadBalancingConfig) GetLeastRequestExperimental() *LeastRequestLocalityLoadBalancingPolicyConfig { + if x, ok := x.GetPolicy().(*LoadBalancingConfig_LeastRequestExperimental); ok { + return x.LeastRequestExperimental + } + return nil +} + type isLoadBalancingConfig_Policy interface { isLoadBalancingConfig_Policy() } @@ -1704,6 +1760,10 @@ type LoadBalancingConfig_XdsWrrLocalityExperimental struct { XdsWrrLocalityExperimental *XdsWrrLocalityLoadBalancingPolicyConfig `protobuf:"bytes,16,opt,name=xds_wrr_locality_experimental,proto3,oneof"` } +type LoadBalancingConfig_LeastRequestExperimental struct { + LeastRequestExperimental *LeastRequestLocalityLoadBalancingPolicyConfig `protobuf:"bytes,17,opt,name=least_request_experimental,proto3,oneof"` +} + func (*LoadBalancingConfig_PickFirst) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_RoundRobin) isLoadBalancingConfig_Policy() {} @@ -1736,6 +1796,8 @@ func (*LoadBalancingConfig_XdsExperimental) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_XdsWrrLocalityExperimental) isLoadBalancingConfig_Policy() {} +func (*LoadBalancingConfig_LeastRequestExperimental) isLoadBalancingConfig_Policy() {} + // A ServiceConfig represents information about a service but is not specific to // any name resolver. 
type ServiceConfig struct { @@ -1758,7 +1820,7 @@ type ServiceConfig struct { func (x *ServiceConfig) Reset() { *x = ServiceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1771,7 +1833,7 @@ func (x *ServiceConfig) String() string { func (*ServiceConfig) ProtoMessage() {} func (x *ServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] + mi := &file_grpc_service_config_service_config_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1784,7 +1846,7 @@ func (x *ServiceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig.ProtoReflect.Descriptor instead. func (*ServiceConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19} } // Deprecated: Do not use. 
@@ -1862,7 +1924,7 @@ type MethodConfig_Name struct { func (x *MethodConfig_Name) Reset() { *x = MethodConfig_Name{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1875,7 +1937,7 @@ func (x *MethodConfig_Name) String() string { func (*MethodConfig_Name) ProtoMessage() {} func (x *MethodConfig_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] + mi := &file_grpc_service_config_service_config_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1934,7 +1996,7 @@ type MethodConfig_RetryPolicy struct { func (x *MethodConfig_RetryPolicy) Reset() { *x = MethodConfig_RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1947,7 +2009,7 @@ func (x *MethodConfig_RetryPolicy) String() string { func (*MethodConfig_RetryPolicy) ProtoMessage() {} func (x *MethodConfig_RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] + mi := &file_grpc_service_config_service_config_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2029,7 +2091,7 @@ type MethodConfig_HedgingPolicy struct { func (x *MethodConfig_HedgingPolicy) Reset() { *x = MethodConfig_HedgingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2042,7 +2104,7 @@ func (x *MethodConfig_HedgingPolicy) String() string { func (*MethodConfig_HedgingPolicy) ProtoMessage() {} func (x *MethodConfig_HedgingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] + mi := &file_grpc_service_config_service_config_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2115,7 +2177,7 @@ type OutlierDetectionLoadBalancingConfig_SuccessRateEjection struct { func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) Reset() { *x = OutlierDetectionLoadBalancingConfig_SuccessRateEjection{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2128,7 +2190,7 @@ func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) String() strin func (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoMessage() {} func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] + mi := &file_grpc_service_config_service_config_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2202,7 +2264,7 @@ type OutlierDetectionLoadBalancingConfig_FailurePercentageEjection struct { func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) Reset() { *x = OutlierDetectionLoadBalancingConfig_FailurePercentageEjection{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2215,7 +2277,7 @@ func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) String() func (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoMessage() {} func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] + mi := &file_grpc_service_config_service_config_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2276,7 +2338,7 @@ type PriorityLoadBalancingPolicyConfig_Child struct { func (x *PriorityLoadBalancingPolicyConfig_Child) Reset() { *x = PriorityLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + mi := &file_grpc_service_config_service_config_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2289,7 +2351,7 @@ func (x *PriorityLoadBalancingPolicyConfig_Child) String() string { func (*PriorityLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] + mi := &file_grpc_service_config_service_config_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2331,7 +2393,7 @@ type WeightedTargetLoadBalancingPolicyConfig_Target struct { func (x *WeightedTargetLoadBalancingPolicyConfig_Target) Reset() { *x = WeightedTargetLoadBalancingPolicyConfig_Target{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + mi := &file_grpc_service_config_service_config_proto_msgTypes[27] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2344,7 +2406,7 @@ func (x *WeightedTargetLoadBalancingPolicyConfig_Target) String() string { func (*WeightedTargetLoadBalancingPolicyConfig_Target) ProtoMessage() {} func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[26] + mi := &file_grpc_service_config_service_config_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2385,7 +2447,7 @@ type XdsClusterManagerLoadBalancingPolicyConfig_Child struct { func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) Reset() { *x = XdsClusterManagerLoadBalancingPolicyConfig_Child{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[28] + mi := &file_grpc_service_config_service_config_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2398,7 +2460,7 @@ func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) String() string { func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoMessage() {} func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[28] + mi := &file_grpc_service_config_service_config_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2433,7 +2495,7 @@ type XdsServer_ChannelCredentials struct { func (x *XdsServer_ChannelCredentials) Reset() { *x = XdsServer_ChannelCredentials{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[30] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ 
-2446,7 +2508,7 @@ func (x *XdsServer_ChannelCredentials) String() string { func (*XdsServer_ChannelCredentials) ProtoMessage() {} func (x *XdsServer_ChannelCredentials) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[30] + mi := &file_grpc_service_config_service_config_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2520,7 +2582,7 @@ type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism struct { func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Reset() { *x = XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] + mi := &file_grpc_service_config_service_config_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2533,7 +2595,7 @@ func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) String( func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoMessage() {} func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] + mi := &file_grpc_service_config_service_config_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2619,7 +2681,7 @@ type XdsClusterImplLoadBalancingPolicyConfig_DropCategory struct { func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Reset() { *x = XdsClusterImplLoadBalancingPolicyConfig_DropCategory{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[32] + mi := &file_grpc_service_config_service_config_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
ms.StoreMessageInfo(mi) } @@ -2632,7 +2694,7 @@ func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) String() string { func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoMessage() {} func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[32] + mi := &file_grpc_service_config_service_config_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2676,7 +2738,7 @@ type LrsLoadBalancingPolicyConfig_Locality struct { func (x *LrsLoadBalancingPolicyConfig_Locality) Reset() { *x = LrsLoadBalancingPolicyConfig_Locality{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[33] + mi := &file_grpc_service_config_service_config_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2689,7 +2751,7 @@ func (x *LrsLoadBalancingPolicyConfig_Locality) String() string { func (*LrsLoadBalancingPolicyConfig_Locality) ProtoMessage() {} func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[33] + mi := &file_grpc_service_config_service_config_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2760,7 +2822,7 @@ type ServiceConfig_RetryThrottlingPolicy struct { func (x *ServiceConfig_RetryThrottlingPolicy) Reset() { *x = ServiceConfig_RetryThrottlingPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[34] + mi := &file_grpc_service_config_service_config_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2773,7 +2835,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) String() 
string { func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[34] + mi := &file_grpc_service_config_service_config_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2786,7 +2848,7 @@ func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Messag // Deprecated: Use ServiceConfig_RetryThrottlingPolicy.ProtoReflect.Descriptor instead. func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18, 0} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19, 0} } func (x *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { @@ -2815,7 +2877,7 @@ type ServiceConfig_HealthCheckConfig struct { func (x *ServiceConfig_HealthCheckConfig) Reset() { *x = ServiceConfig_HealthCheckConfig{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[35] + mi := &file_grpc_service_config_service_config_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2828,7 +2890,7 @@ func (x *ServiceConfig_HealthCheckConfig) String() string { func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[35] + mi := &file_grpc_service_config_service_config_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2841,7 +2903,7 @@ func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConfig_HealthCheckConfig.ProtoReflect.Descriptor instead. 
func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18, 1} + return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19, 1} } func (x *ServiceConfig_HealthCheckConfig) GetServiceName() *wrapperspb.StringValue { @@ -3292,184 +3354,197 @@ var file_grpc_service_config_service_config_proto_rawDesc = []byte{ 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe0, - 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, - 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, + 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x52, + 0x0a, 0x2d, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 
0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, + 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 
0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x94, 0x0e, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, + 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, + 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x12, 0x68, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x22, 0x8d, 0x0d, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, - 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, - 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 
0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x68, 0x0a, 0x11, - 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, - 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, - 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x15, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, - 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 
0x2e, 0x67, 0x72, 0x70, + 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, + 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, + 0x70, 0x63, 0x6c, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, 0x77, 0x65, 0x69, 0x67, + 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, 0x20, 0x78, 0x64, 0x73, - 0x5f, 0x63, 0x6c, 
0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, 0x63, 0x64, 0x73, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, + 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 0x5f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, + 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, + 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 
0x6c, + 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, + 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, + 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, + 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, + 0x61, 0x73, 0x68, 0x4c, 0x6f, 
0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, + 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x12, 0x63, 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, + 0x01, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, + 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, + 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x77, 0x72, 0x72, + 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, + 0x73, 0x5f, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, + 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1a, + 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, - 0x65, 
0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, - 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, - 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, - 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, - 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, - 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, - 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, - 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, - 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x2e, 0x58, - 0x64, 0x73, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x77, 0x72, - 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, - 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 
0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, - 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, - 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, - 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, - 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, - 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1a, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, + 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, + 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, + 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, + 0x68, 0x72, 
0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, + 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, + 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, + 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 
0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, + 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3485,7 +3560,7 @@ func file_grpc_service_config_service_config_proto_rawDescGZIP() []byte { } var file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 36) +var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 37) var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type)(0), // 0: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type (ServiceConfig_LoadBalancingPolicy)(0), // 1: grpc.service_config.ServiceConfig.LoadBalancingPolicy @@ -3505,72 +3580,73 @@ var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ (*RingHashLoadBalancingConfig)(nil), // 15: grpc.service_config.RingHashLoadBalancingConfig (*LrsLoadBalancingPolicyConfig)(nil), // 16: grpc.service_config.LrsLoadBalancingPolicyConfig (*XdsWrrLocalityLoadBalancingPolicyConfig)(nil), // 17: grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig - (*XdsConfig)(nil), // 18: grpc.service_config.XdsConfig - (*LoadBalancingConfig)(nil), // 19: grpc.service_config.LoadBalancingConfig - (*ServiceConfig)(nil), // 20: grpc.service_config.ServiceConfig - (*MethodConfig_Name)(nil), // 21: grpc.service_config.MethodConfig.Name - (*MethodConfig_RetryPolicy)(nil), // 22: grpc.service_config.MethodConfig.RetryPolicy - (*MethodConfig_HedgingPolicy)(nil), // 23: 
grpc.service_config.MethodConfig.HedgingPolicy - (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection)(nil), // 24: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection - (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection)(nil), // 25: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection - (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 26: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - nil, // 27: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 28: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - nil, // 29: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 30: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - nil, // 31: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - (*XdsServer_ChannelCredentials)(nil), // 32: grpc.service_config.XdsServer.ChannelCredentials - (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 33: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 34: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 35: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - (*ServiceConfig_RetryThrottlingPolicy)(nil), // 36: grpc.service_config.ServiceConfig.RetryThrottlingPolicy - (*ServiceConfig_HealthCheckConfig)(nil), // 37: grpc.service_config.ServiceConfig.HealthCheckConfig - (*wrapperspb.BoolValue)(nil), // 38: google.protobuf.BoolValue - (*durationpb.Duration)(nil), // 39: google.protobuf.Duration - (*wrapperspb.UInt32Value)(nil), // 40: google.protobuf.UInt32Value - (*structpb.Value)(nil), // 41: google.protobuf.Value - 
(*wrapperspb.StringValue)(nil), // 42: google.protobuf.StringValue - (code.Code)(0), // 43: google.rpc.Code - (*structpb.Struct)(nil), // 44: google.protobuf.Struct + (*LeastRequestLocalityLoadBalancingPolicyConfig)(nil), // 18: grpc.service_config.LeastRequestLocalityLoadBalancingPolicyConfig + (*XdsConfig)(nil), // 19: grpc.service_config.XdsConfig + (*LoadBalancingConfig)(nil), // 20: grpc.service_config.LoadBalancingConfig + (*ServiceConfig)(nil), // 21: grpc.service_config.ServiceConfig + (*MethodConfig_Name)(nil), // 22: grpc.service_config.MethodConfig.Name + (*MethodConfig_RetryPolicy)(nil), // 23: grpc.service_config.MethodConfig.RetryPolicy + (*MethodConfig_HedgingPolicy)(nil), // 24: grpc.service_config.MethodConfig.HedgingPolicy + (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection)(nil), // 25: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection + (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection)(nil), // 26: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection + (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 27: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + nil, // 28: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 29: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + nil, // 30: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 31: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + nil, // 32: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + (*XdsServer_ChannelCredentials)(nil), // 33: grpc.service_config.XdsServer.ChannelCredentials + (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 34: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + 
(*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 35: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 36: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + (*ServiceConfig_RetryThrottlingPolicy)(nil), // 37: grpc.service_config.ServiceConfig.RetryThrottlingPolicy + (*ServiceConfig_HealthCheckConfig)(nil), // 38: grpc.service_config.ServiceConfig.HealthCheckConfig + (*wrapperspb.BoolValue)(nil), // 39: google.protobuf.BoolValue + (*durationpb.Duration)(nil), // 40: google.protobuf.Duration + (*wrapperspb.UInt32Value)(nil), // 41: google.protobuf.UInt32Value + (*structpb.Value)(nil), // 42: google.protobuf.Value + (*wrapperspb.StringValue)(nil), // 43: google.protobuf.StringValue + (code.Code)(0), // 44: google.rpc.Code + (*structpb.Struct)(nil), // 45: google.protobuf.Struct } var file_grpc_service_config_service_config_proto_depIdxs = []int32{ - 21, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name - 38, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue - 39, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration - 40, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value - 40, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value - 22, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy - 23, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy - 39, // 7: grpc.service_config.OutlierDetectionLoadBalancingConfig.interval:type_name -> google.protobuf.Duration - 39, // 8: grpc.service_config.OutlierDetectionLoadBalancingConfig.base_ejection_time:type_name -> google.protobuf.Duration - 39, // 9: 
grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_time:type_name -> google.protobuf.Duration - 40, // 10: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_percent:type_name -> google.protobuf.UInt32Value - 24, // 11: grpc.service_config.OutlierDetectionLoadBalancingConfig.success_rate_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection - 25, // 12: grpc.service_config.OutlierDetectionLoadBalancingConfig.failure_percentage_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection - 19, // 13: grpc.service_config.OutlierDetectionLoadBalancingConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 19, // 14: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 39, // 15: grpc.service_config.GrpcLbConfig.initial_fallback_timeout:type_name -> google.protobuf.Duration - 27, // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 29, // 17: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 31, // 18: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - 32, // 19: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials - 41, // 20: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value - 33, // 21: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - 19, // 22: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> 
grpc.service_config.LoadBalancingConfig - 42, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 22, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name + 39, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue + 40, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration + 41, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value + 41, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value + 23, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy + 24, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy + 40, // 7: grpc.service_config.OutlierDetectionLoadBalancingConfig.interval:type_name -> google.protobuf.Duration + 40, // 8: grpc.service_config.OutlierDetectionLoadBalancingConfig.base_ejection_time:type_name -> google.protobuf.Duration + 40, // 9: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_time:type_name -> google.protobuf.Duration + 41, // 10: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_percent:type_name -> google.protobuf.UInt32Value + 25, // 11: grpc.service_config.OutlierDetectionLoadBalancingConfig.success_rate_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection + 26, // 12: grpc.service_config.OutlierDetectionLoadBalancingConfig.failure_percentage_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection + 20, // 13: grpc.service_config.OutlierDetectionLoadBalancingConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 20, // 14: 
grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 40, // 15: grpc.service_config.GrpcLbConfig.initial_fallback_timeout:type_name -> google.protobuf.Duration + 28, // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry + 30, // 17: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry + 32, // 18: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry + 33, // 19: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials + 42, // 20: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value + 34, // 21: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism + 20, // 22: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig + 43, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue 11, // 24: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 40, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 34, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - 19, // 27: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 
42, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 19, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 19, // 30: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 35, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 19, // 32: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 19, // 33: grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 19, // 34: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 19, // 35: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 42, // 36: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 41, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 35, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory + 20, // 27: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 43, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 20, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig + 20, // 30: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> 
grpc.service_config.LoadBalancingConfig + 36, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality + 20, // 32: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 20, // 33: grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 20, // 34: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 20, // 35: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig + 43, // 36: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue 3, // 37: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig 4, // 38: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig 5, // 39: grpc.service_config.LoadBalancingConfig.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig @@ -3584,45 +3660,46 @@ var file_grpc_service_config_service_config_proto_depIdxs = []int32{ 15, // 47: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig 16, // 48: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig 14, // 49: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 18, // 50: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 18, // 51: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig + 19, // 50: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig + 19, // 51: 
grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig 17, // 52: grpc.service_config.LoadBalancingConfig.xds_wrr_locality_experimental:type_name -> grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig - 1, // 53: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 19, // 54: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 55: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 36, // 56: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 37, // 57: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 39, // 58: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 39, // 59: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 43, // 60: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 39, // 61: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 43, // 62: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 40, // 63: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value - 40, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 40, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 40, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> 
google.protobuf.UInt32Value - 40, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value - 40, // 68: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 40, // 69: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 40, // 70: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value - 19, // 71: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 26, // 72: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 19, // 73: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 28, // 74: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 19, // 75: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 30, // 76: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - 44, // 77: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct - 42, // 78: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> 
grpc.service_config.XdsServer - 40, // 80: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 0, // 81: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type - 5, // 82: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig - 42, // 83: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 84, // [84:84] is the sub-list for method output_type - 84, // [84:84] is the sub-list for method input_type - 84, // [84:84] is the sub-list for extension type_name - 84, // [84:84] is the sub-list for extension extendee - 0, // [0:84] is the sub-list for field type_name + 18, // 53: grpc.service_config.LoadBalancingConfig.least_request_experimental:type_name -> grpc.service_config.LeastRequestLocalityLoadBalancingPolicyConfig + 1, // 54: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy + 20, // 55: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig + 2, // 56: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig + 37, // 57: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy + 38, // 58: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig + 40, // 59: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration + 40, // 60: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration + 44, // 61: 
grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code + 40, // 62: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration + 44, // 63: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code + 41, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value + 41, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 41, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 41, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> google.protobuf.UInt32Value + 41, // 68: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value + 41, // 69: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value + 41, // 70: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value + 41, // 71: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value + 20, // 72: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig + 27, // 73: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child + 20, // 74: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 29, // 75: 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target + 20, // 76: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig + 31, // 77: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child + 45, // 78: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct + 43, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue + 11, // 80: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer + 41, // 81: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value + 0, // 82: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type + 5, // 83: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig + 43, // 84: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue + 85, // [85:85] is the sub-list for method output_type + 85, // [85:85] is the sub-list for method input_type + 85, // [85:85] is the sub-list for extension type_name + 85, // [85:85] is the sub-list for extension extendee + 0, // [0:85] is the sub-list for field type_name } func init() { file_grpc_service_config_service_config_proto_init() } @@ -3824,7 +3901,7 @@ func 
file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsConfig); i { + switch v := v.(*LeastRequestLocalityLoadBalancingPolicyConfig); i { case 0: return &v.state case 1: @@ -3836,7 +3913,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancingConfig); i { + switch v := v.(*XdsConfig); i { case 0: return &v.state case 1: @@ -3848,7 +3925,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig); i { + switch v := v.(*LoadBalancingConfig); i { case 0: return &v.state case 1: @@ -3860,7 +3937,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_Name); i { + switch v := v.(*ServiceConfig); i { case 0: return &v.state case 1: @@ -3872,7 +3949,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_RetryPolicy); i { + switch v := v.(*MethodConfig_Name); i { case 0: return &v.state case 1: @@ -3884,7 +3961,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_HedgingPolicy); i { + switch v := v.(*MethodConfig_RetryPolicy); i { case 0: return &v.state case 1: @@ -3896,7 +3973,7 @@ func file_grpc_service_config_service_config_proto_init() { } } 
file_grpc_service_config_service_config_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutlierDetectionLoadBalancingConfig_SuccessRateEjection); i { + switch v := v.(*MethodConfig_HedgingPolicy); i { case 0: return &v.state case 1: @@ -3908,7 +3985,7 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection); i { + switch v := v.(*OutlierDetectionLoadBalancingConfig_SuccessRateEjection); i { case 0: return &v.state case 1: @@ -3920,6 +3997,18 @@ func file_grpc_service_config_service_config_proto_init() { } } file_grpc_service_config_service_config_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_service_config_service_config_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { case 0: return &v.state @@ -3931,7 +4020,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { case 0: return &v.state @@ -3943,7 +4032,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[29].Exporter = func(v interface{}, i 
int) interface{} { switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig_Child); i { case 0: return &v.state @@ -3955,7 +4044,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsServer_ChannelCredentials); i { case 0: return &v.state @@ -3967,7 +4056,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism); i { case 0: return &v.state @@ -3979,7 +4068,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig_DropCategory); i { case 0: return &v.state @@ -3991,7 +4080,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LrsLoadBalancingPolicyConfig_Locality); i { case 0: return &v.state @@ -4003,7 +4092,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + 
file_grpc_service_config_service_config_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_RetryThrottlingPolicy); i { case 0: return &v.state @@ -4015,7 +4104,7 @@ func file_grpc_service_config_service_config_proto_init() { return nil } } - file_grpc_service_config_service_config_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_grpc_service_config_service_config_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceConfig_HealthCheckConfig); i { case 0: return &v.state @@ -4032,7 +4121,7 @@ func file_grpc_service_config_service_config_proto_init() { (*MethodConfig_RetryPolicy_)(nil), (*MethodConfig_HedgingPolicy_)(nil), } - file_grpc_service_config_service_config_proto_msgTypes[17].OneofWrappers = []interface{}{ + file_grpc_service_config_service_config_proto_msgTypes[18].OneofWrappers = []interface{}{ (*LoadBalancingConfig_PickFirst)(nil), (*LoadBalancingConfig_RoundRobin)(nil), (*LoadBalancingConfig_OutlierDetection)(nil), @@ -4049,6 +4138,7 @@ func file_grpc_service_config_service_config_proto_init() { (*LoadBalancingConfig_Xds)(nil), (*LoadBalancingConfig_XdsExperimental)(nil), (*LoadBalancingConfig_XdsWrrLocalityExperimental)(nil), + (*LoadBalancingConfig_LeastRequestExperimental)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -4056,7 +4146,7 @@ func file_grpc_service_config_service_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_service_config_service_config_proto_rawDesc, NumEnums: 2, - NumMessages: 36, + NumMessages: 37, NumExtensions: 0, NumServices: 0, }, From f229f9c79b2663ecfc315f5f8af64cbd5581b344 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 15 Jun 2022 10:28:59 -0700 Subject: [PATCH 524/998] weightedroundrobin: update comments to indicate where addrInfo is stored (#5427) --- balancer/weightedroundrobin/weightedroundrobin.go | 15 ++++++++------- 1 file changed, 8 
insertions(+), 7 deletions(-) diff --git a/balancer/weightedroundrobin/weightedroundrobin.go b/balancer/weightedroundrobin/weightedroundrobin.go index f15dddb56218..d82b714e0701 100644 --- a/balancer/weightedroundrobin/weightedroundrobin.go +++ b/balancer/weightedroundrobin/weightedroundrobin.go @@ -26,12 +26,12 @@ import ( // Name is the name of weighted_round_robin balancer. const Name = "weighted_round_robin" -// attributeKey is the type used as the key to store AddrInfo in the Attributes -// field of resolver.Address. +// attributeKey is the type used as the key to store AddrInfo in the +// BalancerAttributes field of resolver.Address. type attributeKey struct{} -// AddrInfo will be stored inside Address metadata in order to use weighted -// roundrobin balancer. +// AddrInfo will be stored in the BalancerAttributes field of Address in order +// to use weighted roundrobin balancer. type AddrInfo struct { Weight uint32 } @@ -42,8 +42,8 @@ func (a AddrInfo) Equal(o interface{}) bool { return ok && oa.Weight == a.Weight } -// SetAddrInfo returns a copy of addr in which the Attributes field is updated -// with addrInfo. +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with addrInfo. // // Experimental // @@ -54,7 +54,8 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { return addr } -// GetAddrInfo returns the AddrInfo stored in the Attributes fields of addr. +// GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. 
// // Experimental // From c0198a9ce33b0755079e4cdae7f2315af2ebe993 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 15 Jun 2022 10:51:48 -0700 Subject: [PATCH 525/998] ringhash: use grpctest.Tester in unit tests (#5428) --- xds/internal/balancer/ringhash/config_test.go | 2 +- xds/internal/balancer/ringhash/picker_test.go | 14 ++++++------- xds/internal/balancer/ringhash/ring_test.go | 6 +++--- .../balancer/ringhash/ringhash_test.go | 21 +++++++++++++------ 4 files changed, 26 insertions(+), 17 deletions(-) diff --git a/xds/internal/balancer/ringhash/config_test.go b/xds/internal/balancer/ringhash/config_test.go index a2a966dc3181..175301981ef4 100644 --- a/xds/internal/balancer/ringhash/config_test.go +++ b/xds/internal/balancer/ringhash/config_test.go @@ -24,7 +24,7 @@ import ( "github.com/google/go-cmp/cmp" ) -func TestParseConfig(t *testing.T) { +func (s) TestParseConfig(t *testing.T) { tests := []struct { name string js string diff --git a/xds/internal/balancer/ringhash/picker_test.go b/xds/internal/balancer/ringhash/picker_test.go index 2619ea7002c6..5963110b0ff1 100644 --- a/xds/internal/balancer/ringhash/picker_test.go +++ b/xds/internal/balancer/ringhash/picker_test.go @@ -46,7 +46,7 @@ func newTestRing(cStats []connectivity.State) *ring { return &ring{items: items} } -func TestPickerPickFirstTwo(t *testing.T) { +func (s) TestPickerPickFirstTwo(t *testing.T) { tests := []struct { name string ring *ring @@ -121,7 +121,7 @@ func TestPickerPickFirstTwo(t *testing.T) { // TestPickerPickTriggerTFConnect covers that if the picked SubConn is // TransientFailures, all SubConns until a non-TransientFailure are queued for // Connect(). 
-func TestPickerPickTriggerTFConnect(t *testing.T) { +func (s) TestPickerPickTriggerTFConnect(t *testing.T) { ring := newTestRing([]connectivity.State{ connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, @@ -152,7 +152,7 @@ func TestPickerPickTriggerTFConnect(t *testing.T) { // TestPickerPickTriggerTFReturnReady covers that if the picked SubConn is // TransientFailure, SubConn 2 and 3 are TransientFailure, 4 is Ready. SubConn 2 // and 3 will Connect(), and 4 will be returned. -func TestPickerPickTriggerTFReturnReady(t *testing.T) { +func (s) TestPickerPickTriggerTFReturnReady(t *testing.T) { ring := newTestRing([]connectivity.State{ connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Ready, }) @@ -178,7 +178,7 @@ func TestPickerPickTriggerTFReturnReady(t *testing.T) { // TransientFailure, SubConn 2 is TransientFailure, 3 is Idle (init Idle). Pick // will be queue, SubConn 3 will Connect(), SubConn 4 and 5 (in TransientFailre) // will not queue a Connect. 
-func TestPickerPickTriggerTFWithIdle(t *testing.T) { +func (s) TestPickerPickTriggerTFWithIdle(t *testing.T) { ring := newTestRing([]connectivity.State{ connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, }) @@ -211,7 +211,7 @@ func TestPickerPickTriggerTFWithIdle(t *testing.T) { } } -func TestNextSkippingDuplicatesNoDup(t *testing.T) { +func (s) TestNextSkippingDuplicatesNoDup(t *testing.T) { testRing := newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}) tests := []struct { name string @@ -265,7 +265,7 @@ func addDups(r *ring, count int) *ring { return &ring{items: items} } -func TestNextSkippingDuplicatesMoreDup(t *testing.T) { +func (s) TestNextSkippingDuplicatesMoreDup(t *testing.T) { testRing := newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}) // Make a new ring with duplicate SubConns. dupTestRing := addDups(testRing, 3) @@ -274,7 +274,7 @@ func TestNextSkippingDuplicatesMoreDup(t *testing.T) { } } -func TestNextSkippingDuplicatesOnlyDup(t *testing.T) { +func (s) TestNextSkippingDuplicatesOnlyDup(t *testing.T) { testRing := newTestRing([]connectivity.State{connectivity.Idle}) // Make a new ring with only duplicate SubConns. 
dupTestRing := addDups(testRing, 3) diff --git a/xds/internal/balancer/ringhash/ring_test.go b/xds/internal/balancer/ringhash/ring_test.go index 2d664e05bb24..a47215339cf5 100644 --- a/xds/internal/balancer/ringhash/ring_test.go +++ b/xds/internal/balancer/ringhash/ring_test.go @@ -31,7 +31,7 @@ func testAddr(addr string, weight uint32) resolver.Address { return resolver.Address{Addr: addr, Metadata: weight} } -func TestRingNew(t *testing.T) { +func (s) TestRingNew(t *testing.T) { testAddrs := []resolver.Address{ testAddr("a", 3), testAddr("b", 3), @@ -75,7 +75,7 @@ func equalApproximately(x, y float64) bool { return delta/mean < 0.25 } -func TestRingPick(t *testing.T) { +func (s) TestRingPick(t *testing.T) { r, _ := newRing(map[resolver.Address]*subConn{ {Addr: "a", Metadata: uint32(3)}: {addr: "a"}, {Addr: "b", Metadata: uint32(3)}: {addr: "b"}, @@ -97,7 +97,7 @@ func TestRingPick(t *testing.T) { } } -func TestRingNext(t *testing.T) { +func (s) TestRingNext(t *testing.T) { r, _ := newRing(map[resolver.Address]*subConn{ {Addr: "a", Metadata: uint32(3)}: {addr: "a"}, {Addr: "b", Metadata: uint32(3)}: {addr: "b"}, diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go index 22586c60b154..cac8476b9290 100644 --- a/xds/internal/balancer/ringhash/ringhash_test.go +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" ) @@ -98,7 +99,15 @@ func setupTest(t *testing.T, addrs []resolver.Address) (*testutils.TestClientCon return cc, b, p1 } -func TestOneSubConn(t *testing.T) { +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestOneSubConn(t *testing.T) { wantAddr1 := 
resolver.Address{Addr: testBackendAddrStrs[0]} cc, b, p0 := setupTest(t, []resolver.Address{wantAddr1}) ring0 := p0.(*picker).ring @@ -135,7 +144,7 @@ func TestOneSubConn(t *testing.T) { // TestThreeBackendsAffinity covers that there are 3 SubConns, RPCs with the // same hash always pick the same SubConn. When the one picked is down, another // one will be picked. -func TestThreeSubConnsAffinity(t *testing.T) { +func (s) TestThreeSubConnsAffinity(t *testing.T) { wantAddrs := []resolver.Address{ {Addr: testBackendAddrStrs[0]}, {Addr: testBackendAddrStrs[1]}, @@ -228,7 +237,7 @@ func TestThreeSubConnsAffinity(t *testing.T) { // TestThreeBackendsAffinity covers that there are 3 SubConns, RPCs with the // same hash always pick the same SubConn. Then try different hash to pick // another backend, and verify the first hash still picks the first backend. -func TestThreeSubConnsAffinityMultiple(t *testing.T) { +func (s) TestThreeSubConnsAffinityMultiple(t *testing.T) { wantAddrs := []resolver.Address{ {Addr: testBackendAddrStrs[0]}, {Addr: testBackendAddrStrs[1]}, @@ -299,7 +308,7 @@ func TestThreeSubConnsAffinityMultiple(t *testing.T) { } } -func TestAddrWeightChange(t *testing.T) { +func (s) TestAddrWeightChange(t *testing.T) { wantAddrs := []resolver.Address{ {Addr: testBackendAddrStrs[0]}, {Addr: testBackendAddrStrs[1]}, @@ -368,7 +377,7 @@ func TestAddrWeightChange(t *testing.T) { // overall state is TransientFailure, the SubConns turning Idle will trigger the // next SubConn in the ring to Connect(). But not when the overall state is not // TransientFailure. 
-func TestSubConnToConnectWhenOverallTransientFailure(t *testing.T) { +func (s) TestSubConnToConnectWhenOverallTransientFailure(t *testing.T) { wantAddrs := []resolver.Address{ {Addr: testBackendAddrStrs[0]}, {Addr: testBackendAddrStrs[1]}, @@ -431,7 +440,7 @@ func TestSubConnToConnectWhenOverallTransientFailure(t *testing.T) { } } -func TestConnectivityStateEvaluatorRecordTransition(t *testing.T) { +func (s) TestConnectivityStateEvaluatorRecordTransition(t *testing.T) { tests := []struct { name string from, to []connectivity.State From f14d611253e03f029fe143abef00101838c6086a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 15 Jun 2022 14:42:38 -0700 Subject: [PATCH 526/998] resolver: minor improvements to AddressMap (#5426) --- resolver/map.go | 55 +++++++++++++++++++++++++++++++++----------- resolver/map_test.go | 21 +++++++++++++++++ 2 files changed, 63 insertions(+), 13 deletions(-) diff --git a/resolver/map.go b/resolver/map.go index e87ecd0eeb38..efcb7f3efd82 100644 --- a/resolver/map.go +++ b/resolver/map.go @@ -28,25 +28,40 @@ type addressMapEntry struct { // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. type AddressMap struct { - m map[string]addressMapEntryList + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. 
+ // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} } type addressMapEntryList []*addressMapEntry // NewAddressMap creates a new AddressMap. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[string]addressMapEntryList)} + return &AddressMap{m: make(map[Address]addressMapEntryList)} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. func (l addressMapEntryList) find(addr Address) int { - if len(l) == 0 { - return -1 - } for i, entry := range l { - if entry.addr.ServerName == addr.ServerName && - entry.addr.Attributes.Equal(addr.Attributes) { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { return i } } @@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int { // Get returns the value for the address in the map, if present. func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } @@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { // Set updates or adds the value to the address in the map. 
func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. 
+func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/resolver/map_test.go b/resolver/map_test.go index 26d539bcd6d3..0b0ac1667902 100644 --- a/resolver/map_test.go +++ b/resolver/map_test.go @@ -140,3 +140,24 @@ func (s) TestAddressMap_Keys(t *testing.T) { t.Fatalf("addrMap.Keys returned unexpected elements (-want, +got):\n%v", d) } } + +func (s) TestAddressMap_Values(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + addrMap.Set(addr2, 2) + addrMap.Set(addr3, 3) + addrMap.Set(addr4, 4) + addrMap.Set(addr5, 5) + addrMap.Set(addr6, 6) + addrMap.Set(addr7, 7) // aliases addr1 + + want := []int{2, 3, 4, 5, 6, 7} + var got []int + for _, v := range addrMap.Values() { + got = append(got, v.(int)) + } + sort.Ints(got) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("addrMap.Values returned unexpected elements (-want, +got):\n%v", diff) + } +} From 1dabf5459db7cdbb160fe54e1a2efa030a2193bc Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Thu, 16 Jun 2022 16:45:19 -0700 Subject: [PATCH 527/998] test/kokoro: use standard TESTING_VERSION in the new framework builds (#5434) --- test/kokoro/psm-security.sh | 11 +++++++---- test/kokoro/xds_k8s_lb.sh | 11 +++++++---- test/kokoro/xds_url_map.sh | 4 +++- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/test/kokoro/psm-security.sh b/test/kokoro/psm-security.sh index c76f0860df9f..0606d8921445 100755 --- a/test/kokoro/psm-security.sh +++ b/test/kokoro/psm-security.sh @@ -29,6 +29,7 @@ readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" # SERVER_IMAGE_NAME: Test server Docker image name # CLIENT_IMAGE_NAME: Test client Docker image name # GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test, f.e. 
v1.42.x, master # Arguments: # None # Outputs: @@ -41,10 +42,9 @@ build_test_app_docker_images() { gcloud -q auth configure-docker docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" docker push "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" - if [[ -n $KOKORO_JOB_NAME ]]; then - branch_name=$(echo "$KOKORO_JOB_NAME" | sed -E 's|^grpc/go/([^/]+)/.*|\1|') - tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" - tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" + if is_version_branch "${TESTING_VERSION}"; then + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" fi } @@ -87,6 +87,8 @@ build_docker_images_if_needed() { # SERVER_IMAGE_NAME: Test server Docker image name # CLIENT_IMAGE_NAME: Test client Docker image name # GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test: used by the framework to determine the supported PSM +# features. # Arguments: # Test case name # Outputs: @@ -103,6 +105,7 @@ run_test() { --kube_context="${KUBE_CONTEXT}" \ --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --testing_version="${TESTING_VERSION}" \ --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ --force_cleanup \ --nocheck_local_certs diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index 6e5fd68904cb..9226f9f129f0 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -29,6 +29,7 @@ readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" # SERVER_IMAGE_NAME: Test server Docker image name # CLIENT_IMAGE_NAME: Test client Docker image name # GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test, f.e. 
v1.42.x, master # Arguments: # None # Outputs: @@ -41,10 +42,9 @@ build_test_app_docker_images() { gcloud -q auth configure-docker docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" docker push "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" - if [[ -n $KOKORO_JOB_NAME ]]; then - branch_name=$(echo "$KOKORO_JOB_NAME" | sed -E 's|^grpc/go/([^/]+)/.*|\1|') - tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" - tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${branch_name}" + if is_version_branch "${TESTING_VERSION}"; then + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" fi } @@ -87,6 +87,8 @@ build_docker_images_if_needed() { # SERVER_IMAGE_NAME: Test server Docker image name # CLIENT_IMAGE_NAME: Test client Docker image name # GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test: used by the framework to determine the supported PSM +# features. # Arguments: # Test case name # Outputs: @@ -103,6 +105,7 @@ run_test() { --kube_context="${KUBE_CONTEXT}" \ --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --testing_version="${TESTING_VERSION}" \ --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ --force_cleanup set +x diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh index 34805d43a13a..193a81bcd7b2 100755 --- a/test/kokoro/xds_url_map.sh +++ b/test/kokoro/xds_url_map.sh @@ -72,6 +72,8 @@ build_docker_images_if_needed() { # TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report # CLIENT_IMAGE_NAME: Test client Docker image name # GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test: used by the framework to determine the supported PSM +# features. 
# Arguments: # Test case name # Outputs: @@ -87,7 +89,7 @@ run_test() { --flagfile="${TEST_DRIVER_FLAGFILE}" \ --kube_context="${KUBE_CONTEXT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ - --testing_version=$(echo "$KOKORO_JOB_NAME" | sed -E 's|^grpc/go/([^/]+)/.*|\1|') \ + --testing_version="${TESTING_VERSION}" \ --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ --flagfile="config/url-map.cfg" set +x From 29d9970c51d68a9b3e645d40eab0f3ad8f9185ee Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 17 Jun 2022 13:46:47 -0400 Subject: [PATCH 528/998] xds: Outlier Detection configuration in Cluster Resolver Balancer (#5371) xds: Outlier Detection configuration in Cluster Resolver Balancer --- internal/internal.go | 16 + .../balancer/cdsbalancer/cdsbalancer.go | 6 +- .../cdsbalancer/cdsbalancer_security_test.go | 15 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 26 +- .../clusterresolver/clusterresolver_test.go | 123 +++++++- .../balancer/clusterresolver/config.go | 4 +- .../balancer/clusterresolver/configbuilder.go | 44 ++- .../clusterresolver/configbuilder_test.go | 216 +++++++++++++ .../balancer/outlierdetection/balancer.go | 106 +++++++ .../outlierdetection/balancer_test.go | 289 ++++++++++++++++++ 10 files changed, 816 insertions(+), 29 deletions(-) create mode 100644 xds/internal/balancer/outlierdetection/balancer.go create mode 100644 xds/internal/balancer/outlierdetection/balancer_test.go diff --git a/internal/internal.go b/internal/internal.go index 59352cc958e8..83018be7c718 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -117,6 +117,22 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() + + // RegisterOutlierDetectionBalancerForTesting registers the Outlier + // Detection Balancer for testing purposes, regardless of the Outlier + // Detection environment variable. 
+ // + // TODO: Remove this function once the Outlier Detection env var is removed. + RegisterOutlierDetectionBalancerForTesting func() + + // UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier + // Detection Balancer for testing purposes. This is needed because there is + // no way to unregister the Outlier Detection Balancer after registering it + // solely for testing purposes using + // RegisterOutlierDetectionBalancerForTesting(). + // + // TODO: Remove this function once the Outlier Detection env var is removed. + UnregisterOutlierDetectionBalancerForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index d057ed66a53c..14c1c2e769aa 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -272,12 +272,12 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } -func outlierDetectionToConfig(od *xdsresource.OutlierDetection) *outlierdetection.LBConfig { // Already validated - no need to return error +func outlierDetectionToConfig(od *xdsresource.OutlierDetection) outlierdetection.LBConfig { // Already validated - no need to return error if od == nil { // "If the outlier_detection field is not set in the Cluster message, a // "no-op" outlier_detection config will be generated, with interval set // to the maximum possible value and all other fields unset." 
- A50 - return &outlierdetection.LBConfig{ + return outlierdetection.LBConfig{ Interval: 1<<63 - 1, } } @@ -308,7 +308,7 @@ func outlierDetectionToConfig(od *xdsresource.OutlierDetection) *outlierdetectio } } - return &outlierdetection.LBConfig{ + return outlierdetection.LBConfig{ Interval: od.Interval, BaseEjectionTime: od.BaseEjectionTime, MaxEjectionTime: od.MaxEjectionTime, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index c58990ab34d1..685e77adf463 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -250,7 +251,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -306,7 +307,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -462,7 +463,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -496,7 +497,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -549,7 +550,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -599,7 +600,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -677,7 +678,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { SubjectAltNameMatchers: testSANMatchers, }, } - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index b15481f318b2..a88081e1d0ec 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -56,7 +56,7 @@ var ( ServerURI: "self_server", CredsType: "self_creds", } - noopODLBCfg = &outlierdetection.LBConfig{ + noopODLBCfg = outlierdetection.LBConfig{ Interval: 1<<63 - 1, } ) @@ -215,7 +215,7 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) 
balancer.ClientConnState { // edsCCS is a helper function to construct a good update passed from the // cdsBalancer to the edsBalancer. -func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *internalserviceconfig.BalancerConfig, odConfig *outlierdetection.LBConfig) balancer.ClientConnState { +func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *internalserviceconfig.BalancerConfig, odConfig outlierdetection.LBConfig) balancer.ClientConnState { discoveryMechanism := clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: service, @@ -421,7 +421,7 @@ func (s) TestHandleClusterUpdate(t *testing.T) { FailurePercentageMinimumHosts: 5, FailurePercentageRequestVolume: 50, }}, - wantCCS: edsCCS(serviceName, nil, false, nil, &outlierdetection.LBConfig{ + wantCCS: edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{ Interval: 10 * time.Second, BaseEjectionTime: 30 * time.Second, MaxEjectionTime: 300 * time.Second, @@ -506,7 +506,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -591,7 +591,7 @@ func (s) TestResolverError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -640,7 +640,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -675,7 +675,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // the service's counter with the new max requests. var maxRequests uint32 = 1 cdsUpdate := xdsresource.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} - wantCCS := edsCCS(clusterName, &maxRequests, false, nil, nil) + wantCCS := edsCCS(clusterName, &maxRequests, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -708,7 +708,7 @@ func (s) TestClose(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -779,7 +779,7 @@ func (s) TestExitIdle(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, nil) + wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -846,7 +846,7 @@ func (s) TestOutlierDetectionToConfig(t *testing.T) { tests := []struct { name string od *xdsresource.OutlierDetection - odLBCfgWant *outlierdetection.LBConfig + odLBCfgWant outlierdetection.LBConfig }{ // "if the outlier_detection field is not set in the Cluster resource, // a "no-op" outlier_detection config will be generated in the @@ -876,7 +876,7 @@ func (s) TestOutlierDetectionToConfig(t *testing.T) { FailurePercentageMinimumHosts: 5, FailurePercentageRequestVolume: 50, }, - odLBCfgWant: &outlierdetection.LBConfig{ + odLBCfgWant: outlierdetection.LBConfig{ Interval: 10 * time.Second, BaseEjectionTime: 30 * time.Second, MaxEjectionTime: 300 * time.Second, @@ -909,7 +909,7 @@ func (s) TestOutlierDetectionToConfig(t *testing.T) { FailurePercentageMinimumHosts: 5, FailurePercentageRequestVolume: 50, }, - odLBCfgWant: &outlierdetection.LBConfig{ + odLBCfgWant: outlierdetection.LBConfig{ Interval: 10 * time.Second, BaseEjectionTime: 30 * time.Second, MaxEjectionTime: 300 * time.Second, @@ 
-939,7 +939,7 @@ func (s) TestOutlierDetectionToConfig(t *testing.T) { FailurePercentageMinimumHosts: 5, FailurePercentageRequestVolume: 50, }, - odLBCfgWant: &outlierdetection.LBConfig{ + odLBCfgWant: outlierdetection.LBConfig{ Interval: 10 * time.Second, BaseEjectionTime: 30 * time.Second, MaxEjectionTime: 300 * time.Second, diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 3b0843f6807e..6e96f2e31f8a 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -25,12 +25,21 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -53,7 +62,7 @@ var ( Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: "endpoint1"}}, - ID: internal.LocalityID{Zone: "zone"}, + ID: xdsinternal.LocalityID{Zone: "zone"}, Priority: 1, Weight: 100, }, @@ -130,6 +139,18 @@ func (f *fakeChildBalancer) Close() {} func (f *fakeChildBalancer) 
ExitIdle() {} +func (f *fakeChildBalancer) waitForClientConnStateChangeVerifyBalancerConfig(ctx context.Context, wantCCS balancer.ClientConnState) error { + ccs, err := f.clientConnState.Receive(ctx) + if err != nil { + return err + } + gotCCS := ccs.(balancer.ClientConnState) + if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(balancer.ClientConnState{}, "ResolverState")); diff != "" { + return fmt.Errorf("received unexpected ClientConnState, diff (-got +want): %v", diff) + } + return nil +} + func (f *fakeChildBalancer) waitForClientConnStateChange(ctx context.Context) error { _, err := f.clientConnState.Receive(ctx) if err != nil { @@ -500,3 +521,101 @@ func newLBConfigWithOneEDS(edsServiceName string) *LBConfig { }}, } } + +func newLBConfigWithOneEDSAndOutlierDetection(edsServiceName string, odCfg outlierdetection.LBConfig) *LBConfig { + lbCfg := newLBConfigWithOneEDS(edsServiceName) + lbCfg.DiscoveryMechanisms[0].OutlierDetection = odCfg + return lbCfg +} + +// TestOutlierDetection tests the Balancer Config sent down to the child +// priority balancer when Outlier Detection is turned on. The Priority +// Configuration sent downward should have a top level Outlier Detection Policy +// for each priority. 
+func (s) TestOutlierDetection(t *testing.T) { + oldOutlierDetection := envconfig.XDSOutlierDetection + envconfig.XDSOutlierDetection = true + internal.RegisterOutlierDetectionBalancerForTesting() + defer func() { + envconfig.XDSOutlierDetection = oldOutlierDetection + }() + + edsLBCh := testutils.NewChannel() + xdsC, cleanup := setup(edsLBCh) + defer cleanup() + builder := balancer.Get(Name) + edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) + if edsB == nil { + t.Fatalf("builder.Build(%s) failed and returned nil", Name) + } + defer edsB.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Update Cluster Resolver with Client Conn State with Outlier Detection + // configuration present. This is what will be passed down to this balancer, + // as CDS Balancer gets the Cluster Update and converts the Outlier + // Detection data to an Outlier Detection configuration and sends it to this + // level. + if err := edsB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), + BalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSServcie, noopODCfg), + }); err != nil { + t.Fatal(err) + } + if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { + t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) + } + + // Invoke EDS Callback - causes child balancer to be built and then + // UpdateClientConnState called on it with Outlier Detection as a direct + // child. + xdsC.InvokeWatchEDSCallback("", defaultEndpointsUpdate, nil) + edsLB, err := waitForNewChildLB(ctx, edsLBCh) + if err != nil { + t.Fatal(err) + } + + localityID := xdsinternal.LocalityID{Zone: "zone"} + // The priority configuration generated should have Outlier Detection as a + // direct child due to Outlier Detection being turned on. 
+ pCfgWant := &priority.LBConfig{ + Children: map[string]*priority.Child{ + "priority-0-0": { + Config: &internalserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: "test-eds-service-name", + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(localityID.ToString): { + Weight: 100, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + }, + }, + }, + IgnoreReresolutionRequests: true, + }, + }, + Priorities: []string{"priority-0-0"}, + } + + if err := edsLB.waitForClientConnStateChangeVerifyBalancerConfig(ctx, balancer.ClientConnState{ + BalancerConfig: pCfgWant, + }); err != nil { + t.Fatalf("EDS impl got unexpected update: %v", err) + } +} diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 26e2812d2f62..cb870027a4e4 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -105,7 +105,7 @@ type DiscoveryMechanism struct { DNSHostname string `json:"dnsHostname,omitempty"` // OutlierDetection is the Outlier Detection LB configuration for this // priority. - OutlierDetection *outlierdetection.LBConfig `json:"outlierDetection,omitempty"` + OutlierDetection outlierdetection.LBConfig `json:"outlierDetection,omitempty"` } // Equal returns whether the DiscoveryMechanism is the same with the parameter. 
@@ -121,7 +121,7 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { return false case dm.DNSHostname != b.DNSHostname: return false - case !dm.OutlierDetection.EqualIgnoringChildPolicy(b.OutlierDetection): + case !dm.OutlierDetection.EqualIgnoringChildPolicy(&b.OutlierDetection): return false } diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 7eb76dd51d7f..dc91d7fbd139 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -26,11 +26,13 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -129,29 +131,67 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi return nil, nil, err } retConfig.Priorities = append(retConfig.Priorities, names...) + retAddrs = append(retAddrs, addrs...) + var odCfgs map[string]*outlierdetection.LBConfig + if envconfig.XDSOutlierDetection { + odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.OutlierDetection) + for n, c := range odCfgs { + retConfig.Children[n] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, + // Ignore all re-resolution from EDS children. 
+ IgnoreReresolutionRequests: true, + } + } + continue + } for n, c := range configs { retConfig.Children[n] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: c}, // Ignore all re-resolution from EDS children. IgnoreReresolutionRequests: true, } + } - retAddrs = append(retAddrs, addrs...) case DiscoveryMechanismTypeLogicalDNS: name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) retConfig.Priorities = append(retConfig.Priorities, name) + retAddrs = append(retAddrs, addrs...) + var odCfg *outlierdetection.LBConfig + if envconfig.XDSOutlierDetection { + odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.OutlierDetection) + retConfig.Children[name] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, + // Not ignore re-resolution from DNS children, they will trigger + // DNS to re-resolve. + IgnoreReresolutionRequests: false, + } + continue + } retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config}, // Not ignore re-resolution from DNS children, they will trigger // DNS to re-resolve. IgnoreReresolutionRequests: false, } - retAddrs = append(retAddrs, addrs...) 
} } return retConfig, retAddrs, nil } +func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig { + odCfgs := make(map[string]*outlierdetection.LBConfig, len(ciCfgs)) + for n, c := range ciCfgs { + odCfgs[n] = makeClusterImplOutlierDetectionChild(c, odCfg) + } + return odCfgs +} + +func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) *outlierdetection.LBConfig { + odCfgRet := odCfg + odCfgRet.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: ciCfg} + return &odCfgRet +} + func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { // Endpoint picking policy for DNS is hardcoded to pick_first. const childPolicy = "pick_first" diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 80d46fa0119d..cfe7de65d1a0 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -31,11 +31,13 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -68,6 +70,10 @@ var ( }) return out })} + + noopODCfg = 
outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + } ) func init() { @@ -312,6 +318,142 @@ func TestBuildPriorityConfig(t *testing.T) { } } +// TestBuildPriorityConfigWithOutlierDetection tests the priority config +// generation with Outlier Detection toggled on. Each top level balancer per +// priority should be an Outlier Detection balancer, with a Cluster Impl +// Balancer as a child. +func TestBuildPriorityConfigWithOutlierDetection(t *testing.T) { + oldOutlierDetection := envconfig.XDSOutlierDetection + envconfig.XDSOutlierDetection = true + defer func() { + envconfig.XDSOutlierDetection = oldOutlierDetection + }() + + gotConfig, _, _ := buildPriorityConfig([]priorityConfig{ + { + // EDS - OD config should be the top level for both of the EDS + // priorities balancer This EDS priority will have multiple sub + // priorities. The Outlier Detection configuration specified in the + // Discovery Mechanism should be the top level for each sub + // priorities balancer. + mechanism: DiscoveryMechanism{ + Cluster: testClusterName, + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, + OutlierDetection: noopODCfg, + }, + edsResp: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + testLocalitiesP0[0], + testLocalitiesP0[1], + testLocalitiesP1[0], + testLocalitiesP1[1], + }, + }, + childNameGen: newNameGenerator(0), + }, + { + // This OD config should wrap the Logical DNS priorities balancer. 
+ mechanism: DiscoveryMechanism{ + Cluster: testClusterName2, + Type: DiscoveryMechanismTypeLogicalDNS, + OutlierDetection: noopODCfg, + }, + addresses: testAddressStrs[4], + childNameGen: newNameGenerator(1), + }, + }, nil) + + wantConfig := &priority.LBConfig{ + Children: map[string]*priority.Child{ + "priority-0-0": { + Config: &internalserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + DropCategories: []clusterimpl.DropConfig{}, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(testLocalityIDs[0].ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + assertString(testLocalityIDs[1].ToString): { + Weight: 80, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + }, + }, + }, + IgnoreReresolutionRequests: true, + }, + "priority-0-1": { + Config: &internalserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + DropCategories: []clusterimpl.DropConfig{}, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: weightedtarget.Name, + Config: &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + assertString(testLocalityIDs[2].ToString): { + Weight: 20, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + assertString(testLocalityIDs[3].ToString): { + Weight: 80, + ChildPolicy: 
&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + }, + }, + }, + }, + }, + }, + }, + IgnoreReresolutionRequests: true, + }, + "priority-1": { + Config: &internalserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName2, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: "pick_first"}, + }, + }, + }, + }, + IgnoreReresolutionRequests: false, + }, + }, + Priorities: []string{"priority-0-0", "priority-0-1", "priority-1"}, + } + if diff := cmp.Diff(gotConfig, wantConfig); diff != "" { + t.Errorf("buildPriorityConfig() diff (-got +want) %v", diff) + } +} + func TestBuildClusterImplConfigForDNS(t *testing.T) { gotName, gotConfig, gotAddrs := buildClusterImplConfigForDNS(newNameGenerator(3), testAddressStrs[0], DiscoveryMechanism{Cluster: testClusterName2, Type: DiscoveryMechanismTypeLogicalDNS}) wantName := "priority-3" @@ -975,3 +1117,77 @@ func testAddrWithAttrs(addrStr string, weight *uint32, priority string, lID *int addr = hierarchy.Set(addr, path) return addr } + +func TestConvertClusterImplMapToOutlierDetection(t *testing.T) { + tests := []struct { + name string + ciCfgsMap map[string]*clusterimpl.LBConfig + odCfg outlierdetection.LBConfig + wantODCfgs map[string]*outlierdetection.LBConfig + }{ + { + name: "single-entry-noop", + ciCfgsMap: map[string]*clusterimpl.LBConfig{ + "child1": { + Cluster: "cluster1", + }, + }, + odCfg: outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + }, + wantODCfgs: map[string]*outlierdetection.LBConfig{ + "child1": { + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: "cluster1", + }, + }, + }, + }, + }, + { + name: "multiple-entries-noop", + ciCfgsMap: map[string]*clusterimpl.LBConfig{ + "child1": { + Cluster: 
"cluster1", + }, + "child2": { + Cluster: "cluster2", + }, + }, + odCfg: outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + }, + wantODCfgs: map[string]*outlierdetection.LBConfig{ + "child1": { + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: "cluster1", + }, + }, + }, + "child2": { + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: "cluster2", + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := convertClusterImplMapToOutlierDetection(test.ciCfgsMap, test.odCfg) + if diff := cmp.Diff(got, test.wantODCfgs); diff != "" { + t.Fatalf("convertClusterImplMapToOutlierDetection() diff(-got +want) %v", diff) + } + }) + } +} diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go new file mode 100644 index 000000000000..674075c4b3f9 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package outlierdetection implements a balancer that implements +// Outlier Detection. 
+package outlierdetection + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the outlier detection balancer. +const Name = "outlier_detection_experimental" + +func init() { + if envconfig.XDSOutlierDetection { + balancer.Register(bb{}) + } + // TODO: Remove these once the Outlier Detection env var is removed. + internal.RegisterOutlierDetectionBalancerForTesting = func() { + balancer.Register(bb{}) + } + internal.UnregisterOutlierDetectionBalancerForTesting = func() { + internal.BalancerUnregister(Name) + } +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + return nil +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg *LBConfig + if err := json.Unmarshal(s, &lbCfg); err != nil { // Validates child config if present as well. + return nil, fmt.Errorf("xds: unable to unmarshal LBconfig: %s, error: %v", string(s), err) + } + + // Note: in the xds flow, these validations will never fail. The xdsclient + // performs the same validations as here on the xds Outlier Detection + // resource before parsing into the internal struct which gets marshaled + // into JSON before calling this function. A50 defines two separate places + // for these validations to take place, the xdsclient and this ParseConfig + // method. "When parsing a config from JSON, if any of these requirements is + // violated, that should be treated as a parsing error." - A50 + + switch { + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + // Approximately 290 years is the maximum time that time.Duration (int64) + // can represent. 
The restrictions on the protobuf.Duration field are to be + // within +-10000 years. Thus, just check for negative values. + case lbCfg.Interval < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.interval = %s; must be >= 0", lbCfg.Interval) + case lbCfg.BaseEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.base_ejection_time = %s; must be >= 0", lbCfg.BaseEjectionTime) + case lbCfg.MaxEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_time = %s; must be >= 0", lbCfg.MaxEjectionTime) + // "The fields max_ejection_percent, + // success_rate_ejection.enforcement_percentage, + // failure_percentage_ejection.threshold, and + // failure_percentage.enforcement_percentage must have values less than or + // equal to 100." - A50 + case lbCfg.MaxEjectionPercent > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_percent = %v; must be <= 100", lbCfg.MaxEjectionPercent) + case lbCfg.SuccessRateEjection != nil && lbCfg.SuccessRateEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage = %v; must be <= 100", lbCfg.SuccessRateEjection.EnforcementPercentage) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.Threshold > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = %v; must be <= 100", lbCfg.FailurePercentageEjection.Threshold) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = %v; must be <= 100", lbCfg.FailurePercentageEjection.EnforcementPercentage) + case lbCfg.ChildPolicy == nil: + return nil, errors.New("OutlierDetectionLoadBalancingConfig.child_policy must be present") + } + + return lbCfg, nil +} + +func (bb) Name() 
string { + return Name +} diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go new file mode 100644 index 000000000000..106e2b64dbc2 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -0,0 +1,289 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package outlierdetection + +import ( + "encoding/json" + "errors" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/grpctest" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestParseConfig verifies the ParseConfig() method in the Outlier Detection +// Balancer. 
+func (s) TestParseConfig(t *testing.T) { + parser := bb{} + + tests := []struct { + name string + input string + wantCfg serviceconfig.LoadBalancingConfig + wantErr string + }{ + { + name: "noop-lb-config", + input: `{ + "interval": 9223372036854775807, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: 1<<63 - 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "good-lb-config", + input: `{ + "interval": 10000000000, + "baseEjectionTime": 30000000000, + "maxEjectionTime": 300000000000, + "maxEjectionPercent": 10, + "successRateEjection": { + "stdevFactor": 1900, + "enforcementPercentage": 100, + "minimumHosts": 5, + "requestVolume": 100 + }, + "failurePercentageEjection": { + "threshold": 85, + "enforcementPercentage": 5, + "minimumHosts": 5, + "requestVolume": 50 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 85, + EnforcementPercentage: 5, + MinimumHosts: 5, + RequestVolume: 50, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "interval-is-negative", + input: `{"interval": -10}`, + wantErr: "OutlierDetectionLoadBalancingConfig.interval = -10ns; must be >= 0", + }, + { + name: "base-ejection-time-is-negative", + input: `{"baseEjectionTime": -10}`, + wantErr: 
"OutlierDetectionLoadBalancingConfig.base_ejection_time = -10ns; must be >= 0", + }, + { + name: "max-ejection-time-is-negative", + input: `{"maxEjectionTime": -10}`, + wantErr: "OutlierDetectionLoadBalancingConfig.max_ejection_time = -10ns; must be >= 0", + }, + { + name: "max-ejection-percent-is-greater-than-100", + input: `{"maxEjectionPercent": 150}`, + wantErr: "OutlierDetectionLoadBalancingConfig.max_ejection_percent = 150; must be <= 100", + }, + { + name: "enforcement-percentage-success-rate-is-greater-than-100", + input: `{ + "successRateEjection": { + "enforcementPercentage": 150 + } + }`, + wantErr: "OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage = 150; must be <= 100", + }, + { + name: "failure-percentage-threshold-is-greater-than-100", + input: `{ + "failurePercentageEjection": { + "threshold": 150 + } + }`, + wantErr: "OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = 150; must be <= 100", + }, + { + name: "enforcement-percentage-failure-percentage-ejection-is-greater-than-100", + input: `{ + "failurePercentageEjection": { + "enforcementPercentage": 150 + } + }`, + wantErr: "OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = 150; must be <= 100", + }, + { + name: "child-policy-not-present", + input: `{ + "interval": 10000000000, + "baseEjectionTime": 30000000000, + "maxEjectionTime": 300000000000, + "maxEjectionPercent": 10, + "successRateEjection": { + "stdevFactor": 1900, + "enforcementPercentage": 100, + "minimumHosts": 5, + "requestVolume": 100 + }, + "failurePercentageEjection": { + "threshold": 85, + "enforcementPercentage": 5, + "minimumHosts": 5, + "requestVolume": 50 + } + }`, + wantErr: "OutlierDetectionLoadBalancingConfig.child_policy must be present", + }, + { + name: "child-policy-present-but-parse-error", + input: `{ + "interval": 9223372036854775807, + "childPolicy": [ + { + "errParseConfigBalancer": { + "cluster": "test_cluster" + } + } + ] + 
}`, + wantErr: "error parsing loadBalancingConfig for policy \"errParseConfigBalancer\"", + }, + { + name: "no-supported-child-policy", + input: `{ + "interval": 9223372036854775807, + "childPolicy": [ + { + "doesNotExistBalancer": { + "cluster": "test_cluster" + } + } + ] + }`, + wantErr: "invalid loadBalancingConfig: no supported policies found", + }, + { + name: "child-policy", + input: `{ + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotCfg, gotErr := parser.ParseConfig(json.RawMessage(test.input)) + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if test.wantErr != "" { + return + } + if diff := cmp.Diff(gotCfg, test.wantCfg); diff != "" { + t.Fatalf("parseConfig(%v) got unexpected output, diff (-got +want): %v", string(test.input), diff) + } + }) + } +} + +func (lbc *LBConfig) Equal(lbc2 *LBConfig) bool { + if !lbc.EqualIgnoringChildPolicy(lbc2) { + return false + } + return cmp.Equal(lbc.ChildPolicy, lbc2.ChildPolicy) +} + +func init() { + balancer.Register(errParseConfigBuilder{}) +} + +type errParseConfigBuilder struct{} + +func (errParseConfigBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return nil +} + +func (errParseConfigBuilder) Name() string { + return "errParseConfigBalancer" +} + +func (errParseConfigBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return nil, errors.New("some error") +} From 
3e7b97febc7fa0d9804786b60c4de14d700c6956 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 17 Jun 2022 18:14:31 +0000 Subject: [PATCH 529/998] xds/priority: bug fix and minor behavior change (#5417) --- balancer/base/balancer.go | 4 + internal/testutils/balancer.go | 48 +++- .../balancer/clusterimpl/balancer_test.go | 250 +++++++++--------- .../balancer/clusterresolver/eds_impl_test.go | 135 +++++----- .../balancer/clusterresolver/priority_test.go | 43 ++- xds/internal/balancer/priority/balancer.go | 74 ++++-- .../balancer/priority/balancer_child.go | 11 +- .../balancer/priority/balancer_priority.go | 95 +------ .../balancer/priority/balancer_test.go | 124 ++++++++- 9 files changed, 463 insertions(+), 321 deletions(-) diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index a67074a3ad06..e8dfc828aaac 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index e8c485e85566..f23b215a7b35 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -188,9 +188,9 @@ func (tcc *TestClientConn) WaitForErrPicker(ctx context.Context) error { } // WaitForPickerWithErr waits until an error picker is pushed to this -// ClientConn with the error matching the 
wanted error. Also drains the -// matching entry from the state channel. Returns an error if the provided -// context expires, including the last received picker error (if any). +// ClientConn with the error matching the wanted error. Returns an error if +// the provided context expires, including the last received picker error (if +// any). func (tcc *TestClientConn) WaitForPickerWithErr(ctx context.Context, want error) error { lastErr := errors.New("received no picker") for { @@ -198,7 +198,6 @@ func (tcc *TestClientConn) WaitForPickerWithErr(ctx context.Context, want error) case <-ctx.Done(): return fmt.Errorf("timeout when waiting for an error picker with %v; last picker error: %v", want, lastErr) case picker := <-tcc.NewPickerCh: - <-tcc.NewStateCh for i := 0; i < 5; i++ { if _, lastErr = picker.Pick(balancer.PickInfo{}); lastErr == nil || lastErr.Error() != want.Error() { break @@ -210,9 +209,8 @@ func (tcc *TestClientConn) WaitForPickerWithErr(ctx context.Context, want error) } // WaitForConnectivityState waits until the state pushed to this ClientConn -// matches the wanted state. Also drains the matching entry from the picker -// channel. Returns an error if the provided context expires, including the -// last received state (if any). +// matches the wanted state. Returns an error if the provided context expires, +// including the last received state (if any). 
func (tcc *TestClientConn) WaitForConnectivityState(ctx context.Context, want connectivity.State) error { var lastState connectivity.State = -1 for { @@ -220,7 +218,6 @@ func (tcc *TestClientConn) WaitForConnectivityState(ctx context.Context, want co case <-ctx.Done(): return fmt.Errorf("timeout when waiting for state to be %s; last state: %s", want, lastState) case s := <-tcc.NewStateCh: - <-tcc.NewPickerCh if s == want { return nil } @@ -230,17 +227,22 @@ func (tcc *TestClientConn) WaitForConnectivityState(ctx context.Context, want co } // WaitForRoundRobinPicker waits for a picker that passes IsRoundRobin. Also -// drains the matching state channel and requires it to be READY to be -// considered. Returns an error if the provided context expires, including the -// last received error from IsRoundRobin or the picker (if any). +// drains the matching state channel and requires it to be READY (if an entry +// is pending) to be considered. Returns an error if the provided context +// expires, including the last received error from IsRoundRobin or the picker +// (if any). func (tcc *TestClientConn) WaitForRoundRobinPicker(ctx context.Context, want ...balancer.SubConn) error { lastErr := errors.New("received no picker") for { select { case <-ctx.Done(): return fmt.Errorf("timeout when waiting for round robin picker with %v; last error: %v", want, lastErr) - case s := <-tcc.NewStateCh: - p := <-tcc.NewPickerCh + case p := <-tcc.NewPickerCh: + s := connectivity.Ready + select { + case s = <-tcc.NewStateCh: + default: + } if s != connectivity.Ready { lastErr = fmt.Errorf("received state %v instead of ready", s) break @@ -250,6 +252,8 @@ func (tcc *TestClientConn) WaitForRoundRobinPicker(ctx context.Context, want ... 
sc, err := p.Pick(balancer.PickInfo{}) if err != nil { pickerErr = err + } else if sc.Done != nil { + sc.Done(balancer.DoneInfo{}) } return sc.SubConn }); pickerErr != nil { @@ -264,6 +268,24 @@ func (tcc *TestClientConn) WaitForRoundRobinPicker(ctx context.Context, want ... } } +// WaitForPicker waits for a picker that results in f returning nil. If the +// context expires, returns the last error returned by f (if any). +func (tcc *TestClientConn) WaitForPicker(ctx context.Context, f func(balancer.Picker) error) error { + lastErr := errors.New("received no picker") + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for picker; last error: %v", lastErr) + case p := <-tcc.NewPickerCh: + if err := f(p); err != nil { + lastErr = err + continue + } + return nil + } + } +} + // IsRoundRobin checks whether f's return value is roundrobin of elements from // want. But it doesn't check for the order. Note that want can contain // duplicate items, which makes it weight-round-robin. diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index d444ecd4f4f3..a0085872a416 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -46,7 +46,7 @@ import ( ) const ( - defaultTestTimeout = 1 * time.Second + defaultTestTimeout = 5 * time.Second defaultShortTestTimeout = 100 * time.Microsecond testClusterName = "test-cluster" @@ -90,6 +90,9 @@ func init() { // TestDropByCategory verifies that the balancer correctly drops the picks, and // that the drops are reported. 
func (s) TestDropByCategory(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -122,9 +125,6 @@ func (s) TestDropByCategory(t *testing.T) { t.Fatalf("unexpected error from UpdateClientConnState: %v", err) } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - got, err := xdsC.WaitForReportLoad(ctx) if err != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) @@ -136,33 +136,34 @@ func (s) TestDropByCategory(t *testing.T) { sc1 := <-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // This should get the connecting picker. - p0 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with one backend. - p1 := <-cc.NewPickerCh + const rpcCount = 20 - for i := 0; i < rpcCount; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - // Even RPCs are dropped. - if i%2 == 0 { - if err == nil || !strings.Contains(err.Error(), "dropped") { - t.Fatalf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + for i := 0; i < rpcCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + // Even RPCs are dropped. 
+ if i%2 == 0 { + if err == nil || !strings.Contains(err.Error(), "dropped") { + return fmt.Errorf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + } + continue + } + if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) } - continue - } - if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) - } - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) } + return nil + }); err != nil { + t.Fatal(err.Error()) } // Dump load data from the store and compare with expected counts. @@ -210,22 +211,26 @@ func (s) TestDropByCategory(t *testing.T) { t.Fatalf("unexpected error from UpdateClientConnState: %v", err) } - p2 := <-cc.NewPickerCh - for i := 0; i < rpcCount; i++ { - gotSCSt, err := p2.Pick(balancer.PickInfo{}) - // Even RPCs are dropped. - if i%4 == 0 { - if err == nil || !strings.Contains(err.Error(), "dropped") { - t.Fatalf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + for i := 0; i < rpcCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + // Even RPCs are dropped. 
+ if i%4 == 0 { + if err == nil || !strings.Contains(err.Error(), "dropped") { + return fmt.Errorf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + } + continue + } + if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) } - continue - } - if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) - } - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) } + return nil + }); err != nil { + t.Fatal(err.Error()) } const dropCount2 = rpcCount * dropNumerator2 / dropDenominator2 @@ -287,51 +292,52 @@ func (s) TestDropCircuitBreaking(t *testing.T) { sc1 := <-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // This should get the connecting picker. - p0 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with one backend. 
- dones := []func(){} - p1 := <-cc.NewPickerCh const rpcCount = 100 - for i := 0; i < rpcCount; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - if i < 50 && err != nil { - t.Errorf("The first 50%% picks should be non-drops, got error %v", err) - } else if i > 50 && err == nil { - t.Errorf("The second 50%% picks should be drops, got error ") - } - dones = append(dones, func() { - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + dones := []func(){} + for i := 0; i < rpcCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if i < 50 && err != nil { + return fmt.Errorf("The first 50%% picks should be non-drops, got error %v", err) + } else if i > 50 && err == nil { + return fmt.Errorf("The second 50%% picks should be drops, got error ") } - }) - } - for _, done := range dones { - done() - } - - dones = []func(){} - // Pick without drops. - for i := 0; i < 50; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - if err != nil { - t.Errorf("The third 50%% picks should be non-drops, got error %v", err) + dones = append(dones, func() { + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) + } + }) } - dones = append(dones, func() { - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) + for _, done := range dones { + done() + } + + dones = []func(){} + // Pick without drops. + for i := 0; i < 50; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if err != nil { + t.Errorf("The third 50%% picks should be non-drops, got error %v", err) } - }) - } - for _, done := range dones { - done() + dones = append(dones, func() { + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) + } + }) + } + for _, done := range dones { + done() + } + + return nil + }); err != nil { + t.Fatal(err.Error()) } // Dump load data from the store and compare with expected counts. 
@@ -426,6 +432,9 @@ func (s) TestPickerUpdateAfterClose(t *testing.T) { // TestClusterNameInAddressAttributes covers the case that cluster name is // attached to the subconn address attributes. func (s) TestClusterNameInAddressAttributes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -451,12 +460,8 @@ func (s) TestClusterNameInAddressAttributes(t *testing.T) { sc1 := <-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // This should get the connecting picker. - p0 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -470,16 +475,8 @@ func (s) TestClusterNameInAddressAttributes(t *testing.T) { b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with one backend. - p1 := <-cc.NewPickerCh - const rpcCount = 20 - for i := 0; i < rpcCount; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) - } - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } const testClusterName2 = "test-cluster-2" @@ -511,6 +508,9 @@ func (s) TestClusterNameInAddressAttributes(t *testing.T) { // TestReResolution verifies that when a SubConn turns transient failure, // re-resolution is triggered. 
func (s) TestReResolution(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() defer xdsC.Close() @@ -536,22 +536,14 @@ func (s) TestReResolution(t *testing.T) { sc1 := <-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // This should get the connecting picker. - p0 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // This should get the transient failure picker. - p1 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p1.Pick(balancer.PickInfo{}) - if err == nil { - t.Fatalf("picker.Pick, got _,%v, want not nil", err) - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } // The transient failure should trigger a re-resolution. @@ -563,20 +555,14 @@ func (s) TestReResolution(t *testing.T) { b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with one backend. - p2 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // This should get the transient failure picker. 
- p3 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p3.Pick(balancer.PickInfo{}) - if err == nil { - t.Fatalf("picker.Pick, got _,%v, want not nil", err) - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } // The transient failure should trigger a re-resolution. @@ -635,32 +621,32 @@ func (s) TestLoadReporting(t *testing.T) { sc1 := <-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // This should get the connecting picker. - p0 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with one backend. - p1 := <-cc.NewPickerCh const successCount = 5 - for i := 0; i < successCount; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) - } - gotSCSt.Done(balancer.DoneInfo{}) - } const errorCount = 5 - for i := 0; i < errorCount; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + for i := 0; i < successCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + gotSCSt.Done(balancer.DoneInfo{}) } - gotSCSt.Done(balancer.DoneInfo{Err: 
fmt.Errorf("error")}) + for i := 0; i < errorCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + gotSCSt.Done(balancer.DoneInfo{Err: fmt.Errorf("error")}) + } + return nil + }); err != nil { + t.Fatal(err.Error()) } // Dump load data from the store and compare with expected counts. diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index 7f2bfa8a75d1..ddafa18a6100 100644 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -456,6 +456,9 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { } func (s) TestEDS_CircuitBreaking(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() @@ -481,43 +484,51 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Picks with drops. 
- dones := []func(){} - p := <-cc.NewPickerCh - for i := 0; i < 100; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if i < 50 && err != nil { - t.Errorf("The first 50%% picks should be non-drops, got error %v", err) - } else if i > 50 && err == nil { - t.Errorf("The second 50%% picks should be drops, got error ") - } - dones = append(dones, func() { + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + dones := []func(){} + defer func() { + for _, f := range dones { + f() + } + }() + + for i := 0; i < 100; i++ { + pr, err := p.Pick(balancer.PickInfo{}) if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) + dones = append(dones, func() { + pr.Done(balancer.DoneInfo{}) + }) } - }) - } - for _, done := range dones { - done() - } - dones = []func(){} + if i < 50 && err != nil { + return fmt.Errorf("The first 50%% picks should be non-drops, got error %v", err) + } else if i > 50 && err == nil { + return fmt.Errorf("The second 50%% picks should be drops, got error ") + } + } - // Pick without drops. - for i := 0; i < 50; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if err != nil { - t.Errorf("The third 50%% picks should be non-drops, got error %v", err) + for _, done := range dones { + done() } - dones = append(dones, func() { + dones = []func(){} + + // Pick without drops. + for i := 0; i < 50; i++ { + pr, err := p.Pick(balancer.PickInfo{}) if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) + dones = append(dones, func() { + pr.Done(balancer.DoneInfo{}) + }) } - }) - } - // Without this, future tests with the same service name will fail. - for _, done := range dones { - done() + if err != nil { + return fmt.Errorf("The third 50%% picks should be non-drops, got error %v", err) + } + } + + return nil + }); err != nil { + t.Fatal(err.Error()) } // Send another update, with only circuit breaking update (and no picker @@ -536,42 +547,48 @@ func (s) TestEDS_CircuitBreaking(t *testing.T) { } // Picks with drops. 
- dones = []func(){} - p2 := <-cc.NewPickerCh - for i := 0; i < 100; i++ { - pr, err := p2.Pick(balancer.PickInfo{}) - if i < 10 && err != nil { - t.Errorf("The first 10%% picks should be non-drops, got error %v", err) - } else if i > 10 && err == nil { - t.Errorf("The next 90%% picks should be drops, got error ") - } - dones = append(dones, func() { - if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + dones := []func(){} + defer func() { + for _, f := range dones { + f() } - }) - } + }() - for _, done := range dones { - done() - } - dones = []func(){} + for i := 0; i < 100; i++ { + pr, err := p.Pick(balancer.PickInfo{}) + if pr.Done != nil { + dones = append(dones, func() { + pr.Done(balancer.DoneInfo{}) + }) + } + if i < 10 && err != nil { + return fmt.Errorf("The first 10%% picks should be non-drops, got error %v", err) + } else if i > 10 && err == nil { + return fmt.Errorf("The next 90%% picks should be drops, got error ") + } + } - // Pick without drops. - for i := 0; i < 10; i++ { - pr, err := p2.Pick(balancer.PickInfo{}) - if err != nil { - t.Errorf("The next 10%% picks should be non-drops, got error %v", err) + for _, done := range dones { + done() } - dones = append(dones, func() { + dones = []func(){} + + // Pick without drops. + for i := 0; i < 10; i++ { + pr, err := p.Pick(balancer.PickInfo{}) if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) + dones = append(dones, func() { + pr.Done(balancer.DoneInfo{}) + }) } - }) - } - // Without this, future tests with the same service name will fail. 
- for _, done := range dones { - done() + if err != nil { + return fmt.Errorf("The next 10%% picks should be non-drops, got error %v", err) + } + } + return nil + }); err != nil { + t.Fatal(err.Error()) } } diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index ba497bb7f108..4c8f0b57c02d 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -69,8 +69,6 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: @@ -78,6 +76,18 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { case <-time.After(defaultTestShortTimeout): } + select { + case p := <-cc.NewPickerCh: + // If we do get a new picker, ensure it is still a p1 picker. + if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, subConnFromPicker(p)); err != nil { + t.Fatal(err.Error()) + } + default: + // No new picker; we were previously using p1 and should still be using + // p1, so this is okay. No need to wait for defaultTestShortTimeout + // since we just waited immediately above. + } + // Remove p2, no updates. 
clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) @@ -85,14 +95,25 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: t.Fatalf("got unexpected remove SubConn") case <-time.After(defaultTestShortTimeout): } + + select { + case p := <-cc.NewPickerCh: + // If we do get a new picker, ensure it is still a p1 picker. + if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, subConnFromPicker(p)); err != nil { + t.Fatal(err.Error()) + } + default: + // No new picker; we were previously using p1 and should still be using + // p1, so this is okay. No need to wait for defaultTestShortTimeout + // since we just waited immediately above. + } + } // Lower priority is used when higher priority is not ready. @@ -147,8 +168,6 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: @@ -156,6 +175,18 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { case <-time.After(defaultTestShortTimeout): } + select { + case p := <-cc.NewPickerCh: + // If we do get a new picker, ensure it is still a p1 picker. + if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, subConnFromPicker(p)); err != nil { + t.Fatal(err.Error()) + } + default: + // No new picker; we were previously using p1 and should still be using + // p1, so this is okay. No need to wait for defaultTestShortTimeout + // since we just waited immediately above. 
+ } + // Turn down 1, use 2 edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) addrs2 := <-cc.NewSubConnAddrsCh diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 672f10122ffe..d05ef18c2876 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -30,6 +30,8 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" @@ -53,7 +55,6 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba b := &priorityBalancer{ cc: cc, done: grpcsync.NewEvent(), - childToPriority: make(map[string]int), children: make(map[string]*childBalancer), childBalancerStateUpdate: buffer.NewUnbounded(), } @@ -90,16 +91,17 @@ type priorityBalancer struct { mu sync.Mutex childInUse string - // priority of the child that's current in use. Int starting from 0, and 0 - // is the higher priority. - priorityInUse int // priorities is a list of child names from higher to lower priority. priorities []string - // childToPriority is a map from the child name to it's priority. Priority - // is an int start from 0, and 0 is the higher priority. - childToPriority map[string]int // children is a map from child name to sub-balancers. children map[string]*childBalancer + + // Set during UpdateClientConnState when calling into sub-balancers. + // Prevents child updates from recomputing the active priority or sending + // an update of the aggregated picker to the parent. Cleared after all + // sub-balancers have finished UpdateClientConnState, after which + // syncPriority is called manually. 
+ inhibitPickerUpdates bool } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { @@ -111,7 +113,6 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err addressesSplit := hierarchy.Group(s.ResolverState.Addresses) b.mu.Lock() - defer b.mu.Unlock() // Create and remove children, since we know all children from the config // are used by some priority. for name, newSubConfig := range newConfig.Children { @@ -146,15 +147,14 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err } // Update config and address, but note that this doesn't send the - // updates to child balancer (the child balancer might not be built, if - // it's a low priority). + // updates to non-started child balancers (the child balancer might not + // be built, if it's a low priority). currentChild.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) } - // Remove child from children if it's not in new config. for name, oldChild := range b.children { if _, ok := newConfig.Children[name]; !ok { @@ -164,13 +164,32 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // Update priorities and handle priority changes. b.priorities = newConfig.Priorities - b.childToPriority = make(map[string]int, len(newConfig.Priorities)) - for pi, pName := range newConfig.Priorities { - b.childToPriority[pName] = pi + + // Everything was removed by the update. + if len(b.priorities) == 0 { + b.childInUse = "" + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), + }) + b.mu.Unlock() + return nil } - // Sync the states of all children to the new updated priorities. This - // include starting/stopping child balancers when necessary. 
- b.syncPriority(true) + + // This will sync the states of all children to the new updated + // priorities. Includes starting/stopping child balancers when necessary. + // Block picker updates until all children have had a chance to call + // UpdateState to prevent races where, e.g., the active priority reports + // transient failure but a higher priority may have reported something that + // made it active, and if the transient failure update is handled first, + // RPCs could fail. + b.inhibitPickerUpdates = true + // Add an item to queue to notify us when the current items in the queue + // are done and syncPriority has been called. + done := make(chan struct{}) + b.childBalancerStateUpdate.Put(resumePickerUpdates{done: done}) + b.mu.Unlock() + <-done return nil } @@ -206,7 +225,7 @@ func (b *priorityBalancer) ExitIdle() { // UpdateState implements balancergroup.BalancerStateAggregator interface. The // balancer group sends new connectivity state and picker here. func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { - b.childBalancerStateUpdate.Put(&childBalancerState{ + b.childBalancerStateUpdate.Put(childBalancerState{ name: childName, s: state, }) @@ -217,6 +236,10 @@ type childBalancerState struct { s balancer.State } +type resumePickerUpdates struct { + done chan struct{} +} + // run handles child update in a separate goroutine, so if the child sends // updates inline (when called by parent), it won't cause deadlocks (by trying // to hold the same mutex). @@ -225,11 +248,22 @@ func (b *priorityBalancer) run() { select { case u := <-b.childBalancerStateUpdate.Get(): b.childBalancerStateUpdate.Load() - s := u.(*childBalancerState) // Needs to handle state update in a goroutine, because each state // update needs to start/close child policy, could result in // deadlock. 
- b.handleChildStateUpdate(s.name, s.s) + b.mu.Lock() + if b.done.HasFired() { + return + } + switch s := u.(type) { + case childBalancerState: + b.handleChildStateUpdate(s.name, s.s) + case resumePickerUpdates: + b.inhibitPickerUpdates = false + b.syncPriority("") + close(s.done) + } + b.mu.Unlock() case <-b.done.Done(): return } diff --git a/xds/internal/balancer/priority/balancer_child.go b/xds/internal/balancer/priority/balancer_child.go index c00a56b8f9ee..34bab34c915c 100644 --- a/xds/internal/balancer/priority/balancer_child.go +++ b/xds/internal/balancer/priority/balancer_child.go @@ -44,7 +44,8 @@ type childBalancer struct { // will be restarted if the child has not reported TF more recently than it // reported Ready or Idle. reportedTF bool - state balancer.State + // The latest state the child balancer provided. + state balancer.State // The timer to give a priority some time to connect. And if the priority // doesn't go into Ready/Failure, the next priority will be started. initTimer *timerWrapper @@ -74,11 +75,14 @@ func (cb *childBalancer) updateBuilder(bb balancer.Builder) { } // updateConfig sets childBalancer's config and state, but doesn't send update to -// the child balancer. +// the child balancer unless it is started. func (cb *childBalancer) updateConfig(child *Child, rState resolver.State) { cb.ignoreReresolutionRequests = child.IgnoreReresolutionRequests cb.config = child.Config.Config cb.rState = rState + if cb.started { + cb.sendUpdate() + } } // start builds the child balancer if it's not already started. @@ -91,6 +95,7 @@ func (cb *childBalancer) start() { cb.started = true cb.parent.bg.Add(cb.name, cb.bb) cb.startInitTimer() + cb.sendUpdate() } // sendUpdate sends the addresses and config to the child balancer. @@ -145,7 +150,7 @@ func (cb *childBalancer) startInitTimer() { // Re-sync the priority. This will switch to the next priority if // there's any. Note that it's important sync() is called after setting // initTimer to nil. 
- cb.parent.syncPriority(false) + cb.parent.syncPriority("") }) } diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 2487c2626041..33068709e292 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -23,7 +23,6 @@ import ( "time" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" ) @@ -59,7 +58,7 @@ var ( // - If balancer is Connecting and has non-nil initTimer (meaning it // transitioned from Ready or Idle to connecting, not from TF, so we // should give it init-time to connect). -// - If balancer is READY +// - If balancer is READY or IDLE // - If this is the lowest priority // - do the following: // - if this is not the old childInUse, override picker so old picker is no @@ -68,18 +67,10 @@ var ( // - forward the new addresses and config // // Caller must hold b.mu. -func (b *priorityBalancer) syncPriority(forceUpdate bool) { - // Everything was removed by the update. - if len(b.priorities) == 0 { - b.childInUse = "" - b.priorityInUse = 0 - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), - }) +func (b *priorityBalancer) syncPriority(childUpdating string) { + if b.inhibitPickerUpdates { return } - for p, name := range b.priorities { child, ok := b.children[name] if !ok { @@ -92,23 +83,14 @@ func (b *priorityBalancer) syncPriority(forceUpdate bool) { child.state.ConnectivityState == connectivity.Idle || (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { - if b.childInUse != "" && b.childInUse != child.name { - // childInUse was set and is different from this child, will - // change childInUse later. We need to update picker here - // immediately so parent stops using the old picker. 
+ if b.childInUse != child.name || child.name == childUpdating { + logger.Warningf("ciu, cn, cu: %v, %v, %v", b.childInUse, child.name, childUpdating) + // If we switch children or the child in use just updated its + // picker, push the child's picker to the parent. b.cc.UpdateState(child.state) } b.logger.Infof("switching to (%q, %v) in syncPriority", child.name, p) - oldChildInUse := b.childInUse b.switchToChild(child, p) - if b.childInUse != oldChildInUse || forceUpdate { - // If child is switched, send the update to the new child. - // - // Or if forceUpdate is true (when this is triggered by a - // ClientConn update), because the ClientConn update might - // contain changes for this child. - child.sendUpdate() - } break } } @@ -163,7 +145,6 @@ func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { return } b.childInUse = child.name - b.priorityInUse = priority if !child.started { child.start() @@ -173,40 +154,13 @@ func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { // handleChildStateUpdate start/close priorities based on the connectivity // state. func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.State) { - b.mu.Lock() - defer b.mu.Unlock() - if b.done.HasFired() { - return - } - - priority, ok := b.childToPriority[childName] - if !ok { - b.logger.Warningf("priority: received picker update with unknown child %v", childName) - return - } - - if b.childInUse == "" { - b.logger.Warningf("priority: no child is in use when picker update is received") - return - } - - // priorityInUse is higher than this priority. - if b.priorityInUse < priority { - // Lower priorities should all be closed, this is an unexpected update. - // Can happen if the child policy sends an update after we tell it to - // close. - b.logger.Warningf("priority: received picker update from priority %v, lower than priority in use %v", priority, b.priorityInUse) - return - } - // Update state in child. 
The updated picker will be sent to parent later if // necessary. child, ok := b.children[childName] if !ok { - b.logger.Warningf("priority: child balancer not found for child %v, priority %v", childName, priority) + b.logger.Warningf("priority: child balancer not found for child %v", childName) return } - oldChildState := child.state child.state = s // We start/stop the init timer of this child based on the new connectivity @@ -227,36 +181,5 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S // New state is Shutdown, should never happen. Don't forward. } - oldPriorityInUse := b.priorityInUse - child.parent.syncPriority(false) - // If child is switched by syncPriority(), it also sends the update from the - // new child to overwrite the old picker used by the parent. - // - // But no update is sent if the child is not switches. That means if this - // update is from childInUse, and this child is still childInUse after - // syncing, the update being handled here is not sent to the parent. In that - // case, we need to do an explicit check here to forward the update. - if b.priorityInUse == oldPriorityInUse && b.priorityInUse == priority { - // Special handling for Connecting. If child was not switched, and this - // is a Connecting->Connecting transition, do not send the redundant - // update, since all Connecting pickers are the same (they tell the RPCs - // to repick). - // - // This can happen because the initial state of a child (before any - // update is received) is Connecting. When the child is started, it's - // picker is sent to the parent by syncPriority (to overwrite the old - // picker if there's any). When it reports Connecting after being - // started, it will send a Connecting update (handled here), causing a - // Connecting->Connecting transition. 
- if oldChildState.ConnectivityState == connectivity.Connecting && s.ConnectivityState == connectivity.Connecting { - return - } - // Only forward this update if sync() didn't switch child, and this - // child is in use. - // - // sync() forwards the update if the child was switched, so there's no - // need to forward again. - b.cc.UpdateState(child.state) - } - + child.parent.syncPriority(childName) } diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 5b96d7101f3b..ccf3a5edfc2e 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -149,8 +149,6 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { } select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case sc := <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn: %s", sc) case sc := <-cc.RemoveSubConnCh: @@ -1886,3 +1884,125 @@ func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) { t.Fatalf("got unexpected call to NewSubConn with addr: %v, want %v", addrsNew, want) } } + +// Lower priority is used when higher priority is not ready; higher priority +// still gets updates. 
+// +// Init 0 and 1; 0 is down, 1 is up, use 1; update 0; 0 is up, use 0 +func (s) TestPriority_HighPriorityUpdatesWhenLowInUse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + t.Log("Two localities, with priorities [0, 1], each with one backend.") + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + + t.Log("Make p0 fail.") + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs + // will retry. 
+ if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) + } + + t.Log("Make p1 ready.") + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc1 := <-cc.NewSubConnCh + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // Test pick with 1. + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // Does not change the aggregate state, because round robin does not leave + // TRANIENT_FAILURE if a subconn goes CONNECTING. + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + + t.Log("Change p0 to use new address.") + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[3]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Two new subconns are created by the previous update; one by p0 and one + // by p1. They don't happen concurrently, but they could happen in any + // order. 
+ t.Log("Make p0 and p1 both ready; p0 should be used.") + var sc2, sc3 balancer.SubConn + for i := 0; i < 2; i++ { + addr := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + switch addr[0].Addr { + case testBackendAddrStrs[2]: + sc2 = sc + case testBackendAddrStrs[3]: + sc3 = sc + default: + t.Fatalf("sc is created with addr %v, want %v or %v", addr[0].Addr, testBackendAddrStrs[2], testBackendAddrStrs[3]) + } + pb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + pb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + if sc2 == nil { + t.Fatalf("sc not created with addr %v", testBackendAddrStrs[2]) + } + if sc3 == nil { + t.Fatalf("sc not created with addr %v", testBackendAddrStrs[3]) + } + + // Test pick with 0. + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) + } +} From b288a2407a71d7f96d2bb5fff48a7e0b538d075e Mon Sep 17 00:00:00 2001 From: apolcyn Date: Tue, 21 Jun 2022 10:34:37 -0700 Subject: [PATCH 530/998] interop testing: log the peer address in interop soak client (#5419) --- interop/test_utils.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/interop/test_utils.go b/interop/test_utils.go index 19a5c1f7cd33..975a290bc6e6 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/status" testgrpc "google.golang.org/grpc/interop/grpc_testing" @@ -676,7 +677,7 @@ func DoPickFirstUnary(tc testgrpc.TestServiceClient) { } } -func doOneSoakIteration(ctx context.Context, tc testgrpc.TestServiceClient, resetChannel bool, serverAddr string, dopts []grpc.DialOption) (latency time.Duration, err error) { +func doOneSoakIteration(ctx context.Context, tc testgrpc.TestServiceClient, resetChannel bool, serverAddr string, dopts 
[]grpc.DialOption, copts []grpc.CallOption) (latency time.Duration, err error) { start := time.Now() client := tc if resetChannel { @@ -698,7 +699,7 @@ func doOneSoakIteration(ctx context.Context, tc testgrpc.TestServiceClient, rese Payload: pl, } var reply *testpb.SimpleResponse - reply, err = client.UnaryCall(ctx, req) + reply, err = client.UnaryCall(ctx, req, copts...) if err != nil { err = fmt.Errorf("/TestService/UnaryCall RPC failed: %s", err) return @@ -733,20 +734,21 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D break } iterationsDone++ - latency, err := doOneSoakIteration(ctx, tc, resetChannel, serverAddr, dopts) + var p peer.Peer + latency, err := doOneSoakIteration(ctx, tc, resetChannel, serverAddr, dopts, []grpc.CallOption{grpc.Peer(&p)}) latencyMs := int64(latency / time.Millisecond) h.Add(latencyMs) if err != nil { totalFailures++ - fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d failed: %s\n", i, latencyMs, err) + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s failed: %s\n", i, latencyMs, p.Addr.String(), err) continue } if latency > perIterationMaxAcceptableLatency { totalFailures++ - fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d exceeds max acceptable latency: %d\n", i, latencyMs, perIterationMaxAcceptableLatency.Milliseconds()) + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s exceeds max acceptable latency: %d\n", i, latencyMs, p.Addr.String(), perIterationMaxAcceptableLatency.Milliseconds()) continue } - fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d succeeded\n", i, latencyMs) + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s succeeded\n", i, latencyMs, p.Addr.String()) } var b bytes.Buffer h.Print(&b) From 28de4866ce7440b675662abbdd5c43b476bd4dae Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 21 Jun 2022 15:51:37 -0700 Subject: [PATCH 531/998] interop: update grpc_testing proto (#5451) --- 
interop/grpc_testing/messages.pb.go | 435 ++++++++++++++-------------- 1 file changed, 225 insertions(+), 210 deletions(-) diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index b39bf4964ade..2ccd4d9ab388 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -803,6 +803,8 @@ type StreamingOutputCallRequest struct { Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` // Whether server should return a given status ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"` + // If set the server should update this metrics report data at the OOB server. + OrcaOobReport *TestOrcaReport `protobuf:"bytes,8,opt,name=orca_oob_report,json=orcaOobReport,proto3" json:"orca_oob_report,omitempty"` } func (x *StreamingOutputCallRequest) Reset() { @@ -865,6 +867,13 @@ func (x *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { return nil } +func (x *StreamingOutputCallRequest) GetOrcaOobReport() *TestOrcaReport { + if x != nil { + return x.OrcaOobReport + } + return nil +} + // Server-streaming response, as configured by the request and parameters. 
type StreamingOutputCallResponse struct { state protoimpl.MessageState @@ -1721,7 +1730,7 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0xa3, 0x02, 0x0a, 0x1a, 0x53, 0x74, + 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0xe9, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, @@ -1739,201 +1748,206 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x63, 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, - 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, - 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 
0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, - 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, - 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, - 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, - 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 
0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, - 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, - 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, - 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xb1, 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, - 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, - 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x44, 0x0a, 0x0f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0d, 0x6f, 0x72, 0x63, 0x61, 0x4f, 0x6f, 0x62, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, + 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 
0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, + 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x52, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, + 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, + 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x09, + 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, - 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, - 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, - 0x52, 0x70, 0x63, 0x73, 0x42, 
0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, - 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, - 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, - 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, - 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6e, 0x75, 
0x6d, 0x52, 0x70, 0x63, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, + 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, - 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, - 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, - 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, - 0x6e, 
0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, - 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, - 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, - 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, + 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, + 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xb1, + 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x64, 0x0a, + 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 
0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, + 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, + 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, + 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, 
0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, + 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, + 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, + 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, + 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, - 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, - 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 
0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, - 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, - 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, - 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, + 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, + 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, + 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, + 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, + 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, + 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, + 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 
0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, + 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, + 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, + 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, + 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, + 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 
0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, + 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, - 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, - 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 
0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, - 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, - 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 
0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, - 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, - 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, + 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 
0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, + 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, + 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 
0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x0e, 0x54, 0x65, 0x73, 0x74, - 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x70, - 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, - 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, - 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, - 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 
0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, - 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, - 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, - 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, - 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, - 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, - 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, + 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, + 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x03, + 0x0a, 0x0e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 
0x75, 0x55, 0x74, + 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, + 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, + 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, + 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, + 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, + 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, + 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x1f, 0x0a, 0x0b, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4f, + 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x2a, 0x6f, 0x0a, 0x0f, + 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1e, + 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, + 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x1d, 0x0a, + 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2005,27 +2019,28 @@ var file_grpc_testing_messages_proto_depIdxs = []int32{ 10, // 14: grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters 4, // 15: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload 5, // 16: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus - 4, // 17: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload - 23, // 18: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - 24, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> 
grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - 26, // 20: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - 27, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - 28, // 22: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - 30, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - 2, // 24: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 32, // 25: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata - 33, // 26: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry - 34, // 27: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry - 25, // 28: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - 22, // 29: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - 31, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - 29, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - 2, // 32: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 33, // [33:33] is the sub-list for 
method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for extension extendee - 0, // [0:33] is the sub-list for field type_name + 21, // 17: grpc.testing.StreamingOutputCallRequest.orca_oob_report:type_name -> grpc.testing.TestOrcaReport + 4, // 18: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload + 23, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + 24, // 20: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + 26, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + 27, // 22: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + 28, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + 30, // 24: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + 2, // 25: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 32, // 26: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata + 33, // 27: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry + 34, // 28: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry + 25, // 29: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> 
grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + 22, // 30: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + 31, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + 29, // 32: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + 2, // 33: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 34, // [34:34] is the sub-list for method output_type + 34, // [34:34] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name } func init() { file_grpc_testing_messages_proto_init() } From 5cdb09fa29c15f5490235e9c78f19640bf735d09 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 22 Jun 2022 16:33:50 -0700 Subject: [PATCH 532/998] outlierdetection: fix package level comments (#5457) --- xds/internal/balancer/outlierdetection/balancer.go | 5 +++-- xds/internal/balancer/outlierdetection/config.go | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 674075c4b3f9..8729461383e9 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -16,8 +16,9 @@ * */ -// Package outlierdetection implements a balancer that implements -// Outlier Detection. +// Package outlierdetection provides an implementation of the outlier detection +// LB policy, as defined in +// https://github.com/grpc/proposal/blob/master/A50-xds-outlier-detection.md. 
package outlierdetection import ( diff --git a/xds/internal/balancer/outlierdetection/config.go b/xds/internal/balancer/outlierdetection/config.go index da8311263150..c931674ae409 100644 --- a/xds/internal/balancer/outlierdetection/config.go +++ b/xds/internal/balancer/outlierdetection/config.go @@ -15,8 +15,6 @@ * limitations under the License. */ -// Package outlierdetection implements a balancer that implements -// Outlier Detection. package outlierdetection import ( From 06ad0b82211b43ef7e0363245b5f70f797d14687 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 23 Jun 2022 13:40:41 -0700 Subject: [PATCH 533/998] internal/proto: remove obsolete test and service_config.pb.go (#5459) --- .../proto/grpc_service_config/example_test.go | 66 - .../grpc_service_config/service_config.pb.go | 4162 ----------------- regenerate.sh | 7 +- 3 files changed, 1 insertion(+), 4234 deletions(-) delete mode 100644 internal/proto/grpc_service_config/example_test.go delete mode 100644 internal/proto/grpc_service_config/service_config.pb.go diff --git a/internal/proto/grpc_service_config/example_test.go b/internal/proto/grpc_service_config/example_test.go deleted file mode 100644 index b707d8b05e39..000000000000 --- a/internal/proto/grpc_service_config/example_test.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package grpc_service_config_test - -import ( - "testing" - - "github.com/golang/protobuf/jsonpb" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "google.golang.org/grpc/internal/grpctest" - scpb "google.golang.org/grpc/internal/proto/grpc_service_config" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -// TestXdsConfigMarshalToJSON is an example to print json format of xds_config. -func (s) TestXdsConfigMarshalToJSON(t *testing.T) { - c := &scpb.XdsConfig{ - ChildPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Grpclb{ - Grpclb: &scpb.GrpcLbConfig{}, - }}, - {Policy: &scpb.LoadBalancingConfig_RoundRobin{ - RoundRobin: &scpb.RoundRobinConfig{}, - }}, - }, - FallbackPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Grpclb{ - Grpclb: &scpb.GrpcLbConfig{}, - }}, - {Policy: &scpb.LoadBalancingConfig_PickFirst{ - PickFirst: &scpb.PickFirstConfig{}, - }}, - }, - EdsServiceName: "eds.service.name", - LrsLoadReportingServerName: &wrapperspb.StringValue{ - Value: "lrs.server.name", - }, - } - j, err := (&jsonpb.Marshaler{}).MarshalToString(c) - if err != nil { - t.Fatalf("failed to marshal proto to json: %v", err) - } - t.Logf(j) -} diff --git a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go deleted file mode 100644 index 0037d2214f35..000000000000 --- a/internal/proto/grpc_service_config/service_config.pb.go +++ /dev/null @@ -1,4162 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// A ServiceConfig is supplied when a service is deployed. It mostly contains -// parameters for how clients that connect to the service should behave (for -// example, the load balancing policy to use to pick between service replicas). -// -// The configuration options provided here act as overrides to automatically -// chosen option values. Service owners should be conservative in specifying -// options as the system is likely to choose better values for these options in -// the vast majority of cases. In other words, please specify a configuration -// option only if you really have to, and avoid copy-paste inclusion of configs. -// -// Note that gRPC uses the service config in JSON form, not in protobuf -// form. This proto definition is intended to help document the schema but -// will not actually be used directly by gRPC. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: grpc/service_config/service_config.proto - -package grpc_service_config - -import ( - proto "github.com/golang/protobuf/proto" - code "google.golang.org/genproto/googleapis/rpc/code" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - structpb "google.golang.org/protobuf/types/known/structpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type int32 - -const ( - XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_UNKNOWN XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type = 0 - XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_EDS XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type = 1 - XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_LOGICAL_DNS XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type = 2 -) - -// Enum value maps for XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type. 
-var ( - XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "EDS", - 2: "LOGICAL_DNS", - } - XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type_value = map[string]int32{ - "UNKNOWN": 0, - "EDS": 1, - "LOGICAL_DNS": 2, - } -) - -func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Enum() *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type { - p := new(XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) - *p = x - return p -} - -func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_service_config_service_config_proto_enumTypes[0].Descriptor() -} - -func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Type() protoreflect.EnumType { - return &file_grpc_service_config_service_config_proto_enumTypes[0] -} - -func (x XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type.Descriptor instead. -func (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10, 0, 0} -} - -// Load balancing policy. -// -// Note that load_balancing_policy is deprecated in favor of -// load_balancing_config; the former will be used only if the latter -// is unset. -// -// If no LB policy is configured here, then the default is pick_first. -// If the policy name is set via the client API, that value overrides -// the value specified here. 
-// -// If the deprecated load_balancing_policy field is used, note that if the -// resolver returns at least one balancer address (as opposed to backend -// addresses), gRPC will use grpclb (see -// https://github.com/grpc/grpc/blob/master/doc/load-balancing.md), -// regardless of what policy is configured here. However, if the resolver -// returns at least one backend address in addition to the balancer -// address(es), the client may fall back to the requested policy if it -// is unable to reach any of the grpclb load balancers. -type ServiceConfig_LoadBalancingPolicy int32 - -const ( - ServiceConfig_UNSPECIFIED ServiceConfig_LoadBalancingPolicy = 0 - ServiceConfig_ROUND_ROBIN ServiceConfig_LoadBalancingPolicy = 1 -) - -// Enum value maps for ServiceConfig_LoadBalancingPolicy. -var ( - ServiceConfig_LoadBalancingPolicy_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "ROUND_ROBIN", - } - ServiceConfig_LoadBalancingPolicy_value = map[string]int32{ - "UNSPECIFIED": 0, - "ROUND_ROBIN": 1, - } -) - -func (x ServiceConfig_LoadBalancingPolicy) Enum() *ServiceConfig_LoadBalancingPolicy { - p := new(ServiceConfig_LoadBalancingPolicy) - *p = x - return p -} - -func (x ServiceConfig_LoadBalancingPolicy) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ServiceConfig_LoadBalancingPolicy) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_service_config_service_config_proto_enumTypes[1].Descriptor() -} - -func (ServiceConfig_LoadBalancingPolicy) Type() protoreflect.EnumType { - return &file_grpc_service_config_service_config_proto_enumTypes[1] -} - -func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ServiceConfig_LoadBalancingPolicy.Descriptor instead. 
-func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19, 0} -} - -// Configuration for a method. -type MethodConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*MethodConfig_Name `protobuf:"bytes,1,rep,name=name,proto3" json:"name,omitempty"` - // Whether RPCs sent to this method should wait until the connection is - // ready by default. If false, the RPC will abort immediately if there is - // a transient failure connecting to the server. Otherwise, gRPC will - // attempt to connect until the deadline is exceeded. - // - // The value specified via the gRPC client API will override the value - // set here. However, note that setting the value in the client API will - // also affect transient errors encountered during name resolution, which - // cannot be caught by the value here, since the service config is - // obtained by the gRPC client via name resolution. - WaitForReady *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=wait_for_ready,json=waitForReady,proto3" json:"wait_for_ready,omitempty"` - // The default timeout in seconds for RPCs sent to this method. This can be - // overridden in code. If no reply is received in the specified amount of - // time, the request is aborted and a DEADLINE_EXCEEDED error status - // is returned to the caller. - // - // The actual deadline used will be the minimum of the value specified here - // and the value set by the application via the gRPC client API. If either - // one is not set, then the other will be used. If neither is set, then the - // request has no deadline. - Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` - // The maximum allowed payload size for an individual request or object in a - // stream (client->server) in bytes. 
The size which is measured is the - // serialized payload after per-message compression (but before stream - // compression) in bytes. This applies both to streaming and non-streaming - // requests. - // - // The actual value used is the minimum of the value specified here and the - // value set by the application via the gRPC client API. If either one is - // not set, then the other will be used. If neither is set, then the - // built-in default is used. - // - // If a client attempts to send an object larger than this value, it will not - // be sent and the client will see a ClientError. - // Note that 0 is a valid value, meaning that the request message - // must be empty. - MaxRequestMessageBytes *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_request_message_bytes,json=maxRequestMessageBytes,proto3" json:"max_request_message_bytes,omitempty"` - // The maximum allowed payload size for an individual response or object in a - // stream (server->client) in bytes. The size which is measured is the - // serialized payload after per-message compression (but before stream - // compression) in bytes. This applies both to streaming and non-streaming - // requests. - // - // The actual value used is the minimum of the value specified here and the - // value set by the application via the gRPC client API. If either one is - // not set, then the other will be used. If neither is set, then the - // built-in default is used. - // - // If a server attempts to send an object larger than this value, it will not - // be sent, and a ServerError will be sent to the client instead. - // Note that 0 is a valid value, meaning that the response message - // must be empty. - MaxResponseMessageBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=max_response_message_bytes,json=maxResponseMessageBytes,proto3" json:"max_response_message_bytes,omitempty"` - // Only one of retry_policy or hedging_policy may be set. If neither is set, - // RPCs will not be retried or hedged. 
- // - // Types that are assignable to RetryOrHedgingPolicy: - // *MethodConfig_RetryPolicy_ - // *MethodConfig_HedgingPolicy_ - RetryOrHedgingPolicy isMethodConfig_RetryOrHedgingPolicy `protobuf_oneof:"retry_or_hedging_policy"` -} - -func (x *MethodConfig) Reset() { - *x = MethodConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig) ProtoMessage() {} - -func (x *MethodConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig.ProtoReflect.Descriptor instead. 
-func (*MethodConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0} -} - -func (x *MethodConfig) GetName() []*MethodConfig_Name { - if x != nil { - return x.Name - } - return nil -} - -func (x *MethodConfig) GetWaitForReady() *wrapperspb.BoolValue { - if x != nil { - return x.WaitForReady - } - return nil -} - -func (x *MethodConfig) GetTimeout() *durationpb.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -func (x *MethodConfig) GetMaxRequestMessageBytes() *wrapperspb.UInt32Value { - if x != nil { - return x.MaxRequestMessageBytes - } - return nil -} - -func (x *MethodConfig) GetMaxResponseMessageBytes() *wrapperspb.UInt32Value { - if x != nil { - return x.MaxResponseMessageBytes - } - return nil -} - -func (m *MethodConfig) GetRetryOrHedgingPolicy() isMethodConfig_RetryOrHedgingPolicy { - if m != nil { - return m.RetryOrHedgingPolicy - } - return nil -} - -func (x *MethodConfig) GetRetryPolicy() *MethodConfig_RetryPolicy { - if x, ok := x.GetRetryOrHedgingPolicy().(*MethodConfig_RetryPolicy_); ok { - return x.RetryPolicy - } - return nil -} - -func (x *MethodConfig) GetHedgingPolicy() *MethodConfig_HedgingPolicy { - if x, ok := x.GetRetryOrHedgingPolicy().(*MethodConfig_HedgingPolicy_); ok { - return x.HedgingPolicy - } - return nil -} - -type isMethodConfig_RetryOrHedgingPolicy interface { - isMethodConfig_RetryOrHedgingPolicy() -} - -type MethodConfig_RetryPolicy_ struct { - RetryPolicy *MethodConfig_RetryPolicy `protobuf:"bytes,6,opt,name=retry_policy,json=retryPolicy,proto3,oneof"` -} - -type MethodConfig_HedgingPolicy_ struct { - HedgingPolicy *MethodConfig_HedgingPolicy `protobuf:"bytes,7,opt,name=hedging_policy,json=hedgingPolicy,proto3,oneof"` -} - -func (*MethodConfig_RetryPolicy_) isMethodConfig_RetryOrHedgingPolicy() {} - -func (*MethodConfig_HedgingPolicy_) isMethodConfig_RetryOrHedgingPolicy() {} - -// Configuration for pick_first LB policy. 
-type PickFirstConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *PickFirstConfig) Reset() { - *x = PickFirstConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PickFirstConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PickFirstConfig) ProtoMessage() {} - -func (x *PickFirstConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PickFirstConfig.ProtoReflect.Descriptor instead. -func (*PickFirstConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{1} -} - -// Configuration for round_robin LB policy. 
-type RoundRobinConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RoundRobinConfig) Reset() { - *x = RoundRobinConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RoundRobinConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RoundRobinConfig) ProtoMessage() {} - -func (x *RoundRobinConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RoundRobinConfig.ProtoReflect.Descriptor instead. -func (*RoundRobinConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{2} -} - -// Configuration for outlier_detection LB policy -type OutlierDetectionLoadBalancingConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The time interval between ejection analysis sweeps. This can result in - // both new ejections as well as addresses being returned to service. Defaults - // to 10000ms or 10s. - Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` - // The base time that as address is ejected for. The real time is equal to the - // base time multiplied by the number of times the address has been ejected. - // Defaults to 30000ms or 30s. - BaseEjectionTime *durationpb.Duration `protobuf:"bytes,2,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"` - // The maximum time that an address is ejected for. 
If not specified, the default value (300000ms or 300s) or - // the base_ejection_time value is applied, whatever is larger. - MaxEjectionTime *durationpb.Duration `protobuf:"bytes,3,opt,name=max_ejection_time,json=maxEjectionTime,proto3" json:"max_ejection_time,omitempty"` - // The maximum % of an address list that can be ejected due to outlier - // detection. Defaults to 10% but will eject at least one address regardless of the value. - MaxEjectionPercent *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_ejection_percent,json=maxEjectionPercent,proto3" json:"max_ejection_percent,omitempty"` - // If set, success rate ejections will be performed - SuccessRateEjection *OutlierDetectionLoadBalancingConfig_SuccessRateEjection `protobuf:"bytes,5,opt,name=success_rate_ejection,json=successRateEjection,proto3" json:"success_rate_ejection,omitempty"` - // If set, failure rate ejections will be performed - FailurePercentageEjection *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection `protobuf:"bytes,6,opt,name=failure_percentage_ejection,json=failurePercentageEjection,proto3" json:"failure_percentage_ejection,omitempty"` - // The config for the child policy - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,13,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *OutlierDetectionLoadBalancingConfig) Reset() { - *x = OutlierDetectionLoadBalancingConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OutlierDetectionLoadBalancingConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OutlierDetectionLoadBalancingConfig) ProtoMessage() {} - -func (x *OutlierDetectionLoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OutlierDetectionLoadBalancingConfig.ProtoReflect.Descriptor instead. -func (*OutlierDetectionLoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3} -} - -func (x *OutlierDetectionLoadBalancingConfig) GetInterval() *durationpb.Duration { - if x != nil { - return x.Interval - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig) GetBaseEjectionTime() *durationpb.Duration { - if x != nil { - return x.BaseEjectionTime - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig) GetMaxEjectionTime() *durationpb.Duration { - if x != nil { - return x.MaxEjectionTime - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig) GetMaxEjectionPercent() *wrapperspb.UInt32Value { - if x != nil { - return x.MaxEjectionPercent - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig) GetSuccessRateEjection() *OutlierDetectionLoadBalancingConfig_SuccessRateEjection { - if x != nil { - return x.SuccessRateEjection - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig) GetFailurePercentageEjection() *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection { - if x != nil { - return x.FailurePercentageEjection - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -// Configuration for grpclb LB policy. -type GrpcLbConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. What LB policy to use for routing between the backend - // addresses. If unset, defaults to round_robin. - // Currently, the only supported values are round_robin and pick_first. 
- // Note that this will be used both in balancer mode and in fallback mode. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` - // Optional. If specified, overrides the name of the service to be sent to - // the balancer. - ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - // Optional. The timeout in seconds for receiving the server list from the LB - // server. Defaults to 10s. - InitialFallbackTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=initial_fallback_timeout,json=initialFallbackTimeout,proto3" json:"initial_fallback_timeout,omitempty"` -} - -func (x *GrpcLbConfig) Reset() { - *x = GrpcLbConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcLbConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcLbConfig) ProtoMessage() {} - -func (x *GrpcLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcLbConfig.ProtoReflect.Descriptor instead. 
-func (*GrpcLbConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4} -} - -func (x *GrpcLbConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -func (x *GrpcLbConfig) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -func (x *GrpcLbConfig) GetInitialFallbackTimeout() *durationpb.Duration { - if x != nil { - return x.InitialFallbackTimeout - } - return nil -} - -// Configuration for priority LB policy. -type PriorityLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Children map[string]*PriorityLoadBalancingPolicyConfig_Child `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // A list of child names in decreasing priority order - // (i.e., first element is the highest priority). 
- Priorities []string `protobuf:"bytes,2,rep,name=priorities,proto3" json:"priorities,omitempty"` -} - -func (x *PriorityLoadBalancingPolicyConfig) Reset() { - *x = PriorityLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PriorityLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PriorityLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *PriorityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PriorityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*PriorityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5} -} - -func (x *PriorityLoadBalancingPolicyConfig) GetChildren() map[string]*PriorityLoadBalancingPolicyConfig_Child { - if x != nil { - return x.Children - } - return nil -} - -func (x *PriorityLoadBalancingPolicyConfig) GetPriorities() []string { - if x != nil { - return x.Priorities - } - return nil -} - -// Configuration for weighted_target LB policy. 
-type WeightedTargetLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Targets map[string]*WeightedTargetLoadBalancingPolicyConfig_Target `protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *WeightedTargetLoadBalancingPolicyConfig) Reset() { - *x = WeightedTargetLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WeightedTargetLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WeightedTargetLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *WeightedTargetLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WeightedTargetLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*WeightedTargetLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} -} - -func (x *WeightedTargetLoadBalancingPolicyConfig) GetTargets() map[string]*WeightedTargetLoadBalancingPolicyConfig_Target { - if x != nil { - return x.Targets - } - return nil -} - -// Configuration for xds_cluster_manager_experimental LB policy. 
-type XdsClusterManagerLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Children map[string]*XdsClusterManagerLoadBalancingPolicyConfig_Child `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig) Reset() { - *x = XdsClusterManagerLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsClusterManagerLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsClusterManagerLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*XdsClusterManagerLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} -} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig) GetChildren() map[string]*XdsClusterManagerLoadBalancingPolicyConfig_Child { - if x != nil { - return x.Children - } - return nil -} - -// Configuration for the cds LB policy. -type CdsConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` // Required. 
-} - -func (x *CdsConfig) Reset() { - *x = CdsConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CdsConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CdsConfig) ProtoMessage() {} - -func (x *CdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CdsConfig.ProtoReflect.Descriptor instead. -func (*CdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} -} - -func (x *CdsConfig) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -// Represents an xDS server. -type XdsServer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServerUri string `protobuf:"bytes,1,opt,name=server_uri,proto3" json:"server_uri,omitempty"` // Required. - // A list of channel creds to use. The first supported type will be used. - ChannelCreds []*XdsServer_ChannelCredentials `protobuf:"bytes,2,rep,name=channel_creds,proto3" json:"channel_creds,omitempty"` - // A repeated list of server features. 
- ServerFeatures []*structpb.Value `protobuf:"bytes,3,rep,name=server_features,proto3" json:"server_features,omitempty"` -} - -func (x *XdsServer) Reset() { - *x = XdsServer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsServer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsServer) ProtoMessage() {} - -func (x *XdsServer) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsServer.ProtoReflect.Descriptor instead. -func (*XdsServer) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} -} - -func (x *XdsServer) GetServerUri() string { - if x != nil { - return x.ServerUri - } - return "" -} - -func (x *XdsServer) GetChannelCreds() []*XdsServer_ChannelCredentials { - if x != nil { - return x.ChannelCreds - } - return nil -} - -func (x *XdsServer) GetServerFeatures() []*structpb.Value { - if x != nil { - return x.ServerFeatures - } - return nil -} - -// Configuration for xds_cluster_resolver LB policy. -type XdsClusterResolverLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Ordered list of discovery mechanisms. - // Must have at least one element. - // Results from each discovery mechanism are concatenated together in - // successive priorities. 
- DiscoveryMechanisms []*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism `protobuf:"bytes,1,rep,name=discovery_mechanisms,json=discoveryMechanisms,proto3" json:"discovery_mechanisms,omitempty"` - // xDS LB policy. Will be used as the child config of the xds_cluster_impl LB policy. - XdsLbPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=xds_lb_policy,json=xdsLbPolicy,proto3" json:"xds_lb_policy,omitempty"` -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig) Reset() { - *x = XdsClusterResolverLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsClusterResolverLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
-func (*XdsClusterResolverLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig) GetDiscoveryMechanisms() []*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism { - if x != nil { - return x.DiscoveryMechanisms - } - return nil -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig) GetXdsLbPolicy() []*LoadBalancingConfig { - if x != nil { - return x.XdsLbPolicy - } - return nil -} - -// Configuration for xds_cluster_impl LB policy. -type XdsClusterImplLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster name. Required. - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - // EDS service name. - // Not set if cluster is not an EDS cluster or if it does not - // specify an EDS service name. - EdsServiceName string `protobuf:"bytes,2,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // Server to send load reports to. - // If unset, no load reporting is done. - // If set to empty string, load reporting will be sent to the same - // server as we are getting xds data from. - // DEPRECATED: Use new lrs_load_reporting_server field instead. - // - // Deprecated: Do not use. - LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` - // LRS server to send load reports to. - // If not present, load reporting will be disabled. - // Supercedes lrs_load_reporting_server_name field. 
- LrsLoadReportingServer *XdsServer `protobuf:"bytes,7,opt,name=lrs_load_reporting_server,json=lrsLoadReportingServer,proto3" json:"lrs_load_reporting_server,omitempty"` - // Maximum number of outstanding requests can be made to the upstream cluster. - // Default is 1024. - MaxConcurrentRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` - DropCategories []*XdsClusterImplLoadBalancingPolicyConfig_DropCategory `protobuf:"bytes,5,rep,name=drop_categories,json=dropCategories,proto3" json:"drop_categories,omitempty"` - // Child policy. - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,6,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) Reset() { - *x = XdsClusterImplLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsClusterImplLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsClusterImplLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
-func (*XdsClusterImplLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -// Deprecated: Do not use. -func (x *XdsClusterImplLoadBalancingPolicyConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { - if x != nil { - return x.LrsLoadReportingServerName - } - return nil -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) GetLrsLoadReportingServer() *XdsServer { - if x != nil { - return x.LrsLoadReportingServer - } - return nil -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) GetMaxConcurrentRequests() *wrapperspb.UInt32Value { - if x != nil { - return x.MaxConcurrentRequests - } - return nil -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) GetDropCategories() []*XdsClusterImplLoadBalancingPolicyConfig_DropCategory { - if x != nil { - return x.DropCategories - } - return nil -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -// Configuration for eds LB policy. -type EdsLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster name. Required. - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - // EDS service name, as returned in CDS. - // May be unset if not specified in CDS. - EdsServiceName string `protobuf:"bytes,2,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // Server to send load reports to. - // If unset, no load reporting is done. 
- // If set to empty string, load reporting will be sent to the same - // server as we are getting xds data from. - LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` - // Locality-picking policy. - // This policy's config is expected to be in the format used - // by the weighted_target policy. Note that the config should include - // an empty value for the "targets" field; that empty value will be - // replaced by one that is dynamically generated based on the EDS data. - // Optional; defaults to "weighted_target". - LocalityPickingPolicy []*LoadBalancingConfig `protobuf:"bytes,4,rep,name=locality_picking_policy,json=localityPickingPolicy,proto3" json:"locality_picking_policy,omitempty"` - // Endpoint-picking policy. - // This will be configured as the policy for each child in the - // locality-policy's config. - // Optional; defaults to "round_robin". 
- EndpointPickingPolicy []*LoadBalancingConfig `protobuf:"bytes,5,rep,name=endpoint_picking_policy,json=endpointPickingPolicy,proto3" json:"endpoint_picking_policy,omitempty"` -} - -func (x *EdsLoadBalancingPolicyConfig) Reset() { - *x = EdsLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EdsLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EdsLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EdsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
-func (*EdsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{12} -} - -func (x *EdsLoadBalancingPolicyConfig) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -func (x *EdsLoadBalancingPolicyConfig) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -func (x *EdsLoadBalancingPolicyConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { - if x != nil { - return x.LrsLoadReportingServerName - } - return nil -} - -func (x *EdsLoadBalancingPolicyConfig) GetLocalityPickingPolicy() []*LoadBalancingConfig { - if x != nil { - return x.LocalityPickingPolicy - } - return nil -} - -func (x *EdsLoadBalancingPolicyConfig) GetEndpointPickingPolicy() []*LoadBalancingConfig { - if x != nil { - return x.EndpointPickingPolicy - } - return nil -} - -// Configuration for ring_hash LB policy. -type RingHashLoadBalancingConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MinRingSize uint64 `protobuf:"varint,1,opt,name=min_ring_size,json=minRingSize,proto3" json:"min_ring_size,omitempty"` - MaxRingSize uint64 `protobuf:"varint,2,opt,name=max_ring_size,json=maxRingSize,proto3" json:"max_ring_size,omitempty"` -} - -func (x *RingHashLoadBalancingConfig) Reset() { - *x = RingHashLoadBalancingConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RingHashLoadBalancingConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RingHashLoadBalancingConfig) ProtoMessage() {} - -func (x *RingHashLoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RingHashLoadBalancingConfig.ProtoReflect.Descriptor instead. -func (*RingHashLoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{13} -} - -func (x *RingHashLoadBalancingConfig) GetMinRingSize() uint64 { - if x != nil { - return x.MinRingSize - } - return 0 -} - -func (x *RingHashLoadBalancingConfig) GetMaxRingSize() uint64 { - if x != nil { - return x.MaxRingSize - } - return 0 -} - -// Configuration for lrs LB policy. -type LrsLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster name. Required. - ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` - // EDS service name, as returned in CDS. - // May be unset if not specified in CDS. - EdsServiceName string `protobuf:"bytes,2,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // Server to send load reports to. Required. - // If set to empty string, load reporting will be sent to the same - // server as we are getting xds data from. - LrsLoadReportingServerName string `protobuf:"bytes,3,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` - Locality *LrsLoadBalancingPolicyConfig_Locality `protobuf:"bytes,4,opt,name=locality,proto3" json:"locality,omitempty"` - // Endpoint-picking policy. 
- ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,5,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *LrsLoadBalancingPolicyConfig) Reset() { - *x = LrsLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LrsLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LrsLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LrsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*LrsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14} -} - -func (x *LrsLoadBalancingPolicyConfig) GetClusterName() string { - if x != nil { - return x.ClusterName - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig) GetLrsLoadReportingServerName() string { - if x != nil { - return x.LrsLoadReportingServerName - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig) GetLocality() *LrsLoadBalancingPolicyConfig_Locality { - if x != nil { - return x.Locality - } - return nil -} - -func (x *LrsLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -// Configuration for the xds_wrr_locality load balancing policy. 
-type XdsWrrLocalityLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *XdsWrrLocalityLoadBalancingPolicyConfig) Reset() { - *x = XdsWrrLocalityLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsWrrLocalityLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsWrrLocalityLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *XdsWrrLocalityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsWrrLocalityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*XdsWrrLocalityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{15} -} - -func (x *XdsWrrLocalityLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -// Configuration for the least_request LB policy. 
-type LeastRequestLocalityLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ChoiceCount uint64 `protobuf:"varint,1,opt,name=choice_count,json=choiceCount,proto3" json:"choice_count,omitempty"` -} - -func (x *LeastRequestLocalityLoadBalancingPolicyConfig) Reset() { - *x = LeastRequestLocalityLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LeastRequestLocalityLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LeastRequestLocalityLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *LeastRequestLocalityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LeastRequestLocalityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*LeastRequestLocalityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{16} -} - -func (x *LeastRequestLocalityLoadBalancingPolicyConfig) GetChoiceCount() uint64 { - if x != nil { - return x.ChoiceCount - } - return 0 -} - -// Configuration for xds LB policy. -type XdsConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Name of balancer to connect to. - // - // Deprecated: Do not use. - BalancerName string `protobuf:"bytes,1,opt,name=balancer_name,json=balancerName,proto3" json:"balancer_name,omitempty"` - // Optional. 
What LB policy to use for intra-locality routing. - // If unset, will use whatever algorithm is specified by the balancer. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` - // Optional. What LB policy to use in fallback mode. If not - // specified, defaults to round_robin. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. - FallbackPolicy []*LoadBalancingConfig `protobuf:"bytes,3,rep,name=fallback_policy,json=fallbackPolicy,proto3" json:"fallback_policy,omitempty"` - // Optional. Name to use in EDS query. If not present, defaults to - // the server name from the target URI. - EdsServiceName string `protobuf:"bytes,4,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // LRS server to send load reports to. - // If not present, load reporting will be disabled. - // If set to the empty string, load reporting will be sent to the same - // server that we obtained CDS data from. 
- LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` -} - -func (x *XdsConfig) Reset() { - *x = XdsConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsConfig) ProtoMessage() {} - -func (x *XdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. -func (*XdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{17} -} - -// Deprecated: Do not use. -func (x *XdsConfig) GetBalancerName() string { - if x != nil { - return x.BalancerName - } - return "" -} - -func (x *XdsConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -func (x *XdsConfig) GetFallbackPolicy() []*LoadBalancingConfig { - if x != nil { - return x.FallbackPolicy - } - return nil -} - -func (x *XdsConfig) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -func (x *XdsConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { - if x != nil { - return x.LrsLoadReportingServerName - } - return nil -} - -// Selects LB policy and provides corresponding configuration. -// -// In general, all instances of this field should be repeated. Clients will -// iterate through the list in order and stop at the first policy that they -// support. 
This allows the service config to specify custom policies that may -// not be known to all clients. -// -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. -type LoadBalancingConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Exactly one LB policy may be configured. - // - // Types that are assignable to Policy: - // *LoadBalancingConfig_PickFirst - // *LoadBalancingConfig_RoundRobin - // *LoadBalancingConfig_OutlierDetection - // *LoadBalancingConfig_Grpclb - // *LoadBalancingConfig_PriorityExperimental - // *LoadBalancingConfig_WeightedTargetExperimental - // *LoadBalancingConfig_XdsClusterManagerExperimental - // *LoadBalancingConfig_CdsExperimental - // *LoadBalancingConfig_XdsClusterResolverExperimental - // *LoadBalancingConfig_XdsClusterImplExperimental - // *LoadBalancingConfig_RingHashExperimental - // *LoadBalancingConfig_LrsExperimental - // *LoadBalancingConfig_EdsExperimental - // *LoadBalancingConfig_Xds - // *LoadBalancingConfig_XdsExperimental - // *LoadBalancingConfig_XdsWrrLocalityExperimental - // *LoadBalancingConfig_LeastRequestExperimental - Policy isLoadBalancingConfig_Policy `protobuf_oneof:"policy"` -} - -func (x *LoadBalancingConfig) Reset() { - *x = LoadBalancingConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoadBalancingConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoadBalancingConfig) ProtoMessage() {} - -func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoadBalancingConfig.ProtoReflect.Descriptor instead. -func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{18} -} - -func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { - if m != nil { - return m.Policy - } - return nil -} - -func (x *LoadBalancingConfig) GetPickFirst() *PickFirstConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_PickFirst); ok { - return x.PickFirst - } - return nil -} - -func (x *LoadBalancingConfig) GetRoundRobin() *RoundRobinConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_RoundRobin); ok { - return x.RoundRobin - } - return nil -} - -func (x *LoadBalancingConfig) GetOutlierDetection() *OutlierDetectionLoadBalancingConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_OutlierDetection); ok { - return x.OutlierDetection - } - return nil -} - -func (x *LoadBalancingConfig) GetGrpclb() *GrpcLbConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Grpclb); ok { - return x.Grpclb - } - return nil -} - -func (x *LoadBalancingConfig) GetPriorityExperimental() *PriorityLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_PriorityExperimental); ok { - return x.PriorityExperimental - } - return nil -} - -func (x *LoadBalancingConfig) GetWeightedTargetExperimental() *WeightedTargetLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_WeightedTargetExperimental); ok { - return x.WeightedTargetExperimental - } - return nil -} - -func (x *LoadBalancingConfig) GetXdsClusterManagerExperimental() *XdsClusterManagerLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsClusterManagerExperimental); ok { - return x.XdsClusterManagerExperimental - } - return nil -} - -func (x 
*LoadBalancingConfig) GetCdsExperimental() *CdsConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_CdsExperimental); ok { - return x.CdsExperimental - } - return nil -} - -func (x *LoadBalancingConfig) GetXdsClusterResolverExperimental() *XdsClusterResolverLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsClusterResolverExperimental); ok { - return x.XdsClusterResolverExperimental - } - return nil -} - -func (x *LoadBalancingConfig) GetXdsClusterImplExperimental() *XdsClusterImplLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsClusterImplExperimental); ok { - return x.XdsClusterImplExperimental - } - return nil -} - -func (x *LoadBalancingConfig) GetRingHashExperimental() *RingHashLoadBalancingConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_RingHashExperimental); ok { - return x.RingHashExperimental - } - return nil -} - -// Deprecated: Do not use. -func (x *LoadBalancingConfig) GetLrsExperimental() *LrsLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_LrsExperimental); ok { - return x.LrsExperimental - } - return nil -} - -// Deprecated: Do not use. -func (x *LoadBalancingConfig) GetEdsExperimental() *EdsLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_EdsExperimental); ok { - return x.EdsExperimental - } - return nil -} - -// Deprecated: Do not use. -func (x *LoadBalancingConfig) GetXds() *XdsConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Xds); ok { - return x.Xds - } - return nil -} - -// Deprecated: Do not use. 
-func (x *LoadBalancingConfig) GetXdsExperimental() *XdsConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsExperimental); ok { - return x.XdsExperimental - } - return nil -} - -func (x *LoadBalancingConfig) GetXdsWrrLocalityExperimental() *XdsWrrLocalityLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsWrrLocalityExperimental); ok { - return x.XdsWrrLocalityExperimental - } - return nil -} - -func (x *LoadBalancingConfig) GetLeastRequestExperimental() *LeastRequestLocalityLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_LeastRequestExperimental); ok { - return x.LeastRequestExperimental - } - return nil -} - -type isLoadBalancingConfig_Policy interface { - isLoadBalancingConfig_Policy() -} - -type LoadBalancingConfig_PickFirst struct { - PickFirst *PickFirstConfig `protobuf:"bytes,4,opt,name=pick_first,proto3,oneof"` -} - -type LoadBalancingConfig_RoundRobin struct { - RoundRobin *RoundRobinConfig `protobuf:"bytes,1,opt,name=round_robin,proto3,oneof"` -} - -type LoadBalancingConfig_OutlierDetection struct { - OutlierDetection *OutlierDetectionLoadBalancingConfig `protobuf:"bytes,15,opt,name=outlier_detection,proto3,oneof"` -} - -type LoadBalancingConfig_Grpclb struct { - // gRPC lookaside load balancing. - // This will eventually be deprecated by the new xDS-based local - // balancing policy. - Grpclb *GrpcLbConfig `protobuf:"bytes,3,opt,name=grpclb,proto3,oneof"` -} - -type LoadBalancingConfig_PriorityExperimental struct { - PriorityExperimental *PriorityLoadBalancingPolicyConfig `protobuf:"bytes,9,opt,name=priority_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_WeightedTargetExperimental struct { - WeightedTargetExperimental *WeightedTargetLoadBalancingPolicyConfig `protobuf:"bytes,10,opt,name=weighted_target_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_XdsClusterManagerExperimental struct { - // xDS-based load balancing. 
- XdsClusterManagerExperimental *XdsClusterManagerLoadBalancingPolicyConfig `protobuf:"bytes,14,opt,name=xds_cluster_manager_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_CdsExperimental struct { - CdsExperimental *CdsConfig `protobuf:"bytes,6,opt,name=cds_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_XdsClusterResolverExperimental struct { - XdsClusterResolverExperimental *XdsClusterResolverLoadBalancingPolicyConfig `protobuf:"bytes,11,opt,name=xds_cluster_resolver_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_XdsClusterImplExperimental struct { - XdsClusterImplExperimental *XdsClusterImplLoadBalancingPolicyConfig `protobuf:"bytes,12,opt,name=xds_cluster_impl_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_RingHashExperimental struct { - RingHashExperimental *RingHashLoadBalancingConfig `protobuf:"bytes,13,opt,name=ring_hash_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_LrsExperimental struct { - // Deprecated xDS-related policies. - // - // Deprecated: Do not use. - LrsExperimental *LrsLoadBalancingPolicyConfig `protobuf:"bytes,8,opt,name=lrs_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_EdsExperimental struct { - // Deprecated: Do not use. - EdsExperimental *EdsLoadBalancingPolicyConfig `protobuf:"bytes,7,opt,name=eds_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_Xds struct { - // Deprecated: Do not use. - Xds *XdsConfig `protobuf:"bytes,2,opt,name=xds,proto3,oneof"` -} - -type LoadBalancingConfig_XdsExperimental struct { - // Deprecated: Do not use. 
- XdsExperimental *XdsConfig `protobuf:"bytes,5,opt,name=xds_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_XdsWrrLocalityExperimental struct { - XdsWrrLocalityExperimental *XdsWrrLocalityLoadBalancingPolicyConfig `protobuf:"bytes,16,opt,name=xds_wrr_locality_experimental,proto3,oneof"` -} - -type LoadBalancingConfig_LeastRequestExperimental struct { - LeastRequestExperimental *LeastRequestLocalityLoadBalancingPolicyConfig `protobuf:"bytes,17,opt,name=least_request_experimental,proto3,oneof"` -} - -func (*LoadBalancingConfig_PickFirst) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_RoundRobin) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_OutlierDetection) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Grpclb) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_PriorityExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_WeightedTargetExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_XdsClusterManagerExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_CdsExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_XdsClusterResolverExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_XdsClusterImplExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_RingHashExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_LrsExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_EdsExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Xds) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_XdsExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_XdsWrrLocalityExperimental) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_LeastRequestExperimental) isLoadBalancingConfig_Policy() {} - -// A ServiceConfig represents information about a 
service but is not specific to -// any name resolver. -type ServiceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated: Do not use. - LoadBalancingPolicy ServiceConfig_LoadBalancingPolicy `protobuf:"varint,1,opt,name=load_balancing_policy,json=loadBalancingPolicy,proto3,enum=grpc.service_config.ServiceConfig_LoadBalancingPolicy" json:"load_balancing_policy,omitempty"` - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. If none - // are supported, the service config is considered invalid. - LoadBalancingConfig []*LoadBalancingConfig `protobuf:"bytes,4,rep,name=load_balancing_config,json=loadBalancingConfig,proto3" json:"load_balancing_config,omitempty"` - // Per-method configuration. - MethodConfig []*MethodConfig `protobuf:"bytes,2,rep,name=method_config,json=methodConfig,proto3" json:"method_config,omitempty"` - RetryThrottling *ServiceConfig_RetryThrottlingPolicy `protobuf:"bytes,3,opt,name=retry_throttling,json=retryThrottling,proto3" json:"retry_throttling,omitempty"` - HealthCheckConfig *ServiceConfig_HealthCheckConfig `protobuf:"bytes,5,opt,name=health_check_config,json=healthCheckConfig,proto3" json:"health_check_config,omitempty"` -} - -func (x *ServiceConfig) Reset() { - *x = ServiceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConfig) ProtoMessage() {} - -func (x *ServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - 
ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConfig.ProtoReflect.Descriptor instead. -func (*ServiceConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19} -} - -// Deprecated: Do not use. -func (x *ServiceConfig) GetLoadBalancingPolicy() ServiceConfig_LoadBalancingPolicy { - if x != nil { - return x.LoadBalancingPolicy - } - return ServiceConfig_UNSPECIFIED -} - -func (x *ServiceConfig) GetLoadBalancingConfig() []*LoadBalancingConfig { - if x != nil { - return x.LoadBalancingConfig - } - return nil -} - -func (x *ServiceConfig) GetMethodConfig() []*MethodConfig { - if x != nil { - return x.MethodConfig - } - return nil -} - -func (x *ServiceConfig) GetRetryThrottling() *ServiceConfig_RetryThrottlingPolicy { - if x != nil { - return x.RetryThrottling - } - return nil -} - -func (x *ServiceConfig) GetHealthCheckConfig() *ServiceConfig_HealthCheckConfig { - if x != nil { - return x.HealthCheckConfig - } - return nil -} - -// The names of the methods to which this configuration applies. -// - MethodConfig without names (empty list) will be skipped. -// - Each name entry must be unique across the entire ServiceConfig. -// - If the 'method' field is empty, this MethodConfig specifies the defaults -// for all methods for the specified service. -// - If the 'service' field is empty, the 'method' field must be empty, and -// this MethodConfig specifies the default for all methods (it's the default -// config). -// -// When determining which MethodConfig to use for a given RPC, the most -// specific match wins. For example, let's say that the service config -// contains the following MethodConfig entries: -// -// method_config { name { } ... } -// method_config { name { service: "MyService" } ... } -// method_config { name { service: "MyService" method: "Foo" } ... 
} -// -// MyService/Foo will use the third entry, because it exactly matches the -// service and method name. MyService/Bar will use the second entry, because -// it provides the default for all methods of MyService. AnotherService/Baz -// will use the first entry, because it doesn't match the other two. -// -// In JSON representation, value "", value `null`, and not present are the -// same. The following are the same Name: -// - { "service": "s" } -// - { "service": "s", "method": null } -// - { "service": "s", "method": "" } -type MethodConfig_Name struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` // Required. Includes proto package name. - Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` -} - -func (x *MethodConfig_Name) Reset() { - *x = MethodConfig_Name{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig_Name) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig_Name) ProtoMessage() {} - -func (x *MethodConfig_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig_Name.ProtoReflect.Descriptor instead. 
-func (*MethodConfig_Name) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *MethodConfig_Name) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *MethodConfig_Name) GetMethod() string { - if x != nil { - return x.Method - } - return "" -} - -// The retry policy for outgoing RPCs. -type MethodConfig_RetryPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The maximum number of RPC attempts, including the original attempt. - // - // This field is required and must be greater than 1. - // Any value greater than 5 will be treated as if it were 5. - MaxAttempts uint32 `protobuf:"varint,1,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initial_backoff). In general, the nth attempt will occur at - // random(0, - // min(initial_backoff*backoff_multiplier**(n-1), max_backoff)). - // Required. Must be greater than zero. - InitialBackoff *durationpb.Duration `protobuf:"bytes,2,opt,name=initial_backoff,json=initialBackoff,proto3" json:"initial_backoff,omitempty"` - // Required. Must be greater than zero. - MaxBackoff *durationpb.Duration `protobuf:"bytes,3,opt,name=max_backoff,json=maxBackoff,proto3" json:"max_backoff,omitempty"` - BackoffMultiplier float32 `protobuf:"fixed32,4,opt,name=backoff_multiplier,json=backoffMultiplier,proto3" json:"backoff_multiplier,omitempty"` // Required. Must be greater than zero. - // The set of status codes which may be retried. - // - // This field is required and must be non-empty. 
- RetryableStatusCodes []code.Code `protobuf:"varint,5,rep,packed,name=retryable_status_codes,json=retryableStatusCodes,proto3,enum=google.rpc.Code" json:"retryable_status_codes,omitempty"` -} - -func (x *MethodConfig_RetryPolicy) Reset() { - *x = MethodConfig_RetryPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig_RetryPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig_RetryPolicy) ProtoMessage() {} - -func (x *MethodConfig_RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig_RetryPolicy.ProtoReflect.Descriptor instead. -func (*MethodConfig_RetryPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *MethodConfig_RetryPolicy) GetMaxAttempts() uint32 { - if x != nil { - return x.MaxAttempts - } - return 0 -} - -func (x *MethodConfig_RetryPolicy) GetInitialBackoff() *durationpb.Duration { - if x != nil { - return x.InitialBackoff - } - return nil -} - -func (x *MethodConfig_RetryPolicy) GetMaxBackoff() *durationpb.Duration { - if x != nil { - return x.MaxBackoff - } - return nil -} - -func (x *MethodConfig_RetryPolicy) GetBackoffMultiplier() float32 { - if x != nil { - return x.BackoffMultiplier - } - return 0 -} - -func (x *MethodConfig_RetryPolicy) GetRetryableStatusCodes() []code.Code { - if x != nil { - return x.RetryableStatusCodes - } - return nil -} - -// The hedging policy for outgoing RPCs. 
Hedged RPCs may execute more than -// once on the server, so only idempotent methods should specify a hedging -// policy. -type MethodConfig_HedgingPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The hedging policy will send up to max_requests RPCs. - // This number represents the total number of all attempts, including - // the original attempt. - // - // This field is required and must be greater than 1. - // Any value greater than 5 will be treated as if it were 5. - MaxAttempts uint32 `protobuf:"varint,1,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` - // The first RPC will be sent immediately, but the max_requests-1 subsequent - // hedged RPCs will be sent at intervals of every hedging_delay. Set this - // to 0 to immediately send all max_requests RPCs. - HedgingDelay *durationpb.Duration `protobuf:"bytes,2,opt,name=hedging_delay,json=hedgingDelay,proto3" json:"hedging_delay,omitempty"` - // The set of status codes which indicate other hedged RPCs may still - // succeed. If a non-fatal status code is returned by the server, hedged - // RPCs will continue. Otherwise, outstanding requests will be canceled and - // the error returned to the client application layer. - // - // This field is optional. 
- NonFatalStatusCodes []code.Code `protobuf:"varint,3,rep,packed,name=non_fatal_status_codes,json=nonFatalStatusCodes,proto3,enum=google.rpc.Code" json:"non_fatal_status_codes,omitempty"` -} - -func (x *MethodConfig_HedgingPolicy) Reset() { - *x = MethodConfig_HedgingPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig_HedgingPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig_HedgingPolicy) ProtoMessage() {} - -func (x *MethodConfig_HedgingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig_HedgingPolicy.ProtoReflect.Descriptor instead. -func (*MethodConfig_HedgingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *MethodConfig_HedgingPolicy) GetMaxAttempts() uint32 { - if x != nil { - return x.MaxAttempts - } - return 0 -} - -func (x *MethodConfig_HedgingPolicy) GetHedgingDelay() *durationpb.Duration { - if x != nil { - return x.HedgingDelay - } - return nil -} - -func (x *MethodConfig_HedgingPolicy) GetNonFatalStatusCodes() []code.Code { - if x != nil { - return x.NonFatalStatusCodes - } - return nil -} - -// Parameters for the success rate ejection algorithm. -// This algorithm monitors the request success rate for all endpoints and -// ejects individual endpoints whose success rates are statistical outliers. 
-type OutlierDetectionLoadBalancingConfig_SuccessRateEjection struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This factor is used to determine the ejection threshold for success rate - // outlier ejection. The ejection threshold is the difference between the - // mean success rate, and the product of this factor and the standard - // deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - StdevFactor *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=stdev_factor,json=stdevFactor,proto3" json:"stdev_factor,omitempty"` - // The % chance that an address will be actually ejected when an outlier status - // is detected through success rate statistics. This setting can be used to - // disable ejection or to ramp it up slowly. Defaults to 100. - EnforcementPercentage *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=enforcement_percentage,json=enforcementPercentage,proto3" json:"enforcement_percentage,omitempty"` - // The number of addresses that must have enough request volume to - // detect success rate outliers. If the number of addresses is less than this - // setting, outlier detection via success rate statistics is not performed - // for any addresses. Defaults to 5. - MinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=minimum_hosts,json=minimumHosts,proto3" json:"minimum_hosts,omitempty"` - // The minimum number of total requests that must be collected in one - // interval (as defined by the interval duration above) to include this address - // in success rate based outlier detection. If the volume is lower than this - // setting, outlier detection via success rate statistics is not performed - // for that address. Defaults to 100. 
- RequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=request_volume,json=requestVolume,proto3" json:"request_volume,omitempty"` -} - -func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) Reset() { - *x = OutlierDetectionLoadBalancingConfig_SuccessRateEjection{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoMessage() {} - -func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OutlierDetectionLoadBalancingConfig_SuccessRateEjection.ProtoReflect.Descriptor instead. 
-func (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetStdevFactor() *wrapperspb.UInt32Value { - if x != nil { - return x.StdevFactor - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetEnforcementPercentage() *wrapperspb.UInt32Value { - if x != nil { - return x.EnforcementPercentage - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetMinimumHosts() *wrapperspb.UInt32Value { - if x != nil { - return x.MinimumHosts - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig_SuccessRateEjection) GetRequestVolume() *wrapperspb.UInt32Value { - if x != nil { - return x.RequestVolume - } - return nil -} - -// Parameters for the failure percentage algorithm. -// This algorithm ejects individual endpoints whose failure rate is greater than -// some threshold, independently of any other endpoint. -type OutlierDetectionLoadBalancingConfig_FailurePercentageEjection struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The failure percentage to use when determining failure percentage-based outlier detection. If - // the failure percentage of a given address is greater than or equal to this value, it will be - // ejected. Defaults to 85. - Threshold *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=threshold,proto3" json:"threshold,omitempty"` - // The % chance that an address will be actually ejected when an outlier status is detected through - // failure percentage statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 100. 
- EnforcementPercentage *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=enforcement_percentage,json=enforcementPercentage,proto3" json:"enforcement_percentage,omitempty"` - // The minimum number of addresses in order to perform failure percentage-based ejection. - // If the total number of addresses is less than this value, failure percentage-based - // ejection will not be performed. Defaults to 5. - MinimumHosts *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=minimum_hosts,json=minimumHosts,proto3" json:"minimum_hosts,omitempty"` - // The minimum number of total requests that must be collected in one interval (as defined by the - // interval duration above) to perform failure percentage-based ejection for this address. If the - // volume is lower than this setting, failure percentage-based ejection will not be performed for - // this host. Defaults to 50. - RequestVolume *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=request_volume,json=requestVolume,proto3" json:"request_volume,omitempty"` -} - -func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) Reset() { - *x = OutlierDetectionLoadBalancingConfig_FailurePercentageEjection{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoMessage() {} - -func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// 
Deprecated: Use OutlierDetectionLoadBalancingConfig_FailurePercentageEjection.ProtoReflect.Descriptor instead. -func (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3, 1} -} - -func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetThreshold() *wrapperspb.UInt32Value { - if x != nil { - return x.Threshold - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetEnforcementPercentage() *wrapperspb.UInt32Value { - if x != nil { - return x.EnforcementPercentage - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetMinimumHosts() *wrapperspb.UInt32Value { - if x != nil { - return x.MinimumHosts - } - return nil -} - -func (x *OutlierDetectionLoadBalancingConfig_FailurePercentageEjection) GetRequestVolume() *wrapperspb.UInt32Value { - if x != nil { - return x.RequestVolume - } - return nil -} - -// A map of name to child policy configuration. -// The names are used to allow the priority policy to update -// existing child policies instead of creating new ones every -// time it receives a config update. -type PriorityLoadBalancingPolicyConfig_Child struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Config []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty"` - // If true, will ignore reresolution requests from this child. 
- IgnoreReresolutionRequests bool `protobuf:"varint,2,opt,name=ignore_reresolution_requests,json=ignoreReresolutionRequests,proto3" json:"ignore_reresolution_requests,omitempty"` -} - -func (x *PriorityLoadBalancingPolicyConfig_Child) Reset() { - *x = PriorityLoadBalancingPolicyConfig_Child{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PriorityLoadBalancingPolicyConfig_Child) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PriorityLoadBalancingPolicyConfig_Child) ProtoMessage() {} - -func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PriorityLoadBalancingPolicyConfig_Child.ProtoReflect.Descriptor instead. 
-func (*PriorityLoadBalancingPolicyConfig_Child) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *PriorityLoadBalancingPolicyConfig_Child) GetConfig() []*LoadBalancingConfig { - if x != nil { - return x.Config - } - return nil -} - -func (x *PriorityLoadBalancingPolicyConfig_Child) GetIgnoreReresolutionRequests() bool { - if x != nil { - return x.IgnoreReresolutionRequests - } - return false -} - -type WeightedTargetLoadBalancingPolicyConfig_Target struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Weight uint32 `protobuf:"varint,1,opt,name=weight,proto3" json:"weight,omitempty"` - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) Reset() { - *x = WeightedTargetLoadBalancingPolicyConfig_Target{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WeightedTargetLoadBalancingPolicyConfig_Target) ProtoMessage() {} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WeightedTargetLoadBalancingPolicyConfig_Target.ProtoReflect.Descriptor instead. 
-func (*WeightedTargetLoadBalancingPolicyConfig_Target) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetWeight() uint32 { - if x != nil { - return x.Weight - } - return 0 -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -type XdsClusterManagerLoadBalancingPolicyConfig_Child struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) Reset() { - *x = XdsClusterManagerLoadBalancingPolicyConfig_Child{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoMessage() {} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsClusterManagerLoadBalancingPolicyConfig_Child.ProtoReflect.Descriptor instead. 
-func (*XdsClusterManagerLoadBalancingPolicyConfig_Child) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7, 0} -} - -func (x *XdsClusterManagerLoadBalancingPolicyConfig_Child) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -type XdsServer_ChannelCredentials struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Required. - Config *structpb.Struct `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` // Optional JSON config. -} - -func (x *XdsServer_ChannelCredentials) Reset() { - *x = XdsServer_ChannelCredentials{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsServer_ChannelCredentials) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsServer_ChannelCredentials) ProtoMessage() {} - -func (x *XdsServer_ChannelCredentials) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsServer_ChannelCredentials.ProtoReflect.Descriptor instead. 
-func (*XdsServer_ChannelCredentials) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0} -} - -func (x *XdsServer_ChannelCredentials) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *XdsServer_ChannelCredentials) GetConfig() *structpb.Struct { - if x != nil { - return x.Config - } - return nil -} - -// Describes a discovery mechanism instance. -// For EDS or LOGICAL_DNS clusters, there will be exactly one -// DiscoveryMechanism, which will describe the cluster of the parent -// CDS policy. -// For aggregate clusters, there will be one DiscoveryMechanism for each -// underlying cluster. -type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster name. - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - // LRS server to send load reports to. - // If not present, load reporting will be disabled. - // If set to the empty string, load reporting will be sent to the same - // server that we obtained CDS data from. - // DEPRECATED: Use new lrs_load_reporting_server field instead. - // - // Deprecated: Do not use. - LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` - // LRS server to send load reports to. - // If not present, load reporting will be disabled. - // Supercedes lrs_load_reporting_server_name field. - LrsLoadReportingServer *XdsServer `protobuf:"bytes,7,opt,name=lrs_load_reporting_server,json=lrsLoadReportingServer,proto3" json:"lrs_load_reporting_server,omitempty"` - // Maximum number of outstanding requests can be made to the upstream - // cluster. Default is 1024. 
- MaxConcurrentRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_concurrent_requests,json=maxConcurrentRequests,proto3" json:"max_concurrent_requests,omitempty"` - Type XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type" json:"type,omitempty"` - // For type EDS only. - // EDS service name, as returned in CDS. - // May be unset if not specified in CDS. - EdsServiceName string `protobuf:"bytes,5,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // For type LOGICAL_DNS only. - // DNS name to resolve in "host:port" form. - DnsHostname string `protobuf:"bytes,6,opt,name=dns_hostname,json=dnsHostname,proto3" json:"dns_hostname,omitempty"` - // The configuration for outlier_detection child policies - // Within this message, the child_policy field will be ignored - OutlierDetection *OutlierDetectionLoadBalancingConfig `protobuf:"bytes,8,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Reset() { - *x = XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoMessage() {} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism.ProtoReflect.Descriptor instead. -func (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10, 0} -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -// Deprecated: Do not use. -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetLrsLoadReportingServerName() *wrapperspb.StringValue { - if x != nil { - return x.LrsLoadReportingServerName - } - return nil -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetLrsLoadReportingServer() *XdsServer { - if x != nil { - return x.LrsLoadReportingServer - } - return nil -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetMaxConcurrentRequests() *wrapperspb.UInt32Value { - if x != nil { - return x.MaxConcurrentRequests - } - return nil -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetType() XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type { - if x != nil { - return x.Type - } - return XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_UNKNOWN -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetDnsHostname() string { - if x != nil { - return x.DnsHostname - } - return "" -} - -func (x *XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism) GetOutlierDetection() *OutlierDetectionLoadBalancingConfig { - if x != nil { - 
return x.OutlierDetection - } - return nil -} - -// Drop configuration. -type XdsClusterImplLoadBalancingPolicyConfig_DropCategory struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Category string `protobuf:"bytes,1,opt,name=category,proto3" json:"category,omitempty"` - RequestsPerMillion uint32 `protobuf:"varint,2,opt,name=requests_per_million,json=requestsPerMillion,proto3" json:"requests_per_million,omitempty"` -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Reset() { - *x = XdsClusterImplLoadBalancingPolicyConfig_DropCategory{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoMessage() {} - -func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsClusterImplLoadBalancingPolicyConfig_DropCategory.ProtoReflect.Descriptor instead. 
-func (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) GetCategory() string { - if x != nil { - return x.Category - } - return "" -} - -func (x *XdsClusterImplLoadBalancingPolicyConfig_DropCategory) GetRequestsPerMillion() uint32 { - if x != nil { - return x.RequestsPerMillion - } - return 0 -} - -// The locality for which this policy will report load. Required. -type LrsLoadBalancingPolicyConfig_Locality struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"` - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - Subzone string `protobuf:"bytes,3,opt,name=subzone,proto3" json:"subzone,omitempty"` -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) Reset() { - *x = LrsLoadBalancingPolicyConfig_Locality{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LrsLoadBalancingPolicyConfig_Locality) ProtoMessage() {} - -func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LrsLoadBalancingPolicyConfig_Locality.ProtoReflect.Descriptor instead. 
-func (*LrsLoadBalancingPolicyConfig_Locality) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{14, 0} -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) GetRegion() string { - if x != nil { - return x.Region - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) GetSubzone() string { - if x != nil { - return x.Subzone - } - return "" -} - -// If a RetryThrottlingPolicy is provided, gRPC will automatically throttle -// retry attempts and hedged RPCs when the client's ratio of failures to -// successes exceeds a threshold. -// -// For each server name, the gRPC client will maintain a token_count which is -// initially set to max_tokens. Every outgoing RPC (regardless of service or -// method invoked) will change token_count as follows: -// -// - Every failed RPC will decrement the token_count by 1. -// - Every successful RPC will increment the token_count by token_ratio. -// -// If token_count is less than or equal to max_tokens / 2, then RPCs will not -// be retried and hedged RPCs will not be sent. -type ServiceConfig_RetryThrottlingPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The number of tokens starts at max_tokens. The token_count will always be - // between 0 and max_tokens. - // - // This field is required and must be greater than zero. - MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` - // The amount of tokens to add on each successful RPC. Typically this will - // be some number between 0 and 1, e.g., 0.1. - // - // This field is required and must be greater than zero. Up to 3 decimal - // places are supported. 
- TokenRatio float32 `protobuf:"fixed32,2,opt,name=token_ratio,json=tokenRatio,proto3" json:"token_ratio,omitempty"` -} - -func (x *ServiceConfig_RetryThrottlingPolicy) Reset() { - *x = ServiceConfig_RetryThrottlingPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConfig_RetryThrottlingPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} - -func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConfig_RetryThrottlingPolicy.ProtoReflect.Descriptor instead. -func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19, 0} -} - -func (x *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { - if x != nil { - return x.MaxTokens - } - return 0 -} - -func (x *ServiceConfig_RetryThrottlingPolicy) GetTokenRatio() float32 { - if x != nil { - return x.TokenRatio - } - return 0 -} - -type ServiceConfig_HealthCheckConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Service name to use in the health-checking request. 
- ServiceName *wrapperspb.StringValue `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` -} - -func (x *ServiceConfig_HealthCheckConfig) Reset() { - *x = ServiceConfig_HealthCheckConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConfig_HealthCheckConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} - -func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConfig_HealthCheckConfig.ProtoReflect.Descriptor instead. 
-func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{19, 1} -} - -func (x *ServiceConfig_HealthCheckConfig) GetServiceName() *wrapperspb.StringValue { - if x != nil { - return x.ServiceName - } - return nil -} - -var File_grpc_service_config_service_config_proto protoreflect.FileDescriptor - -var file_grpc_service_config_service_config_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, - 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xde, 0x08, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 
0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x72, 0x65, - 0x61, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, - 0x61, 0x64, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x59, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0c, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, - 0x28, 
0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x58, 0x0a, 0x0e, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x64, 0x67, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x68, 0x65, 0x64, - 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x38, 0x0a, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, - 0x6d, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, - 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 
0x6b, 0x6f, 0x66, 0x66, 0x12, 0x3a, 0x0a, 0x0b, 0x6d, - 0x61, 0x78, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6d, 0x61, 0x78, - 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x6f, - 0x66, 0x66, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x1a, 0xb9, - 0x01, 0x0a, 0x0d, 0x48, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, - 0x70, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x6c, 0x61, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x6c, 0x61, 0x79, 0x12, 0x45, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x74, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 
0x18, 0x03, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x6e, 0x6f, 0x6e, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, - 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x12, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x6e, - 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8e, 0x0a, 0x0a, - 0x23, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x47, 0x0a, 0x12, 0x62, - 0x61, 0x73, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x10, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x45, - 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x14, 0x6d, - 0x61, 0x78, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x45, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x80, 0x01, 0x0a, 0x15, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, - 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x92, - 0x01, 0x0a, 0x1b, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, - 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, - 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x1a, 0xb3, 0x02, 0x0a, 0x13, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, - 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x74, 0x64, 0x65, - 0x76, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, 0x74, - 0x64, 0x65, 0x76, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x53, 0x0a, 0x16, 0x65, 0x6e, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x41, - 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 
0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, - 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x1a, 0xb4, 0x02, 0x0a, 0x19, 0x46, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x45, 0x6a, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x12, 0x53, 0x0a, 0x16, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, - 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, - 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, - 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, - 0x6d, 0x75, 0x6d, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x22, 0xd3, 0x01, - 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, - 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x53, - 0x0a, 0x18, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x22, 0xae, 0x03, 0x0a, 0x21, 0x50, 0x72, 0x69, 
0x6f, 0x72, 0x69, 0x74, 0x79, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, - 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x05, - 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, - 0x65, 0x5f, 0x72, 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x79, 0x0a, 0x0d, 0x43, 0x68, 0x69, - 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x02, 0x0a, 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, - 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x6d, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 
0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, 0x0a, 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x59, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf2, 0x02, 0x0a, 0x2a, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, - 0x54, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 
0x68, 0x69, 0x6c, - 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x82, 0x01, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, - 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, - 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x09, 0x43, 0x64, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x22, 0xa1, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, - 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x12, - 0x57, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 
0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x12, 0x43, 0x68, - 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9d, 0x07, 0x0a, 0x2b, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x86, 0x01, 0x0a, 0x14, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, - 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 
0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x13, 0x64, 0x69, 0x73, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x4c, - 0x0a, 0x0d, 0x78, 0x64, 0x73, 0x5f, 0x6c, 0x62, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0b, 0x78, 0x64, 0x73, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x96, 0x05, 0x0a, - 0x12, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, - 0x69, 0x73, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x64, 0x0a, - 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 
0x72, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x16, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, - 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, - 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x12, 0x6c, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x58, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6e, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x64, 
0x6e, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x65, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2d, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x45, - 0x44, 0x53, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, - 0x44, 0x4e, 0x53, 0x10, 0x02, 0x22, 0xa3, 0x05, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, - 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x59, 0x0a, 0x19, 0x6c, - 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x16, - 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x72, 0x0a, 0x0f, - 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, - 0x52, 
0x0e, 0x64, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, - 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x5c, 0x0a, - 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, - 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x50, 0x65, 0x72, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x03, 0x0a, 0x1c, - 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, - 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x15, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x65, 0x0a, 0x1b, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, - 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x69, 0x6e, - 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x69, - 0x6e, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0d, 
0x6d, 0x61, 0x78, - 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa6, 0x03, - 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x1e, 0x6c, - 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0x76, 0x0a, 0x27, 0x58, 0x64, 0x73, 0x57, 0x72, 0x72, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x52, - 0x0a, 0x2d, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x6f, - 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x68, 
0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, - 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 
0x74, - 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x94, 0x0e, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, - 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, - 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, - 0x6f, 0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, - 0x12, 0x68, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, - 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, - 0x70, 0x63, 0x6c, 0x62, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x12, 0x6e, 0x0a, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, - 0x52, 0x15, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, - 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x82, 0x01, 0x0a, 0x1c, 0x77, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, - 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1c, - 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x8d, 0x01, 0x0a, - 0x20, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 
0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, - 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x20, 0x78, 0x64, 0x73, 0x5f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x4c, 0x0a, 0x10, - 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x10, 0x63, 0x64, 0x73, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x90, 0x01, 0x0a, 0x21, 0x78, - 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x21, 0x78, 0x64, 0x73, 0x5f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, - 0x5f, 
0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, - 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6d, - 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6d, 0x70, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x6a, 0x0a, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, - 0x61, 0x73, 0x68, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x16, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, - 0x61, 0x73, 0x68, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x12, 0x63, 0x0a, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, - 0x01, 0x48, 0x00, 0x52, 0x10, 0x6c, 0x72, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, - 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x63, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, - 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, - 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, - 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x5f, 0x77, 0x72, 0x72, - 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 
0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x57, 0x72, 0x72, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1d, 0x78, 0x64, - 0x73, 0x5f, 0x77, 0x72, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x65, - 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x84, 0x01, 0x0a, 0x1a, - 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x1a, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x42, 0x08, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, - 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, - 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, - 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, - 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 
0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x57, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, - 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, - 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, - 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, - 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, - 0x42, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_service_config_service_config_proto_rawDescOnce sync.Once - file_grpc_service_config_service_config_proto_rawDescData = file_grpc_service_config_service_config_proto_rawDesc -) - -func file_grpc_service_config_service_config_proto_rawDescGZIP() []byte { - file_grpc_service_config_service_config_proto_rawDescOnce.Do(func() { - file_grpc_service_config_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_service_config_service_config_proto_rawDescData) - }) - return file_grpc_service_config_service_config_proto_rawDescData -} - -var file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 37) -var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ - (XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism_Type)(0), // 0: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type - (ServiceConfig_LoadBalancingPolicy)(0), // 1: grpc.service_config.ServiceConfig.LoadBalancingPolicy - (*MethodConfig)(nil), // 2: grpc.service_config.MethodConfig - (*PickFirstConfig)(nil), // 3: grpc.service_config.PickFirstConfig - (*RoundRobinConfig)(nil), // 4: grpc.service_config.RoundRobinConfig - (*OutlierDetectionLoadBalancingConfig)(nil), // 5: grpc.service_config.OutlierDetectionLoadBalancingConfig - (*GrpcLbConfig)(nil), // 6: grpc.service_config.GrpcLbConfig - (*PriorityLoadBalancingPolicyConfig)(nil), // 7: grpc.service_config.PriorityLoadBalancingPolicyConfig - (*WeightedTargetLoadBalancingPolicyConfig)(nil), // 8: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - (*XdsClusterManagerLoadBalancingPolicyConfig)(nil), // 9: 
grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig - (*CdsConfig)(nil), // 10: grpc.service_config.CdsConfig - (*XdsServer)(nil), // 11: grpc.service_config.XdsServer - (*XdsClusterResolverLoadBalancingPolicyConfig)(nil), // 12: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - (*XdsClusterImplLoadBalancingPolicyConfig)(nil), // 13: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - (*EdsLoadBalancingPolicyConfig)(nil), // 14: grpc.service_config.EdsLoadBalancingPolicyConfig - (*RingHashLoadBalancingConfig)(nil), // 15: grpc.service_config.RingHashLoadBalancingConfig - (*LrsLoadBalancingPolicyConfig)(nil), // 16: grpc.service_config.LrsLoadBalancingPolicyConfig - (*XdsWrrLocalityLoadBalancingPolicyConfig)(nil), // 17: grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig - (*LeastRequestLocalityLoadBalancingPolicyConfig)(nil), // 18: grpc.service_config.LeastRequestLocalityLoadBalancingPolicyConfig - (*XdsConfig)(nil), // 19: grpc.service_config.XdsConfig - (*LoadBalancingConfig)(nil), // 20: grpc.service_config.LoadBalancingConfig - (*ServiceConfig)(nil), // 21: grpc.service_config.ServiceConfig - (*MethodConfig_Name)(nil), // 22: grpc.service_config.MethodConfig.Name - (*MethodConfig_RetryPolicy)(nil), // 23: grpc.service_config.MethodConfig.RetryPolicy - (*MethodConfig_HedgingPolicy)(nil), // 24: grpc.service_config.MethodConfig.HedgingPolicy - (*OutlierDetectionLoadBalancingConfig_SuccessRateEjection)(nil), // 25: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection - (*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection)(nil), // 26: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection - (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 27: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - nil, // 28: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 29: 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - nil, // 30: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - (*XdsClusterManagerLoadBalancingPolicyConfig_Child)(nil), // 31: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - nil, // 32: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - (*XdsServer_ChannelCredentials)(nil), // 33: grpc.service_config.XdsServer.ChannelCredentials - (*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism)(nil), // 34: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - (*XdsClusterImplLoadBalancingPolicyConfig_DropCategory)(nil), // 35: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 36: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - (*ServiceConfig_RetryThrottlingPolicy)(nil), // 37: grpc.service_config.ServiceConfig.RetryThrottlingPolicy - (*ServiceConfig_HealthCheckConfig)(nil), // 38: grpc.service_config.ServiceConfig.HealthCheckConfig - (*wrapperspb.BoolValue)(nil), // 39: google.protobuf.BoolValue - (*durationpb.Duration)(nil), // 40: google.protobuf.Duration - (*wrapperspb.UInt32Value)(nil), // 41: google.protobuf.UInt32Value - (*structpb.Value)(nil), // 42: google.protobuf.Value - (*wrapperspb.StringValue)(nil), // 43: google.protobuf.StringValue - (code.Code)(0), // 44: google.rpc.Code - (*structpb.Struct)(nil), // 45: google.protobuf.Struct -} -var file_grpc_service_config_service_config_proto_depIdxs = []int32{ - 22, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name - 39, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue - 40, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration - 41, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> 
google.protobuf.UInt32Value - 41, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value - 23, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy - 24, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy - 40, // 7: grpc.service_config.OutlierDetectionLoadBalancingConfig.interval:type_name -> google.protobuf.Duration - 40, // 8: grpc.service_config.OutlierDetectionLoadBalancingConfig.base_ejection_time:type_name -> google.protobuf.Duration - 40, // 9: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_time:type_name -> google.protobuf.Duration - 41, // 10: grpc.service_config.OutlierDetectionLoadBalancingConfig.max_ejection_percent:type_name -> google.protobuf.UInt32Value - 25, // 11: grpc.service_config.OutlierDetectionLoadBalancingConfig.success_rate_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection - 26, // 12: grpc.service_config.OutlierDetectionLoadBalancingConfig.failure_percentage_ejection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection - 20, // 13: grpc.service_config.OutlierDetectionLoadBalancingConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 14: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 40, // 15: grpc.service_config.GrpcLbConfig.initial_fallback_timeout:type_name -> google.protobuf.Duration - 28, // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 30, // 17: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 32, // 18: 
grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry - 33, // 19: grpc.service_config.XdsServer.channel_creds:type_name -> grpc.service_config.XdsServer.ChannelCredentials - 42, // 20: grpc.service_config.XdsServer.server_features:type_name -> google.protobuf.Value - 34, // 21: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.discovery_mechanisms:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism - 20, // 22: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.xds_lb_policy:type_name -> grpc.service_config.LoadBalancingConfig - 43, // 23: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 24: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 41, // 25: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 35, // 26: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.drop_categories:type_name -> grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.DropCategory - 20, // 27: grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 43, // 28: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 20, // 29: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 30: grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 36, // 31: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> 
grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 20, // 32: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 33: grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 34: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 35: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 43, // 36: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 3, // 37: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig - 4, // 38: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig - 5, // 39: grpc.service_config.LoadBalancingConfig.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig - 6, // 40: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig - 7, // 41: grpc.service_config.LoadBalancingConfig.priority_experimental:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig - 8, // 42: grpc.service_config.LoadBalancingConfig.weighted_target_experimental:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 9, // 43: grpc.service_config.LoadBalancingConfig.xds_cluster_manager_experimental:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig - 10, // 44: grpc.service_config.LoadBalancingConfig.cds_experimental:type_name -> grpc.service_config.CdsConfig - 12, // 45: grpc.service_config.LoadBalancingConfig.xds_cluster_resolver_experimental:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig - 13, // 46: grpc.service_config.LoadBalancingConfig.xds_cluster_impl_experimental:type_name -> 
grpc.service_config.XdsClusterImplLoadBalancingPolicyConfig - 15, // 47: grpc.service_config.LoadBalancingConfig.ring_hash_experimental:type_name -> grpc.service_config.RingHashLoadBalancingConfig - 16, // 48: grpc.service_config.LoadBalancingConfig.lrs_experimental:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig - 14, // 49: grpc.service_config.LoadBalancingConfig.eds_experimental:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 19, // 50: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 19, // 51: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig - 17, // 52: grpc.service_config.LoadBalancingConfig.xds_wrr_locality_experimental:type_name -> grpc.service_config.XdsWrrLocalityLoadBalancingPolicyConfig - 18, // 53: grpc.service_config.LoadBalancingConfig.least_request_experimental:type_name -> grpc.service_config.LeastRequestLocalityLoadBalancingPolicyConfig - 1, // 54: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 20, // 55: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 56: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 37, // 57: grpc.service_config.ServiceConfig.retry_throttling:type_name -> grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 38, // 58: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 40, // 59: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 40, // 60: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 44, // 61: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 40, // 62: 
grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 44, // 63: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 41, // 64: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.stdev_factor:type_name -> google.protobuf.UInt32Value - 41, // 65: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 41, // 66: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 41, // 67: grpc.service_config.OutlierDetectionLoadBalancingConfig.SuccessRateEjection.request_volume:type_name -> google.protobuf.UInt32Value - 41, // 68: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold:type_name -> google.protobuf.UInt32Value - 41, // 69: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage:type_name -> google.protobuf.UInt32Value - 41, // 70: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.minimum_hosts:type_name -> google.protobuf.UInt32Value - 41, // 71: grpc.service_config.OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.request_volume:type_name -> google.protobuf.UInt32Value - 20, // 72: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 27, // 73: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 20, // 74: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 29, // 75: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> 
grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 20, // 76: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 31, // 77: grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.XdsClusterManagerLoadBalancingPolicyConfig.Child - 45, // 78: grpc.service_config.XdsServer.ChannelCredentials.config:type_name -> google.protobuf.Struct - 43, // 79: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 80: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.lrs_load_reporting_server:type_name -> grpc.service_config.XdsServer - 41, // 81: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.max_concurrent_requests:type_name -> google.protobuf.UInt32Value - 0, // 82: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.type:type_name -> grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.Type - 5, // 83: grpc.service_config.XdsClusterResolverLoadBalancingPolicyConfig.DiscoveryMechanism.outlier_detection:type_name -> grpc.service_config.OutlierDetectionLoadBalancingConfig - 43, // 84: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 85, // [85:85] is the sub-list for method output_type - 85, // [85:85] is the sub-list for method input_type - 85, // [85:85] is the sub-list for extension type_name - 85, // [85:85] is the sub-list for extension extendee - 0, // [0:85] is the sub-list for field type_name -} - -func init() { file_grpc_service_config_service_config_proto_init() } -func file_grpc_service_config_service_config_proto_init() { - if File_grpc_service_config_service_config_proto != nil { - return - } - if 
!protoimpl.UnsafeEnabled { - file_grpc_service_config_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PickFirstConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RoundRobinConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutlierDetectionLoadBalancingConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLbConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedTargetLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CdsConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsServer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EdsLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RingHashLoadBalancingConfig); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LrsLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsWrrLocalityLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LeastRequestLocalityLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancingConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*MethodConfig_Name); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_RetryPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_HedgingPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutlierDetectionLoadBalancingConfig_SuccessRateEjection); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutlierDetectionLoadBalancingConfig_FailurePercentageEjection); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } 
- } - file_grpc_service_config_service_config_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterManagerLoadBalancingPolicyConfig_Child); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsServer_ChannelCredentials); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterResolverLoadBalancingPolicyConfig_DiscoveryMechanism); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsClusterImplLoadBalancingPolicyConfig_DropCategory); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LrsLoadBalancingPolicyConfig_Locality); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig_RetryThrottlingPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*ServiceConfig_HealthCheckConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_service_config_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*MethodConfig_RetryPolicy_)(nil), - (*MethodConfig_HedgingPolicy_)(nil), - } - file_grpc_service_config_service_config_proto_msgTypes[18].OneofWrappers = []interface{}{ - (*LoadBalancingConfig_PickFirst)(nil), - (*LoadBalancingConfig_RoundRobin)(nil), - (*LoadBalancingConfig_OutlierDetection)(nil), - (*LoadBalancingConfig_Grpclb)(nil), - (*LoadBalancingConfig_PriorityExperimental)(nil), - (*LoadBalancingConfig_WeightedTargetExperimental)(nil), - (*LoadBalancingConfig_XdsClusterManagerExperimental)(nil), - (*LoadBalancingConfig_CdsExperimental)(nil), - (*LoadBalancingConfig_XdsClusterResolverExperimental)(nil), - (*LoadBalancingConfig_XdsClusterImplExperimental)(nil), - (*LoadBalancingConfig_RingHashExperimental)(nil), - (*LoadBalancingConfig_LrsExperimental)(nil), - (*LoadBalancingConfig_EdsExperimental)(nil), - (*LoadBalancingConfig_Xds)(nil), - (*LoadBalancingConfig_XdsExperimental)(nil), - (*LoadBalancingConfig_XdsWrrLocalityExperimental)(nil), - (*LoadBalancingConfig_LeastRequestExperimental)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_service_config_service_config_proto_rawDesc, - NumEnums: 2, - NumMessages: 37, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_grpc_service_config_service_config_proto_goTypes, - DependencyIndexes: file_grpc_service_config_service_config_proto_depIdxs, - EnumInfos: file_grpc_service_config_service_config_proto_enumTypes, - MessageInfos: file_grpc_service_config_service_config_proto_msgTypes, - }.Build() - File_grpc_service_config_service_config_proto = out.File - file_grpc_service_config_service_config_proto_rawDesc = nil - 
file_grpc_service_config_service_config_proto_goTypes = nil - file_grpc_service_config_service_config_proto_depIdxs = nil -} diff --git a/regenerate.sh b/regenerate.sh index 978b89f37a4a..99db79fafcfb 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -68,7 +68,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -80,8 +79,7 @@ SOURCES=( # Note that the protos listed here are all for testing purposes. All protos to # be used externally should have a go_package option (and they don't need to be # listed here). -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ @@ -121,9 +119,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config - # grpc/testing does not have a go_package option. 
mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ From c6ee1c71447e2e5a1bbb017f6162a1840fb2e2c6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 23 Jun 2022 16:53:43 -0700 Subject: [PATCH 534/998] xdsclient: only include nodeID in error strings, not the whole nodeProto (#5461) --- xds/internal/xdsclient/authority.go | 10 +++++++++- xds/internal/xdsclient/pubsub/pubsub.go | 8 +++----- xds/internal/xdsclient/pubsub/watch.go | 4 ++-- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 9bc4588c14e0..26db726dd943 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -21,6 +21,8 @@ import ( "errors" "fmt" + v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/pubsub" @@ -102,7 +104,13 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth } // Make a new authority since there's no existing authority for this config. 
- ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, c.config.XDSServer.NodeProto, c.logger)} + nodeID := "" + if v3, ok := c.config.XDSServer.NodeProto.(*v3corepb.Node); ok { + nodeID = v3.GetId() + } else if v2, ok := c.config.XDSServer.NodeProto.(*v2corepb.Node); ok { + nodeID = v2.GetId() + } + ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, nodeID, c.logger)} defer func() { if retErr != nil { ret.close() diff --git a/xds/internal/xdsclient/pubsub/pubsub.go b/xds/internal/xdsclient/pubsub/pubsub.go index 48b5ce48910e..95e8ac77300e 100644 --- a/xds/internal/xdsclient/pubsub/pubsub.go +++ b/xds/internal/xdsclient/pubsub/pubsub.go @@ -23,11 +23,9 @@ package pubsub import ( - "fmt" "sync" "time" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -43,7 +41,7 @@ type Pubsub struct { done *grpcsync.Event logger *grpclog.PrefixLogger watchExpiryTimeout time.Duration - nodeIDJSON string + nodeID string updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate // All the following maps are to keep the updates/metadata in a cache. @@ -65,12 +63,12 @@ type Pubsub struct { // New creates a new Pubsub. // // The passed in nodeID will be attached to all errors sent to the watchers. 
-func New(watchExpiryTimeout time.Duration, nodeID proto.Message, logger *grpclog.PrefixLogger) *Pubsub { +func New(watchExpiryTimeout time.Duration, nodeID string, logger *grpclog.PrefixLogger) *Pubsub { pb := &Pubsub{ done: grpcsync.NewEvent(), logger: logger, watchExpiryTimeout: watchExpiryTimeout, - nodeIDJSON: fmt.Sprint(nodeID), + nodeID: nodeID, updateCh: buffer.NewUnbounded(), ldsWatchers: make(map[string]map[*watchInfo]bool), diff --git a/xds/internal/xdsclient/pubsub/watch.go b/xds/internal/xdsclient/pubsub/watch.go index 2fc6bb2d6030..bef179936a89 100644 --- a/xds/internal/xdsclient/pubsub/watch.go +++ b/xds/internal/xdsclient/pubsub/watch.go @@ -115,9 +115,9 @@ func (wi *watchInfo) sendErrorLocked(err error) { errMsg := err.Error() errTyp := xdsresource.ErrType(err) if errTyp == xdsresource.ErrorTypeUnknown { - err = fmt.Errorf("%v, xDS client nodeID: %s", errMsg, wi.c.nodeIDJSON) + err = fmt.Errorf("%v, xDS client nodeID: %s", errMsg, wi.c.nodeID) } else { - err = xdsresource.NewErrorf(errTyp, "%v, xDS client nodeID: %s", errMsg, wi.c.nodeIDJSON) + err = xdsresource.NewErrorf(errTyp, "%v, xDS client nodeID: %s", errMsg, wi.c.nodeID) } wi.c.scheduleCallback(wi, u, err) From d883f3d5faf91e030539ce310875850357c6c4a8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 24 Jun 2022 10:23:13 -0700 Subject: [PATCH 535/998] test/xds: fail only when state changes to something other than READY and IDLE (#5463) --- test/xds/xds_server_serving_mode_test.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/test/xds/xds_server_serving_mode_test.go b/test/xds/xds_server_serving_mode_test.go index 03a62ed6c784..118a63394f8b 100644 --- a/test/xds/xds_server_serving_mode_test.go +++ b/test/xds/xds_server_serving_mode_test.go @@ -112,9 +112,18 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { // suppressed, server will recycle client connections. 
errCh := make(chan error, 1) go func() { - if cc.WaitForStateChange(ctx, connectivity.Ready) { - errCh <- fmt.Errorf("unexpected connectivity state change {%s --> %s} on the client connection", connectivity.Ready, cc.GetState()) - return + prev := connectivity.Ready // We know we are READY since we just did an RPC. + for { + curr := cc.GetState() + if !(curr == connectivity.Ready || curr == connectivity.Idle) { + errCh <- fmt.Errorf("unexpected connectivity state change {%s --> %s} on the client connection", prev, curr) + return + } + if !cc.WaitForStateChange(ctx, curr) { + // Break out of the for loop when the context has been cancelled. + break + } + prev = curr } errCh <- nil }() From 4b750055a530f53ee3715fe6763bf8855677847b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 24 Jun 2022 10:48:40 -0700 Subject: [PATCH 536/998] clusterresolver: merge P(p)arseConfig functions (#5462) --- .../clusterresolver/clusterresolver.go | 6 +++++ .../balancer/clusterresolver/config.go | 19 -------------- .../balancer/clusterresolver/config_test.go | 25 +++++++++++-------- .../balancer/clusterresolver/configbuilder.go | 10 ++++---- .../clusterresolver/configbuilder_test.go | 4 +-- 5 files changed, 27 insertions(+), 37 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index d49014cfa433..9b373fb36970 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -23,10 +23,12 @@ import ( "encoding/json" "errors" "fmt" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" @@ -35,6 +37,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" 
"google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -99,6 +102,9 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(c), err) } + if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, roundrobin.Name) && !strings.EqualFold(lbp.Name, ringhash.Name) { + return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, roundrobin.Name, ringhash.Name) + } return &cfg, nil } diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index cb870027a4e4..2458b106772f 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -21,13 +21,10 @@ import ( "bytes" "encoding/json" "fmt" - "strings" - "google.golang.org/grpc/balancer/roundrobin" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/outlierdetection" - "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -167,19 +164,3 @@ type LBConfig struct { // is responsible for both locality picking and endpoint picking. 
XDSLBPolicy *internalserviceconfig.BalancerConfig `json:"xdsLbPolicy,omitempty"` } - -const ( - rrName = roundrobin.Name - rhName = ringhash.Name -) - -func parseConfig(c json.RawMessage) (*LBConfig, error) { - var cfg LBConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, err - } - if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, rrName) && !strings.EqualFold(lbp.Name, rhName) { - return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, rrName, rhName) - } - return &cfg, nil -} diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index fb859e75ba4b..6e2d8624050b 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/balancer" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -91,14 +91,6 @@ func TestDiscoveryMechanismTypeUnmarshalJSON(t *testing.T) { } } -func init() { - // This is needed now for the config parsing tests to pass. Otherwise they - // will fail with "RING_HASH unsupported". - // - // TODO: delete this once ring-hash policy is implemented and imported. - stub.Register(rhName, stub.BalancerFuncs{}) -} - const ( testJSONConfig1 = `{ "discoveryMechanisms": [{ @@ -257,7 +249,7 @@ func TestParseConfig(t *testing.T) { }, XDSLBPolicy: &internalserviceconfig.BalancerConfig{ Name: ringhash.Name, - Config: nil, + Config: &ringhash.LBConfig{MinRingSize: 1024, MaxRingSize: 8388608}, // Ringhash LB config with default min and max. 
}, }, wantErr: false, @@ -269,11 +261,22 @@ func TestParseConfig(t *testing.T) { }, } for _, tt := range tests { + b := balancer.Get(Name) + if b == nil { + t.Fatalf("LB policy %q not registered", Name) + } + cfgParser, ok := b.(balancer.ConfigParser) + if !ok { + t.Fatalf("LB policy %q does not support config parsing", Name) + } t.Run(tt.name, func(t *testing.T) { - got, err := parseConfig([]byte(tt.js)) + got, err := cfgParser.ParseConfig([]byte(tt.js)) if (err != nil) != tt.wantErr { t.Fatalf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) } + if tt.wantErr { + return + } if diff := cmp.Diff(got, tt.want); diff != "" { t.Errorf("parseConfig() got unexpected output, diff (-got +want): %v", diff) } diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index dc91d7fbd139..186409bf9bc7 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -304,22 +304,22 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority // ChildPolicy is not set. Will be set based on xdsLBPolicy } - if xdsLBPolicy == nil || xdsLBPolicy.Name == rrName { + if xdsLBPolicy == nil || xdsLBPolicy.Name == roundrobin.Name { // If lb policy is ROUND_ROBIN: // - locality-picking policy is weighted_target // - endpoint-picking policy is round_robin - logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", rrName) + logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", roundrobin.Name) // Child of weighted_target is hardcoded to round_robin. 
wtConfig, addrs := localitiesToWeightedTarget(localities, priorityName, rrBalancerConfig) clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig} return clusterImplCfg, addrs, nil } - if xdsLBPolicy.Name == rhName { + if xdsLBPolicy.Name == ringhash.Name { // If lb policy is RIHG_HASH, will build one ring_hash policy as child. // The endpoints from all localities will be flattened to one addresses // list, and the ring_hash policy will pick endpoints from it. - logger.Infof("xds lb policy is %q, building config with ring_hash", rhName) + logger.Infof("xds lb policy is %q, building config with ring_hash", ringhash.Name) addrs := localitiesToRingHash(localities, priorityName) // Set child to ring_hash, note that the ring_hash config is from // xdsLBPolicy. @@ -327,7 +327,7 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority return clusterImplCfg, addrs, nil } - return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, rrName, rhName) + return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, roundrobin.Name, ringhash.Name) } // localitiesToRingHash takes a list of localities (with the same priority), and diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index cfe7de65d1a0..d050df11b02a 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -731,7 +731,7 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, }, priorityName: "test-priority", - childPolicy: &internalserviceconfig.BalancerConfig{Name: rrName}, + childPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, mechanism: DiscoveryMechanism{ Cluster: testClusterName, Type: DiscoveryMechanismTypeEDS, @@ -789,7 +789,7 @@ func 
TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, }, priorityName: "test-priority", - childPolicy: &internalserviceconfig.BalancerConfig{Name: rhName, Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}}, + childPolicy: &internalserviceconfig.BalancerConfig{Name: ringhash.Name, Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}}, // lrsServer is nil, so LRS policy will not be used. wantConfig: &clusterimpl.LBConfig{ ChildPolicy: &internalserviceconfig.BalancerConfig{ From c075d2011c8b55585404b52c1d358e0cbb39c355 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Fri, 24 Jun 2022 11:34:16 -0700 Subject: [PATCH 537/998] interop client: provide new flag, --soak_min_time_ms_between_rpcs (#5421) --- interop/client/client.go | 5 +++-- interop/test_utils.go | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/interop/client/client.go b/interop/client/client.go index 3cfedfcb6542..1e3a46a75745 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -68,6 +68,7 @@ var ( soakMaxFailures = flag.Int("soak_max_failures", 0, "The number of iterations in soak tests that are allowed to fail (either due to non-OK status code or exceeding the per-iteration max acceptable latency).") soakPerIterationMaxAcceptableLatencyMs = flag.Int("soak_per_iteration_max_acceptable_latency_ms", 1000, "The number of milliseconds a single iteration in the two soak tests (rpc_soak and channel_soak) should take.") soakOverallTimeoutSeconds = flag.Int("soak_overall_timeout_seconds", 10, "The overall number of seconds after which a soak test should stop and fail, if the desired number of iterations have not yet completed.") + soakMinTimeMsBetweenRPCs = flag.Int("soak_min_time_ms_between_rpcs", 0, "The minimum time in milliseconds between consecutive RPCs in a soak test (rpc_soak or channel_soak), useful for limiting QPS") tlsServerName = flag.String("server_host_override", "", "The server name used to verify the hostname returned by TLS handshake if it is 
not empty. Otherwise, --server_host is used.") testCase = flag.String("test_case", "large_unary", `Configure different test cases. Valid options are: @@ -301,10 +302,10 @@ func main() { interop.DoPickFirstUnary(tc) logger.Infoln("PickFirstUnary done") case "rpc_soak": - interop.DoSoakTest(tc, serverAddr, opts, false /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + interop.DoSoakTest(tc, serverAddr, opts, false /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Duration(*soakMinTimeMsBetweenRPCs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) logger.Infoln("RpcSoak done") case "channel_soak": - interop.DoSoakTest(tc, serverAddr, opts, true /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + interop.DoSoakTest(tc, serverAddr, opts, true /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Duration(*soakMinTimeMsBetweenRPCs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) logger.Infoln("ChannelSoak done") default: logger.Fatal("Unsupported test case: ", *testCase) diff --git a/interop/test_utils.go b/interop/test_utils.go index 975a290bc6e6..dc841a110604 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -716,7 +716,7 @@ func doOneSoakIteration(ctx context.Context, tc testgrpc.TestServiceClient, rese // DoSoakTest runs large unary RPCs in a loop for a configurable number of times, with configurable failure thresholds. // If resetChannel is false, then each RPC will be performed on tc. 
Otherwise, each RPC will be performed on a new // stub that is created with the provided server address and dial options. -func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.DialOption, resetChannel bool, soakIterations int, maxFailures int, perIterationMaxAcceptableLatency time.Duration, overallDeadline time.Time) { +func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.DialOption, resetChannel bool, soakIterations int, maxFailures int, perIterationMaxAcceptableLatency time.Duration, minTimeBetweenRPCs time.Duration, overallDeadline time.Time) { start := time.Now() ctx, cancel := context.WithDeadline(context.Background(), overallDeadline) defer cancel() @@ -733,6 +733,7 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D if time.Now().After(overallDeadline) { break } + earliestNextStart := time.After(minTimeBetweenRPCs) iterationsDone++ var p peer.Peer latency, err := doOneSoakIteration(ctx, tc, resetChannel, serverAddr, dopts, []grpc.CallOption{grpc.Peer(&p)}) @@ -749,6 +750,7 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D continue } fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s succeeded\n", i, latencyMs, p.Addr.String()) + <-earliestNextStart } var b bytes.Buffer h.Print(&b) From 15739b5c88737e866194c45328750deb3521ab83 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 24 Jun 2022 13:31:35 -0700 Subject: [PATCH 538/998] health: split imports into healthpb and healthgrpc (#5466) --- examples/features/health/server/main.go | 3 ++- examples/features/xds/server/main.go | 3 ++- interop/xds/server/server.go | 5 +++-- test/end2end_test.go | 6 +++--- test/healthcheck_test.go | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/examples/features/health/server/main.go b/examples/features/health/server/main.go index 3f79c8ba3470..65039b38d5be 100644 --- a/examples/features/health/server/main.go +++ 
b/examples/features/health/server/main.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/health" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" ) @@ -62,7 +63,7 @@ func main() { s := grpc.NewServer() healthcheck := health.NewServer() - healthpb.RegisterHealthServer(s, healthcheck) + healthgrpc.RegisterHealthServer(s, healthcheck) pb.RegisterEchoServer(s, &echoServer{}) go func() { diff --git a/examples/features/xds/server/main.go b/examples/features/xds/server/main.go index 0367060f4b5d..a7889dbdf3c3 100644 --- a/examples/features/xds/server/main.go +++ b/examples/features/xds/server/main.go @@ -35,6 +35,7 @@ import ( xdscreds "google.golang.org/grpc/credentials/xds" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/health" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/xds" ) @@ -95,7 +96,7 @@ func main() { grpcServer := grpc.NewServer() healthServer := health.NewServer() healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - healthpb.RegisterHealthServer(grpcServer, healthServer) + healthgrpc.RegisterHealthServer(grpcServer, healthServer) log.Printf("Serving GreeterService on %s and HealthService on %s", greeterLis.Addr().String(), healthLis.Addr().String()) go func() { diff --git a/interop/xds/server/server.go b/interop/xds/server/server.go index 5932199de6c6..4bc69ae2fe6d 100644 --- a/interop/xds/server/server.go +++ b/interop/xds/server/server.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/xds" xdscreds "google.golang.org/grpc/credentials/xds" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb 
"google.golang.org/grpc/interop/grpc_testing" @@ -125,7 +126,7 @@ func main() { server := grpc.NewServer() testgrpc.RegisterTestServiceServer(server, testService) healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - healthpb.RegisterHealthServer(server, healthServer) + healthgrpc.RegisterHealthServer(server, healthServer) testgrpc.RegisterXdsUpdateHealthServiceServer(server, updateHealthService) reflection.Register(server) cleanup, err := admin.Register(server) @@ -172,7 +173,7 @@ func main() { // it and start serving. maintenanceServer := grpc.NewServer() healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - healthpb.RegisterHealthServer(maintenanceServer, healthServer) + healthgrpc.RegisterHealthServer(maintenanceServer, healthServer) testgrpc.RegisterXdsUpdateHealthServiceServer(maintenanceServer, updateHealthService) reflection.Register(maintenanceServer) cleanup, err := admin.Register(maintenanceServer) diff --git a/test/end2end_test.go b/test/end2end_test.go index 3c38e00006af..da0acbf3d75d 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -470,7 +470,7 @@ type test struct { // expose the server's health using the default health service // implementation. This should only be used when a non-default health service // implementation is required. - healthServer healthpb.HealthServer + healthServer healthgrpc.HealthServer maxStream uint32 tapHandle tap.ServerInHandle maxServerMsgSize *int @@ -512,12 +512,12 @@ type test struct { // These are are set once startServer is called. The common case is to have // only one testServer. srv stopper - hSrv healthpb.HealthServer + hSrv healthgrpc.HealthServer srvAddr string // These are are set once startServers is called. 
srvs []stopper - hSrvs []healthpb.HealthServer + hSrvs []healthgrpc.HealthServer srvAddrs []string cc *grpc.ClientConn // nil until requested via clientConn diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index 0dba4d7f495f..0c4f1a324db4 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -86,7 +86,7 @@ func defaultWatchFunc(s *testHealthServer, in *healthpb.HealthCheckRequest, stre type healthWatchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error type testHealthServer struct { - healthpb.UnimplementedHealthServer + healthgrpc.UnimplementedHealthServer watchFunc healthWatchFunc mu sync.Mutex status map[string]healthpb.HealthCheckResponse_ServingStatus From 755bf5a191a0c1fd427c0794121dac58475f080e Mon Sep 17 00:00:00 2001 From: kennylong Date: Sat, 25 Jun 2022 06:38:14 +0800 Subject: [PATCH 539/998] fix typo in the binary log (#5467) Signed-off-by: kennylong --- internal/binarylog/binarylog.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index 0a25ce43f3f0..e3dfe204f9ae 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -42,14 +42,14 @@ var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetLogger gets the binarg logger. +// GetLogger gets the binary logger. // // Only call this at init time. 
func GetLogger() Logger { From c9b16c884c9797f5cdde9c2aeba2c20320213cd6 Mon Sep 17 00:00:00 2001 From: mitchsw Date: Tue, 28 Jun 2022 13:37:44 -0400 Subject: [PATCH 540/998] transport: remove unused `bufWriter.onFlush()` (#5464) --- internal/transport/http_util.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index d8247bcdf692..b77513068622 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -322,8 +322,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -360,9 +358,6 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) w.offset = 0 return w.err From 423cd8e3ad5ff82dee28390f6fecd0e8658b4cd8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 29 Jun 2022 12:14:24 -0700 Subject: [PATCH 541/998] interop: update proto to make vet happy (#5475) --- interop/grpc_testing/control.pb.go | 638 +++++++++++++++-------------- 1 file changed, 334 insertions(+), 304 deletions(-) diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index ad8d6ea6a123..ce60ba06f5a2 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -24,6 +24,7 @@ import ( proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -1524,6 +1525,9 @@ type ScenarioResultSummary struct { // Queries per CPU-sec over all servers or clients ServerQueriesPerCpuSec float64 `protobuf:"fixed64,17,opt,name=server_queries_per_cpu_sec,json=serverQueriesPerCpuSec,proto3" json:"server_queries_per_cpu_sec,omitempty"` 
ClientQueriesPerCpuSec float64 `protobuf:"fixed64,18,opt,name=client_queries_per_cpu_sec,json=clientQueriesPerCpuSec,proto3" json:"client_queries_per_cpu_sec,omitempty"` + // Start and end time for the test scenario + StartTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` } func (x *ScenarioResultSummary) Reset() { @@ -1684,6 +1688,20 @@ func (x *ScenarioResultSummary) GetClientQueriesPerCpuSec() float64 { return 0 } +func (x *ScenarioResultSummary) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *ScenarioResultSummary) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + // Results of a single benchmark scenario. type ScenarioResult struct { state protoimpl.MessageState @@ -1813,299 +1831,308 @@ var file_grpc_testing_control_proto_rawDesc = []byte{ 0x2f, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x32, 0x0a, 0x0d, 0x50, 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x65, - 0x64, 0x4c, 0x6f, 0x61, 0x64, 0x22, 0x12, 0x0a, 0x10, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x4c, - 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0a, 0x4c, 0x6f, - 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x6c, 0x6f, 0x73, - 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x6f, - 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, - 0x0a, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x12, 0x37, 0x0a, 0x07, 0x70, - 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x6f, 0x69, 0x73, - 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x07, 0x70, 0x6f, 0x69, - 0x73, 0x73, 0x6f, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x7f, 0x0a, 0x0e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1e, - 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x75, 0x73, 0x65, 0x54, 0x65, 0x73, 0x74, 0x43, 0x61, 0x12, 0x30, - 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6f, 0x76, - 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x72, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x22, 0x67, 0x0a, - 0x0a, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, - 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 
0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xf6, 0x07, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x39, - 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x3f, 0x0a, 0x1c, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, - 0x72, 0x70, 0x63, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x19, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x52, 0x70, 0x63, 0x73, 0x50, 0x65, 0x72, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x73, - 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 
0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, - 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x08, - 0x72, 0x70, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x70, - 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x72, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, - 0x0a, 0x0b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0a, 0x6c, - 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, - 0x10, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x5f, - 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 
0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, - 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x69, 0x12, 0x3b, 0x0a, - 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x10, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x52, 0x0b, 0x63, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x68, - 0x72, 0x65, 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, 0x71, - 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x12, 0x28, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, 0x65, - 0x5f, 0x61, 0x70, 0x69, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x43, - 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, 0x65, 0x41, 0x70, 0x69, 0x12, 0x58, 0x0a, 0x29, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x05, 0x52, 0x25, 0x6d, - 0x65, 0x64, 0x69, 0x61, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4d, 0x69, - 0x6c, 0x6c, 0x69, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, - 0x3f, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x32, 0x0a, 0x0d, 0x50, 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x6c, + 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x6f, 0x66, 0x66, 0x65, 0x72, + 0x65, 0x64, 0x4c, 0x6f, 0x61, 0x64, 0x22, 0x12, 0x0a, 0x10, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, + 0x4c, 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0a, 0x4c, + 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x6c, 0x6f, + 0x73, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x22, 0x1c, 0x0a, 0x04, 0x4d, 0x61, 0x72, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x73, 0x65, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x73, 0x65, 0x74, 0x22, 0x75, - 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, - 0x73, 0x65, 0x74, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, - 0x12, 0x28, 0x0a, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, - 0x72, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x72, - 0x67, 0x74, 0x79, 0x70, 0x65, 0x22, 0xc0, 0x04, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x14, - 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x68, 0x72, - 0x65, 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, 0x6e, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x1d, - 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, - 
0x28, 0x05, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x42, 0x0a, - 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x28, - 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, - 0x70, 0x69, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x70, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x65, - 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0c, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, 0x71, 0x12, 0x2f, - 0x0a, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x3c, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, - 0xea, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, - 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x29, 0x0a, - 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 
0x05, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x75, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x12, 0x28, 0x0a, 0x04, 0x6d, 0x61, - 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, 0x72, 0x6b, 0x48, 0x00, 0x52, 0x04, - 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x72, 0x67, 0x74, 0x79, 0x70, 0x65, 0x22, - 0x69, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x22, 0x0d, 0x0a, 0x0b, 0x43, 0x6f, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x0c, 0x43, 0x6f, 0x72, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x22, - 0x06, 0x0a, 0x04, 0x56, 0x6f, 0x69, 0x64, 0x22, 0xef, 0x02, 0x0a, 0x08, 0x53, 0x63, 0x65, 0x6e, - 0x61, 0x72, 0x69, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, - 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6e, 0x75, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0d, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, - 0x75, 0x6d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, - 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x53, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, - 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, - 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x12, 0x37, 0x0a, 0x18, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x05, 
0x52, 0x15, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x09, 0x53, 0x63, 0x65, - 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x12, 0x34, 0x0a, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, - 0x69, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, - 0x6f, 0x52, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x22, 0xbb, 0x06, 0x0a, - 0x15, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x03, 0x71, 0x70, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x71, 0x70, 0x73, 0x5f, - 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x71, 0x70, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x43, 0x6f, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, - 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x2c, 0x0a, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 
0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55, - 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x5f, 0x35, 0x30, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x35, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x5f, 0x39, 0x30, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x39, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x5f, 0x39, 0x35, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x39, 0x35, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, - 0x39, 0x39, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x39, 0x39, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x39, - 0x39, 0x39, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x39, 0x39, 0x39, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, - 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, - 0x0a, 0x1e, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, - 0x75, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x6f, 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 
0x48, 0x00, + 0x52, 0x0a, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x12, 0x37, 0x0a, 0x07, + 0x70, 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x6f, 0x69, + 0x73, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x07, 0x70, 0x6f, + 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x7f, 0x0a, + 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x1e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x75, 0x73, 0x65, 0x54, 0x65, 0x73, 0x74, 0x43, 0x61, 0x12, + 0x30, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x72, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x22, 0x67, + 0x0a, 0x0a, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xf6, 0x07, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, + 0x39, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x19, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x52, 0x70, 0x63, 0x73, 0x50, 0x65, 0x72, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, + 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, 0x6e, 0x63, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x65, 
0x61, 0x64, 0x73, 0x12, 0x30, 0x0a, + 0x08, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, + 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x72, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x39, 0x0a, 0x0b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0a, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, + 0x0a, 0x10, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, + 
0x69, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x6f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x69, 0x12, 0x3b, + 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x10, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x52, 0x0b, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, + 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, + 0x71, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, + 0x65, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x75, 0x73, 0x65, + 0x43, 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, 0x65, 0x41, 0x70, 0x69, 0x12, 0x58, 0x0a, 0x29, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x05, 0x52, 0x25, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4d, + 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 
0x73, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x22, 0x3f, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x22, 0x1c, 0x0a, 0x04, 0x4d, 0x61, 0x72, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x73, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x73, 0x65, 0x74, 0x22, + 0x75, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, + 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, + 0x70, 0x12, 0x28, 0x0a, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, + 0x61, 0x72, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, + 0x72, 0x67, 0x74, 0x79, 0x70, 0x65, 0x22, 0xc0, 0x04, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 
0x74, 0x79, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, + 0x14, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x68, + 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, + 0x6e, 0x63, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x42, + 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x28, 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x61, 0x70, 0x69, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x74, 0x68, 0x65, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x70, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x68, 0x72, + 0x65, 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x05, 0x52, 
0x0c, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, 0x71, 0x12, + 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, + 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, + 0x18, 0xea, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, + 0x67, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x29, + 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x75, 0x0a, 0x0a, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x12, 0x28, 0x0a, 0x04, 0x6d, + 0x61, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, 0x72, 0x6b, 0x48, 0x00, 0x52, + 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x72, 0x67, 0x74, 0x79, 0x70, 0x65, + 0x22, 0x69, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 
0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x22, 0x0d, 0x0a, 0x0b, 0x43, + 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x0c, 0x43, 0x6f, + 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, + 0x22, 0x06, 0x0a, 0x04, 0x56, 0x6f, 0x69, 0x64, 0x22, 0xef, 0x02, 0x0a, 0x08, 0x53, 0x63, 0x65, + 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x75, + 0x6d, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x6e, 0x75, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0d, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 
0x0a, 0x0b, + 0x6e, 0x75, 0x6d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, + 0x6b, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x10, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x15, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x09, 0x53, 0x63, + 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x12, 0x34, 0x0a, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, + 0x72, 0x69, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, + 0x69, 0x6f, 0x52, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x22, 0xad, 0x07, + 0x0a, 0x15, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x71, 0x70, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x71, 0x70, 0x73, + 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x71, 0x70, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 
0x43, 0x6f, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x5f, 0x35, 0x30, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x35, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x5f, 0x39, 0x30, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x39, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x39, 0x35, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x39, 0x35, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x5f, 0x39, 0x39, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x39, 0x39, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, + 0x39, 0x39, 0x39, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 
0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x39, 0x39, 0x39, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x43, 0x0a, 0x1e, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x12, 0x37, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x73, - 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x0f, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x6c, 0x73, 0x50, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x71, 0x75, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, 0x63, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x51, 0x75, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x12, 0x3a, - 0x0a, 0x1a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, - 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x12, 0x20, 0x01, - 
0x28, 0x01, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x22, 0xf6, 0x03, 0x0a, 0x0e, 0x53, - 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, - 0x08, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, - 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x08, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, - 0x6f, 0x12, 0x39, 0x0a, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x3c, 0x0a, 0x0c, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x07, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 
0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, - 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x07, 0x20, 0x03, - 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x2a, 0x56, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, 0x43, 0x4c, - 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4c, 0x4c, 0x42, 0x41, - 0x43, 0x4b, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x2a, 0x70, 0x0a, 0x0a, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, - 0x43, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x00, 0x12, 0x10, 
0x0a, 0x0c, 0x41, 0x53, - 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, - 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, 0x5f, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4c, 0x4c, - 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x04, 0x2a, 0x72, 0x0a, - 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x41, 0x52, - 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, - 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, - 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x19, 0x0a, - 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x52, 0x45, - 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x57, 0x41, 0x59, 0x53, 0x10, - 0x04, 0x42, 0x21, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x66, 0x75, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x12, 0x37, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, + 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x6c, 0x73, + 0x50, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x71, 0x75, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, + 0x63, 0x18, 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x51, + 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x12, + 0x3a, 0x0a, 0x1a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x51, 0x75, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x12, 0x39, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf6, 0x03, + 0x0a, 0x0e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x08, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x08, 0x73, 0x63, 0x65, 0x6e, + 0x61, 0x72, 0x69, 0x6f, 0x12, 0x39, 0x0a, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, + 0x3c, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x3c, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x3d, + 0x0a, 0x07, 
0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, + 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x25, 0x0a, + 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a, 0x0f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2a, 0x56, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, 0x45, 0x52, + 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4c, + 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x2a, 0x70, + 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x45, 0x52, 
0x56, 0x45, 0x52, 0x10, 0x00, 0x12, 0x10, 0x0a, + 0x0c, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x01, 0x12, + 0x18, 0x0a, 0x14, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, + 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, + 0x45, 0x52, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x43, + 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x04, + 0x2a, 0x72, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, + 0x4e, 0x41, 0x52, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, + 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, + 0x4e, 0x47, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x52, + 0x4f, 0x4d, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, + 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x57, 0x41, + 0x59, 0x53, 0x10, 0x04, 0x42, 0x21, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2149,8 +2176,9 @@ var file_grpc_testing_control_proto_goTypes = []interface{}{ (*HistogramParams)(nil), // 23: grpc.testing.HistogramParams (*ClientStats)(nil), // 24: grpc.testing.ClientStats (*ServerStats)(nil), // 25: grpc.testing.ServerStats - (*HistogramData)(nil), // 26: grpc.testing.HistogramData - (*RequestResultCount)(nil), // 27: grpc.testing.RequestResultCount + (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp + (*HistogramData)(nil), // 27: 
grpc.testing.HistogramData + (*RequestResultCount)(nil), // 28: grpc.testing.RequestResultCount } var file_grpc_testing_control_proto_depIdxs = []int32{ 4, // 0: grpc.testing.LoadParams.closed_loop:type_name -> grpc.testing.ClosedLoopParams @@ -2175,17 +2203,19 @@ var file_grpc_testing_control_proto_depIdxs = []int32{ 8, // 19: grpc.testing.Scenario.client_config:type_name -> grpc.testing.ClientConfig 12, // 20: grpc.testing.Scenario.server_config:type_name -> grpc.testing.ServerConfig 18, // 21: grpc.testing.Scenarios.scenarios:type_name -> grpc.testing.Scenario - 18, // 22: grpc.testing.ScenarioResult.scenario:type_name -> grpc.testing.Scenario - 26, // 23: grpc.testing.ScenarioResult.latencies:type_name -> grpc.testing.HistogramData - 24, // 24: grpc.testing.ScenarioResult.client_stats:type_name -> grpc.testing.ClientStats - 25, // 25: grpc.testing.ScenarioResult.server_stats:type_name -> grpc.testing.ServerStats - 20, // 26: grpc.testing.ScenarioResult.summary:type_name -> grpc.testing.ScenarioResultSummary - 27, // 27: grpc.testing.ScenarioResult.request_results:type_name -> grpc.testing.RequestResultCount - 28, // [28:28] is the sub-list for method output_type - 28, // [28:28] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 26, // 22: grpc.testing.ScenarioResultSummary.start_time:type_name -> google.protobuf.Timestamp + 26, // 23: grpc.testing.ScenarioResultSummary.end_time:type_name -> google.protobuf.Timestamp + 18, // 24: grpc.testing.ScenarioResult.scenario:type_name -> grpc.testing.Scenario + 27, // 25: grpc.testing.ScenarioResult.latencies:type_name -> grpc.testing.HistogramData + 24, // 26: grpc.testing.ScenarioResult.client_stats:type_name -> grpc.testing.ClientStats + 25, // 27: grpc.testing.ScenarioResult.server_stats:type_name -> grpc.testing.ServerStats + 20, // 28: 
grpc.testing.ScenarioResult.summary:type_name -> grpc.testing.ScenarioResultSummary + 28, // 29: grpc.testing.ScenarioResult.request_results:type_name -> grpc.testing.RequestResultCount + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name } func init() { file_grpc_testing_control_proto_init() } From 5770b1dea58dd6a8bddb0585e40ed63d4d096e27 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 30 Jun 2022 15:28:57 -0700 Subject: [PATCH 542/998] xds: drop localities with zero weight at the xdsClient layer (#5476) --- .../balancer/clusterresolver/configbuilder.go | 3 --- .../balancer/clusterresolver/eds_impl_test.go | 25 ------------------- .../xdsclient/xdsresource/type_eds.go | 5 +++- .../xdsclient/xdsresource/unmarshal_eds.go | 9 +++++-- .../xdsresource/unmarshal_eds_test.go | 16 +++++++++++- 5 files changed, 26 insertions(+), 32 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 186409bf9bc7..a29658ec3141 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -251,9 +251,6 @@ func groupLocalitiesByPriority(localities []xdsresource.Locality) [][]xdsresourc var priorityIntSlice []int priorities := make(map[int][]xdsresource.Locality) for _, locality := range localities { - if locality.Weight == 0 { - continue - } priority := int(locality.Priority) priorities[priority] = append(priorities[priority], locality) priorityIntSlice = append(priorityIntSlice, priority) diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go index ddafa18a6100..2621b791494b 100644 --- 
a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/eds_impl_test.go @@ -295,31 +295,6 @@ func (s) TestEDS_TwoLocalities(t *testing.T) { if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2, sc2, sc2, sc2, sc3, sc4}); err != nil { t.Fatal(err) } - - // Change weight of the locality[1] to 0, it should never be picked. - clab6 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab6.AddLocality(testSubZones[1], 0, 0, testEndpointAddrs[1:2], nil) - clab6.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab6.Build()), nil) - - // Changing weight of locality[1] to 0 caused it to be removed. It's subconn - // should also be removed. - // - // NOTE: this is because we handle locality with weight 0 same as the - // locality doesn't exist. If this changes in the future, this removeSubConn - // behavior will also change. - scToRemove2 := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove2, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove2) - } - - // Test pick with two subconns different locality weight. - // - // Locality-1 will be not be picked, and locality-2 will be picked. - // Locality-2 contains sc3 and sc4. So expect sc3, sc4. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3, sc4}); err != nil { - t.Fatal(err) - } } // The EDS balancer gets EDS resp with unhealthy endpoints. Test that only diff --git a/xds/internal/xdsclient/xdsresource/type_eds.go b/xds/internal/xdsclient/xdsresource/type_eds.go index ad590160f6af..ec70f32ca436 100644 --- a/xds/internal/xdsclient/xdsresource/type_eds.go +++ b/xds/internal/xdsclient/xdsresource/type_eds.go @@ -64,7 +64,10 @@ type Locality struct { // EndpointsUpdate contains an EDS update. 
type EndpointsUpdate struct { - Drops []OverloadDropConfig + Drops []OverloadDropConfig + // Localities in the EDS response with `load_balancing_weight` field not set + // or explicitly set to 0 are ignored while parsing the resource, and + // therefore do not show up here. Localities []Locality // Raw is the resource from the xds response. diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index 9eb7117d9a22..7cc12d73d6d5 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -57,7 +57,7 @@ func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (str } logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) - u, err := parseEDSRespProto(cla) + u, err := parseEDSRespProto(cla, logger) if err != nil { return cla.GetClusterName(), EndpointsUpdate{}, err } @@ -102,7 +102,7 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint { return endpoints } -func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.PrefixLogger) (EndpointsUpdate, error) { ret := EndpointsUpdate{} for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) @@ -113,6 +113,11 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, if l == nil { return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) } + weight := locality.GetLoadBalancingWeight().GetValue() + if weight == 0 { + logger.Warningf("Ignoring locality %s with weight 0", pretty.ToJSON(l)) + continue + } lid := internal.LocalityID{ Region: l.Region, Zone: l.Zone, diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go 
b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index db9d4f52896b..3ce069c29664 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -75,6 +75,20 @@ func (s) TestEDSParseRespProto(t *testing.T) { want: EndpointsUpdate{}, wantErr: true, }, + { + name: "missing locality weight", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 0, 1, []string{"addr1:314"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_HEALTHY}, + }) + clab0.addLocality("locality-2", 0, 0, []string{"addr2:159"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_HEALTHY}, + }) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + }, { name: "good", m: func() *v3endpointpb.ClusterLoadAssignment { @@ -161,7 +175,7 @@ func (s) TestEDSParseRespProto(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := parseEDSRespProto(tt.m) + got, err := parseEDSRespProto(tt.m, nil) if (err != nil) != tt.wantErr { t.Errorf("parseEDSRespProto() error = %v, wantErr %v", err, tt.wantErr) return From 8c494a9aed3e60c60875d9559bf7526e6defc7ea Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 1 Jul 2022 23:03:10 +0000 Subject: [PATCH 543/998] Change version to 1.49.0-dev (#5484) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 1d73c214920e..e0ec0ffdbf7f 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.48.0-dev" +const Version = "1.49.0-dev" From a6dcb714b2fe351f9eca6b915d52166683b30e6c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 6 Jul 2022 10:31:02 -0700 Subject: [PATCH 544/998] xdsclient: don't reset version info after stream restart (#5422) --- internal/testutils/xds/e2e/logging.go | 48 ++++++ internal/testutils/xds/e2e/server.go | 100 +++++++---- .../xds/e2e/setup_management_server.go | 4 +- test/xds/xds_client_ack_nack_test.go | 160 ++++++++++++++++++ test/xds/xds_client_affinity_test.go | 2 +- test/xds/xds_client_federation_test.go | 4 +- test/xds/xds_client_integration_test.go | 2 +- test/xds/xds_client_retry_test.go | 2 +- .../xds_rls_clusterspecifier_plugin_test.go | 2 +- test/xds/xds_security_config_nack_test.go | 4 +- test/xds/xds_server_integration_test.go | 6 +- test/xds/xds_server_rbac_test.go | 8 +- test/xds/xds_server_serving_mode_test.go | 4 +- xds/csds/csds_test.go | 2 +- xds/internal/httpfilter/fault/fault_test.go | 2 +- xds/internal/test/e2e/controlplane.go | 2 +- .../xdsclient/controller/controller.go | 2 +- .../xdsclient/controller/transport.go | 18 +- 18 files changed, 309 insertions(+), 63 deletions(-) create mode 100644 internal/testutils/xds/e2e/logging.go create mode 100644 test/xds/xds_client_ack_nack_test.go diff --git a/internal/testutils/xds/e2e/logging.go b/internal/testutils/xds/e2e/logging.go new file mode 100644 index 000000000000..f524c451b002 --- /dev/null +++ b/internal/testutils/xds/e2e/logging.go @@ -0,0 +1,48 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("xds-e2e") + +// serverLogger implements the Logger interface defined at +// envoyproxy/go-control-plane/pkg/log. This is passed to the Snapshot cache. +type serverLogger struct{} + +func (l serverLogger) Debugf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.InfoDepth(1, msg) +} +func (l serverLogger) Infof(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.InfoDepth(1, msg) +} +func (l serverLogger) Warnf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.WarningDepth(1, msg) +} +func (l serverLogger) Errorf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) 
+ logger.ErrorDepth(1, msg) +} diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go index e611c56c673c..efe68be299b5 100644 --- a/internal/testutils/xds/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -26,43 +26,19 @@ import ( "reflect" "strconv" - "github.com/envoyproxy/go-control-plane/pkg/cache/types" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/envoyproxy/go-control-plane/pkg/cache/types" v3cache "github.com/envoyproxy/go-control-plane/pkg/cache/v3" v3resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" v3server "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "google.golang.org/grpc" ) -var logger = grpclog.Component("xds-e2e") - -// serverLogger implements the Logger interface defined at -// envoyproxy/go-control-plane/pkg/log. This is passed to the Snapshot cache. -type serverLogger struct{} - -func (l serverLogger) Debugf(format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - logger.InfoDepth(1, msg) -} -func (l serverLogger) Infof(format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - logger.InfoDepth(1, msg) -} -func (l serverLogger) Warnf(format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- logger.WarningDepth(1, msg) -} -func (l serverLogger) Errorf(format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - logger.ErrorDepth(1, msg) -} - // ManagementServer is a thin wrapper around the xDS control plane // implementation provided by envoyproxy/go-control-plane. type ManagementServer struct { @@ -77,27 +53,81 @@ type ManagementServer struct { version int // Version of resource snapshot. } +// ManagementServerOptions contains options to be passed to the management +// server during creation. +type ManagementServerOptions struct { + // Listener to accept connections on. If nil, a TPC listener on a local port + // will be created and used. + Listener net.Listener + + // The callbacks defined below correspond to the state of the world (sotw) + // version of the xDS API on the management server. + + // OnStreamOpen is called when an xDS stream is opened. The callback is + // invoked with the assigned stream ID and the type URL from the incoming + // request (or "" for ADS). + // + // Returning an error from this callback will end processing and close the + // stream. OnStreamClosed will still be called. + OnStreamOpen func(context.Context, int64, string) error + + // OnStreamClosed is called immediately prior to closing an xDS stream. The + // callback is invoked with the stream ID of the stream being closed. + OnStreamClosed func(int64) + + // OnStreamRequest is called when a request is received on the stream. The + // callback is invoked with the stream ID of the stream on which the request + // was received and the received request. + // + // Returning an error from this callback will end processing and close the + // stream. OnStreamClosed will still be called. + OnStreamRequest func(int64, *v3discoverypb.DiscoveryRequest) error + + // OnStreamResponse is called immediately prior to sending a response on the + // stream. 
The callback is invoked with the stream ID of the stream on which + // the response is being sent along with the incoming request and the outgoing + // response. + OnStreamResponse func(context.Context, int64, *v3discoverypb.DiscoveryRequest, *v3discoverypb.DiscoveryResponse) +} + // StartManagementServer initializes a management server which implements the // AggregatedDiscoveryService endpoint. The management server is initialized // with no resources. Tests should call the Update() method to change the // resource snapshot held by the management server, as required by the test // logic. When the test is done, it should call the Stop() method to cleanup // resources allocated by the management server. -func StartManagementServer() (*ManagementServer, error) { +func StartManagementServer(opts *ManagementServerOptions) (*ManagementServer, error) { // Create a snapshot cache. cache := v3cache.NewSnapshotCache(true, v3cache.IDHash{}, serverLogger{}) logger.Infof("Created new snapshot cache...") - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, fmt.Errorf("failed to start xDS management server: %v", err) + var lis net.Listener + if opts != nil && opts.Listener != nil { + lis = opts.Listener + } else { + var err error + lis, err = net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("failed to start xDS management server: %v", err) + } } - // Create an xDS management server and register the ADS implementation - // provided by it on a gRPC server. Cancelling the context passed to the - // server is the only way of stopping it at the end of the test. + // Cancelling the context passed to the server is the only way of stopping it + // at the end of the test. 
ctx, cancel := context.WithCancel(context.Background()) - xs := v3server.NewServer(ctx, cache, v3server.CallbackFuncs{}) + callbacks := v3server.CallbackFuncs{} + if opts != nil { + callbacks = v3server.CallbackFuncs{ + StreamOpenFunc: opts.OnStreamOpen, + StreamClosedFunc: opts.OnStreamClosed, + StreamRequestFunc: opts.OnStreamRequest, + StreamResponseFunc: opts.OnStreamResponse, + } + } + + // Create an xDS management server and register the ADS implementation + // provided by it on a gRPC server. + xs := v3server.NewServer(ctx, cache, callbacks) gs := grpc.NewServer() v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(gs, xs) logger.Infof("Registered Aggregated Discovery Service (ADS)...") diff --git a/internal/testutils/xds/e2e/setup_management_server.go b/internal/testutils/xds/e2e/setup_management_server.go index ca45363d6e0b..c61f0620cb11 100644 --- a/internal/testutils/xds/e2e/setup_management_server.go +++ b/internal/testutils/xds/e2e/setup_management_server.go @@ -41,11 +41,11 @@ import ( // - bootstrap contents to be used by the client // - xDS resolver builder to be used by the client // - a cleanup function to be invoked at the end of the test -func SetupManagementServer(t *testing.T) (*ManagementServer, string, []byte, resolver.Builder, func()) { +func SetupManagementServer(t *testing.T, opts *ManagementServerOptions) (*ManagementServer, string, []byte, resolver.Builder, func()) { t.Helper() // Spin up an xDS management server on a local port. - server, err := StartManagementServer() + server, err := StartManagementServer(opts) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } diff --git a/test/xds/xds_client_ack_nack_test.go b/test/xds/xds_client_ack_nack_test.go new file mode 100644 index 000000000000..ca0ec56e284b --- /dev/null +++ b/test/xds/xds_client_ack_nack_test.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +// We are interested in LDS, RDS, CDS and EDS resources as part of the regular +// xDS flow on the client. +const wantResources = 4 + +// seenAllACKs returns true if we have seen two streams with acks for all the +// resources that we are interested in. +func seenAllACKs(acks map[int64]map[string]string) bool { + if len(acks) != 2 { + return false + } + for _, v := range acks { + if len(v) != wantResources { + return false + } + } + return true +} + +// TestClientResourceVersionAfterStreamRestart tests the scenario where the +// xdsClient's ADS stream to the management server gets broken. This test +// verifies that the version number on the initial request on the new stream +// indicates the most recent version seen by the client on the previous stream. 
+func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { + // Create a restartable listener which can close existing connections. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis := testutils.NewRestartableListener(l) + + streamClosed := grpcsync.NewEvent() // Event to notify stream closure. + acksReceived := grpcsync.NewEvent() // Event to notify receipt of acks for all resources. + // Map from stream id to a map of resource type to resource version. + ackVersionsMap := make(map[int64]map[string]string) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + Listener: lis, + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // Return early under the following circumstances: + // - Received all the requests we wanted to see. This is to avoid + // any stray requests leading to test flakes. + // - Request contains no resource names. Such requests are usually + // seen when the xdsclient is shutting down and is no longer + // interested in the resources that it had subscribed to earlier. + if acksReceived.HasFired() || len(req.GetResourceNames()) == 0 { + return nil + } + // Create a stream specific map to store ack versions if this is the + // first time we are seeing this stream id. 
+ if ackVersionsMap[id] == nil { + ackVersionsMap[id] = make(map[string]string) + } + ackVersionsMap[id][req.GetTypeUrl()] = req.GetVersionInfo() + if seenAllACKs(ackVersionsMap) { + acksReceived.Fire() + } + return nil + }, + OnStreamClosed: func(int64) { + streamClosed.Fire() + }, + }) + defer cleanup1() + + port, cleanup2 := startTestService(t, nil) + defer cleanup2() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // A successful RPC means that we have captured the ack versions for all + // resources in the OnStreamRequest callback. Nothing more needs to be done + // here before stream restart. + + // Stop the listener on the management server. This will cause the client to + // backoff and recreate the stream. + lis.Stop() + + // Wait for the stream to be closed on the server. + <-streamClosed.Done() + + // Restart the listener on the management server to be able to accept + // reconnect attempts from the client. + lis.Restart() + + // Wait for all the previously sent resources to be re-requested. 
+ <-acksReceived.Done() + + // We depend on the fact that the management server assigns monotonically + // increasing stream IDs starting at 1. + const ( + idBeforeRestart = 1 + idAfterRestart = 2 + ) + if diff := cmp.Diff(ackVersionsMap[idBeforeRestart], ackVersionsMap[idAfterRestart]); diff != "" { + t.Fatalf("unexpected diff in ack versions before and after stream restart (-want, +got):\n%s", diff) + } + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/test/xds/xds_client_affinity_test.go b/test/xds/xds_client_affinity_test.go index 58b7fca03f52..94666af3b472 100644 --- a/test/xds/xds_client_affinity_test.go +++ b/test/xds/xds_client_affinity_test.go @@ -88,7 +88,7 @@ func (s) TestClientSideAffinitySanityCheck(t *testing.T) { return func() { envconfig.XDSRingHash = old } }()() - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() port, cleanup2 := startTestService(t, nil) diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index 26f4c6b29963..29cf350337d7 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -53,7 +53,7 @@ func (s) TestClientSideFederation(t *testing.T) { defer func() { envconfig.XDSFederation = oldXDSFederation }() // Start a management server as the default authority. - serverDefaultAuth, err := e2e.StartManagementServer() + serverDefaultAuth, err := e2e.StartManagementServer(nil) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } @@ -61,7 +61,7 @@ func (s) TestClientSideFederation(t *testing.T) { // Start another management server as the other authority. 
const nonDefaultAuth = "non-default-auth" - serverAnotherAuth, err := e2e.StartManagementServer() + serverAnotherAuth, err := e2e.StartManagementServer(nil) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go index b2c3d2f8354e..a0bafb987a73 100644 --- a/test/xds/xds_client_integration_test.go +++ b/test/xds/xds_client_integration_test.go @@ -74,7 +74,7 @@ func startTestService(t *testing.T, server *stubserver.StubServer) (uint32, func } func (s) TestClientSideXDS(t *testing.T) { - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() port, cleanup2 := startTestService(t, nil) diff --git a/test/xds/xds_client_retry_test.go b/test/xds/xds_client_retry_test.go index 646b66be67af..46eb8f34f3d0 100644 --- a/test/xds/xds_client_retry_test.go +++ b/test/xds/xds_client_retry_test.go @@ -49,7 +49,7 @@ func (s) TestClientSideRetry(t *testing.T) { }, } - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() port, cleanup2 := startTestService(t, ss) diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index 68d9fd99a7b2..35b5fe37dc1b 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -104,7 +104,7 @@ func (s) TestRLSinxDS(t *testing.T) { // Set up all components and configuration necessary - management server, // xDS resolver, fake RLS Server, and xDS configuration which specifies an // RLS Balancer that communicates to this set up fake RLS Server. 
- managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() port, cleanup2 := startTestService(t, nil) defer cleanup2() diff --git a/test/xds/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go index c5ec1196bbb7..750fff039155 100644 --- a/test/xds/xds_security_config_nack_test.go +++ b/test/xds/xds_security_config_nack_test.go @@ -41,7 +41,7 @@ func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { missingIdentityProviderInstance = "missing-identity-provider-instance" missingRootProviderInstance = "missing-root-provider-instance" ) - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -324,7 +324,7 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { // SetupManagementServer() sets up a bootstrap file with certificate // provider instance names: `e2e.ServerSideCertProviderInstance` and // `e2e.ClientSideCertProviderInstance`. - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() port, cleanup2 := startTestService(t, nil) diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go index cafea3064d23..3da983e1e89c 100644 --- a/test/xds/xds_server_integration_test.go +++ b/test/xds/xds_server_integration_test.go @@ -125,7 +125,7 @@ func hostPortFromListener(lis net.Listener) (string, uint32, error) { // the client and the server. This results in both of them using the // configured fallback credentials (which is insecure creds in this case). 
func (s) TestServerSideXDS_Fallback(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -207,7 +207,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -277,7 +277,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // configuration pointing to the use of the file_watcher plugin and we verify // that the same client is now able to successfully make an RPC. func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) diff --git a/test/xds/xds_server_rbac_test.go b/test/xds/xds_server_rbac_test.go index c48e2039c767..b1058bda9677 100644 --- a/test/xds/xds_server_rbac_test.go +++ b/test/xds/xds_server_rbac_test.go @@ -60,7 +60,7 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { defer func() { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -605,7 +605,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { for _, test := range tests { 
t.Run(test.name, func(t *testing.T) { func() { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -790,7 +790,7 @@ func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -847,7 +847,7 @@ func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) diff --git a/test/xds/xds_server_serving_mode_test.go b/test/xds/xds_server_serving_mode_test.go index 118a63394f8b..6f730948c128 100644 --- a/test/xds/xds_server_serving_mode_test.go +++ b/test/xds/xds_server_serving_mode_test.go @@ -43,7 +43,7 @@ import ( // change callback is not invoked and client connections to the server are not // recycled. func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) @@ -163,7 +163,7 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { // xDS enabled gRPC servers. 
It verifies that appropriate mode changes happen in // the server, and also verifies behavior of clientConns under these modes. func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() // Configure xDS credentials to be used on the server-side. diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index d8dcdcdfbd0f..c82eb601b661 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -236,7 +236,7 @@ func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.M // Spin up a xDS management server on a local port. nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() + fs, err := e2e.StartManagementServer(nil) if err != nil { t.Fatal(err) } diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index e44f91a55588..904585fc4a6e 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -100,7 +100,7 @@ func (*testService) FullDuplexCall(stream testpb.TestService_FullDuplexCallServe func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { // Spin up a xDS management server on a local port. nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() + fs, err := e2e.StartManagementServer(nil) if err != nil { t.Fatal(err) } diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go index 8f27ff053d75..0ad6fa201fb7 100644 --- a/xds/internal/test/e2e/controlplane.go +++ b/xds/internal/test/e2e/controlplane.go @@ -33,7 +33,7 @@ type controlPlane struct { func newControlPlane() (*controlPlane, error) { // Spin up an xDS management server on a local port. 
- server, err := e2e.StartManagementServer() + server, err := e2e.StartManagementServer(nil) if err != nil { return nil, fmt.Errorf("failed to spin up the xDS management server: %v", err) } diff --git a/xds/internal/xdsclient/controller/controller.go b/xds/internal/xdsclient/controller/controller.go index d48297145472..4b07dc8d6ac5 100644 --- a/xds/internal/xdsclient/controller/controller.go +++ b/xds/internal/xdsclient/controller/controller.go @@ -72,7 +72,7 @@ type Controller struct { watchMap map[xdsresource.ResourceType]map[string]bool // versionMap contains the version that was acked (the version in the ack // request that was sent on wire). The key is rType, the value is the - // version string, becaues the versions for different resource types should + // version string, because the versions for different resource types should // be independent. versionMap map[xdsresource.ResourceType]string // nonceMap contains the nonce from the most recent received response. diff --git a/xds/internal/xdsclient/controller/transport.go b/xds/internal/xdsclient/controller/transport.go index 9e9836512725..34c5b024dd87 100644 --- a/xds/internal/xdsclient/controller/transport.go +++ b/xds/internal/xdsclient/controller/transport.go @@ -166,12 +166,20 @@ func (t *Controller) sendExisting(stream grpc.ClientStream) bool { t.mu.Lock() defer t.mu.Unlock() - // Reset the ack versions when the stream restarts. - t.versionMap = make(map[xdsresource.ResourceType]string) + // Reset only the nonce when the stream restarts. + // + // xDS spec says the following. See section: + // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version + // + // Note that the version for a resource type is not a property of an + // individual xDS stream but rather a property of the resources themselves. 
If + // the stream becomes broken and the client creates a new stream, the client’s + // initial request on the new stream should indicate the most recent version + // seen by the client on the previous stream t.nonceMap = make(map[xdsresource.ResourceType]string) for rType, s := range t.watchMap { - if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, "", "", ""); err != nil { + if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, t.versionMap[rType], "", ""); err != nil { t.logger.Warningf("ADS request failed: %v", err) return false } @@ -296,8 +304,8 @@ func (t *Controller) processWatchInfo(w *watchAction) (target []string, rType xd rType = w.rType target = mapToSlice(current) // We don't reset version or nonce when a new watch is started. The version - // and nonce from previous response are carried by the request unless the - // stream is recreated. + // and nonce from previous response are carried by the request. Only the nonce + // is reset when the stream is recreated. ver = t.versionMap[rType] nonce = t.nonceMap[rType] return target, rType, ver, nonce From 0d04c6f5d427c4c853afd23899461ede6c6e4de8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 7 Jul 2022 13:04:08 -0700 Subject: [PATCH 545/998] ringhash: don't recreate subConns when update doesn't change address information (#5431) --- xds/internal/balancer/ringhash/ring.go | 32 +++--- xds/internal/balancer/ringhash/ring_test.go | 40 ++++---- xds/internal/balancer/ringhash/ringhash.go | 98 ++++++++++--------- .../balancer/ringhash/ringhash_test.go | 24 +++++ 4 files changed, 112 insertions(+), 82 deletions(-) diff --git a/xds/internal/balancer/ringhash/ring.go b/xds/internal/balancer/ringhash/ring.go index 68e844cfb487..5e8d881006e6 100644 --- a/xds/internal/balancer/ringhash/ring.go +++ b/xds/internal/balancer/ringhash/ring.go @@ -43,8 +43,8 @@ type ringEntry struct { sc *subConn } -// newRing creates a ring from the subConns. 
The ring size is limited by the -// passed in max/min. +// newRing creates a ring from the subConns stored in the AddressMap. The ring +// size is limited by the passed in max/min. // // ring entries will be created for each subConn, and subConn with high weight // (specified by the address) may have multiple entries. @@ -64,7 +64,7 @@ type ringEntry struct { // // To pick from a ring, a binary search will be done for the given target hash, // and first item with hash >= given hash will be returned. -func newRing(subConns map[resolver.Address]*subConn, minRingSize, maxRingSize uint64) (*ring, error) { +func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64) (*ring, error) { // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 normalizedWeights, minWeight, err := normalizeWeights(subConns) if err != nil { @@ -95,7 +95,7 @@ func newRing(subConns map[resolver.Address]*subConn, minRingSize, maxRingSize ui for _, scw := range normalizedWeights { targetIdx += scale * scw.weight for float64(idx) < targetIdx { - h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(len(items))) + h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(idx)) items = append(items, &ringEntry{idx: idx, hash: h, sc: scw.sc}) idx++ } @@ -111,26 +111,26 @@ func newRing(subConns map[resolver.Address]*subConn, minRingSize, maxRingSize ui // normalizeWeights divides all the weights by the sum, so that the total weight // is 1. -func normalizeWeights(subConns map[resolver.Address]*subConn) (_ []subConnWithWeight, min float64, _ error) { - if len(subConns) == 0 { +func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64, error) { + keys := subConns.Keys() + if len(keys) == 0 { return nil, 0, fmt.Errorf("number of subconns is 0") } var weightSum uint32 - for a := range subConns { - // The address weight was moved from attributes to the Metadata field. 
- // This is necessary (all the attributes need to be stripped) for the - // balancer to detect identical {address+weight} combination. - weightSum += a.Metadata.(uint32) + for _, a := range keys { + weightSum += getWeightAttribute(a) } if weightSum == 0 { return nil, 0, fmt.Errorf("total weight of all subconns is 0") } weightSumF := float64(weightSum) - ret := make([]subConnWithWeight, 0, len(subConns)) - min = math.MaxFloat64 - for a, sc := range subConns { - nw := float64(a.Metadata.(uint32)) / weightSumF - ret = append(ret, subConnWithWeight{sc: sc, weight: nw}) + ret := make([]subConnWithWeight, 0, len(keys)) + min := float64(1.0) + for _, a := range keys { + v, _ := subConns.Get(a) + scInfo := v.(*subConn) + nw := float64(getWeightAttribute(a)) / weightSumF + ret = append(ret, subConnWithWeight{sc: scInfo, weight: nw}) if nw < min { min = nw } diff --git a/xds/internal/balancer/ringhash/ring_test.go b/xds/internal/balancer/ringhash/ring_test.go index a47215339cf5..20184ab8d20e 100644 --- a/xds/internal/balancer/ringhash/ring_test.go +++ b/xds/internal/balancer/ringhash/ring_test.go @@ -24,25 +24,31 @@ import ( "testing" xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/resolver" ) -func testAddr(addr string, weight uint32) resolver.Address { - return resolver.Address{Addr: addr, Metadata: weight} -} +var testAddrs []resolver.Address +var testSubConnMap *resolver.AddressMap -func (s) TestRingNew(t *testing.T) { - testAddrs := []resolver.Address{ +func init() { + testAddrs = []resolver.Address{ testAddr("a", 3), testAddr("b", 3), testAddr("c", 4), } + testSubConnMap = resolver.NewAddressMap() + testSubConnMap.Set(testAddrs[0], &subConn{addr: "a"}) + testSubConnMap.Set(testAddrs[1], &subConn{addr: "b"}) + testSubConnMap.Set(testAddrs[2], &subConn{addr: "c"}) +} + +func testAddr(addr string, weight uint32) resolver.Address { + return 
weightedroundrobin.SetAddrInfo(resolver.Address{Addr: addr}, weightedroundrobin.AddrInfo{Weight: weight}) +} + +func (s) TestRingNew(t *testing.T) { var totalWeight float64 = 10 - testSubConnMap := map[resolver.Address]*subConn{ - testAddr("a", 3): {addr: "a"}, - testAddr("b", 3): {addr: "b"}, - testAddr("c", 4): {addr: "c"}, - } for _, min := range []uint64{3, 4, 6, 8} { for _, max := range []uint64{20, 8} { t.Run(fmt.Sprintf("size-min-%v-max-%v", min, max), func(t *testing.T) { @@ -59,7 +65,7 @@ func (s) TestRingNew(t *testing.T) { } } got := float64(count) / float64(totalCount) - want := float64(a.Metadata.(uint32)) / totalWeight + want := float64(getWeightAttribute(a)) / totalWeight if !equalApproximately(got, want) { t.Fatalf("unexpected item weight in ring: %v != %v", got, want) } @@ -76,11 +82,7 @@ func equalApproximately(x, y float64) bool { } func (s) TestRingPick(t *testing.T) { - r, _ := newRing(map[resolver.Address]*subConn{ - {Addr: "a", Metadata: uint32(3)}: {addr: "a"}, - {Addr: "b", Metadata: uint32(3)}: {addr: "b"}, - {Addr: "c", Metadata: uint32(4)}: {addr: "c"}, - }, 10, 20) + r, _ := newRing(testSubConnMap, 10, 20) for _, h := range []uint64{xxhash.Sum64String("1"), xxhash.Sum64String("2"), xxhash.Sum64String("3"), xxhash.Sum64String("4")} { t.Run(fmt.Sprintf("picking-hash-%v", h), func(t *testing.T) { e := r.pick(h) @@ -98,11 +100,7 @@ func (s) TestRingPick(t *testing.T) { } func (s) TestRingNext(t *testing.T) { - r, _ := newRing(map[resolver.Address]*subConn{ - {Addr: "a", Metadata: uint32(3)}: {addr: "a"}, - {Addr: "b", Metadata: uint32(3)}: {addr: "b"}, - {Addr: "c", Metadata: uint32(4)}: {addr: "c"}, - }, 10, 20) + r, _ := newRing(testSubConnMap, 10, 20) for _, e := range r.items { ne := r.next(e) diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index 4e9c1772b166..3e06fc4eb6eb 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -47,7 
+47,7 @@ type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &ringhashBalancer{ cc: cc, - subConns: make(map[resolver.Address]*subConn), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]*subConn), csEvltr: &connectivityStateEvaluator{}, } @@ -65,8 +65,9 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err } type subConn struct { - addr string - sc balancer.SubConn + addr string + weight uint32 + sc balancer.SubConn mu sync.RWMutex // This is the actual state of this SubConn (as updated by the ClientConn). @@ -178,9 +179,8 @@ type ringhashBalancer struct { cc balancer.ClientConn logger *grpclog.PrefixLogger - config *LBConfig - - subConns map[resolver.Address]*subConn // `attributes` is stripped from the keys of this map (the addresses) + config *LBConfig + subConns *resolver.AddressMap // Map from resolver.Address to `*subConn`. scStates map[balancer.SubConn]*subConn // ring is always in sync with subConns. When subConns change, a new ring is @@ -208,55 +208,47 @@ type ringhashBalancer struct { // SubConn states are Idle. func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { var addrsUpdated bool - // addrsSet is the set converted from addrs, it's used for quick lookup of - // an address. - // - // Addresses in this map all have attributes stripped, but metadata set to - // the weight. So that weight change can be detected. - // - // TODO: this won't be necessary if there are ways to compare address - // attributes. - addrsSet := make(map[resolver.Address]struct{}) - for _, a := range addrs { - aNoAttrs := a - // Strip attributes but set Metadata to the weight. - aNoAttrs.Attributes = nil - w := weightedroundrobin.GetAddrInfo(a).Weight - if w == 0 { - // If weight is not set, use 1. 
- w = 1 - } - aNoAttrs.Metadata = w - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) + // addrsSet is the set converted from addrs, used for quick lookup. + addrsSet := resolver.NewAddressMap() + for _, addr := range addrs { + addrsSet.Set(addr, true) + newWeight := getWeightAttribute(addr) + if val, ok := b.subConns.Get(addr); !ok { + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - scs := &subConn{addr: a.Addr, sc: sc} + scs := &subConn{addr: addr.Addr, weight: newWeight, sc: sc} scs.setState(connectivity.Idle) b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) - b.subConns[aNoAttrs] = scs + b.subConns.Set(addr, scs) b.scStates[sc] = scs addrsUpdated = true } else { - // Always update the subconn's address in case the attributes - // changed. The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.sc, []resolver.Address{a}) + // We have seen this address before and created a subConn for it. If the + // weight associated with the address has changed, update the subConns map + // with the new weight. This will be used when a new ring is created. + // + // There is no need to call UpdateAddresses on the subConn at this point + // since *only* the weight attribute has changed, and that does not affect + // subConn uniqueness. 
+ scInfo := val.(*subConn) + if oldWeight := scInfo.weight; oldWeight != newWeight { + scInfo.weight = newWeight + b.subConns.Set(addr, scInfo) + // Return true to force recreation of the ring. + addrsUpdated = true + } } } - for a, scInfo := range b.subConns { - // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { + for _, addr := range b.subConns.Keys() { + // addr was removed by resolver. + if _, ok := addrsSet.Get(addr); !ok { + v, _ := b.subConns.Get(addr) + scInfo := v.(*subConn) b.cc.RemoveSubConn(scInfo.sc) - delete(b.subConns, a) + b.subConns.Delete(addr) addrsUpdated = true // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -304,7 +296,7 @@ func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) err func (b *ringhashBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -392,7 +384,8 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance // attempting to connect, we need to trigger one. But since the deleted // SubConn will eventually send a shutdown update, this code will run // and trigger the next SubConn to connect. - for _, sc := range b.subConns { + for _, v := range b.subConns.Values() { + sc := v.(*subConn) if sc.isAttemptingToConnect() { return } @@ -485,3 +478,18 @@ func (cse *connectivityStateEvaluator) recordTransition(oldState, newState conne } return connectivity.TransientFailure } + +// getWeightAttribute is a convenience function which returns the value of the +// weight attribute stored in the BalancerAttributes field of addr, using the +// weightedroundrobin package. +// +// When used in the xDS context, the weight attribute is guaranteed to be +// non-zero. But, when used in a non-xDS context, the weight attribute could be +// unset. A Default of 1 is used in the latter case. 
+func getWeightAttribute(addr resolver.Address) uint32 { + w := weightedroundrobin.GetAddrInfo(addr).Weight + if w == 0 { + return 1 + } + return w +} diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go index cac8476b9290..02302321ce5e 100644 --- a/xds/internal/balancer/ringhash/ringhash_test.go +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" ) var ( @@ -491,3 +492,26 @@ func (s) TestConnectivityStateEvaluatorRecordTransition(t *testing.T) { }) } } + +// TestAddrBalancerAttributesChange tests the case where the ringhash balancer +// receives a ClientConnUpdate with the same config and addresses as received in +// the previous update. Although the `BalancerAttributes` contents are the same, +// the pointer is different. This test verifies that subConns are not recreated +// in this scenario. 
+func (s) TestAddrBalancerAttributesChange(t *testing.T) { + addrs1 := []resolver.Address{internal.SetLocalityID(resolver.Address{Addr: testBackendAddrStrs[0]}, internal.LocalityID{Region: "americas"})} + cc, b, _ := setupTest(t, addrs1) + + addrs2 := []resolver.Address{internal.SetLocalityID(resolver.Address{Addr: testBackendAddrStrs[0]}, internal.LocalityID{Region: "americas"})} + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: addrs2}, + BalancerConfig: nil, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + select { + case <-cc.NewSubConnCh: + t.Fatal("new subConn created for an update with the same addresses") + case <-time.After(defaultTestShortTimeout): + } +} From 03fee09e68e40a4369918fc0a944c2b551e73b65 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 7 Jul 2022 13:48:34 -0700 Subject: [PATCH 546/998] balancer: fix connectivity state aggregation algorithm to follow the spec (#5473) --- balancer/balancer.go | 53 ------ balancer/conn_state_evaluator.go | 70 ++++++++ balancer/conn_state_evaluator_test.go | 245 ++++++++++++++++++++++++++ 3 files changed, 315 insertions(+), 53 deletions(-) create mode 100644 balancer/conn_state_evaluator.go create mode 100644 balancer/conn_state_evaluator_test.go diff --git a/balancer/balancer.go b/balancer/balancer.go index f7a7697cad02..25713908072c 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -371,56 +371,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. 
- numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transient failure state. - numIdle uint64 // Number of addrConns in idle state. -} - -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else there are no subconns and the aggregated state is Transient Failure -// -// Shutdown is not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - case connectivity.Idle: - cse.numIdle += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - if cse.numTransientFailure > 0 { - return connectivity.TransientFailure - } - if cse.numIdle > 0 { - return connectivity.Idle - } - return connectivity.TransientFailure -} diff --git a/balancer/conn_state_evaluator.go b/balancer/conn_state_evaluator.go new file mode 100644 index 000000000000..a87b6809af38 --- /dev/null +++ b/balancer/conn_state_evaluator.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. 
+ switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/balancer/conn_state_evaluator_test.go b/balancer/conn_state_evaluator_test.go new file mode 100644 index 000000000000..d82ddf84c240 --- /dev/null +++ b/balancer/conn_state_evaluator_test.go @@ -0,0 +1,245 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import ( + "testing" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestRecordTransition_FirstStateChange tests the first call to +// RecordTransition where the `oldState` is usually set to `Shutdown` (a state +// that the ConnectivityStateEvaluator is set to ignore). 
+func (s) TestRecordTransition_FirstStateChange(t *testing.T) { + tests := []struct { + newState connectivity.State + wantState connectivity.State + }{ + { + newState: connectivity.Idle, + wantState: connectivity.Idle, + }, + { + newState: connectivity.Connecting, + wantState: connectivity.Connecting, + }, + { + newState: connectivity.Ready, + wantState: connectivity.Ready, + }, + { + newState: connectivity.TransientFailure, + wantState: connectivity.TransientFailure, + }, + { + newState: connectivity.Shutdown, + wantState: connectivity.TransientFailure, + }, + } + for _, test := range tests { + cse := &ConnectivityStateEvaluator{} + if gotState := cse.RecordTransition(connectivity.Shutdown, test.newState); gotState != test.wantState { + t.Fatalf("RecordTransition(%v, %v) = %v, want %v", connectivity.Shutdown, test.newState, gotState, test.wantState) + } + } +} + +// TestRecordTransition_SameState tests the scenario where state transitions to +// the same state are recorded multiple times. 
+func (s) TestRecordTransition_SameState(t *testing.T) { + tests := []struct { + newState connectivity.State + wantState connectivity.State + }{ + { + newState: connectivity.Idle, + wantState: connectivity.Idle, + }, + { + newState: connectivity.Connecting, + wantState: connectivity.Connecting, + }, + { + newState: connectivity.Ready, + wantState: connectivity.Ready, + }, + { + newState: connectivity.TransientFailure, + wantState: connectivity.TransientFailure, + }, + { + newState: connectivity.Shutdown, + wantState: connectivity.TransientFailure, + }, + } + const numStateChanges = 5 + for _, test := range tests { + cse := &ConnectivityStateEvaluator{} + var prevState, gotState connectivity.State + prevState = connectivity.Shutdown + for i := 0; i < numStateChanges; i++ { + gotState = cse.RecordTransition(prevState, test.newState) + prevState = test.newState + } + if gotState != test.wantState { + t.Fatalf("RecordTransition() = %v, want %v", gotState, test.wantState) + } + } +} + +// TestRecordTransition_SingleSubConn_DifferentStates tests some common +// connectivity state change scenarios, on a single subConn. 
+func (s) TestRecordTransition_SingleSubConn_DifferentStates(t *testing.T) { + tests := []struct { + name string + states []connectivity.State + wantState connectivity.State + }{ + { + name: "regular transition to ready", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready}, + wantState: connectivity.Ready, + }, + { + name: "regular transition to transient failure", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + wantState: connectivity.TransientFailure, + }, + { + name: "regular transition to ready", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.Idle}, + wantState: connectivity.Idle, + }, + { + name: "transition from ready to transient failure", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure}, + wantState: connectivity.TransientFailure, + }, + { + name: "transition from transient failure back to ready", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure, connectivity.Ready}, + wantState: connectivity.Ready, + }, + { + // This state transition is usually suppressed at the LB policy level, by + // not calling RecordTransition. + name: "transition from transient failure back to idle", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure, connectivity.Idle}, + wantState: connectivity.Idle, + }, + { + // This state transition is usually suppressed at the LB policy level, by + // not calling RecordTransition. 
+ name: "transition from transient failure back to connecting", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure, connectivity.Connecting}, + wantState: connectivity.Connecting, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cse := &ConnectivityStateEvaluator{} + var prevState, gotState connectivity.State + prevState = connectivity.Shutdown + for _, newState := range test.states { + gotState = cse.RecordTransition(prevState, newState) + prevState = newState + } + if gotState != test.wantState { + t.Fatalf("RecordTransition() = %v, want %v", gotState, test.wantState) + } + }) + } +} + +// TestRecordTransition_MultipleSubConns_DifferentStates tests state transitions +// among multiple subConns, and verifies that the connectivity state aggregation +// algorithm produces the expected aggregate connectivity state. +func (s) TestRecordTransition_MultipleSubConns_DifferentStates(t *testing.T) { + tests := []struct { + name string + // Each entry in this slice corresponds to the state changes happening on an + // individual subConn. 
+ subConnStates [][]connectivity.State + wantState connectivity.State + }{ + { + name: "atleast one ready", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready}, + {connectivity.Idle}, + {connectivity.Idle, connectivity.Connecting}, + {connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + }, + wantState: connectivity.Ready, + }, + { + name: "atleast one connecting", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.Connecting}, + {connectivity.Idle}, + {connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + }, + wantState: connectivity.Connecting, + }, + { + name: "atleast one idle", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.Idle}, + {connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + }, + wantState: connectivity.Idle, + }, + { + name: "atleast one transient failure", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure}, + {connectivity.TransientFailure}, + }, + wantState: connectivity.TransientFailure, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cse := &ConnectivityStateEvaluator{} + var prevState, gotState connectivity.State + for _, scStates := range test.subConnStates { + prevState = connectivity.Shutdown + for _, newState := range scStates { + gotState = cse.RecordTransition(prevState, newState) + prevState = newState + } + } + if gotState != test.wantState { + t.Fatalf("RecordTransition() = %v, want %v", gotState, test.wantState) + } + }) + } +} From 38df45cac79357ed60e9fe53eb24bc2b8e32abbc Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 7 Jul 2022 15:06:18 -0700 Subject: [PATCH 547/998] xdsclient: move XDSClient interface definition to client.go 
(#5479) --- xds/internal/xdsclient/attributes.go | 26 +------------------------- xds/internal/xdsclient/client.go | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 25 deletions(-) diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go index 514181627361..9076a76fd0dc 100644 --- a/xds/internal/xdsclient/attributes.go +++ b/xds/internal/xdsclient/attributes.go @@ -17,36 +17,12 @@ package xdsclient -import ( - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) +import "google.golang.org/grpc/resolver" type clientKeyType string const clientKey = clientKeyType("grpc.xds.internal.client.Client") -// XDSClient is a full fledged gRPC client which queries a set of discovery APIs -// (collectively termed as xDS) on a remote management server, to discover -// various dynamic resources. -type XDSClient interface { - WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() - WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() - WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() - WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func()) - ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) - - DumpLDS() map[string]xdsresource.UpdateWithMD - DumpRDS() map[string]xdsresource.UpdateWithMD - DumpCDS() map[string]xdsresource.UpdateWithMD - DumpEDS() map[string]xdsresource.UpdateWithMD - - BootstrapConfig() *bootstrap.Config - Close() -} - // FromResolverState returns the Client from state, or nil if not present. 
func FromResolverState(state resolver.State) XDSClient { cs, _ := state.Attributes.Value(clientKey).(XDSClient) diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 817a4507eb34..90551986a0d2 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -29,9 +29,29 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) +// XDSClient is a full fledged gRPC client which queries a set of discovery APIs +// (collectively termed as xDS) on a remote management server, to discover +// various dynamic resources. +type XDSClient interface { + WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() + WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() + WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() + ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) + + DumpLDS() map[string]xdsresource.UpdateWithMD + DumpRDS() map[string]xdsresource.UpdateWithMD + DumpCDS() map[string]xdsresource.UpdateWithMD + DumpEDS() map[string]xdsresource.UpdateWithMD + + BootstrapConfig() *bootstrap.Config + Close() +} + // clientImpl is the real implementation of the xds client. The exported Client // is a wrapper of this struct with a ref count. // From 2c0949c22d46095edc579d9e66edcd025192b98c Mon Sep 17 00:00:00 2001 From: "Bryan C. 
Mills" Date: Fri, 8 Jul 2022 14:34:38 -0400 Subject: [PATCH 548/998] all: update to 'go 1.17' to enable module graph pruning (#5477) --- go.mod | 11 ++++++++++- go.sum | 1 - 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 6a760ed7435b..3ad611bf2d9a 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc -go 1.14 +go 1.17 require ( github.com/cespare/xxhash/v2 v2.1.1 @@ -17,3 +17,12 @@ require ( google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 google.golang.org/protobuf v1.27.1 ) + +require ( + cloud.google.com/go v0.34.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect + github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + golang.org/x/text v0.3.3 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/appengine v1.4.0 // indirect +) diff --git a/go.sum b/go.sum index 5f418dba1b41..be33e4b1e355 100644 --- a/go.sum +++ b/go.sum @@ -131,7 +131,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 5017088853f0337c49e5e90d00299bcca4926e0e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 12 Jul 2022 09:33:51 -0700 Subject: [PATCH 549/998] internal/xds: generate an entry in the authorities map with 
empty string key (#5493) --- internal/xds/bootstrap.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal/xds/bootstrap.go b/internal/xds/bootstrap.go index 4905b7825ee7..4f583d1f4fd3 100644 --- a/internal/xds/bootstrap.go +++ b/internal/xds/bootstrap.go @@ -62,6 +62,11 @@ type BootstrapOptions struct { // map[authority-name]ServerURI). The other fields (version, creds, // features) are assumed to be the same as the default authority (they can // be added later if needed). + // + // If the env var corresponding to federation (envconfig.XDSFederation) is + // set, an entry with empty string as the key and empty server config as + // value will be added. This will be used by new style resource names with + // an empty authority. Authorities map[string]string } @@ -122,6 +127,11 @@ func BootstrapContents(opts BootstrapOptions) ([]byte, error) { } auths := make(map[string]authority) + if envconfig.XDSFederation { + // This will end up using the top-level server list for new style + // resources with empty authority. + auths[""] = authority{} + } for n, auURI := range opts.Authorities { auths[n] = authority{XdsServers: []server{{ ServerURI: auURI, From e02f27d9695e01eb371d3a9a03ab7883b0cba27b Mon Sep 17 00:00:00 2001 From: kennylong Date: Wed, 13 Jul 2022 01:04:32 +0800 Subject: [PATCH 550/998] internal: move baseContentType comment where it should be (#5486) Signed-off-by: longkai --- internal/grpcutil/method.go | 5 +++++ internal/transport/http_util.go | 6 ------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/internal/grpcutil/method.go b/internal/grpcutil/method.go index 4e7475060c1c..e9c4af64830c 100644 --- a/internal/grpcutil/method.go +++ b/internal/grpcutil/method.go @@ -39,6 +39,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. 
This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. The diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index b77513068622..56e95788d9cb 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -47,12 +47,6 @@ const ( http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. 
- ) var ( From 9ba66f1b84200242519fc5f25fc5175809ddba4f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 12 Jul 2022 11:37:52 -0700 Subject: [PATCH 551/998] xdsclient: use top-level server list if authority specific list is empty (#5491) --- xds/internal/xdsclient/authority.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 26db726dd943..4c0ad2832c86 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -54,7 +54,9 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun if !ok { return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) } - config = cfg.XDSServer + if cfg.XDSServer != nil { + config = cfg.XDSServer + } } a, err := c.newAuthorityLocked(config) From c40237875568dbb0fbe1d409d7f8df97074c9efa Mon Sep 17 00:00:00 2001 From: mitchsw Date: Tue, 12 Jul 2022 16:36:41 -0400 Subject: [PATCH 552/998] doc: remove comment about obsolete GRPC_GO_RETRY env var (#5495) --- dialoptions.go | 3 --- examples/features/retry/README.md | 5 ++--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 75d01ba777c8..1c28ee7112af 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -528,9 +528,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently enabled by default, but may be disabled by -// setting the environment variable "GRPC_GO_RETRY" to "off". 
func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true diff --git a/examples/features/retry/README.md b/examples/features/retry/README.md index 826cca7f40bc..e39a1c71704d 100644 --- a/examples/features/retry/README.md +++ b/examples/features/retry/README.md @@ -18,11 +18,10 @@ First start the server: go run server/main.go ``` -Then run the client. Note that when running the client, `GRPC_GO_RETRY=on` must be set in -your environment: +Then run the client: ```bash -GRPC_GO_RETRY=on go run client/main.go +go run client/main.go ``` ## Usage From 5e15eac0c4dfc1d9e5797a955205d38add29a314 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 12 Jul 2022 14:12:07 -0700 Subject: [PATCH 553/998] xdsclient: handle empty authority in new style resource names (#5488) --- xds/internal/xdsclient/xdsresource/name.go | 2 +- .../xdsclient/xdsresource/name_test.go | 86 ++++++++++++------- 2 files changed, 57 insertions(+), 31 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/name.go b/xds/internal/xdsclient/xdsresource/name.go index eb1ee323cee9..80c0efd37b39 100644 --- a/xds/internal/xdsclient/xdsresource/name.go +++ b/xds/internal/xdsclient/xdsresource/name.go @@ -119,7 +119,7 @@ func (n *Name) String() string { path := n.Type if n.ID != "" { - path = path + "/" + n.ID + path = "/" + path + "/" + n.ID } tempURL := &url.URL{ diff --git a/xds/internal/xdsclient/xdsresource/name_test.go b/xds/internal/xdsclient/xdsresource/name_test.go index 8ef9d894840c..cfab669c54bc 100644 --- a/xds/internal/xdsclient/xdsresource/name_test.go +++ b/xds/internal/xdsclient/xdsresource/name_test.go @@ -18,54 +18,76 @@ package xdsresource import ( - "reflect" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/envconfig" ) func TestParseName(t *testing.T) { tests := []struct { - name string - env bool // Whether federation env is set to true. 
- in string - want *Name + name string + env bool // Whether federation env is set to true. + in string + want *Name + wantStr string }{ { - name: "env off", - env: false, - in: "xdstp://auth/type/id", - want: &Name{ID: "xdstp://auth/type/id"}, + name: "env off", + env: false, + in: "xdstp://auth/type/id", + want: &Name{ID: "xdstp://auth/type/id"}, + wantStr: "xdstp://auth/type/id", }, { - name: "old style name", - env: true, - in: "test-resource", - want: &Name{ID: "test-resource"}, + name: "old style name", + env: true, + in: "test-resource", + want: &Name{ID: "test-resource"}, + wantStr: "test-resource", }, { - name: "invalid not url", - env: true, - in: "a:/b/c", - want: &Name{ID: "a:/b/c"}, + name: "invalid not url", + env: true, + in: "a:/b/c", + want: &Name{ID: "a:/b/c"}, + wantStr: "a:/b/c", }, { - name: "invalid no resource type", - env: true, - in: "xdstp://auth/id", - want: &Name{ID: "xdstp://auth/id"}, + name: "invalid no resource type", + env: true, + in: "xdstp://auth/id", + want: &Name{ID: "xdstp://auth/id"}, + wantStr: "xdstp://auth/id", }, { - name: "valid no ctx params", - env: true, - in: "xdstp://auth/type/id", - want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id"}, + name: "valid with no authority", + env: true, + in: "xdstp:///type/id", + want: &Name{Scheme: "xdstp", Authority: "", Type: "type", ID: "id"}, + wantStr: "xdstp:///type/id", }, { - name: "valid with ctx params", - env: true, - in: "xdstp://auth/type/id?a=1&b=2", - want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: map[string]string{"a": "1", "b": "2"}}, + name: "valid no ctx params", + env: true, + in: "xdstp://auth/type/id", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id"}, + wantStr: "xdstp://auth/type/id", + }, + { + name: "valid with ctx params", + env: true, + in: "xdstp://auth/type/id?a=1&b=2", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: 
map[string]string{"a": "1", "b": "2"}}, + wantStr: "xdstp://auth/type/id?a=1&b=2", + }, + { + name: "valid with ctx params sorted by keys", + env: true, + in: "xdstp://auth/type/id?b=2&a=1", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: map[string]string{"a": "1", "b": "2"}}, + wantStr: "xdstp://auth/type/id?a=1&b=2", }, } for _, tt := range tests { @@ -77,9 +99,13 @@ func TestParseName(t *testing.T) { return func() { envconfig.XDSFederation = oldEnv } }()() } - if got := ParseName(tt.in); !reflect.DeepEqual(got, tt.want) { + got := ParseName(tt.in) + if !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(Name{}, "processingDirective")) { t.Errorf("ParseName() = %#v, want %#v", got, tt.want) } + if gotStr := got.String(); gotStr != tt.wantStr { + t.Errorf("Name.String() = %s, want %s", gotStr, tt.wantStr) + } }) } } From a094a1095c07d37362f7ab37b92a6aa46c2d8b07 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 13 Jul 2022 11:37:09 -0400 Subject: [PATCH 554/998] Fix race between activeStreams and bdp window size (#5494) * Fix race between activeStreams and bdp window size --- internal/transport/http2_client.go | 30 +++++++++++++------------ test/end2end_test.go | 36 ++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index be371c6e0f73..28c77af70aba 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -78,6 +78,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. 
@@ -109,6 +110,7 @@ type http2Client struct { waitingStreams uint32 nextID uint32 + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -685,7 +687,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, cleanup(err) return err } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -719,6 +720,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, t.nextID += 2 s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -744,13 +752,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { // Connection closed. @@ -1003,13 +1005,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1215,7 +1217,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. diff --git a/test/end2end_test.go b/test/end2end_test.go index da0acbf3d75d..c44925f96a62 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -8041,3 +8041,39 @@ func (s) TestServerClosesConn(t *testing.T) { } t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen)) } + +// TestUnexpectedEOF tests a scenario where a client invokes two unary RPC +// calls. The first call receives a payload which exceeds max grpc receive +// message length, and the second gets a large response. This second RPC should +// not fail with unexpected.EOF. 
+func (s) TestUnexpectedEOF(t *testing.T) { + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{ + Payload: &testpb.Payload{ + Body: bytes.Repeat([]byte("a"), int(in.ResponseSize)), + }, + }, nil + }, + } + if err := ss.Start([]grpc.ServerOption{}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for i := 0; i < 10; i++ { + // exceeds grpc.DefaultMaxRecvMessageSize, this should error with + // RESOURCE_EXHAUSTED error. + _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 4194304}) + if code := status.Code(err); code != codes.ResourceExhausted { + t.Fatalf("UnaryCall RPC returned error: %v, want status code %v", err, codes.ResourceExhausted) + } + // Larger response that doesn't exceed DefaultMaxRecvMessageSize, this + // should work normally. + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 275075}); err != nil { + t.Fatalf("UnaryCall RPC failed: %v", err) + } + } +} From 4f47c8c163e816487cdc6ea5d6ca262dce2f09b3 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 14 Jul 2022 12:12:48 -0700 Subject: [PATCH 555/998] test/xds: wait for all ACKs before forcing stream restart (#5500) --- test/xds/xds_client_ack_nack_test.go | 74 +++++++++++++++++++--------- 1 file changed, 51 insertions(+), 23 deletions(-) diff --git a/test/xds/xds_client_ack_nack_test.go b/test/xds/xds_client_ack_nack_test.go index ca0ec56e284b..ca954f8a34fc 100644 --- a/test/xds/xds_client_ack_nack_test.go +++ b/test/xds/xds_client_ack_nack_test.go @@ -39,14 +39,15 @@ import ( // xDS flow on the client. const wantResources = 4 -// seenAllACKs returns true if we have seen two streams with acks for all the -// resources that we are interested in. 
-func seenAllACKs(acks map[int64]map[string]string) bool { - if len(acks) != 2 { +// seenAllACKs returns true if the provided ackVersions map contains valid acks +// for all the resources that we are interested in. If `wantNonEmpty` is true, +// only non-empty ack versions are considered valid. +func seenAllACKs(acksVersions map[string]string, wantNonEmpty bool) bool { + if len(acksVersions) != wantResources { return false } - for _, v := range acks { - if len(v) != wantResources { + for _, ack := range acksVersions { + if wantNonEmpty && ack == "" { return false } } @@ -65,8 +66,19 @@ func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { } lis := testutils.NewRestartableListener(l) - streamClosed := grpcsync.NewEvent() // Event to notify stream closure. - acksReceived := grpcsync.NewEvent() // Event to notify receipt of acks for all resources. + // We depend on the fact that the management server assigns monotonically + // increasing stream IDs starting at 1. + const ( + idBeforeRestart = 1 + idAfterRestart = 2 + ) + + // Events of importance in the test, in the order in which they are expected + // to happen. + acksReceivedBeforeRestart := grpcsync.NewEvent() + streamRestarted := grpcsync.NewEvent() + acksReceivedAfterRestart := grpcsync.NewEvent() + // Map from stream id to a map of resource type to resource version. ackVersionsMap := make(map[int64]map[string]string) managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ @@ -78,7 +90,7 @@ func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { // - Request contains no resource names. Such requests are usually // seen when the xdsclient is shutting down and is no longer // interested in the resources that it had subscribed to earlier. 
- if acksReceived.HasFired() || len(req.GetResourceNames()) == 0 { + if acksReceivedAfterRestart.HasFired() || len(req.GetResourceNames()) == 0 { return nil } // Create a stream specific map to store ack versions if this is the @@ -87,13 +99,25 @@ func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { ackVersionsMap[id] = make(map[string]string) } ackVersionsMap[id][req.GetTypeUrl()] = req.GetVersionInfo() - if seenAllACKs(ackVersionsMap) { - acksReceived.Fire() + // Prior to stream restart, we are interested only in non-empty + // resource versions. The xdsclient first sends out requests with an + // empty version string. After receipt of requested resource, it + // sends out another request for the same resource, but this time + // with a non-empty version string, to serve as an ACK. + if seenAllACKs(ackVersionsMap[idBeforeRestart], true) { + acksReceivedBeforeRestart.Fire() + } + // After stream restart, we expect the xdsclient to send out + // requests with version string set to the previously ACKed + // versions. If it sends out requests with empty version string, it + // is a bug and we want this test to catch it. + if seenAllACKs(ackVersionsMap[idAfterRestart], false) { + acksReceivedAfterRestart.Fire() } return nil }, OnStreamClosed: func(int64) { - streamClosed.Fire() + streamRestarted.Fire() }, }) defer cleanup1() @@ -127,30 +151,34 @@ func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { t.Fatalf("rpc EmptyCall() failed: %v", err) } - // A successful RPC means that we have captured the ack versions for all - // resources in the OnStreamRequest callback. Nothing more needs to be done - // here before stream restart. + // A successful RPC means that the xdsclient received all requested + // resources. The ACKs from the xdsclient may get a little delayed. So, we + // need to wait for all ACKs to be received on the management server before + // restarting the stream. 
+ select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for all resources to be ACKed prior to stream restart") + case <-acksReceivedBeforeRestart.Done(): + } // Stop the listener on the management server. This will cause the client to // backoff and recreate the stream. lis.Stop() // Wait for the stream to be closed on the server. - <-streamClosed.Done() + <-streamRestarted.Done() // Restart the listener on the management server to be able to accept // reconnect attempts from the client. lis.Restart() // Wait for all the previously sent resources to be re-requested. - <-acksReceived.Done() + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for all resources to be ACKed post stream restart") + case <-acksReceivedAfterRestart.Done(): + } - // We depend on the fact that the management server assigns monotonically - // increasing stream IDs starting at 1. - const ( - idBeforeRestart = 1 - idAfterRestart = 2 - ) if diff := cmp.Diff(ackVersionsMap[idBeforeRestart], ackVersionsMap[idAfterRestart]); diff != "" { t.Fatalf("unexpected diff in ack versions before and after stream restart (-want, +got):\n%s", diff) } From 96aa657ba60fd64cc8b2e2c99eb6fca514b4e7f7 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 14 Jul 2022 13:51:23 -0700 Subject: [PATCH 556/998] xds: readd NewXDSResolverWithConfigForTesting() (#5504) --- xds/xds.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/xds/xds.go b/xds/xds.go index 3ff3c76bce4b..2fbce34663c0 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -33,7 +33,9 @@ import ( v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "google.golang.org/grpc" _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. 
+ "google.golang.org/grpc/internal" internaladmin "google.golang.org/grpc/internal/admin" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. @@ -73,3 +75,21 @@ func init() { return csdss.Close, nil }) } + +// NewXDSResolverWithConfigForTesting creates a new xDS resolver builder using +// the provided xDS bootstrap config instead of the global configuration from +// the supported environment variables. The resolver.Builder is meant to be +// used in conjunction with the grpc.WithResolvers DialOption. +// +// Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { + return internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error))(bootstrapConfig) +} From 30d54d398f70876f81aec64875e4540d9a40b1b8 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 14 Jul 2022 23:52:18 +0000 Subject: [PATCH 557/998] client: fix stream creation issue with transparent retry (#5503) --- stream.go | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/stream.go b/stream.go index 6d82e0d7cca3..446a91e323ee 100644 --- a/stream.go +++ b/stream.go @@ -140,13 +140,13 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. 
A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. @@ -303,12 +303,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */) - if err != nil { - cs.finish(err) - return nil, err - } - // Pick the transport to use and create a new stream on the transport. // Assign cs.attempt upon success. op := func(a *csAttempt) error { @@ -704,6 +698,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. 
+ var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } a := cs.attempt cs.mu.Unlock() err := op(a) From 3a77d29151deb3a7bb1a5bdb421162fb7c450d8e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 15 Jul 2022 18:58:44 +0000 Subject: [PATCH 558/998] xdsclient: fix LRS stream leaks when errors are encountered (#5505) --- .../xdsclient/controller/transport.go | 51 +++++++++++-------- .../controller/version/v2/loadreport.go | 21 +++++++- .../controller/version/v3/loadreport.go | 21 +++++++- 3 files changed, 67 insertions(+), 26 deletions(-) diff --git a/xds/internal/xdsclient/controller/transport.go b/xds/internal/xdsclient/controller/transport.go index 34c5b024dd87..28641dc874a4 100644 --- a/xds/internal/xdsclient/controller/transport.go +++ b/xds/internal/xdsclient/controller/transport.go @@ -99,10 +99,10 @@ func (t *Controller) run(ctx context.Context) { // new requests to send on the stream. // // For each new request (watchAction), it's -// - processed and added to the watch map -// - so resend will pick them up when there are new streams -// - sent on the current stream if there's one -// - the current stream is cleared when any send on it fails +// - processed and added to the watch map +// so, resend will pick them up when there are new streams +// - sent on the current stream if there's one +// the current stream is cleared when any send on it fails // // For each new stream, all the existing requests will be resent. 
// @@ -388,26 +388,33 @@ func (t *Controller) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts c retries++ lastStreamStartTime = time.Now() - stream, err := t.vClient.NewLoadStatsStream(ctx, cc) - if err != nil { - t.logger.Warningf("lrs: failed to create stream: %v", err) - continue - } - t.logger.Infof("lrs: created LRS stream") + func() { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := t.vClient.NewLoadStatsStream(streamCtx, cc) + if err != nil { + t.logger.Warningf("lrs: failed to create stream: %v", err) + return + } + t.logger.Infof("lrs: created LRS stream") - if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil { - t.logger.Warningf("lrs: failed to send first request: %v", err) - continue - } + if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil { + t.logger.Warningf("lrs: failed to send first request: %v", err) + return + } - clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream) - if err != nil { - t.logger.Warningf("%v", err) - continue - } + clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream) + if err != nil { + t.logger.Warningf("lrs: error from stream: %v", err) + return + } - retries = 0 - t.sendLoads(ctx, stream, opts.LoadStore, clusters, interval) + retries = 0 + t.sendLoads(streamCtx, stream, opts.LoadStore, clusters, interval) + }() } } @@ -421,7 +428,7 @@ func (t *Controller) sendLoads(ctx context.Context, stream grpc.ClientStream, st return } if err := t.vClient.SendLoadStatsRequest(stream, store.Stats(clusterNames)); err != nil { - t.logger.Warningf("%v", err) + t.logger.Warningf("lrs: error from stream: %v", err) return } } diff --git a/xds/internal/xdsclient/controller/version/v2/loadreport.go b/xds/internal/xdsclient/controller/version/v2/loadreport.go index f0034e21c353..da5128ac456e 
100644 --- a/xds/internal/xdsclient/controller/version/v2/loadreport.go +++ b/xds/internal/xdsclient/controller/version/v2/loadreport.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "io" "time" "github.com/golang/protobuf/proto" @@ -59,7 +60,11 @@ func (v2c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { req := &lrspb.LoadStatsRequest{Node: node} v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err } func (v2c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time.Duration, error) { @@ -149,5 +154,17 @@ func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} v2c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } } diff --git a/xds/internal/xdsclient/controller/version/v3/loadreport.go b/xds/internal/xdsclient/controller/version/v3/loadreport.go index 8cdb5476fbbd..f8d866bb1a59 100644 --- a/xds/internal/xdsclient/controller/version/v3/loadreport.go +++ b/xds/internal/xdsclient/controller/version/v3/loadreport.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "io" "time" "github.com/golang/protobuf/proto" @@ -59,7 +60,11 @@ func (v3c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { req := &lrspb.LoadStatsRequest{Node: node} v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err } func (v3c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, 
time.Duration, error) { @@ -148,5 +153,17 @@ func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} v3c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } } From f601dfac73c9ed39935bf1fb6b2d8f5c60b892ae Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Fri, 15 Jul 2022 16:28:52 -0700 Subject: [PATCH 559/998] test/kokoro: Add missing secondary_kube_context to xds LB tests (#5508) --- test/kokoro/xds_k8s_lb.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index 9226f9f129f0..3487392f4544 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -83,6 +83,7 @@ build_docker_images_if_needed() { # Globals: # TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile # KUBE_CONTEXT: The name of kubectl context with GKE cluster access +# SECONDARY_KUBE_CONTEXT: The name of kubectl context with secondary GKE cluster access, if any # TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report # SERVER_IMAGE_NAME: Test server Docker image name # CLIENT_IMAGE_NAME: Test client Docker image name @@ -103,6 +104,7 @@ run_test() { python -m "tests.${test_name}" \ --flagfile="${TEST_DRIVER_FLAGFILE}" \ --kube_context="${KUBE_CONTEXT}" \ + --secondary_kube_context="${SECONDARY_KUBE_CONTEXT}" \ --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ --testing_version="${TESTING_VERSION}" \ From 6dd40ad6e600e3f91f622731bc890eacd7aa3aa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juraci=20Paix=C3=A3o=20Kr=C3=B6hling?= Date: Tue, 19 Jul 2022 19:18:52 -0300 Subject: [PATCH 560/998] Change the log-level when a new 
ServerTransport cannot be created (#5524) --- server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.go b/server.go index b54f5bb572a7..fd90352fc398 100644 --- a/server.go +++ b/server.go @@ -898,7 +898,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. if err != io.EOF { - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } From d0f3c561ea36cd0ae77e5f65824a5e003b613af8 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Tue, 19 Jul 2022 16:06:21 -0700 Subject: [PATCH 561/998] interop client: fixes for interop soak test (#5502) --- interop/test_utils.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/interop/test_utils.go b/interop/test_utils.go index dc841a110604..013ed5cab7a1 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -741,12 +741,18 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D h.Add(latencyMs) if err != nil { totalFailures++ - fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s failed: %s\n", i, latencyMs, p.Addr.String(), err) + addrStr := "nil" + if p.Addr != nil { + addrStr = p.Addr.String() + } + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s failed: %s\n", i, latencyMs, addrStr, err) + <-earliestNextStart continue } if latency > perIterationMaxAcceptableLatency { totalFailures++ fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s exceeds max acceptable latency: %d\n", i, latencyMs, p.Addr.String(), perIterationMaxAcceptableLatency.Milliseconds()) + <-earliestNextStart continue } fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s succeeded\n", i, latencyMs, p.Addr.String()) From 
679138d61ee55550b861b2fa7792b0cf2511abe2 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 20 Jul 2022 17:24:54 -0400 Subject: [PATCH 562/998] gcp/observability: Add support for Environment Variable GRPC_CONFIG_OBSERVABILITY_JSON (#5525) * gcp/observability: Add support for Environment Variable GRPC_CONFIG_OBSERVABILITY_JSON --- gcp/observability/config.go | 47 ++++++++----- gcp/observability/observability_test.go | 90 +++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 16 deletions(-) diff --git a/gcp/observability/config.go b/gcp/observability/config.go index 428d527a30c6..ecda5b230073 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -20,7 +20,9 @@ package observability import ( "context" + "encoding/json" "fmt" + "io/ioutil" "os" "regexp" @@ -31,9 +33,10 @@ import ( ) const ( - envObservabilityConfig = "GRPC_CONFIG_OBSERVABILITY" - envProjectID = "GOOGLE_CLOUD_PROJECT" - logFilterPatternRegexpStr = `^([\w./]+)/((?:\w+)|[*])$` + envObservabilityConfig = "GRPC_CONFIG_OBSERVABILITY" + envObservabilityConfigJSON = "GRPC_CONFIG_OBSERVABILITY_JSON" + envProjectID = "GOOGLE_CLOUD_PROJECT" + logFilterPatternRegexpStr = `^([\w./]+)/((?:\w+)|[*])$` ) var logFilterPatternRegexp = regexp.MustCompile(logFilterPatternRegexpStr) @@ -72,21 +75,33 @@ func validateFilters(config *configpb.ObservabilityConfig) error { return nil } +// unmarshalAndVerifyConfig unmarshals a json string representing an +// observability config into its protobuf format, and also verifies the +// configuration's fields for validity. 
+func unmarshalAndVerifyConfig(rawJSON json.RawMessage) (*configpb.ObservabilityConfig, error) { + var config configpb.ObservabilityConfig + if err := protojson.Unmarshal(rawJSON, &config); err != nil { + return nil, fmt.Errorf("error parsing observability config: %v", err) + } + if err := validateFilters(&config); err != nil { + return nil, fmt.Errorf("error parsing observability config: %v", err) + } + if config.GlobalTraceSamplingRate > 1 || config.GlobalTraceSamplingRate < 0 { + return nil, fmt.Errorf("error parsing observability config: invalid global trace sampling rate %v", config.GlobalTraceSamplingRate) + } + logger.Infof("Parsed ObservabilityConfig: %+v", &config) + return &config, nil +} + func parseObservabilityConfig() (*configpb.ObservabilityConfig, error) { - // Parse the config from ENV var - if content := os.Getenv(envObservabilityConfig); content != "" { - var config configpb.ObservabilityConfig - if err := protojson.Unmarshal([]byte(content), &config); err != nil { - return nil, fmt.Errorf("error parsing observability config from env %v: %v", envObservabilityConfig, err) - } - if err := validateFilters(&config); err != nil { - return nil, fmt.Errorf("error parsing observability config: %v", err) - } - if config.GlobalTraceSamplingRate > 1 || config.GlobalTraceSamplingRate < 0 { - return nil, fmt.Errorf("error parsing observability config: invalid global trace sampling rate %v", config.GlobalTraceSamplingRate) + if fileSystemPath := os.Getenv(envObservabilityConfigJSON); fileSystemPath != "" { + content, err := ioutil.ReadFile(fileSystemPath) // TODO: Switch to os.ReadFile once dropped support for go 1.15 + if err != nil { + return nil, fmt.Errorf("error reading observability configuration file %q: %v", fileSystemPath, err) } - logger.Infof("Parsed ObservabilityConfig: %+v", &config) - return &config, nil + return unmarshalAndVerifyConfig(content) + } else if content := os.Getenv(envObservabilityConfig); content != "" { + return 
unmarshalAndVerifyConfig([]byte(content)) } // If the ENV var doesn't exist, do nothing return nil, nil diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 62936ccec109..c5fa59c6648f 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -21,7 +21,9 @@ package observability import ( "bytes" "context" + "encoding/json" "fmt" + "io/ioutil" "net" "os" "sync" @@ -675,6 +677,7 @@ func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { if err != nil { t.Fatalf("failed to convert config to JSON: %v", err) } + os.Setenv(envObservabilityConfigJSON, "") os.Setenv(envObservabilityConfig, string(configJSON)) // If there is at least one invalid pattern, which should not be silently tolerated. if err := Start(context.Background()); err == nil { @@ -682,7 +685,94 @@ func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { } } +// createTmpConfigInFileSystem creates a random observability config at a random +// place in the temporary portion of the file system dependent on system. It +// also sets the environment variable GRPC_CONFIG_OBSERVABILITY_JSON to point to +// this created config. +func createTmpConfigInFileSystem(rawJSON string) (*os.File, error) { + configJSONFile, err := ioutil.TempFile(os.TempDir(), "configJSON-") + if err != nil { + return nil, fmt.Errorf("cannot create file %v: %v", configJSONFile.Name(), err) + } + _, err = configJSONFile.Write(json.RawMessage(rawJSON)) + if err != nil { + return nil, fmt.Errorf("cannot write marshalled JSON: %v", err) + } + os.Setenv(envObservabilityConfigJSON, configJSONFile.Name()) + return configJSONFile, nil +} + +// TestJSONEnvVarSet tests a valid observability configuration specified by the +// GRPC_CONFIG_OBSERVABILITY_JSON environment variable, whose value represents a +// file path pointing to a JSON encoded config. 
+func (s) TestJSONEnvVarSet(t *testing.T) { + configJSON := `{ + "destinationProjectId": "fake", + "logFilters":[{"pattern":"*","headerBytes":1073741824,"messageBytes":1073741824}] + }` + configJSONFile, err := createTmpConfigInFileSystem(configJSON) + defer configJSONFile.Close() + if err != nil { + t.Fatalf("failed to create config in file system: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := Start(ctx); err != nil { + t.Fatalf("error starting observability with valid config through file system: %v", err) + } + defer End() +} + +// TestBothConfigEnvVarsSet tests the scenario where both configuration +// environment variables are set. The file system environment variable should +// take precedence, and an error should return in the case of the file system +// configuration being invalid, even if the direct configuration environment +// variable is set and valid. +func (s) TestBothConfigEnvVarsSet(t *testing.T) { + configJSON := `{ + "destinationProjectId":"fake", + "logFilters":[{"pattern":":-)"}, {"pattern":"*"}] + }` + configJSONFile, err := createTmpConfigInFileSystem(configJSON) + defer configJSONFile.Close() + if err != nil { + t.Fatalf("failed to create config in file system: %v", err) + } + // This configuration should be ignored, as precedence 2. 
+ validConfig := &configpb.ObservabilityConfig{ + EnableCloudLogging: true, + DestinationProjectId: "fake", + LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + { + Pattern: "*", + HeaderBytes: infinitySizeBytes, + MessageBytes: infinitySizeBytes, + }, + }, + } + validConfigJSON, err := protojson.Marshal(validConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + os.Setenv(envObservabilityConfig, string(validConfigJSON)) + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } +} + +// TestErrInFileSystemEnvVar tests the scenario where an observability +// configuration is specified with environment variable that specifies a +// location in the file system for configuration, and this location doesn't have +// a file (or valid configuration). +func (s) TestErrInFileSystemEnvVar(t *testing.T) { + os.Setenv(envObservabilityConfigJSON, "/this-file/does-not-exist") + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid file system path not triggering error") + } +} + func (s) TestNoEnvSet(t *testing.T) { + os.Setenv(envObservabilityConfigJSON, "") os.Setenv(envObservabilityConfig, "") // If there is no observability config set at all, the Start should return an error. 
if err := Start(context.Background()); err == nil { From 86117db53ecc029020470ecdf80c520ce375d2b7 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 21 Jul 2022 17:51:01 +0000 Subject: [PATCH 563/998] balancer/weightedtarget: pause picker updates during UpdateClientConnState (#5527) --- .../weightedaggregator/aggregator.go | 41 ++++++++++++++ balancer/weightedtarget/weightedtarget.go | 3 + .../weightedtarget/weightedtarget_test.go | 55 +++++++++++++++++++ 3 files changed, 99 insertions(+) diff --git a/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go index 7e1d106e9ff9..38bd9b223f80 100644 --- a/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -68,6 +68,11 @@ type Aggregator struct { // // If an ID is not in map, it's either removed or never added. idToPickerState map[string]*weightedPickerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool } // New creates a new weighted balancer state aggregator. @@ -141,6 +146,27 @@ func (wbsa *Aggregator) UpdateWeight(id string, newWeight uint32) { pState.weight = newWeight } +// PauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (wbsa *Aggregator) PauseStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = true + wbsa.needUpdateStateOnResume = false +} + +// ResumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. 
+func (wbsa *Aggregator) ResumeStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = false + if wbsa.needUpdateStateOnResume { + wbsa.cc.UpdateState(wbsa.build()) + } +} + // UpdateState is called to report a balancer state change from sub-balancer. // It's usually called by the balancer group. // @@ -166,6 +192,14 @@ func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { if !wbsa.started { return } + + if wbsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + wbsa.needUpdateStateOnResume = true + return + } + wbsa.cc.UpdateState(wbsa.build()) } @@ -191,6 +225,13 @@ func (wbsa *Aggregator) BuildAndUpdate() { if !wbsa.started { return } + if wbsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + wbsa.needUpdateStateOnResume = true + return + } + wbsa.cc.UpdateState(wbsa.build()) } diff --git a/balancer/weightedtarget/weightedtarget.go b/balancer/weightedtarget/weightedtarget.go index b6fa532b5120..2582c84c5488 100644 --- a/balancer/weightedtarget/weightedtarget.go +++ b/balancer/weightedtarget/weightedtarget.go @@ -90,6 +90,9 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat var rebuildStateAndPicker bool + b.stateAggregator.PauseStateUpdates() + defer b.stateAggregator.ResumeStateUpdates() + // Remove sub-pickers and sub-balancers that are not in the new config. 
for name := range b.targets { if _, ok := newConfig.Targets[name]; !ok { diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index fa63b3f25822..cc9235264224 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -1218,3 +1218,58 @@ func (s) TestInitialIdle(t *testing.T) { t.Fatalf("Received aggregated state: %v, want Idle", state) } } + +// tcc wraps a testutils.TestClientConn but stores all state transitions in a +// slice. +type tcc struct { + *testutils.TestClientConn + states []balancer.State +} + +func (t *tcc) UpdateState(bs balancer.State) { + t.states = append(t.states, bs) + t.TestClientConn.UpdateState(bs) +} + +func (s) TestUpdateStatePauses(t *testing.T) { + cc := &tcc{TestClientConn: testutils.NewTestClientConn(t)} + + balFuncs := stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: nil}) + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil}) + return nil + }, + } + stub.Register("update_state_balancer", balFuncs) + + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"update_state_balancer": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. 
+ addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addrs[0], []string{"cds:cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that the only state update is the second one called by the child. + if len(cc.states) != 1 || cc.states[0].ConnectivityState != connectivity.Ready { + t.Fatalf("cc.states = %v; want [connectivity.Ready]", cc.states) + } +} From fdc5d2f3da856f3cdd3483280ae33da5bee17a93 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 21 Jul 2022 22:31:07 +0000 Subject: [PATCH 564/998] xds/clustermanager: pause picker updates during UpdateClientConnState (#5528) --- .../clustermanager/balancerstateaggregator.go | 38 +++++++++++++ .../balancer/clustermanager/clustermanager.go | 2 + .../clustermanager/clustermanager_test.go | 56 +++++++++++++++++++ 3 files changed, 96 insertions(+) diff --git a/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/xds/internal/balancer/clustermanager/balancerstateaggregator.go index 6e0e03299f95..4b971a3e241b 100644 --- a/xds/internal/balancer/clustermanager/balancerstateaggregator.go +++ b/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -57,6 +57,11 @@ type balancerStateAggregator struct { // // If an ID is not in map, it's either removed or never added. idToPickerState map[string]*subBalancerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. 
+ needUpdateStateOnResume bool } func newBalancerStateAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLogger) *balancerStateAggregator { @@ -118,6 +123,27 @@ func (bsa *balancerStateAggregator) remove(id string) { delete(bsa.idToPickerState, id) } +// pauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (bsa *balancerStateAggregator) pauseStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = true + bsa.needUpdateStateOnResume = false +} + +// resumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. +func (bsa *balancerStateAggregator) resumeStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = false + if bsa.needUpdateStateOnResume { + bsa.cc.UpdateState(bsa.build()) + } +} + // UpdateState is called to report a balancer state change from sub-balancer. // It's usually called by the balancer group. // @@ -143,6 +169,12 @@ func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) if !bsa.started { return } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + bsa.needUpdateStateOnResume = true + return + } bsa.cc.UpdateState(bsa.build()) } @@ -168,6 +200,12 @@ func (bsa *balancerStateAggregator) buildAndUpdate() { if !bsa.started { return } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. 
+ bsa.needUpdateStateOnResume = true + return + } bsa.cc.UpdateState(bsa.build()) } diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index 930427df1b72..6ac7a39b2b4c 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -123,6 +123,8 @@ func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { } b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) + b.stateAggregator.pauseStateUpdates() + defer b.stateAggregator.resumeStateUpdates() b.updateChildren(s, newConfig) return nil } diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index e8e551a0ca17..7d5966339444 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -763,3 +763,59 @@ func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { } wb.ClientConn.UpdateState(state) } + +// tcc wraps a testutils.TestClientConn but stores all state transitions in a +// slice. 
+type tcc struct { + *testutils.TestClientConn + states []balancer.State +} + +func (t *tcc) UpdateState(bs balancer.State) { + t.states = append(t.states, bs) + t.TestClientConn.UpdateState(bs) +} + +func (s) TestUpdateStatePauses(t *testing.T) { + cc := &tcc{TestClientConn: testutils.NewTestClientConn(t)} + + balFuncs := stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: nil}) + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil}) + return nil + }, + } + stub.Register("update_state_balancer", balFuncs) + + rtb := rtBuilder.Build(cc, balancer.BuildOptions{}) + + configJSON1 := `{ +"children": { + "cds:cluster_1":{ "childPolicy": [{"update_state_balancer":""}] } +} +}` + + config1, err := rtParser.ParseConfig([]byte(configJSON1)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), + }}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that the only state update is the second one called by the child. 
+ if len(cc.states) != 1 || cc.states[0].ConnectivityState != connectivity.Ready { + t.Fatalf("cc.states = %v; want [connectivity.Ready]", cc.states) + } +} From ae261b06ed98132283895f7618b5a24c6acb058b Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 25 Jul 2022 17:12:28 -0400 Subject: [PATCH 565/998] xds: Fixed GoLang regression for Outlier Detection (#5537) xds: Fixed GoLang regression for Outlier Detection --- internal/envconfig/xds.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 55aaeea8b455..a83b26bb869c 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -41,7 +41,6 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" - outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -86,7 +85,7 @@ var ( // XDSOutlierDetection indicates whether outlier detection support is // enabled, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". - XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") + XDSOutlierDetection = false // XDSFederation indicates whether federation support is enabled. 
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") From 9a689dc4bb1a4b519ecfef5047c1798f51178ff7 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 25 Jul 2022 14:16:05 -0700 Subject: [PATCH 566/998] xdsclient: change receiver on BootstrapConfig() to be consistent (#5532) --- xds/internal/xdsclient/client.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 90551986a0d2..2fb698183519 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -52,6 +52,8 @@ type XDSClient interface { Close() } +var _ XDSClient = &clientImpl{} + // clientImpl is the real implementation of the xds client. The exported Client // is a wrapper of this struct with a ref count. // @@ -105,7 +107,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i // BootstrapConfig returns the configuration read from the bootstrap file. // Callers must treat the return value as read-only. -func (c *clientRefCounted) BootstrapConfig() *bootstrap.Config { +func (c *clientImpl) BootstrapConfig() *bootstrap.Config { return c.config } From b695a7f27da0d9f41841e48f933bf6ccbfb94bcd Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Mon, 25 Jul 2022 15:17:04 -0700 Subject: [PATCH 567/998] test/interop: increase pick_first timeout (#5529) --- interop/test_utils.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/interop/test_utils.go b/interop/test_utils.go index 013ed5cab7a1..8a2baceb80c0 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -655,7 +655,8 @@ func DoPickFirstUnary(tc testgrpc.TestServiceClient) { Payload: pl, FillServerId: true, } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + // TODO(mohanli): Revert the timeout back to 10s once TD migrates to xdstp. 
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() var serverID string for i := 0; i < rpcCount; i++ { From e72cb1c13f9f80d4f3f1b06463096f2403682202 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 27 Jul 2022 09:55:15 -0700 Subject: [PATCH 568/998] xdsclient: organize existing contents better with new files (#5533) --- xds/internal/xdsclient/authority.go | 124 -------------- xds/internal/xdsclient/client.go | 131 -------------- xds/internal/xdsclient/client_new.go | 134 +++++++++++++++ xds/internal/xdsclient/clientimpl.go | 95 ++++++++++ .../xdsclient/clientimpl_authority.go | 162 ++++++++++++++++++ .../xdsclient/{dump.go => clientimpl_dump.go} | 0 ...loadreport.go => clientimpl_loadreport.go} | 0 .../xdsclient/clientimpl_validator.go | 67 ++++++++ .../{watchers.go => clientimpl_watchers.go} | 0 xds/internal/xdsclient/controller.go | 40 ----- xds/internal/xdsclient/singleton.go | 122 ++----------- 11 files changed, 469 insertions(+), 406 deletions(-) create mode 100644 xds/internal/xdsclient/client_new.go create mode 100644 xds/internal/xdsclient/clientimpl.go create mode 100644 xds/internal/xdsclient/clientimpl_authority.go rename xds/internal/xdsclient/{dump.go => clientimpl_dump.go} (100%) rename xds/internal/xdsclient/{loadreport.go => clientimpl_loadreport.go} (100%) create mode 100644 xds/internal/xdsclient/clientimpl_validator.go rename xds/internal/xdsclient/{watchers.go => clientimpl_watchers.go} (100%) delete mode 100644 xds/internal/xdsclient/controller.go diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 4c0ad2832c86..817cb7338f5a 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -18,136 +18,12 @@ package xdsclient import ( - "errors" - "fmt" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" 
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// findAuthority returns the authority for this name. If it doesn't already -// exist, one will be created. -// -// Note that this doesn't always create new authority. authorities with the same -// config but different names are shared. -// -// The returned unref function must be called when the caller is done using this -// authority, without holding c.authorityMu. -// -// Caller must not hold c.authorityMu. -func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref func(), _ error) { - scheme, authority := n.Scheme, n.Authority - - c.authorityMu.Lock() - defer c.authorityMu.Unlock() - if c.done.HasFired() { - return nil, nil, errors.New("the xds-client is closed") - } - - config := c.config.XDSServer - if scheme == xdsresource.FederationScheme { - cfg, ok := c.config.Authorities[authority] - if !ok { - return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) - } - if cfg.XDSServer != nil { - config = cfg.XDSServer - } - } - - a, err := c.newAuthorityLocked(config) - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) - } - // All returned authority from this function will be used by a watch, - // holding the ref here. - // - // Note that this must be done while c.authorityMu is held, to avoid the - // race that an authority is returned, but before the watch starts, the - // old last watch is canceled (in another goroutine), causing this - // authority to be removed, and then a watch will start on a removed - // authority. - // - // unref() will be done when the watch is canceled. - a.ref() - return a, func() { c.unrefAuthority(a) }, nil -} - -// newAuthorityLocked creates a new authority for the config. 
But before that, it -// checks the cache to see if an authority for this config already exists. -// -// The caller must take a reference of the returned authority before using, and -// unref afterwards. -// -// caller must hold c.authorityMu -func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { - // First check if there's already an authority for this config. If found, it - // means this authority is used by other watches (could be the same - // authority name, or a different authority name but the same server - // config). Return it. - configStr := config.String() - if a, ok := c.authorities[configStr]; ok { - return a, nil - } - // Second check if there's an authority in the idle cache. If found, it - // means this authority was created, but moved to the idle cache because the - // watch was canceled. Move it from idle cache to the authority cache, and - // return. - if old, ok := c.idleAuthorities.Remove(configStr); ok { - oldA, _ := old.(*authority) - if oldA != nil { - c.authorities[configStr] = oldA - return oldA, nil - } - } - - // Make a new authority since there's no existing authority for this config. - nodeID := "" - if v3, ok := c.config.XDSServer.NodeProto.(*v3corepb.Node); ok { - nodeID = v3.GetId() - } else if v2, ok := c.config.XDSServer.NodeProto.(*v2corepb.Node); ok { - nodeID = v2.GetId() - } - ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, nodeID, c.logger)} - defer func() { - if retErr != nil { - ret.close() - } - }() - ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger, nil) - if err != nil { - return nil, err - } - ret.controller = ctr - // Add it to the cache, so it will be reused. - c.authorities[configStr] = ret - return ret, nil -} - -// unrefAuthority unrefs the authority. It also moves the authority to idle -// cache if it's ref count is 0. -// -// This function doesn't need to called explicitly. 
It's called by the returned -// unref from findAuthority(). -// -// Caller must not hold c.authorityMu. -func (c *clientImpl) unrefAuthority(a *authority) { - c.authorityMu.Lock() - defer c.authorityMu.Unlock() - if a.unref() > 0 { - return - } - configStr := a.config.String() - delete(c.authorities, configStr) - c.idleAuthorities.Add(configStr, a, func() { - a.close() - }) -} - // authority is a combination of pubsub and the controller for this authority. // // Note that it might make sense to use one pubsub for all the resources (for diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 2fb698183519..8ae7301fb7e3 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -21,13 +21,6 @@ package xdsclient import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/internal/cache" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -51,127 +44,3 @@ type XDSClient interface { BootstrapConfig() *bootstrap.Config Close() } - -var _ XDSClient = &clientImpl{} - -// clientImpl is the real implementation of the xds client. The exported Client -// is a wrapper of this struct with a ref count. -// -// Implements UpdateHandler interface. -// TODO(easwars): Make a wrapper struct which implements this interface in the -// style of ccBalancerWrapper so that the Client type does not implement these -// exported methods. -type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config - - // authorityMu protects the authority fields. It's necessary because an - // authority is created when it's used. - authorityMu sync.Mutex - // authorities is a map from ServerConfig to authority. So that - // different authorities sharing the same ServerConfig can share the - // authority. 
- // - // The key is **ServerConfig.String()**, not the authority name. - // - // An authority is either in authorities, or idleAuthorities, - // never both. - authorities map[string]*authority - // idleAuthorities keeps the authorities that are not used (the last - // watch on it was canceled). They are kept in the cache and will be deleted - // after a timeout. The key is ServerConfig.String(). - // - // An authority is either in authorities, or idleAuthorities, - // never both. - idleAuthorities *cache.TimeoutCache - - logger *grpclog.PrefixLogger - watchExpiryTimeout time.Duration -} - -// newWithConfig returns a new xdsClient with the given config. -func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { - c := &clientImpl{ - done: grpcsync.NewEvent(), - config: config, - watchExpiryTimeout: watchExpiryTimeout, - authorities: make(map[string]*authority), - idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), - } - - c.logger = prefixLogger(c) - c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) - c.logger.Infof("Created") - return c, nil -} - -// BootstrapConfig returns the configuration read from the bootstrap file. -// Callers must treat the return value as read-only. -func (c *clientImpl) BootstrapConfig() *bootstrap.Config { - return c.config -} - -// Close closes the gRPC connection to the management server. -func (c *clientImpl) Close() { - if c.done.HasFired() { - return - } - c.done.Fire() - // TODO: Should we invoke the registered callbacks here with an error that - // the client is closed? - - // Note that Close needs to check for nils even if some of them are always - // set in the constructor. This is because the constructor defers Close() in - // error cases, and the fields might not be set when the error happens. 
- - c.authorityMu.Lock() - for _, a := range c.authorities { - a.close() - } - c.idleAuthorities.Clear(true) - c.authorityMu.Unlock() - - c.logger.Infof("Shutdown") -} - -func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error { - if fc == nil { - return nil - } - return c.securityConfigUpdateValidator(fc.SecurityCfg) -} - -func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error { - if sc == nil { - return nil - } - if sc.IdentityInstanceName != "" { - if _, ok := c.config.CertProviderConfigs[sc.IdentityInstanceName]; !ok { - return fmt.Errorf("identitiy certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) - } - } - if sc.RootInstanceName != "" { - if _, ok := c.config.CertProviderConfigs[sc.RootInstanceName]; !ok { - return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) - } - } - return nil -} - -func (c *clientImpl) updateValidator(u interface{}) error { - switch update := u.(type) { - case xdsresource.ListenerUpdate: - if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil { - return nil - } - return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator) - case xdsresource.ClusterUpdate: - return c.securityConfigUpdateValidator(update.SecurityCfg) - default: - // We currently invoke this update validation function only for LDS and - // CDS updates. In the future, if we wish to invoke it for other xDS - // updates, corresponding plumbing needs to be added to those unmarshal - // functions. - } - return nil -} diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go new file mode 100644 index 000000000000..0631d3b0fadb --- /dev/null +++ b/xds/internal/xdsclient/client_new.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "bytes" + "encoding/json" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +// New returns a new xDS client configured by the bootstrap file specified in env +// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. +// +// The returned client is a reference counted singleton instance. This function +// creates a new client only when one doesn't already exist. +// +// Note that the first invocation of New() or NewWithConfig() sets the client +// singleton. The following calls will return the singleton client without +// checking or using the config. +func New() (XDSClient, error) { + return newRefCountedWithConfig(nil) +} + +// NewWithConfig returns a new xDS client configured by the given config. +// +// Internal/Testing Only +// +// This function should ONLY be used for internal (c2p resolver) and/or testing +// purposese. DO NOT use this elsewhere. Use New() instead. +func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { + return newRefCountedWithConfig(config) +} + +// newWithConfig returns a new xdsClient with the given config. 
+func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { + c := &clientImpl{ + done: grpcsync.NewEvent(), + config: config, + watchExpiryTimeout: watchExpiryTimeout, + authorities: make(map[string]*authority), + idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), + } + + c.logger = prefixLogger(c) + c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) + c.logger.Infof("Created") + return c, nil +} + +// NewWithConfigForTesting returns an xDS client for the specified bootstrap +// config, separate from the global singleton. +// +// Testing Only +// +// This function should ONLY be used for testing purposes. +func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { + cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + return nil, err + } + return &clientRefCounted{clientImpl: cl, refCount: 1}, nil +} + +// NewWithBootstrapContentsForTesting returns an xDS client for this config, +// separate from the global singleton. +// +// +// Testing Only +// +// This function should ONLY be used for testing purposes. +func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) { + // Normalize the contents + buf := bytes.Buffer{} + err := json.Indent(&buf, contents, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON: %v", err) + } + contents = bytes.TrimSpace(buf.Bytes()) + + clientsMu.Lock() + defer clientsMu.Unlock() + if c := clients[string(contents)]; c != nil { + c.mu.Lock() + // Since we don't remove the *Client from the map when it is closed, we + // need to recreate the impl if the ref count dropped to zero. 
+ if c.refCount > 0 { + c.refCount++ + c.mu.Unlock() + return c, nil + } + c.mu.Unlock() + } + + bcfg, err := bootstrap.NewConfigFromContentsForTesting(contents) + if err != nil { + return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) + } + + cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + return nil, err + } + + c := &clientRefCounted{clientImpl: cImpl, refCount: 1} + clients[string(contents)] = c + return c, nil +} + +var ( + clients = map[string]*clientRefCounted{} + clientsMu sync.Mutex +) diff --git a/xds/internal/xdsclient/clientimpl.go b/xds/internal/xdsclient/clientimpl.go new file mode 100644 index 000000000000..800ae91fa311 --- /dev/null +++ b/xds/internal/xdsclient/clientimpl.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "sync" + "time" + + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +var _ XDSClient = &clientImpl{} + +// clientImpl is the real implementation of the xds client. The exported Client +// is a wrapper of this struct with a ref count. +// +// Implements UpdateHandler interface. 
+// TODO(easwars): Make a wrapper struct which implements this interface in the +// style of ccBalancerWrapper so that the Client type does not implement these +// exported methods. +type clientImpl struct { + done *grpcsync.Event + config *bootstrap.Config + + // authorityMu protects the authority fields. It's necessary because an + // authority is created when it's used. + authorityMu sync.Mutex + // authorities is a map from ServerConfig to authority. So that + // different authorities sharing the same ServerConfig can share the + // authority. + // + // The key is **ServerConfig.String()**, not the authority name. + // + // An authority is either in authorities, or idleAuthorities, + // never both. + authorities map[string]*authority + // idleAuthorities keeps the authorities that are not used (the last + // watch on it was canceled). They are kept in the cache and will be deleted + // after a timeout. The key is ServerConfig.String(). + // + // An authority is either in authorities, or idleAuthorities, + // never both. + idleAuthorities *cache.TimeoutCache + + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration +} + +// BootstrapConfig returns the configuration read from the bootstrap file. +// Callers must treat the return value as read-only. +func (c *clientImpl) BootstrapConfig() *bootstrap.Config { + return c.config +} + +// Close closes the gRPC connection to the management server. +func (c *clientImpl) Close() { + if c.done.HasFired() { + return + } + c.done.Fire() + // TODO: Should we invoke the registered callbacks here with an error that + // the client is closed? + + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. 
+ + c.authorityMu.Lock() + for _, a := range c.authorities { + a.close() + } + c.idleAuthorities.Clear(true) + c.authorityMu.Unlock() + + c.logger.Infof("Shutdown") +} diff --git a/xds/internal/xdsclient/clientimpl_authority.go b/xds/internal/xdsclient/clientimpl_authority.go new file mode 100644 index 000000000000..674ccb7dfbd6 --- /dev/null +++ b/xds/internal/xdsclient/clientimpl_authority.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsclient + +import ( + "errors" + "fmt" + "time" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/controller" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +type controllerInterface interface { + AddWatch(resourceType xdsresource.ResourceType, resourceName string) + RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) + ReportLoad(server string) (*load.Store, func()) + Close() +} + +var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (controllerInterface, error) { + return controller.New(config, pubsub, validator, logger, boff) +} + +// findAuthority returns the authority for this name. If it doesn't already +// exist, one will be created. +// +// Note that this doesn't always create new authority. authorities with the same +// config but different names are shared. +// +// The returned unref function must be called when the caller is done using this +// authority, without holding c.authorityMu. +// +// Caller must not hold c.authorityMu. 
+func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref func(), _ error) { + scheme, authority := n.Scheme, n.Authority + + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if c.done.HasFired() { + return nil, nil, errors.New("the xds-client is closed") + } + + config := c.config.XDSServer + if scheme == xdsresource.FederationScheme { + cfg, ok := c.config.Authorities[authority] + if !ok { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + config = cfg.XDSServer + } + + a, err := c.newAuthorityLocked(config) + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) + } + // All returned authority from this function will be used by a watch, + // holding the ref here. + // + // Note that this must be done while c.authorityMu is held, to avoid the + // race that an authority is returned, but before the watch starts, the + // old last watch is canceled (in another goroutine), causing this + // authority to be removed, and then a watch will start on a removed + // authority. + // + // unref() will be done when the watch is canceled. + a.ref() + return a, func() { c.unrefAuthority(a) }, nil +} + +// newAuthorityLocked creates a new authority for the config. But before that, it +// checks the cache to see if an authority for this config already exists. +// +// The caller must take a reference of the returned authority before using, and +// unref afterwards. +// +// caller must hold c.authorityMu +func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { + // First check if there's already an authority for this config. If found, it + // means this authority is used by other watches (could be the same + // authority name, or a different authority name but the same server + // config). Return it. 
+ configStr := config.String() + if a, ok := c.authorities[configStr]; ok { + return a, nil + } + // Second check if there's an authority in the idle cache. If found, it + // means this authority was created, but moved to the idle cache because the + // watch was canceled. Move it from idle cache to the authority cache, and + // return. + if old, ok := c.idleAuthorities.Remove(configStr); ok { + oldA, _ := old.(*authority) + if oldA != nil { + c.authorities[configStr] = oldA + return oldA, nil + } + } + + // Make a new authority since there's no existing authority for this config. + nodeID := "" + if v3, ok := c.config.XDSServer.NodeProto.(*v3corepb.Node); ok { + nodeID = v3.GetId() + } else if v2, ok := c.config.XDSServer.NodeProto.(*v2corepb.Node); ok { + nodeID = v2.GetId() + } + ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, nodeID, c.logger)} + defer func() { + if retErr != nil { + ret.close() + } + }() + ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger, nil) + if err != nil { + return nil, err + } + ret.controller = ctr + // Add it to the cache, so it will be reused. + c.authorities[configStr] = ret + return ret, nil +} + +// unrefAuthority unrefs the authority. It also moves the authority to idle +// cache if it's ref count is 0. +// +// This function doesn't need to called explicitly. It's called by the returned +// unref from findAuthority(). +// +// Caller must not hold c.authorityMu. 
+func (c *clientImpl) unrefAuthority(a *authority) { + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if a.unref() > 0 { + return + } + configStr := a.config.String() + delete(c.authorities, configStr) + c.idleAuthorities.Add(configStr, a, func() { + a.close() + }) +} diff --git a/xds/internal/xdsclient/dump.go b/xds/internal/xdsclient/clientimpl_dump.go similarity index 100% rename from xds/internal/xdsclient/dump.go rename to xds/internal/xdsclient/clientimpl_dump.go diff --git a/xds/internal/xdsclient/loadreport.go b/xds/internal/xdsclient/clientimpl_loadreport.go similarity index 100% rename from xds/internal/xdsclient/loadreport.go rename to xds/internal/xdsclient/clientimpl_loadreport.go diff --git a/xds/internal/xdsclient/clientimpl_validator.go b/xds/internal/xdsclient/clientimpl_validator.go new file mode 100644 index 000000000000..50bdbe4e23f4 --- /dev/null +++ b/xds/internal/xdsclient/clientimpl_validator.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient + +import ( + "fmt" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error { + if fc == nil { + return nil + } + return c.securityConfigUpdateValidator(fc.SecurityCfg) +} + +func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error { + if sc == nil { + return nil + } + if sc.IdentityInstanceName != "" { + if _, ok := c.config.CertProviderConfigs[sc.IdentityInstanceName]; !ok { + return fmt.Errorf("identitiy certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) + } + } + if sc.RootInstanceName != "" { + if _, ok := c.config.CertProviderConfigs[sc.RootInstanceName]; !ok { + return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) + } + } + return nil +} + +func (c *clientImpl) updateValidator(u interface{}) error { + switch update := u.(type) { + case xdsresource.ListenerUpdate: + if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil { + return nil + } + return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator) + case xdsresource.ClusterUpdate: + return c.securityConfigUpdateValidator(update.SecurityCfg) + default: + // We currently invoke this update validation function only for LDS and + // CDS updates. In the future, if we wish to invoke it for other xDS + // updates, corresponding plumbing needs to be added to those unmarshal + // functions. 
+ } + return nil +} diff --git a/xds/internal/xdsclient/watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go similarity index 100% rename from xds/internal/xdsclient/watchers.go rename to xds/internal/xdsclient/clientimpl_watchers.go diff --git a/xds/internal/xdsclient/controller.go b/xds/internal/xdsclient/controller.go deleted file mode 100644 index a99f4b164949..000000000000 --- a/xds/internal/xdsclient/controller.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package xdsclient - -import ( - "time" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/controller" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -type controllerInterface interface { - AddWatch(resourceType xdsresource.ResourceType, resourceName string) - RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) - ReportLoad(server string) (*load.Store, func()) - Close() -} - -var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (controllerInterface, error) { - return controller.New(config, pubsub, validator, logger, boff) -} diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index f4951ba8f488..c07dd4323f76 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -19,8 +19,6 @@ package xdsclient import ( - "bytes" - "encoding/json" "fmt" "sync" "time" @@ -34,7 +32,7 @@ const ( ) var ( - // This is the Client returned by New(). It contains one client implementation, + // This is the client returned by New(). It contains one client implementation, // and maintains the refcount. singletonClient = &clientRefCounted{} @@ -63,55 +61,6 @@ func (o *onceClosingClient) Close() { o.once.Do(o.XDSClient.Close) } -// clientRefCounted is ref-counted, and to be shared by the xds resolver and -// balancer implementations, across multiple ClientConns and Servers. -type clientRefCounted struct { - *clientImpl - - // This mu protects all the fields, including the embedded clientImpl above. 
- mu sync.Mutex - refCount int -} - -// New returns a new xdsClient configured by the bootstrap file specified in env -// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. -// -// The returned xdsClient is a singleton. This function creates the xds client -// if it doesn't already exist. -// -// Note that the first invocation of New() or NewWithConfig() sets the client -// singleton. The following calls will return the singleton xds client without -// checking or using the config. -func New() (XDSClient, error) { - // This cannot just return newRefCounted(), because in error cases, the - // returned nil is a typed nil (*clientRefCounted), which may cause nil - // checks fail. - c, err := newRefCounted() - if err != nil { - return nil, err - } - return c, nil -} - -func newRefCounted() (XDSClient, error) { - return newRefCountedWithConfig(nil) -} - -// NewWithConfig returns a new xdsClient configured by the given config. -// -// The returned xdsClient is a singleton. This function creates the xds client -// if it doesn't already exist. -// -// Note that the first invocation of New() or NewWithConfig() sets the client -// singleton. The following calls will return the singleton xds client without -// checking or using the config. -// -// This function is internal only, for c2p resolver and testing to use. DO NOT -// use this elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { - return newRefCountedWithConfig(config) -} - func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() @@ -144,6 +93,16 @@ func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { return &onceClosingClient{XDSClient: singletonClient}, nil } +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. 
+type clientRefCounted struct { + *clientImpl + + // This mu protects all the fields, including the embedded clientImpl above. + mu sync.Mutex + refCount int +} + // Close closes the client. It does ref count of the xds client implementation, // and closes the gRPC connection to the management server when ref count // reaches 0. @@ -159,62 +118,3 @@ func (c *clientRefCounted) Close() { singletonClientImplCloseHook() } } - -// NewWithConfigForTesting returns an xdsClient for the specified bootstrap -// config, separate from the global singleton. -// -// This should be used for testing purposes only. -func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { - cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, err - } - return &clientRefCounted{clientImpl: cl, refCount: 1}, nil -} - -// NewWithBootstrapContentsForTesting returns an xdsClient for this config, -// separate from the global singleton. -// -// This should be used for testing purposes only. -func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) { - // Normalize the contents - buf := bytes.Buffer{} - err := json.Indent(&buf, contents, "", "") - if err != nil { - return nil, fmt.Errorf("xds: error normalizing JSON: %v", err) - } - contents = bytes.TrimSpace(buf.Bytes()) - - clientsMu.Lock() - defer clientsMu.Unlock() - if c := clients[string(contents)]; c != nil { - c.mu.Lock() - // Since we don't remove the *Client from the map when it is closed, we - // need to recreate the impl if the ref count dropped to zero. 
- if c.refCount > 0 { - c.refCount++ - c.mu.Unlock() - return c, nil - } - c.mu.Unlock() - } - - bcfg, err := bootstrap.NewConfigFromContentsForTesting(contents) - if err != nil { - return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) - } - - cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, err - } - - c := &clientRefCounted{clientImpl: cImpl, refCount: 1} - clients[string(contents)] = c - return c, nil -} - -var ( - clients = map[string]*clientRefCounted{} - clientsMu sync.Mutex -) From fd4700cf417e8a9485cf7a67c839224ed919b192 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 28 Jul 2022 10:06:03 -0700 Subject: [PATCH 569/998] xdsclient: cleanup listener watchers test (#5506) --- .../xdsclient/clientimpl_authority.go | 4 +- .../xdsclient/e2e_test/lds_watchers_test.go | 798 ++++++++++++++++++ .../xdsclient/watchers_listener_test.go | 206 ----- 3 files changed, 801 insertions(+), 207 deletions(-) create mode 100644 xds/internal/xdsclient/e2e_test/lds_watchers_test.go delete mode 100644 xds/internal/xdsclient/watchers_listener_test.go diff --git a/xds/internal/xdsclient/clientimpl_authority.go b/xds/internal/xdsclient/clientimpl_authority.go index 674ccb7dfbd6..623420ccc78f 100644 --- a/xds/internal/xdsclient/clientimpl_authority.go +++ b/xds/internal/xdsclient/clientimpl_authority.go @@ -69,7 +69,9 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun if !ok { return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) } - config = cfg.XDSServer + if cfg.XDSServer != nil { + config = cfg.XDSServer + } } a, err := c.newAuthorityLocked(config) diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go new file mode 100644 index 000000000000..a9ddda12443b --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -0,0 +1,798 @@ +/* + * + * 
Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/envoyproxy/go-control-plane/pkg/wellknown" + + _ "google.golang.org/grpc/xds" // To ensure internal.NewXDSResolverWithConfigForTesting is set. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. 
+) + +func overrideFedEnvVar(t *testing.T) func() { + oldFed := envconfig.XDSFederation + envconfig.XDSFederation = true + return func() { envconfig.XDSFederation = oldFed } +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestWatchExpiryTimeout = 500 * time.Millisecond + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. + + ldsName = "xdsclient-test-lds-resource" + rdsName = "xdsclient-test-rds-resource" + ldsNameNewStyle = "xdstp:///envoy.config.listener.v3.Listener/xdsclient-test-lds-resource" + rdsNameNewStyle = "xdstp:///envoy.config.listener.v3.Listener/xdsclient-test-rds-resource" +) + +// badListenerResource returns a listener resource for the given name which does +// not contain the `RouteSpecifier` field in the HTTPConnectionManager, and +// hence is expected to be NACKed by the client. +func badListenerResource(name string) *v3listenerpb.Listener { + hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + }) + return &v3listenerpb.Listener{ + Name: name, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + } +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing a listener created using `badListenerResource`. +const wantNACKErr = "no RouteSpecifier" + +// verifyNoListenerUpdate verifies that no listener update is received on the +// provided update channel, and returns an error if an update is received. 
+// +// A very short deadline is used while waiting for the update, as this function +// is intended to be used when an update is not expected. +func verifyNoListenerUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("unexpected ListenerUpdate: %v", u) + } + return nil +} + +// verifyListenerUpdate waits for an update to be received on the provided +// update channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. +func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ListenerUpdateErrTuple) error { + u, err := updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("timeout when waiting for a listener resource from the management server: %v", err) + } + got := u.(xdsresource.ListenerUpdateErrTuple) + if wantUpdate.Err != nil { + if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { + return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) + } + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.HTTPFilter{}, "Filter", "Config"), + cmpopts.IgnoreFields(xdsresource.ListenerUpdate{}, "Raw"), + } + if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { + return fmt.Errorf("received unepected diff in the listener resource update: (-want, got):\n%s", diff) + } + return nil +} + +// TestLDSWatch covers the case where a single watcher exists for a single +// listener resource. The test verifies the following scenarios: +// 1. 
An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. +// +// The test is run for old and new style names. +func (s) TestLDSWatch(t *testing.T) { + defer overrideFedEnvVar(t)() + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + tests := []struct { + desc string + resourceName string + watchedResource *v3listenerpb.Listener // The resource being watched. + updatedWatchedResource *v3listenerpb.Listener // The watched resource after an update. + notWatchedResource *v3listenerpb.Listener // A resource which is not being watched. 
+ wantUpdate xdsresource.ListenerUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: ldsName, + watchedResource: e2e.DefaultClientListener(ldsName, rdsName), + updatedWatchedResource: e2e.DefaultClientListener(ldsName, "new-rds-resource"), + notWatchedResource: e2e.DefaultClientListener("unsubscribed-lds-resource", rdsName), + wantUpdate: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + { + desc: "new style resource", + resourceName: ldsNameNewStyle, + watchedResource: e2e.DefaultClientListener(ldsNameNewStyle, rdsNameNewStyle), + updatedWatchedResource: e2e.DefaultClientListener(ldsNameNewStyle, "new-rds-resource"), + notWatchedResource: e2e.DefaultClientListener("unsubscribed-lds-resource", rdsNameNewStyle), + wantUpdate: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsNameNewStyle, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(test.resourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered a watch for. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyListenerUpdate(ctx, updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional listener + // resource, one that we are not interested in. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. + ldsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestLDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single listener resource. The test verifies the following +// scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. 
After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. An update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + defer overrideFedEnvVar(t)() + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + tests := []struct { + desc string + resourceName string + watchedResource *v3listenerpb.Listener // The resource being watched. + updatedWatchedResource *v3listenerpb.Listener // The watched resource after an update. 
+ wantUpdateV1 xdsresource.ListenerUpdateErrTuple + wantUpdateV2 xdsresource.ListenerUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: ldsName, + watchedResource: e2e.DefaultClientListener(ldsName, rdsName), + updatedWatchedResource: e2e.DefaultClientListener(ldsName, "new-rds-resource"), + wantUpdateV1: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + wantUpdateV2: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "new-rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + { + desc: "new style resource", + resourceName: ldsNameNewStyle, + watchedResource: e2e.DefaultClientListener(ldsNameNewStyle, rdsNameNewStyle), + updatedWatchedResource: e2e.DefaultClientListener(ldsNameNewStyle, "new-rds-resource"), + wantUpdateV1: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsNameNewStyle, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + wantUpdateV2: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "new-rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Register two watches for the same listener resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(test.resourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(test.resourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered watches for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyListenerUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. + ldsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyListenerUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestLDSWatch_ThreeWatchesForDifferentResourceNames covers the case with three +// watchers (two watchers for one resource, and the third watcher for another +// resource), exist across two listener resources. The test verifies that an +// update from the management server containing both resources results in the +// invocation of all watch callbacks. +// +// The test is run with both old and new style names. +func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + defer overrideFedEnvVar(t)() + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for the same listener resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Register the third watch for a different listener resource. + updateCh3 := testutils.NewChannel() + ldsCancel3 := client.WatchListener(ldsNameNewStyle, func(u xdsresource.ListenerUpdate, err error) { + updateCh3.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel3() + + // Configure the management server to return two listener resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(ldsName, rdsName), + e2e.DefaultClientListener(ldsNameNewStyle, rdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the all watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for all the watchers. 
+ wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh3, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. +func (s) TestLDSWatch_ResourceCaching(t *testing.T) { + defer overrideFedEnvVar(t)() + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. 
+ updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + // No request should get sent out as part of this watch. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestLDSWatch_ResourceRemoved covers the cases where a resource being watched +// is removed from the management server. The test verifies the following +// scenarios: +// 1. Removing a resource should trigger the watch callback with a resource +// removed error. It should not trigger the watch callback for an unrelated +// resource. +// 2. An update to another resource should result in the invocation of the watch +// callback associated with that resource. It should not result in the +// invocation of the watch callback associated with the deleted resource. +// +// The test is run with both old and new style names. +func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { + defer overrideFedEnvVar(t)() + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for two listener resources and have the + // callbacks push the received updates on to a channel. 
+ resourceName1 := ldsName + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(resourceName1, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + + resourceName2 := ldsNameNewStyle + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management server to return two listener resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(resourceName1, rdsName), + e2e.DefaultClientListener(resourceName2, rdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for both watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for both watchers. + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + + // Remove the first listener resource on the management server. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(resourceName2, rdsName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // The first watcher should receive a resource removed error, while the + // second watcher should not see an update. + if err := verifyListenerUpdate(ctx, updateCh1, xdsresource.ListenerUpdateErrTuple{ + Err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, ""), + }); err != nil { + t.Fatal(err) + } + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update the second listener resource on the management server. The first + // watcher should not see an update, while the second watcher should. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(resourceName2, "new-rds-resource")}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + wantUpdate = xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "new-rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_NACKError covers the case where an update from the management +// server is NACK'ed by the xdsclient. The test verifies that the error is +// propagated to the watcher. 
+func (s) TestLDSWatch_NACKError(t *testing.T) { + defer overrideFedEnvVar(t)() + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel() + + // Configure the management server to return a single listener resource + // which is expected to be NACKed by the client. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{badListenerResource(ldsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher. + u, err := updateCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a listener resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ListenerUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantNACKErr) + } +} + +// TestLDSWatch_PartialValid covers the case where a response from the +// management server contains both valid and invalid resources and is expected +// to be NACK'ed by the xdsclient. 
The test verifies that watchers corresponding +// to the valid resource receive the update, while watchers corresponding to the +// invalid resource receive an error. +func (s) TestLDSWatch_PartialValid(t *testing.T) { + defer overrideFedEnvVar(t)() + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for listener resources. The first watch is expected + // to receive an error because the received resource is NACKed. The second + // watch is expected to get a good update. + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + resourceName2 := ldsNameNewStyle + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management with server two listener resources. One of these + // is a bad resource causing the update to be NACKed. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + badListenerResource(ldsName), + e2e.DefaultClientListener(resourceName2, rdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which + // requested for the bad resource. + u, err := updateCh1.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a listener resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ListenerUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantNACKErr) + } + + // Verify that the watcher watching the good resource receives a good + // update. + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/watchers_listener_test.go b/xds/internal/xdsclient/watchers_listener_test.go deleted file mode 100644 index f0f34d4c578e..000000000000 --- a/xds/internal/xdsclient/watchers_listener_test.go +++ /dev/null @@ -1,206 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "context" - "fmt" - "testing" - - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" -) - -// TestLDSWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name -// - an update is received after cancel() -func (s) TestLDSWatch(t *testing.T) { - testWatch(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) -} - -// TestLDSTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. -func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { - testTwoWatchSameResourceName(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) -} - -// TestLDSThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. 
-func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { - testThreeWatchDifferentResourceName(t, xdsresource.ListenerResource, - xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"}, testLDSName+"1", - xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"}, testLDSName+"2", - ) -} - -// TestLDSWatchAfterCache covers the case where watch is called after the update -// is in cache. -func (s) TestLDSWatchAfterCache(t *testing.T) { - testWatchAfterCache(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) -} - -// TestLDSResourceRemoved covers the cases: -// - an update is received after a watch() -// - another update is received, with one resource removed -// - this should trigger callback with resource removed error -// - one more update without the removed resource -// - the callback (above) shouldn't receive any update -func (s) TestLDSResourceRemoved(t *testing.T) { - testResourceRemoved(t, xdsresource.ListenerResource, - xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "1"}, testLDSName+"1", - xdsresource.ListenerUpdate{RouteConfigName: testRDSName + "2"}, testLDSName+"2", - ) -} - -// TestListenerWatchNACKError covers the case that an update is NACK'ed, and the -// watcher should also receive the error. 
-func (s) TestListenerWatchNACKError(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - ldsUpdateCh, _ := newWatch(t, client, xdsresource.ListenerResource, testLDSName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ListenerResource, testLDSName) - - wantError := fmt.Errorf("testing error") - updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, xdsresource.ListenerUpdate{}, wantError); err != nil { - t.Fatal(err) - } -} - -// TestListenerWatchPartialValid covers the case that a response contains both -// valid and invalid resources. This response will be NACK'ed by the xdsclient. -// But the watchers with valid resources should receive the update, those with -// invalida resources should receive an error. -func (s) TestListenerWatchPartialValid(t *testing.T) { - testWatchPartialValid(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}, testLDSName) -} - -// TestListenerWatch_RedundantUpdateSupression tests scenarios where an update -// with an unmodified resource is suppressed, and modified resource is not. 
-func (s) TestListenerWatch_RedundantUpdateSupression(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - ldsUpdateCh, _ := newWatch(t, client, xdsresource.ListenerResource, testLDSName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ListenerResource, testLDSName) - - basicListener := testutils.MarshalAny(&v3listenerpb.Listener{ - Name: testLDSName, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{RouteConfigName: "route-config-name"}, - }, - }), - }, - }) - listenerWithFilter1 := testutils.MarshalAny(&v3listenerpb.Listener{ - Name: testLDSName, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{RouteConfigName: "route-config-name"}, - }, - HttpFilters: []*v3httppb.HttpFilter{ - { - Name: "customFilter1", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: &anypb.Any{ - TypeUrl: "custom.filter", - Value: []byte{1, 2, 3}, - }}, - }, - }, - }), - }, - }) - listenerWithFilter2 := testutils.MarshalAny(&v3listenerpb.Listener{ - Name: testLDSName, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{RouteConfigName: "route-config-name"}, - }, - HttpFilters: []*v3httppb.HttpFilter{ - { - Name: "customFilter2", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: &anypb.Any{ - TypeUrl: "custom.filter", - Value: []byte{1, 2, 3}, - }}, - }, - }, - }), - }, - }) - - tests := []struct { - update xdsresource.ListenerUpdate - wantCallback bool - }{ - { - // First update. Callback should be invoked. 
- update: xdsresource.ListenerUpdate{Raw: basicListener}, - wantCallback: true, - }, - { - // Same update as previous. Callback should be skipped. - update: xdsresource.ListenerUpdate{Raw: basicListener}, - wantCallback: false, - }, - { - // New update. Callback should be invoked. - update: xdsresource.ListenerUpdate{Raw: listenerWithFilter1}, - wantCallback: true, - }, - { - // Same update as previous. Callback should be skipped. - update: xdsresource.ListenerUpdate{Raw: listenerWithFilter1}, - wantCallback: false, - }, - { - // New update. Callback should be invoked. - update: xdsresource.ListenerUpdate{Raw: listenerWithFilter2}, - wantCallback: true, - }, - { - // Same update as previous. Callback should be skipped. - update: xdsresource.ListenerUpdate{Raw: listenerWithFilter2}, - wantCallback: false, - }, - } - for _, test := range tests { - updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{testLDSName: {Update: test.update}}, xdsresource.UpdateMetadata{}) - if test.wantCallback { - if err := verifyListenerUpdate(ctx, ldsUpdateCh, test.update, nil); err != nil { - t.Fatal(err) - } - } else { - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u) - } - } - } -} From 2f60cb8b279f68b8f23345c1ad3bf2c799d592d5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 28 Jul 2022 13:42:47 -0700 Subject: [PATCH 570/998] test: improve the logic for checking round_robin (#5534) --- test/roundrobin_test.go | 77 ++++++++++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 24 deletions(-) diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index 3724743ec7cf..0a1300479bf6 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" 
"google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -45,37 +46,65 @@ const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` func checkRoundRobin(ctx context.Context, cc *grpc.ClientConn, addrs []resolver.Address) error { client := testgrpc.NewTestServiceClient(cc) - // Make sure connections to all backends are up. + // Make sure connections to all backends are up. We need to do this two + // times (to be sure that round_robin has kicked in) because the channel + // could have been configured with a different LB policy before the switch + // to round_robin. And the previous LB policy could be sharing backends with + // round_robin, and therefore in the first iteration of this loop, RPCs + // could land on backends owned by the previous LB policy. backendCount := len(addrs) - for i := 0; i < backendCount; i++ { - for { - time.Sleep(time.Millisecond) - if ctx.Err() != nil { - return fmt.Errorf("timeout waiting for connection to %q to be up", addrs[i].Addr) + for j := 0; j < 2; j++ { + for i := 0; i < backendCount; i++ { + for { + time.Sleep(time.Millisecond) + if ctx.Err() != nil { + return fmt.Errorf("timeout waiting for connection to %q to be up", addrs[i].Addr) + } + var peer peer.Peer + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + // Some tests remove backends and check if round robin is + // happening across the remaining backends. In such cases, + // RPCs can initially fail on the connection using the + // removed backend. Just keep retrying and eventually the + // connection using the removed backend will shutdown and + // will be removed. + continue + } + if peer.Addr.String() == addrs[i].Addr { + break + } } + } + } + // Perform 3 iterations. 
+ var iterations [][]string + for i := 0; i < 3; i++ { + iteration := make([]string, backendCount) + for c := 0; c < backendCount; c++ { var peer peer.Peer if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { - // Some tests remove backends and check if round robin is happening - // across the remaining backends. In such cases, RPCs can initially fail - // on the connection using the removed backend. Just keep retrying and - // eventually the connection using the removed backend will shutdown and - // will be removed. - continue - } - if peer.Addr.String() == addrs[i].Addr { - break + return fmt.Errorf("EmptyCall() = %v, want ", err) } + iteration[c] = peer.Addr.String() } + iterations = append(iterations, iteration) } - // Make sure RPCs are sent to all backends. - for i := 0; i < 3*backendCount; i++ { - var peer peer.Peer - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { - return fmt.Errorf("EmptyCall() = %v, want ", err) - } - if gotPeer, wantPeer := peer.Addr.String(), addrs[i%backendCount].Addr; gotPeer != wantPeer { - return fmt.Errorf("rpc sent to peer %q, want peer %q", gotPeer, wantPeer) - } + // Ensure the the first iteration contains all addresses in addrs. To + // support duplicate addresses, we determine the count of each address. + wantAddrCount := make(map[string]int) + for _, addr := range addrs { + wantAddrCount[addr.Addr]++ + } + gotAddrCount := make(map[string]int) + for _, addr := range iterations[0] { + gotAddrCount[addr]++ + } + if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" { + return fmt.Errorf("non-roundrobin, got address count in one iteration: %v, want: %v, Diff: %s", gotAddrCount, wantAddrCount, diff) + } + // Ensure all three iterations contain the same addresses. 
+ if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) { + return fmt.Errorf("non-roundrobin, first iter: %v, second iter: %v, third iter: %v", iterations[0], iterations[1], iterations[2]) } return nil } From 1ec054bb67cabe7118016af2827f7b82ee4a352e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 28 Jul 2022 23:38:04 +0000 Subject: [PATCH 571/998] transport/server: fix race that could cause a stray header to be sent (#5513) --- internal/transport/http2_server.go | 21 ++++++++++---------- test/end2end_test.go | 32 ++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 2b0fde334ce0..28bcba0a33c6 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -945,15 +945,16 @@ func (t *http2Server) streamContextErr(s *Stream) error { // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() { - return ErrIllegalHeaderWrite - } - + s.hdrMu.Lock() + defer s.hdrMu.Unlock() if s.getState() == streamDone { return t.streamContextErr(s) } - s.hdrMu.Lock() + if s.updateHeaderSent() { + return ErrIllegalHeaderWrite + } + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -962,10 +963,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -1013,17 +1012,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. 
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -1052,7 +1053,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { diff --git a/test/end2end_test.go b/test/end2end_test.go index c44925f96a62..348fd0b0b1b7 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -8077,3 +8077,35 @@ func (s) TestUnexpectedEOF(t *testing.T) { } } } + +// TestRecvWhileReturningStatus performs a Recv in a service handler while the +// handler returns its status. A race condition could result in the server +// sending the first headers frame without the HTTP :status header. This can +// happen when the failed Recv (due to the handler returning) and the handler's +// status both attempt to write the status, which would be the first headers +// frame sent, simultaneously. +func (s) TestRecvWhileReturningStatus(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + // The client never sends, so this Recv blocks until the server + // returns and causes stream operations to return errors. 
+ go stream.Recv() + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + for i := 0; i < 100; i++ { + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Error while creating stream: %v", err) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("stream.Recv() = %v, want io.EOF", err) + } + } +} From 02f1a7ac92e65e7d5fe11be77a8060c177b1c064 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 29 Jul 2022 17:45:21 +0000 Subject: [PATCH 572/998] grpc: prevent a nil stats handler from causing a panic (#5543) --- default_dial_option_server_option_test.go | 2 +- dialoptions.go | 22 ++++++---- server.go | 50 +++++++++++++---------- test/end2end_test.go | 21 ++++++++++ 4 files changed, 64 insertions(+), 31 deletions(-) diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go index 952b1c4a2b17..3dc446f58b5a 100644 --- a/default_dial_option_server_option_test.go +++ b/default_dial_option_server_option_test.go @@ -61,7 +61,7 @@ func (s) TestAddExtraDialOptions(t *testing.T) { func (s) TestAddExtraServerOptions(t *testing.T) { const maxRecvSize = 998765 // Set and check the ServerOptions - opts := []ServerOption{StatsHandler(nil), Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize)} + opts := []ServerOption{Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize)} internal.AddExtraServerOptions.(func(opt ...ServerOption))(opts...) for i, opt := range opts { if extraServerOptions[i] != opt { diff --git a/dialoptions.go b/dialoptions.go index 1c28ee7112af..60403bc160ec 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -84,7 +84,7 @@ var extraDialOptions []DialOption // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. 
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -275,7 +275,7 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -304,7 +304,7 @@ func WithInsecure() DialOption { // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -335,7 +335,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -391,6 +391,12 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) }) } @@ -403,7 +409,7 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -483,7 +489,7 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. 
This function is used in nested channel creation // (e.g. grpclb dial). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -545,7 +551,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -592,7 +598,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/server.go b/server.go index fd90352fc398..2ad9da7bfccf 100644 --- a/server.go +++ b/server.go @@ -190,7 +190,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -305,7 +305,7 @@ func CustomCodec(codec Codec) ServerOption { // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. // Will be supported throughout 1.x. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -426,7 +426,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. 
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -442,6 +442,12 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } o.statsHandlers = append(o.statsHandlers, h) }) } @@ -469,7 +475,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -490,7 +496,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -505,7 +511,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -956,19 +962,19 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1674,7 +1680,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1689,7 +1695,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1704,7 +1710,7 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1825,12 +1831,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { // When called multiple times, all the provided metadata will be merged. 
All // the metadata will be sent out when one of the following happens: // -// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. -// - The first response message is sent. For unary handlers, this occurs when -// the handler returns; for streaming handlers, this can happen when stream's -// SendMsg method is called. -// - An RPC status is sent out (error or success). This occurs when the handler -// returns. +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. // // SetHeader will fail if called after any of the events above. // diff --git a/test/end2end_test.go b/test/end2end_test.go index 348fd0b0b1b7..8a4f11515675 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -8042,6 +8042,27 @@ func (s) TestServerClosesConn(t *testing.T) { t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen)) } +// TestNilStatsHandler ensures we do not panic as a result of a nil stats +// handler. 
+func (s) TestNilStatsHandler(t *testing.T) { + grpctest.TLogger.ExpectErrorN("ignoring nil parameter", 2) + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.StatsHandler(nil)}, grpc.WithStatsHandler(nil)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } +} + // TestUnexpectedEOF tests a scenario where a client invokes two unary RPC // calls. The first call receives a payload which exceeds max grpc receive // message length, and the second gets a large response. This second RPC should From c14e29e609121fa66a1621141fae4fc9cc28e727 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 1 Aug 2022 16:10:21 -0700 Subject: [PATCH 573/998] rls: suppress picker updates from children when handling config updates (#5539) --- balancer/rls/balancer.go | 147 ++++++++++-------- balancer/rls/balancer_test.go | 264 ++++++++++++++++++++++++++++++++ balancer/rls/control_channel.go | 16 +- 3 files changed, 357 insertions(+), 70 deletions(-) diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index cde95d992d63..b7a11e8851b9 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -21,6 +21,7 @@ package rls import ( "encoding/json" + "errors" "fmt" "sync" "sync/atomic" @@ -36,6 +37,7 @@ import ( "google.golang.org/grpc/internal/buffer" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" ) @@ -50,7 +52,8 @@ const ( ) var ( - logger = grpclog.Component("rls") + logger = 
grpclog.Component("rls") + errBalancerClosed = errors.New("rls LB policy is closed") // Below defined vars for overriding in unit tests. @@ -88,16 +91,14 @@ func (rlsBB) Name() string { func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { lb := &rlsBalancer{ - done: grpcsync.NewEvent(), - cc: cc, - bopts: opts, - purgeTicker: dataCachePurgeTicker(), - lbCfg: &lbConfig{}, - pendingMap: make(map[cacheKey]*backoffState), - childPolicies: make(map[string]*childPolicyWrapper), - ccUpdateCh: make(chan *balancer.ClientConnState, 1), - childPolicyStateUpdateCh: buffer.NewUnbounded(), - connectivityStateCh: make(chan struct{}), + done: grpcsync.NewEvent(), + cc: cc, + bopts: opts, + purgeTicker: dataCachePurgeTicker(), + lbCfg: &lbConfig{}, + pendingMap: make(map[cacheKey]*backoffState), + childPolicies: make(map[string]*childPolicyWrapper), + updateCh: buffer.NewUnbounded(), } lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) lb.dataCache = newDataCache(maxCacheSize, lb.logger) @@ -139,13 +140,28 @@ type rlsBalancer struct { // default child policy wrapper when a new picker is created. See // sendNewPickerLocked() for details. lastPicker *rlsPicker + // Set during UpdateClientConnState when pushing updates to child policies. + // Prevents state updates from child policies causing new pickers to be sent + // up the channel. Cleared after all child policies have processed the + // updates sent to them, after which a new picker is sent up the channel. + inhibitPickerUpdates bool + + // Channel on which all updates are pushed. Processed in run(). + updateCh *buffer.Unbounded +} + +type resumePickerUpdates struct { + done chan struct{} +} - // Channels on which updates are received or pushed. - ccUpdateCh chan *balancer.ClientConnState - childPolicyStateUpdateCh *buffer.Unbounded // idAndState from child policy. 
- connectivityStateCh chan struct{} // signalled when control channel becomes READY again. +// childPolicyIDAndState wraps a child policy id and its state update. +type childPolicyIDAndState struct { + id string + state balancer.State } +type controlChannelReady struct{} + // run is a long-running goroutine which handles all the updates that the // balancer wishes to handle. The appropriate updateHandler will push the update // on to a channel that this goroutine will select on, thereby the handling of @@ -154,21 +170,30 @@ func (b *rlsBalancer) run() { go b.purgeDataCache() for { select { - case u := <-b.ccUpdateCh: - b.handleClientConnUpdate(u) - case u := <-b.childPolicyStateUpdateCh.Get(): - update := u.(idAndState) - b.childPolicyStateUpdateCh.Load() - b.handleChildPolicyStateUpdate(update.id, update.state) - case <-b.connectivityStateCh: - b.logger.Infof("Resetting backoff state after control channel getting back to READY") - b.cacheMu.Lock() - updatePicker := b.dataCache.resetBackoffState(&backoffState{bs: defaultBackoffStrategy}) - b.cacheMu.Unlock() - if updatePicker { - b.sendNewPicker() + case u := <-b.updateCh.Get(): + b.updateCh.Load() + switch update := u.(type) { + case childPolicyIDAndState: + b.handleChildPolicyStateUpdate(update.id, update.state) + case controlChannelReady: + b.logger.Infof("Resetting backoff state after control channel getting back to READY") + b.cacheMu.Lock() + updatePicker := b.dataCache.resetBackoffState(&backoffState{bs: defaultBackoffStrategy}) + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + resetBackoffHook() + case resumePickerUpdates: + b.stateMu.Lock() + b.logger.Infof("Resuming picker updates after config propagation to child policies") + b.inhibitPickerUpdates = false + b.sendNewPickerLocked() + close(update.done) + b.stateMu.Unlock() + default: + b.logger.Errorf("Unsupported update type %T", update) } - resetBackoffHook() case <-b.done.Done(): return } @@ -196,33 +221,23 @@ func (b *rlsBalancer) 
purgeDataCache() { } func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - // Remove unprocessed update from the channel, if one exists, before pushing - // the most recent one. - select { - case <-b.ccUpdateCh: - default: - } - b.ccUpdateCh <- &ccs - return nil -} - -// handleClientConnUpdate handles updates to the service config. -// -// Invoked from the run() goroutine and this will attempt to grab the mutex. -func (b *rlsBalancer) handleClientConnUpdate(ccs *balancer.ClientConnState) { + defer clientConnUpdateHook() if b.done.HasFired() { - b.logger.Warningf("Received service config after balancer close") - return + b.logger.Warningf("Received service config after balancer close: %s", pretty.ToJSON(ccs.BalancerConfig)) + return errBalancerClosed } b.stateMu.Lock() - defer b.stateMu.Unlock() newCfg := ccs.BalancerConfig.(*lbConfig) if b.lbCfg.Equal(newCfg) { + b.stateMu.Unlock() b.logger.Infof("New service config matches existing config") - return + return nil } + b.logger.Infof("Delaying picker updates until config is propagated to and processed by child policies") + b.inhibitPickerUpdates = true + // When the RLS server name changes, the old control channel needs to be // swapped out for a new one. All state associated with the throttling // algorithm is stored on a per-control-channel basis; when we swap out @@ -238,13 +253,19 @@ func (b *rlsBalancer) handleClientConnUpdate(ccs *balancer.ClientConnState) { // Any changes to child policy name or configuration needs to be handled by // either creating new child policies or pushing updates to existing ones. b.resolverState = ccs.ResolverState - b.handleChildPolicyConfigUpdate(newCfg, ccs) + b.handleChildPolicyConfigUpdate(newCfg, &ccs) - // Update the copy of the config in the LB policy and send a new picker. + // Update the copy of the config in the LB policy before releasing the lock. 
b.lbCfg = newCfg - b.sendNewPickerLocked() - clientConnUpdateHook() + // Enqueue an event which will notify us when the above update has been + // propagated to all child policies, and the child policies have all + // processed their updates, and we have sent a picker update. + done := make(chan struct{}) + b.updateCh.Put(resumePickerUpdates{done: done}) + b.stateMu.Unlock() + <-done + return nil } // handleControlChannelUpdate handles updates to service config fields which @@ -258,7 +279,10 @@ func (b *rlsBalancer) handleControlChannelUpdate(newCfg *lbConfig) { // Create a new control channel and close the existing one. b.logger.Infof("Creating control channel to RLS server at: %v", newCfg.lookupService) - ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.controlChannelServiceConfig, newCfg.lookupServiceTimeout, b.bopts, b.connectivityStateCh) + backToReadyFn := func() { + b.updateCh.Put(controlChannelReady{}) + } + ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.controlChannelServiceConfig, newCfg.lookupServiceTimeout, b.bopts, backToReadyFn) if err != nil { // This is very uncommon and usually represents a non-transient error. // There is not much we can do here other than wait for another update @@ -438,8 +462,13 @@ func (b *rlsBalancer) sendNewPickerLocked() { ConnectivityState: aggregatedState, Picker: picker, } - b.logger.Infof("New balancer.State: %+v", state) - b.cc.UpdateState(state) + + if !b.inhibitPickerUpdates { + b.logger.Infof("New balancer.State: %+v", state) + b.cc.UpdateState(state) + } else { + b.logger.Infof("Delaying picker update: %+v", state) + } if b.lastPicker != nil { if b.defaultPolicy != nil { @@ -499,18 +528,12 @@ func (b *rlsBalancer) aggregatedConnectivityState() connectivity.State { } } -// idAndState wraps a child policy id and its state update. -type idAndState struct { - id string - state balancer.State -} - // UpdateState is a implementation of the balancergroup.BalancerStateAggregator // interface. 
The actual state aggregation functionality is handled // asynchronously. This method only pushes the state update on to channel read // and dispatched by the run() goroutine. func (b *rlsBalancer) UpdateState(id string, state balancer.State) { - b.childPolicyStateUpdateCh.Put(idAndState{id: id, state: state}) + b.updateCh.Put(childPolicyIDAndState{id: id, state: state}) } // handleChildPolicyStateUpdate provides the state aggregator functionality for @@ -543,7 +566,7 @@ func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer. return } atomic.StorePointer(&cpw.state, unsafe.Pointer(&newState)) - b.logger.Infof("Child policy %q has new state %+v", id, cpw.state) + b.logger.Infof("Child policy %q has new state %+v", id, newState) b.sendNewPickerLocked() } diff --git a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go index 1c2f9fca32a5..7df7f4335205 100644 --- a/balancer/rls/balancer_test.go +++ b/balancer/rls/balancer_test.go @@ -20,8 +20,10 @@ package rls import ( "context" + "encoding/json" "errors" "fmt" + "sync" "testing" "time" @@ -30,6 +32,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/test/e2e" "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" @@ -39,6 +42,7 @@ import ( rlstest "google.golang.org/grpc/internal/testutils/rls" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/testdata" "google.golang.org/protobuf/types/known/durationpb" @@ -833,3 +837,263 @@ func (s) TestControlChannelConnectivityStateMonitoring(t *testing.T) { makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) verifyRLSRequest(t, rlsReqCh, true) } + +const wrappingTopLevelBalancerName = "wrapping-top-level-balancer" +const 
multipleUpdateStateChildBalancerName = "multiple-update-state-child-balancer" + +type wrappingTopLevelBalancerBuilder struct { + balCh chan balancer.Balancer +} + +func (w *wrappingTopLevelBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + tlb := &wrappingTopLevelBalancer{ClientConn: cc} + tlb.Balancer = balancer.Get(Name).Build(tlb, balancer.BuildOptions{}) + w.balCh <- tlb + return tlb +} + +func (w *wrappingTopLevelBalancerBuilder) Name() string { + return wrappingTopLevelBalancerName +} + +func (w *wrappingTopLevelBalancerBuilder) ParseConfig(sc json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + parser := balancer.Get(Name).(balancer.ConfigParser) + return parser.ParseConfig(sc) +} + +// wrappingTopLevelBalancer acts as the top-level LB policy on the channel and +// wraps an RLS LB policy. It forwards all balancer API calls unmodified to the +// underlying RLS LB policy. It overrides the UpdateState method on the +// balancer.ClientConn passed to the RLS LB policy and stores all state updates +// pushed by the latter. +type wrappingTopLevelBalancer struct { + balancer.ClientConn + balancer.Balancer + + mu sync.Mutex + states []balancer.State +} + +func (w *wrappingTopLevelBalancer) UpdateState(bs balancer.State) { + w.mu.Lock() + w.states = append(w.states, bs) + w.mu.Unlock() + w.ClientConn.UpdateState(bs) +} + +func (w *wrappingTopLevelBalancer) getStates() []balancer.State { + w.mu.Lock() + defer w.mu.Unlock() + + states := make([]balancer.State, len(w.states)) + for i, s := range w.states { + states[i] = s + } + return states +} + +// wrappedPickFirstBalancerBuilder builds a balancer which wraps a pickfirst +// balancer. The wrapping balancing receives addresses to be passed to the +// underlying pickfirst balancer as part of its configuration. 
+type wrappedPickFirstBalancerBuilder struct{} + +func (wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, + } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb +} + +func (wrappedPickFirstBalancerBuilder) Name() string { + return multipleUpdateStateChildBalancerName +} + +type WrappedPickFirstBalancerConfig struct { + serviceconfig.LoadBalancingConfig + Backend string // The target for which this child policy was created. +} + +func (wbb *wrappedPickFirstBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &WrappedPickFirstBalancerConfig{} + if err := json.Unmarshal(c, cfg); err != nil { + return nil, err + } + return cfg, nil +} + +// wrappedPickFirstBalancer wraps a pickfirst balancer and makes multiple calls +// to UpdateState when handling a config update in UpdateClientConnState. When +// this policy is used as a child policy of the RLS LB policy, it is expected +// that the latter suppress these updates and push a single picker update on the +// channel (after the config has been processed by all child policies). 
+type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + wb.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &testutils.TestConstPicker{Err: balancer.ErrNoSubConnAvailable}}) + wb.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &testutils.TestConstPicker{Err: balancer.ErrNoSubConnAvailable}}) + + cfg := ccs.BalancerConfig.(*WrappedPickFirstBalancerConfig) + return wb.Balancer.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{{Addr: cfg.Backend}}}, + }) +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + // Eat it if IDLE - allows it to switch over only on a READY SubConn. + if state.ConnectivityState == connectivity.Idle { + return + } + wb.ClientConn.UpdateState(state) +} + +// TestUpdateStatePauses tests the scenario where a config update received by +// the RLS LB policy results in multiple UpdateState calls from the child +// policies. This test verifies that picker updates are paused when the config +// update is being processed by RLS LB policy and its child policies. +// +// The test uses a wrapping balancer as the top-level LB policy on the channel. +// The wrapping balancer wraps an RLS LB policy as a child policy and forwards +// all calls to it. It also records the UpdateState() calls from the RLS LB +// policy and makes it available for inspection by the test. +// +// The test uses another wrapped balancer (which wraps a pickfirst balancer) as +// the child policy of the RLS LB policy. This balancer makes multiple +// UpdateState calls when handling an update from its parent in +// UpdateClientConnState. +func (s) TestUpdateStatePauses(t *testing.T) { + // Override the hook to get notified when UpdateClientConnState is done. 
+ clientConnUpdateDone := make(chan struct{}, 1) + origClientConnUpdateHook := clientConnUpdateHook + clientConnUpdateHook = func() { clientConnUpdateDone <- struct{}{} } + defer func() { clientConnUpdateHook = origClientConnUpdateHook }() + + // Register the top-level wrapping balancer which forwards calls to RLS. + bb := &wrappingTopLevelBalancerBuilder{balCh: make(chan balancer.Balancer, 1)} + balancer.Register(bb) + + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Start a test backend and set the RLS server to respond with it. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a child policy which wraps a pickfirst balancer and receives the + // backend address as part of its configuration. + balancer.Register(&wrappedPickFirstBalancerBuilder{}) + + // Register a manual resolver and push the RLS service config through it. 
+ r := manual.NewBuilderWithScheme("rls-e2e") + scJSON := fmt.Sprintf(` +{ + "loadBalancingConfig": [ + { + "%s": { + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "grpc.testing.TestService"}] + }], + "lookupService": "%s", + "cacheSizeBytes": 1000 + }, + "childPolicy": [{"%s": {}}], + "childPolicyConfigTargetFieldName": "Backend" + } + } + ] +}`, wrappingTopLevelBalancerName, rlsServer.Address, multipleUpdateStateChildBalancerName) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.InitialState(resolver.State{ServiceConfig: sc}) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Wait for the clientconn update to be processed by the RLS LB policy. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + case <-clientConnUpdateDone: + } + + // Get the top-level LB policy configured on the channel, to be able to read + // the state updates pushed by its child (the RLS LB policy.) + var wb *wrappingTopLevelBalancer + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for state update on the top-level LB policy") + case b := <-bb.balCh: + wb = b.(*wrappingTopLevelBalancer) + } + + // It is important to note that at this point no child policies have been + // created because we have not attempted any RPC so far. When we attempt an + // RPC (below), child policies will be created and their configs will be + // pushed to them. But this config update will not happen in the context of + // a config update on the parent. + + // Make an RPC and ensure it gets routed to the test backend. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. 
+ verifyRLSRequest(t, rlsReqCh, true) + + // Cache the state changes seen up to this point. + states0 := wb.getStates() + + // Push an updated service config. As mentioned earlier, the previous config + // updates on the child policies did not happen in the context of a config + // update on the parent. Hence, this update is required to force the + // scenario which we are interesting in testing here, i.e child policies get + // config updates as part of the parent policy getting its config update. + scJSON = fmt.Sprintf(` +{ + "loadBalancingConfig": [ + { + "%s": { + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [ + {"service": "grpc.testing.TestService"}, + {"service": "grpc.health.v1.Health"} + ] + }], + "lookupService": "%s", + "cacheSizeBytes": 1000 + }, + "childPolicy": [{"%s": {}}], + "childPolicyConfigTargetFieldName": "Backend" + } + } + ] +}`, wrappingTopLevelBalancerName, rlsServer.Address, multipleUpdateStateChildBalancerName) + sc = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + // Wait for the clientconn update to be processed by the RLS LB policy. + select { + case <-ctx.Done(): + case <-clientConnUpdateDone: + } + + // Even though the child policies used in this test make multiple calls to + // UpdateState as part of handling their configs, we expect the RLS policy + // to inhibit picker updates during this time frame, and send a single + // picker once the config update is completely handled. + states1 := wb.getStates() + if len(states1) != len(states0)+1 { + t.Fatalf("more than one state update seen. before %v, after %v", states0, states1) + } +} diff --git a/balancer/rls/control_channel.go b/balancer/rls/control_channel.go index df78f7b55fbe..4acc11d90e94 100644 --- a/balancer/rls/control_channel.go +++ b/balancer/rls/control_channel.go @@ -48,9 +48,9 @@ type controlChannel struct { // rpcTimeout specifies the timeout for the RouteLookup RPC call. 
The LB // policy receives this value in its service config. rpcTimeout time.Duration - // backToReadyCh is the channel on which an update is pushed when the - // connectivity state changes from READY --> TRANSIENT_FAILURE --> READY. - backToReadyCh chan struct{} + // backToReadyFunc is a callback to be invoked when the connectivity state + // changes from READY --> TRANSIENT_FAILURE --> READY. + backToReadyFunc func() // throttler in an adaptive throttling implementation used to avoid // hammering the RLS service while it is overloaded or down. throttler adaptiveThrottler @@ -63,11 +63,11 @@ type controlChannel struct { // newControlChannel creates a controlChannel to rlsServerName and uses // serviceConfig, if non-empty, as the default service config for the underlying // gRPC channel. -func newControlChannel(rlsServerName, serviceConfig string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyCh chan struct{}) (*controlChannel, error) { +func newControlChannel(rlsServerName, serviceConfig string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyFunc func()) (*controlChannel, error) { ctrlCh := &controlChannel{ - rpcTimeout: rpcTimeout, - backToReadyCh: backToReadyCh, - throttler: newAdaptiveThrottler(), + rpcTimeout: rpcTimeout, + backToReadyFunc: backToReadyFunc, + throttler: newAdaptiveThrottler(), } ctrlCh.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-control-channel %p] ", ctrlCh)) @@ -171,7 +171,7 @@ func (cc *controlChannel) monitorConnectivityState() { if !first { cc.logger.Infof("Control channel back to READY") - cc.backToReadyCh <- struct{}{} + cc.backToReadyFunc() } first = false From 23f015c36d60311d47a6ed686efc39031606269a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 1 Aug 2022 16:43:25 -0700 Subject: [PATCH 574/998] priority: sync priority with child in use (#5549) --- xds/internal/balancer/priority/balancer.go | 2 +- xds/internal/balancer/priority/balancer_priority.go | 3 ++- 
xds/internal/balancer/priority/balancer_test.go | 6 ------ 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index d05ef18c2876..b5cace684960 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -260,7 +260,7 @@ func (b *priorityBalancer) run() { b.handleChildStateUpdate(s.name, s.s) case resumePickerUpdates: b.inhibitPickerUpdates = false - b.syncPriority("") + b.syncPriority(b.childInUse) close(s.done) } b.mu.Unlock() diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 33068709e292..c12dfe47ffea 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -69,6 +69,7 @@ var ( // Caller must hold b.mu. func (b *priorityBalancer) syncPriority(childUpdating string) { if b.inhibitPickerUpdates { + b.logger.Infof("Skipping update from child with name %q", childUpdating) return } for p, name := range b.priorities { @@ -84,7 +85,7 @@ func (b *priorityBalancer) syncPriority(childUpdating string) { (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { if b.childInUse != child.name || child.name == childUpdating { - logger.Warningf("ciu, cn, cu: %v, %v, %v", b.childInUse, child.name, childUpdating) + b.logger.Warningf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) // If we switch children or the child in use just updated its // picker, push the child's picker to the parent. 
b.cc.UpdateState(child.state) diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index ccf3a5edfc2e..d3329378cd73 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -176,8 +176,6 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { } select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: @@ -277,8 +275,6 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { } select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case sc := <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn, %s", sc) case <-cc.RemoveSubConnCh: @@ -1194,8 +1190,6 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { // Because this was a ready child moved to a higher priority, no new subconn // or picker should be updated. select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: From 57aaa10b8a9e575f4834f19fa63c0d2e184f372e Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 2 Aug 2022 12:31:30 -0700 Subject: [PATCH 575/998] test: move clientconn state transition test to test/ directory (#5551) --- clientconn_test.go | 88 +++++++++++++++++-- .../clientconn_state_transition_test.go | 85 +++++++++++------- 2 files changed, 135 insertions(+), 38 deletions(-) rename clientconn_state_transition_test.go => test/clientconn_state_transition_test.go (85%) diff --git a/clientconn_test.go b/clientconn_test.go index 9f32999709f4..ee8372ad891b 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -25,12 +25,14 @@ import ( "math" "net" "strings" + "sync" "sync/atomic" "testing" "time" "golang.org/x/net/http2" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" 
"google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -44,6 +46,17 @@ import ( "google.golang.org/grpc/testdata" ) +const ( + defaultTestTimeout = 10 * time.Second + stateRecordingBalancerName = "state_recording_balancer" +) + +var testBalancerBuilder = newStateRecordingBalancerBuilder() + +func init() { + balancer.Register(testBalancerBuilder) +} + func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { scpr := r.CC.ParseServiceConfig(s) if scpr.Err != nil { @@ -221,8 +234,10 @@ func (s) TestDialWaitsForServerSettingsAndFails(t *testing.T) { lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials()), WithReturnConnectionError(), - withBackoff(noBackoff{}), - withMinConnectDeadline(func() time.Duration { return time.Second / 4 })) + WithConnectParams(ConnectParams{ + Backoff: backoff.Config{}, + MinConnectTimeout: 250 * time.Millisecond, + })) lis.Close() if err == nil { client.Close() @@ -453,7 +468,6 @@ func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) { }}) client, err := DialContext(ctx, "whatever:///this-gets-overwritten", WithTransportCredentials(insecure.NewCredentials()), - WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), WithResolvers(rb), withMinConnectDeadline(getMinConnectTimeout)) if err != nil { @@ -976,9 +990,11 @@ func (s) TestUpdateAddresses_NoopIfCalledWithSameAddresses(t *testing.T) { client, err := Dial("whatever:///this-gets-overwritten", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(rb), - withBackoff(noBackoff{}), - WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), - withMinConnectDeadline(func() time.Duration { return time.Hour })) + WithConnectParams(ConnectParams{ + Backoff: backoff.Config{}, + MinConnectTimeout: time.Hour, + }), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": 
[{"%s":{}}]}`, stateRecordingBalancerName))) if err != nil { t.Fatal(err) } @@ -1113,6 +1129,66 @@ func testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t *testing.T } } +type stateRecordingBalancer struct { + notifier chan<- connectivity.State + balancer.Balancer +} + +func (b *stateRecordingBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + b.notifier <- s.ConnectivityState + b.Balancer.UpdateSubConnState(sc, s) +} + +func (b *stateRecordingBalancer) ResetNotifier(r chan<- connectivity.State) { + b.notifier = r +} + +func (b *stateRecordingBalancer) Close() { + b.Balancer.Close() +} + +type stateRecordingBalancerBuilder struct { + mu sync.Mutex + notifier chan connectivity.State // The notifier used in the last Balancer. +} + +func newStateRecordingBalancerBuilder() *stateRecordingBalancerBuilder { + return &stateRecordingBalancerBuilder{} +} + +func (b *stateRecordingBalancerBuilder) Name() string { + return stateRecordingBalancerName +} + +func (b *stateRecordingBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + stateNotifications := make(chan connectivity.State, 10) + b.mu.Lock() + b.notifier = stateNotifications + b.mu.Unlock() + return &stateRecordingBalancer{ + notifier: stateNotifications, + Balancer: balancer.Get("pick_first").Build(cc, opts), + } +} + +func (b *stateRecordingBalancerBuilder) nextStateNotifier() <-chan connectivity.State { + b.mu.Lock() + defer b.mu.Unlock() + ret := b.notifier + b.notifier = nil + return ret +} + +// Keep reading until something causes the connection to die (EOF, server +// closed, etc). Useful as a tool for mindlessly keeping the connection +// healthy, since the client will error if things like client prefaces are not +// accepted in a timely fashion. 
+func keepReading(conn net.Conn) { + buf := make([]byte, 1024) + for _, err := conn.Read(buf); err == nil; _, err = conn.Read(buf) { + } +} + // stayConnected makes cc stay connected by repeatedly calling cc.Connect() // until the state becomes Shutdown or until 10 seconds elapses. func stayConnected(cc *ClientConn) { diff --git a/clientconn_state_transition_test.go b/test/clientconn_state_transition_test.go similarity index 85% rename from clientconn_state_transition_test.go rename to test/clientconn_state_transition_test.go index d1c1321b33b1..1f15c6905ad6 100644 --- a/clientconn_state_transition_test.go +++ b/test/clientconn_state_transition_test.go @@ -16,7 +16,7 @@ * */ -package grpc +package test import ( "context" @@ -27,6 +27,8 @@ import ( "time" "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" @@ -35,10 +37,7 @@ import ( "google.golang.org/grpc/resolver/manual" ) -const ( - stateRecordingBalancerName = "state_recoding_balancer" - defaultTestTimeout = 10 * time.Second -) +const stateRecordingBalancerName = "state_recording_balancer" var testBalancerBuilder = newStateRecordingBalancerBuilder() @@ -158,17 +157,22 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s connMu.Unlock() }() - client, err := Dial("", - WithTransportCredentials(insecure.NewCredentials()), - WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), - WithDialer(pl.Dialer()), - withBackoff(noBackoff{}), - withMinConnectDeadline(func() time.Duration { return time.Millisecond * 100 })) + client, err := grpc.Dial("", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + grpc.WithDialer(pl.Dialer()), + 
grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{}, + MinConnectTimeout: 100 * time.Millisecond, + })) if err != nil { t.Fatal(err) } defer client.Close() - go stayConnected(client) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + go stayConnected(ctx, client) stateNotifications := testBalancerBuilder.nextStateNotifier() for i := 0; i < len(want); i++ { @@ -225,14 +229,17 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { conn.Close() }() - client, err := Dial(lis.Addr().String(), - WithTransportCredentials(insecure.NewCredentials()), - WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName))) + client, err := grpc.Dial(lis.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName))) if err != nil { t.Fatal(err) } defer client.Close() - go stayConnected(client) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + go stayConnected(ctx, client) stateNotifications := testBalancerBuilder.nextStateNotifier() @@ -310,10 +317,10 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) - client, err := Dial("whatever:///this-gets-overwritten", - WithTransportCredentials(insecure.NewCredentials()), - WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), - WithResolvers(rb)) + client, err := grpc.Dial("whatever:///this-gets-overwritten", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + grpc.WithResolvers(rb)) if err != nil { t.Fatal(err) } @@ -396,15 +403,18 @@ func (s) 
TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) - client, err := Dial("whatever:///this-gets-overwritten", - WithTransportCredentials(insecure.NewCredentials()), - WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), - WithResolvers(rb)) + client, err := grpc.Dial("whatever:///this-gets-overwritten", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + grpc.WithResolvers(rb)) if err != nil { t.Fatal(err) } defer client.Close() - go stayConnected(client) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + go stayConnected(ctx, client) stateNotifications := testBalancerBuilder.nextStateNotifier() want := []connectivity.State{ @@ -413,8 +423,6 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { connectivity.Idle, connectivity.Connecting, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() for i := 0; i < len(want); i++ { select { case <-ctx.Done(): @@ -473,7 +481,7 @@ func (b *stateRecordingBalancerBuilder) Build(cc balancer.ClientConn, opts balan b.mu.Unlock() return &stateRecordingBalancer{ notifier: stateNotifications, - Balancer: balancer.Get(PickFirstBalancerName).Build(cc, opts), + Balancer: balancer.Get("pick_first").Build(cc, opts), } } @@ -485,10 +493,6 @@ func (b *stateRecordingBalancerBuilder) nextStateNotifier() <-chan connectivity. return ret } -type noBackoff struct{} - -func (b noBackoff) Backoff(int) time.Duration { return time.Duration(0) } - // Keep reading until something causes the connection to die (EOF, server // closed, etc). 
Useful as a tool for mindlessly keeping the connection // healthy, since the client will error if things like client prefaces are not @@ -498,3 +502,20 @@ func keepReading(conn net.Conn) { for _, err := conn.Read(buf); err == nil; _, err = conn.Read(buf) { } } + +// stayConnected makes cc stay connected by repeatedly calling cc.Connect() +// until the state becomes Shutdown or until ithe context expires. +func stayConnected(ctx context.Context, cc *grpc.ClientConn) { + for { + state := cc.GetState() + switch state { + case connectivity.Idle: + cc.Connect() + case connectivity.Shutdown: + return + } + if !cc.WaitForStateChange(ctx, state) { + return + } + } +} From a077b9468383c645cffbb02b58df82cda8bdc629 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 2 Aug 2022 15:58:50 -0400 Subject: [PATCH 576/998] Switched unlock to defer in newAddrConn (#5556) --- clientconn.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clientconn.go b/clientconn.go index 0d21f2210b68..779b03bca1c3 100644 --- a/clientconn.go +++ b/clientconn.go @@ -712,8 +712,8 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } @@ -732,7 +732,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }) cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } From 9bc72deba4d2107740084b05f415a766a62c6e06 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 2 Aug 2022 15:56:37 -0700 Subject: [PATCH 577/998] grpc: remove mentions of WithBalancerName from comments (#5555) --- service_config.go | 7 +++---- test/channelz_test.go | 2 -- vet.sh | 1 - 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/service_config.go b/service_config.go index b01c548bb9a9..01bbb2025aed 100644 --- a/service_config.go +++ b/service_config.go @@ -57,10 +57,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. If diff --git a/test/channelz_test.go b/test/channelz_test.go index ed8569bcd1a1..0633b0799fb2 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -1917,8 +1917,6 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv - // avoid newTest using WithBalancerName, which would override service - // config's change of balancer below. 
e.balancer = "" te := newTest(t, e) channelz.SetMaxTraceEntry(1) diff --git a/vet.sh b/vet.sh index ceb436c6ce47..c3fc8253b13a 100755 --- a/vet.sh +++ b/vet.sh @@ -147,7 +147,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer From b89f49b0ffe3a1ebdf037e7e42dc99ca6a90c8a5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 3 Aug 2022 08:51:59 -0700 Subject: [PATCH 578/998] xdsclient: deflake Test/LDSWatch_PartialValid (#5552) --- .../xdsclient/e2e_test/lds_watchers_test.go | 43 ++++++++++--------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index a9ddda12443b..2a8951c51d28 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -46,10 +46,10 @@ import ( _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. ) -func overrideFedEnvVar(t *testing.T) func() { +func overrideFedEnvVar(t *testing.T) { oldFed := envconfig.XDSFederation envconfig.XDSFederation = true - return func() { envconfig.XDSFederation = oldFed } + t.Cleanup(func() { envconfig.XDSFederation = oldFed }) } type s struct { @@ -148,7 +148,7 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want // // The test is run for old and new style names. func (s) TestLDSWatch(t *testing.T) { - defer overrideFedEnvVar(t)() + overrideFedEnvVar(t) mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() @@ -268,7 +268,7 @@ func (s) TestLDSWatch(t *testing.T) { // // The test is run for old and new style names. 
func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { - defer overrideFedEnvVar(t)() + overrideFedEnvVar(t) mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() @@ -402,7 +402,7 @@ func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { // // The test is run with both old and new style names. func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { - defer overrideFedEnvVar(t)() + overrideFedEnvVar(t) mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() @@ -474,7 +474,7 @@ func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { // watch callback is invoked with the contents from the cache, instead of a // request being sent to the management server. func (s) TestLDSWatch_ResourceCaching(t *testing.T) { - defer overrideFedEnvVar(t)() + overrideFedEnvVar(t) firstRequestReceived := false firstAckReceived := grpcsync.NewEvent() secondRequestReceived := grpcsync.NewEvent() @@ -574,7 +574,7 @@ func (s) TestLDSWatch_ResourceCaching(t *testing.T) { // // The test is run with both old and new style names. func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { - defer overrideFedEnvVar(t)() + overrideFedEnvVar(t) mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() @@ -682,7 +682,7 @@ func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { // server is NACK'ed by the xdsclient. The test verifies that the error is // propagated to the watcher. func (s) TestLDSWatch_NACKError(t *testing.T) { - defer overrideFedEnvVar(t)() + overrideFedEnvVar(t) mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() @@ -695,9 +695,11 @@ func (s) TestLDSWatch_NACKError(t *testing.T) { // Register a watch for a listener resource and have the watch // callback push the received update on to a channel. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() updateCh := testutils.NewChannel() ldsCancel := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { - updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + updateCh.SendContext(ctx, xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) }) defer ldsCancel() @@ -708,8 +710,6 @@ func (s) TestLDSWatch_NACKError(t *testing.T) { Listeners: []*v3listenerpb.Listener{badListenerResource(ldsName)}, SkipValidation: true, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } @@ -731,7 +731,7 @@ func (s) TestLDSWatch_NACKError(t *testing.T) { // to the valid resource receive the update, while watchers corresponding to the // invalid resource receive an error. func (s) TestLDSWatch_PartialValid(t *testing.T) { - defer overrideFedEnvVar(t)() + overrideFedEnvVar(t) mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() @@ -745,15 +745,18 @@ func (s) TestLDSWatch_PartialValid(t *testing.T) { // Register two watches for listener resources. The first watch is expected // to receive an error because the received resource is NACKed. The second // watch is expected to get a good update. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + badResourceName := ldsName updateCh1 := testutils.NewChannel() - ldsCancel1 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { - updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + ldsCancel1 := client.WatchListener(badResourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.SendContext(ctx, xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) }) defer ldsCancel1() - resourceName2 := ldsNameNewStyle + goodResourceName := ldsNameNewStyle updateCh2 := testutils.NewChannel() - ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { - updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + ldsCancel2 := client.WatchListener(goodResourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.SendContext(ctx, xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) }) defer ldsCancel2() @@ -762,13 +765,11 @@ func (s) TestLDSWatch_PartialValid(t *testing.T) { resources := e2e.UpdateOptions{ NodeID: nodeID, Listeners: []*v3listenerpb.Listener{ - badListenerResource(ldsName), - e2e.DefaultClientListener(resourceName2, rdsName), + badListenerResource(badResourceName), + e2e.DefaultClientListener(goodResourceName, rdsName), }, SkipValidation: true, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } From 946dde008f3d66634ab1ae4624a44efb10aa5be6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 3 Aug 2022 16:46:34 -0700 Subject: [PATCH 579/998] xdsclient: NACK endpoint resources with zero weight (#5560) --- internal/testutils/xds/e2e/clientresources.go | 1 + xds/internal/testutils/protos.go | 3 +++ 
xds/internal/xdsclient/controller/v2_eds_test.go | 6 +++--- .../xdsclient/xdsresource/unmarshal_eds.go | 16 ++++++++++++---- .../xdsclient/xdsresource/unmarshal_eds_test.go | 10 ++++++++++ 5 files changed, 29 insertions(+), 7 deletions(-) diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index f3f7f6307c53..f5363aff91f4 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -370,6 +370,7 @@ func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpoin PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, }}, }}, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, }) } return &v3endpointpb.ClusterLoadAssignment{ diff --git a/xds/internal/testutils/protos.go b/xds/internal/testutils/protos.go index fc3cdf307fcd..b9fbc7448e18 100644 --- a/xds/internal/testutils/protos.go +++ b/xds/internal/testutils/protos.go @@ -118,6 +118,9 @@ func (clab *ClusterLoadAssignmentBuilder) AddLocality(subzone string, weight uin lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]} } } + if lbe.LoadBalancingWeight == nil { + lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 1} + } lbEndPoints = append(lbEndPoints, lbe) } diff --git a/xds/internal/xdsclient/controller/v2_eds_test.go b/xds/internal/xdsclient/controller/v2_eds_test.go index aaa84f9c3d63..665fee92a1ea 100644 --- a/xds/internal/xdsclient/controller/v2_eds_test.go +++ b/xds/internal/xdsclient/controller/v2_eds_test.go @@ -117,7 +117,7 @@ func (s) TestEDSHandleResponse(t *testing.T) { "not-goodEDSName": {Update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { - Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 0, Weight: 1, @@ -140,13 +140,13 @@ func (s) TestEDSHandleResponse(t *testing.T) { 
goodEDSName: {Update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { - Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, ID: internal.LocalityID{SubZone: "locality-1"}, Priority: 1, Weight: 1, }, { - Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}}, + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159", Weight: 1}}, ID: internal.LocalityID{SubZone: "locality-2"}, Priority: 0, Weight: 1, diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index 7cc12d73d6d5..a2e8fd1c173f 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -90,16 +90,20 @@ func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropO } } -func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint { +func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) ([]Endpoint, error) { endpoints := make([]Endpoint, 0, len(lbEndpoints)) for _, lbEndpoint := range lbEndpoints { + weight := lbEndpoint.GetLoadBalancingWeight().GetValue() + if weight == 0 { + return nil, fmt.Errorf("EDS response contains an endpoint with zero weight: %+v", lbEndpoint) + } endpoints = append(endpoints, Endpoint{ HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), - Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), + Weight: weight, }) } - return endpoints + return endpoints, nil } func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.PrefixLogger) (EndpointsUpdate, error) { @@ -134,9 +138,13 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.Pr return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) } localitiesWithPriority[lidStr] = true + 
endpoints, err := parseEndpoints(locality.GetLbEndpoints()) + if err != nil { + return EndpointsUpdate{}, err + } ret.Localities = append(ret.Localities, Locality{ ID: lid, - Endpoints: parseEndpoints(locality.GetLbEndpoints()), + Endpoints: endpoints, Weight: locality.GetLoadBalancingWeight().GetValue(), Priority: priority, }) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index 3ce069c29664..28ceb11c6e16 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -64,6 +64,16 @@ func (s) TestEDSParseRespProto(t *testing.T) { want: EndpointsUpdate{}, wantErr: true, }, + { + name: "zero-endpoint-weight", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-0", 1, 0, []string{"addr1:314"}, &addLocalityOptions{Weight: []uint32{0}}) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, { name: "duplicate-locality-in-the-same-priority", m: func() *v3endpointpb.ClusterLoadAssignment { From f9409d385fbaa24c04d2c634aafe974f4f13a042 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 4 Aug 2022 12:11:45 -0700 Subject: [PATCH 580/998] ringhash: handle config updates properly (#5557) --- xds/internal/balancer/ringhash/ring.go | 34 +++++++------- xds/internal/balancer/ringhash/ring_test.go | 6 +-- xds/internal/balancer/ringhash/ringhash.go | 44 ++++++++++--------- .../balancer/ringhash/ringhash_test.go | 42 ++++++++++++++++-- 4 files changed, 81 insertions(+), 45 deletions(-) diff --git a/xds/internal/balancer/ringhash/ring.go b/xds/internal/balancer/ringhash/ring.go index 5e8d881006e6..71d31eaeb8b0 100644 --- a/xds/internal/balancer/ringhash/ring.go +++ b/xds/internal/balancer/ringhash/ring.go @@ -19,7 +19,6 @@ package ringhash import ( - "fmt" "math" "sort" "strconv" @@ -64,12 +63,12 @@ type ringEntry struct { // // To 
pick from a ring, a binary search will be done for the given target hash, // and first item with hash >= given hash will be returned. -func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64) (*ring, error) { +// +// Must be called with a non-empty subConns map. +func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64) *ring { // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 - normalizedWeights, minWeight, err := normalizeWeights(subConns) - if err != nil { - return nil, err - } + normalizedWeights, minWeight := normalizeWeights(subConns) + // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. // Scale up the size of the ring such that the least-weighted host gets a @@ -106,30 +105,29 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64) (*r for i, ii := range items { ii.idx = i } - return &ring{items: items}, nil + return &ring{items: items} } // normalizeWeights divides all the weights by the sum, so that the total weight // is 1. -func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64, error) { - keys := subConns.Keys() - if len(keys) == 0 { - return nil, 0, fmt.Errorf("number of subconns is 0") - } +// +// Must be called with a non-empty subConns map. +func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64) { var weightSum uint32 + keys := subConns.Keys() for _, a := range keys { weightSum += getWeightAttribute(a) } - if weightSum == 0 { - return nil, 0, fmt.Errorf("total weight of all subconns is 0") - } - weightSumF := float64(weightSum) ret := make([]subConnWithWeight, 0, len(keys)) min := float64(1.0) for _, a := range keys { v, _ := subConns.Get(a) scInfo := v.(*subConn) - nw := float64(getWeightAttribute(a)) / weightSumF + // getWeightAttribute() returns 1 if the weight attribute is not found + // on the address. 
And since this function is guaranteed to be called + // with a non-empty subConns map, weightSum is guaranteed to be + // non-zero. So, we need not worry about divide a by zero error here. + nw := float64(getWeightAttribute(a)) / float64(weightSum) ret = append(ret, subConnWithWeight{sc: scInfo, weight: nw}) if nw < min { min = nw @@ -142,7 +140,7 @@ func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float // where an address is added and then removed, the RPCs will still pick the // same old SubConn. sort.Slice(ret, func(i, j int) bool { return ret[i].sc.addr < ret[j].sc.addr }) - return ret, min, nil + return ret, min } // pick does a binary search. It returns the item with smallest index i that diff --git a/xds/internal/balancer/ringhash/ring_test.go b/xds/internal/balancer/ringhash/ring_test.go index 20184ab8d20e..b1d987609903 100644 --- a/xds/internal/balancer/ringhash/ring_test.go +++ b/xds/internal/balancer/ringhash/ring_test.go @@ -52,7 +52,7 @@ func (s) TestRingNew(t *testing.T) { for _, min := range []uint64{3, 4, 6, 8} { for _, max := range []uint64{20, 8} { t.Run(fmt.Sprintf("size-min-%v-max-%v", min, max), func(t *testing.T) { - r, _ := newRing(testSubConnMap, min, max) + r := newRing(testSubConnMap, min, max) totalCount := len(r.items) if totalCount < int(min) || totalCount > int(max) { t.Fatalf("unexpect size %v, want min %v, max %v", totalCount, min, max) @@ -82,7 +82,7 @@ func equalApproximately(x, y float64) bool { } func (s) TestRingPick(t *testing.T) { - r, _ := newRing(testSubConnMap, 10, 20) + r := newRing(testSubConnMap, 10, 20) for _, h := range []uint64{xxhash.Sum64String("1"), xxhash.Sum64String("2"), xxhash.Sum64String("3"), xxhash.Sum64String("4")} { t.Run(fmt.Sprintf("picking-hash-%v", h), func(t *testing.T) { e := r.pick(h) @@ -100,7 +100,7 @@ func (s) TestRingPick(t *testing.T) { } func (s) TestRingNext(t *testing.T) { - r, _ := newRing(testSubConnMap, 10, 20) + r := newRing(testSubConnMap, 10, 20) for _, e := 
range r.items { ne := r.next(e) diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index 3e06fc4eb6eb..e2ad49fca4ab 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -259,29 +259,22 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) - if b.config == nil { - newConfig, ok := s.BalancerConfig.(*LBConfig) - if !ok { - return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) - } - b.config = newConfig + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - // Successful resolution; clear resolver error and ensure we return nil. - b.resolverErr = nil - if b.updateAddresses(s.ResolverState.Addresses) { - // If addresses were updated, no matter whether it resulted in SubConn - // creation/deletion, or just weight update, we will need to regenerate - // the ring. - var err error - b.ring, err = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) - if err != nil { - b.ResolverError(fmt.Errorf("ringhash failed to make a new ring: %v", err)) - return balancer.ErrBadResolverState - } - b.regeneratePicker() - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + // If addresses were updated, whether it resulted in SubConn + // creation/deletion, or just weight update, we need to regenerate the ring + // and send a new picker. + regenerateRing := b.updateAddresses(s.ResolverState.Addresses) + + // If the ring configuration has changed, we need to regenerate the ring and + // send a new picker. 
+ if b.config == nil || b.config.MinRingSize != newConfig.MinRingSize || b.config.MaxRingSize != newConfig.MaxRingSize { + regenerateRing = true } + b.config = newConfig // If resolver state contains no addresses, return an error so ClientConn // will trigger re-resolve. Also records this as an resolver error, so when @@ -291,6 +284,17 @@ func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) err b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + if regenerateRing { + // Ring creation is guaranteed to not fail because we call newRing() + // with a non-empty subConns map. + b.ring = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + // Successful resolution; clear resolver error and return nil. + b.resolverErr = nil return nil } diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go index 02302321ce5e..e5b10556e982 100644 --- a/xds/internal/balancer/ringhash/ringhash_test.go +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -108,6 +108,40 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } +// TestUpdateClientConnState_NewRingSize tests the scenario where the ringhash +// LB policy receives new configuration which specifies new values for the ring +// min and max sizes. The test verifies that a new ring is created and a new +// picker is sent to the ClientConn. 
+func (s) TestUpdateClientConnState_NewRingSize(t *testing.T) { + origMinRingSize, origMaxRingSize := 1, 10 // Configured from `testConfig` in `setupTest` + newMinRingSize, newMaxRingSize := 20, 100 + + addrs := []resolver.Address{{Addr: testBackendAddrStrs[0]}} + cc, b, p1 := setupTest(t, addrs) + ring1 := p1.(*picker).ring + if ringSize := len(ring1.items); ringSize < origMinRingSize || ringSize > origMaxRingSize { + t.Fatalf("Ring created with size %d, want between [%d, %d]", ringSize, origMinRingSize, origMaxRingSize) + } + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: addrs}, + BalancerConfig: &LBConfig{MinRingSize: uint64(newMinRingSize), MaxRingSize: uint64(newMaxRingSize)}, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + + var ring2 *ring + select { + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for a picker update after a configuration update") + case p2 := <-cc.NewPickerCh: + ring2 = p2.(*picker).ring + } + if ringSize := len(ring2.items); ringSize < newMinRingSize || ringSize > newMaxRingSize { + t.Fatalf("Ring created with size %d, want between [%d, %d]", ringSize, newMinRingSize, newMaxRingSize) + } +} + func (s) TestOneSubConn(t *testing.T) { wantAddr1 := resolver.Address{Addr: testBackendAddrStrs[0]} cc, b, p0 := setupTest(t, []resolver.Address{wantAddr1}) @@ -320,7 +354,7 @@ func (s) TestAddrWeightChange(t *testing.T) { if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: wantAddrs}, - BalancerConfig: nil, + BalancerConfig: testConfig, }); err != nil { t.Fatalf("UpdateClientConnState returned err: %v", err) } @@ -336,7 +370,7 @@ func (s) TestAddrWeightChange(t *testing.T) { {Addr: testBackendAddrStrs[0]}, {Addr: testBackendAddrStrs[1]}, }}, - BalancerConfig: nil, + BalancerConfig: testConfig, }); err != nil { t.Fatalf("UpdateClientConnState returned err: %v", err) } @@ -359,7 +393,7 
@@ func (s) TestAddrWeightChange(t *testing.T) { resolver.Address{Addr: testBackendAddrStrs[1]}, weightedroundrobin.AddrInfo{Weight: 2}), }}, - BalancerConfig: nil, + BalancerConfig: testConfig, }); err != nil { t.Fatalf("UpdateClientConnState returned err: %v", err) } @@ -505,7 +539,7 @@ func (s) TestAddrBalancerAttributesChange(t *testing.T) { addrs2 := []resolver.Address{internal.SetLocalityID(resolver.Address{Addr: testBackendAddrStrs[0]}, internal.LocalityID{Region: "americas"})} if err := b.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: addrs2}, - BalancerConfig: nil, + BalancerConfig: testConfig, }); err != nil { t.Fatalf("UpdateClientConnState returned err: %v", err) } From 6f34b7ad1546ea084ed79f4b1db6ba5e61c2d8c4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 5 Aug 2022 15:12:37 -0700 Subject: [PATCH 581/998] xdsclient: NACK endpoint resource if load_balancing_weight is specified and is zero (#5568) --- internal/testutils/xds/e2e/clientresources.go | 1 - xds/internal/testutils/protos.go | 3 --- xds/internal/xdsclient/xdsresource/unmarshal_eds.go | 12 +++++++++--- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index f5363aff91f4..f3f7f6307c53 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -370,7 +370,6 @@ func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpoin PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, }}, }}, - LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, }) } return &v3endpointpb.ClusterLoadAssignment{ diff --git a/xds/internal/testutils/protos.go b/xds/internal/testutils/protos.go index b9fbc7448e18..fc3cdf307fcd 100644 --- a/xds/internal/testutils/protos.go +++ b/xds/internal/testutils/protos.go @@ -118,9 +118,6 @@ func (clab *ClusterLoadAssignmentBuilder) 
AddLocality(subzone string, weight uin lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]} } } - if lbe.LoadBalancingWeight == nil { - lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 1} - } lbEndPoints = append(lbEndPoints, lbe) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index a2e8fd1c173f..7d4b89dc9dc1 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -93,9 +93,15 @@ func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropO func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) ([]Endpoint, error) { endpoints := make([]Endpoint, 0, len(lbEndpoints)) for _, lbEndpoint := range lbEndpoints { - weight := lbEndpoint.GetLoadBalancingWeight().GetValue() - if weight == 0 { - return nil, fmt.Errorf("EDS response contains an endpoint with zero weight: %+v", lbEndpoint) + // If the load_balancing_weight field is specified, it must be set to a + // value of at least 1. If unspecified, each host is presumed to have + // equal weight in a locality. 
+ weight := uint32(1) + if w := lbEndpoint.GetLoadBalancingWeight(); w != nil { + if w.GetValue() == 0 { + return nil, fmt.Errorf("EDS response contains an endpoint with zero weight: %+v", lbEndpoint) + } + weight = w.GetValue() } endpoints = append(endpoints, Endpoint{ HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), From 7981af402b2327512c001f8c8eb92b5034ec8ad4 Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Tue, 9 Aug 2022 10:50:18 -0700 Subject: [PATCH 582/998] test/kokoro: add missing image tagging to the xDS interop url map buildscript (#5569) --- test/kokoro/xds_url_map.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh index 193a81bcd7b2..633004b0c800 100755 --- a/test/kokoro/xds_url_map.sh +++ b/test/kokoro/xds_url_map.sh @@ -37,6 +37,9 @@ build_test_app_docker_images() { docker build -f "${SRC_DIR}/interop/xds/client/Dockerfile" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" gcloud -q auth configure-docker docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" + if is_version_branch "${TESTING_VERSION}"; then + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + fi } ####################################### From c7fe135d124ebd51e6a9728d1e55c7bebc85407e Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 11 Aug 2022 18:17:31 -0400 Subject: [PATCH 583/998] O11Y: Added support for custom tags (#5565) * O11Y: Added support for custom tags --- gcp/observability/config.go | 87 ++++- gcp/observability/exporting.go | 14 +- .../internal/config/config.pb.go | 364 ------------------ .../internal/config/config.proto | 89 ----- gcp/observability/logging.go | 17 +- gcp/observability/observability.go | 2 +- gcp/observability/observability_test.go | 142 +++++-- gcp/observability/opencensus.go | 58 ++- gcp/observability/tags.go | 46 --- gcp/observability/tags_test.go | 64 --- 10 files changed, 236 
insertions(+), 647 deletions(-) delete mode 100644 gcp/observability/internal/config/config.pb.go delete mode 100644 gcp/observability/internal/config/config.proto delete mode 100644 gcp/observability/tags.go delete mode 100644 gcp/observability/tags_test.go diff --git a/gcp/observability/config.go b/gcp/observability/config.go index ecda5b230073..1a9f718e81b1 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -28,8 +28,6 @@ import ( gcplogging "cloud.google.com/go/logging" "golang.org/x/oauth2/google" - configpb "google.golang.org/grpc/gcp/observability/internal/config" - "google.golang.org/protobuf/encoding/protojson" ) const ( @@ -41,6 +39,69 @@ const ( var logFilterPatternRegexp = regexp.MustCompile(logFilterPatternRegexpStr) +// logFilter represents a method logging configuration. +type logFilter struct { + // Pattern is a string which can select a group of method names. By + // default, the Pattern is an empty string, matching no methods. + // + // Only "*" Wildcard is accepted for Pattern. A Pattern is in the form + // of / or just a character "*" . + // + // If the Pattern is "*", it specifies the defaults for all the + // services; If the Pattern is /*, it specifies the defaults + // for all methods in the specified service ; If the Pattern is + // */, this is not supported. + // + // Examples: + // - "Foo/Bar" selects only the method "Bar" from service "Foo" + // - "Foo/*" selects all methods from service "Foo" + // - "*" selects all methods from all services. + Pattern string `json:"pattern,omitempty"` + // HeaderBytes is the number of bytes of each header to log. If the size of + // the header is greater than the defined limit, content past the limit will + // be truncated. The default value is 0. + HeaderBytes int32 `json:"header_bytes,omitempty"` + // MessageBytes is the number of bytes of each message to log. If the size + // of the message is greater than the defined limit, content pass the limit + // will be truncated. 
The default value is 0. + MessageBytes int32 `json:"message_bytes,omitempty"` +} + +// config is configuration for observability behaviors. By default, no +// configuration is required for tracing/metrics/logging to function. This +// config captures the most common knobs for gRPC users. It's always possible to +// override with explicit config in code. +type config struct { + // EnableCloudTrace represents whether the tracing data upload to + // CloudTrace should be enabled or not. + EnableCloudTrace bool `json:"enable_cloud_trace,omitempty"` + // EnableCloudMonitoring represents whether the metrics data upload to + // CloudMonitoring should be enabled or not. + EnableCloudMonitoring bool `json:"enable_cloud_monitoring,omitempty"` + // EnableCloudLogging represents Whether the logging data upload to + // CloudLogging should be enabled or not. + EnableCloudLogging bool `json:"enable_cloud_logging,omitempty"` + // DestinationProjectID is the destination GCP project identifier for the + // uploading log entries. If empty, the gRPC Observability plugin will + // attempt to fetch the project_id from the GCP environment variables, or + // from the default credentials. + DestinationProjectID string `json:"destination_project_id,omitempty"` + // LogFilters is a list of method config. The order matters here - the first + // Pattern which matches the current method will apply the associated config + // options in the logFilter. Any other logFilter that also matches that + // comes later will be ignored. So a logFilter of "*/*" should appear last + // in this list. + LogFilters []logFilter `json:"log_filters,omitempty"` + // GlobalTraceSamplingRate is the global setting that controls the + // probability of a RPC being traced. For example, 0.05 means there is a 5% + // chance for a RPC to be traced, 1.0 means trace every call, 0 means don’t + // start new traces. 
+ GlobalTraceSamplingRate float64 `json:"global_trace_sampling_rate,omitempty"` + // CustomTags a list of custom tags that will be attached to every log + // entry. + CustomTags map[string]string `json:"custom_tags,omitempty"` +} + // fetchDefaultProjectID fetches the default GCP project id from environment. func fetchDefaultProjectID(ctx context.Context) string { // Step 1: Check ENV var @@ -62,25 +123,25 @@ func fetchDefaultProjectID(ctx context.Context) string { return credentials.ProjectID } -func validateFilters(config *configpb.ObservabilityConfig) error { - for _, filter := range config.GetLogFilters() { +func validateFilters(config *config) error { + for _, filter := range config.LogFilters { if filter.Pattern == "*" { continue } match := logFilterPatternRegexp.FindStringSubmatch(filter.Pattern) if match == nil { - return fmt.Errorf("invalid log filter pattern: %v", filter.Pattern) + return fmt.Errorf("invalid log filter Pattern: %v", filter.Pattern) } } return nil } // unmarshalAndVerifyConfig unmarshals a json string representing an -// observability config into its protobuf format, and also verifies the +// observability config into its internal go format, and also verifies the // configuration's fields for validity. 
-func unmarshalAndVerifyConfig(rawJSON json.RawMessage) (*configpb.ObservabilityConfig, error) { - var config configpb.ObservabilityConfig - if err := protojson.Unmarshal(rawJSON, &config); err != nil { +func unmarshalAndVerifyConfig(rawJSON json.RawMessage) (*config, error) { + var config config + if err := json.Unmarshal(rawJSON, &config); err != nil { return nil, fmt.Errorf("error parsing observability config: %v", err) } if err := validateFilters(&config); err != nil { @@ -93,7 +154,7 @@ func unmarshalAndVerifyConfig(rawJSON json.RawMessage) (*configpb.ObservabilityC return &config, nil } -func parseObservabilityConfig() (*configpb.ObservabilityConfig, error) { +func parseObservabilityConfig() (*config, error) { if fileSystemPath := os.Getenv(envObservabilityConfigJSON); fileSystemPath != "" { content, err := ioutil.ReadFile(fileSystemPath) // TODO: Switch to os.ReadFile once dropped support for go 1.15 if err != nil { @@ -107,14 +168,14 @@ func parseObservabilityConfig() (*configpb.ObservabilityConfig, error) { return nil, nil } -func ensureProjectIDInObservabilityConfig(ctx context.Context, config *configpb.ObservabilityConfig) error { - if config.GetDestinationProjectId() == "" { +func ensureProjectIDInObservabilityConfig(ctx context.Context, config *config) error { + if config.DestinationProjectID == "" { // Try to fetch the GCP project id projectID := fetchDefaultProjectID(ctx) if projectID == "" { return fmt.Errorf("empty destination project ID") } - config.DestinationProjectId = projectID + config.DestinationProjectID = projectID } return nil } diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go index cf95383726c3..0c566f4b183c 100644 --- a/gcp/observability/exporting.go +++ b/gcp/observability/exporting.go @@ -22,7 +22,6 @@ import ( "context" "encoding/json" "fmt" - "os" gcplogging "cloud.google.com/go/logging" grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" @@ -45,20 +44,19 @@ type 
cloudLoggingExporter struct { logger *gcplogging.Logger } -func newCloudLoggingExporter(ctx context.Context, projectID string) (*cloudLoggingExporter, error) { - c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", projectID)) +func newCloudLoggingExporter(ctx context.Context, config *config) (*cloudLoggingExporter, error) { + c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", config.DestinationProjectID)) if err != nil { return nil, fmt.Errorf("failed to create cloudLoggingExporter: %v", err) } defer logger.Infof("Successfully created cloudLoggingExporter") - customTags := getCustomTags(os.Environ()) - if len(customTags) != 0 { - logger.Infof("Adding custom tags: %+v", customTags) + if len(config.CustomTags) != 0 { + logger.Infof("Adding custom tags: %+v", config.CustomTags) } return &cloudLoggingExporter{ - projectID: projectID, + projectID: config.DestinationProjectID, client: c, - logger: c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(customTags)), + logger: c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(config.CustomTags)), }, nil } diff --git a/gcp/observability/internal/config/config.pb.go b/gcp/observability/internal/config/config.pb.go deleted file mode 100644 index a60269c984d3..000000000000 --- a/gcp/observability/internal/config/config.pb.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright 2022 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not -// use this file except in compliance with the License. You may obtain a copy of -// the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations under -// the License. - -// Observability Config is used by gRPC Observability plugin to control provided -// observability features. It contains parameters to enable/disable certain -// features, or fine tune the verbosity. -// -// Note that gRPC may use this config in JSON form, not in protobuf form. This -// proto definition is intended to help document the schema but might not -// actually be used directly by gRPC. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: gcp/observability/internal/config/config.proto - -package config - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Configuration for observability behaviors. By default, no configuration is -// required for tracing/metrics/logging to function. This config captures the -// most common knobs for gRPC users. It's always possible to override with -// explicit config in code. -type ObservabilityConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether the tracing data upload to CloudTrace should be enabled or not. 
- EnableCloudTrace bool `protobuf:"varint,4,opt,name=enable_cloud_trace,json=enableCloudTrace,proto3" json:"enable_cloud_trace,omitempty"` - // Whether the metrics data upload to CloudMonitoring should be enabled or - // not. - EnableCloudMonitoring bool `protobuf:"varint,5,opt,name=enable_cloud_monitoring,json=enableCloudMonitoring,proto3" json:"enable_cloud_monitoring,omitempty"` - // Whether the logging data upload to CloudLogging should be enabled or not. - EnableCloudLogging bool `protobuf:"varint,1,opt,name=enable_cloud_logging,json=enableCloudLogging,proto3" json:"enable_cloud_logging,omitempty"` - // The destination GCP project identifier for the uploading log entries. If - // empty, the gRPC Observability plugin will attempt to fetch the project_id - // from the GCP environment variables, or from the default credentials. - DestinationProjectId string `protobuf:"bytes,2,opt,name=destination_project_id,json=destinationProjectId,proto3" json:"destination_project_id,omitempty"` - // A list of method config. The order matters here - the first pattern which - // matches the current method will apply the associated config options in - // the LogFilter. Any other LogFilter that also matches that comes later - // will be ignored. So a LogFilter of "*/*" should appear last in this list. - LogFilters []*ObservabilityConfig_LogFilter `protobuf:"bytes,3,rep,name=log_filters,json=logFilters,proto3" json:"log_filters,omitempty"` - // The global setting that controls the probability of a RPC being traced. - // For example, 0.05 means there is a 5% chance for a RPC to be traced, 1.0 - // means trace every call, 0 means don’t start new traces. 
- GlobalTraceSamplingRate float64 `protobuf:"fixed64,6,opt,name=global_trace_sampling_rate,json=globalTraceSamplingRate,proto3" json:"global_trace_sampling_rate,omitempty"` -} - -func (x *ObservabilityConfig) Reset() { - *x = ObservabilityConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_gcp_observability_internal_config_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObservabilityConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObservabilityConfig) ProtoMessage() {} - -func (x *ObservabilityConfig) ProtoReflect() protoreflect.Message { - mi := &file_gcp_observability_internal_config_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObservabilityConfig.ProtoReflect.Descriptor instead. 
-func (*ObservabilityConfig) Descriptor() ([]byte, []int) { - return file_gcp_observability_internal_config_config_proto_rawDescGZIP(), []int{0} -} - -func (x *ObservabilityConfig) GetEnableCloudTrace() bool { - if x != nil { - return x.EnableCloudTrace - } - return false -} - -func (x *ObservabilityConfig) GetEnableCloudMonitoring() bool { - if x != nil { - return x.EnableCloudMonitoring - } - return false -} - -func (x *ObservabilityConfig) GetEnableCloudLogging() bool { - if x != nil { - return x.EnableCloudLogging - } - return false -} - -func (x *ObservabilityConfig) GetDestinationProjectId() string { - if x != nil { - return x.DestinationProjectId - } - return "" -} - -func (x *ObservabilityConfig) GetLogFilters() []*ObservabilityConfig_LogFilter { - if x != nil { - return x.LogFilters - } - return nil -} - -func (x *ObservabilityConfig) GetGlobalTraceSamplingRate() float64 { - if x != nil { - return x.GlobalTraceSamplingRate - } - return 0 -} - -type ObservabilityConfig_LogFilter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Pattern is a string which can select a group of method names. By - // default, the pattern is an empty string, matching no methods. - // - // Only "*" Wildcard is accepted for pattern. A pattern is in the form - // of / or just a character "*" . - // - // If the pattern is "*", it specifies the defaults for all the - // services; If the pattern is /*, it specifies the defaults - // for all methods in the specified service ; If the pattern is - // */, this is not supported. - // - // Examples: - // - "Foo/Bar" selects only the method "Bar" from service "Foo" - // - "Foo/*" selects all methods from service "Foo" - // - "*" selects all methods from all services. - Pattern string `protobuf:"bytes,1,opt,name=pattern,proto3" json:"pattern,omitempty"` - // Number of bytes of each header to log. 
If the size of the header is - // greater than the defined limit, content pass the limit will be - // truncated. The default value is 0. - HeaderBytes int32 `protobuf:"varint,2,opt,name=header_bytes,json=headerBytes,proto3" json:"header_bytes,omitempty"` - // Number of bytes of each message to log. If the size of the message is - // greater than the defined limit, content pass the limit will be - // truncated. The default value is 0. - MessageBytes int32 `protobuf:"varint,3,opt,name=message_bytes,json=messageBytes,proto3" json:"message_bytes,omitempty"` -} - -func (x *ObservabilityConfig_LogFilter) Reset() { - *x = ObservabilityConfig_LogFilter{} - if protoimpl.UnsafeEnabled { - mi := &file_gcp_observability_internal_config_config_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObservabilityConfig_LogFilter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObservabilityConfig_LogFilter) ProtoMessage() {} - -func (x *ObservabilityConfig_LogFilter) ProtoReflect() protoreflect.Message { - mi := &file_gcp_observability_internal_config_config_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObservabilityConfig_LogFilter.ProtoReflect.Descriptor instead. 
-func (*ObservabilityConfig_LogFilter) Descriptor() ([]byte, []int) { - return file_gcp_observability_internal_config_config_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *ObservabilityConfig_LogFilter) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *ObservabilityConfig_LogFilter) GetHeaderBytes() int32 { - if x != nil { - return x.HeaderBytes - } - return 0 -} - -func (x *ObservabilityConfig_LogFilter) GetMessageBytes() int32 { - if x != nil { - return x.MessageBytes - } - return 0 -} - -var File_gcp_observability_internal_config_config_proto protoreflect.FileDescriptor - -var file_gcp_observability_internal_config_config_proto_rawDesc = []byte{ - 0x0a, 0x2e, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x21, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x22, 0xf2, 0x03, 0x0a, 0x13, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x12, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, - 0x67, 0x12, 0x30, 0x0a, 
0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x6f, 0x67, 0x67, - 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, - 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2e, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x1a, - 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, - 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x17, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, - 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x6d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, - 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x68, 0x65, 
0x61, 0x64, 0x65, 0x72, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x74, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x18, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, - 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_gcp_observability_internal_config_config_proto_rawDescOnce sync.Once - file_gcp_observability_internal_config_config_proto_rawDescData = file_gcp_observability_internal_config_config_proto_rawDesc -) - -func file_gcp_observability_internal_config_config_proto_rawDescGZIP() []byte { - file_gcp_observability_internal_config_config_proto_rawDescOnce.Do(func() { - file_gcp_observability_internal_config_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_gcp_observability_internal_config_config_proto_rawDescData) - }) - return file_gcp_observability_internal_config_config_proto_rawDescData -} - -var file_gcp_observability_internal_config_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_gcp_observability_internal_config_config_proto_goTypes = []interface{}{ - (*ObservabilityConfig)(nil), // 0: grpc.observability.config.v1alpha.ObservabilityConfig - (*ObservabilityConfig_LogFilter)(nil), // 1: 
grpc.observability.config.v1alpha.ObservabilityConfig.LogFilter -} -var file_gcp_observability_internal_config_config_proto_depIdxs = []int32{ - 1, // 0: grpc.observability.config.v1alpha.ObservabilityConfig.log_filters:type_name -> grpc.observability.config.v1alpha.ObservabilityConfig.LogFilter - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_gcp_observability_internal_config_config_proto_init() } -func file_gcp_observability_internal_config_config_proto_init() { - if File_gcp_observability_internal_config_config_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_gcp_observability_internal_config_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObservabilityConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_gcp_observability_internal_config_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObservabilityConfig_LogFilter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_gcp_observability_internal_config_config_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_gcp_observability_internal_config_config_proto_goTypes, - DependencyIndexes: file_gcp_observability_internal_config_config_proto_depIdxs, - MessageInfos: file_gcp_observability_internal_config_config_proto_msgTypes, - }.Build() - File_gcp_observability_internal_config_config_proto = 
out.File - file_gcp_observability_internal_config_config_proto_rawDesc = nil - file_gcp_observability_internal_config_config_proto_goTypes = nil - file_gcp_observability_internal_config_config_proto_depIdxs = nil -} diff --git a/gcp/observability/internal/config/config.proto b/gcp/observability/internal/config/config.proto deleted file mode 100644 index 2c108bfa2abf..000000000000 --- a/gcp/observability/internal/config/config.proto +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may not -// use this file except in compliance with the License. You may obtain a copy of -// the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations under -// the License. - -// Observability Config is used by gRPC Observability plugin to control provided -// observability features. It contains parameters to enable/disable certain -// features, or fine tune the verbosity. -// -// Note that gRPC may use this config in JSON form, not in protobuf form. This -// proto definition is intended to help document the schema but might not -// actually be used directly by gRPC. - -syntax = "proto3"; - -package grpc.observability.config.v1alpha; - -option java_package = "io.grpc.observability.config"; -option java_multiple_files = true; -option java_outer_classname = "ObservabilityConfigProto"; -option go_package = "google.golang.org/grpc/gcp/observability/internal/config"; - -// Configuration for observability behaviors. By default, no configuration is -// required for tracing/metrics/logging to function. This config captures the -// most common knobs for gRPC users. 
It's always possible to override with -// explicit config in code. -message ObservabilityConfig { - // Whether the tracing data upload to CloudTrace should be enabled or not. - bool enable_cloud_trace = 4; - - // Whether the metrics data upload to CloudMonitoring should be enabled or - // not. - bool enable_cloud_monitoring = 5; - - // Whether the logging data upload to CloudLogging should be enabled or not. - bool enable_cloud_logging = 1; - - // The destination GCP project identifier for the uploading log entries. If - // empty, the gRPC Observability plugin will attempt to fetch the project_id - // from the GCP environment variables, or from the default credentials. - string destination_project_id = 2; - - message LogFilter { - // Pattern is a string which can select a group of method names. By - // default, the pattern is an empty string, matching no methods. - // - // Only "*" Wildcard is accepted for pattern. A pattern is in the form - // of / or just a character "*" . - // - // If the pattern is "*", it specifies the defaults for all the - // services; If the pattern is /*, it specifies the defaults - // for all methods in the specified service ; If the pattern is - // */, this is not supported. - // - // Examples: - // - "Foo/Bar" selects only the method "Bar" from service "Foo" - // - "Foo/*" selects all methods from service "Foo" - // - "*" selects all methods from all services. - string pattern = 1; - // Number of bytes of each header to log. If the size of the header is - // greater than the defined limit, content pass the limit will be - // truncated. The default value is 0. - int32 header_bytes = 2; - // Number of bytes of each message to log. If the size of the message is - // greater than the defined limit, content pass the limit will be - // truncated. The default value is 0. - int32 message_bytes = 3; - } - - // A list of method config. 
The order matters here - the first pattern which - // matches the current method will apply the associated config options in - // the LogFilter. Any other LogFilter that also matches that comes later - // will be ignored. So a LogFilter of "*/*" should appear last in this list. - repeated LogFilter log_filters = 3; - - // The global setting that controls the probability of a RPC being traced. - // For example, 0.05 means there is a 5% chance for a RPC to be traced, 1.0 - // means trace every call, 0 means don’t start new traces. - double global_trace_sampling_rate = 6; -} diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index ed7e76d74c04..f48af56997f4 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -27,7 +27,6 @@ import ( "github.com/google/uuid" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - configpb "google.golang.org/grpc/gcp/observability/internal/config" grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" iblog "google.golang.org/grpc/internal/binarylog" ) @@ -248,7 +247,7 @@ func (l *binaryLogger) Close() { } } -func validateExistingMethodLoggerConfig(existing *iblog.MethodLoggerConfig, filter *configpb.ObservabilityConfig_LogFilter) bool { +func validateExistingMethodLoggerConfig(existing *iblog.MethodLoggerConfig, filter logFilter) bool { // In future, we could add more validations. Currently, we only check if the // new filter configs are different than the existing one, if so, we log a // warning. 
@@ -258,7 +257,7 @@ func validateExistingMethodLoggerConfig(existing *iblog.MethodLoggerConfig, filt return existing == nil } -func createBinaryLoggerConfig(filters []*configpb.ObservabilityConfig_LogFilter) iblog.LoggerConfig { +func createBinaryLoggerConfig(filters []logFilter) iblog.LoggerConfig { config := iblog.LoggerConfig{ Services: make(map[string]*iblog.MethodLoggerConfig), Methods: make(map[string]*iblog.MethodLoggerConfig), @@ -296,8 +295,8 @@ func createBinaryLoggerConfig(filters []*configpb.ObservabilityConfig_LogFilter) // start is the core logic for setting up the custom binary logging logger, and // it's also useful for testing. -func (l *binaryLogger) start(config *configpb.ObservabilityConfig, exporter loggingExporter) error { - filters := config.GetLogFilters() +func (l *binaryLogger) start(config *config, exporter loggingExporter) error { + filters := config.LogFilters if len(filters) == 0 || exporter == nil { // Doing nothing is allowed if exporter != nil { @@ -318,14 +317,14 @@ func (l *binaryLogger) start(config *configpb.ObservabilityConfig, exporter logg return nil } -func (l *binaryLogger) Start(ctx context.Context, config *configpb.ObservabilityConfig) error { - if config == nil || !config.GetEnableCloudLogging() { +func (l *binaryLogger) Start(ctx context.Context, config *config) error { + if config == nil || !config.EnableCloudLogging { return nil } - if config.GetDestinationProjectId() == "" { + if config.DestinationProjectID == "" { return fmt.Errorf("failed to enable CloudLogging: empty destination_project_id") } - exporter, err := newCloudLoggingExporter(ctx, config.DestinationProjectId) + exporter, err := newCloudLoggingExporter(ctx, config) if err != nil { return fmt.Errorf("unable to create CloudLogging exporter: %v", err) } diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go index 40242692e8d2..1ab20d4cab5b 100644 --- a/gcp/observability/observability.go +++ b/gcp/observability/observability.go 
@@ -64,7 +64,7 @@ func Start(ctx context.Context) error { } // Enabling tracing and metrics via OpenCensus - if err := startOpenCensus(config, nil); err != nil { + if err := startOpenCensus(config); err != nil { return fmt.Errorf("failed to instrument OpenCensus: %v", err) } diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index c5fa59c6648f..3e8f1d704bda 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -34,8 +34,8 @@ import ( "go.opencensus.io/trace" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - configpb "google.golang.org/grpc/gcp/observability/internal/config" grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" + "google.golang.org/grpc/internal" iblog "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" @@ -129,7 +129,6 @@ func (fle *fakeLoggingExporter) Close() error { type test struct { t *testing.T fle *fakeLoggingExporter - fe *fakeOpenCensusExporter testServer testgrpc.TestServiceServer // nil means none // srv and srvAddr are set once startServer is called. 
@@ -197,7 +196,7 @@ func (te *test) clientConn() *grpc.ClientConn { return te.cc } -func (te *test) enablePluginWithConfig(config *configpb.ObservabilityConfig) { +func (te *test) enablePluginWithConfig(config *config) { // Injects the fake exporter for testing purposes te.fle = &fakeLoggingExporter{t: te.t} defaultLogger = newBinaryLogger(nil) @@ -208,10 +207,10 @@ func (te *test) enablePluginWithConfig(config *configpb.ObservabilityConfig) { } func (te *test) enablePluginWithCaptureAll() { - te.enablePluginWithConfig(&configpb.ObservabilityConfig{ + te.enablePluginWithConfig(&config{ EnableCloudLogging: true, - DestinationProjectId: "fake", - LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + DestinationProjectID: "fake", + LogFilters: []logFilter{ { Pattern: "*", HeaderBytes: infinitySizeBytes, @@ -223,14 +222,13 @@ func (te *test) enablePluginWithCaptureAll() { func (te *test) enableOpenCensus() { defaultMetricsReportingInterval = time.Millisecond * 100 - config := &configpb.ObservabilityConfig{ + config := &config{ EnableCloudLogging: true, EnableCloudTrace: true, EnableCloudMonitoring: true, GlobalTraceSamplingRate: 1.0, } - te.fe = &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: te.t} - startOpenCensus(config, te.fe) + startOpenCensus(config) } func checkEventCommon(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord) { @@ -522,7 +520,7 @@ func (s) TestLoggingForErrorCall(t *testing.T) { func (s) TestEmptyConfig(t *testing.T) { te := newTest(t) defer te.tearDown() - te.enablePluginWithConfig(&configpb.ObservabilityConfig{}) + te.enablePluginWithConfig(&config{}) te.startServer(&testServer{}) tc := testgrpc.NewTestServiceClient(te.clientConn()) @@ -554,10 +552,10 @@ func (s) TestOverrideConfig(t *testing.T) { // most specific one. The third filter allows message payload logging, and // others disabling the message payload logging. We should observe this // behavior latter. 
- te.enablePluginWithConfig(&configpb.ObservabilityConfig{ + te.enablePluginWithConfig(&config{ EnableCloudLogging: true, - DestinationProjectId: "fake", - LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + DestinationProjectID: "fake", + LogFilters: []logFilter{ { Pattern: "wont/match", MessageBytes: 0, @@ -621,10 +619,10 @@ func (s) TestNoMatch(t *testing.T) { // Setting 3 filters, expected to use the second filter. The second filter // allows message payload logging, and others disabling the message payload // logging. We should observe this behavior latter. - te.enablePluginWithConfig(&configpb.ObservabilityConfig{ + te.enablePluginWithConfig(&config{ EnableCloudLogging: true, - DestinationProjectId: "fake", - LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + DestinationProjectID: "fake", + LogFilters: []logFilter{ { Pattern: "wont/match", MessageBytes: 0, @@ -661,10 +659,10 @@ func (s) TestNoMatch(t *testing.T) { } func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { - config := &configpb.ObservabilityConfig{ + config := &config{ EnableCloudLogging: true, - DestinationProjectId: "fake", - LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + DestinationProjectID: "fake", + LogFilters: []logFilter{ { Pattern: ":-)", }, @@ -673,7 +671,7 @@ func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { }, }, } - configJSON, err := protojson.Marshal(config) + configJSON, err := json.Marshal(config) if err != nil { t.Fatalf("failed to convert config to JSON: %v", err) } @@ -689,7 +687,7 @@ func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { // place in the temporary portion of the file system dependent on system. It // also sets the environment variable GRPC_CONFIG_OBSERVABILITY_JSON to point to // this created config. 
-func createTmpConfigInFileSystem(rawJSON string) (*os.File, error) { +func createTmpConfigInFileSystem(rawJSON string) (func(), error) { configJSONFile, err := ioutil.TempFile(os.TempDir(), "configJSON-") if err != nil { return nil, fmt.Errorf("cannot create file %v: %v", configJSONFile.Name(), err) @@ -699,7 +697,10 @@ func createTmpConfigInFileSystem(rawJSON string) (*os.File, error) { return nil, fmt.Errorf("cannot write marshalled JSON: %v", err) } os.Setenv(envObservabilityConfigJSON, configJSONFile.Name()) - return configJSONFile, nil + return func() { + configJSONFile.Close() + os.Setenv(envObservabilityConfigJSON, "") + }, nil } // TestJSONEnvVarSet tests a valid observability configuration specified by the @@ -707,11 +708,12 @@ func createTmpConfigInFileSystem(rawJSON string) (*os.File, error) { // file path pointing to a JSON encoded config. func (s) TestJSONEnvVarSet(t *testing.T) { configJSON := `{ - "destinationProjectId": "fake", - "logFilters":[{"pattern":"*","headerBytes":1073741824,"messageBytes":1073741824}] + "destination_project_id": "fake", + "log_filters":[{"pattern":"*","header_bytes":1073741824,"message_bytes":1073741824}] }` - configJSONFile, err := createTmpConfigInFileSystem(configJSON) - defer configJSONFile.Close() + cleanup, err := createTmpConfigInFileSystem(configJSON) + defer cleanup() + if err != nil { t.Fatalf("failed to create config in file system: %v", err) } @@ -730,19 +732,19 @@ func (s) TestJSONEnvVarSet(t *testing.T) { // variable is set and valid. 
func (s) TestBothConfigEnvVarsSet(t *testing.T) { configJSON := `{ - "destinationProjectId":"fake", - "logFilters":[{"pattern":":-)"}, {"pattern":"*"}] + "destination_project_id":"fake", + "log_filters":[{"pattern":":-)"}, {"pattern_string":"*"}] }` - configJSONFile, err := createTmpConfigInFileSystem(configJSON) - defer configJSONFile.Close() + cleanup, err := createTmpConfigInFileSystem(configJSON) + defer cleanup() if err != nil { t.Fatalf("failed to create config in file system: %v", err) } // This configuration should be ignored, as precedence 2. - validConfig := &configpb.ObservabilityConfig{ + validConfig := &config{ EnableCloudLogging: true, - DestinationProjectId: "fake", - LogFilters: []*configpb.ObservabilityConfig_LogFilter{ + DestinationProjectID: "fake", + LogFilters: []logFilter{ { Pattern: "*", HeaderBytes: infinitySizeBytes, @@ -750,7 +752,7 @@ func (s) TestBothConfigEnvVarsSet(t *testing.T) { }, }, } - validConfigJSON, err := protojson.Marshal(validConfig) + validConfigJSON, err := json.Marshal(validConfig) if err != nil { t.Fatalf("failed to convert config to JSON: %v", err) } @@ -766,6 +768,7 @@ func (s) TestBothConfigEnvVarsSet(t *testing.T) { // a file (or valid configuration). 
func (s) TestErrInFileSystemEnvVar(t *testing.T) { os.Setenv(envObservabilityConfigJSON, "/this-file/does-not-exist") + defer os.Setenv(envObservabilityConfigJSON, "") if err := Start(context.Background()); err == nil { t.Fatalf("Invalid file system path not triggering error") } @@ -783,6 +786,16 @@ func (s) TestNoEnvSet(t *testing.T) { func (s) TestOpenCensusIntegration(t *testing.T) { te := newTest(t) defer te.tearDown() + fe := &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: te.t} + + defer func(ne func(config *config) (tracingMetricsExporter, error)) { + newExporter = ne + }(newExporter) + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + te.enableOpenCensus() te.startServer(&testServer{}) tc := testgrpc.NewTestServiceClient(te.clientConn()) @@ -807,17 +820,17 @@ func (s) TestOpenCensusIntegration(t *testing.T) { defer cancel() for ctx.Err() == nil { errs = nil - te.fe.mu.RLock() - if value := te.fe.SeenViews["grpc.io/client/completed_rpcs"]; value != TypeOpenCensusViewCount { + fe.mu.RLock() + if value := fe.SeenViews["grpc.io/client/completed_rpcs"]; value != TypeOpenCensusViewCount { errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) } - if value := te.fe.SeenViews["grpc.io/server/completed_rpcs"]; value != TypeOpenCensusViewCount { + if value := fe.SeenViews["grpc.io/server/completed_rpcs"]; value != TypeOpenCensusViewCount { errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) } - if te.fe.SeenSpans <= 0 { - errs = append(errs, fmt.Errorf("unexpected number of seen spans: %v <= 0", te.fe.SeenSpans)) + if fe.SeenSpans <= 0 { + errs = append(errs, fmt.Errorf("unexpected number of seen spans: %v <= 0", fe.SeenSpans)) } - te.fe.mu.RUnlock() + fe.mu.RUnlock() if len(errs) == 0 { break } @@ -827,3 +840,52 @@ func (s) TestOpenCensusIntegration(t *testing.T) 
{ t.Fatalf("Invalid OpenCensus export data: %v", errs) } } + +// TestCustomTagsTracingMetrics verifies that the custom tags defined in our +// observability configuration and set to two hardcoded values are passed to the +// function to create an exporter. +func (s) TestCustomTagsTracingMetrics(t *testing.T) { + defer func(ne func(config *config) (tracingMetricsExporter, error)) { + newExporter = ne + }(newExporter) + fe := &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: t} + newExporter = func(config *config) (tracingMetricsExporter, error) { + ct := config.CustomTags + if len(ct) < 1 { + t.Fatalf("less than 2 custom tags sent in") + } + if val, ok := ct["customtag1"]; !ok || val != "wow" { + t.Fatalf("incorrect custom tag: got %v, want %v", val, "wow") + } + if val, ok := ct["customtag2"]; !ok || val != "nice" { + t.Fatalf("incorrect custom tag: got %v, want %v", val, "nice") + } + return fe, nil + } + + // This configuration present in file system and it's defined custom tags should make it + // to the created exporter. + configJSON := `{ + "destination_project_id": "fake", + "enable_cloud_trace": true, + "enable_cloud_monitoring": true, + "global_trace_sampling_rate": 1.0, + "custom_tags":{"customtag1":"wow","customtag2":"nice"} + }` + cleanup, err := createTmpConfigInFileSystem(configJSON) + defer cleanup() + + // To clear globally registered tracing and metrics exporters. 
+ defer func() { + internal.ClearExtraDialOptions() + internal.ClearExtraServerOptions() + }() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err = Start(ctx) + defer End() + if err != nil { + t.Fatalf("Start() failed with err: %v", err) + } +} diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index c303eb87f76b..7d297f90bfc3 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -29,7 +29,6 @@ import ( "go.opencensus.io/stats/view" "go.opencensus.io/trace" "google.golang.org/grpc" - configpb "google.golang.org/grpc/gcp/observability/internal/config" "google.golang.org/grpc/internal" ) @@ -38,26 +37,59 @@ var ( defaultMetricsReportingInterval = time.Second * 30 ) +func tagsToMonitoringLabels(tags map[string]string) *stackdriver.Labels { + labels := &stackdriver.Labels{} + for k, v := range tags { + labels.Set(k, v, "") + } + return labels +} + +func tagsToTraceAttributes(tags map[string]string) map[string]interface{} { + ta := make(map[string]interface{}, len(tags)) + for k, v := range tags { + ta[k] = v + } + return ta +} + +type tracingMetricsExporter interface { + trace.Exporter + view.Exporter +} + +// global to stub out in tests +var newExporter = newStackdriverExporter + +func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { + // Create the Stackdriver exporter, which is shared between tracing and stats + mr := monitoredresource.Autodetect() + logger.Infof("Detected MonitoredResource:: %+v", mr) + var err error + exporter, err := stackdriver.NewExporter(stackdriver.Options{ + ProjectID: config.DestinationProjectID, + MonitoredResource: mr, + DefaultMonitoringLabels: tagsToMonitoringLabels(config.CustomTags), + DefaultTraceAttributes: tagsToTraceAttributes(config.CustomTags), + }) + if err != nil { + return nil, fmt.Errorf("failed to create Stackdriver exporter: %v", err) + } + return exporter, nil +} + // This method accepts 
config and exporter; the exporter argument is exposed to // assist unit testing of the OpenCensus behavior. -func startOpenCensus(config *configpb.ObservabilityConfig, exporter interface{}) error { +func startOpenCensus(config *config) error { // If both tracing and metrics are disabled, there's no point inject default // StatsHandler. if config == nil || (!config.EnableCloudTrace && !config.EnableCloudMonitoring) { return nil } - if exporter == nil { - // Create the Stackdriver exporter, which is shared between tracing and stats - mr := monitoredresource.Autodetect() - logger.Infof("Detected MonitoredResource:: %+v", mr) - var err error - if exporter, err = stackdriver.NewExporter(stackdriver.Options{ - ProjectID: config.DestinationProjectId, - MonitoredResource: mr, - }); err != nil { - return fmt.Errorf("failed to create Stackdriver exporter: %v", err) - } + exporter, err := newExporter(config) + if err != nil { + return err } var so trace.StartOptions diff --git a/gcp/observability/tags.go b/gcp/observability/tags.go deleted file mode 100644 index c9a900970ea9..000000000000 --- a/gcp/observability/tags.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package observability - -import ( - "strings" -) - -const ( - envPrefixCustomTags = "GRPC_OBSERVABILITY_" - envPrefixLen = len(envPrefixCustomTags) -) - -func getCustomTags(envs []string) map[string]string { - m := make(map[string]string) - for _, e := range envs { - if !strings.HasPrefix(e, envPrefixCustomTags) { - continue - } - tokens := strings.SplitN(e, "=", 2) - if len(tokens) == 2 { - if len(tokens[0]) == envPrefixLen { - // Empty key is not allowed - continue - } - m[tokens[0][envPrefixLen:]] = tokens[1] - } - } - return m -} diff --git a/gcp/observability/tags_test.go b/gcp/observability/tags_test.go deleted file mode 100644 index 5a0353a03087..000000000000 --- a/gcp/observability/tags_test.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package observability - -import ( - "reflect" - "testing" -) - -// TestGetCustomTags tests the normal tags parsing -func (s) TestGetCustomTags(t *testing.T) { - var ( - input = []string{ - "GRPC_OBSERVABILITY_APP_NAME=app1", - "GRPC_OBSERVABILITY_DATACENTER=us-west1-a", - "GRPC_OBSERVABILITY_smallcase=OK", - } - expect = map[string]string{ - "APP_NAME": "app1", - "DATACENTER": "us-west1-a", - "smallcase": "OK", - } - ) - result := getCustomTags(input) - if !reflect.DeepEqual(result, expect) { - t.Errorf("result [%+v] != expect [%+v]", result, expect) - } -} - -// TestGetCustomTagsInvalid tests the invalid cases of tags parsing -func (s) TestGetCustomTagsInvalid(t *testing.T) { - var ( - input = []string{ - "GRPC_OBSERVABILITY_APP_NAME=app1", - "GRPC_OBSERVABILITY=foo", - "GRPC_OBSERVABILITY_=foo", // Users should not set "" as key name - "GRPC_STUFF=foo", - "STUFF_GRPC_OBSERVABILITY_=foo", - } - expect = map[string]string{ - "APP_NAME": "app1", - } - ) - result := getCustomTags(input) - if !reflect.DeepEqual(result, expect) { - t.Errorf("result [%+v] != expect [%+v]", result, expect) - } -} From 92cee3440fed794a84394c42046b6b61c07af705 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 12 Aug 2022 14:13:55 -0400 Subject: [PATCH 584/998] gcp/observability: Add logging filters for logging, tracing, and metrics API calls (#5582) * gcp/observability: Add logging filters for logging, tracing, and metrics API calls --- gcp/observability/logging.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index f48af56997f4..8f93564353cd 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -197,12 +197,9 @@ func (l *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { ol = l.originalLogger.GetMethodLogger(methodName) } - // If user specify a "*" pattern, binarylog will log every 
single call and - // content. This means the exporting RPC's events will be captured. Even if - // we batch up the uploads in the exporting RPC, the message content of that - // RPC will be logged. Without this exclusion, we may end up with an ever - // expanding message field in log entries, and crash the process with OOM. - if methodName == "/google.logging.v2.LoggingServiceV2/WriteLogEntries" { + // Prevent logging from logging, traces, and metrics API calls. + if strings.HasPrefix(methodName, "/google.logging.v2.LoggingServiceV2/") || strings.HasPrefix(methodName, "/google.monitoring.v3.MetricService/") || + strings.HasPrefix(methodName, "/google.devtools.cloudtrace.v2.TraceService/") { return ol } From 802b32e0ec27636f3e60f83a8157a62d364f334e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 15 Aug 2022 17:22:53 +0000 Subject: [PATCH 585/998] Change version to 1.50.0-dev (#5585) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index e0ec0ffdbf7f..ac8b4bf8f166 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.49.0-dev" +const Version = "1.50.0-dev" From c11858e8bcbc4571c84a81ac6e2defc20c8e13b7 Mon Sep 17 00:00:00 2001 From: Anuraag Agrawal Date: Wed, 17 Aug 2022 02:13:12 +0900 Subject: [PATCH 586/998] Publish arm64 binaries to GitHub releases (#5561) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6c0748048cc9..673e05c069e5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: strategy: matrix: goos: [linux, darwin, windows] - goarch: [386, amd64] + goarch: [386, amd64, arm64] exclude: - goos: darwin goarch: 386 From 3f5b7ab48ca0e597815c6ade7c6c43e8eee95893 Mon Sep 17 00:00:00 2001 From: kennylong Date: Wed, 17 Aug 2022 01:16:30 +0800 Subject: [PATCH 587/998] internal/transport: fix typo (#5566) --- internal/transport/controlbuf.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 244f4b081d52..409769f48fdc 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -886,9 +886,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. 
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true From c56f196d25d129d55f02c289a9919614fb20f7d0 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 18 Aug 2022 15:06:30 +0000 Subject: [PATCH 588/998] internal/fakegrpclb: don't listen on all adapters (#5592) --- internal/testutils/fakegrpclb/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/testutils/fakegrpclb/server.go b/internal/testutils/fakegrpclb/server.go index 8e33340484e9..82be2c1af1a4 100644 --- a/internal/testutils/fakegrpclb/server.go +++ b/internal/testutils/fakegrpclb/server.go @@ -100,7 +100,7 @@ func NewServer(params ServerParams) (*Server, error) { }) } - lis, err := net.Listen("tcp", ":"+strconv.Itoa(params.ListenPort)) + lis, err := net.Listen("tcp", "localhost:"+strconv.Itoa(params.ListenPort)) if err != nil { return nil, fmt.Errorf("failed to listen on port %q: %v", params.ListenPort, err) } From 97cb7b1653d735224598d1f0d76df126890697fd Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 18 Aug 2022 17:37:07 +0000 Subject: [PATCH 589/998] xds/clusterresolver: prevent deadlock of concurrent Close and UpdateState calls (#5588) --- .../clusterresolver/resource_resolver.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 3e4e7a7af412..9c2fc6e7c797 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -192,13 +192,21 @@ func (rr *resourceResolver) resolveNow() { func (rr *resourceResolver) stop() { rr.mu.Lock() - defer rr.mu.Unlock() - for dm, r := range rr.childrenMap { - delete(rr.childrenMap, dm) - r.r.stop() - } + // Save the previous childrenMap to stop the children outside the mutex, + // and reinitialize the map. 
We only need to reinitialize to allow for the + // policy to be reused if the resource comes back. In practice, this does + // not happen as the parent LB policy will also be closed, causing this to + // be removed entirely, but a future use case might want to reuse the + // policy instead. + cm := rr.childrenMap + rr.childrenMap = make(map[discoveryMechanismKey]resolverMechanismTuple) rr.mechanisms = nil rr.children = nil + rr.mu.Unlock() + + for _, r := range cm { + r.r.stop() + } } // generate collects all the updates from all the resolvers, and push the From 02fbca0f406b09e2346a808b5f942ceb704a1082 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 22 Aug 2022 19:53:20 +0000 Subject: [PATCH 590/998] xds/resolver: generate channel ID randomly (#5591) --- xds/internal/resolver/serviceconfig.go | 5 ++--- xds/internal/resolver/serviceconfig_test.go | 7 ++++--- xds/internal/resolver/xds_resolver.go | 6 ++++++ xds/internal/xdsclient/xdsresource/type_rds.go | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index fd75af210457..d1dd79354ae0 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -245,9 +245,8 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ generatedHash = true generatedPolicyHash = true case xdsresource.HashPolicyTypeChannelID: - // Hash the ClientConn pointer which logically uniquely - // identifies the client. - policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)) + // Use the static channel ID as the hash for this policy. 
+ policyHash = cs.r.channelID generatedHash = true generatedPolicyHash = true } diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index 98d633a9e190..786b003c0154 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -20,7 +20,6 @@ package resolver import ( "context" - "fmt" "regexp" "testing" @@ -50,9 +49,11 @@ func (s) TestPruneActiveClusters(t *testing.T) { } func (s) TestGenerateRequestHash(t *testing.T) { + const channelID = 12378921 cs := &configSelector{ r: &xdsResolver{ - cc: &testClientConn{}, + cc: &testClientConn{}, + channelID: channelID, }, } tests := []struct { @@ -85,7 +86,7 @@ func (s) TestGenerateRequestHash(t *testing.T) { hashPolicies: []*xdsresource.HashPolicy{{ HashPolicyType: xdsresource.HashPolicyTypeChannelID, }}, - requestHashWant: xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)), + requestHashWant: channelID, rpcInfo: iresolver.RPCInfo{}, }, // TestGenerateRequestHashEmptyString tests generating request hashes diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index 4f31d9c44c38..f473fcbaa733 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" @@ -71,6 +72,7 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon closed: grpcsync.NewEvent(), updateCh: make(chan suWithError, 1), activeClusters: make(map[string]*clusterInfo), + channelID: grpcrand.Uint64(), } defer func() { if retErr != nil { @@ -184,6 +186,10 @@ type xdsResolver struct { activeClusters map[string]*clusterInfo curConfigSelector *configSelector 
+ + // A random number which uniquely identifies the channel which owns this + // resolver. + channelID uint64 } // sendNewServiceConfig prunes active clusters, generates a new service config diff --git a/xds/internal/xdsclient/xdsresource/type_rds.go b/xds/internal/xdsclient/xdsresource/type_rds.go index decffd4ae767..0504346c399f 100644 --- a/xds/internal/xdsclient/xdsresource/type_rds.go +++ b/xds/internal/xdsclient/xdsresource/type_rds.go @@ -80,7 +80,7 @@ const ( // HashPolicyTypeHeader specifies to hash a Header in the incoming request. HashPolicyTypeHeader HashPolicyType = iota // HashPolicyTypeChannelID specifies to hash a unique Identifier of the - // Channel. In grpc-go, this will be done using the ClientConn pointer. + // Channel. This is a 64-bit random int computed at initialization time. HashPolicyTypeChannelID ) From 641dc8710ceeda279e58d5bb1e33d45d43228136 Mon Sep 17 00:00:00 2001 From: feihu-stripe <88515053+feihu-stripe@users.noreply.github.com> Date: Wed, 24 Aug 2022 09:46:22 -0700 Subject: [PATCH 591/998] transport: add peer information to http2Server and http2Client context (#5589) --- internal/transport/http2_client.go | 4 ++- internal/transport/http2_server.go | 19 ++++++----- internal/transport/transport_test.go | 51 ++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 9 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 28c77af70aba..53643fa97477 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -326,6 +326,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), } + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -469,7 +471,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 28bcba0a33c6..3dd15647bc84 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -265,6 +265,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + // Add peer information to the http2server context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -485,14 +488,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. 
if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -1416,6 +1412,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index c1f9664ada67..760e1b64f358 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -35,6 +35,8 @@ import ( "testing" "time" + "google.golang.org/grpc/peer" + "github.com/google/go-cmp/cmp" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" @@ -2450,3 +2452,52 @@ func TestConnectionError_Unwrap(t *testing.T) { t.Error("ConnectionError does not unwrap") } } + +func (s) TestPeerSetInServerContext(t *testing.T) { + // create client and server transports. + server, client, cancel := setUp(t, 0, math.MaxUint32, normal) + defer cancel() + defer server.stop() + defer client.Close(fmt.Errorf("closed manually by test")) + + // create a stream with client transport. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := client.NewStream(ctx, &CallHdr{}) + if err != nil { + t.Fatalf("failed to create a stream: %v", err) + } + + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + + if len(server.conns) == 0 { + return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") + } + return false, nil + }) + + // verify peer is set in client transport context. + if _, ok := peer.FromContext(client.ctx); !ok { + t.Fatalf("Peer expected in client transport's context, but actually not found.") + } + + // verify peer is set in stream context. 
+ if _, ok := peer.FromContext(stream.ctx); !ok { + t.Fatalf("Peer expected in stream context, but actually not found.") + } + + // verify peer is set in server transport context. + server.mu.Lock() + for k := range server.conns { + sc, ok := k.(*http2Server) + if !ok { + t.Fatalf("ServerTransport is of type %T, want %T", k, &http2Server{}) + } + if _, ok = peer.FromContext(sc.ctx); !ok { + t.Fatalf("Peer expected in server transport's context, but actually not found.") + } + } + server.mu.Unlock() +} From b225ddaa0c83fa48bbbebc10610d5371c91cde97 Mon Sep 17 00:00:00 2001 From: Ronak Jain Date: Fri, 26 Aug 2022 23:46:00 +0530 Subject: [PATCH 592/998] transport: update http2 spec document link (#5597) --- internal/transport/http_util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index 56e95788d9cb..d632dc4e812a 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -45,7 +45,7 @@ import ( const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. 
http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 ) From d5dee5fdbdeb52f6ea10b37b2cc7ce37814642d7 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 26 Aug 2022 15:08:47 -0700 Subject: [PATCH 593/998] xds/ringhash: make reconnection logic work for a single subConn (#5601) --- .../ringhash/e2e/ringhash_balancer_test.go | 153 ++++++++++++++++++ xds/internal/balancer/ringhash/logging.go | 4 + xds/internal/balancer/ringhash/ringhash.go | 30 +++- 3 files changed, 180 insertions(+), 7 deletions(-) create mode 100644 xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go new file mode 100644 index 000000000000..19a7aafb73b5 --- /dev/null +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -0,0 +1,153 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash_test + +import ( + "context" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" + + _ "google.golang.org/grpc/xds/internal/balancer/ringhash" // Register the ring_hash_experimental LB policy. +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. +) + +type testService struct { + testpb.TestServiceServer +} + +func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil +} + +// TestRingHash_ReconnectToMoveOutOfTransientFailure tests the case where the +// ring contains a single subConn, and verifies that when the server goes down, +// the LB policy on the client automatically reconnects until the subChannel +// moves out of TRANSIENT_FAILURE. +func (s) TestRingHash_ReconnectToMoveOutOfTransientFailure(t *testing.T) { + // Create a restartable listener to simulate server being down. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis := testutils.NewRestartableListener(l) + + // Start a server backend exposing the test service. 
+ server := grpc.NewServer() + defer server.Stop() + testgrpc.RegisterTestServiceServer(server, &testService{}) + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Create a clientConn with a manual resolver (which is used to push the + // address of the test backend), and a default service config pointing to + // the use of the ring_hash_experimental LB policy. + const ringHashServiceConfig = `{"loadBalancingConfig": [{"ring_hash_experimental":{}}]}` + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(ringHashServiceConfig), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Push the address of the test backend through the manual resolver. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Stopping the server listener will close the transport on the client, + // which will lead to the channel eventually moving to IDLE. The ring_hash + // LB policy is not expected to reconnect by itself at this point. + lis.Stop() + for state := cc.GetState(); state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout waiting for channel to reach %q after server shutdown: %v", connectivity.Idle, err) + } + + // Make an RPC to get the ring_hash LB policy to reconnect and thereby move + // to TRANSIENT_FAILURE upon connection failure. 
+ client.EmptyCall(ctx, &testpb.Empty{}) + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if cc.GetState() == connectivity.TransientFailure { + break + } + } + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout waiting for channel to reach %q after server shutdown: %v", connectivity.TransientFailure, err) + } + + // An RPC at this point is expected to fail. + if _, err = client.EmptyCall(ctx, &testpb.Empty{}); err == nil { + t.Fatal("EmptyCall RPC succeeded when the channel is in TRANSIENT_FAILURE") + } + + // Restart the server listener. The ring_hash LB polcy is expected to + // attempt to reconnect on its own and come out of TRANSIENT_FAILURE, even + // without an RPC attempt. + lis.Restart() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if cc.GetState() == connectivity.Ready { + break + } + } + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout waiting for channel to reach READT after server restart: %v", err) + } + + // An RPC at this point is expected to fail. 
+ if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/xds/internal/balancer/ringhash/logging.go b/xds/internal/balancer/ringhash/logging.go index 64a1d467f554..3e0f0adf58eb 100644 --- a/xds/internal/balancer/ringhash/logging.go +++ b/xds/internal/balancer/ringhash/logging.go @@ -32,3 +32,7 @@ var logger = grpclog.Component("xds") func prefixLogger(p *ringhashBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } + +func subConnPrefixLogger(p *ringhashBalancer, sc *subConn) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)+fmt.Sprintf("[subConn %p] ", sc)) +} diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index e2ad49fca4ab..8056b29c127d 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -68,6 +68,7 @@ type subConn struct { addr string weight uint32 sc balancer.SubConn + logger *grpclog.PrefixLogger mu sync.RWMutex // This is the actual state of this SubConn (as updated by the ClientConn). @@ -117,6 +118,7 @@ func (sc *subConn) setState(s connectivity.State) { // Trigger Connect() if new state is Idle, and there is a queued connect. if sc.connectQueued { sc.connectQueued = false + sc.logger.Infof("Executing a queued connect for subConn moving to state: %v", sc.state) sc.sc.Connect() } else { sc.attemptingToConnect = false @@ -161,11 +163,13 @@ func (sc *subConn) queueConnect() { defer sc.mu.Unlock() sc.attemptingToConnect = true if sc.state == connectivity.Idle { + sc.logger.Infof("Executing a queued connect for subConn in state: %v", sc.state) sc.sc.Connect() return } // Queue this connect, and when this SubConn switches back to Idle (happens // after backoff in TransientFailure), it will Connect(). 
+ sc.logger.Infof("Queueing a connect for subConn in state: %v", sc.state) sc.connectQueued = true } @@ -216,10 +220,11 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { if val, ok := b.subConns.Get(addr); !ok { sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) if err != nil { - logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + b.logger.Warningf("Failed to create new SubConn: %v", err) continue } scs := &subConn{addr: addr.Addr, weight: newWeight, sc: sc} + scs.logger = subConnPrefixLogger(b, scs) scs.setState(connectivity.Idle) b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) b.subConns.Set(addr, scs) @@ -328,15 +333,18 @@ func (b *ringhashBalancer) ResolverError(err error) { // for some RPCs. func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - b.logger.Infof("handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + b.logger.Infof("Handle SubConn state change: %p, %v", sc, s) + } scs, ok := b.scStates[sc] if !ok { - b.logger.Infof("got state changes for an unknown SubConn: %p, %v", sc, s) + b.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) return } oldSCState := scs.effectiveState() scs.setState(s) newSCState := scs.effectiveState() + b.logger.Infof("SubConn's effective old state was: %v, new state is %v", oldSCState, newSCState) var sendUpdate bool oldBalancerState := b.state @@ -353,15 +361,15 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance // No need to send an update. No queued RPC can be unblocked. If the // overall state changed because of this, sendUpdate is already true. case connectivity.Ready: - // Resend the picker, there's no need to regenerate the picker because - // the ring didn't change. 
+ // We need to regenerate the picker even if the ring has not changed
+ // because we could be moving from TRANSIENT_FAILURE to READY, in which
+ // case, we need to update the error picker returned earlier.
+ b.regeneratePicker()
 sendUpdate = true
 case connectivity.TransientFailure:
 // Save error to be reported via picker.
 b.connErr = state.ConnectionError
- // Regenerate picker to update error message.
 b.regeneratePicker()
- sendUpdate = true
 case connectivity.Shutdown:
 // When an address was removed by resolver, b called RemoveSubConn but
 // kept the sc's state in scStates. Remove state for this sc here.
@@ -369,6 +377,7 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance
 }
 if sendUpdate {
+ b.logger.Infof("Pushing new state %v and picker %p", b.state, b.picker)
 b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
 }
@@ -399,7 +408,14 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance
 sc := nextSkippingDuplicatesSubConn(b.ring, scs)
 if sc != nil {
 sc.queueConnect()
+ return
 }
+ // This handles the edge case where we have a single subConn in the
+ // ring. nextSkippingDuplicatesSubConn() would have returned nil. We
+ // still need to ensure that some subConn is attempting to connect, in
+ // order to give the LB policy a chance to move out of
+ // TRANSIENT_FAILURE. Hence, we try connecting on the current subConn.
+ scs.queueConnect() } } From fe592260bf659cfbf539642ba3145dc998417a00 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 29 Aug 2022 16:23:12 -0700 Subject: [PATCH 594/998] clusterresolver: deflake eds_impl tests (#5562) --- internal/testutils/roundrobin/roundrobin.go | 223 +++++++ .../xds/e2e/setup_management_server.go | 15 +- test/balancer_switching_test.go | 44 +- test/roundrobin_test.go | 76 +-- .../clusterresolver/clusterresolver_test.go | 2 +- .../clusterresolver/e2e_test/eds_impl_test.go | 587 ++++++++++++++++++ .../balancer/clusterresolver/eds_impl_test.go | 569 ----------------- .../balancer/clusterresolver/priority_test.go | 59 ++ 8 files changed, 911 insertions(+), 664 deletions(-) create mode 100644 internal/testutils/roundrobin/roundrobin.go create mode 100644 xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go delete mode 100644 xds/internal/balancer/clusterresolver/eds_impl_test.go diff --git a/internal/testutils/roundrobin/roundrobin.go b/internal/testutils/roundrobin/roundrobin.go new file mode 100644 index 000000000000..034c8c6f9d0f --- /dev/null +++ b/internal/testutils/roundrobin/roundrobin.go @@ -0,0 +1,223 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin contains helper functions to check for roundrobin and +// weighted-roundrobin load balancing of RPCs in tests. 
+package roundrobin + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +var logger = grpclog.Component("testutils-roundrobin") + +// waitForTrafficToReachBackends repeatedly makes RPCs using the provided +// TestServiceClient until RPCs reach all backends specified in addrs, or the +// context expires, in which case a non-nil error is returned. +func waitForTrafficToReachBackends(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { + // Make sure connections to all backends are up. We need to do this two + // times (to be sure that round_robin has kicked in) because the channel + // could have been configured with a different LB policy before the switch + // to round_robin. And the previous LB policy could be sharing backends with + // round_robin, and therefore in the first iteration of this loop, RPCs + // could land on backends owned by the previous LB policy. + for j := 0; j < 2; j++ { + for i := 0; i < len(addrs); i++ { + for { + time.Sleep(time.Millisecond) + if ctx.Err() != nil { + return fmt.Errorf("timeout waiting for connection to %q to be up", addrs[i].Addr) + } + var peer peer.Peer + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + // Some tests remove backends and check if round robin is + // happening across the remaining backends. In such cases, + // RPCs can initially fail on the connection using the + // removed backend. Just keep retrying and eventually the + // connection using the removed backend will shutdown and + // will be removed. 
+ continue
+ }
+ if peer.Addr.String() == addrs[i].Addr {
+ break
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// CheckRoundRobinRPCs verifies that EmptyCall RPCs on the given ClientConn,
+// connected to a server exposing the test.grpc_testing.TestService, are
+// roundrobined across the given backend addresses.
+//
+// Returns a non-nil error if context deadline expires before RPCs start to get
+// roundrobined across the given backends.
+func CheckRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error {
+ if err := waitForTrafficToReachBackends(ctx, client, addrs); err != nil {
+ return err
+ }
+
+ // At this point, RPCs are getting successfully executed at the backends
+ // that we care about. To support duplicate addresses (in addrs) and
+ // backends being removed from the list of addresses passed to the
+ // roundrobin LB, we do the following:
+ // 1. Determine the count of RPCs that we expect each of our backends to
+ // receive per iteration.
+ // 2. Wait until the same pattern repeats a few times, or the context
+ // deadline expires.
+ wantAddrCount := make(map[string]int)
+ for _, addr := range addrs {
+ wantAddrCount[addr.Addr]++
+ }
+ for ; ctx.Err() == nil; <-time.After(time.Millisecond) {
+ // Perform 3 more iterations.
+ var iterations [][]string
+ for i := 0; i < 3; i++ {
+ iteration := make([]string, len(addrs))
+ for c := 0; c < len(addrs); c++ {
+ var peer peer.Peer
+ if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil {
+ return fmt.Errorf("EmptyCall() = %v, want ", err)
+ }
+ iteration[c] = peer.Addr.String()
+ }
+ iterations = append(iterations, iteration)
+ }
+ // Ensure the first iteration contains all addresses in addrs.
+ gotAddrCount := make(map[string]int)
+ for _, addr := range iterations[0] {
+ gotAddrCount[addr]++
+ }
+ if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" {
+ logger.Infof("non-roundrobin, got address count in one iteration: %v, want: %v, Diff: %s", gotAddrCount, wantAddrCount, diff)
+ continue
+ }
+ // Ensure all three iterations contain the same addresses.
+ if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) {
+ logger.Infof("non-roundrobin, first iter: %v, second iter: %v, third iter: %v", iterations[0], iterations[1], iterations[2])
+ continue
+ }
+ return nil
+ }
+ return fmt.Errorf("Timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs)
+}
+
+// CheckWeightedRoundRobinRPCs verifies that EmptyCall RPCs on the given
+// ClientConn, connected to a server exposing the test.grpc_testing.TestService,
+// are weighted roundrobined (with randomness) across the given backend
+// addresses.
+//
+// Returns a non-nil error if context deadline expires before RPCs start to get
+// roundrobined across the given backends.
+func CheckWeightedRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error {
+ if err := waitForTrafficToReachBackends(ctx, client, addrs); err != nil {
+ return err
+ }
+
+ // At this point, RPCs are getting successfully executed at the backends
+ // that we care about. To take the randomness of the WRR into account, we
+ // look for approximate distribution instead of exact.
+ wantAddrCount := make(map[string]int)
+ for _, addr := range addrs {
+ wantAddrCount[addr.Addr]++
+ }
+ wantRatio := make(map[string]float64)
+ for addr, count := range wantAddrCount {
+ wantRatio[addr] = float64(count) / float64(len(addrs))
+ }
+
+ // There is a small possibility that RPCs are reaching backends that we
+ // don't expect them to reach here. This can happen because:
+ // - at time T0, the list of backends was [A, B, C, D].
+ // - at time T1, the test updates the list of backends to [A, B, C], and
+ // immediately starts attempting to check the distribution of RPCs to the
+ // new backends.
+ // - there is no way for the test to wait for a new picker to be pushed on
+ // to the channel (which contains the updated list of backends) before
+ // starting to attempt the RPC distribution checks.
+ // - This is usually a transitory state and will eventually fix itself when
+ // the new picker is pushed on the channel, and RPCs will start getting
+ // routed to only backends that we care about.
+ //
+ // We work around this situation by using two loops. The inner loop contains
+ // the meat of the calculations, and includes the logic which factors out
+ // the randomness in weighted roundrobin. If we ever see an RPC getting
+ // routed to a backend that we don't expect it to get routed to, we break
+ // from the inner loop thereby resetting all state and start afresh.
+ for {
+ results := make(map[string]float64)
+ totalCount := float64(0)
+ InnerLoop:
+ for {
+ if ctx.Err() != nil {
+ return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs)
+ }
+ for i := 0; i < len(addrs); i++ {
+ var peer peer.Peer
+ if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil {
+ return fmt.Errorf("EmptyCall() = %v, want ", err)
+ }
+ if addr := peer.Addr.String(); wantAddrCount[addr] == 0 {
+ break InnerLoop
+ }
+ results[peer.Addr.String()]++
+ }
+ totalCount += float64(len(addrs))
+
+ gotRatio := make(map[string]float64)
+ for addr, count := range results {
+ gotRatio[addr] = count / totalCount
+ }
+ if equalApproximate(gotRatio, wantRatio) {
+ return nil
+ }
+ logger.Infof("non-weighted-roundrobin, gotRatio: %v, wantRatio: %v", gotRatio, wantRatio)
+ }
+ <-time.After(time.Millisecond)
+ }
+}
+
+func equalApproximate(got, want map[string]float64) bool {
+ if len(got) != len(want) {
+ return false
+ }
+ opt := cmp.Comparer(func(x, y
float64) bool { + delta := math.Abs(x - y) + mean := math.Abs(x+y) / 2.0 + return delta/mean < 0.05 + }) + for addr := range want { + if !cmp.Equal(got[addr], want[addr], opt) { + return false + } + } + return true +} diff --git a/internal/testutils/xds/e2e/setup_management_server.go b/internal/testutils/xds/e2e/setup_management_server.go index c61f0620cb11..b5efa2bd175c 100644 --- a/internal/testutils/xds/e2e/setup_management_server.go +++ b/internal/testutils/xds/e2e/setup_management_server.go @@ -89,12 +89,15 @@ func SetupManagementServer(t *testing.T, opts *ManagementServerOptions) (*Manage server.Stop() t.Fatalf("Failed to create bootstrap file: %v", err) } - resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error)) - resolver, err := resolverBuilder(bootstrapContents) - if err != nil { - server.Stop() - t.Fatalf("Failed to create xDS resolver for testing: %v", err) + + var rb resolver.Builder + if newResolver := internal.NewXDSResolverWithConfigForTesting; newResolver != nil { + rb, err = newResolver.(func([]byte) (resolver.Builder, error))(bootstrapContents) + if err != nil { + server.Stop() + t.Fatalf("Failed to create xDS resolver for testing: %v", err) + } } - return server, nodeID, bootstrapContents, resolver, func() { server.Stop() } + return server, nodeID, bootstrapContents, rb, func() { server.Stop() } } diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go index ede88fda572b..94ac796558bc 100644 --- a/test/balancer_switching_test.go +++ b/test/balancer_switching_test.go @@ -31,8 +31,10 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils/fakegrpclb" + rrutil "google.golang.org/grpc/internal/testutils/roundrobin" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" + testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -134,7 +136,8 @@ func (s) 
TestBalancerSwitch_Basic(t *testing.T) { Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { t.Fatal(err) } @@ -169,7 +172,8 @@ func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) { r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkRoundRobin(ctx, cc, addrs[0:1]); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[0:1]); err != nil { t.Fatal(err) } @@ -177,7 +181,7 @@ func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) { // This should not lead to a balancer switch. const nonExistentServer = "non-existent-grpclb-server-address" r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: nonExistentServer, Type: resolver.GRPCLB}}}) - if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } @@ -220,14 +224,15 @@ func (s) TestBalancerSwitch_pickFirstToGRPCLB(t *testing.T) { // to the grpclb server we created above. This will cause the channel to // switch to the "grpclb" balancer, which returns a single backend address. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) - if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } // Push a resolver update containing a non-existent grpclb server address. // This should not lead to a balancer switch. 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "nonExistentServer", Type: resolver.GRPCLB}}}) - if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } @@ -272,7 +277,8 @@ func (s) TestBalancerSwitch_RoundRobinToGRPCLB(t *testing.T) { r.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } @@ -283,13 +289,13 @@ func (s) TestBalancerSwitch_RoundRobinToGRPCLB(t *testing.T) { Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}, ServiceConfig: scpr, }) - if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } // Switch back to "round_robin". 
r.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr}) - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } } @@ -337,7 +343,8 @@ func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) { Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } } @@ -375,7 +382,8 @@ func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing r.UpdateState(resolver.State{ Addresses: append(addrs[1:], resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), }) - if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } @@ -388,13 +396,13 @@ func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing Addresses: append(addrs[1:], resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), ServiceConfig: scpr, }) - if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } // Switch to "round_robin" by removing the address of type "grpclb". 
r.UpdateState(resolver.State{Addresses: addrs[1:]}) - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } } @@ -422,7 +430,8 @@ func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkRoundRobin(ctx, cc, addrs[:1]); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } @@ -432,7 +441,7 @@ func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { Addresses: addrs[1:], ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } @@ -444,7 +453,7 @@ func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { // else, the address of type "grpclb" should be ignored. 
grpclbAddr := resolver.Address{Addr: "non-existent-grpclb-server-address", Type: resolver.GRPCLB} r.UpdateState(resolver.State{Addresses: append(addrs[1:], grpclbAddr)}) - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } } @@ -545,7 +554,8 @@ func (s) TestBalancerSwitch_Graceful(t *testing.T) { Addresses: addrs[1:], ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } @@ -591,7 +601,7 @@ func (s) TestBalancerSwitch_Graceful(t *testing.T) { t.Fatal("Timeout when waiting for a ClientConnState update on the new balancer") case <-ccUpdateCh: } - if err := checkRoundRobin(ctx, cc, addrs[1:]); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index 0a1300479bf6..80f04dd25ab4 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -20,12 +20,10 @@ package test import ( "context" - "fmt" "strings" "testing" "time" - "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -33,82 +31,17 @@ import ( "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/stubserver" + rrutil "google.golang.org/grpc/internal/testutils/roundrobin" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" ) const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` -func 
checkRoundRobin(ctx context.Context, cc *grpc.ClientConn, addrs []resolver.Address) error { - client := testgrpc.NewTestServiceClient(cc) - // Make sure connections to all backends are up. We need to do this two - // times (to be sure that round_robin has kicked in) because the channel - // could have been configured with a different LB policy before the switch - // to round_robin. And the previous LB policy could be sharing backends with - // round_robin, and therefore in the first iteration of this loop, RPCs - // could land on backends owned by the previous LB policy. - backendCount := len(addrs) - for j := 0; j < 2; j++ { - for i := 0; i < backendCount; i++ { - for { - time.Sleep(time.Millisecond) - if ctx.Err() != nil { - return fmt.Errorf("timeout waiting for connection to %q to be up", addrs[i].Addr) - } - var peer peer.Peer - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { - // Some tests remove backends and check if round robin is - // happening across the remaining backends. In such cases, - // RPCs can initially fail on the connection using the - // removed backend. Just keep retrying and eventually the - // connection using the removed backend will shutdown and - // will be removed. - continue - } - if peer.Addr.String() == addrs[i].Addr { - break - } - } - } - } - // Perform 3 iterations. - var iterations [][]string - for i := 0; i < 3; i++ { - iteration := make([]string, backendCount) - for c := 0; c < backendCount; c++ { - var peer peer.Peer - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { - return fmt.Errorf("EmptyCall() = %v, want ", err) - } - iteration[c] = peer.Addr.String() - } - iterations = append(iterations, iteration) - } - // Ensure the the first iteration contains all addresses in addrs. To - // support duplicate addresses, we determine the count of each address. 
- wantAddrCount := make(map[string]int) - for _, addr := range addrs { - wantAddrCount[addr.Addr]++ - } - gotAddrCount := make(map[string]int) - for _, addr := range iterations[0] { - gotAddrCount[addr]++ - } - if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" { - return fmt.Errorf("non-roundrobin, got address count in one iteration: %v, want: %v, Diff: %s", gotAddrCount, wantAddrCount, diff) - } - // Ensure all three iterations contain the same addresses. - if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) { - return fmt.Errorf("non-roundrobin, first iter: %v, second iter: %v, third iter: %v", iterations[0], iterations[1], iterations[2]) - } - return nil -} - func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer) { t.Helper() @@ -157,7 +90,7 @@ func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOpt } r.UpdateState(resolver.State{Addresses: addrs}) - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { t.Fatal(err) } return cc, r, backends @@ -275,7 +208,8 @@ func (s) TestRoundRobin_OneServerDown(t *testing.T) { for i := 0; i < len(backends)-1; i++ { addrs[i] = resolver.Address{Addr: backends[i].Address} } - if err := checkRoundRobin(ctx, cc, addrs); err != nil { + client := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { t.Fatalf("RPCs are not being round robined across remaining servers: %v", err) } } diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 6e96f2e31f8a..f6f6249d9e89 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -48,7 +48,7 @@ import ( ) const ( - defaultTestTimeout 
= 1 * time.Second + defaultTestTimeout = 5 * time.Second defaultTestShortTimeout = 10 * time.Millisecond testEDSServcie = "test-eds-service-name" testClusterName = "test-cluster-name" diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go new file mode 100644 index 000000000000..6742675ed6ce --- /dev/null +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -0,0 +1,587 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package e2e_test + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + rrutil "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/xdsclient" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" + + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the "cluster_resolver_experimental" LB policy. + _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. +) + +const ( + clusterName = "cluster-my-service-client-side-xds" + edsServiceName = "endpoints-my-service-client-side-xds" + localityName1 = "my-locality-1" + localityName2 = "my-locality-2" + localityName3 = "my-locality-3" + + defaultTestTimeout = 5 * time.Second +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// backendAddressesAndPorts extracts the address and port of each of the +// StubServers passed in and returns them. Fails the test if any of the +// StubServers passed have an invalid address. 
+func backendAddressesAndPorts(t *testing.T, servers []*stubserver.StubServer) ([]resolver.Address, []uint32) { + addrs := make([]resolver.Address, len(servers)) + ports := make([]uint32, len(servers)) + for i := 0; i < len(servers); i++ { + addrs[i] = resolver.Address{Addr: servers[i].Address} + ports[i] = extractPortFromAddress(t, servers[i].Address) + } + return addrs, ports +} + +func extractPortFromAddress(t *testing.T, address string) uint32 { + _, p, err := net.SplitHostPort(address) + if err != nil { + t.Fatalf("invalid server address %q: %v", address, err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("invalid server address %q: %v", address, err) + } + return uint32(port) +} + +func startTestServiceBackends(t *testing.T, numBackends int) ([]*stubserver.StubServer, func()) { + servers := make([]*stubserver.StubServer, numBackends) + for i := 0; i < numBackends; i++ { + servers[i] = &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + servers[i].StartServer() + } + + return servers, func() { + for _, server := range servers { + server.Stop() + } + } +} + +// endpointResource returns an EDS resource for the given cluster name and +// localities. Backends within a locality are all assumed to be on the same +// machine (localhost). 
+func endpointResource(clusterName string, localities []localityInfo) *v3endpointpb.ClusterLoadAssignment { + var localityEndpoints []*v3endpointpb.LocalityLbEndpoints + for _, locality := range localities { + var endpoints []*v3endpointpb.LbEndpoint + for i, port := range locality.ports { + endpoint := &v3endpointpb.LbEndpoint{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: "localhost", + PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, + }, + }, + }, + }, + } + if i < len(locality.healthStatus) { + endpoint.HealthStatus = locality.healthStatus[i] + } + endpoints = append(endpoints, endpoint) + } + localityEndpoints = append(localityEndpoints, &v3endpointpb.LocalityLbEndpoints{ + Locality: &v3corepb.Locality{SubZone: locality.name}, + LbEndpoints: endpoints, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: locality.weight}, + }) + } + return &v3endpointpb.ClusterLoadAssignment{ + ClusterName: clusterName, + Endpoints: localityEndpoints, + } +} + +type localityInfo struct { + name string + weight uint32 + ports []uint32 + healthStatus []v3corepb.HealthStatus +} + +// clientEndpointsResource returns an EDS resource for the specified nodeID, +// service name and localities. +func clientEndpointsResource(nodeID, edsServiceName string, localities []localityInfo) e2e.UpdateOptions { + return e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{endpointResource(edsServiceName, localities)}, + SkipValidation: true, + } +} + +// TestEDS_OneLocality tests the cluster_resolver LB policy using an EDS +// resource with one locality. The following scenarios are tested: +// 1. Single backend. Test verifies that RPCs reach this backend. +// 2. Add a backend. 
Test verifies that RPCs are roundrobined across the two +// backends. +// 3. Remove one backend. Test verifies that all RPCs reach the other backend. +// 4. Replace the backend. Test verifies that all RPCs reach the new backend. +func (s) TestEDS_OneLocality(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. + servers, cleanup2 := startTestServiceBackends(t, 3) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Create xDS resources for consumption by the test. We start off with a + // single backend in a single EDS locality. + resources := clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[:1]}}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. 
+ r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s" + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being roundrobined across the single backend. + testClient := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Add a backend to the same locality, and ensure RPCs are sent in a + // roundrobin fashion across the two backends. + resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[:2]}}) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[:2]); err != nil { + t.Fatal(err) + } + + // Remove the first backend, and ensure all RPCs are sent to the second + // backend. + resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[1:2]}}) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[1:2]); err != nil { + t.Fatal(err) + } + + // Replace the backend, and ensure all RPCs are sent to the new backend. 
+ resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[2:3]}}) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[2:3]); err != nil { + t.Fatal(err) + } +} + +// TestEDS_MultipleLocalities tests the cluster_resolver LB policy using an EDS +// resource with multiple localities. The following scenarios are tested: +// 1. Two localities, each with a single backend. Test verifies that RPCs are +// weighted roundrobined across these two backends. +// 2. Add another locality, with a single backend. Test verifies that RPCs are +// weighted roundrobined across all the backends. +// 3. Remove one locality. Test verifies that RPCs are weighted roundrobined +// across backends from the remaining localities. +// 4. Add a backend to one locality. Test verifies that RPCs are weighted +// roundrobined across localities. +// 5. Change the weight of one of the localities. Test verifies that RPCs are +// weighted roundrobined across the localities. +// +// In our LB policy tree, one of the descendents of the "cluster_resolver" LB +// policy is the "weighted_target" LB policy which performs weighted roundrobin +// across localities (and this has a randomness component associated with it). +// Therefore, the moment we have backends from more than one locality, RPCs are +// weighted roundrobined across them. +func (s) TestEDS_MultipleLocalities(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. + servers, cleanup2 := startTestServiceBackends(t, 4) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Create xDS resources for consumption by the test. 
We start off with two + // localities, and single backend in each of them. + resources := clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ + {name: localityName1, weight: 1, ports: ports[:1]}, + {name: localityName2, weight: 1, ports: ports[1:2]}, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Create a manual resolver and push service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s" + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being weighted roundrobined across the two backends. + testClient := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, addrs[0:2]); err != nil { + t.Fatal(err) + } + + // Add another locality with a single backend, and ensure RPCs are being + // weighted roundrobined across the three backends. 
+ resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ + {name: localityName1, weight: 1, ports: ports[:1]}, + {name: localityName2, weight: 1, ports: ports[1:2]}, + {name: localityName3, weight: 1, ports: ports[2:3]}, + }) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, addrs[0:3]); err != nil { + t.Fatal(err) + } + + // Remove the first locality, and ensure RPCs are being weighted + // roundrobined across the remaining two backends. + resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ + {name: localityName2, weight: 1, ports: ports[1:2]}, + {name: localityName3, weight: 1, ports: ports[2:3]}, + }) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, addrs[1:3]); err != nil { + t.Fatal(err) + } + + // Add a backend to one locality, and ensure weighted roundrobin. Since RPCs + // are roundrobined across localities, locality2's backend will receive + // twice the traffic. + resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ + {name: localityName2, weight: 1, ports: ports[1:2]}, + {name: localityName3, weight: 1, ports: ports[2:4]}, + }) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + wantAddrs := []resolver.Address{addrs[1], addrs[1], addrs[2], addrs[3]} + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, wantAddrs); err != nil { + t.Fatal(err) + } + + // Change the weight of locality2 and ensure weighted roundrobin. Since + // locality2 has twice the weight of locality3, it will be picked twice as + // frequently as locality3 for RPCs. And since locality2 has a single + // backend and locality3 has two backends, the backend in locality2 will + // receive four times the traffic of each of locality3's backends. 
+ resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ + {name: localityName2, weight: 2, ports: ports[1:2]}, + {name: localityName3, weight: 1, ports: ports[2:4]}, + }) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + wantAddrs = []resolver.Address{addrs[1], addrs[1], addrs[1], addrs[1], addrs[2], addrs[3]} + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, wantAddrs); err != nil { + t.Fatal(err) + } +} + +// TestEDS_EndpointsHealth tests the cluster_resolver LB policy using an EDS +// resource which specifies endpoint health information and verifies that +// traffic is routed only to backends deemed capable of receiving traffic. +func (s) TestEDS_EndpointsHealth(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. + servers, cleanup2 := startTestServiceBackends(t, 12) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Create xDS resources for consumption by the test. Two localities with + // six backends each, with two of the six backends being healthy. Both + // UNKNOWN and HEALTHY are considered by gRPC for load balancing. 
+ resources := clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ + {name: localityName1, weight: 1, ports: ports[:6], healthStatus: []v3corepb.HealthStatus{ + v3corepb.HealthStatus_UNKNOWN, + v3corepb.HealthStatus_HEALTHY, + v3corepb.HealthStatus_UNHEALTHY, + v3corepb.HealthStatus_DRAINING, + v3corepb.HealthStatus_TIMEOUT, + v3corepb.HealthStatus_DEGRADED, + }}, + {name: localityName2, weight: 1, ports: ports[6:12], healthStatus: []v3corepb.HealthStatus{ + v3corepb.HealthStatus_UNKNOWN, + v3corepb.HealthStatus_HEALTHY, + v3corepb.HealthStatus_UNHEALTHY, + v3corepb.HealthStatus_DRAINING, + v3corepb.HealthStatus_TIMEOUT, + v3corepb.HealthStatus_DEGRADED, + }}, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Create a manual resolver and push service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s" + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and make a successful RPC. 
+ cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being weighted roundrobined across healthy backends from + // both localities. + testClient := testpb.NewTestServiceClient(cc) + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, append(addrs[0:2], addrs[6:8]...)); err != nil { + t.Fatal(err) + } +} + +// TestEDS_EmptyUpdate tests the cluster_resolver LB policy using an EDS +// resource with no localities and verifies that RPCs fail with "all priorities +// removed" error. +func (s) TestEDS_EmptyUpdate(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. + servers, cleanup2 := startTestServiceBackends(t, 4) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + oldCacheTimeout := balancergroup.DefaultSubBalancerCloseTimeout + balancergroup.DefaultSubBalancerCloseTimeout = 100 * time.Microsecond + defer func() { balancergroup.DefaultSubBalancerCloseTimeout = oldCacheTimeout }() + + // Create xDS resources for consumption by the test. The first update is an + // empty update. This should put the channel in TRANSIENT_FAILURE. + resources := clientEndpointsResource(nodeID, edsServiceName, nil) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. 
+ client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Create a manual resolver and push service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s" + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and ensure that RPCs fail with "all priorities + // removed" error. This is the expected error when the cluster_resolver LB + // policy receives an EDS update with no localities. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + testClient := testpb.NewTestServiceClient(cc) + if err := waitForAllPrioritiesRemovedError(ctx, t, testClient); err != nil { + t.Fatal(err) + } + + // Add a locality with one backend and ensure RPCs are successful. + resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[:1]}}) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Push another empty update and ensure that RPCs fail with "all priorities + // removed" error again. 
+ resources = clientEndpointsResource(nodeID, edsServiceName, nil) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := waitForAllPrioritiesRemovedError(ctx, t, testClient); err != nil { + t.Fatal(err) + } +} + +// waitForAllPrioritiesRemovedError repeatedly makes RPCs using the +// TestServiceClient until they fail with an error which indicates that all +// priorities have been removed. A non-nil error is returned if the context +// expires before RPCs fail with the expected error. +func waitForAllPrioritiesRemovedError(ctx context.Context, t *testing.T, client testgrpc.TestServiceClient) error { + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if err == nil { + t.Log("EmptyCall() succeeded after EDS update with no localities") + continue + } + if code := status.Code(err); code != codes.Unavailable { + t.Logf("EmptyCall() returned code: %v, want: %v", code, codes.Unavailable) + continue + } + if !strings.Contains(err.Error(), priority.ErrAllPrioritiesRemoved.Error()) { + t.Logf("EmptyCall() = %v, want %v", err, priority.ErrAllPrioritiesRemoved) + continue + } + return nil + } + return errors.New("timeout when waiting for RPCs to fail with UNAVAILABLE status and priority.ErrAllPrioritiesRemoved error") +} diff --git a/xds/internal/balancer/clusterresolver/eds_impl_test.go b/xds/internal/balancer/clusterresolver/eds_impl_test.go deleted file mode 100644 index 2621b791494b..000000000000 --- a/xds/internal/balancer/clusterresolver/eds_impl_test.go +++ /dev/null @@ -1,569 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package clusterresolver - -import ( - "context" - "fmt" - "sort" - "testing" - "time" - - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/weightedtarget" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancergroup" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/clusterimpl" - "google.golang.org/grpc/xds/internal/balancer/priority" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -var ( - testClusterNames = []string{"test-cluster-1", "test-cluster-2"} - testSubZones = []string{"I", "II", "III", "IV"} - testEndpointAddrs []string -) - -const testBackendAddrsCount = 12 - -func init() { - for i := 0; i < testBackendAddrsCount; i++ { - testEndpointAddrs = append(testEndpointAddrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) - } - balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond - clusterimpl.NewRandomWRR = testutils.NewTestWRR - weightedtarget.NewRandomWRR = testutils.NewTestWRR - balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond * 100 -} - -func setupTestEDS(t *testing.T, initChild 
*internalserviceconfig.BalancerConfig) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { - xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) - cc := testutils.NewTestClientConn(t) - builder := balancer.Get(Name) - edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) - if edsb == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", Name) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - Type: DiscoveryMechanismTypeEDS, - }}, - }, - }); err != nil { - edsb.Close() - xdsC.Close() - t.Fatal(err) - } - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - edsb.Close() - xdsC.Close() - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - return edsb, cc, xdsC, func() { - edsb.Close() - xdsC.Close() - } -} - -// One locality -// - add backend -// - remove backend -// - replace backend -// - change drop rate -func (s) TestEDS_OneLocality(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - // One locality with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Pick with only the first backend. 
- if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { - t.Fatal(err) - } - - // The same locality, add one more backend. - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with two subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1, sc2}); err != nil { - t.Fatal(err) - } - - // The same locality, delete first backend. - clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) - - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - // Test pick with only the second subconn. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { - t.Fatal(err) - } - - // The same locality, replace backend. 
- clab4 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab4.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab4.Build()), nil) - - sc3 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - scToRemove = <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove) - } - edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - // Test pick with only the third subconn. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3}); err != nil { - t.Fatal(err) - } - - // The same locality, different drop rate, dropping 50%. - clab5 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], map[string]uint32{"test-drop": 50}) - clab5.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab5.Build()), nil) - - // Picks with drops. - if err := testPickerFromCh(cc.NewPickerCh, func(p balancer.Picker) error { - for i := 0; i < 100; i++ { - _, err := p.Pick(balancer.PickInfo{}) - // TODO: the dropping algorithm needs a design. When the dropping algorithm - // is fixed, this test also needs fix. - if i%2 == 0 && err == nil { - return fmt.Errorf("%d - the even number picks should be drops, got error ", i) - } else if i%2 != 0 && err != nil { - return fmt.Errorf("%d - the odd number picks should be non-drops, got error %v", i, err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // The same locality, remove drops. 
- clab6 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab6.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab6.Build()), nil) - - // Pick without drops. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3}); err != nil { - t.Fatal(err) - } -} - -// 2 locality -// - start with 2 locality -// - add locality -// - remove locality -// - address change for the locality -// - update locality weight -func (s) TestEDS_TwoLocalities(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - // Two localities, each with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Add the second locality later to make sure sc2 belongs to the second - // locality. Otherwise the test is flaky because of a map is used in EDS to - // keep localities. - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with two subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1, sc2}); err != nil { - t.Fatal(err) - } - - // Add another locality, with one backend. 
- clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - - sc3 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with three subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1, sc2, sc3}); err != nil { - t.Fatal(err) - } - - // Remove first locality. - clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - clab3.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) - - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - // Test pick with two subconns (without the first one). - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2, sc3}); err != nil { - t.Fatal(err) - } - - // Add a backend to the last locality. 
- clab4 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab4.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - clab4.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab4.Build()), nil) - - sc4 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with two subconns (without the first one). - // - // Locality-1 will be picked twice, and locality-2 will be picked twice. - // Locality-1 contains only sc2, locality-2 contains sc3 and sc4. So expect - // two sc2's and sc3, sc4. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2, sc2, sc3, sc4}); err != nil { - t.Fatal(err) - } - - // Change weight of the locality[1]. - clab5 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab5.AddLocality(testSubZones[1], 2, 0, testEndpointAddrs[1:2], nil) - clab5.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab5.Build()), nil) - - // Test pick with two subconns different locality weight. - // - // Locality-1 will be picked four times, and locality-2 will be picked twice - // (weight 2 and 1). Locality-1 contains only sc2, locality-2 contains sc3 and - // sc4. So expect four sc2's and sc3, sc4. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2, sc2, sc2, sc2, sc3, sc4}); err != nil { - t.Fatal(err) - } -} - -// The EDS balancer gets EDS resp with unhealthy endpoints. Test that only -// healthy ones are used. -func (s) TestEDS_EndpointsHealth(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - // Two localities, each 3 backend, one Healthy, one Unhealthy, one Unknown. 
- clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:6], &xdstestutils.AddLocalityOptions{ - Health: []corepb.HealthStatus{ - corepb.HealthStatus_HEALTHY, - corepb.HealthStatus_UNHEALTHY, - corepb.HealthStatus_UNKNOWN, - corepb.HealthStatus_DRAINING, - corepb.HealthStatus_TIMEOUT, - corepb.HealthStatus_DEGRADED, - }, - }) - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[6:12], &xdstestutils.AddLocalityOptions{ - Health: []corepb.HealthStatus{ - corepb.HealthStatus_HEALTHY, - corepb.HealthStatus_UNHEALTHY, - corepb.HealthStatus_UNKNOWN, - corepb.HealthStatus_DRAINING, - corepb.HealthStatus_TIMEOUT, - corepb.HealthStatus_DEGRADED, - }, - }) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - var ( - readySCs []balancer.SubConn - newSubConnAddrStrs []string - ) - for i := 0; i < 4; i++ { - addr := <-cc.NewSubConnAddrsCh - newSubConnAddrStrs = append(newSubConnAddrStrs, addr[0].Addr) - sc := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - readySCs = append(readySCs, sc) - } - - wantNewSubConnAddrStrs := []string{ - testEndpointAddrs[0], - testEndpointAddrs[2], - testEndpointAddrs[6], - testEndpointAddrs[8], - } - sortStrTrans := cmp.Transformer("Sort", func(in []string) []string { - out := append([]string(nil), in...) // Copy input to avoid mutating it. - sort.Strings(out) - return out - }) - if !cmp.Equal(newSubConnAddrStrs, wantNewSubConnAddrStrs, sortStrTrans) { - t.Fatalf("want newSubConn with address %v, got %v", wantNewSubConnAddrStrs, newSubConnAddrStrs) - } - - // There should be exactly 4 new SubConns. Check to make sure there's no - // more subconns being created. 
- select { - case <-cc.NewSubConnCh: - t.Fatalf("Got unexpected new subconn") - case <-time.After(time.Microsecond * 100): - } - - // Test roundrobin with the subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, readySCs); err != nil { - t.Fatal(err) - } -} - -// TestEDS_EmptyUpdate covers the cases when eds impl receives an empty update. -// -// It should send an error picker with transient failure to the parent. -func (s) TestEDS_EmptyUpdate(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - const cacheTimeout = 100 * time.Microsecond - oldCacheTimeout := balancergroup.DefaultSubBalancerCloseTimeout - balancergroup.DefaultSubBalancerCloseTimeout = cacheTimeout - defer func() { balancergroup.DefaultSubBalancerCloseTimeout = oldCacheTimeout }() - - // The first update is an empty update. - xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) - // Pick should fail with transient failure, and all priority removed error. - if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { - t.Fatal(err) - } - - // One locality with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Pick with only the first backend. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { - t.Fatal(err) - } - - xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) - // Pick should fail with transient failure, and all priority removed error. 
- if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { - t.Fatal(err) - } - - // Wait for the old SubConn to be removed (which happens when the child - // policy is closed), so a new update would trigger a new SubConn (we need - // this new SubConn to tell if the next picker is newly created). - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - // Handle another update with priorities and localities. - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Pick with only the first backend. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { - t.Fatal(err) - } -} - -func (s) TestEDS_CircuitBreaking(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - var maxRequests uint32 = 50 - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - MaxConcurrentRequests: &maxRequests, - Type: DiscoveryMechanismTypeEDS, - }}, - }, - }); err != nil { - t.Fatal(err) - } - - // One locality with one backend. 
- clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Picks with drops. - if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { - dones := []func(){} - defer func() { - for _, f := range dones { - f() - } - }() - - for i := 0; i < 100; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if pr.Done != nil { - dones = append(dones, func() { - pr.Done(balancer.DoneInfo{}) - }) - } - - if i < 50 && err != nil { - return fmt.Errorf("The first 50%% picks should be non-drops, got error %v", err) - } else if i > 50 && err == nil { - return fmt.Errorf("The second 50%% picks should be drops, got error ") - } - } - - for _, done := range dones { - done() - } - dones = []func(){} - - // Pick without drops. - for i := 0; i < 50; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if pr.Done != nil { - dones = append(dones, func() { - pr.Done(balancer.DoneInfo{}) - }) - } - - if err != nil { - return fmt.Errorf("The third 50%% picks should be non-drops, got error %v", err) - } - } - - return nil - }); err != nil { - t.Fatal(err.Error()) - } - - // Send another update, with only circuit breaking update (and no picker - // update afterwards). Make sure the new picker uses the new configs. - var maxRequests2 uint32 = 10 - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - MaxConcurrentRequests: &maxRequests2, - Type: DiscoveryMechanismTypeEDS, - }}, - }, - }); err != nil { - t.Fatal(err) - } - - // Picks with drops. 
- if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { - dones := []func(){} - defer func() { - for _, f := range dones { - f() - } - }() - - for i := 0; i < 100; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if pr.Done != nil { - dones = append(dones, func() { - pr.Done(balancer.DoneInfo{}) - }) - } - if i < 10 && err != nil { - return fmt.Errorf("The first 10%% picks should be non-drops, got error %v", err) - } else if i > 10 && err == nil { - return fmt.Errorf("The next 90%% picks should be drops, got error ") - } - } - - for _, done := range dones { - done() - } - dones = []func(){} - - // Pick without drops. - for i := 0; i < 10; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if pr.Done != nil { - dones = append(dones, func() { - pr.Done(balancer.DoneInfo{}) - }) - } - - if err != nil { - return fmt.Errorf("The next 10%% picks should be non-drops, got error %v", err) - } - } - return nil - }); err != nil { - t.Fatal(err.Error()) - } -} diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 4c8f0b57c02d..b08b82089898 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -19,19 +19,78 @@ package clusterresolver import ( "context" + "fmt" "testing" "time" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancergroup" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/priority" xdstestutils "google.golang.org/grpc/xds/internal/testutils" + 
"google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" ) +var ( + testClusterNames = []string{"test-cluster-1", "test-cluster-2"} + testSubZones = []string{"I", "II", "III", "IV"} + testEndpointAddrs []string +) + +const testBackendAddrsCount = 12 + +func init() { + for i := 0; i < testBackendAddrsCount; i++ { + testEndpointAddrs = append(testEndpointAddrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) + } + balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond + clusterimpl.NewRandomWRR = testutils.NewTestWRR + weightedtarget.NewRandomWRR = testutils.NewTestWRR + balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond * 100 +} + +func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { + xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) + cc := testutils.NewTestClientConn(t) + builder := balancer.Get(Name) + edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) + if edsb == nil { + t.Fatalf("builder.Build(%s) failed and returned nil", Name) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := edsb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), + BalancerConfig: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{{ + Cluster: testClusterName, + Type: DiscoveryMechanismTypeEDS, + }}, + }, + }); err != nil { + edsb.Close() + xdsC.Close() + t.Fatal(err) + } + if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { + edsb.Close() + xdsC.Close() + t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) + } + return edsb, cc, xdsC, func() { + edsb.Close() + xdsC.Close() + } +} + // When a high priority is ready, adding/removing lower locality doesn't cause // changes. 
// From 1dd025639203663e50e9da2ead009608af65169b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 29 Aug 2022 17:23:55 -0700 Subject: [PATCH 595/998] ringhash: implement a no-op ExitIdle() method (#5614) --- xds/internal/balancer/ringhash/ringhash.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index 8056b29c127d..59ccd0127a2a 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -443,6 +443,11 @@ func (b *ringhashBalancer) regeneratePicker() { func (b *ringhashBalancer) Close() {} +func (b *ringhashBalancer) ExitIdle() { + // ExitIdle implementation is a no-op because connections are either + // triggers from picks or from subConn state changes. +} + // connectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. // From f0f9f00f44785da430934a2118379aa2e2d69bd3 Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Tue, 30 Aug 2022 13:24:38 -0700 Subject: [PATCH 596/998] test/kokoro: enable pod log collection in the buildscripts (#5608) --- test/kokoro/psm-security.cfg | 2 +- test/kokoro/psm-security.sh | 22 ++++++++++++++++------ test/kokoro/xds_k8s_lb.cfg | 2 +- test/kokoro/xds_k8s_lb.sh | 10 +++++++--- test/kokoro/xds_url_map.cfg | 2 +- test/kokoro/xds_url_map.sh | 10 +++++++--- 6 files changed, 33 insertions(+), 15 deletions(-) diff --git a/test/kokoro/psm-security.cfg b/test/kokoro/psm-security.cfg index 5faa6b50458a..7d86de68633d 100644 --- a/test/kokoro/psm-security.cfg +++ b/test/kokoro/psm-security.cfg @@ -7,7 +7,7 @@ timeout_mins: 180 action { define_artifacts { regex: "artifacts/**/*sponge_log.xml" - regex: "artifacts/**/*sponge_log.log" + regex: "artifacts/**/*.log" strip_prefix: "artifacts" } } diff --git a/test/kokoro/psm-security.sh b/test/kokoro/psm-security.sh index 0606d8921445..97aca28d79b7 100755 --- 
a/test/kokoro/psm-security.sh +++ b/test/kokoro/psm-security.sh @@ -100,16 +100,20 @@ run_test() { # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage local test_name="${1:?Usage: run_test test_name}" set -x + local out_dir="${TEST_XML_OUTPUT_DIR}/${test_name}" + mkdir -pv "${out_dir}" python -m "tests.${test_name}" \ --flagfile="${TEST_DRIVER_FLAGFILE}" \ --kube_context="${KUBE_CONTEXT}" \ --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ --testing_version="${TESTING_VERSION}" \ - --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ + --nocheck_local_certs \ --force_cleanup \ - --nocheck_local_certs - set +x + --collect_app_logs \ + --log_dir="${out_dir}" \ + --xml_output_file="${out_dir}/sponge_log.xml" \ + |& tee "${out_dir}/sponge_log.log" } ####################################### @@ -151,9 +155,15 @@ main() { build_docker_images_if_needed # Run tests cd "${TEST_DRIVER_FULL_DIR}" - run_test baseline_test - run_test security_test - run_test authz_test + local failed_tests=0 + test_suites=("baseline_test" "security_test" "authz_test") + for test in "${test_suites[@]}"; do + run_test $test || (( failed_tests++ )) + done + echo "Failed test suites: ${failed_tests}" + if (( failed_tests > 0 )); then + exit 1 + fi } main "$@" diff --git a/test/kokoro/xds_k8s_lb.cfg b/test/kokoro/xds_k8s_lb.cfg index 4e40bb510306..5b989a6fe073 100644 --- a/test/kokoro/xds_k8s_lb.cfg +++ b/test/kokoro/xds_k8s_lb.cfg @@ -7,7 +7,7 @@ timeout_mins: 180 action { define_artifacts { regex: "artifacts/**/*sponge_log.xml" - regex: "artifacts/**/*sponge_log.log" + regex: "artifacts/**/*.log" strip_prefix: "artifacts" } } diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index 3487392f4544..f8a0b3a828c2 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -100,6 +100,8 @@ run_test() { # Test driver usage: # 
https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage local test_name="${1:?Usage: run_test test_name}" + local out_dir="${TEST_XML_OUTPUT_DIR}/${test_name}" + mkdir -pv "${out_dir}" set -x python -m "tests.${test_name}" \ --flagfile="${TEST_DRIVER_FLAGFILE}" \ @@ -108,9 +110,11 @@ run_test() { --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ --testing_version="${TESTING_VERSION}" \ - --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ - --force_cleanup - set +x + --force_cleanup \ + --collect_app_logs \ + --log_dir="${out_dir}" \ + --xml_output_file="${out_dir}/sponge_log.xml" \ + |& tee "${out_dir}/sponge_log.log" } ####################################### diff --git a/test/kokoro/xds_url_map.cfg b/test/kokoro/xds_url_map.cfg index f6fd84a419af..49ebc48e93c6 100644 --- a/test/kokoro/xds_url_map.cfg +++ b/test/kokoro/xds_url_map.cfg @@ -7,7 +7,7 @@ timeout_mins: 60 action { define_artifacts { regex: "artifacts/**/*sponge_log.xml" - regex: "artifacts/**/*sponge_log.log" + regex: "artifacts/**/*.log" strip_prefix: "artifacts" } } diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh index 633004b0c800..3084242252b8 100755 --- a/test/kokoro/xds_url_map.sh +++ b/test/kokoro/xds_url_map.sh @@ -87,15 +87,19 @@ run_test() { # Test driver usage: # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage local test_name="${1:?Usage: run_test test_name}" + local out_dir="${TEST_XML_OUTPUT_DIR}/${test_name}" + mkdir -pv "${out_dir}" set -x python -m "tests.${test_name}" \ --flagfile="${TEST_DRIVER_FLAGFILE}" \ + --flagfile="config/url-map.cfg" \ --kube_context="${KUBE_CONTEXT}" \ --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ --testing_version="${TESTING_VERSION}" \ - --xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \ - --flagfile="config/url-map.cfg" - set +x 
+ --collect_app_logs \ + --log_dir="${out_dir}" \ + --xml_output_file="${out_dir}/sponge_log.xml" \ + |& tee "${out_dir}/sponge_log.log" } ####################################### From c351f37ddcb2ba7bfdee1e2e3aabe984e1649a0b Mon Sep 17 00:00:00 2001 From: Abirdcfly Date: Wed, 31 Aug 2022 05:01:37 +0800 Subject: [PATCH 597/998] chore: remove duplicate word in comments (#5616) --- balancer/rls/config.go | 4 ++-- credentials/sts/sts.go | 2 +- credentials/sts/sts_test.go | 2 +- credentials/tls/certprovider/store_test.go | 2 +- internal/balancergroup/balancerstateaggregator.go | 2 +- internal/testutils/roundrobin/roundrobin.go | 2 +- security/advancedtls/advancedtls_integration_test.go | 4 ++-- xds/internal/balancer/cdsbalancer/cdsbalancer_test.go | 2 +- xds/internal/resolver/watch_service.go | 2 +- 9 files changed, 11 insertions(+), 11 deletions(-) diff --git a/balancer/rls/config.go b/balancer/rls/config.go index 576a7572e5a1..4cd0738eb695 100644 --- a/balancer/rls/config.go +++ b/balancer/rls/config.go @@ -127,7 +127,7 @@ type lbConfigJSON struct { // - across all `headers`, `constant_keys` and `extra_keys` fields: // - must not have the same `key` specified twice // - no `key` must be the empty string -// - `lookup_service` field must be set and and must parse as a target URI +// - `lookup_service` field must be set and must parse as a target URI // - if `max_age` > 5m, it should be set to 5 minutes // - if `stale_age` > `max_age`, ignore it // - if `stale_age` is set, then `max_age` must also be set @@ -185,7 +185,7 @@ func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { return nil, err } - // `lookup_service` field must be set and and must parse as a target URI. + // `lookup_service` field must be set and must parse as a target URI. 
lookupService := rlsProto.GetLookupService() if lookupService == "" { return nil, fmt.Errorf("rls: empty lookup_service in route lookup config %+v", rlsProto) diff --git a/credentials/sts/sts.go b/credentials/sts/sts.go index da5fa1ad16f5..981537ca1175 100644 --- a/credentials/sts/sts.go +++ b/credentials/sts/sts.go @@ -105,7 +105,7 @@ type Options struct { // ActorTokenType is an identifier, as described in // https://tools.ietf.org/html/rfc8693#section-3, that indicates the type of - // the the security token in the "actor_token_path" parameter. + // the security token in the "actor_token_path" parameter. ActorTokenType string // Optional. } diff --git a/credentials/sts/sts_test.go b/credentials/sts/sts_test.go index dd634361d7cf..3a2f239dacae 100644 --- a/credentials/sts/sts_test.go +++ b/credentials/sts/sts_test.go @@ -114,7 +114,7 @@ func (r errReader) Read(b []byte) (n int, err error) { } // We need a function to construct the response instead of simply declaring it -// as a variable since the the response body will be consumed by the +// as a variable since the response body will be consumed by the // credentials, and therefore we will need a new one everytime. func makeGoodResponse() *http.Response { respJSON, _ := json.Marshal(responseParameters{ diff --git a/credentials/tls/certprovider/store_test.go b/credentials/tls/certprovider/store_test.go index ee1f4a358ba5..faeba8db4fe9 100644 --- a/credentials/tls/certprovider/store_test.go +++ b/credentials/tls/certprovider/store_test.go @@ -342,7 +342,7 @@ func (s) TestStoreSingleProviderDifferentConfigs(t *testing.T) { t.Fatal(err) } - // Push new key material into only one of the fake providers and and verify + // Push new key material into only one of the fake providers and verify // that the providers returned by the store return the appropriate key // material. 
km2 := loadKeyMaterials(t, "x509/server2_cert.pem", "x509/server2_key.pem", "x509/client_ca_cert.pem") diff --git a/internal/balancergroup/balancerstateaggregator.go b/internal/balancergroup/balancerstateaggregator.go index 116394385059..816869555323 100644 --- a/internal/balancergroup/balancerstateaggregator.go +++ b/internal/balancergroup/balancerstateaggregator.go @@ -26,7 +26,7 @@ import ( // state. // // It takes care of merging sub-picker into one picker. The picking config is -// passed directly from the the parent to the aggregator implementation (instead +// passed directly from the parent to the aggregator implementation (instead // via balancer group). type BalancerStateAggregator interface { // UpdateState updates the state of the id. diff --git a/internal/testutils/roundrobin/roundrobin.go b/internal/testutils/roundrobin/roundrobin.go index 034c8c6f9d0f..8c4b4e4bd06e 100644 --- a/internal/testutils/roundrobin/roundrobin.go +++ b/internal/testutils/roundrobin/roundrobin.go @@ -111,7 +111,7 @@ func CheckRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, } iterations = append(iterations, iteration) } - // Ensure the the first iteration contains all addresses in addrs. + // Ensure the first iteration contains all addresses in addrs. gotAddrCount := make(map[string]int) for _, addr := range iterations[0] { gotAddrCount[addr]++ diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index 8cddfc234b12..d547985dd74a 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ b/security/advancedtls/advancedtls_integration_test.go @@ -308,7 +308,7 @@ func (s) TestEnd2End(t *testing.T) { // The mutual authentication works at the beginning, since ClientCert1 // trusted by ServerTrust1, ServerCert1 by ClientTrust1, and also the // custom verification check on server side allows all connections. 
- // At stage 1, server disallows the the connections by setting custom + // At stage 1, server disallows the connections by setting custom // verification check. The following calls should fail. Previous // connections should not be affected. // At stage 2, server allows all the connections again and the @@ -555,7 +555,7 @@ func createProviders(tmpFiles *tmpCredsFiles) (certprovider.Provider, certprovid // Next, we change the identity certs that IdentityProvider is watching. Since // the identity key is not changed, the IdentityProvider should ignore the // update, and the connection should still be good. -// Then the the identity key is changed. This time IdentityProvider should pick +// Then the identity key is changed. This time IdentityProvider should pick // up the update, and the connection should fail, due to the trust certs on the // other side is not changed. // Finally, the trust certs that other-side's RootProvider is watching get diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index a88081e1d0ec..dfa47913ae29 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -694,7 +694,7 @@ func (s) TestCircuitBreaking(t *testing.T) { counter.EndRequest() } -// TestClose verifies the Close() method in the the CDS balancer. +// TestClose verifies the Close() method in the CDS balancer. 
func (s) TestClose(t *testing.T) { // This creates a CDS balancer, pushes a ClientConnState update with a fake // xdsClient, and makes sure that the CDS balancer registers a watch on the diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 000927c541f9..4f8609ce9df5 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -143,7 +143,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, // update before reporting this LDS config. if w.lastUpdate.virtualHost != nil { // We want to send an update with the new fields from the new LDS - // (e.g. max stream duration), and old fields from the the previous + // (e.g. max stream duration), and old fields from the previous // RDS. // // But note that this should only happen when virtual host is set, From d875a0e8938398cb1f75bd18af02793eb37a850b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 30 Aug 2022 14:01:55 -0700 Subject: [PATCH 598/998] xdsclient: NACK cluster resource if config_source_specifier in lrs_server is not self (#5613) --- .../xdsclient/xdsresource/unmarshal_cds.go | 9 ++++- .../xdsresource/unmarshal_cds_test.go | 35 +++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 572941efb134..3621d61209a0 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -155,7 +155,14 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // xdsclient bootstrap information now (can be added if necessary). The // ServerConfig will be read and populated by the CDS balancer when // processing this field. 
- if cluster.GetLrsServer().GetSelf() != nil { + // According to A27: + // If the `lrs_server` field is set, it must have its `self` field set, in + // which case the client should use LRS for load reporting. Otherwise + // (the `lrs_server` field is not set), LRS load reporting will be disabled. + if lrs := cluster.GetLrsServer(); lrs != nil { + if lrs.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("unsupported config_source_specifier %T in lrs_server field", lrs.ConfigSourceSpecifier) + } ret.LRSServerConfig = ClusterLRSServerSelf } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 4569d135864f..67e057a6c400 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -1503,6 +1503,41 @@ func (s) TestUnmarshalCluster(t *testing.T) { }, wantErr: true, }, + { + name: "cluster resource with non-self lrs_server field", + resources: []*anypb.Any{ + testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "test", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: v3Service, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }), + }, + wantUpdate: map[string]ClusterUpdateErrTuple{ + "test": {Err: cmpopts.AnyError}, + }, + wantMD: UpdateMetadata{ + Status: ServiceStatusNACKed, + Version: testVersion, + ErrState: &UpdateErrorMetadata{ + Version: testVersion, + Err: cmpopts.AnyError, + }, + }, + wantErr: true, + }, { name: "v2 cluster", resources: []*anypb.Any{v2ClusterAny}, From ddcda5f76a3b0abccece6f1ef99ed0eb500c2732 Mon Sep 
17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 31 Aug 2022 14:37:02 -0700 Subject: [PATCH 599/998] alts: do not set WaitForReady on handshaker RPCs (#5620) --- credentials/alts/internal/handshaker/handshaker.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/credentials/alts/internal/handshaker/handshaker.go b/credentials/alts/internal/handshaker/handshaker.go index 8bc7ceee0aff..7b953a520e5b 100644 --- a/credentials/alts/internal/handshaker/handshaker.go +++ b/credentials/alts/internal/handshaker/handshaker.go @@ -158,7 +158,7 @@ type altsHandshaker struct { // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } @@ -174,7 +174,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. 
func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } From aee9f0ed1722db9c45e66eab9e530e72fa2b067c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 1 Sep 2022 15:58:29 -0700 Subject: [PATCH 600/998] orca: server side custom metrics implementation (#5531) --- internal/internal.go | 3 + orca/call_metric_recorder.go | 130 +++++++++++++ orca/call_metric_recorder_test.go | 300 ++++++++++++++++++++++++++++++ orca/internal/internal.go | 27 +++ orca/orca.go | 164 ++++++++++++++++ orca/orca_test.go | 86 +++++++++ orca/service.go | 194 +++++++++++++++++++ orca/service_test.go | 193 +++++++++++++++++++ server.go | 17 ++ 9 files changed, 1114 insertions(+) create mode 100644 orca/call_metric_recorder.go create mode 100644 orca/call_metric_recorder_test.go create mode 100644 orca/internal/internal.go create mode 100644 orca/orca.go create mode 100644 orca/orca_test.go create mode 100644 orca/service.go create mode 100644 orca/service_test.go diff --git a/internal/internal.go b/internal/internal.go index 83018be7c718..9ce1f18ae9d6 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -77,6 +77,9 @@ var ( // ClearExtraDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. ClearExtraDialOptions func() + // JoinServerOptions combines the server options passed as arguments into a + // single server option. 
+ JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from diff --git a/orca/call_metric_recorder.go b/orca/call_metric_recorder.go new file mode 100644 index 000000000000..62f2a1a6c220 --- /dev/null +++ b/orca/call_metric_recorder.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "context" + "sync" + "sync/atomic" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// CallMetricRecorder provides functionality to record per-RPC custom backend +// metrics. See CallMetricsServerOption() for more details. +// +// Safe for concurrent use. +type CallMetricRecorder struct { + cpu atomic.Value // float64 + memory atomic.Value // float64 + + mu sync.RWMutex + requestCost map[string]float64 + utilization map[string]float64 +} + +func newCallMetricRecorder() *CallMetricRecorder { + return &CallMetricRecorder{ + requestCost: make(map[string]float64), + utilization: make(map[string]float64), + } +} + +// SetCPUUtilization records a measurement for the CPU utilization metric. +func (c *CallMetricRecorder) SetCPUUtilization(val float64) { + c.cpu.Store(val) +} + +// SetMemoryUtilization records a measurement for the memory utilization metric. 
+func (c *CallMetricRecorder) SetMemoryUtilization(val float64) { + c.memory.Store(val) +} + +// SetRequestCost records a measurement for a request cost metric, +// uniquely identifiable by name. +func (c *CallMetricRecorder) SetRequestCost(name string, val float64) { + c.mu.Lock() + c.requestCost[name] = val + c.mu.Unlock() +} + +// SetUtilization records a measurement for a utilization metric uniquely +// identifiable by name. +func (c *CallMetricRecorder) SetUtilization(name string, val float64) { + c.mu.Lock() + c.utilization[name] = val + c.mu.Unlock() +} + +// toLoadReportProto dumps the recorded measurements as an OrcaLoadReport proto. +func (c *CallMetricRecorder) toLoadReportProto() *v3orcapb.OrcaLoadReport { + c.mu.RLock() + defer c.mu.RUnlock() + + cost := make(map[string]float64, len(c.requestCost)) + for k, v := range c.requestCost { + cost[k] = v + } + util := make(map[string]float64, len(c.utilization)) + for k, v := range c.utilization { + util[k] = v + } + cpu, _ := c.cpu.Load().(float64) + mem, _ := c.memory.Load().(float64) + return &v3orcapb.OrcaLoadReport{ + CpuUtilization: cpu, + MemUtilization: mem, + RequestCost: cost, + Utilization: util, + } +} + +type callMetricRecorderCtxKey struct{} + +// CallMetricRecorderFromContext returns the RPC specific custom metrics +// recorder [CallMetricRecorder] embedded in the provided RPC context. +// +// Returns nil if no custom metrics recorder is found in the provided context, +// which will be the case when custom metrics reporting is not enabled. 
+func CallMetricRecorderFromContext(ctx context.Context) *CallMetricRecorder { + rw, ok := ctx.Value(callMetricRecorderCtxKey{}).(*recorderWrapper) + if !ok { + return nil + } + return rw.recorder() +} + +func newContextWithRecorderWrapper(ctx context.Context, r *recorderWrapper) context.Context { + return context.WithValue(ctx, callMetricRecorderCtxKey{}, r) +} + +// recorderWrapper is a wrapper around a CallMetricRecorder to ensures that +// concurrent calls to CallMetricRecorderFromContext() results in only one +// allocation of the underlying metric recorder. +type recorderWrapper struct { + once sync.Once + r *CallMetricRecorder +} + +func (rw *recorderWrapper) recorder() *CallMetricRecorder { + rw.once.Do(func() { + rw.r = newCallMetricRecorder() + }) + return rw.r +} diff --git a/orca/call_metric_recorder_test.go b/orca/call_metric_recorder_test.go new file mode 100644 index 000000000000..f18d7259c249 --- /dev/null +++ b/orca/call_metric_recorder_test.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca_test + +import ( + "context" + "errors" + "io" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const defaultTestTimeout = 5 * time.Second + +// TestE2ECallMetricsUnary tests the injection of custom backend metrics from +// the server application for a unary RPC, and verifies that expected load +// reports are received at the client. +func (s) TestE2ECallMetricsUnary(t *testing.T) { + tests := []struct { + desc string + injectMetrics bool + wantProto *v3orcapb.OrcaLoadReport + wantErr error + }{ + { + desc: "with custom backend metrics", + injectMetrics: true, + wantProto: &v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 50.0, + RequestCost: map[string]float64{"queryCost": 25.0}, + Utilization: map[string]float64{"queueSize": 75.0}, + }, + }, + { + desc: "with no custom backend metrics", + injectMetrics: false, + wantErr: orca.ErrLoadReportMissing, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // A server option to enables reporting of per-call backend metrics. + callMetricsServerOption := orca.CallMetricsServerOption() + + // An interceptor to injects custom backend metrics, added only when + // the injectMetrics field in the test is set. 
+ injectingInterceptor := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + recorder := orca.CallMetricRecorderFromContext(ctx) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return nil, err + } + recorder.SetCPUUtilization(1.0) + recorder.SetMemoryUtilization(50.0) + // This value will be overwritten by a write to the same metric + // from the server handler. + recorder.SetUtilization("queueSize", 1.0) + return handler(ctx, req) + } + + // A stub server whose unary handler injects custom metrics, if the + // injectMetrics field in the test is set. It overwrites one of the + // values injected above, by the interceptor. + srv := stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if !test.injectMetrics { + return &testpb.Empty{}, nil + } + recorder := orca.CallMetricRecorderFromContext(ctx) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return nil, err + } + recorder.SetRequestCost("queryCost", 25.0) + recorder.SetUtilization("queueSize", 75.0) + return &testpb.Empty{}, nil + }, + } + + // Start the stub server with the appropriate server options. + sopts := []grpc.ServerOption{callMetricsServerOption} + if test.injectMetrics { + sopts = append(sopts, grpc.ChainUnaryInterceptor(injectingInterceptor)) + } + if err := srv.StartServer(sopts...); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + defer srv.Stop() + + // Dial the stub server. 
+ cc, err := grpc.Dial(srv.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%s) failed: %v", srv.Address, err) + } + defer cc.Close() + + // Make a unary RPC and expect the trailer metadata to contain the custom + // backend metrics as an ORCA LoadReport protobuf message. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + trailer := metadata.MD{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Trailer(&trailer)); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } + + gotProto, err := orca.ToLoadReport(trailer) + if test.wantErr != nil && !errors.Is(err, test.wantErr) { + t.Fatalf("When retrieving load report, got error: %v, want: %v", err, orca.ErrLoadReportMissing) + } + if test.wantProto != nil && !cmp.Equal(gotProto, test.wantProto, cmp.Comparer(proto.Equal)) { + t.Fatalf("Received load report in trailer: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(test.wantProto)) + } + }) + } +} + +// TestE2ECallMetricsStreaming tests the injection of custom backend metrics +// from the server application for a streaming RPC, and verifies that expected +// load reports are received at the client. +func (s) TestE2ECallMetricsStreaming(t *testing.T) { + tests := []struct { + desc string + injectMetrics bool + wantProto *v3orcapb.OrcaLoadReport + wantErr error + }{ + { + desc: "with custom backend metrics", + injectMetrics: true, + wantProto: &v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 50.0, + RequestCost: map[string]float64{"queryCost": 25.0}, + Utilization: map[string]float64{"queueSize": 75.0}, + }, + }, + { + desc: "with no custom backend metrics", + injectMetrics: false, + wantErr: orca.ErrLoadReportMissing, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // A server option to enables reporting of per-call backend metrics. 
+ callMetricsServerOption := orca.CallMetricsServerOption() + + // An interceptor which injects custom backend metrics, added only + // when the injectMetrics field in the test is set. + injectingInterceptor := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + recorder := orca.CallMetricRecorderFromContext(ss.Context()) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return err + } + recorder.SetCPUUtilization(1.0) + recorder.SetMemoryUtilization(50.0) + // This value will be overwritten by a write to the same metric + // from the server handler. + recorder.SetUtilization("queueSize", 1.0) + return handler(srv, ss) + } + + // A stub server whose streaming handler injects custom metrics, if + // the injectMetrics field in the test is set. It overwrites one of + // the values injected above, by the interceptor. + srv := stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if test.injectMetrics { + recorder := orca.CallMetricRecorderFromContext(stream.Context()) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return err + } + recorder.SetRequestCost("queryCost", 25.0) + recorder.SetUtilization("queueSize", 75.0) + } + + // Streaming implementation replies with a dummy response until the + // client closes the stream (in which case it will see an io.EOF), + // or an error occurs while reading/writing messages. + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + payload := &testpb.Payload{Body: make([]byte, 32)} + if err := stream.Send(&testpb.StreamingOutputCallResponse{Payload: payload}); err != nil { + return err + } + } + }, + } + + // Start the stub server with the appropriate server options. 
+ sopts := []grpc.ServerOption{callMetricsServerOption} + if test.injectMetrics { + sopts = append(sopts, grpc.ChainStreamInterceptor(injectingInterceptor)) + } + if err := srv.StartServer(sopts...); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + defer srv.Stop() + + // Dial the stub server. + cc, err := grpc.Dial(srv.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%s) failed: %v", srv.Address, err) + } + defer cc.Close() + + // Start the full duplex streaming RPC. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + tc := testgrpc.NewTestServiceClient(cc) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("FullDuplexCall failed: %v", err) + } + + // Send one request to the server. + payload := &testpb.Payload{Type: testpb.PayloadType_RANDOM, Body: make([]byte, 32)} + req := &testpb.StreamingOutputCallRequest{Payload: payload} + if err := stream.Send(req); err != nil { + t.Fatalf("stream.Send() failed: %v", err) + } + // Read one reply from the server. + if _, err := stream.Recv(); err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + // Close the sending side. + if err := stream.CloseSend(); err != nil { + t.Fatalf("stream.CloseSend() failed: %v", err) + } + // Make sure it is safe to read the trailer. 
+ for { + if _, err := stream.Recv(); err != nil { + break + } + } + + gotProto, err := orca.ToLoadReport(stream.Trailer()) + if test.wantErr != nil && !errors.Is(err, test.wantErr) { + t.Fatalf("When retrieving load report, got error: %v, want: %v", err, orca.ErrLoadReportMissing) + } + if test.wantProto != nil && !cmp.Equal(gotProto, test.wantProto, cmp.Comparer(proto.Equal)) { + t.Fatalf("Received load report in trailer: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(test.wantProto)) + } + }) + } +} diff --git a/orca/internal/internal.go b/orca/internal/internal.go new file mode 100644 index 000000000000..882fd8287a9b --- /dev/null +++ b/orca/internal/internal.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains orca-internal code, for testing purposes and to +// avoid polluting the godoc of the top-level orca package. +package internal + +// AllowAnyMinReportingInterval prevents clamping of the MinReportingInterval +// configured via ServiceOptions, to a minimum of 30s. +// +// For testing purposes only. +var AllowAnyMinReportingInterval interface{} // func(*ServiceOptions) diff --git a/orca/orca.go b/orca/orca.go new file mode 100644 index 000000000000..414f6ed6ef4f --- /dev/null +++ b/orca/orca.go @@ -0,0 +1,164 @@ +/* + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package orca implements Open Request Cost Aggregation, which is an open +// standard for request cost aggregation and reporting by backends and the +// corresponding aggregation of such reports by L7 load balancers (such as +// Envoy) on the data plane. In a proxyless world with gRPC enabled +// applications, aggregation of such reports will be done by the gRPC client. +// +// Experimental +// +// Notice: All APIs is this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package orca + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +var ( + logger = grpclog.Component("orca-backend-metrics") + joinServerOptions = internal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) +) + +const trailerMetadataKey = "endpoint-load-metrics-bin" + +// CallMetricsServerOption returns a server option which enables the reporting +// of per-RPC custom backend metrics for unary and streaming RPCs. +// +// Server applications interested in injecting custom backend metrics should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). 
+// +// Subsequently, server RPC handlers can retrieve a reference to the RPC +// specific custom metrics recorder [CallMetricRecorder] to be used, via a call +// to CallMetricRecorderFromContext(), and inject custom metrics at any time +// during the RPC lifecycle. +// +// The injected custom metrics will be sent as part of trailer metadata, as a +// binary-encoded [ORCA LoadReport] protobuf message, with the metadata key +// being set be "endpoint-load-metrics-bin". +// +// [ORCA LoadReport]: https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15 +func CallMetricsServerOption() grpc.ServerOption { + return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt), grpc.ChainStreamInterceptor(streamInt)) +} + +func unaryInt(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricRecorderFromContext(). + rw := &recorderWrapper{} + ctxWithRecorder := newContextWithRecorderWrapper(ctx, rw) + + resp, err := handler(ctxWithRecorder, req) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r == nil { + return resp, err + } + setTrailerMetadata(ctx, rw.r) + return resp, err +} + +func streamInt(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricRecorderFromContext(). 
+ rw := &recorderWrapper{} + ws := &wrappedStream{ + ServerStream: ss, + ctx: newContextWithRecorderWrapper(ss.Context(), rw), + } + + err := handler(srv, ws) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r == nil { + return err + } + setTrailerMetadata(ss.Context(), rw.r) + return err +} + +// setTrailerMetadata adds a trailer metadata entry with key being set to +// `trailerMetadataKey` and value being set to the binary-encoded +// orca.OrcaLoadReport protobuf message. +// +// This function is called from the unary and streaming interceptors defined +// above. Any errors encountered here are not propagated to the caller because +// they are ignored there. Hence we simply log any errors encountered here at +// warning level, and return nothing. +func setTrailerMetadata(ctx context.Context, r *CallMetricRecorder) { + b, err := proto.Marshal(r.toLoadReportProto()) + if err != nil { + logger.Warningf("failed to marshal load report: %v", err) + return + } + if err := grpc.SetTrailer(ctx, metadata.Pairs(trailerMetadataKey, string(b))); err != nil { + logger.Warningf("failed to set trailer metadata: %v", err) + } +} + +// wrappedStream wraps the grpc.ServerStream received by the streaming +// interceptor. Overrides only the Context() method to return a context which +// contains a reference to the CallMetricRecorder corresponding to this stream. +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} + +// ErrLoadReportMissing indicates no ORCA load report was found in trailers. 
+var ErrLoadReportMissing = errors.New("orca load report missing in provided metadata") + +// ToLoadReport unmarshals a binary encoded [ORCA LoadReport] protobuf message +// from md and returns the corresponding struct. The load report is expected to +// be stored as the value for key "endpoint-load-metrics-bin". +// +// If no load report was found in the provided metadata, ErrLoadReportMissing is +// returned. +// +// [ORCA LoadReport]: (https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15) +func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { + vs := md.Get(trailerMetadataKey) + if len(vs) == 0 { + return nil, ErrLoadReportMissing + } + ret := new(v3orcapb.OrcaLoadReport) + if err := proto.Unmarshal([]byte(vs[0]), ret); err != nil { + return nil, fmt.Errorf("failed to unmarshal load report found in metadata: %v", err) + } + return ret, nil +} diff --git a/orca/orca_test.go b/orca/orca_test.go new file mode 100644 index 000000000000..fd356cfba437 --- /dev/null +++ b/orca/orca_test.go @@ -0,0 +1,86 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +func TestToLoadReport(t *testing.T) { + tests := []struct { + name string + md metadata.MD + want *v3orcapb.OrcaLoadReport + wantErr bool + }{ + { + name: "no load report in metadata", + md: metadata.MD{}, + wantErr: true, + }, + { + name: "badly marshaled load report", + md: func() metadata.MD { + return metadata.Pairs("endpoint-load-metrics-bin", string("foo-bar")) + }(), + wantErr: true, + }, + { + name: "good load report", + md: func() metadata.MD { + b, _ := proto.Marshal(&v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 50.0, + RequestCost: map[string]float64{"queryCost": 25.0}, + Utilization: map[string]float64{"queueSize": 75.0}, + }) + return metadata.Pairs("endpoint-load-metrics-bin", string(b)) + }(), + want: &v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 50.0, + RequestCost: map[string]float64{"queryCost": 25.0}, + Utilization: map[string]float64{"queueSize": 75.0}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := orca.ToLoadReport(test.md) + if (err != nil) != test.wantErr { + t.Fatalf("orca.ToLoadReport(%v) = %v, wantErr: %v", test.md, err, test.wantErr) + } + if test.wantErr { + return + } + if !cmp.Equal(got, test.want, cmp.Comparer(proto.Equal)) { + t.Fatalf("Extracted load report from metadata: %s, want: %s", pretty.ToJSON(got), pretty.ToJSON(test.want)) + } + }) + } +} diff --git a/orca/service.go b/orca/service.go new file mode 100644 index 000000000000..d36b76f2a9b0 --- /dev/null +++ b/orca/service.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" +) + +func init() { + internal.AllowAnyMinReportingInterval = func(so *ServiceOptions) { + so.allowAnyMinReportingInterval = true + } +} + +// minReportingInterval is the absolute minimum value supported for +// out-of-band metrics reporting from the ORCA service implementation +// provided by the orca package. +const minReportingInterval = 30 * time.Second + +// Service provides an implementation of the OpenRcaService as defined in the +// [ORCA] service protos. Instances of this type must be created via calls to +// Register() or NewService(). +// +// Server applications can use the SetXxx() and DeleteXxx() methods to record +// measurements corresponding to backend metrics, which eventually get pushed to +// clients who have initiated the SteamCoreMetrics streaming RPC. 
+// +// [ORCA]: https://github.com/cncf/xds/blob/main/xds/service/orca/v3/orca.proto +type Service struct { + v3orcaservicegrpc.UnimplementedOpenRcaServiceServer + + // Minimum reporting interval, as configured by the user, or the default. + minReportingInterval time.Duration + + // mu guards the custom metrics injected by the server application. + mu sync.RWMutex + cpu float64 + memory float64 + utilization map[string]float64 +} + +// ServiceOptions contains options to configure the ORCA service implementation. +type ServiceOptions struct { + // MinReportingInterval sets the lower bound for how often out-of-band + // metrics are reported on the streaming RPC initiated by the client. If + // unspecified, negative or less than the default value of 30s, the default + // is used. Clients may request a higher value as part of the + // StreamCoreMetrics streaming RPC. + MinReportingInterval time.Duration + + // Allow a minReportingInterval which is less than the default of 30s. + // Used for testing purposes only. + allowAnyMinReportingInterval bool +} + +// NewService creates a new ORCA service implementation configured using the +// provided options. +func NewService(opts ServiceOptions) (*Service, error) { + // The default minimum supported reporting interval value can be overridden + // for testing purposes through the orca internal package. + if !opts.allowAnyMinReportingInterval { + if opts.MinReportingInterval < 0 || opts.MinReportingInterval < minReportingInterval { + opts.MinReportingInterval = minReportingInterval + } + } + service := &Service{ + minReportingInterval: opts.MinReportingInterval, + utilization: make(map[string]float64), + } + return service, nil +} + +// Register creates a new ORCA service implementation configured using the +// provided options and registers the same on the provided service registrar. 
+func Register(s *grpc.Server, opts ServiceOptions) (*Service, error) { + service, err := NewService(opts) + if err != nil { + return nil, err + } + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, service) + return service, nil +} + +// determineReportingInterval determines the reporting interval for out-of-band +// metrics. If the reporting interval is not specified in the request, or is +// negative or is less than the configured minimum (via +// ServiceOptions.MinReportingInterval), the latter is used. Else the value from +// the incoming request is used. +func (s *Service) determineReportingInterval(req *v3orcaservicegrpc.OrcaLoadReportRequest) time.Duration { + if req.GetReportInterval() == nil { + return s.minReportingInterval + } + dur := req.GetReportInterval().AsDuration() + if dur < s.minReportingInterval { + logger.Warningf("Received reporting interval %q is less than configured minimum: %v. Using default: %s", dur, s.minReportingInterval) + return s.minReportingInterval + } + return dur +} + +func (s *Service) sendMetricsResponse(stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + return stream.Send(s.toLoadReportProto()) +} + +// StreamCoreMetrics streams custom backend metrics injected by the server +// application. +func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicepb.OpenRcaService_StreamCoreMetricsServer) error { + ticker := time.NewTicker(s.determineReportingInterval(req)) + defer ticker.Stop() + + for { + if err := s.sendMetricsResponse(stream); err != nil { + return err + } + // Send a response containing the currently recorded metrics + select { + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + case <-ticker.C: + } + } +} + +// SetCPUUtilization records a measurement for the CPU utilization metric. 
+func (s *Service) SetCPUUtilization(val float64) { + s.mu.Lock() + s.cpu = val + s.mu.Unlock() +} + +// SetMemoryUtilization records a measurement for the memory utilization metric. +func (s *Service) SetMemoryUtilization(val float64) { + s.mu.Lock() + s.memory = val + s.mu.Unlock() +} + +// SetUtilization records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *Service) SetUtilization(name string, val float64) { + s.mu.Lock() + s.utilization[name] = val + s.mu.Unlock() +} + +// DeleteUtilization deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *Service) DeleteUtilization(name string) { + s.mu.Lock() + delete(s.utilization, name) + s.mu.Unlock() +} + +// toLoadReportProto dumps the recorded measurements as an OrcaLoadReport proto. +func (s *Service) toLoadReportProto() *v3orcapb.OrcaLoadReport { + s.mu.RLock() + defer s.mu.RUnlock() + + util := make(map[string]float64, len(s.utilization)) + for k, v := range s.utilization { + util[k] = v + } + return &v3orcapb.OrcaLoadReport{ + CpuUtilization: s.cpu, + MemUtilization: s.memory, + Utilization: util, + } +} diff --git a/orca/service_test.go b/orca/service_test.go new file mode 100644 index 000000000000..b9eff4365786 --- /dev/null +++ b/orca/service_test.go @@ -0,0 +1,193 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/orca/internal" + "google.golang.org/protobuf/types/known/durationpb" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +const requestsMetricKey = "test-service-requests" + +// An implementation of grpc_testing.TestService for the purpose of this test. +// We cannot use the StubServer approach here because we need to register the +// OpenRCAService as well on the same gRPC server. 
+type testServiceImpl struct { + mu sync.Mutex + requests int64 + + testgrpc.TestServiceServer + orcaSrv *orca.Service +} + +func (t *testServiceImpl) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + t.mu.Lock() + t.requests++ + t.mu.Unlock() + + t.orcaSrv.SetUtilization(requestsMetricKey, float64(t.requests)) + t.orcaSrv.SetCPUUtilization(50.0) + t.orcaSrv.SetMemoryUtilization(99.0) + return &testpb.SimpleResponse{}, nil +} + +func (t *testServiceImpl) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { + t.orcaSrv.DeleteUtilization(requestsMetricKey) + t.orcaSrv.SetCPUUtilization(0) + t.orcaSrv.SetMemoryUtilization(0) + return &testpb.Empty{}, nil +} + +// Test_E2E_CustomBackendMetrics_OutOfBand tests the injection of out-of-band +// custom backend metrics from the server application, and verifies that +// expected load reports are received at the client. +// +// TODO: Change this test to use the client API, when ready, to read the +// out-of-band metrics pushed by the server. +func (s) Test_E2E_CustomBackendMetrics_OutOfBand(t *testing.T) { + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Override the min reporting interval in the internal package. + const shortReportingInterval = 100 * time.Millisecond + opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval} + internal.AllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&opts) + + // Register the OpenRCAService with a very short metrics reporting interval. + s := grpc.NewServer() + orcaSrv, err := orca.Register(s, opts) + if err != nil { + t.Fatalf("orca.EnableOutOfBandMetricsReportingForTesting() failed: %v", err) + } + + // Register the test service implementation on the same grpc server, and start serving. 
+ testpb.RegisterTestServiceServer(s, &testServiceImpl{orcaSrv: orcaSrv}) + go s.Serve(lis) + defer s.Stop() + t.Logf("Started gRPC server at %s...", lis.Addr().String()) + + // Dial the test server. + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%s) failed: %v", lis.Addr().String(), err) + } + defer cc.Close() + + // Spawn a goroutine which sends 100 unary RPCs to the test server. This + // will trigger the injection of custom backend metrics from the + // testServiceImpl. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testStub := testgrpc.NewTestServiceClient(cc) + errCh := make(chan error, 1) + go func() { + for i := 0; i < 100; i++ { + if _, err := testStub.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + errCh <- fmt.Errorf("UnaryCall failed: %v", err) + return + } + time.Sleep(10 * time.Millisecond) + } + errCh <- nil + }() + + // Start the server streaming RPC to receive custom backend metrics. + oobStub := v3orcaservicegrpc.NewOpenRcaServiceClient(cc) + stream, err := oobStub.StreamCoreMetrics(ctx, &v3orcaservicepb.OrcaLoadReportRequest{ReportInterval: durationpb.New(shortReportingInterval)}) + if err != nil { + t.Fatalf("Failed to create a stream for out-of-band metrics") + } + + // Wait for the server to push metrics which indicate the completion of all + // the unary RPCs made from the above goroutine. 
+ for { + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for out-of-band custom backend metrics to match expected values") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + default: + } + + wantProto := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 50.0, + MemUtilization: 99.0, + Utilization: map[string]float64{requestsMetricKey: 100.0}, + } + gotProto, err := stream.Recv() + if err != nil { + t.Fatalf("Recv() failed: %v", err) + } + if !cmp.Equal(gotProto, wantProto, cmp.Comparer(proto.Equal)) { + t.Logf("Received load report from stream: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(wantProto)) + continue + } + // This means that we received the metrics which we expected. + break + } + + // The EmptyCall RPC is expected to delete earlier injected metrics. + if _, err := testStub.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } + // Wait for the server to push empty metrics which indicate the processing + // of the above EmptyCall RPC. + for { + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for out-of-band custom backend metrics to match expected values") + default: + } + + wantProto := &v3orcapb.OrcaLoadReport{} + gotProto, err := stream.Recv() + if err != nil { + t.Fatalf("Recv() failed: %v", err) + } + if !cmp.Equal(gotProto, wantProto, cmp.Comparer(proto.Equal)) { + t.Logf("Received load report from stream: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(wantProto)) + continue + } + // This means that we received the metrics which we expected. 
+ break + } +} diff --git a/server.go b/server.go index 2ad9da7bfccf..6ef3df67d9e5 100644 --- a/server.go +++ b/server.go @@ -79,6 +79,7 @@ func init() { internal.ClearExtraServerOptions = func() { extraServerOptions = nil } + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -214,6 +215,22 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + // WriteBufferSize determines how much data can be batched before doing a write on the wire. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. // The default value for this buffer is 32KB. From 99ae81bf6f19441d9b33e5902488e4844660f237 Mon Sep 17 00:00:00 2001 From: ethanvc Date: Fri, 2 Sep 2022 14:19:31 +0800 Subject: [PATCH 601/998] roundrobin: optimization of the roundrobin implementation. (#5607) * optimization of the roundrobin implementation. 
--- balancer/roundrobin/roundrobin.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index 274eb2f85802..f7031ad2251b 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } From 60a3a7e969c401ca16dbcd0108ad544fb35aa61c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 2 Sep 2022 14:09:10 -0700 Subject: [PATCH 602/998] cleanup: fixes for issues surfaced by vet (#5617) --- balancer/rls/balancer_test.go | 4 +--- credentials/alts/internal/conn/record_test.go | 4 ++-- internal/testutils/balancer.go | 5 +---- internal/testutils/roundrobin/roundrobin.go | 2 +- xds/internal/balancer/clusterimpl/balancer_test.go | 7 ------- xds/internal/balancer/priority/balancer_test.go | 10 ---------- 6 files changed, 5 insertions(+), 27 deletions(-) diff --git 
a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go index 7df7f4335205..6ac57188bff4 100644 --- a/balancer/rls/balancer_test.go +++ b/balancer/rls/balancer_test.go @@ -886,9 +886,7 @@ func (w *wrappingTopLevelBalancer) getStates() []balancer.State { defer w.mu.Unlock() states := make([]balancer.State, len(w.states)) - for i, s := range w.states { - states[i] = s - } + copy(states, w.states) return states } diff --git a/credentials/alts/internal/conn/record_test.go b/credentials/alts/internal/conn/record_test.go index c18f902b4012..0b4177a581ed 100644 --- a/credentials/alts/internal/conn/record_test.go +++ b/credentials/alts/internal/conn/record_test.go @@ -220,7 +220,7 @@ func testFrameTooLarge(t *testing.T, rp string) { payload := make([]byte, payloadLen) c, err := clientConn.crypto.Encrypt(nil, payload) if err != nil { - t.Fatalf(fmt.Sprintf("Error encrypting message: %v", err)) + t.Fatalf("Error encrypting message: %v", err) } msgLen := msgTypeFieldSize + len(c) framedMsg := make([]byte, MsgLenFieldSize+msgLen) @@ -229,7 +229,7 @@ func testFrameTooLarge(t *testing.T, rp string) { binary.LittleEndian.PutUint32(msg[:msgTypeFieldSize], altsRecordMsgType) copy(msg[msgTypeFieldSize:], c) if _, err = buf.Write(framedMsg); err != nil { - t.Fatal(fmt.Sprintf("Unexpected error writing to buffer: %v", err)) + t.Fatalf("Unexpected error writing to buffer: %v", err) } b := make([]byte, 1) if n, err := serverConn.Read(b); n != 0 || err == nil { diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index f23b215a7b35..5983d75bafc2 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -198,10 +198,7 @@ func (tcc *TestClientConn) WaitForPickerWithErr(ctx context.Context, want error) case <-ctx.Done(): return fmt.Errorf("timeout when waiting for an error picker with %v; last picker error: %v", want, lastErr) case picker := <-tcc.NewPickerCh: - for i := 0; i < 5; i++ { - if _, lastErr = 
picker.Pick(balancer.PickInfo{}); lastErr == nil || lastErr.Error() != want.Error() { - break - } + if _, lastErr = picker.Pick(balancer.PickInfo{}); lastErr != nil && lastErr.Error() == want.Error() { return nil } } diff --git a/internal/testutils/roundrobin/roundrobin.go b/internal/testutils/roundrobin/roundrobin.go index 8c4b4e4bd06e..8f9e79b5056b 100644 --- a/internal/testutils/roundrobin/roundrobin.go +++ b/internal/testutils/roundrobin/roundrobin.go @@ -127,7 +127,7 @@ func CheckRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, } return nil } - return fmt.Errorf("Timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) + return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) } // CheckWeightedRoundRobinRPCs verifies that EmptyCall RPCs on the given diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index a0085872a416..1d531c1a52dc 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -76,13 +76,6 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - func init() { NewRandomWRR = testutils.NewTestWRR } diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index d3329378cd73..d119076d1af9 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -70,16 +70,6 @@ func init() { balancer.Register(&anotherRR{Builder: balancer.Get(roundrobin.Name)}) } -func subConnFromPicker(t *testing.T, p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, err := p.Pick(balancer.PickInfo{}) - if err != nil { - 
t.Fatalf("unexpected error from picker.Pick: %v", err) - } - return scst.SubConn - } -} - // When a high priority is ready, adding/removing lower locality doesn't cause // changes. // From 182e9df160798063192858af72aec7148ccd5077 Mon Sep 17 00:00:00 2001 From: RedHawker <39493925+RedHawker@users.noreply.github.com> Date: Tue, 6 Sep 2022 15:35:40 -0400 Subject: [PATCH 603/998] Grab comment from proto file, similar to protoc-gen-go (#5540) --- .../grpc_lb_v1/load_balancer_grpc.pb.go | 19 ++++++++++++++++ channelz/grpc_channelz_v1/channelz_grpc.pb.go | 21 ++++++++++++++++++ cmd/protoc-gen-go-grpc/grpc.go | 22 +++++++++++++++++++ .../proto/grpc_gcp/handshaker_grpc.pb.go | 17 ++++++++++++++ examples/features/proto/echo/echo_grpc.pb.go | 17 ++++++++++++++ .../helloworld/helloworld_grpc.pb.go | 14 ++++++++++++ .../routeguide/route_guide_grpc.pb.go | 14 ++++++++++++ health/grpc_health_v1/health_grpc.pb.go | 17 ++++++++++++++ internal/proto/grpc_lookup_v1/rls_grpc.pb.go | 14 ++++++++++++ .../grpc_testing/benchmark_service_grpc.pb.go | 17 ++++++++++++++ .../report_qps_scenario_service_grpc.pb.go | 17 ++++++++++++++ interop/grpc_testing/test_grpc.pb.go | 17 ++++++++++++++ .../grpc_testing/worker_service_grpc.pb.go | 17 ++++++++++++++ profiling/proto/service_grpc.pb.go | 14 ++++++++++++ .../reflection_grpc.pb.go | 16 ++++++++++++++ reflection/grpc_testing/test_grpc.pb.go | 14 ++++++++++++ stress/grpc_testing/metrics_grpc.pb.go | 21 ++++++++++++++++++ test/grpc_testing/test_grpc.pb.go | 17 ++++++++++++++ 18 files changed, 305 insertions(+) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index cb4b3c203c51..cf1034830d58 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,3 +1,22 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/channelz/grpc_channelz_v1/channelz_grpc.pb.go b/channelz/grpc_channelz_v1/channelz_grpc.pb.go index d8803d011d89..958e51870cd7 100644 --- a/channelz/grpc_channelz_v1/channelz_grpc.pb.go +++ b/channelz/grpc_channelz_v1/channelz_grpc.pb.go @@ -1,3 +1,24 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines an interface for exporting monitoring information +// out of gRPC servers. 
See the full design at +// https://github.com/grpc/proposal/blob/master/A14-channelz.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/channelz/v1/channelz.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/cmd/protoc-gen-go-grpc/grpc.go b/cmd/protoc-gen-go-grpc/grpc.go index a21a97ac1be4..a997e29f9f1a 100644 --- a/cmd/protoc-gen-go-grpc/grpc.go +++ b/cmd/protoc-gen-go-grpc/grpc.go @@ -24,6 +24,7 @@ import ( "strings" "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" ) @@ -104,6 +105,12 @@ func (serviceGenerateHelper) formatHandlerFuncName(service *protogen.Service, hn var helper serviceGenerateHelperInterface = serviceGenerateHelper{} +// FileDescriptorProto.package field number +const fileDescriptorProtoPackageFieldNumber = 2 + +// FileDescriptorProto.syntax field number +const fileDescriptorProtoSyntaxFieldNumber = 12 + // generateFile generates a _grpc.pb.go file containing gRPC service definitions. func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { if len(file.Services) == 0 { @@ -111,6 +118,8 @@ func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated } filename := file.GeneratedFilenamePrefix + "_grpc.pb.go" g := gen.NewGeneratedFile(filename, file.GoImportPath) + // Attach all comments associated with the syntax field. + genLeadingComments(g, file.Desc.SourceLocations().ByPath(protoreflect.SourcePath{fileDescriptorProtoSyntaxFieldNumber})) g.P("// Code generated by protoc-gen-go-grpc. 
DO NOT EDIT.") g.P("// versions:") g.P("// - protoc-gen-go-grpc v", version) @@ -121,6 +130,8 @@ func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated g.P("// source: ", file.Desc.Path()) } g.P() + // Attach all comments associated with the package field. + genLeadingComments(g, file.Desc.SourceLocations().ByPath(protoreflect.SourcePath{fileDescriptorProtoPackageFieldNumber})) g.P("package ", file.GoPackageName) g.P() generateFileContent(gen, file, g) @@ -493,6 +504,17 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene return hname } +func genLeadingComments(g *protogen.GeneratedFile, loc protoreflect.SourceLocation) { + for _, s := range loc.LeadingDetachedComments { + g.P(protogen.Comments(s)) + g.P() + } + if s := loc.LeadingComments; s != "" { + g.P(protogen.Comments(s)) + g.P() + } +} + const deprecationComment = "// Deprecated: Do not use." func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index fd55176b9b69..d3562c6d5e62 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/examples/features/proto/echo/echo_grpc.pb.go b/examples/features/proto/echo/echo_grpc.pb.go index cf1ffe708118..7d0db54f8886 100644 --- a/examples/features/proto/echo/echo_grpc.pb.go +++ b/examples/features/proto/echo/echo_grpc.pb.go @@ -1,3 +1,20 @@ +// +// +// Copyright 2018 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/examples/helloworld/helloworld/helloworld_grpc.pb.go b/examples/helloworld/helloworld/helloworld_grpc.pb.go index b1423484d51b..0a878d460582 100644 --- a/examples/helloworld/helloworld/helloworld_grpc.pb.go +++ b/examples/helloworld/helloworld/helloworld_grpc.pb.go @@ -1,3 +1,17 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/examples/route_guide/routeguide/route_guide_grpc.pb.go b/examples/route_guide/routeguide/route_guide_grpc.pb.go index 32f7910a33dc..056a44f7c898 100644 --- a/examples/route_guide/routeguide/route_guide_grpc.pb.go +++ b/examples/route_guide/routeguide/route_guide_grpc.pb.go @@ -1,3 +1,17 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/health/grpc_health_v1/health_grpc.pb.go b/health/grpc_health_v1/health_grpc.pb.go index 69f525d1baeb..a332dfd7b54e 100644 --- a/health/grpc_health_v1/health_grpc.pb.go +++ b/health/grpc_health_v1/health_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 3afbf8930f9d..076b966f3446 100644 --- a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -1,3 +1,17 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/interop/grpc_testing/benchmark_service_grpc.pb.go b/interop/grpc_testing/benchmark_service_grpc.pb.go index 0e6f21e90286..7aaa44fd3faf 100644 --- a/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 gRPC authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index f327326b0cae..1c40d481b9ed 100644 --- a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/interop/grpc_testing/test_grpc.pb.go b/interop/grpc_testing/test_grpc.pb.go index 9a39fdd95a9a..64e802bb9c6f 100644 --- a/interop/grpc_testing/test_grpc.pb.go +++ b/interop/grpc_testing/test_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015-2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/interop/grpc_testing/worker_service_grpc.pb.go b/interop/grpc_testing/worker_service_grpc.pb.go index 0c4e10a4072c..1a33f7310f9d 100644 --- a/interop/grpc_testing/worker_service_grpc.pb.go +++ b/interop/grpc_testing/worker_service_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/profiling/proto/service_grpc.pb.go b/profiling/proto/service_grpc.pb.go index 218c6123c15f..83ce1336faf5 100644 --- a/profiling/proto/service_grpc.pb.go +++ b/profiling/proto/service_grpc.pb.go @@ -1,3 +1,17 @@ +// Copyright 2019 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 4e6a6b1a857b..b8e76a87dca5 100644 --- a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,3 +1,19 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/reflection/grpc_testing/test_grpc.pb.go b/reflection/grpc_testing/test_grpc.pb.go index c9a461280088..7df4c7057d96 100644 --- a/reflection/grpc_testing/test_grpc.pb.go +++ b/reflection/grpc_testing/test_grpc.pb.go @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/stress/grpc_testing/metrics_grpc.pb.go b/stress/grpc_testing/metrics_grpc.pb.go index 29fa088882de..014c6d512611 100644 --- a/stress/grpc_testing/metrics_grpc.pb.go +++ b/stress/grpc_testing/metrics_grpc.pb.go @@ -1,3 +1,24 @@ +// Copyright 2015-2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Contains the definitions for a metrics service and the type of metrics +// exposed by the service. +// +// Currently, 'Gauge' (i.e a metric that represents the measured value of +// something at an instant of time) is the only metric type supported by the +// service. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/test/grpc_testing/test_grpc.pb.go b/test/grpc_testing/test_grpc.pb.go index b328eaa3b582..51e0938f62fd 100644 --- a/test/grpc_testing/test_grpc.pb.go +++ b/test/grpc_testing/test_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.2.0 From f7d2036712ca0e5f26eea358ab806e10081b2a6e Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 6 Sep 2022 16:30:08 -0400 Subject: [PATCH 604/998] xds: add Outlier Detection Balancer (#5435) * xds: add Outlier Detection Balancer --- internal/grpcrand/grpcrand.go | 7 + test/xds/xds_client_outlier_detection_test.go | 219 ++++ .../balancer/outlierdetection/balancer.go | 797 +++++++++++- .../outlierdetection/balancer_test.go | 1067 ++++++++++++++++- .../balancer/outlierdetection/callcounter.go | 66 + .../outlierdetection/callcounter_test.go | 94 ++ .../e2e_test/outlierdetection_test.go | 369 ++++++ .../balancer/outlierdetection/logging.go | 34 + .../outlierdetection/subconn_wrapper.go | 68 ++ 9 files changed, 2711 insertions(+), 10 deletions(-) create mode 100644 test/xds/xds_client_outlier_detection_test.go create mode 100644 xds/internal/balancer/outlierdetection/callcounter.go create mode 100644 xds/internal/balancer/outlierdetection/callcounter_test.go create mode 100644 xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go create mode 100644 xds/internal/balancer/outlierdetection/logging.go create mode 100644 xds/internal/balancer/outlierdetection/subconn_wrapper.go diff --git a/internal/grpcrand/grpcrand.go b/internal/grpcrand/grpcrand.go index 740f83c2b766..517ea70642a1 100644 --- a/internal/grpcrand/grpcrand.go +++ b/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. 
func Float64() float64 { mu.Lock() diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go new file mode 100644 index 000000000000..b53439cf66ce --- /dev/null +++ b/test/xds/xds_client_outlier_detection_test.go @@ -0,0 +1,219 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/xds/e2e" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// TestOutlierDetection_NoopConfig tests the scenario where the Outlier +// Detection feature is enabled on the gRPC client, but it receives no Outlier +// 
Detection configuration from the management server. This should result in a +// no-op Outlier Detection configuration being used to configure the Outlier +// Detection balancer. This test verifies that an RPC is able to proceed +// normally with this configuration. +func (s) TestOutlierDetection_NoopConfig(t *testing.T) { + oldOD := envconfig.XDSOutlierDetection + envconfig.XDSOutlierDetection = true + internal.RegisterOutlierDetectionBalancerForTesting() + defer func() { + envconfig.XDSOutlierDetection = oldOD + internal.UnregisterOutlierDetectionBalancerForTesting() + }() + + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + defer cleanup1() + + port, cleanup2 := startTestService(t, nil) + defer cleanup2() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} + +// clientResourcesMultipleBackendsAndOD returns xDS resources which correspond +// to multiple upstreams, corresponding different backends listening on +// different localhost:port combinations. 
The resources also configure an +// Outlier Detection Balancer set up with Failure Percentage Algorithm, which +// ejects endpoints based on failure rate. +func clientResourcesMultipleBackendsAndOD(params e2e.ResourceParams, ports []uint32) e2e.UpdateOptions { + routeConfigName := "route-" + params.DialTarget + clusterName := "cluster-" + params.DialTarget + endpointsName := "endpoints-" + params.DialTarget + return e2e.UpdateOptions{ + NodeID: params.NodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(routeConfigName, params.DialTarget, clusterName)}, + Clusters: []*v3clusterpb.Cluster{clusterWithOutlierDetection(clusterName, endpointsName, params.SecLevel)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, ports)}, + } +} + +func clusterWithOutlierDetection(clusterName, edsServiceName string, secLevel e2e.SecurityLevel) *v3clusterpb.Cluster { + cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel) + cluster.OutlierDetection = &v3clusterpb.OutlierDetection{ + Interval: &durationpb.Duration{Nanos: 50000000}, // .5 seconds + BaseEjectionTime: &durationpb.Duration{Seconds: 30}, + MaxEjectionTime: &durationpb.Duration{Seconds: 300}, + MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 1}, + FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 50}, + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 100}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 1}, + FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 1}, + } + return cluster +} + +// TestOutlierDetectionWithOutlier tests the Outlier Detection Balancer e2e. It +// spins up three backends, one which consistently errors, and configures the +// ClientConn using xDS to connect to all three of those backends. 
The Outlier +// Detection Balancer should eject the connection to the backend which +// constantly errors, and thus RPC's should mainly go to backend 1 and 2. +func (s) TestOutlierDetectionWithOutlier(t *testing.T) { + oldOD := envconfig.XDSOutlierDetection + envconfig.XDSOutlierDetection = true + internal.RegisterOutlierDetectionBalancerForTesting() + defer func() { + envconfig.XDSOutlierDetection = oldOD + internal.UnregisterOutlierDetectionBalancerForTesting() + }() + + managementServer, nodeID, _, resolver, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Counters for how many times backends got called. + var count1, count2, count3 int + + // Working backend 1. + port1, cleanup1 := startTestService(t, &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + count1++ + return &testpb.Empty{}, nil + }, + Address: "localhost:0", + }) + defer cleanup1() + + // Working backend 2. + port2, cleanup2 := startTestService(t, &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + count2++ + return &testpb.Empty{}, nil + }, + Address: "localhost:0", + }) + defer cleanup2() + + // Backend 3 that will always return an error and eventually ejected. 
+ port3, cleanup3 := startTestService(t, &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + count3++ + return nil, errors.New("some error") + }, + Address: "localhost:0", + }) + defer cleanup3() + + const serviceName = "my-service-client-side-xds" + resources := clientResourcesMultipleBackendsAndOD(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + SecLevel: e2e.SecurityLevelNone, + }, []uint32{port1, port2, port3}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + for i := 0; i < 2000; i++ { + // Can either error or not depending on the backend called. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil && !strings.Contains(err.Error(), "some error") { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + time.Sleep(time.Millisecond) + } + + // Backend 1 should've gotten more than 1/3rd of the load as backend 3 + // should get ejected, leaving only 1 and 2. + if count1 < 700 { + t.Fatalf("backend 1 should've gotten more than 1/3rd of the load") + } + // Backend 2 should've gotten more than 1/3rd of the load as backend 3 + // should get ejected, leaving only 1 and 2. + if count2 < 700 { + t.Fatalf("backend 2 should've gotten more than 1/3rd of the load") + } + // Backend 3 should've gotten less than 1/3rd of the load since it gets + // ejected. 
+ if count3 > 650 { + t.Fatalf("backend 1 should've gotten more than 1/3rd of the load") + } +} diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 8729461383e9..8e54a4a10d5e 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -25,13 +25,31 @@ import ( "encoding/json" "errors" "fmt" + "math" + "sync" + "sync/atomic" + "time" + "unsafe" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +// Globals to stub out in tests. +var ( + afterFunc = time.AfterFunc + now = time.Now +) + // Name is the name of the outlier detection balancer. 
const Name = "outlier_detection_experimental" @@ -51,7 +69,20 @@ func init() { type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { - return nil + b := &outlierDetectionBalancer{ + cc: cc, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + addrs: make(map[string]*addressInfo), + scWrappers: make(map[balancer.SubConn]*subConnWrapper), + scUpdateCh: buffer.NewUnbounded(), + pickerUpdateCh: buffer.NewUnbounded(), + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + b.child = gracefulswitch.NewBalancer(b, bOpts) + go b.run() + return b } func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { @@ -82,6 +113,7 @@ func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, err return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.base_ejection_time = %s; must be >= 0", lbCfg.BaseEjectionTime) case lbCfg.MaxEjectionTime < 0: return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_time = %s; must be >= 0", lbCfg.MaxEjectionTime) + // "The fields max_ejection_percent, // success_rate_ejection.enforcement_percentage, // failure_percentage_ejection.threshold, and @@ -105,3 +137,766 @@ func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, err func (bb) Name() string { return Name } + +// scUpdate wraps a subConn update to be sent to the child balancer. +type scUpdate struct { + scw *subConnWrapper + state balancer.SubConnState +} + +type ejectionUpdate struct { + scw *subConnWrapper + isEjected bool // true for ejected, false for unejected +} + +type lbCfgUpdate struct { + lbCfg *LBConfig + // to make sure picker is updated synchronously. + done chan struct{} +} + +type outlierDetectionBalancer struct { + // These fields are safe to be accessed without holding any mutex because + // they are synchronized in run(), which makes these field accesses happen + // serially. 
+ // + // childState is the latest balancer state received from the child. + childState balancer.State + // recentPickerNoop represents whether the most recent picker sent upward to + // the balancer.ClientConn is a noop picker, which doesn't count RPC's. Used + // to suppress redundant picker updates. + recentPickerNoop bool + + closed *grpcsync.Event + done *grpcsync.Event + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + // childMu guards calls into child (to uphold the balancer.Balancer API + // guarantee of synchronous calls). + childMu sync.Mutex + child *gracefulswitch.Balancer + + // mu guards access to the following fields. It also helps to synchronize + // behaviors of the following events: config updates, firing of the interval + // timer, SubConn State updates, SubConn address updates, and child state + // updates. + // + // For example, when we receive a config update in the middle of the + // interval timer algorithm, which uses knobs present in the config, the + // balancer will wait for the interval timer algorithm to finish before + // persisting the new configuration. + // + // Another example would be the updating of the addrs map, such as from a + // SubConn address update in the middle of the interval timer algorithm + // which uses addrs. This balancer waits for the interval timer algorithm to + // finish before making the update to the addrs map. + // + // This mutex is never held at the same time as childMu (within the context + // of a single goroutine). + mu sync.Mutex + addrs map[string]*addressInfo + cfg *LBConfig + scWrappers map[balancer.SubConn]*subConnWrapper + timerStartTime time.Time + intervalTimer *time.Timer + inhibitPickerUpdates bool + updateUnconditionally bool + numAddrsEjected int // For fast calculations of percentage of addrs ejected + + scUpdateCh *buffer.Unbounded + pickerUpdateCh *buffer.Unbounded +} + +// noopConfig returns whether this balancer is configured with a logical no-op +// configuration or not. 
+// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) noopConfig() bool { + return b.cfg.SuccessRateEjection == nil && b.cfg.FailurePercentageEjection == nil +} + +// onIntervalConfig handles logic required specifically on the receipt of a +// configuration which specifies to count RPC's and periodically perform passive +// health checking based on heuristics defined in configuration every configured +// interval. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onIntervalConfig() { + var interval time.Duration + if b.timerStartTime.IsZero() { + b.timerStartTime = time.Now() + for _, addrInfo := range b.addrs { + addrInfo.callCounter.clear() + } + interval = b.cfg.Interval + } else { + interval = b.cfg.Interval - now().Sub(b.timerStartTime) + if interval < 0 { + interval = 0 + } + } + b.intervalTimer = afterFunc(interval, b.intervalTimerAlgorithm) +} + +// onNoopConfig handles logic required specifically on the receipt of a +// configuration which specifies the balancer to be a noop. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onNoopConfig() { + // "If a config is provided with both the `success_rate_ejection` and + // `failure_percentage_ejection` fields unset, skip starting the timer and + // do the following:" + // "Unset the timer start timestamp." + b.timerStartTime = time.Time{} + for _, addrInfo := range b.addrs { + // "Uneject all currently ejected addresses." + if !addrInfo.latestEjectionTimestamp.IsZero() { + b.unejectAddress(addrInfo) + } + // "Reset each address's ejection time multiplier to 0." 
+ addrInfo.ejectionTimeMultiplier = 0 + } +} + +func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + // Reject whole config if child policy doesn't exist, don't persist it for + // later. + bb := balancer.Get(lbCfg.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("outlier detection: child balancer %q not registered", lbCfg.ChildPolicy.Name) + } + + // It is safe to read b.cfg here without holding the mutex, as the only + // write to b.cfg happens later in this function. This function is part of + // the balancer.Balancer API, so it is guaranteed to be called in a + // synchronous manner, so it cannot race with this read. + if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name { + b.childMu.Lock() + err := b.child.SwitchTo(bb) + if err != nil { + b.childMu.Unlock() + return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) + } + b.childMu.Unlock() + } + + b.mu.Lock() + // Inhibit child picker updates until this UpdateClientConnState() call + // completes. If needed, a picker update containing the no-op config bit + // determined from this config and most recent state from the child will be + // sent synchronously upward at the end of this UpdateClientConnState() + // call. 
+ b.inhibitPickerUpdates = true + b.updateUnconditionally = false + b.cfg = lbCfg + + addrs := make(map[string]bool, len(s.ResolverState.Addresses)) + for _, addr := range s.ResolverState.Addresses { + addrs[addr.Addr] = true + if _, ok := b.addrs[addr.Addr]; !ok { + b.addrs[addr.Addr] = newAddressInfo() + } + } + for addr := range b.addrs { + if !addrs[addr] { + delete(b.addrs, addr) + } + } + + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + + if b.noopConfig() { + b.onNoopConfig() + } else { + b.onIntervalConfig() + } + b.mu.Unlock() + + b.childMu.Lock() + err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.cfg.ChildPolicy.Config, + }) + b.childMu.Unlock() + + done := make(chan struct{}) + b.pickerUpdateCh.Put(lbCfgUpdate{ + lbCfg: lbCfg, + done: done, + }) + <-done + + return err +} + +func (b *outlierDetectionBalancer) ResolverError(err error) { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ResolverError(err) +} + +func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + scw, ok := b.scWrappers[sc] + if !ok { + // Shouldn't happen if passed down a SubConnWrapper to child on SubConn + // creation. 
+ b.logger.Errorf("UpdateSubConnState called with SubConn that has no corresponding SubConnWrapper") + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(b.scWrappers, scw.SubConn) + } + b.scUpdateCh.Put(&scUpdate{ + scw: scw, + state: state, + }) +} + +func (b *outlierDetectionBalancer) Close() { + b.closed.Fire() + <-b.done.Done() + b.childMu.Lock() + b.child.Close() + b.childMu.Unlock() + + b.mu.Lock() + defer b.mu.Unlock() + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } +} + +func (b *outlierDetectionBalancer) ExitIdle() { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ExitIdle() +} + +// wrappedPicker delegates to the child policy's picker, and when the request +// finishes, it increments the corresponding counter in the map entry referenced +// by the subConnWrapper that was picked. If both the `success_rate_ejection` +// and `failure_percentage_ejection` fields are unset in the configuration, this +// picker will not count. +type wrappedPicker struct { + childPicker balancer.Picker + noopPicker bool +} + +func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + pr, err := wp.childPicker.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + + done := func(di balancer.DoneInfo) { + if !wp.noopPicker { + incrementCounter(pr.SubConn, di) + } + if pr.Done != nil { + pr.Done(di) + } + } + scw, ok := pr.SubConn.(*subConnWrapper) + if !ok { + // This can never happen, but check is present for defensive + // programming. 
+ logger.Errorf("Picked SubConn from child picker is not a SubConnWrapper") + return balancer.PickResult{ + SubConn: pr.SubConn, + Done: done, + }, nil + } + return balancer.PickResult{ + SubConn: scw.SubConn, + Done: done, + }, nil +} + +func incrementCounter(sc balancer.SubConn, info balancer.DoneInfo) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Shouldn't happen, as comes from child + return + } + + // scw.addressInfo and callCounter.activeBucket can be written to + // concurrently (the pointers themselves). Thus, protect the reads here with + // atomics to prevent data corruption. There exists a race in which you read + // the addressInfo or active bucket pointer and then that pointer points to + // deprecated memory. If this goroutine yields the processor, in between + // reading the addressInfo pointer and writing to the active bucket, + // UpdateAddresses can switch the addressInfo the scw points to. Writing to + // an outdated addresses is a very small race and tolerable. After reading + // callCounter.activeBucket in this picker a swap call can concurrently + // change what activeBucket points to. A50 says to swap the pointer, which + // will cause this race to write to deprecated memory the interval timer + // algorithm will never read, which makes this race alright. 
+ addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + ab := (*bucket)(atomic.LoadPointer(&addrInfo.callCounter.activeBucket)) + + if info.Err == nil { + atomic.AddUint32(&ab.numSuccesses, 1) + } else { + atomic.AddUint32(&ab.numFailures, 1) + } +} + +func (b *outlierDetectionBalancer) UpdateState(s balancer.State) { + b.pickerUpdateCh.Put(s) +} + +func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + sc, err := b.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + scw := &subConnWrapper{ + SubConn: sc, + addresses: addrs, + scUpdateCh: b.scUpdateCh, + } + b.mu.Lock() + defer b.mu.Unlock() + b.scWrappers[sc] = scw + if len(addrs) != 1 { + return scw, nil + } + addrInfo, ok := b.addrs[addrs[0].Addr] + if !ok { + return scw, nil + } + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + if !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + return scw, nil +} + +func (b *outlierDetectionBalancer) RemoveSubConn(sc balancer.SubConn) { + scw, ok := sc.(*subConnWrapper) + if !ok { // Shouldn't happen + return + } + // Remove the wrapped SubConn from the parent Client Conn. We don't remove + // from map entry until we get a Shutdown state for the SubConn, as we need + // that data to forward that state down. + b.cc.RemoveSubConn(scw.SubConn) +} + +// appendIfPresent appends the scw to the address, if the address is present in +// the Outlier Detection balancers address map. Returns nil if not present, and +// the map entry if present. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) appendIfPresent(addr string, scw *subConnWrapper) *addressInfo { + addrInfo, ok := b.addrs[addr] + if !ok { + return nil + } + + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + return addrInfo +} + +// removeSubConnFromAddressesMapEntry removes the scw from its map entry if +// present. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) removeSubConnFromAddressesMapEntry(scw *subConnWrapper) { + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + for i, sw := range addrInfo.sws { + if scw == sw { + addrInfo.sws = append(addrInfo.sws[:i], addrInfo.sws[i+1:]...) + return + } + } +} + +func (b *outlierDetectionBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Return, shouldn't happen if passed up scw + return + } + + b.cc.UpdateAddresses(scw.SubConn, addrs) + b.mu.Lock() + defer b.mu.Unlock() + + // Note that 0 addresses is a valid update/state for a SubConn to be in. + // This is correctly handled by this algorithm (handled as part of a non singular + // old address/new address). + switch { + case len(scw.addresses) == 1 && len(addrs) == 1: // single address to single address + // If the updated address is the same, then there is nothing to do + // past this point. 
+ if scw.addresses[0].Addr == addrs[0].Addr { + return + } + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo == nil { // uneject unconditionally because could have come from an ejected address + scw.uneject() + break + } + if addrInfo.latestEjectionTimestamp.IsZero() { // relay new updated subconn state + scw.uneject() + } else { + scw.eject() + } + case len(scw.addresses) == 1: // single address to multiple/no addresses + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo != nil { + addrInfo.callCounter.clear() + } + scw.uneject() + case len(addrs) == 1: // multiple/no addresses to single address + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo != nil && !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + } // otherwise multiple/no addresses to multiple/no addresses; ignore + + scw.addresses = addrs +} + +func (b *outlierDetectionBalancer) ResolveNow(opts resolver.ResolveNowOptions) { + b.cc.ResolveNow(opts) +} + +func (b *outlierDetectionBalancer) Target() string { + return b.cc.Target() +} + +func max(x, y int64) int64 { + if x < y { + return y + } + return x +} + +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +// handleSubConnUpdate stores the recent state and forward the update +// if the SubConn is not ejected. +func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { + scw := u.scw + scw.latestState = u.state + if !scw.ejected { + b.childMu.Lock() + b.child.UpdateSubConnState(scw, u.state) + b.childMu.Unlock() + } +} + +// handleEjectedUpdate handles any SubConns that get ejected/unejected, and +// forwards the appropriate corresponding subConnState to the child policy. 
+func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) { + scw := u.scw + scw.ejected = u.isEjected + // If scw.latestState has never been written to will default to connectivity + // IDLE, which is fine. + stateToUpdate := scw.latestState + if u.isEjected { + stateToUpdate = balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + } + } + b.childMu.Lock() + b.child.UpdateSubConnState(scw, stateToUpdate) + b.childMu.Unlock() +} + +// handleChildStateUpdate forwards the picker update wrapped in a wrapped picker +// with the noop picker bit present. +func (b *outlierDetectionBalancer) handleChildStateUpdate(u balancer.State) { + b.childState = u + b.mu.Lock() + if b.inhibitPickerUpdates { + // If a child's state is updated during the suppression of child + // updates, the synchronous handleLBConfigUpdate function with respect + // to UpdateClientConnState should return a picker unconditionally. + b.updateUnconditionally = true + b.mu.Unlock() + return + } + noopCfg := b.noopConfig() + b.mu.Unlock() + b.recentPickerNoop = noopCfg + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: &wrappedPicker{ + childPicker: b.childState.Picker, + noopPicker: noopCfg, + }, + }) +} + +// handleLBConfigUpdate compares whether the new config is a noop config or not, +// to the noop bit in the picker if present. It updates the picker if this bit +// changed compared to the picker currently in use. +func (b *outlierDetectionBalancer) handleLBConfigUpdate(u lbCfgUpdate) { + lbCfg := u.lbCfg + noopCfg := lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil + // If the child has sent it's first update and this config flips the noop + // bit compared to the most recent picker update sent upward, then a new + // picker with this updated bit needs to be forwarded upward. 
If a child
+// update was received during the suppression of child updates within
+// UpdateClientConnState(), then a new picker needs to be forwarded with
+// this updated state, regardless of whether this new configuration flips
+// the bit.
+	if b.childState.Picker != nil && noopCfg != b.recentPickerNoop || b.updateUnconditionally {
+		b.recentPickerNoop = noopCfg
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: b.childState.ConnectivityState,
+			Picker: &wrappedPicker{
+				childPicker: b.childState.Picker,
+				noopPicker:  noopCfg,
+			},
+		})
+	}
+	b.inhibitPickerUpdates = false
+	b.updateUnconditionally = false
+	close(u.done)
+}
+
+func (b *outlierDetectionBalancer) run() {
+	defer b.done.Fire()
+	for {
+		select {
+		case update := <-b.scUpdateCh.Get():
+			b.scUpdateCh.Load()
+			if b.closed.HasFired() { // don't send SubConn updates to child after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case *scUpdate:
+				b.handleSubConnUpdate(u)
+			case *ejectionUpdate:
+				b.handleEjectedUpdate(u)
+			}
+		case update := <-b.pickerUpdateCh.Get():
+			b.pickerUpdateCh.Load()
+			if b.closed.HasFired() { // don't send picker updates to grpc after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case balancer.State:
+				b.handleChildStateUpdate(u)
+			case lbCfgUpdate:
+				b.handleLBConfigUpdate(u)
+			}
+		case <-b.closed.Done():
+			return
+		}
+	}
+}
+
+// intervalTimerAlgorithm ejects and unejects addresses based on the Outlier
+// Detection configuration and data about each address from the previous
+// interval.
+func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { + b.mu.Lock() + defer b.mu.Unlock() + b.timerStartTime = time.Now() + + for _, addrInfo := range b.addrs { + addrInfo.callCounter.swap() + } + + if b.cfg.SuccessRateEjection != nil { + b.successRateAlgorithm() + } + + if b.cfg.FailurePercentageEjection != nil { + b.failurePercentageAlgorithm() + } + + for _, addrInfo := range b.addrs { + if addrInfo.latestEjectionTimestamp.IsZero() && addrInfo.ejectionTimeMultiplier > 0 { + addrInfo.ejectionTimeMultiplier-- + continue + } + if addrInfo.latestEjectionTimestamp.IsZero() { + // Address is already not ejected, so no need to check for whether + // to uneject the address below. + continue + } + et := b.cfg.BaseEjectionTime.Nanoseconds() * addrInfo.ejectionTimeMultiplier + met := max(b.cfg.BaseEjectionTime.Nanoseconds(), b.cfg.MaxEjectionTime.Nanoseconds()) + curTimeAfterEt := now().After(addrInfo.latestEjectionTimestamp.Add(time.Duration(min(et, met)))) + if curTimeAfterEt { + b.unejectAddress(addrInfo) + } + } + + // This conditional only for testing (since the interval timer algorithm is + // called manually), will never hit in production. + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + b.intervalTimer = afterFunc(b.cfg.Interval, b.intervalTimerAlgorithm) +} + +// addrsWithAtLeastRequestVolume returns a slice of address information of all +// addresses with at least request volume passed in. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) addrsWithAtLeastRequestVolume(requestVolume uint32) []*addressInfo { + var addrs []*addressInfo + for _, addrInfo := range b.addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + if rv >= requestVolume { + addrs = append(addrs, addrInfo) + } + } + return addrs +} + +// meanAndStdDev returns the mean and std dev of the fractions of successful +// requests of the addresses passed in. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) meanAndStdDev(addrs []*addressInfo) (float64, float64) { + var totalFractionOfSuccessfulRequests float64 + var mean float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + totalFractionOfSuccessfulRequests += float64(bucket.numSuccesses) / float64(rv) + } + mean = totalFractionOfSuccessfulRequests / float64(len(addrs)) + var sumOfSquares float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + devFromMean := (float64(bucket.numSuccesses) / float64(rv)) - mean + sumOfSquares += devFromMean * devFromMean + } + variance := sumOfSquares / float64(len(addrs)) + return mean, math.Sqrt(variance) +} + +// successRateAlgorithm ejects any addresses where the success rate falls below +// the other addresses according to mean and standard deviation, and if overall +// applicable from other set heuristics. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) successRateAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.SuccessRateEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.SuccessRateEjection.MinimumHosts) { + return + } + mean, stddev := b.meanAndStdDev(addrsToConsider) + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.SuccessRateEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures) + if successRate < (mean - stddev*(float64(ejectionCfg.StdevFactor)/1000)) { + if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// failurePercentageAlgorithm ejects any addresses where the failure percentage +// rate exceeds a set enforcement percentage, if overall applicable from other +// set heuristics. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.FailurePercentageEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.FailurePercentageEjection.MinimumHosts) { + return + } + + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.FailurePercentageEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 + if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { + if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected++ + addrInfo.latestEjectionTimestamp = b.timerStartTime + addrInfo.ejectionTimeMultiplier++ + for _, sbw := range addrInfo.sws { + sbw.eject() + } +} + +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected-- + addrInfo.latestEjectionTimestamp = time.Time{} + for _, sbw := range addrInfo.sws { + sbw.uneject() + } +} + +// addressInfo contains the runtime information about an address that pertains +// to Outlier Detection. This struct and all of its fields is protected by +// outlierDetectionBalancer.mu in the case where it is accessed through the +// address map. In the case of Picker callbacks, the writes to the activeBucket +// of callCounter are protected by atomically loading and storing +// unsafe.Pointers (see further explanation in incrementCounter()). +type addressInfo struct { + // The call result counter object. + callCounter *callCounter + + // The latest ejection timestamp, or zero if the address is currently not + // ejected. + latestEjectionTimestamp time.Time + + // The current ejection time multiplier, starting at 0. + ejectionTimeMultiplier int64 + + // A list of subchannel wrapper objects that correspond to this address. 
+ sws []*subConnWrapper +} + +func newAddressInfo() *addressInfo { + return &addressInfo{ + callCounter: newCallCounter(), + } +} diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go index 106e2b64dbc2..151684ac7fd4 100644 --- a/xds/internal/balancer/outlierdetection/balancer_test.go +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -19,20 +19,36 @@ package outlierdetection import ( + "context" "encoding/json" "errors" + "fmt" + "math" "strings" + "sync" "testing" "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" ) +var ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + type s struct { grpctest.Tester } @@ -44,6 +60,13 @@ func Test(t *testing.T) { // TestParseConfig verifies the ParseConfig() method in the Outlier Detection // Balancer. 
func (s) TestParseConfig(t *testing.T) { + const errParseConfigName = "errParseConfigBalancer" + stub.Register(errParseConfigName, stub.BalancerFuncs{ + ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return nil, errors.New("some error") + }, + }) + parser := bb{} tests := []struct { @@ -65,7 +88,7 @@ func (s) TestParseConfig(t *testing.T) { ] }`, wantCfg: &LBConfig{ - Interval: 1<<63 - 1, + Interval: math.MaxInt64, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: "xds_cluster_impl_experimental", Config: &clusterimpl.LBConfig{ @@ -270,20 +293,1046 @@ func (lbc *LBConfig) Equal(lbc2 *LBConfig) bool { return cmp.Equal(lbc.ChildPolicy, lbc2.ChildPolicy) } -func init() { - balancer.Register(errParseConfigBuilder{}) +type subConnWithState struct { + sc balancer.SubConn + state balancer.SubConnState +} + +func setup(t *testing.T) (*outlierDetectionBalancer, *testutils.TestClientConn, func()) { + t.Helper() + internal.RegisterOutlierDetectionBalancerForTesting() + builder := balancer.Get(Name) + if builder == nil { + t.Fatalf("balancer.Get(%q) returned nil", Name) + } + tcc := testutils.NewTestClientConn(t) + odB := builder.Build(tcc, balancer.BuildOptions{}) + return odB.(*outlierDetectionBalancer), tcc, func() { + odB.Close() + internal.UnregisterOutlierDetectionBalancerForTesting() + } +} + +type emptyChildConfig struct { + serviceconfig.LoadBalancingConfig } -type errParseConfigBuilder struct{} +// TestChildBasicOperations tests basic operations of the Outlier Detection +// Balancer and it's interaction with it's child. The following scenarios are +// tested, in a step by step fashion: +// 1. The Outlier Detection Balancer receives it's first good configuration. The +// balancer is expected to create a child and sent the child it's configuration. +// 2. The Outlier Detection Balancer receives new configuration that specifies a +// child's type, and the new type immediately reports READY inline. 
The first +// child balancer should be closed and the second child balancer should receive +// a config update. +// 3. The Outlier Detection Balancer is closed. The second child balancer should +// be closed. +func (s) TestChildBasicOperations(t *testing.T) { + bc := emptyChildConfig{} + + ccsCh := testutils.NewChannel() + closeCh := testutils.NewChannel() + + stub.Register(t.Name()+"child1", stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + ccsCh.Send(ccs.BalancerConfig) + return nil + }, + Close: func(bd *stub.BalancerData) { + closeCh.Send(nil) + }, + }) + + stub.Register(t.Name()+"child2", stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + // UpdateState inline to READY to complete graceful switch process + // synchronously from any UpdateClientConnState call. + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &testutils.TestConstPicker{}, + }) + ccsCh.Send(nil) + return nil + }, + Close: func(bd *stub.BalancerData) { + closeCh.Send(nil) + }, + }) + + od, tcc, _ := setup(t) + defer internal.UnregisterOutlierDetectionBalancerForTesting() + + // This first config update should cause a child to be built and forwarded + // it's first update. 
+ od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name() + "child1", + Config: bc, + }, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cr, err := ccsCh.Receive(ctx) + if err != nil { + t.Fatalf("timed out waiting for UpdateClientConnState on the first child balancer: %v", err) + } + if _, ok := cr.(emptyChildConfig); !ok { + t.Fatalf("Received child policy config of type %T, want %T", cr, emptyChildConfig{}) + } + + // This Update Client Conn State call should cause the first child balancer + // to close, and a new child to be created and also forwarded it's first + // config update. + od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name() + "child2", + Config: emptyChildConfig{}, + }, + }, + }) -func (errParseConfigBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + // Verify inline UpdateState() call from the new child eventually makes it's + // way to the Test Client Conn. + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case state := <-tcc.NewStateCh: + if state != connectivity.Ready { + t.Fatalf("ClientConn received connectivity state %v, want %v", state, connectivity.Ready) + } + } + + // Verify the first child balancer closed. + if _, err = closeCh.Receive(ctx); err != nil { + t.Fatalf("timed out waiting for the first child balancer to be closed: %v", err) + } + // Verify the second child balancer received it's first config update. 
+	if _, err = ccsCh.Receive(ctx); err != nil {
+		t.Fatalf("timed out waiting for UpdateClientConnState on the second child balancer: %v", err)
+	}
+	// Closing the Outlier Detection Balancer should close the newly created
+	// child.
+	od.Close()
+	if _, err = closeCh.Receive(ctx); err != nil {
+		t.Fatalf("timed out waiting for the second child balancer to be closed: %v", err)
+	}
+}
+
+// TestUpdateAddresses tests the functionality of UpdateAddresses and any
+// changes in the addresses/plurality of those addresses for a SubConn. The
+// Balancer is set up with two upstreams, with one of the upstreams being
+// ejected. Initially, there is one SubConn for each address. The following
+// scenarios are tested, in a step by step fashion:
+// 1. The SubConn not currently ejected switches addresses to the address that
+// is ejected. This should cause the SubConn to get ejected.
+// 2. Update this same SubConn to multiple addresses. This should cause the
+// SubConn to get unejected, as it is no longer being tracked by Outlier
+// Detection at that point.
+// 3. Update this same SubConn to different addresses, still multiple. This
+// should be a noop, as the SubConn is still no longer being tracked by Outlier
+// Detection.
+// 4. Update this same SubConn to a single address which is ejected. This
+// should cause the SubConn to be ejected.
+func (s) TestUpdateAddresses(t *testing.T) { + scsCh := testutils.NewChannel() + var scw1, scw2 balancer.SubConn + var err error + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + scw1, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw2, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw1, scw2}, + }, + }) + return nil + }, + UpdateSubConnState: func(_ *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + scsCh.Send(subConnWithState{ + sc: sc, + state: state, + }) + }}) + + od, tcc, cleanup := setup(t) + defer cleanup() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 50, + EnforcementPercentage: 100, + MinimumHosts: 2, + RequestVolume: 3, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Setup the system to where one address is ejected and one address + // isn't. 
+ select { + case <-ctx.Done(): + t.Fatal("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + // Simulate 5 successful RPC calls on the first SubConn (the first call + // to picker.Pick). + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + pi, err = picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + // Simulate 5 failed RPC calls on the second SubConn (the second call to + // picker.Pick). Thus, when the interval timer algorithm is run, the + // second SubConn's address should be ejected, which will allow us to + // further test UpdateAddresses() logic. + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + } + od.intervalTimerAlgorithm() + // verify UpdateSubConnState() got called with TRANSIENT_FAILURE for + // child with address that was ejected. + gotSCWS, err := scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw2, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + } + + // Update scw1 to another address that is currently ejected. This should + // cause scw1 to get ejected. + od.UpdateAddresses(scw1, []resolver.Address{{Addr: "address2"}}) + + // Verify that update addresses gets forwarded to ClientConn. + select { + case <-ctx.Done(): + t.Fatal("timeout while waiting for a UpdateState call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + // Verify scw1 got ejected (UpdateSubConnState called with TRANSIENT + // FAILURE). 
+	gotSCWS, err := scsCh.Receive(ctx)
+	if err != nil {
+		t.Fatalf("Error waiting for Sub Conn update: %v", err)
+	}
+	if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{
+		sc:    scw1,
+		state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure},
+	}); err != nil {
+		t.Fatalf("Error in Sub Conn update: %v", err)
+	}
+
+	// Update scw1 to multiple addresses. This should cause scw1 to get
+	// unejected, as it is no longer being tracked for Outlier Detection.
+	od.UpdateAddresses(scw1, []resolver.Address{
+		{Addr: "address1"},
+		{Addr: "address2"},
+	})
+	// Verify scw1 got unejected (UpdateSubConnState called with recent state).
+	gotSCWS, err = scsCh.Receive(ctx)
+	if err != nil {
+		t.Fatalf("Error waiting for Sub Conn update: %v", err)
+	}
+	if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{
+		sc:    scw1,
+		state: balancer.SubConnState{ConnectivityState: connectivity.Idle},
+	}); err != nil {
+		t.Fatalf("Error in Sub Conn update: %v", err)
+	}
+
+	// Update scw1 to a different multiple addresses list. A change of addresses
+	// in which the plurality goes from multiple to multiple should be a no-op,
+	// as the address continues to be ignored by outlier detection.
+	od.UpdateAddresses(scw1, []resolver.Address{
+		{Addr: "address2"},
+		{Addr: "address3"},
+	})
+	// Verify no downstream effects.
+	sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
+	defer cancel()
+	if _, err := scsCh.Receive(sCtx); err == nil {
+		t.Fatalf("no SubConn update should have been sent (no SubConn got ejected/unejected)")
+	}
+
+	// Update scw1 back to a single address, which is ejected. This should cause
+	// the SubConn to be re-ejected.
+	od.UpdateAddresses(scw1, []resolver.Address{{Addr: "address2"}})
+	// Verify scw1 got ejected (UpdateSubConnState called with TRANSIENT FAILURE).
+ gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw1, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } +} + +func scwsEqual(gotSCWS subConnWithState, wantSCWS subConnWithState) error { + if !cmp.Equal(gotSCWS, wantSCWS, cmp.AllowUnexported(subConnWithState{}, testutils.TestSubConn{}, subConnWrapper{}, addressInfo{}), cmpopts.IgnoreFields(subConnWrapper{}, "scUpdateCh")) { + return fmt.Errorf("received SubConnState: %+v, want %+v", gotSCWS, wantSCWS) + } return nil } -func (errParseConfigBuilder) Name() string { - return "errParseConfigBalancer" +type rrPicker struct { + scs []balancer.SubConn + next int +} + +func (rrp *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + sc := rrp.scs[rrp.next] + rrp.next = (rrp.next + 1) % len(rrp.scs) + return balancer.PickResult{SubConn: sc}, nil +} + +// TestDurationOfInterval tests the configured interval timer. +// The following scenarios are tested: +// 1. The Outlier Detection Balancer receives it's first config. The balancer +// should configure the timer with whatever is directly specified on the config. +// 2. The Outlier Detection Balancer receives a subsequent config. The balancer +// should configure with whatever interval is configured minus the difference +// between the current time and the previous start timestamp. +// 3. The Outlier Detection Balancer receives a no-op configuration. The +// balancer should not configure a timer at all. 
+func (s) TestDurationOfInterval(t *testing.T) { + stub.Register(t.Name(), stub.BalancerFuncs{}) + + od, _, cleanup := setup(t) + defer func(af func(d time.Duration, f func()) *time.Timer) { + cleanup() + afterFunc = af + }(afterFunc) + + durationChan := testutils.NewChannel() + afterFunc = func(dur time.Duration, _ func()) *time.Timer { + durationChan.Send(dur) + return time.NewTimer(math.MaxInt64) + } + + od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: 8 * time.Second, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + d, err := durationChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving duration from afterFunc() call: %v", err) + } + dur := d.(time.Duration) + // The configured duration should be 8 seconds - what the balancer was + // configured with. + if dur != 8*time.Second { + t.Fatalf("configured duration should have been 8 seconds to start timer") + } + + // Override time.Now to time.Now() + 5 seconds. This will represent 5 + // seconds already passing for the next check in UpdateClientConnState. + defer func(n func() time.Time) { + now = n + }(now) + now = func() time.Time { + return time.Now().Add(time.Second * 5) + } + + // UpdateClientConnState with an interval of 9 seconds. Due to 5 seconds + // already passing (from overridden time.Now function), this should start an + // interval timer of ~4 seconds. 
+ od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: 9 * time.Second, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + d, err = durationChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving duration from afterFunc() call: %v", err) + } + dur = d.(time.Duration) + if dur.Seconds() < 3.5 || 4.5 < dur.Seconds() { + t.Fatalf("configured duration should have been around 4 seconds to start timer") + } + + // UpdateClientConnState with a no-op config. This shouldn't configure the + // interval timer at all due to it being a no-op. + od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: 10 * time.Second, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // No timer should have been started. + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err = durationChan.Receive(sCtx); err == nil { + t.Fatal("No timer should have started.") + } +} + +// TestEjectUnejectSuccessRate tests the functionality of the interval timer +// algorithm when configured with SuccessRateEjection. The Outlier Detection +// Balancer will be set up with 3 SubConns, each with a different address. +// It tests the following scenarios, in a step by step fashion: +// 1. The three addresses each have 5 successes. The interval timer algorithm should +// not eject any of the addresses. +// 2. Two of the addresses have 5 successes, the third has five failures. The +// interval timer algorithm should eject the third address with five failures. +// 3. 
The interval timer algorithm is run at a later time past max ejection +// time. The interval timer algorithm should uneject the third address. +func (s) TestEjectUnejectSuccessRate(t *testing.T) { + scsCh := testutils.NewChannel() + var scw1, scw2, scw3 balancer.SubConn + var err error + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + scw1, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw2, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw3, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address3"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw1, scw2, scw3}, + }, + }) + return nil + }, + UpdateSubConnState: func(_ *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + scsCh.Send(subConnWithState{ + sc: sc, + state: state, + }) + }, + }) + + od, tcc, cleanup := setup(t) + defer func() { + cleanup() + }() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, // so the interval will never run unless called manually in test. 
+ BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 50, + EnforcementPercentage: 100, + MinimumHosts: 3, + RequestVolume: 3, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + // Set each of the three upstream addresses to have five successes each. + // This should cause none of the addresses to be ejected as none of them + // are outliers according to the success rate algorithm. + for i := 0; i < 3; i++ { + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + } + + od.intervalTimerAlgorithm() + + // verify no UpdateSubConnState() call on the child, as no addresses got + // ejected (ejected address will cause an UpdateSubConnState call). + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("no SubConn update should have been sent (no SubConn got ejected)") + } + + // Since no addresses are ejected, a SubConn update should forward down + // to the child. 
+ od.UpdateSubConnState(scw1.(*subConnWrapper).SubConn, balancer.SubConnState{ + ConnectivityState: connectivity.Connecting, + }) + + gotSCWS, err := scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw1, + state: balancer.SubConnState{ConnectivityState: connectivity.Connecting}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + + // Set two of the upstream addresses to have five successes each, and + // one of the upstream addresses to have five failures. This should + // cause the address which has five failures to be ejected according to + // the SuccessRateAlgorithm. + for i := 0; i < 2; i++ { + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + } + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + } + + // should eject address that always errored. + od.intervalTimerAlgorithm() + // Due to the address being ejected, the SubConn with that address + // should be ejected, meaning a TRANSIENT_FAILURE connectivity state + // gets reported to the child. + gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + // Only one address should be ejected. 
+ sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("Only one SubConn update should have been sent (only one SubConn got ejected)") + } + + // Now that an address is ejected, SubConn updates for SubConns using + // that address should not be forwarded downward. These SubConn updates + // will be cached to update the child sometime in the future when the + // address gets unejected. + od.UpdateSubConnState(pi.SubConn, balancer.SubConnState{ + ConnectivityState: connectivity.Connecting, + }) + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("SubConn update should not have been forwarded (the SubConn is ejected)") + } + + // Override now to cause the interval timer algorithm to always uneject + // the ejected address. This will always uneject the ejected address + // because this time is set way past the max ejection time set in the + // configuration, which will make the next interval timer algorithm run + // uneject any ejected addresses. + defer func(n func() time.Time) { + now = n + }(now) + now = func() time.Time { + return time.Now().Add(time.Second * 1000) + } + od.intervalTimerAlgorithm() + + // unejected SubConn should report latest persisted state - which is + // connecting from earlier. + gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.Connecting}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + } +} + +// TestEjectFailureRate tests the functionality of the interval timer algorithm +// when configured with FailurePercentageEjection, and also the functionality of +// noop configuration. 
The Outlier Detection Balancer will be set up with 3 +// SubConns, each with a different address. It tests the following scenarios, in +// a step by step fashion: +// 1. The three addresses each have 5 successes. The interval timer algorithm +// should not eject any of the addresses. +// 2. Two of the addresses have 5 successes, the third has five failures. The +// interval timer algorithm should eject the third address with five failures. +// 3. The Outlier Detection Balancer receives a subsequent noop config update. +// The balancer should uneject all ejected addresses. +func (s) TestEjectFailureRate(t *testing.T) { + scsCh := testutils.NewChannel() + var scw1, scw2, scw3 balancer.SubConn + var err error + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + if scw1 != nil { // UpdateClientConnState was already called, no need to recreate SubConns. + return nil + } + scw1, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw2, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw3, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address3"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + return nil + }, + UpdateSubConnState: func(_ *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + scsCh.Send(subConnWithState{ + sc: sc, + state: state, + }) + }, + }) + + od, tcc, cleanup := setup(t) + defer func() { + cleanup() + }() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + 
Interval: math.MaxInt64, // so the interval will never run unless called manually in test. + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 500, + EnforcementPercentage: 100, + MinimumHosts: 3, + RequestVolume: 3, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + od.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw1, scw2, scw3}, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + // Set each upstream address to have five successes each. This should + // cause none of the addresses to be ejected as none of them are below + // the failure percentage threshold. + for i := 0; i < 3; i++ { + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + } + + od.intervalTimerAlgorithm() + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("no SubConn update should have been sent (no SubConn got ejected)") + } + + // Set two upstream addresses to have five successes each, and one + // upstream address to have five failures. This should cause the address + // with five failures to be ejected according to the Failure Percentage + // Algorithm. 
+ for i := 0; i < 2; i++ { + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + } + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + } + + // should eject address that always errored. + od.intervalTimerAlgorithm() + + // verify UpdateSubConnState() got called with TRANSIENT_FAILURE for + // child in address that was ejected. + gotSCWS, err := scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + + // verify only one address got ejected. + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("Only one SubConn update should have been sent (only one SubConn got ejected)") + } + + // upon the Outlier Detection balancer being reconfigured with a noop + // configuration, every ejected SubConn should be unejected. 
+ od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.Idle}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + } } -func (errParseConfigBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - return nil, errors.New("some error") +// TestConcurrentOperations calls different operations on the balancer in +// separate goroutines to test for any race conditions and deadlocks. It also +// uses a child balancer which verifies that no operations on the child get +// called after the child balancer is closed. 
+func (s) TestConcurrentOperations(t *testing.T) { + closed := grpcsync.NewEvent() + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(*stub.BalancerData, balancer.ClientConnState) error { + if closed.HasFired() { + t.Error("UpdateClientConnState was called after Close(), which breaks the balancer API") + } + return nil + }, + ResolverError: func(*stub.BalancerData, error) { + if closed.HasFired() { + t.Error("ResolverError was called after Close(), which breaks the balancer API") + } + }, + UpdateSubConnState: func(*stub.BalancerData, balancer.SubConn, balancer.SubConnState) { + if closed.HasFired() { + t.Error("UpdateSubConnState was called after Close(), which breaks the balancer API") + } + }, + Close: func(*stub.BalancerData) { + closed.Fire() + }, + ExitIdle: func(*stub.BalancerData) { + if closed.HasFired() { + t.Error("ExitIdle was called after Close(), which breaks the balancer API") + } + }, + }) + + od, tcc, cleanup := setup(t) + defer func() { + cleanup() + }() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, // so the interval will never run unless called manually in test. 
+ BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateEjection: &SuccessRateEjection{ // Have both Success Rate and Failure Percentage to step through all the interval timer code + StdevFactor: 500, + EnforcementPercentage: 100, + MinimumHosts: 3, + RequestVolume: 3, + }, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 50, + EnforcementPercentage: 100, + MinimumHosts: 3, + RequestVolume: 3, + }, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + scw1, err := od.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + + scw2, err := od.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + + scw3, err := od.NewSubConn([]resolver.Address{{Addr: "address3"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + + od.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw2, scw3}, + }, + }) + + var picker balancer.Picker + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker = <-tcc.NewPickerCh: + } + + finished := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-finished: + return + default: + } + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + continue + } + pi.Done(balancer.DoneInfo{}) + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + time.Sleep(1 * time.Nanosecond) + } + }() + + 
wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-finished: + return + default: + } + od.intervalTimerAlgorithm() + } + }() + + // call Outlier Detection's balancer.ClientConn operations asynchronously. + // balancer.ClientConn operations have no guarantee from the API to be + // called synchronously. + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-finished: + return + default: + } + od.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw2, scw3}, + }, + }) + time.Sleep(1 * time.Nanosecond) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + od.NewSubConn([]resolver.Address{{Addr: "address4"}}, balancer.NewSubConnOptions{}) + }() + + wg.Add(1) + go func() { + defer wg.Done() + od.RemoveSubConn(scw1) + }() + + wg.Add(1) + go func() { + defer wg.Done() + od.UpdateAddresses(scw2, []resolver.Address{{Addr: "address3"}}) + }() + + // Call balancer.Balancers synchronously in this goroutine, upholding the + // balancer.Balancer API guarantee of synchronous calls. + od.UpdateClientConnState(balancer.ClientConnState{ // This will delete addresses and flip to no op + ResolverState: resolver.State{ + Addresses: []resolver.Address{{Addr: "address1"}}, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + // Call balancer.Balancers synchronously in this goroutine, upholding the + // balancer.Balancer API guarantee. 
+ od.UpdateSubConnState(scw1.(*subConnWrapper).SubConn, balancer.SubConnState{ + ConnectivityState: connectivity.Connecting, + }) + od.ResolverError(errors.New("some error")) + od.ExitIdle() + od.Close() + close(finished) + wg.Wait() } diff --git a/xds/internal/balancer/outlierdetection/callcounter.go b/xds/internal/balancer/outlierdetection/callcounter.go new file mode 100644 index 000000000000..4597f727b6e0 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/callcounter.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "sync/atomic" + "unsafe" +) + +type bucket struct { + numSuccesses uint32 + numFailures uint32 +} + +func newCallCounter() *callCounter { + return &callCounter{ + activeBucket: unsafe.Pointer(&bucket{}), + inactiveBucket: &bucket{}, + } +} + +// callCounter has two buckets, which each count successful and failing RPC's. +// The activeBucket is used to actively count any finished RPC's, and the +// inactiveBucket is populated with this activeBucket's data every interval for +// use by the Outlier Detection algorithm. +type callCounter struct { + // activeBucket updates every time a call finishes (from picker passed to + // Client Conn), so protect pointer read with atomic load of unsafe.Pointer + // so picker does not have to grab a mutex per RPC, the critical path. 
+ activeBucket unsafe.Pointer // bucket + inactiveBucket *bucket +} + +func (cc *callCounter) clear() { + atomic.StorePointer(&cc.activeBucket, unsafe.Pointer(&bucket{})) + cc.inactiveBucket = &bucket{} +} + +// "When the timer triggers, the inactive bucket is zeroed and swapped with the +// active bucket. Then the inactive bucket contains the number of successes and +// failures since the last time the timer triggered. Those numbers are used to +// evaluate the ejection criteria." - A50. +func (cc *callCounter) swap() { + ib := cc.inactiveBucket + *ib = bucket{} + ab := (*bucket)(atomic.SwapPointer(&cc.activeBucket, unsafe.Pointer(ib))) + cc.inactiveBucket = &bucket{ + numSuccesses: atomic.LoadUint32(&ab.numSuccesses), + numFailures: atomic.LoadUint32(&ab.numFailures), + } +} diff --git a/xds/internal/balancer/outlierdetection/callcounter_test.go b/xds/internal/balancer/outlierdetection/callcounter_test.go new file mode 100644 index 000000000000..8e4f5f29b5f8 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/callcounter_test.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package outlierdetection + +import ( + "sync/atomic" + "testing" + "unsafe" + + "github.com/google/go-cmp/cmp" +) + +func (b1 *bucket) Equal(b2 *bucket) bool { + if b1 == nil && b2 == nil { + return true + } + if (b1 != nil) != (b2 != nil) { + return false + } + if b1.numSuccesses != b2.numSuccesses { + return false + } + return b1.numFailures == b2.numFailures +} + +func (cc1 *callCounter) Equal(cc2 *callCounter) bool { + if cc1 == nil && cc2 == nil { + return true + } + if (cc1 != nil) != (cc2 != nil) { + return false + } + ab1 := (*bucket)(atomic.LoadPointer(&cc1.activeBucket)) + ab2 := (*bucket)(atomic.LoadPointer(&cc2.activeBucket)) + if !ab1.Equal(ab2) { + return false + } + return cc1.inactiveBucket.Equal(cc2.inactiveBucket) +} + +// TestClear tests that clear on the call counter clears (everything set to 0) +// the active and inactive buckets. +func (s) TestClear(t *testing.T) { + cc := newCallCounter() + ab := (*bucket)(atomic.LoadPointer(&cc.activeBucket)) + ab.numSuccesses = 1 + ab.numFailures = 2 + cc.inactiveBucket.numSuccesses = 4 + cc.inactiveBucket.numFailures = 5 + cc.clear() + // Both the active and inactive buckets should be cleared. + ccWant := newCallCounter() + if diff := cmp.Diff(cc, ccWant); diff != "" { + t.Fatalf("callCounter is different than expected, diff (-got +want): %v", diff) + } +} + +// TestSwap tests that swap() on the callCounter successfully has the desired +// end result of inactive bucket containing the previous active buckets data, +// and the active bucket being cleared. +func (s) TestSwap(t *testing.T) { + cc := newCallCounter() + ab := (*bucket)(atomic.LoadPointer(&cc.activeBucket)) + ab.numSuccesses = 1 + ab.numFailures = 2 + cc.inactiveBucket.numSuccesses = 4 + cc.inactiveBucket.numFailures = 5 + ib := cc.inactiveBucket + cc.swap() + // Inactive should pick up active's data, active should be swapped to zeroed + // inactive. 
+ ccWant := newCallCounter() + ccWant.inactiveBucket.numSuccesses = 1 + ccWant.inactiveBucket.numFailures = 2 + atomic.StorePointer(&ccWant.activeBucket, unsafe.Pointer(ib)) + if diff := cmp.Diff(cc, ccWant); diff != "" { + t.Fatalf("callCounter is different than expected, diff (-got +want): %v", diff) + } +} diff --git a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go new file mode 100644 index 000000000000..a1987bf98a99 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go @@ -0,0 +1,369 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package e2e_test contains e2e test cases for the Outlier Detection LB Policy. 
+package e2e_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + testpb "google.golang.org/grpc/test/grpc_testing" + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // To register helper functions which register/unregister Outlier Detection LB Policy. +) + +var defaultTestTimeout = 5 * time.Second + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// Setup spins up three test backends, each listening on a port on localhost. +// Two of the backends are configured to always reply with an empty response and +// no error and one is configured to always return an error. +func setupBackends(t *testing.T) ([]string, func()) { + t.Helper() + + backends := make([]*stubserver.StubServer, 3) + addresses := make([]string, 3) + // Construct and start 2 working backends. + for i := 0; i < 2; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started good TestService backend at: %q", backend.Address) + backends[i] = backend + addresses[i] = backend.Address + } + + // Construct and start a failing backend. 
+	backend := &stubserver.StubServer{
+		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
+			return nil, errors.New("some error")
+		},
+	}
+	if err := backend.StartServer(); err != nil {
+		t.Fatalf("Failed to start backend: %v", err)
+	}
+	t.Logf("Started bad TestService backend at: %q", backend.Address)
+	backends[2] = backend
+	addresses[2] = backend.Address
+	cancel := func() {
+		for _, backend := range backends {
+			backend.Stop()
+		}
+	}
+	return addresses, cancel
+}
+
+// checkRoundRobinRPCs verifies that EmptyCall RPCs on the given ClientConn,
+// connected to a server exposing the test.grpc_testing.TestService, are
+// roundrobined across the given backend addresses.
+//
+// Returns a non-nil error if context deadline expires before RPCs start to get
+// roundrobined across the given backends.
+func checkRoundRobinRPCs(ctx context.Context, client testpb.TestServiceClient, addrs []resolver.Address) error {
+	wantAddrCount := make(map[string]int)
+	for _, addr := range addrs {
+		wantAddrCount[addr.Addr]++
+	}
+	for ; ctx.Err() == nil; <-time.After(time.Millisecond) {
+		// Perform 3 iterations.
+		var iterations [][]string
+		for i := 0; i < 3; i++ {
+			iteration := make([]string, len(addrs))
+			for c := 0; c < len(addrs); c++ {
+				var peer peer.Peer
+				client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer))
+				if peer.Addr != nil {
+					iteration[c] = peer.Addr.String()
+				}
+			}
+			iterations = append(iterations, iteration)
+		}
+		// Ensure the first iteration contains all addresses in addrs.
+		gotAddrCount := make(map[string]int)
+		for _, addr := range iterations[0] {
+			gotAddrCount[addr]++
+		}
+		if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" {
+			continue
+		}
+		// Ensure all three iterations contain the same addresses.
+ if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) { + continue + } + return nil + } + return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) +} + +// TestOutlierDetectionAlgorithmsE2E tests the Outlier Detection Success Rate +// and Failure Percentage algorithms in an e2e fashion. The Outlier Detection +// Balancer is configured as the top level LB Policy of the channel with a Round +// Robin child, and connects to three upstreams. Two of the upstreams are healthy and +// one is unhealthy. The two algorithms should at some point eject the failing +// upstream, causing RPC's to not be routed to those two upstreams, and only be +// Round Robined across the two healthy upstreams. Other than the intervals the +// two unhealthy upstreams are ejected, RPC's should regularly round robin +// across all three upstreams. +func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { + tests := []struct { + name string + odscJSON string + }{ + { + name: "Success Rate Algorithm", + odscJSON: ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": 50000000, + "baseEjectionTime": 100000000, + "maxEjectionTime": 300000000000, + "maxEjectionPercent": 33, + "successRateEjection": { + "stdevFactor": 50, + "enforcementPercentage": 100, + "minimumHosts": 3, + "requestVolume": 5 + }, + "childPolicy": [{"round_robin": {}}] + } + } + ] +}`, + }, + { + name: "Failure Percentage Algorithm", + odscJSON: ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": 50000000, + "baseEjectionTime": 100000000, + "maxEjectionTime": 300000000000, + "maxEjectionPercent": 33, + "failurePercentageEjection": { + "threshold": 50, + "enforcementPercentage": 100, + "minimumHosts": 3, + "requestVolume": 5 + }, + "childPolicy": [{"round_robin": {}} + ] + } + } + ] +}`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + 
internal.RegisterOutlierDetectionBalancerForTesting() + defer internal.UnregisterOutlierDetectionBalancerForTesting() + addresses, cancel := setupBackends(t) + defer cancel() + + mr := manual.NewBuilderWithScheme("od-e2e") + defer mr.Close() + + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(test.odscJSON) + // The full list of addresses. + fullAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + {Addr: addresses[2]}, + } + mr.InitialState(resolver.State{ + Addresses: fullAddresses, + ServiceConfig: sc, + }) + + cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testServiceClient := testpb.NewTestServiceClient(cc) + + // At first, due to no statistics on each of the backends, the 3 + // upstreams should all be round robined across. + if err = checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + } + // After calling the three upstreams, one of them constantly error + // and should eventually be ejected for a period of time. This + // period of time should cause the RPC's to be round robined only + // across the two that are healthy. + if err = checkRoundRobinRPCs(ctx, testServiceClient, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The failing upstream isn't ejected indefinitely, and eventually + // should be unejected in subsequent iterations of the interval + // algorithm as per the spec for the two specific algorithms. 
+ if err = checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + }) + } +} + +// TestNoopConfiguration tests the Outlier Detection Balancer configured with a +// noop configuration. The noop configuration should cause the Outlier Detection +// Balancer to not count RPC's, and thus never eject any upstreams and continue +// to route to every upstream connected to, even if they continuously error. +// Once the Outlier Detection Balancer gets reconfigured with configuration +// requiring counting RPC's, the Outlier Detection Balancer should start +// ejecting any upstreams as specified in the configuration. +func (s) TestNoopConfiguration(t *testing.T) { + internal.RegisterOutlierDetectionBalancerForTesting() + defer internal.UnregisterOutlierDetectionBalancerForTesting() + addresses, cancel := setupBackends(t) + defer cancel() + + mr := manual.NewBuilderWithScheme("od-e2e") + defer mr.Close() + + noopODServiceConfigJSON := ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": 50000000, + "baseEjectionTime": 100000000, + "maxEjectionTime": 300000000000, + "maxEjectionPercent": 33, + "childPolicy": [{"round_robin": {}}] + } + } + ] +}` + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(noopODServiceConfigJSON) + // The full list of addresses. 
+ fullAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + {Addr: addresses[2]}, + } + mr.InitialState(resolver.State{ + Addresses: fullAddresses, + ServiceConfig: sc, + }) + cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testServiceClient := testpb.NewTestServiceClient(cc) + + for i := 0; i < 2; i++ { + // Since the Outlier Detection Balancer starts with a noop + // configuration, it shouldn't count RPCs or eject any upstreams. Thus, + // even though an upstream it connects to constantly errors, it should + // continue to Round Robin across every upstream. + if err := checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + } + + // Reconfigure the Outlier Detection Balancer with a configuration that + // specifies to count RPC's and eject upstreams. Due to the balancer no + // longer being a noop, it should eject any unhealthy addresses as specified + // by the failure percentage portion of the configuration. + countingODServiceConfigJSON := ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": 50000000, + "baseEjectionTime": 100000000, + "maxEjectionTime": 300000000000, + "maxEjectionPercent": 33, + "failurePercentageEjection": { + "threshold": 50, + "enforcementPercentage": 100, + "minimumHosts": 3, + "requestVolume": 5 + }, + "childPolicy": [{"round_robin": {}}] + } + } + ] +}` + sc = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(countingODServiceConfigJSON) + + mr.UpdateState(resolver.State{ + Addresses: fullAddresses, + ServiceConfig: sc, + }) + + // At first on the reconfigured balancer, the balancer has no stats + // collected about upstreams. 
Thus, it should at first route across the full + // upstream list. + if err = checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + } + // Now that the reconfigured balancer has data about the failing upstream, + // it should eject the upstream and only route across the two healthy + // upstreams. + if err = checkRoundRobinRPCs(ctx, testServiceClient, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } +} diff --git a/xds/internal/balancer/outlierdetection/logging.go b/xds/internal/balancer/outlierdetection/logging.go new file mode 100644 index 000000000000..705b0cb6918d --- /dev/null +++ b/xds/internal/balancer/outlierdetection/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package outlierdetection + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[outlier-detection-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *outlierDetectionBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/xds/internal/balancer/outlierdetection/subconn_wrapper.go new file mode 100644 index 000000000000..8e25eb788b1d --- /dev/null +++ b/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/resolver" +) + +// subConnWrapper wraps every created SubConn in the Outlier Detection Balancer, +// to help track the latest state update from the underlying SubConn, and also +// whether or not this SubConn is ejected. +type subConnWrapper struct { + balancer.SubConn + + // addressInfo is a pointer to the subConnWrapper's corresponding address + // map entry, if the map entry exists. 
+ addressInfo unsafe.Pointer // *addressInfo + // These two pieces of state will reach eventual consistency due to sync in + // run(), and child will always have the correctly updated SubConnState. + // latestState is the latest state update from the underlying SubConn. This + // is used whenever a SubConn gets unejected. + latestState balancer.SubConnState + ejected bool + + scUpdateCh *buffer.Unbounded + + // addresses is the list of address(es) this SubConn was created with to + // help support any change in address(es) + addresses []resolver.Address +} + +// eject causes the wrapper to report a state update with the TRANSIENT_FAILURE +// state, and to stop passing along updates from the underlying subchannel. +func (scw *subConnWrapper) eject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: true, + }) +} + +// uneject causes the wrapper to report a state update with the latest update +// from the underlying subchannel, and resume passing along updates from the +// underlying subchannel. 
+func (scw *subConnWrapper) uneject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: false, + }) +} From 1530d3b2413babecf2e35d60d4d81f33d653f5b4 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 6 Sep 2022 20:14:26 -0400 Subject: [PATCH 605/998] gcp/observability: fix End() to cleanup global state correctly (#5623) * gcp/observability: fix End() to cleanup global state correctly --- .github/workflows/testing.yml | 2 +- gcp/observability/observability.go | 1 + gcp/observability/observability_test.go | 13 ++++++------- gcp/observability/opencensus.go | 24 +++++++++++++++++++++++- 4 files changed, 31 insertions(+), 9 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 84c2907bbb6d..6d9571707bf8 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -109,7 +109,7 @@ jobs: cd "${GITHUB_WORKSPACE}" for MOD_FILE in $(find . -name 'go.mod' | grep -Ev '^\./go\.mod'); do pushd "$(dirname ${MOD_FILE})" - go test ${{ matrix.testflags }} -timeout 2m ./... + go test ${{ matrix.testflags }} -cpu 1,4 -timeout 2m ./... popd done diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go index 1ab20d4cab5b..3855bc7ebaf0 100644 --- a/gcp/observability/observability.go +++ b/gcp/observability/observability.go @@ -80,4 +80,5 @@ func Start(ctx context.Context) error { // Note: this method should only be invoked once. 
func End() { defaultLogger.Close() + stopOpenCensus() } diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 3e8f1d704bda..e9238f898abd 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" - "google.golang.org/grpc/internal" iblog "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" @@ -387,6 +386,12 @@ func (fe *fakeOpenCensusExporter) ExportSpan(vd *trace.SpanData) { fe.t.Logf("Span[%v]", vd.Name) } +func (fe *fakeOpenCensusExporter) Flush() {} + +func (fe *fakeOpenCensusExporter) Close() error { + return nil +} + func (s) TestLoggingForOkCall(t *testing.T) { te := newTest(t) defer te.tearDown() @@ -875,12 +880,6 @@ func (s) TestCustomTagsTracingMetrics(t *testing.T) { cleanup, err := createTmpConfigInFileSystem(configJSON) defer cleanup() - // To clear globally registered tracing and metrics exporters. 
- defer func() { - internal.ClearExtraDialOptions() - internal.ClearExtraServerOptions() - }() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() err = Start(ctx) diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 7d297f90bfc3..ccaa6a98a42c 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -56,8 +56,12 @@ func tagsToTraceAttributes(tags map[string]string) map[string]interface{} { type tracingMetricsExporter interface { trace.Exporter view.Exporter + Flush() + Close() error } +var exporter tracingMetricsExporter + // global to stub out in tests var newExporter = newStackdriverExporter @@ -87,7 +91,8 @@ func startOpenCensus(config *config) error { return nil } - exporter, err := newExporter(config) + var err error + exporter, err = newExporter(config) if err != nil { return err } @@ -118,3 +123,20 @@ func startOpenCensus(config *config) error { return nil } + +// stopOpenCensus flushes the exporter's and cleans up globals across all +// packages if exporter was created. +func stopOpenCensus() { + if exporter != nil { + internal.ClearExtraDialOptions() + internal.ClearExtraServerOptions() + + // Call these unconditionally, doesn't matter if not registered, will be + // a noop if not registered. + trace.UnregisterExporter(exporter) + view.UnregisterExporter(exporter) + + exporter.Flush() + exporter.Close() + } +} From 2ebd59436d74a8809fdc7381e65d6e17a24ec8ae Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 7 Sep 2022 14:58:06 +0000 Subject: [PATCH 606/998] Documentation/proxy: update due to Go 1.16 behavior change (#5630) --- Documentation/proxy.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Documentation/proxy.md b/Documentation/proxy.md index 8fd6ee5248a8..189cdfbcb09b 100644 --- a/Documentation/proxy.md +++ b/Documentation/proxy.md @@ -1,8 +1,9 @@ # Proxy HTTP CONNECT proxies are supported by default in gRPC. 
The proxy address can be -specified by the environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or -the lowercase versions thereof). +specified by the environment variables `HTTPS_PROXY` and `NO_PROXY`. Before Go +1.16, if the `HTTPS_PROXY` environment variable is unset, `HTTP_PROXY` will be +used instead. (Note that these environment variables are case insensitive.) ## Custom proxy @@ -12,4 +13,4 @@ connection before giving it to gRPC. If the default proxy doesn't work for you, replace the default dialer with your custom proxy dialer. This can be done using -[`WithDialer`](https://godoc.org/google.golang.org/grpc#WithDialer). \ No newline at end of file +[`WithDialer`](https://godoc.org/google.golang.org/grpc#WithDialer). From 60eecd9169ddc74ee6cf4bd52e2e06ef6766c5b7 Mon Sep 17 00:00:00 2001 From: horpto <__Singleton__@hackerdom.ru> Date: Wed, 7 Sep 2022 23:14:42 +0300 Subject: [PATCH 607/998] metadata: add ValueFromIncomingContext to more efficiently retrieve a single value (#5596) --- metadata/metadata.go | 55 ++++++++++++++++++++++++++++++++------- metadata/metadata_test.go | 37 ++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 10 deletions(-) diff --git a/metadata/metadata.go b/metadata/metadata.go index 8e0f6abe89d7..98d62e0675f6 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -50,7 +50,7 @@ type MD map[string][]string // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. 
func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -74,7 +74,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -182,19 +182,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - s := make([]string, len(v)) - copy(s, v) - out[key] = s + out[key] = copyOf(v) } return out, true } +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. 
// // Remember to perform strings.ToLower on the keys, for both the returned MD (MD @@ -222,15 +254,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - out := MD{} + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) for k, v := range raw.md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - s := make([]string, len(v)) - copy(s, v) - out[key] = s + out[key] = copyOf(v) } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/metadata/metadata_test.go b/metadata/metadata_test.go index 89be06eaada0..57763cd6a973 100644 --- a/metadata/metadata_test.go +++ b/metadata/metadata_test.go @@ -198,6 +198,43 @@ func (s) TestDelete(t *testing.T) { } } +func (s) TestValueFromIncomingContext(t *testing.T) { + md := Pairs( + "X-My-Header-1", "42", + "X-My-Header-2", "43-1", + "X-My-Header-2", "43-2", + "x-my-header-3", "44", + ) + ctx := NewIncomingContext(context.Background(), md) + + for _, test := range []struct { + key string + want []string + }{ + { + key: "x-my-header-1", + want: []string{"42"}, + }, + { + key: "x-my-header-2", + want: []string{"43-1", "43-2"}, + }, + { + key: "x-my-header-3", + want: []string{"44"}, + }, + { + key: "x-unknown", + want: nil, + }, + } { + v := ValueFromIncomingContext(ctx, test.key) + if !reflect.DeepEqual(v, test.want) { + t.Errorf("value of metadata is %v, want %v", v, test.want) + } + } +} + func (s) TestAppendToOutgoingContext(t *testing.T) { // Pre-existing metadata tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) From 87d1a90a2b56a5e99c3a7913ddaec38dc50cd6ba Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 8 Sep 2022 12:51:35 -0400 Subject: [PATCH 608/998] orca: fix package 
used to reference service to use grpc suffix instead of pb (#5645) * orca: fix package used to reference service to use grpc suffix instead of pb --- orca/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orca/service.go b/orca/service.go index d36b76f2a9b0..07da569754bb 100644 --- a/orca/service.go +++ b/orca/service.go @@ -130,7 +130,7 @@ func (s *Service) sendMetricsResponse(stream v3orcaservicegrpc.OpenRcaService_St // StreamCoreMetrics streams custom backend metrics injected by the server // application. -func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicepb.OpenRcaService_StreamCoreMetricsServer) error { +func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { ticker := time.NewTicker(s.determineReportingInterval(req)) defer ticker.Stop() From 552de12024bcb76b89badc0530880634d6476dbc Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 8 Sep 2022 15:54:27 -0400 Subject: [PATCH 609/998] orca: fix package used to reference service to use pb suffix instead of grpc (#5647) orca: fix package used to reference service to use pb suffix instead of grpc (#5647) --- orca/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orca/service.go b/orca/service.go index 07da569754bb..7816dcc1eca1 100644 --- a/orca/service.go +++ b/orca/service.go @@ -112,7 +112,7 @@ func Register(s *grpc.Server, opts ServiceOptions) (*Service, error) { // negative or is less than the configured minimum (via // ServiceOptions.MinReportingInterval), the latter is used. Else the value from // the incoming request is used. 
-func (s *Service) determineReportingInterval(req *v3orcaservicegrpc.OrcaLoadReportRequest) time.Duration { +func (s *Service) determineReportingInterval(req *v3orcaservicepb.OrcaLoadReportRequest) time.Duration { if req.GetReportInterval() == nil { return s.minReportingInterval } From 21f0259e42b562b3f69efcb1ef0754392f5fa25b Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 12 Sep 2022 22:20:29 +0000 Subject: [PATCH 610/998] test: loosen metadata error check to reduce dependence on exact library errors (#5650) --- test/metadata_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/metadata_test.go b/test/metadata_test.go index e3da918fc722..ad2b12cfc77f 100644 --- a/test/metadata_test.go +++ b/test/metadata_test.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "reflect" + "strings" "testing" "time" @@ -45,12 +46,12 @@ func (s) TestInvalidMetadata(t *testing.T) { { md: map[string][]string{string(rune(0x19)): {"testVal"}}, want: status.Error(codes.Internal, "header key \"\\x19\" contains illegal characters not in [0-9a-z-_.]"), - recv: status.Error(codes.Internal, "invalid header field name \"\\x19\""), + recv: status.Error(codes.Internal, "invalid header field"), }, { md: map[string][]string{"test": {string(rune(0x19))}}, want: status.Error(codes.Internal, "header key \"test\" contains value with non-printable ASCII characters"), - recv: status.Error(codes.Internal, "invalid header field value \"\\x19\""), + recv: status.Error(codes.Internal, "invalid header field"), }, { md: map[string][]string{"test-bin": {string(rune(0x19))}}, @@ -113,7 +114,7 @@ func (s) TestInvalidMetadata(t *testing.T) { if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { t.Errorf("call ss.Client stream Send(nil) will success but got err :%v", err) } - if _, err := stream.Recv(); !reflect.DeepEqual(test.recv, err) { + if _, err := stream.Recv(); status.Code(err) != status.Code(test.recv) || !strings.Contains(err.Error(), test.recv.Error()) { 
t.Errorf("stream.Recv() = _, get err :%v, want err :%v", err, test.recv) } } From 7da8a056b66bf1ac3486627422aa99c7eb43128b Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 13 Sep 2022 15:54:12 -0400 Subject: [PATCH 611/998] xds: Enable Outlier Detection interop tests (#5632) --- test/kokoro/xds_k8s_lb.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index f8a0b3a828c2..31aca363aafc 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -158,7 +158,7 @@ main() { # Run tests cd "${TEST_DRIVER_FULL_DIR}" local failed_tests=0 - test_suites=("api_listener_test" "change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test" "affinity_test") + test_suites=("api_listener_test" "change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test" "affinity_test" "outlier_detection_test") for test in "${test_suites[@]}"; do run_test $test || (( failed_tests++ )) done From 9c3e589d3ee62b589f75c3592adb2a6a5560f0cc Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 15 Sep 2022 15:55:46 -0700 Subject: [PATCH 612/998] rls: delegate pick to child policy as long as it is not in TransientFailure (#5656) --- balancer/rls/picker.go | 44 ++++------------ internal/testutils/xds/e2e/clientresources.go | 51 +++++++++++++++++-- .../xds_rls_clusterspecifier_plugin_test.go | 32 ++++++++++-- 3 files changed, 87 insertions(+), 40 deletions(-) diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go index ece27f0fc2ed..f73fe7b1028f 100644 --- a/balancer/rls/picker.go +++ b/balancer/rls/picker.go @@ -162,21 +162,20 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // delegateToChildPolicies is a helper function which iterates through the list // of child policy wrappers in a cache entry and attempts to find a child policy -// to which this RPC can be routed to. 
If there is no child policy in READY -// state, we delegate to the first child policy arbitrarily. +// to which this RPC can be routed to. If all child policies are in +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. // // Caller must hold at least a read-lock on p.lb.cacheMu. func (p *rlsPicker) delegateToChildPolicies(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { - for _, cpw := range dcEntry.childPolicyWrappers { - ok, res, err := p.pickIfFeasible(cpw, info) - if ok { - return res, err + for i, cpw := range dcEntry.childPolicyWrappers { + state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) + // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if + // it the last one (which handles the case of delegating to the last + // child picker if all child polcies are in TRANSIENT_FAILURE). + if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { + return state.Picker.Pick(info) } } - if len(dcEntry.childPolicyWrappers) != 0 { - state := (*balancer.State)(atomic.LoadPointer(&dcEntry.childPolicyWrappers[0].state)) - return state.Picker.Pick(info) - } // In the unlikely event that we have a cache entry with no targets, we end up // queueing the RPC. return balancer.PickResult{}, balancer.ErrNoSubConnAvailable @@ -249,8 +248,8 @@ func (p *rlsPicker) sendRequestAndReturnPick(cacheKey cacheKey, bs *backoffState // target if one is configured, or fails the pick with the given error. 
func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) { if p.defaultPolicy != nil { - _, res, err := p.pickIfFeasible(p.defaultPolicy, info) - return res, err + state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state)) + return state.Picker.Pick(info) } return balancer.PickResult{}, errOnNoDefault } @@ -275,27 +274,6 @@ func (p *rlsPicker) sendRouteLookupRequest(cacheKey cacheKey, bs *backoffState, return throttled } -// pickIfFeasible determines if a pick can be delegated to child policy based on -// its connectivity state. -// - If state is CONNECTING, the pick is to be queued -// - If state is IDLE, the child policy is instructed to exit idle, and the pick -// is to be queued -// - If state is READY, pick it delegated to the child policy's picker -func (p *rlsPicker) pickIfFeasible(cpw *childPolicyWrapper, info balancer.PickInfo) (bool, balancer.PickResult, error) { - state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) - switch state.ConnectivityState { - case connectivity.Connecting: - return true, balancer.PickResult{}, balancer.ErrNoSubConnAvailable - case connectivity.Idle: - p.bg.ExitIdleOne(cpw.target) - return true, balancer.PickResult{}, balancer.ErrNoSubConnAvailable - case connectivity.Ready: - r, e := state.Picker.Pick(info) - return true, r, e - } - return false, balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} - // handleRouteLookupResponse is the callback invoked by the control channel upon // receipt of an RLS response. Modifies the data cache and pending requests map // and sends a new picker. 
diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index f3f7f6307c53..2dacebb14653 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -305,8 +305,44 @@ func DefaultRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.Rou // DefaultCluster returns a basic xds Cluster resource. func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) *v3clusterpb.Cluster { + return ClusterResourceWithOptions(&ClusterOptions{ + ClusterName: clusterName, + ServiceName: edsServiceName, + Policy: LoadBalancingPolicyRoundRobin, + SecurityLevel: secLevel, + }) +} + +// LoadBalancingPolicy determines the policy used for balancing load across +// endpoints in the Cluster. +type LoadBalancingPolicy int + +const ( + // LoadBalancingPolicyRoundRobin results in the use of the weighted_target + // LB policy to balance load across localities and endpoints in the cluster. + LoadBalancingPolicyRoundRobin LoadBalancingPolicy = iota + // LoadBalancingPolicyRingHash results in the use of the ring_hash LB policy + // as the leaf policy. + LoadBalancingPolicyRingHash +) + +// ClusterOptions contains options to configure a Cluster resource. +type ClusterOptions struct { + // ClusterName is the name of the Cluster resource. + ClusterName string + // ServiceName is the EDS service name of the Cluster. + ServiceName string + // Policy is the LB policy to be used. + Policy LoadBalancingPolicy + // SecurityLevel determines the security configuration for the Cluster. + SecurityLevel SecurityLevel +} + +// ClusterResourceWithOptions returns an xDS Cluster resource configured with +// the provided options. 
+func ClusterResourceWithOptions(opts *ClusterOptions) *v3clusterpb.Cluster { var tlsContext *v3tlspb.UpstreamTlsContext - switch secLevel { + switch opts.SecurityLevel { case SecurityLevelNone: case SecurityLevelTLS: tlsContext = &v3tlspb.UpstreamTlsContext{ @@ -333,8 +369,15 @@ func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) } } + var lbPolicy v3clusterpb.Cluster_LbPolicy + switch opts.Policy { + case LoadBalancingPolicyRoundRobin: + lbPolicy = v3clusterpb.Cluster_ROUND_ROBIN + case LoadBalancingPolicyRingHash: + lbPolicy = v3clusterpb.Cluster_RING_HASH + } cluster := &v3clusterpb.Cluster{ - Name: clusterName, + Name: opts.ClusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ EdsConfig: &v3corepb.ConfigSource{ @@ -342,9 +385,9 @@ func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) Ads: &v3corepb.AggregatedConfigSource{}, }, }, - ServiceName: edsServiceName, + ServiceName: opts.ServiceName, }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LbPolicy: lbPolicy, } if tlsContext != nil { cluster.TransportSocket = &v3corepb.TransportSocket{ diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index 35b5fe37dc1b..f5ab17bca7ee 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -46,7 +46,7 @@ import ( // defaultClientResourcesWithRLSCSP returns a set of resources (LDS, RDS, CDS, EDS) for a // client to connect to a server with a RLS Load Balancer as a child of Cluster Manager. 
-func defaultClientResourcesWithRLSCSP(params e2e.ResourceParams, rlsProto *rlspb.RouteLookupConfig) e2e.UpdateOptions { +func defaultClientResourcesWithRLSCSP(lb e2e.LoadBalancingPolicy, params e2e.ResourceParams, rlsProto *rlspb.RouteLookupConfig) e2e.UpdateOptions { routeConfigName := "route-" + params.DialTarget clusterName := "cluster-" + params.DialTarget endpointsName := "endpoints-" + params.DialTarget @@ -54,7 +54,12 @@ func defaultClientResourcesWithRLSCSP(params e2e.ResourceParams, rlsProto *rlspb NodeID: params.NodeID, Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, Routes: []*v3routepb.RouteConfiguration{defaultRouteConfigWithRLSCSP(routeConfigName, params.DialTarget, rlsProto)}, - Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, endpointsName, params.SecLevel)}, + Clusters: []*v3clusterpb.Cluster{e2e.ClusterResourceWithOptions(&e2e.ClusterOptions{ + ClusterName: clusterName, + ServiceName: endpointsName, + Policy: lb, + SecurityLevel: params.SecLevel, + })}, Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, []uint32{params.Port})}, } } @@ -93,6 +98,27 @@ func defaultRouteConfigWithRLSCSP(routeName, ldsTarget string, rlsProto *rlspb.R // target corresponding to this test service. This test asserts an RPC proceeds // as normal with the RLS Balancer as part of system. 
func (s) TestRLSinxDS(t *testing.T) { + tests := []struct { + name string + lbPolicy e2e.LoadBalancingPolicy + }{ + { + name: "roundrobin", + lbPolicy: e2e.LoadBalancingPolicyRoundRobin, + }, + { + name: "ringhash", + lbPolicy: e2e.LoadBalancingPolicyRingHash, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testRLSinxDS(t, test.lbPolicy) + }) + } +} + +func testRLSinxDS(t *testing.T, lbPolicy e2e.LoadBalancingPolicy) { oldRLS := envconfig.XDSRLS envconfig.XDSRLS = true internal.RegisterRLSClusterSpecifierPluginForTesting() @@ -119,7 +145,7 @@ func (s) TestRLSinxDS(t *testing.T) { } const serviceName = "my-service-client-side-xds" - resources := defaultClientResourcesWithRLSCSP(e2e.ResourceParams{ + resources := defaultClientResourcesWithRLSCSP(lbPolicy, e2e.ResourceParams{ DialTarget: serviceName, NodeID: nodeID, Host: "localhost", From b1d7f56b81b7902d871111b82dec6ba45f854ede Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 21 Sep 2022 14:35:08 -0400 Subject: [PATCH 613/998] transport: Fix deadlock in transport caused by GOAWAY race with new stream creation (#5652) * transport: Fix deadlock in transport caused by GOAWAY race with new stream creation --- internal/transport/http2_client.go | 23 ++++-- test/clienttester.go | 109 +++++++++++++++++++++++++++++ test/end2end_test.go | 56 ++++++++++++++- 3 files changed, 181 insertions(+), 7 deletions(-) create mode 100644 test/clienttester.go diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 53643fa97477..5c2f35b24e75 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1232,18 +1232,29 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. 
} + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } diff --git a/test/clienttester.go b/test/clienttester.go new file mode 100644 index 000000000000..7e223091164d --- /dev/null +++ b/test/clienttester.go @@ -0,0 +1,109 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package test + +import ( + "bytes" + "io" + "net" + "testing" + + "golang.org/x/net/http2" +) + +var ( + clientPreface = []byte(http2.ClientPreface) +) + +func newClientTester(t *testing.T, conn net.Conn) *clientTester { + ct := &clientTester{ + t: t, + conn: conn, + } + ct.fr = http2.NewFramer(conn, conn) + ct.greet() + return ct +} + +type clientTester struct { + t *testing.T + conn net.Conn + fr *http2.Framer +} + +// greet() performs the necessary steps for http2 connection establishment on +// the server side. +func (ct *clientTester) greet() { + ct.wantClientPreface() + ct.wantSettingsFrame() + ct.writeSettingsFrame() + ct.writeSettingsAck() + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Errorf("error reading frame from client side: %v", err) + } + switch f := f.(type) { + case *http2.SettingsFrame: + if f.IsAck() { // HTTP/2 handshake completed. + return + } + default: + ct.t.Errorf("during greet, unexpected frame type %T", f) + } + } +} + +func (ct *clientTester) wantClientPreface() { + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(ct.conn, preface); err != nil { + ct.t.Errorf("Error at server-side while reading preface from client. 
Err: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + ct.t.Errorf("received bogus greeting from client %q", preface) + } +} + +func (ct *clientTester) wantSettingsFrame() { + frame, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Errorf("error reading initial settings frame from client: %v", err) + } + _, ok := frame.(*http2.SettingsFrame) + if !ok { + ct.t.Errorf("initial frame sent from client is not a settings frame, type %T", frame) + } +} + +func (ct *clientTester) writeSettingsFrame() { + if err := ct.fr.WriteSettings(); err != nil { + ct.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (ct *clientTester) writeSettingsAck() { + if err := ct.fr.WriteSettingsAck(); err != nil { + ct.t.Fatalf("Error writing ACK of client's SETTINGS: %v", err) + } +} + +func (ct *clientTester) writeGoAway(maxStreamID uint32, code http2.ErrCode, debugData []byte) { + if err := ct.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { + ct.t.Fatalf("Error writing GOAWAY: %v", err) + } +} diff --git a/test/end2end_test.go b/test/end2end_test.go index 8a4f11515675..725bcdb641eb 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7407,7 +7407,6 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { return } writer.Flush() // necessary since client is expecting preface before declaring connection fully setup. - var sid uint32 // Loop until conn is closed and framer returns io.EOF for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) { @@ -8130,3 +8129,58 @@ func (s) TestRecvWhileReturningStatus(t *testing.T) { } } } + +// TestGoAwayStreamIDSmallerThanCreatedStreams tests the scenario where a server +// sends a goaway with a stream id that is smaller than some created streams on +// the client, while the client is simultaneously creating new streams. This +// should not induce a deadlock. 
+func (s) TestGoAwayStreamIDSmallerThanCreatedStreams(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + + ctCh := testutils.NewChannel() + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("error in lis.Accept(): %v", err) + } + ct := newClientTester(t, conn) + ctCh.Send(ct) + }() + + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + val, err := ctCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for client transport (should be given after http2 creation)") + } + ct := val.(*clientTester) + + tc := testpb.NewTestServiceClient(cc) + someStreamsCreated := grpcsync.NewEvent() + goAwayWritten := grpcsync.NewEvent() + go func() { + for i := 0; i < 20; i++ { + if i == 10 { + <-goAwayWritten.Done() + } + tc.FullDuplexCall(ctx) + if i == 4 { + someStreamsCreated.Fire() + } + } + }() + + <-someStreamsCreated.Done() + ct.writeGoAway(1, http2.ErrCodeNo, []byte{}) + goAwayWritten.Fire() +} From a238cebacde409d423d0116523523deb9fe0bdc7 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 22 Sep 2022 11:56:44 -0400 Subject: [PATCH 614/998] xDS: Outlier Detection Env Var not hardcoded to false (#5664) --- internal/envconfig/xds.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index a83b26bb869c..55aaeea8b455 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -41,6 +41,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + 
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -85,7 +86,7 @@ var ( // XDSOutlierDetection indicates whether outlier detection support is // enabled, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". - XDSOutlierDetection = false + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") From 8458251c6b35036b90788223f67fc9e4396cd32c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 23 Sep 2022 13:26:04 -0700 Subject: [PATCH 615/998] xdsclient: ignore routes with cluster_specifier_plugin when GRPC_EXPERIMENTAL_XDS_RLS_LB is off (#5670) --- .../xdsclient/xdsresource/unmarshal_rds.go | 16 +++++++++++++++- .../xdsclient/xdsresource/unmarshal_rds_test.go | 6 +++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index a8fd65d974bb..32c48d46b691 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -361,8 +361,22 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) } case *v3routepb.RouteAction_ClusterSpecifierPlugin: + // gRFC A28 was updated to say the following: + // + // The route’s action field must be route, and its + // cluster_specifier: + // - Can be Cluster + // - Can be Weighted_clusters + // - The sum of weights must add up to the total_weight. + // - Can be unset or an unsupported field. The route containing + // this action will be ignored. 
+ // + // This means that if this env var is not set, we should treat + // it as if it we didn't know about the cluster_specifier_plugin + // at all. if !envconfig.XDSRLS { - return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a) + logger.Infof("route %+v contains route_action with unsupported field: cluster_specifier_plugin, the route will be ignored", r) + continue } if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { // "When processing RouteActions, if any action includes a diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index b6034c72e3b8..b6d02f30bef1 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -738,7 +738,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { rlsEnabled: true, }, { - name: "ignore-error-in-cluster-specifier-plugin", + name: "ignore-error-in-cluster-specifier-plugin-env-var-off", rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, false), }, []string{}), @@ -749,7 +749,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), }, []string{"cspA"}), - wantError: true, + wantUpdate: goodUpdate, }, // This tests a scenario where a cluster specifier plugin is not found // and is optional. 
Any routes referencing that not found optional @@ -1545,7 +1545,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{}}}, }, }, - wantErr: true, + wantRoutes: []*Route{}, }, { name: "default totalWeight is 100 in weighted clusters action", From e8866a83edf51f5a5dcd8097c6d4fe4cdef49cdd Mon Sep 17 00:00:00 2001 From: Alex Date: Tue, 27 Sep 2022 20:23:03 +0200 Subject: [PATCH 616/998] build: harden GitHub Workflow permissions (#5660) Signed-off-by: Alex Low --- .github/workflows/release.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 673e05c069e5..d8f2cd854a51 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -6,6 +6,9 @@ on: jobs: release: + permissions: + contents: write # to upload release asset (actions/upload-release-asset) + name: Release cmd/protoc-gen-go-grpc runs-on: ubuntu-latest if: startsWith(github.event.release.tag_name, 'cmd/protoc-gen-go-grpc/') From 36e481079b224e01fc3134ba7f1b2717f0c4d4ef Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 27 Sep 2022 12:41:05 -0700 Subject: [PATCH 617/998] orca: cleanup old code, and get grpc package to use new code (#5627) --- balancer/balancer.go | 2 +- orca/orca.go | 22 +++++ xds/internal/balancer/clusterimpl/picker.go | 5 +- xds/internal/balancer/orca/orca.go | 84 ------------------ xds/internal/balancer/orca/orca_test.go | 96 --------------------- 5 files changed, 26 insertions(+), 183 deletions(-) delete mode 100644 xds/internal/balancer/orca/orca.go delete mode 100644 xds/internal/balancer/orca/orca_test.go diff --git a/balancer/balancer.go b/balancer/balancer.go index 25713908072c..f4f9408f3852 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -244,7 +244,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. 
// - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } diff --git a/orca/orca.go b/orca/orca.go index 414f6ed6ef4f..676c66e2829b 100644 --- a/orca/orca.go +++ b/orca/orca.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/proto" @@ -162,3 +163,24 @@ func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { } return ret, nil } + +// loadParser implements the Parser interface defined in `internal/balancerload` +// package. This interface is used by the client stream to parse load reports +// sent by the server in trailer metadata. The parsed loads are then sent to +// balancers via balancer.DoneInfo. +// +// The grpc package cannot directly call orca.ToLoadReport() as that would cause +// an import cycle. Hence this roundabout method is used. 
+type loadParser struct{} + +func (loadParser) Parse(md metadata.MD) interface{} { + lr, err := ToLoadReport(md) + if err != nil { + logger.Errorf("Parse(%v) failed: %v", err) + } + return lr +} + +func init() { + balancerload.SetParser(loadParser{}) +} diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index 8cce07553082..360fc44c9e4d 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -19,7 +19,6 @@ package clusterimpl import ( - orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -27,6 +26,8 @@ import ( "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/load" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" ) // NewRandomWRR is used when calculating drops. It's exported so that tests can @@ -158,7 +159,7 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { } d.loadStore.CallFinished(lIDStr, info.Err) - load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) + load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport) if !ok { return } diff --git a/xds/internal/balancer/orca/orca.go b/xds/internal/balancer/orca/orca.go deleted file mode 100644 index 75b9439d4dba..000000000000 --- a/xds/internal/balancer/orca/orca.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package orca implements Open Request Cost Aggregation. -package orca - -import ( - orcapb "github.com/cncf/xds/go/xds/data/orca/v3" - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/balancerload" - "google.golang.org/grpc/metadata" -) - -const mdKey = "X-Endpoint-Load-Metrics-Bin" - -var logger = grpclog.Component("xds") - -// toBytes converts a orca load report into bytes. -func toBytes(r *orcapb.OrcaLoadReport) []byte { - if r == nil { - return nil - } - - b, err := proto.Marshal(r) - if err != nil { - logger.Warningf("orca: failed to marshal load report: %v", err) - return nil - } - return b -} - -// ToMetadata converts a orca load report into grpc metadata. -func ToMetadata(r *orcapb.OrcaLoadReport) metadata.MD { - b := toBytes(r) - if b == nil { - return nil - } - return metadata.Pairs(mdKey, string(b)) -} - -// fromBytes reads load report bytes and converts it to orca. -func fromBytes(b []byte) *orcapb.OrcaLoadReport { - ret := new(orcapb.OrcaLoadReport) - if err := proto.Unmarshal(b, ret); err != nil { - logger.Warningf("orca: failed to unmarshal load report: %v", err) - return nil - } - return ret -} - -// FromMetadata reads load report from metadata and converts it to orca. -// -// It returns nil if report is not found in metadata. 
-func FromMetadata(md metadata.MD) *orcapb.OrcaLoadReport { - vs := md.Get(mdKey) - if len(vs) == 0 { - return nil - } - return fromBytes([]byte(vs[0])) -} - -type loadParser struct{} - -func (*loadParser) Parse(md metadata.MD) interface{} { - return FromMetadata(md) -} - -func init() { - balancerload.SetParser(&loadParser{}) -} diff --git a/xds/internal/balancer/orca/orca_test.go b/xds/internal/balancer/orca/orca_test.go deleted file mode 100644 index 9979c1a9d107..000000000000 --- a/xds/internal/balancer/orca/orca_test.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package orca - -import ( - "strings" - "testing" - - orcapb "github.com/cncf/xds/go/xds/data/orca/v3" - "github.com/golang/protobuf/proto" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/metadata" -) - -var ( - testMessage = &orcapb.OrcaLoadReport{ - CpuUtilization: 0.1, - MemUtilization: 0.2, - RequestCost: map[string]float64{"ccc": 3.4}, - Utilization: map[string]float64{"ttt": 0.4}, - } - testBytes, _ = proto.Marshal(testMessage) -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -func (s) TestToMetadata(t *testing.T) { - tests := []struct { - name string - r *orcapb.OrcaLoadReport - want metadata.MD - }{{ - name: "nil", - r: nil, - want: nil, - }, { - name: "valid", - r: testMessage, - want: metadata.MD{ - strings.ToLower(mdKey): []string{string(testBytes)}, - }, - }} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := ToMetadata(tt.r); !cmp.Equal(got, tt.want) { - t.Errorf("ToMetadata() = %v, want %v", got, tt.want) - } - }) - } -} - -func (s) TestFromMetadata(t *testing.T) { - tests := []struct { - name string - md metadata.MD - want *orcapb.OrcaLoadReport - }{{ - name: "nil", - md: nil, - want: nil, - }, { - name: "valid", - md: metadata.MD{ - strings.ToLower(mdKey): []string{string(testBytes)}, - }, - want: testMessage, - }} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := FromMetadata(tt.md); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { - t.Errorf("FromMetadata() = %v, want %v", got, tt.want) - } - }) - } -} From 54521b22e01f52e6e112b33bbca8b2255915dd40 Mon Sep 17 00:00:00 2001 From: Jan Lamecki Date: Fri, 30 Sep 2022 18:34:05 +0200 Subject: [PATCH 618/998] client: remove trailing null from unix abstract socket address (#5678) --- examples/features/unix_abstract/server/main.go | 2 +- internal/resolver/unix/unix.go | 5 +++-- 
test/authority_test.go | 8 +++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/examples/features/unix_abstract/server/main.go b/examples/features/unix_abstract/server/main.go index 4ef4ff5b7637..7013466b4917 100644 --- a/examples/features/unix_abstract/server/main.go +++ b/examples/features/unix_abstract/server/main.go @@ -51,7 +51,7 @@ func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.Echo func main() { flag.Parse() netw := "unix" - socketAddr := fmt.Sprintf("\x00%v", *addr) + socketAddr := fmt.Sprintf("@%v", *addr) lis, err := net.Listen(netw, socketAddr) if err != nil { log.Fatalf("net.Listen(%q, %q) failed: %v", netw, socketAddr, err) diff --git a/internal/resolver/unix/unix.go b/internal/resolver/unix/unix.go index 20852e59df29..7f1a702cacbe 100644 --- a/internal/resolver/unix/unix.go +++ b/internal/resolver/unix/unix.go @@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv } addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. 
+ addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/test/authority_test.go b/test/authority_test.go index 452b896eebf9..8148cd2347ea 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -125,10 +125,10 @@ var authorityTests = []authorityTest{ }, { name: "UnixAbstract", - address: "\x00abc efg", + address: "@abc efg", target: "unix-abstract:abc efg", authority: "localhost", - dialTargetWant: "\x00abc efg", + dialTargetWant: "unix:@abc efg", }, } @@ -155,9 +155,7 @@ func (s) TestUnixCustomDialer(t *testing.T) { if address != test.dialTargetWant { return nil, fmt.Errorf("expected target %v in custom dialer, instead got %v", test.dialTargetWant, address) } - if !strings.HasPrefix(test.target, "unix-abstract:") { - address = address[len("unix:"):] - } + address = address[len("unix:"):] return (&net.Dialer{}).DialContext(ctx, "unix", address) } runUnixTest(t, test.address, test.target, test.authority, dialer) From d83070ec0d9043f713b6a63e1963c593b447208c Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 30 Sep 2022 16:46:17 -0400 Subject: [PATCH 619/998] Changed Outlier Detection Env Var to default true (#5673) --- internal/envconfig/xds.go | 6 +- internal/internal.go | 16 -- test/xds/xds_client_outlier_detection_test.go | 18 --- xds/internal/balancer/balancer.go | 13 +- .../cdsbalancer/cdsbalancer_security_test.go | 15 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 16 +- .../clusterresolver/clusterresolver_test.go | 9 -- .../clusterresolver/configbuilder_test.go | 153 +----------------- .../balancer/outlierdetection/balancer.go | 8 - .../outlierdetection/balancer_test.go | 8 +- .../e2e_test/outlierdetection_test.go | 4 - .../xdsresource/unmarshal_cds_test.go | 3 - 12 files changed, 27 insertions(+), 242 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go 
index 55aaeea8b455..af09711a3e88 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -84,9 +84,9 @@ var ( // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". - XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/internal/internal.go b/internal/internal.go index 9ce1f18ae9d6..87cacb5b808c 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -120,22 +120,6 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() - - // RegisterOutlierDetectionBalancerForTesting registers the Outlier - // Detection Balancer for testing purposes, regardless of the Outlier - // Detection environment variable. - // - // TODO: Remove this function once the Outlier Detection env var is removed. - RegisterOutlierDetectionBalancerForTesting func() - - // UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier - // Detection Balancer for testing purposes. This is needed because there is - // no way to unregister the Outlier Detection Balancer after registering it - // solely for testing purposes using - // RegisterOutlierDetectionBalancerForTesting(). - // - // TODO: Remove this function once the Outlier Detection env var is removed. 
- UnregisterOutlierDetectionBalancerForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go index b53439cf66ce..fe47bc9b828a 100644 --- a/test/xds/xds_client_outlier_detection_test.go +++ b/test/xds/xds_client_outlier_detection_test.go @@ -32,8 +32,6 @@ import ( v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils/xds/e2e" testgrpc "google.golang.org/grpc/test/grpc_testing" @@ -49,14 +47,6 @@ import ( // Detection balancer. This test verifies that an RPC is able to proceed // normally with this configuration. func (s) TestOutlierDetection_NoopConfig(t *testing.T) { - oldOD := envconfig.XDSOutlierDetection - envconfig.XDSOutlierDetection = true - internal.RegisterOutlierDetectionBalancerForTesting() - defer func() { - envconfig.XDSOutlierDetection = oldOD - internal.UnregisterOutlierDetectionBalancerForTesting() - }() - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() @@ -129,14 +119,6 @@ func clusterWithOutlierDetection(clusterName, edsServiceName string, secLevel e2 // Detection Balancer should eject the connection to the backend which // constantly errors, and thus RPC's should mainly go to backend 1 and 2. 
func (s) TestOutlierDetectionWithOutlier(t *testing.T) { - oldOD := envconfig.XDSOutlierDetection - envconfig.XDSOutlierDetection = true - internal.RegisterOutlierDetectionBalancerForTesting() - defer func() { - envconfig.XDSOutlierDetection = oldOD - internal.UnregisterOutlierDetectionBalancerForTesting() - }() - managementServer, nodeID, _, resolver, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() diff --git a/xds/internal/balancer/balancer.go b/xds/internal/balancer/balancer.go index 8d81aced2dd5..68ed789f2a4d 100644 --- a/xds/internal/balancer/balancer.go +++ b/xds/internal/balancer/balancer.go @@ -20,10 +20,11 @@ package balancer import ( - _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer - _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer - _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer - _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer + _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // Register the outlier_detection balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer ) 
diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 685e77adf463..7d5898ada83d 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -34,7 +34,6 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -251,7 +250,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -307,7 +306,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -463,7 +462,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -497,7 +496,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -550,7 +549,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -600,7 +599,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -678,7 +677,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { SubjectAltNameMatchers: testSANMatchers, }, } - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index dfa47913ae29..fa94e13f442a 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" internalserviceconfig 
"google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" @@ -366,14 +365,11 @@ func (s) TestUpdateClientConnStateWithSameState(t *testing.T) { // different updates and verifies that the expect ClientConnState is propagated // to the edsBalancer. func (s) TestHandleClusterUpdate(t *testing.T) { - oldOutlierDetection := envconfig.XDSOutlierDetection - envconfig.XDSOutlierDetection = true xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) xdsC.SetBootstrapConfig(&bootstrap.Config{ XDSServer: defaultTestAuthorityServerConfig, }) defer func() { - envconfig.XDSOutlierDetection = oldOutlierDetection cancel() cdsB.Close() }() @@ -506,7 +502,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -591,7 +587,7 @@ func (s) TestResolverError(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -640,7 +636,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -675,7 +671,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // the service's counter with the new max requests. var maxRequests uint32 = 1 cdsUpdate := xdsresource.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} - wantCCS := edsCCS(clusterName, &maxRequests, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(clusterName, &maxRequests, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -708,7 +704,7 @@ func (s) TestClose(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -779,7 +775,7 @@ func (s) TestExitIdle(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{}) + wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index f6f6249d9e89..1973e1549188 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -30,8 +30,6 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" @@ -533,13 +531,6 @@ func newLBConfigWithOneEDSAndOutlierDetection(edsServiceName string, odCfg outli // Configuration sent downward should have a top level Outlier Detection Policy // for each priority. 
func (s) TestOutlierDetection(t *testing.T) { - oldOutlierDetection := envconfig.XDSOutlierDetection - envconfig.XDSOutlierDetection = true - internal.RegisterOutlierDetectionBalancerForTesting() - defer func() { - envconfig.XDSOutlierDetection = oldOutlierDetection - }() - edsLBCh := testutils.NewChannel() xdsC, cleanup := setup(edsLBCh) defer cleanup() diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index d050df11b02a..f3a830291605 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/balancer/weightedtarget" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" @@ -179,156 +178,10 @@ func TestBuildPriorityConfigJSON(t *testing.T) { } } +// TestBuildPriorityConfig tests the priority config generation. Each top level +// balancer per priority should be an Outlier Detection balancer, with a Cluster +// Impl Balancer as a child. 
func TestBuildPriorityConfig(t *testing.T) { - gotConfig, gotAddrs, _ := buildPriorityConfig([]priorityConfig{ - { - mechanism: DiscoveryMechanism{ - Cluster: testClusterName, - LoadReportingServer: testLRSServerConfig, - MaxConcurrentRequests: newUint32(testMaxRequests), - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServiceName, - }, - edsResp: xdsresource.EndpointsUpdate{ - Drops: []xdsresource.OverloadDropConfig{ - { - Category: testDropCategory, - Numerator: testDropOverMillion, - Denominator: million, - }, - }, - Localities: []xdsresource.Locality{ - testLocalitiesP0[0], - testLocalitiesP0[1], - testLocalitiesP1[0], - testLocalitiesP1[1], - }, - }, - childNameGen: newNameGenerator(0), - }, - { - mechanism: DiscoveryMechanism{ - Cluster: testClusterName2, - Type: DiscoveryMechanismTypeLogicalDNS, - }, - addresses: testAddressStrs[4], - childNameGen: newNameGenerator(1), - }, - }, nil) - - wantConfig := &priority.LBConfig{ - Children: map[string]*priority.Child{ - "priority-0-0": { - Config: &internalserviceconfig.BalancerConfig{ - Name: clusterimpl.Name, - Config: &clusterimpl.LBConfig{ - Cluster: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServer: testLRSServerConfig, - MaxConcurrentRequests: newUint32(testMaxRequests), - DropCategories: []clusterimpl.DropConfig{ - { - Category: testDropCategory, - RequestsPerMillion: testDropOverMillion, - }, - }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(testLocalityIDs[0].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - assertString(testLocalityIDs[1].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - }, - }, - }, - IgnoreReresolutionRequests: true, - }, - "priority-0-1": { - Config: 
&internalserviceconfig.BalancerConfig{ - Name: clusterimpl.Name, - Config: &clusterimpl.LBConfig{ - Cluster: testClusterName, - EDSServiceName: testEDSServiceName, - LoadReportingServer: testLRSServerConfig, - MaxConcurrentRequests: newUint32(testMaxRequests), - DropCategories: []clusterimpl.DropConfig{ - { - Category: testDropCategory, - RequestsPerMillion: testDropOverMillion, - }, - }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(testLocalityIDs[2].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - assertString(testLocalityIDs[3].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - }, - }, - }, - IgnoreReresolutionRequests: true, - }, - "priority-1": { - Config: &internalserviceconfig.BalancerConfig{ - Name: clusterimpl.Name, - Config: &clusterimpl.LBConfig{ - Cluster: testClusterName2, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: "pick_first"}, - }, - }, - IgnoreReresolutionRequests: false, - }, - }, - Priorities: []string{"priority-0-0", "priority-0-1", "priority-1"}, - } - wantAddrs := []resolver.Address{ - testAddrWithAttrs(testAddressStrs[0][0], nil, "priority-0-0", &testLocalityIDs[0]), - testAddrWithAttrs(testAddressStrs[0][1], nil, "priority-0-0", &testLocalityIDs[0]), - testAddrWithAttrs(testAddressStrs[1][0], nil, "priority-0-0", &testLocalityIDs[1]), - testAddrWithAttrs(testAddressStrs[1][1], nil, "priority-0-0", &testLocalityIDs[1]), - testAddrWithAttrs(testAddressStrs[2][0], nil, "priority-0-1", &testLocalityIDs[2]), - testAddrWithAttrs(testAddressStrs[2][1], nil, "priority-0-1", &testLocalityIDs[2]), - testAddrWithAttrs(testAddressStrs[3][0], nil, "priority-0-1", &testLocalityIDs[3]), - testAddrWithAttrs(testAddressStrs[3][1], nil, "priority-0-1", &testLocalityIDs[3]), - 
testAddrWithAttrs(testAddressStrs[4][0], nil, "priority-1", nil), - testAddrWithAttrs(testAddressStrs[4][1], nil, "priority-1", nil), - } - - if diff := cmp.Diff(gotConfig, wantConfig); diff != "" { - t.Errorf("buildPriorityConfig() diff (-got +want) %v", diff) - } - if diff := cmp.Diff(gotAddrs, wantAddrs, addrCmpOpts); diff != "" { - t.Errorf("buildPriorityConfig() diff (-got +want) %v", diff) - } -} - -// TestBuildPriorityConfigWithOutlierDetection tests the priority config -// generation with Outlier Detection toggled on. Each top level balancer per -// priority should be an Outlier Detection balancer, with a Cluster Impl -// Balancer as a child. -func TestBuildPriorityConfigWithOutlierDetection(t *testing.T) { - oldOutlierDetection := envconfig.XDSOutlierDetection - envconfig.XDSOutlierDetection = true - defer func() { - envconfig.XDSOutlierDetection = oldOutlierDetection - }() - gotConfig, _, _ := buildPriorityConfig([]priorityConfig{ { // EDS - OD config should be the top level for both of the EDS diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 8e54a4a10d5e..062a8e5e48d2 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/envconfig" @@ -57,13 +56,6 @@ func init() { if envconfig.XDSOutlierDetection { balancer.Register(bb{}) } - // TODO: Remove these once the Outlier Detection env var is removed. 
- internal.RegisterOutlierDetectionBalancerForTesting = func() { - balancer.Register(bb{}) - } - internal.UnregisterOutlierDetectionBalancerForTesting = func() { - internal.BalancerUnregister(Name) - } } type bb struct{} diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go index 151684ac7fd4..15e85fdb4661 100644 --- a/xds/internal/balancer/outlierdetection/balancer_test.go +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -33,7 +33,6 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" @@ -300,17 +299,13 @@ type subConnWithState struct { func setup(t *testing.T) (*outlierDetectionBalancer, *testutils.TestClientConn, func()) { t.Helper() - internal.RegisterOutlierDetectionBalancerForTesting() builder := balancer.Get(Name) if builder == nil { t.Fatalf("balancer.Get(%q) returned nil", Name) } tcc := testutils.NewTestClientConn(t) odB := builder.Build(tcc, balancer.BuildOptions{}) - return odB.(*outlierDetectionBalancer), tcc, func() { - odB.Close() - internal.UnregisterOutlierDetectionBalancerForTesting() - } + return odB.(*outlierDetectionBalancer), tcc, odB.Close } type emptyChildConfig struct { @@ -361,7 +356,6 @@ func (s) TestChildBasicOperations(t *testing.T) { }) od, tcc, _ := setup(t) - defer internal.UnregisterOutlierDetectionBalancerForTesting() // This first config update should cause a child to be built and forwarded // it's first update. 
diff --git a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go index a1987bf98a99..75e084723fce 100644 --- a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go +++ b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go @@ -199,8 +199,6 @@ func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - internal.RegisterOutlierDetectionBalancerForTesting() - defer internal.UnregisterOutlierDetectionBalancerForTesting() addresses, cancel := setupBackends(t) defer cancel() @@ -265,8 +263,6 @@ func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { // requiring counting RPC's, the Outlier Detection Balancer should start // ejecting any upstreams as specified in the configuration. func (s) TestNoopConfiguration(t *testing.T) { - internal.RegisterOutlierDetectionBalancerForTesting() - defer internal.UnregisterOutlierDetectionBalancerForTesting() addresses, cancel := setupBackends(t) defer cancel() diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 67e057a6c400..fa8f4d4bbcc8 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -1690,9 +1690,6 @@ func (s) TestUnmarshalCluster(t *testing.T) { } func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { - oldOutlierDetectionSupportEnv := envconfig.XDSOutlierDetection - envconfig.XDSOutlierDetection = true - defer func() { envconfig.XDSOutlierDetection = oldOutlierDetectionSupportEnv }() odToClusterProto := func(od *v3clusterpb.OutlierDetection) *v3clusterpb.Cluster { // Cluster parsing doesn't fail with respect to fields orthogonal to // outlier detection. 
From be4b63b1fcab082f7e19b540abfbbaf13958addd Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 3 Oct 2022 08:37:41 -0700 Subject: [PATCH 620/998] test: minor test cleanup (#5679) --- test/authority_test.go | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/test/authority_test.go b/test/authority_test.go index 8148cd2347ea..c841c64736fb 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -98,10 +98,11 @@ type authorityTest struct { var authorityTests = []authorityTest{ { - name: "UnixRelative", - address: "sock.sock", - target: "unix:sock.sock", - authority: "localhost", + name: "UnixRelative", + address: "sock.sock", + target: "unix:sock.sock", + authority: "localhost", + dialTargetWant: "unix:sock.sock", }, { name: "UnixAbsolute", @@ -111,10 +112,11 @@ var authorityTests = []authorityTest{ dialTargetWant: "unix:///tmp/sock.sock", }, { - name: "UnixAbsoluteAlternate", - address: "/tmp/sock.sock", - target: "unix:///tmp/sock.sock", - authority: "localhost", + name: "UnixAbsoluteAlternate", + address: "/tmp/sock.sock", + target: "unix:///tmp/sock.sock", + authority: "localhost", + dialTargetWant: "unix:///tmp/sock.sock", }, { name: "UnixPassthrough", @@ -148,9 +150,6 @@ func (s) TestUnix(t *testing.T) { func (s) TestUnixCustomDialer(t *testing.T) { for _, test := range authorityTests { t.Run(test.name+"WithDialer", func(t *testing.T) { - if test.dialTargetWant == "" { - test.dialTargetWant = test.target - } dialer := func(ctx context.Context, address string) (net.Conn, error) { if address != test.dialTargetWant { return nil, fmt.Errorf("expected target %v in custom dialer, instead got %v", test.dialTargetWant, address) From 1451c62ccdce325a1f2fdbcc90db89518a36dc06 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 4 Oct 2022 19:29:30 +0200 Subject: [PATCH 621/998] internal/transport: optimize grpc-message encoding/decoding (#5654) --- internal/transport/http_util.go | 21 ++++++++++----------- 
internal/transport/http_util_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index d632dc4e812a..2c601a864d99 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -20,7 +20,6 @@ package transport import ( "bufio" - "bytes" "encoding/base64" "fmt" "io" @@ -251,13 +250,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -266,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
@@ -291,23 +290,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { diff --git a/internal/transport/http_util_test.go b/internal/transport/http_util_test.go index bbd53180471e..cc7807670b62 100644 --- a/internal/transport/http_util_test.go +++ b/internal/transport/http_util_test.go @@ -214,3 +214,27 @@ func (s) TestParseDialTarget(t *testing.T) { } } } + +func BenchmarkDecodeGrpcMessage(b *testing.B) { + input := "Hello, %E4%B8%96%E7%95%8C" + want := "Hello, 世界" + b.ReportAllocs() + for i := 0; i < b.N; i++ { + got := decodeGrpcMessage(input) + if got != want { + b.Fatalf("decodeGrpcMessage(%q) = %s, want %s", input, got, want) + } + } +} + +func BenchmarkEncodeGrpcMessage(b *testing.B) { + input := "Hello, 世界" + want := "Hello, %E4%B8%96%E7%95%8C" + b.ReportAllocs() + for i := 0; i < b.N; i++ { + got := encodeGrpcMessage(input) + if got != want { + b.Fatalf("encodeGrpcMessage(%q) = %s, want %s", input, got, want) + } + } +} From 202d355a9bb06cd914345afc994d0b213a34427c Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 4 Oct 2022 14:17:45 -0700 Subject: [PATCH 622/998] Change version to 1.51.0-dev (#5687) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index ac8b4bf8f166..4a717069f310 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.50.0-dev" +const Version = "1.51.0-dev" From 12db695f1648ce84ad74c5240730fb9401f67844 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 4 Oct 2022 15:13:23 -0700 Subject: [PATCH 623/998] grpc: restrict status codes from control plane (gRFC A54) (#5653) --- credentials/credentials.go | 20 +-- internal/status/status.go | 10 ++ internal/transport/http2_client.go | 16 +- picker_wrapper.go | 7 +- stream.go | 8 + test/control_plane_status_test.go | 234 +++++++++++++++++++++++++++++ test/creds_test.go | 21 ++- 7 files changed, 296 insertions(+), 20 deletions(-) create mode 100644 test/control_plane_status_test.go diff --git a/credentials/credentials.go b/credentials/credentials.go index 96ff1877e754..5feac3aa0e41 100644 --- a/credentials/credentials.go +++ b/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. 
If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/internal/status/status.go b/internal/status/status.go index e5c6513edd13..b0ead4f54f82 100644 --- a/internal/status/status.go +++ b/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. 
+func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 5c2f35b24e75..1d8c9be4fa4d 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -40,6 +40,7 @@ import ( icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -589,7 +590,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -618,7 +623,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { diff --git a/picker_wrapper.go b/picker_wrapper.go index 843633c910a1..a5d5516ee060 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -129,8 +130,12 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. + // First restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other diff --git a/stream.go b/stream.go index 446a91e323ee..b678596949b0 100644 --- a/stream.go +++ b/stream.go @@ -39,6 +39,7 @@ import ( imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } diff --git a/test/control_plane_status_test.go b/test/control_plane_status_test.go new file mode 100644 index 000000000000..be191f456b2f --- /dev/null +++ b/test/control_plane_status_test.go @@ -0,0 +1,234 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +func (s) TestConfigSelectorStatusCodes(t *testing.T) { + testCases := []struct { + name string + csErr error + want error + }{{ + name: "legal status code", + csErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + csErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + ss.R = manual.NewBuilderWithScheme("confSel") + + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + state := iresolver.SetConfigSelector(resolver.State{ + Addresses: []resolver.Address{{Addr: ss.Address}}, + ServiceConfig: parseServiceConfig(t, ss.R, "{}"), + }, funcConfigSelector{ + f: func(i iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return nil, tc.csErr + }, + }) + ss.R.UpdateState(state) // Blocks until config selector is applied + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := 
ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != status.Code(tc.want) || !strings.Contains(err.Error(), status.Convert(tc.want).Message()) { + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", err, tc.want) + } + }) + } +} + +func (s) TestPickerStatusCodes(t *testing.T) { + testCases := []struct { + name string + pickerErr error + want error + }{{ + name: "legal status code", + pickerErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + pickerErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // Create a stub balancer that creates a picker that always returns + // an error. + sbf := stub.BalancerFuncs{ + UpdateClientConnState: func(d *stub.BalancerData, _ balancer.ClientConnState) error { + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(tc.pickerErr), + }) + return nil + }, + } + stub.Register("testPickerStatusCodesBalancer", sbf) + + ss.NewServiceConfig(`{"loadBalancingConfig": [{"testPickerStatusCodesBalancer":{}}] }`) + + // Make calls until pickerErr is received. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var lastErr error + for ctx.Err() == nil { + if _, lastErr = ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(lastErr) == status.Code(tc.want) && strings.Contains(lastErr.Error(), status.Convert(tc.want).Message()) { + // Success! 
+ return + } + time.Sleep(time.Millisecond) + } + + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", lastErr, tc.want) + }) + } +} + +func (s) TestCallCredsFromDialOptionsStatusCodes(t *testing.T) { + testCases := []struct { + name string + credsErr error + want error + }{{ + name: "legal status code", + credsErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + credsErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + errChan := make(chan error, 1) + creds := &testPerRPCCredentials{errChan: errChan} + + if err := ss.Start(nil, grpc.WithPerRPCCredentials(creds)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + errChan <- tc.credsErr + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != status.Code(tc.want) || !strings.Contains(err.Error(), status.Convert(tc.want).Message()) { + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", err, tc.want) + } + }) + } +} + +func (s) TestCallCredsFromCallOptionsStatusCodes(t *testing.T) { + testCases := []struct { + name string + credsErr error + want error + }{{ + name: "legal status code", + credsErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + credsErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t 
*testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + errChan := make(chan error, 1) + creds := &testPerRPCCredentials{errChan: errChan} + + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + errChan <- tc.credsErr + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(creds)); status.Code(err) != status.Code(tc.want) || !strings.Contains(err.Error(), status.Convert(tc.want).Message()) { + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", err, tc.want) + } + }) + } +} diff --git a/test/creds_test.go b/test/creds_test.go index d886220d8a46..5323affa7903 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -68,7 +68,7 @@ func (c *testCredsBundle) PerRPCCredentials() credentials.PerRPCCredentials { if c.mode == bundleTLSOnly { return nil } - return testPerRPCCredentials{} + return testPerRPCCredentials{authdata: authdata} } func (c *testCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) { @@ -284,10 +284,17 @@ var ( } ) -type testPerRPCCredentials struct{} +type testPerRPCCredentials struct { + authdata map[string]string + errChan chan error +} func (cr testPerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return authdata, nil + var err error + if cr.errChan != nil { + err = <-cr.errChan + } + return cr.authdata, err } func (cr testPerRPCCredentials) RequireTransportSecurity() bool { @@ -320,7 +327,7 @@ func (s) TestPerRPCCredentialsViaDialOptions(t *testing.T) { func testPerRPCCredentialsViaDialOptions(t *testing.T, e env) { te := newTest(t, e) te.tapHandle = authHandle - te.perRPCCreds = testPerRPCCredentials{} + te.perRPCCreds = testPerRPCCredentials{authdata: authdata} 
te.startServer(&testServer{security: e.security}) defer te.tearDown() @@ -349,7 +356,7 @@ func testPerRPCCredentialsViaCallOptions(t *testing.T, e env) { tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{authdata: authdata})); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } @@ -362,7 +369,7 @@ func (s) TestPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T) { func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) { te := newTest(t, e) - te.perRPCCreds = testPerRPCCredentials{} + te.perRPCCreds = testPerRPCCredentials{authdata: authdata} // When credentials are provided via both dial options and call options, // we apply both sets. te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { @@ -391,7 +398,7 @@ func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) { tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{authdata: authdata})); err != nil { t.Fatalf("Test failed. 
Reason: %v", err) } } From 5fc798be17db9565d4275386c4ebb42b67271248 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 6 Oct 2022 13:36:05 -0400 Subject: [PATCH 624/998] Add binary logger option for client and server (#5675) * Add binary logger option for client and server --- default_dial_option_server_option_test.go | 8 +- dialoptions.go | 15 ++- gcp/observability/opencensus.go | 8 +- internal/internal.go | 23 ++-- server.go | 128 +++++++++++++++------- stream.go | 115 ++++++++++++------- test/end2end_test.go | 91 +++++++++++++++ 7 files changed, 296 insertions(+), 92 deletions(-) diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go index 3dc446f58b5a..eecd6b846f28 100644 --- a/default_dial_option_server_option_test.go +++ b/default_dial_option_server_option_test.go @@ -38,7 +38,7 @@ func (s) TestAddExtraDialOptions(t *testing.T) { // Set and check the DialOptions opts := []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials())} - internal.AddExtraDialOptions.(func(opt ...DialOption))(opts...) + internal.AddGlobalDialOptions.(func(opt ...DialOption))(opts...) for i, opt := range opts { if extraDialOptions[i] != opt { t.Fatalf("Unexpected extra dial option at index %d: %v != %v", i, extraDialOptions[i], opt) @@ -52,7 +52,7 @@ func (s) TestAddExtraDialOptions(t *testing.T) { cc.Close() } - internal.ClearExtraDialOptions() + internal.ClearGlobalDialOptions() if len(extraDialOptions) != 0 { t.Fatalf("Unexpected len of extraDialOptions: %d != 0", len(extraDialOptions)) } @@ -62,7 +62,7 @@ func (s) TestAddExtraServerOptions(t *testing.T) { const maxRecvSize = 998765 // Set and check the ServerOptions opts := []ServerOption{Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize)} - internal.AddExtraServerOptions.(func(opt ...ServerOption))(opts...) 
+ internal.AddGlobalServerOptions.(func(opt ...ServerOption))(opts...) for i, opt := range opts { if extraServerOptions[i] != opt { t.Fatalf("Unexpected extra server option at index %d: %v != %v", i, extraServerOptions[i], opt) @@ -75,7 +75,7 @@ func (s) TestAddExtraServerOptions(t *testing.T) { t.Fatalf("Unexpected s.opts.maxReceiveMessageSize: %d != %d", s.opts.maxReceiveMessageSize, maxRecvSize) } - internal.ClearExtraServerOptions() + internal.ClearGlobalServerOptions() if len(extraServerOptions) != 0 { t.Fatalf("Unexpected len of extraServerOptions: %d != 0", len(extraServerOptions)) } diff --git a/dialoptions.go b/dialoptions.go index 60403bc160ec..9372dc322e80 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -36,12 +37,13 @@ import ( ) func init() { - internal.AddExtraDialOptions = func(opt ...DialOption) { + internal.AddGlobalDialOptions = func(opt ...DialOption) { extraDialOptions = append(extraDialOptions, opt...) } - internal.ClearExtraDialOptions = func() { + internal.ClearGlobalDialOptions = func() { extraDialOptions = nil } + internal.WithBinaryLogger = withBinaryLogger } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -61,6 +63,7 @@ type dialOptions struct { timeout time.Duration scChan <-chan ServiceConfig authority string + binaryLogger binarylog.Logger copts transport.ConnectOptions callOptions []CallOption channelzParentID *channelz.Identifier @@ -401,6 +404,14 @@ func WithStatsHandler(h stats.Handler) DialOption { }) } +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. 
+func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl + }) +} + // FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on // non-temporary dial errors. If f is true, and dialer returns a non-temporary // error, gRPC will fail the connection to the network address and won't try to diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index ccaa6a98a42c..1bca322cc52c 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -117,8 +117,8 @@ func startOpenCensus(config *config) error { } // Only register default StatsHandlers if other things are setup correctly. - internal.AddExtraServerOptions.(func(opt ...grpc.ServerOption))(grpc.StatsHandler(&ocgrpc.ServerHandler{StartOptions: so})) - internal.AddExtraDialOptions.(func(opt ...grpc.DialOption))(grpc.WithStatsHandler(&ocgrpc.ClientHandler{StartOptions: so})) + internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(grpc.StatsHandler(&ocgrpc.ServerHandler{StartOptions: so})) + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(grpc.WithStatsHandler(&ocgrpc.ClientHandler{StartOptions: so})) logger.Infof("Enabled OpenCensus StatsHandlers for clients and servers") return nil @@ -128,8 +128,8 @@ func startOpenCensus(config *config) error { // packages if exporter was created. func stopOpenCensus() { if exporter != nil { - internal.ClearExtraDialOptions() - internal.ClearExtraServerOptions() + internal.ClearGlobalDialOptions() + internal.ClearGlobalServerOptions() // Call these unconditionally, doesn't matter if not registered, will be // a noop if not registered. diff --git a/internal/internal.go b/internal/internal.go index 87cacb5b808c..fd0ee3dcaf1e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -63,24 +63,31 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. 
DrainServerTransports interface{} // func(*grpc.Server, string) - // AddExtraServerOptions adds an array of ServerOption that will be + // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddExtraServerOptions interface{} // func(opt ...ServerOption) - // ClearExtraServerOptions clears the array of extra ServerOption. This + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. - ClearExtraServerOptions func() - // AddExtraDialOptions adds an array of DialOption that will be effective + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddExtraDialOptions interface{} // func(opt ...DialOption) - // ClearExtraDialOptions clears the array of extra DialOption. This + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. - ClearExtraDialOptions func() + ClearGlobalDialOptions func() // JoinServerOptions combines the server options passed as arguments into a // single server option. JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. 
+ BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from // the supported environment variables. The resolver.Builder is meant to be diff --git a/server.go b/server.go index 6ef3df67d9e5..e8143492c25e 100644 --- a/server.go +++ b/server.go @@ -73,12 +73,13 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } - internal.AddExtraServerOptions = func(opt ...ServerOption) { + internal.AddGlobalServerOptions = func(opt ...ServerOption) { extraServerOptions = opt } - internal.ClearExtraServerOptions = func() { + internal.ClearGlobalServerOptions = func() { extraServerOptions = nil } + internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption } @@ -156,6 +157,7 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle statsHandlers []stats.Handler maxConcurrentStreams uint32 @@ -469,6 +471,14 @@ func StatsHandler(h stats.Handler) ServerOption { }) } +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl + }) +} + // UnknownServiceHandler returns a ServerOption that allows for adding a custom // unknown service handler. The provided method is a bidi-streaming RPC service // handler that will be invoked instead of returning the "unimplemented" gRPC @@ -1216,9 +1226,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1238,7 +1255,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1278,7 +1297,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if len(shs) != 0 || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1304,10 +1323,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Length: len(d), }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1331,18 +1353,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. 
Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return appErr } @@ -1368,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1399,11 +1435,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? 
err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return err } @@ -1516,8 +1555,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1536,7 +1582,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1602,11 +1650,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } // TODO: Should we log an error from WriteStatus here and below? 
return appErr @@ -1617,11 +1668,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } return err } diff --git a/stream.go b/stream.go index b678596949b0..b10ab1ab6324 100644 --- a/stream.go +++ b/stream.go @@ -309,7 +309,14 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + } // Pick the transport to use and create a new stream on the transport. // Assign cs.attempt upon success. @@ -330,7 +337,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -344,7 +351,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } if desc != unaryStreamDesc { @@ -488,7 +497,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. 
Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -752,7 +761,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Only log if binary log is on and header has not been logged. logEntry := &binarylog.ServerHeader{ OnClientSide: true, @@ -762,8 +771,10 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } return m, nil } @@ -837,38 +848,44 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, Message: data, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cm) + } } return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. 
cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -881,7 +898,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } } return err @@ -902,10 +921,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(chc) + } } // We never returned an error here for reasons. return nil @@ -938,10 +960,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. 
- if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -1013,6 +1038,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -1461,7 +1487,7 @@ type serverStream struct { statsHandler []stats.Handler - binlog binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1495,12 +1521,15 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } err = ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } return err } @@ -1557,17 +1586,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(sm) + } } if len(ss.statsHandler) != 0 { for _, sh := range 
ss.statsHandler { @@ -1606,13 +1641,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if len(ss.statsHandler) != 0 || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(chc) + } } return err } @@ -1633,10 +1671,13 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { }) } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(cm) + } } return nil } diff --git a/test/end2end_test.go b/test/end2end_test.go index 725bcdb641eb..ecf5b5e303ed 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -57,6 +57,7 @@ import ( healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" @@ -8184,3 +8185,93 @@ func (s) TestGoAwayStreamIDSmallerThanCreatedStreams(t *testing.T) { ct.writeGoAway(1, http2.ErrCodeNo, []byte{}) goAwayWritten.Fire() } + +type mockBinaryLogger struct { + mml *mockMethodLogger +} + +func newMockBinaryLogger() *mockBinaryLogger { + return &mockBinaryLogger{ + mml: &mockMethodLogger{}, + } +} + +func (mbl *mockBinaryLogger) GetMethodLogger(string) binarylog.MethodLogger { + return mbl.mml +} + +type mockMethodLogger struct { + events uint64 +} + +func (mml *mockMethodLogger) Log(binarylog.LogEntryConfig) { 
+ atomic.AddUint64(&mml.events, 1) +} + +// TestGlobalBinaryLoggingOptions tests the binary logging options for client +// and server side. The test configures a binary logger to be plumbed into every +// created ClientConn and server. It then makes a unary RPC call, and a +// streaming RPC call. A certain amount of logging calls should happen as a +// result of the stream operations on each of these calls. +func (s) TestGlobalBinaryLoggingOptions(t *testing.T) { + csbl := newMockBinaryLogger() + ssbl := newMockBinaryLogger() + + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl binarylog.Logger) grpc.DialOption)(csbl)) + internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl binarylog.Logger) grpc.ServerOption)(ssbl)) + defer func() { + internal.ClearGlobalDialOptions() + internal.ClearGlobalServerOptions() + }() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + } + }, + } + + // No client or server options specified, because should pick up configured + // global options. + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Make a Unary RPC. This should cause Log calls on the MethodLogger. 
+ if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + if csbl.mml.events != 5 { + t.Fatalf("want 5 client side binary logging events, got %v", csbl.mml.events) + } + if ssbl.mml.events != 5 { + t.Fatalf("want 5 server side binary logging events, got %v", ssbl.mml.events) + } + + // Make a streaming RPC. This should cause Log calls on the MethodLogger. + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + if csbl.mml.events != 9 { + t.Fatalf("want 9 client side binary logging events, got %v", csbl.mml.events) + } + if ssbl.mml.events != 8 { + t.Fatalf("want 8 server side binary logging events, got %v", ssbl.mml.events) + } +} From c03925db8d3c8adf6c6f708b5f9ef054de019749 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 6 Oct 2022 13:23:45 -0700 Subject: [PATCH 625/998] priority: release references to child policies which are removed (#5682) --- internal/balancergroup/balancergroup.go | 30 ++++- internal/balancergroup/balancergroup_test.go | 10 +- .../balancer/clusterimpl/clusterimpl.go | 1 + .../clusterresolver/clusterresolver.go | 1 - xds/internal/balancer/priority/balancer.go | 9 +- .../balancer/priority/balancer_child.go | 34 +++--- .../balancer/priority/balancer_priority.go | 4 + .../balancer/priority/balancer_test.go | 104 ++++++++++++------ .../balancer/priority/ignore_resolve_now.go | 35 ++---- .../priority/ignore_resolve_now_test.go | 63 +++-------- xds/internal/balancer/ringhash/ringhash.go | 4 +- 11 files changed, 163 insertions(+), 132 deletions(-) diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index 3daad14473ee..70edbb17c0c0 100644 --- a/internal/balancergroup/balancergroup.go +++ 
b/internal/balancergroup/balancergroup.go @@ -97,7 +97,7 @@ func (sbc *subBalancerWrapper) startBalancer() { if sbc.balancer == nil { sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) } - sbc.group.logger.Infof("Creating child policy of type %v", sbc.builder.Name()) + sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id) sbc.balancer.SwitchTo(sbc.builder) if sbc.ccState != nil { sbc.balancer.UpdateClientConnState(*sbc.ccState) @@ -298,10 +298,22 @@ func (bg *BalancerGroup) Start() { bg.outgoingMu.Unlock() } -// Add adds a balancer built by builder to the group, with given id. -func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { +// AddWithClientConn adds a balancer with the given id to the group. The +// balancer is built with a balancer builder registered with balancerName. The +// given ClientConn is passed to the newly built balancer instead of the +// onepassed to balancergroup.New(). +// +// TODO: Get rid of the existing Add() API and replace it with this. +func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error { + bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id) + builder := balancer.Get(balancerName) + if builder == nil { + return fmt.Errorf("unregistered balancer name %q", balancerName) + } + // Store data in static map, and then check to see if bg is started. bg.outgoingMu.Lock() + defer bg.outgoingMu.Unlock() var sbc *subBalancerWrapper // If outgoingStarted is true, search in the cache. Otherwise, cache is // guaranteed to be empty, searching is unnecessary. 
@@ -326,7 +338,7 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { } if sbc == nil { sbc = &subBalancerWrapper{ - ClientConn: bg.cc, + ClientConn: cc, id: id, group: bg, builder: builder, @@ -343,11 +355,18 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { sbc.updateBalancerStateWithCachedPicker() } bg.idToBalancerConfig[id] = sbc - bg.outgoingMu.Unlock() + return nil +} + +// Add adds a balancer built by builder to the group, with given id. +func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { + bg.AddWithClientConn(id, builder.Name(), bg.cc) } // UpdateBuilder updates the builder for a current child, starting the Graceful // Switch process for that child. +// +// TODO: update this API to take the name of the new builder instead. func (bg *BalancerGroup) UpdateBuilder(id string, builder balancer.Builder) { bg.outgoingMu.Lock() // This does not deal with the balancer cache because this call should come @@ -369,6 +388,7 @@ func (bg *BalancerGroup) UpdateBuilder(id string, builder balancer.Builder) { // closed after timeout. Cleanup work (closing sub-balancer and removing // subconns) will be done after timeout. func (bg *BalancerGroup) Remove(id string) { + bg.logger.Infof("Removing child policy for locality %q", id) bg.outgoingMu.Lock() if sbToRemove, ok := bg.idToBalancerConfig[id]; ok { if bg.outgoingStarted { diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index d962faa0ab82..57ab28895f1c 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -374,11 +374,19 @@ func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing. } } -// Wrap the rr builder, so it behaves the same, but has a different pointer. +// Wrap the rr builder, so it behaves the same, but has a different name. 
type noopBalancerBuilderWrapper struct { balancer.Builder } +func init() { + balancer.Register(&noopBalancerBuilderWrapper{Builder: rrBuilder}) +} + +func (*noopBalancerBuilderWrapper) Name() string { + return "noopBalancerBuilderWrapper" +} + // After removing a sub-balancer, re-add with same ID, but different balancer // builder. Old subconns should be removed, and new subconns should be created. func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) { diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 0a6cf6ca9065..b79b941ec79a 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -331,6 +331,7 @@ func (b *clusterImplBalancer) Close() { if b.childLB != nil { b.childLB.Close() b.childLB = nil + b.childState = balancer.State{} } <-b.done.Done() b.logger.Infof("Shutdown") diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index 9b373fb36970..b4a37f60c011 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -194,7 +194,6 @@ func (b *clusterResolverBalancer) handleWatchUpdate(update *resourceUpdate) { return } - b.logger.Infof("resource update: %+v", pretty.ToJSON(update.priorities)) b.watchUpdateReceived = true b.priorities = update.priorities diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index b5cace684960..17f57a576d33 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -127,7 +127,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // This is a new child, add it to the children list. But note that // the balancer isn't built, because this child can be a low // priority. 
If necessary, it will be built when syncing priorities. - cb := newChildBalancer(name, b, bb) + cb := newChildBalancer(name, b, bb.Name(), b.cc) cb.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, @@ -141,9 +141,9 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // The balancing policy name is changed, close the old child. But don't // rebuild, rebuild will happen when syncing priorities. - if currentChild.bb.Name() != bb.Name() { + if currentChild.balancerName != bb.Name() { currentChild.stop() - currentChild.updateBuilder(bb) + currentChild.updateBalancerName(bb.Name()) } // Update config and address, but note that this doesn't send the @@ -155,10 +155,11 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err Attributes: s.ResolverState.Attributes, }) } - // Remove child from children if it's not in new config. + // Cleanup resources used by children removed from the config. for name, oldChild := range b.children { if _, ok := newConfig.Children[name]; !ok { oldChild.stop() + delete(b.children, name) } } diff --git a/xds/internal/balancer/priority/balancer_child.go b/xds/internal/balancer/priority/balancer_child.go index 34bab34c915c..ea7778bb56f8 100644 --- a/xds/internal/balancer/priority/balancer_child.go +++ b/xds/internal/balancer/priority/balancer_child.go @@ -29,9 +29,11 @@ import ( ) type childBalancer struct { - name string - parent *priorityBalancer - bb *ignoreResolveNowBalancerBuilder + name string + parent *priorityBalancer + parentCC balancer.ClientConn + balancerName string + cc *ignoreResolveNowClientConn ignoreReresolutionRequests bool config serviceconfig.LoadBalancingConfig @@ -53,12 +55,14 @@ type childBalancer struct { // newChildBalancer creates a child balancer place holder, but doesn't // build/start the child balancer. 
-func newChildBalancer(name string, parent *priorityBalancer, bb balancer.Builder) *childBalancer { +func newChildBalancer(name string, parent *priorityBalancer, balancerName string, cc balancer.ClientConn) *childBalancer { return &childBalancer{ - name: name, - parent: parent, - bb: newIgnoreResolveNowBalancerBuilder(bb, false), - started: false, + name: name, + parent: parent, + parentCC: cc, + balancerName: balancerName, + cc: newIgnoreResolveNowClientConn(cc, false), + started: false, // Start with the connecting state and picker with re-pick error, so // that when a priority switch causes this child picked before it's // balancing policy is created, a re-pick will happen. @@ -69,9 +73,13 @@ func newChildBalancer(name string, parent *priorityBalancer, bb balancer.Builder } } -// updateBuilder updates builder for the child, but doesn't build. -func (cb *childBalancer) updateBuilder(bb balancer.Builder) { - cb.bb = newIgnoreResolveNowBalancerBuilder(bb, cb.ignoreReresolutionRequests) +// updateBalancerName updates balancer name for the child, but doesn't build a +// new one. The parent priority LB always closes the child policy before +// updating the balancer name, and the new balancer is built when it gets added +// to the balancergroup as part of start(). +func (cb *childBalancer) updateBalancerName(balancerName string) { + cb.balancerName = balancerName + cb.cc = newIgnoreResolveNowClientConn(cb.parentCC, cb.ignoreReresolutionRequests) } // updateConfig sets childBalancer's config and state, but doesn't send update to @@ -93,14 +101,14 @@ func (cb *childBalancer) start() { return } cb.started = true - cb.parent.bg.Add(cb.name, cb.bb) + cb.parent.bg.AddWithClientConn(cb.name, cb.balancerName, cb.cc) cb.startInitTimer() cb.sendUpdate() } // sendUpdate sends the addresses and config to the child balancer. 
func (cb *childBalancer) sendUpdate() { - cb.bb.updateIgnoreResolveNow(cb.ignoreReresolutionRequests) + cb.cc.updateIgnoreResolveNow(cb.ignoreReresolutionRequests) // TODO: return and aggregate the returned error in the parent. err := cb.parent.bg.UpdateClientConnState(cb.name, balancer.ClientConnState{ ResolverState: cb.rState, diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index c12dfe47ffea..ca6f118c5cc0 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -162,6 +162,10 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S b.logger.Warningf("priority: child balancer not found for child %v", childName) return } + if !child.started { + b.logger.Warningf("priority: ignoring update from child %q which is not in started state: %+v", childName, s) + return + } child.state = s // We start/stop the init timer of this child based on the new connectivity diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index d119076d1af9..5e29edf6698a 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -37,7 +37,10 @@ import ( "google.golang.org/grpc/resolver" ) -const defaultTestTimeout = 5 * time.Second +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 100 * time.Millisecond +) type s struct { grpctest.Tester @@ -1525,11 +1528,21 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { } } -// When the child policy's configured to ignore reresolution requests, the -// ResolveNow() calls from this child should be all ignored. +// TestPriority_IgnoreReresolutionRequest tests the case where the priority +// policy has a single child policy. 
The test verifies that ResolveNow() calls +// from the child policy are ignored based on the value of the +// IgnoreReresolutionRequests field in the configuration. func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() + // Register a stub balancer to act the child policy of the priority policy. + // Provide an init function to the stub balancer to capture the ClientConn + // passed to the child policy. + ccCh := testutils.NewChannel() + childPolicyName := t.Name() + stub.Register(childPolicyName, stub.BalancerFuncs{ + Init: func(data *stub.BalancerData) { + ccCh.Send(data.ClientConn) + }, + }) cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) @@ -1547,7 +1560,7 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { BalancerConfig: &LBConfig{ Children: map[string]*Child{ "child-0": { - Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, IgnoreReresolutionRequests: true, }, }, @@ -1557,13 +1570,14 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { t.Fatalf("failed to update ClientConn state: %v", err) } - // This is the balancer.ClientConn that the inner resolverNowBalancer is - // built with. - balancerCCI, err := resolveNowBalancerCCCh.Receive(ctx) + // Retrieve the ClientConn passed to the child policy. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := ccCh.Receive(ctx) if err != nil { - t.Fatalf("timeout waiting for ClientConn from balancer builder") + t.Fatalf("timeout waiting for ClientConn from the child policy") } - balancerCC := balancerCCI.(balancer.ClientConn) + balancerCC := val.(balancer.ClientConn) // Since IgnoreReresolutionRequests was set to true, all ResolveNow() calls // should be ignored. 
@@ -1573,7 +1587,7 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { select { case <-cc.ResolveNowCh: t.Fatalf("got unexpected ResolveNow() call") - case <-time.After(time.Millisecond * 100): + case <-time.After(defaultTestShortTimeout): } // Send another update to set IgnoreReresolutionRequests to false. @@ -1586,7 +1600,7 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { BalancerConfig: &LBConfig{ Children: map[string]*Child{ "child-0": { - Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, IgnoreReresolutionRequests: false, }, }, @@ -1606,12 +1620,38 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { } -// When the child policy's configured to ignore reresolution requests, the -// ResolveNow() calls from this child should be all ignored, from the other -// children are forwarded. +type wrappedRoundRobinBalancerBuilder struct { + name string + ccCh *testutils.Channel +} + +func (w *wrappedRoundRobinBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + w.ccCh.Send(cc) + rrBuilder := balancer.Get(roundrobin.Name) + return &wrappedRoundRobinBalancer{Balancer: rrBuilder.Build(cc, opts)} +} + +func (w *wrappedRoundRobinBalancerBuilder) Name() string { + return w.name +} + +type wrappedRoundRobinBalancer struct { + balancer.Balancer +} + +// TestPriority_IgnoreReresolutionRequestTwoChildren tests the case where the +// priority policy has two child policies, one of them has the +// IgnoreReresolutionRequests field set to true while the other one has it set +// to false. The test verifies that ResolveNow() calls from the child which is +// set to ignore reresolution requests are ignored, while calls from the other +// child are processed. 
func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() + // Register a wrapping balancer to act the child policy of the priority + // policy. The wrapping balancer builder's Build() method pushes the + // balancer.ClientConn on a channel for this test to use. + ccCh := testutils.NewChannel() + childPolicyName := t.Name() + balancer.Register(&wrappedRoundRobinBalancerBuilder{name: childPolicyName, ccCh: ccCh}) cc := testutils.NewTestClientConn(t) bb := balancer.Get(Name) @@ -1630,11 +1670,11 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { BalancerConfig: &LBConfig{ Children: map[string]*Child{ "child-0": { - Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, IgnoreReresolutionRequests: true, }, "child-1": { - Config: &internalserviceconfig.BalancerConfig{Name: resolveNowBalancerName}, + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, }, }, Priorities: []string{"child-0", "child-1"}, @@ -1643,12 +1683,14 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { t.Fatalf("failed to update ClientConn state: %v", err) } - // This is the balancer.ClientConn from p0. - balancerCCI0, err := resolveNowBalancerCCCh.Receive(ctx) + // Retrieve the ClientConn passed to the child policy from p0. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := ccCh.Receive(ctx) if err != nil { - t.Fatalf("timeout waiting for ClientConn from balancer builder 0") + t.Fatalf("timeout waiting for ClientConn from the child policy") } - balancerCC0 := balancerCCI0.(balancer.ClientConn) + balancerCC0 := val.(balancer.ClientConn) // Set p0 to transient failure, p1 will be started. 
addrs0 := <-cc.NewSubConnAddrsCh @@ -1658,14 +1700,12 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { sc0 := <-cc.NewSubConnCh pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - // This is the balancer.ClientConn from p1. - ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second) - defer cancel1() - balancerCCI1, err := resolveNowBalancerCCCh.Receive(ctx1) + // Retrieve the ClientConn passed to the child policy from p1. + val, err = ccCh.Receive(ctx) if err != nil { - t.Fatalf("timeout waiting for ClientConn from balancer builder 1") + t.Fatalf("timeout waiting for ClientConn from the child policy") } - balancerCC1 := balancerCCI1.(balancer.ClientConn) + balancerCC1 := val.(balancer.ClientConn) // Since IgnoreReresolutionRequests was set to true for p0, ResolveNow() // from p0 should all be ignored. @@ -1675,7 +1715,7 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { select { case <-cc.ResolveNowCh: t.Fatalf("got unexpected ResolveNow() call") - case <-time.After(time.Millisecond * 100): + case <-time.After(defaultTestShortTimeout): } // But IgnoreReresolutionRequests was false for p1, ResolveNow() from p1 @@ -1683,7 +1723,7 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { balancerCC1.ResolveNow(resolver.ResolveNowOptions{}) select { case <-cc.ResolveNowCh: - case <-time.After(time.Second): + case <-time.After(defaultTestShortTimeout): t.Fatalf("timeout waiting for ResolveNow()") } } diff --git a/xds/internal/balancer/priority/ignore_resolve_now.go b/xds/internal/balancer/priority/ignore_resolve_now.go index 9a9f4777269a..792ee4b3f242 100644 --- a/xds/internal/balancer/priority/ignore_resolve_now.go +++ b/xds/internal/balancer/priority/ignore_resolve_now.go @@ -25,46 +25,31 @@ import ( "google.golang.org/grpc/resolver" ) -type ignoreResolveNowBalancerBuilder struct { - balancer.Builder +// 
ignoreResolveNowClientConn wraps a balancer.ClientConn and overrides the +// ResolveNow() method to ignore those calls if the ignoreResolveNow bit is set. +type ignoreResolveNowClientConn struct { + balancer.ClientConn ignoreResolveNow *uint32 } -// If `ignore` is true, all `ResolveNow()` from the balancer built from this -// builder will be ignored. -// -// `ignore` can be updated later by `updateIgnoreResolveNow`, and the update -// will be propagated to all the old and new balancers built with this. -func newIgnoreResolveNowBalancerBuilder(bb balancer.Builder, ignore bool) *ignoreResolveNowBalancerBuilder { - ret := &ignoreResolveNowBalancerBuilder{ - Builder: bb, +func newIgnoreResolveNowClientConn(cc balancer.ClientConn, ignore bool) *ignoreResolveNowClientConn { + ret := &ignoreResolveNowClientConn{ + ClientConn: cc, ignoreResolveNow: new(uint32), } ret.updateIgnoreResolveNow(ignore) return ret } -func (irnbb *ignoreResolveNowBalancerBuilder) updateIgnoreResolveNow(b bool) { +func (i *ignoreResolveNowClientConn) updateIgnoreResolveNow(b bool) { if b { - atomic.StoreUint32(irnbb.ignoreResolveNow, 1) + atomic.StoreUint32(i.ignoreResolveNow, 1) return } - atomic.StoreUint32(irnbb.ignoreResolveNow, 0) + atomic.StoreUint32(i.ignoreResolveNow, 0) } -func (irnbb *ignoreResolveNowBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return irnbb.Builder.Build(&ignoreResolveNowClientConn{ - ClientConn: cc, - ignoreResolveNow: irnbb.ignoreResolveNow, - }, opts) -} - -type ignoreResolveNowClientConn struct { - balancer.ClientConn - ignoreResolveNow *uint32 -} - func (i ignoreResolveNowClientConn) ResolveNow(o resolver.ResolveNowOptions) { if atomic.LoadUint32(i.ignoreResolveNow) != 0 { return diff --git a/xds/internal/balancer/priority/ignore_resolve_now_test.go b/xds/internal/balancer/priority/ignore_resolve_now_test.go index 29b719d9e129..5a0083147888 100644 --- a/xds/internal/balancer/priority/ignore_resolve_now_test.go +++ 
b/xds/internal/balancer/priority/ignore_resolve_now_test.go @@ -23,81 +23,44 @@ import ( "testing" "time" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" ) -const resolveNowBalancerName = "test-resolve-now-balancer" - -var resolveNowBalancerCCCh = testutils.NewChannel() - -type resolveNowBalancerBuilder struct { - balancer.Builder -} - -func (r *resolveNowBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - resolveNowBalancerCCCh.Send(cc) - return r.Builder.Build(cc, opts) -} - -func (r *resolveNowBalancerBuilder) Name() string { - return resolveNowBalancerName -} - -func init() { - balancer.Register(&resolveNowBalancerBuilder{ - Builder: balancer.Get(roundrobin.Name), - }) -} - -func (s) TestIgnoreResolveNowBalancerBuilder(t *testing.T) { - resolveNowBB := balancer.Get(resolveNowBalancerName) - // Create a build wrapper, but will not ignore ResolveNow(). - ignoreResolveNowBB := newIgnoreResolveNowBalancerBuilder(resolveNowBB, false) - +func (s) TestIgnoreResolveNowClientConn(t *testing.T) { cc := testutils.NewTestClientConn(t) - tb := ignoreResolveNowBB.Build(cc, balancer.BuildOptions{}) - defer tb.Close() + ignoreCC := newIgnoreResolveNowClientConn(cc, false) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + // Call ResolveNow() on the CC, it should be forwarded. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // This is the balancer.ClientConn that the inner resolverNowBalancer is - // built with. - balancerCCI, err := resolveNowBalancerCCCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout waiting for ClientConn from balancer builder") - } - balancerCC := balancerCCI.(balancer.ClientConn) - // Call ResolveNow() on the CC, it should be forwarded. 
- balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + ignoreCC.ResolveNow(resolver.ResolveNowOptions{}) select { case <-cc.ResolveNowCh: - case <-time.After(time.Second): - t.Fatalf("timeout waiting for ResolveNow()") + case <-ctx.Done(): + t.Fatalf("Timeout waiting for ResolveNow()") } // Update ignoreResolveNow to true, call ResolveNow() on the CC, they should // all be ignored. - ignoreResolveNowBB.updateIgnoreResolveNow(true) + ignoreCC.updateIgnoreResolveNow(true) for i := 0; i < 5; i++ { - balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + ignoreCC.ResolveNow(resolver.ResolveNowOptions{}) } select { case <-cc.ResolveNowCh: t.Fatalf("got unexpected ResolveNow() call") - case <-time.After(time.Millisecond * 100): + case <-time.After(defaultTestShortTimeout): } // Update ignoreResolveNow to false, new ResolveNow() calls should be // forwarded. - ignoreResolveNowBB.updateIgnoreResolveNow(false) - balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + ignoreCC.updateIgnoreResolveNow(false) + ignoreCC.ResolveNow(resolver.ResolveNowOptions{}) select { case <-cc.ResolveNowCh: - case <-time.After(time.Second): + case <-ctx.Done(): t.Fatalf("timeout waiting for ResolveNow()") } } diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index 59ccd0127a2a..eaa4d2638dd9 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -441,7 +441,9 @@ func (b *ringhashBalancer) regeneratePicker() { b.picker = newPicker(b.ring, b.logger) } -func (b *ringhashBalancer) Close() {} +func (b *ringhashBalancer) Close() { + b.logger.Infof("Shutdown") +} func (b *ringhashBalancer) ExitIdle() { // ExitIdle implementation is a no-op because connections are either From c672451950653990bd607c8ba08733d6f36d85fc Mon Sep 17 00:00:00 2001 From: Ernest Nguyen <58267404+erni27@users.noreply.github.com> Date: Mon, 10 Oct 2022 21:48:01 +0200 Subject: [PATCH 626/998] xds/xdsclient: add sum 
of EDS locality weights check (#5703) --- .../xdsclient/xdsresource/unmarshal_eds.go | 16 +++++++++++----- .../xdsclient/xdsresource/unmarshal_eds_test.go | 12 ++++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index 7d4b89dc9dc1..15b0d88f9f1f 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -19,6 +19,7 @@ package xdsresource import ( "fmt" + "math" "net" "strconv" @@ -118,6 +119,7 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.Pr ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) } priorities := make(map[uint32]map[string]bool) + sumOfWeights := make(map[uint32]uint64) for _, locality := range m.Endpoints { l := locality.GetLocality() if l == nil { @@ -128,17 +130,21 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.Pr logger.Warningf("Ignoring locality %s with weight 0", pretty.ToJSON(l)) continue } - lid := internal.LocalityID{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } priority := locality.GetPriority() + sumOfWeights[priority] += uint64(weight) + if sumOfWeights[priority] > math.MaxUint32 { + return EndpointsUpdate{}, fmt.Errorf("sum of weights of localities at the same priority %d exceeded maximal value", priority) + } localitiesWithPriority := priorities[priority] if localitiesWithPriority == nil { localitiesWithPriority = make(map[string]bool) priorities[priority] = localitiesWithPriority } + lid := internal.LocalityID{ + Region: l.Region, + Zone: l.Zone, + SubZone: l.SubZone, + } lidStr, _ := lid.ToString() if localitiesWithPriority[lidStr] { return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go 
b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index 28ceb11c6e16..02718e09ddaf 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -99,6 +99,18 @@ func (s) TestEDSParseRespProto(t *testing.T) { }(), want: EndpointsUpdate{}, }, + { + name: "max sum of weights at the same priority exceeded", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-2", 4294967295, 1, []string{"addr2:159"}, nil) + clab0.addLocality("locality-3", 1, 1, []string{"addr2:88"}, nil) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, { name: "good", m: func() *v3endpointpb.ClusterLoadAssignment { From 7b817b4d182b8f17e9765f51812cd4f168aca09f Mon Sep 17 00:00:00 2001 From: Ronak Jain Date: Wed, 12 Oct 2022 05:07:02 +0530 Subject: [PATCH 627/998] client: set grpc-accept-encoding to full list of registered compressors (#5541) --- encoding/encoding.go | 3 ++ internal/envconfig/envconfig.go | 8 ++- internal/grpcutil/compressor.go | 47 +++++++++++++++++ internal/grpcutil/compressor_test.go | 46 +++++++++++++++++ internal/transport/http2_client.go | 17 +++++- test/end2end_test.go | 77 ++++++++++++++++++++++++++++ 6 files changed, 195 insertions(+), 3 deletions(-) create mode 100644 internal/grpcutil/compressor.go create mode 100644 internal/grpcutil/compressor_test.go diff --git a/encoding/encoding.go b/encoding/encoding.go index 18e530fc9024..9151eba26ac9 100644 --- a/encoding/encoding.go +++ b/encoding/encoding.go @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,7 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. 
func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) } // GetCompressor returns Compressor for the given compressor name. diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index 6f0272543110..7edd196bd3da 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -25,11 +25,15 @@ import ( ) const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" ) var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") ) diff --git a/internal/grpcutil/compressor.go b/internal/grpcutil/compressor.go new file mode 100644 index 000000000000..9f4090967980 --- /dev/null +++ b/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/internal/grpcutil/compressor_test.go b/internal/grpcutil/compressor_test.go new file mode 100644 index 000000000000..0d639422a9a0 --- /dev/null +++ b/internal/grpcutil/compressor_test.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpcutil + +import ( + "testing" + + "google.golang.org/grpc/internal/envconfig" +) + +func TestRegisteredCompressors(t *testing.T) { + defer func(c []string) { RegisteredCompressorNames = c }(RegisteredCompressorNames) + defer func(v bool) { envconfig.AdvertiseCompressors = v }(envconfig.AdvertiseCompressors) + RegisteredCompressorNames = []string{"gzip", "snappy"} + tests := []struct { + desc string + enabled bool + want string + }{ + {desc: "compressor_ad_disabled", enabled: false, want: ""}, + {desc: "compressor_ad_enabled", enabled: true, want: "gzip,snappy"}, + } + for _, tt := range tests { + envconfig.AdvertiseCompressors = tt.enabled + compressors := RegisteredCompressors() + if compressors != tt.want { + t.Fatalf("Unexpected compressors got:%s, want:%s", compressors, tt.want) + } + } +} diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 1d8c9be4fa4d..5251e28d7349 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -110,6 +110,7 @@ type http2Client struct { streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables @@ -300,6 +301,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. 
cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -508,9 +510,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. 
diff --git a/test/end2end_test.go b/test/end2end_test.go index ecf5b5e303ed..165cf19b9877 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -3250,6 +3250,7 @@ func testMetadataUnaryRPC(t *testing.T, e env) { delete(header, "date") // the Date header is also optional delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") } if !reflect.DeepEqual(header, testMetadata) { t.Fatalf("Received header metadata %v, want %v", header, testMetadata) @@ -3289,6 +3290,7 @@ func testMetadataOrderUnaryRPC(t *testing.T, e env) { delete(header, "date") // the Date header is also optional delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") } if !reflect.DeepEqual(header, newMetadata) { @@ -3401,6 +3403,8 @@ func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") + expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3445,6 +3449,7 @@ func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3488,6 +3493,7 @@ func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3528,6 +3534,7 @@ func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) { } delete(header, "user-agent") 
delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3591,6 +3598,7 @@ func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3651,6 +3659,7 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3982,6 +3991,7 @@ func testMetadataStreamingRPC(t *testing.T, e env) { delete(headerMD, "trailer") // ignore if present delete(headerMD, "user-agent") delete(headerMD, "content-type") + delete(headerMD, "grpc-accept-encoding") if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#1 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) } @@ -3990,6 +4000,7 @@ func testMetadataStreamingRPC(t *testing.T, e env) { delete(headerMD, "trailer") // ignore if present delete(headerMD, "user-agent") delete(headerMD, "content-type") + delete(headerMD, "grpc-accept-encoding") if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#2 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) } @@ -5432,6 +5443,72 @@ func (s) TestForceServerCodec(t *testing.T) { } } +// renameCompressor is a grpc.Compressor wrapper that allows customizing the +// Type() of another compressor. 
+type renameCompressor struct { + grpc.Compressor + name string +} + +func (r *renameCompressor) Type() string { return r.name } + +// renameDecompressor is a grpc.Decompressor wrapper that allows customizing the +// Type() of another Decompressor. +type renameDecompressor struct { + grpc.Decompressor + name string +} + +func (r *renameDecompressor) Type() string { return r.name } + +func (s) TestClientForwardsGrpcAcceptEncodingHeader(t *testing.T) { + wantGrpcAcceptEncodingCh := make(chan []string, 1) + defer close(wantGrpcAcceptEncodingCh) + + compressor := renameCompressor{Compressor: grpc.NewGZIPCompressor(), name: "testgzip"} + decompressor := renameDecompressor{Decompressor: grpc.NewGZIPDecompressor(), name: "testgzip"} + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Internal, "no metadata in context") + } + if got, want := md["grpc-accept-encoding"], <-wantGrpcAcceptEncodingCh; !reflect.DeepEqual(got, want) { + return nil, status.Errorf(codes.Internal, "got grpc-accept-encoding=%q; want [%q]", got, want) + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.RPCDecompressor(&decompressor)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantGrpcAcceptEncodingCh <- []string{"gzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + wantGrpcAcceptEncodingCh <- []string{"gzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.UseCompressor("gzip")); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + // Use compressor directly which is not registered via + // encoding.RegisterCompressor. 
+ if err := ss.StartClient(grpc.WithCompressor(&compressor)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + wantGrpcAcceptEncodingCh <- []string{"gzip,testgzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } +} + func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { const mdkey = "somedata" From e81d0a276fb35f2a7846900ab3fd8047ae241f49 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 11 Oct 2022 16:37:38 -0700 Subject: [PATCH 628/998] xdsclient: improve LDS watchers test (#5691) --- xds/internal/xdsclient/client_new.go | 4 +- xds/internal/xdsclient/dump_test.go | 8 +- .../xdsclient/e2e_test/lds_watchers_test.go | 187 ++++++++++++++---- xds/internal/xdsclient/loadreport_test.go | 2 +- 4 files changed, 158 insertions(+), 43 deletions(-) diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 0631d3b0fadb..5f422c84fd62 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -75,8 +75,8 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i // Testing Only // // This function should ONLY be used for testing purposes. 
-func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { - cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) +func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, authorityIdleTimeout time.Duration) (XDSClient, error) { + cl, err := newWithConfig(config, watchExpiryTimeout, authorityIdleTimeout) if err != nil { return nil, err } diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go index 6a1729675f8f..165d608d3312 100644 --- a/xds/internal/xdsclient/dump_test.go +++ b/xds/internal/xdsclient/dump_test.go @@ -78,7 +78,7 @@ func (s) TestLDSConfigDump(t *testing.T) { Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: xdstestutils.EmptyNodeProtoV2, }, - }, defaultTestWatchExpiryTimeout) + }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create client: %v", err) } @@ -194,7 +194,7 @@ func (s) TestRDSConfigDump(t *testing.T) { Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: xdstestutils.EmptyNodeProtoV2, }, - }, defaultTestWatchExpiryTimeout) + }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create client: %v", err) } @@ -310,7 +310,7 @@ func (s) TestCDSConfigDump(t *testing.T) { Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: xdstestutils.EmptyNodeProtoV2, }, - }, defaultTestWatchExpiryTimeout) + }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create client: %v", err) } @@ -412,7 +412,7 @@ func (s) TestEDSConfigDump(t *testing.T) { Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), NodeProto: xdstestutils.EmptyNodeProtoV2, }, - }, defaultTestWatchExpiryTimeout) + }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create client: %v", err) } diff --git 
a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index 2a8951c51d28..1994cb1e3ba6 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -25,25 +25,30 @@ import ( "testing" "time" + "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/envoyproxy/go-control-plane/pkg/wellknown" - _ "google.golang.org/grpc/xds" // To ensure internal.NewXDSResolverWithConfigForTesting is set. - _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. 
+ _ "google.golang.org/grpc/xds" // To ensure internal.NewXDSResolverWithConfigForTesting is set. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. ) func overrideFedEnvVar(t *testing.T) { @@ -61,14 +66,15 @@ func Test(t *testing.T) { } const ( - defaultTestWatchExpiryTimeout = 500 * time.Millisecond - defaultTestTimeout = 5 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. + defaultTestWatchExpiryTimeout = 500 * time.Millisecond + defaultTestIdleAuthorityTimeout = 50 * time.Millisecond + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. ldsName = "xdsclient-test-lds-resource" rdsName = "xdsclient-test-rds-resource" ldsNameNewStyle = "xdstp:///envoy.config.listener.v3.Listener/xdsclient-test-lds-resource" - rdsNameNewStyle = "xdstp:///envoy.config.listener.v3.Listener/xdsclient-test-rds-resource" + rdsNameNewStyle = "xdstp:///envoy.config.route.v3.RouteConfiguration/xdsclient-test-rds-resource" ) // badListenerResource returns a listener resource for the given name which does @@ -93,7 +99,7 @@ func badListenerResource(name string) *v3listenerpb.Listener { // xdsClient is expected to produce an error containing this string when an // update is received containing a listener created using `badListenerResource`. -const wantNACKErr = "no RouteSpecifier" +const wantListenerNACKErr = "no RouteSpecifier" // verifyNoListenerUpdate verifies that no listener update is received on the // provided update channel, and returns an error if an update is received. @@ -148,17 +154,6 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want // // The test is run for old and new style names. 
func (s) TestLDSWatch(t *testing.T) { - overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) - defer cleanup() - - // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) - if err != nil { - t.Fatalf("Failed to create xDS client: %v", err) - } - defer client.Close() - tests := []struct { desc string resourceName string @@ -197,6 +192,17 @@ func (s) TestLDSWatch(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + // Register a watch for a listener resource and have the watch // callback push the received update on to a channel. updateCh := testutils.NewChannel() @@ -268,17 +274,6 @@ func (s) TestLDSWatch(t *testing.T) { // // The test is run for old and new style names. func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { - overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) - defer cleanup() - - // Create an xDS client with the above bootstrap contents. 
- client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) - if err != nil { - t.Fatalf("Failed to create xDS client: %v", err) - } - defer client.Close() - tests := []struct { desc string resourceName string @@ -327,6 +322,17 @@ func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + // Register two watches for the same listener resource and have the // callbacks push the received updates on to a channel. updateCh1 := testutils.NewChannel() @@ -562,6 +568,115 @@ func (s) TestLDSWatch_ResourceCaching(t *testing.T) { } } +// TestLDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an LDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + // No need to spin up a management server since we don't want the client to + // receive a response for the watch being registered by the test. + + // Create an xDS client talking to a non-existent management server. 
+ client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for a resource which is expected to fail with an error + // after the watch expiry timer fires. + updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel() + + // Wait for the watch expiry timer to fire. + <-time.After(defaultTestWatchExpiryTimeout) + + // Verify that an empty update with the expected error is received. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantErr := fmt.Errorf("watch for resource %q of type Listener timed out", ldsName) + if err := verifyListenerUpdate(ctx, updateCh, xdsresource.ListenerUpdateErrTuple{Err: wantErr}); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the +// client receives a valid LDS response for the request that it sends. The test +// verifies that the behavior associated with the expiry timer (i.e, callback +// invocation with error) does not take place. +func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(nil) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server. 
+ nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel() + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Wait for the watch expiry timer to fire, and verify that the callback is + // not invoked. 
+ <-time.After(defaultTestWatchExpiryTimeout) + if err := verifyNoListenerUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } +} + // TestLDSWatch_ResourceRemoved covers the cases where a resource being watched // is removed from the management server. The test verifies the following // scenarios: @@ -720,8 +835,8 @@ func (s) TestLDSWatch_NACKError(t *testing.T) { t.Fatalf("timeout when waiting for a listener resource from the management server: %v", err) } gotErr := u.(xdsresource.ListenerUpdateErrTuple).Err - if gotErr == nil || !strings.Contains(gotErr.Error(), wantNACKErr) { - t.Fatalf("update received with error: %v, want %q", gotErr, wantNACKErr) + if gotErr == nil || !strings.Contains(gotErr.Error(), wantListenerNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantListenerNACKErr) } } @@ -760,7 +875,7 @@ func (s) TestLDSWatch_PartialValid(t *testing.T) { }) defer ldsCancel2() - // Configure the management with server two listener resources. One of these + // Configure the management server with two listener resources. One of these // is a bad resource causing the update to be NACKed. 
resources := e2e.UpdateOptions{ NodeID: nodeID, @@ -781,8 +896,8 @@ func (s) TestLDSWatch_PartialValid(t *testing.T) { t.Fatalf("timeout when waiting for a listener resource from the management server: %v", err) } gotErr := u.(xdsresource.ListenerUpdateErrTuple).Err - if gotErr == nil || !strings.Contains(gotErr.Error(), wantNACKErr) { - t.Fatalf("update received with error: %v, want %q", gotErr, wantNACKErr) + if gotErr == nil || !strings.Contains(gotErr.Error(), wantListenerNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantListenerNACKErr) } // Verify that the watcher watching the good resource receives a good diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 3c564ea97c31..8a0703190b21 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -58,7 +58,7 @@ func (s) TestLRSClient(t *testing.T) { TransportAPI: version.TransportV2, NodeProto: &v2corepb.Node{}, }, - }, defaultClientWatchExpiryTimeout) + }, defaultClientWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) } From 8062981d4e01af2b3d80e5e16e2f192d629a167f Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 12 Oct 2022 09:52:45 -0700 Subject: [PATCH 629/998] vet: workaround buggy mac git grep behavior (#5716) --- vet.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/vet.sh b/vet.sh index c3fc8253b13a..bd8e0cdb33c8 100755 --- a/vet.sh +++ b/vet.sh @@ -67,7 +67,9 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) 
+(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -81,7 +83,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" From 8b3b10bd045e7d3f06d2e7413a2a399f834989c0 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 12 Oct 2022 15:18:49 -0400 Subject: [PATCH 630/998] gcp/observability: implement public preview config syntax, logging schema, and exposed metrics (#5704) --- gcp/observability/config.go | 246 ++-- gcp/observability/exporting.go | 54 +- gcp/observability/go.mod | 3 +- .../internal/logging/logging.pb.go | 914 -------------- .../internal/logging/logging.proto | 153 --- gcp/observability/logging.go | 586 +++++---- gcp/observability/logging_test.go | 1098 +++++++++++++++++ gcp/observability/observability.go | 10 +- gcp/observability/observability_test.go | 784 +++--------- gcp/observability/opencensus.go | 38 +- internal/binarylog/binarylog.go | 20 +- internal/binarylog/binarylog_test.go | 2 +- internal/binarylog/env_config.go | 2 +- internal/binarylog/method_logger.go | 19 +- internal/binarylog/method_logger_test.go | 38 +- internal/envconfig/observability.go | 36 + 16 
files changed, 1904 insertions(+), 2099 deletions(-) delete mode 100644 gcp/observability/internal/logging/logging.pb.go delete mode 100644 gcp/observability/internal/logging/logging.proto create mode 100644 gcp/observability/logging_test.go create mode 100644 internal/envconfig/observability.go diff --git a/gcp/observability/config.go b/gcp/observability/config.go index 1a9f718e81b1..1920f086479f 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -21,6 +21,7 @@ package observability import ( "context" "encoding/json" + "errors" "fmt" "io/ioutil" "os" @@ -28,79 +29,15 @@ import ( gcplogging "cloud.google.com/go/logging" "golang.org/x/oauth2/google" + "google.golang.org/grpc/internal/envconfig" ) const ( - envObservabilityConfig = "GRPC_CONFIG_OBSERVABILITY" - envObservabilityConfigJSON = "GRPC_CONFIG_OBSERVABILITY_JSON" - envProjectID = "GOOGLE_CLOUD_PROJECT" - logFilterPatternRegexpStr = `^([\w./]+)/((?:\w+)|[*])$` + envProjectID = "GOOGLE_CLOUD_PROJECT" + methodStringRegexpStr = `^([\w./]+)/((?:\w+)|[*])$` ) -var logFilterPatternRegexp = regexp.MustCompile(logFilterPatternRegexpStr) - -// logFilter represents a method logging configuration. -type logFilter struct { - // Pattern is a string which can select a group of method names. By - // default, the Pattern is an empty string, matching no methods. - // - // Only "*" Wildcard is accepted for Pattern. A Pattern is in the form - // of / or just a character "*" . - // - // If the Pattern is "*", it specifies the defaults for all the - // services; If the Pattern is /*, it specifies the defaults - // for all methods in the specified service ; If the Pattern is - // */, this is not supported. - // - // Examples: - // - "Foo/Bar" selects only the method "Bar" from service "Foo" - // - "Foo/*" selects all methods from service "Foo" - // - "*" selects all methods from all services. - Pattern string `json:"pattern,omitempty"` - // HeaderBytes is the number of bytes of each header to log. 
If the size of - // the header is greater than the defined limit, content past the limit will - // be truncated. The default value is 0. - HeaderBytes int32 `json:"header_bytes,omitempty"` - // MessageBytes is the number of bytes of each message to log. If the size - // of the message is greater than the defined limit, content pass the limit - // will be truncated. The default value is 0. - MessageBytes int32 `json:"message_bytes,omitempty"` -} - -// config is configuration for observability behaviors. By default, no -// configuration is required for tracing/metrics/logging to function. This -// config captures the most common knobs for gRPC users. It's always possible to -// override with explicit config in code. -type config struct { - // EnableCloudTrace represents whether the tracing data upload to - // CloudTrace should be enabled or not. - EnableCloudTrace bool `json:"enable_cloud_trace,omitempty"` - // EnableCloudMonitoring represents whether the metrics data upload to - // CloudMonitoring should be enabled or not. - EnableCloudMonitoring bool `json:"enable_cloud_monitoring,omitempty"` - // EnableCloudLogging represents Whether the logging data upload to - // CloudLogging should be enabled or not. - EnableCloudLogging bool `json:"enable_cloud_logging,omitempty"` - // DestinationProjectID is the destination GCP project identifier for the - // uploading log entries. If empty, the gRPC Observability plugin will - // attempt to fetch the project_id from the GCP environment variables, or - // from the default credentials. - DestinationProjectID string `json:"destination_project_id,omitempty"` - // LogFilters is a list of method config. The order matters here - the first - // Pattern which matches the current method will apply the associated config - // options in the logFilter. Any other logFilter that also matches that - // comes later will be ignored. So a logFilter of "*/*" should appear last - // in this list. 
- LogFilters []logFilter `json:"log_filters,omitempty"` - // GlobalTraceSamplingRate is the global setting that controls the - // probability of a RPC being traced. For example, 0.05 means there is a 5% - // chance for a RPC to be traced, 1.0 means trace every call, 0 means don’t - // start new traces. - GlobalTraceSamplingRate float64 `json:"global_trace_sampling_rate,omitempty"` - // CustomTags a list of custom tags that will be attached to every log - // entry. - CustomTags map[string]string `json:"custom_tags,omitempty"` -} +var methodStringRegexp = regexp.MustCompile(methodStringRegexpStr) // fetchDefaultProjectID fetches the default GCP project id from environment. func fetchDefaultProjectID(ctx context.Context) string { @@ -123,14 +60,34 @@ func fetchDefaultProjectID(ctx context.Context) string { return credentials.ProjectID } -func validateFilters(config *config) error { - for _, filter := range config.LogFilters { - if filter.Pattern == "*" { +func validateLogEventMethod(methods []string, exclude bool) error { + for _, method := range methods { + if method == "*" { + if exclude { + return errors.New("cannot have exclude and a '*' wildcard") + } continue } - match := logFilterPatternRegexp.FindStringSubmatch(filter.Pattern) + match := methodStringRegexp.FindStringSubmatch(method) if match == nil { - return fmt.Errorf("invalid log filter Pattern: %v", filter.Pattern) + return fmt.Errorf("invalid method string: %v", method) + } + } + return nil +} + +func validateLoggingEvents(config *config) error { + if config.CloudLogging == nil { + return nil + } + for _, clientRPCEvent := range config.CloudLogging.ClientRPCEvents { + if err := validateLogEventMethod(clientRPCEvent.Methods, clientRPCEvent.Exclude); err != nil { + return fmt.Errorf("error in clientRPCEvent method: %v", err) + } + } + for _, serverRPCEvent := range config.CloudLogging.ServerRPCEvents { + if err := validateLogEventMethod(serverRPCEvent.Methods, serverRPCEvent.Exclude); err != nil { + return 
fmt.Errorf("error in serverRPCEvent method: %v", err) } } return nil @@ -144,38 +101,161 @@ func unmarshalAndVerifyConfig(rawJSON json.RawMessage) (*config, error) { if err := json.Unmarshal(rawJSON, &config); err != nil { return nil, fmt.Errorf("error parsing observability config: %v", err) } - if err := validateFilters(&config); err != nil { + if err := validateLoggingEvents(&config); err != nil { return nil, fmt.Errorf("error parsing observability config: %v", err) } - if config.GlobalTraceSamplingRate > 1 || config.GlobalTraceSamplingRate < 0 { - return nil, fmt.Errorf("error parsing observability config: invalid global trace sampling rate %v", config.GlobalTraceSamplingRate) + if config.CloudTrace != nil && (config.CloudTrace.SamplingRate > 1 || config.CloudTrace.SamplingRate < 0) { + return nil, fmt.Errorf("error parsing observability config: invalid cloud trace sampling rate %v", config.CloudTrace.SamplingRate) } logger.Infof("Parsed ObservabilityConfig: %+v", &config) return &config, nil } func parseObservabilityConfig() (*config, error) { - if fileSystemPath := os.Getenv(envObservabilityConfigJSON); fileSystemPath != "" { - content, err := ioutil.ReadFile(fileSystemPath) // TODO: Switch to os.ReadFile once dropped support for go 1.15 + if f := envconfig.ObservabilityConfigFile; f != "" { + if envconfig.ObservabilityConfig != "" { + logger.Warning("Ignoring GRPC_GCP_OBSERVABILITY_CONFIG and using GRPC_GCP_OBSERVABILITY_CONFIG_FILE contents.") + } + content, err := ioutil.ReadFile(f) // TODO: Switch to os.ReadFile once dropped support for go 1.15 if err != nil { - return nil, fmt.Errorf("error reading observability configuration file %q: %v", fileSystemPath, err) + return nil, fmt.Errorf("error reading observability configuration file %q: %v", f, err) } return unmarshalAndVerifyConfig(content) - } else if content := os.Getenv(envObservabilityConfig); content != "" { - return unmarshalAndVerifyConfig([]byte(content)) + } else if envconfig.ObservabilityConfig 
!= "" { + return unmarshalAndVerifyConfig([]byte(envconfig.ObservabilityConfig)) } // If the ENV var doesn't exist, do nothing return nil, nil } func ensureProjectIDInObservabilityConfig(ctx context.Context, config *config) error { - if config.DestinationProjectID == "" { + if config.ProjectID == "" { // Try to fetch the GCP project id projectID := fetchDefaultProjectID(ctx) if projectID == "" { return fmt.Errorf("empty destination project ID") } - config.DestinationProjectID = projectID + config.ProjectID = projectID } return nil } + +type clientRPCEvents struct { + // Methods is a list of strings which can select a group of methods. By + // default, the list is empty, matching no methods. + // + // The value of the method is in the form of /. + // + // "*" is accepted as a wildcard for: + // 1. The method name. If the value is /*, it matches all + // methods in the specified service. + // 2. The whole value of the field which matches any /. + // It’s not supported when Exclude is true. + // 3. The * wildcard cannot be used on the service name independently, + // */ is not supported. + // + // The service name, when specified, must be the fully qualified service + // name, including the package name. + // + // Examples: + // 1."goo.Foo/Bar" selects only the method "Bar" from service "goo.Foo", + // here “goo” is the package name. + // 2."goo.Foo/*" selects all methods from service "goo.Foo" + // 3. "*" selects all methods from all services. + Methods []string `json:"method,omitempty"` + // Exclude represents whether the methods denoted by Methods should be + // excluded from logging. The default value is false, meaning the methods + // denoted by Methods are included in the logging. If Exclude is true, the + // wildcard `*` cannot be used as value of an entry in Methods. + Exclude bool `json:"exclude,omitempty"` + // MaxMetadataBytes is the maximum number of bytes of each header to log. 
If + // the size of the metadata is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. + MaxMetadataBytes int `json:"max_metadata_bytes"` + // MaxMessageBytes is the maximum number of bytes of each message to log. If + // the size of the message is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. + MaxMessageBytes int `json:"max_message_bytes"` +} + +type serverRPCEvents struct { + // Methods is a list of strings which can select a group of methods. By + // default, the list is empty, matching no methods. + // + // The value of the method is in the form of /. + // + // "*" is accepted as a wildcard for: + // 1. The method name. If the value is /*, it matches all + // methods in the specified service. + // 2. The whole value of the field which matches any /. + // It’s not supported when Exclude is true. + // 3. The * wildcard cannot be used on the service name independently, + // */ is not supported. + // + // The service name, when specified, must be the fully qualified service + // name, including the package name. + // + // Examples: + // 1."goo.Foo/Bar" selects only the method "Bar" from service "goo.Foo", + // here “goo” is the package name. + // 2."goo.Foo/*" selects all methods from service "goo.Foo" + // 3. "*" selects all methods from all services. + Methods []string `json:"method,omitempty"` + // Exclude represents whether the methods denoted by Methods should be + // excluded from logging. The default value is false, meaning the methods + // denoted by Methods are included in the logging. If Exclude is true, the + // wildcard `*` cannot be used as value of an entry in Methods. + Exclude bool `json:"exclude,omitempty"` + // MaxMetadataBytes is the maximum number of bytes of each header to log. If + // the size of the metadata is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. 
+ MaxMetadataBytes int `json:"max_metadata_bytes"` + // MaxMessageBytes is the maximum number of bytes of each message to log. If + // the size of the message is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. + MaxMessageBytes int `json:"max_message_bytes"` +} + +type cloudLogging struct { + // ClientRPCEvents represents the configuration for outgoing RPC's from the + // binary. The client_rpc_events configs are evaluated in text order, the + // first one matched is used. If an RPC doesn't match an entry, it will + // continue on to the next entry in the list. + ClientRPCEvents []clientRPCEvents `json:"client_rpc_events,omitempty"` + + // ServerRPCEvents represents the configuration for incoming RPC's to the + // binary. The server_rpc_events configs are evaluated in text order, the + // first one matched is used. If an RPC doesn't match an entry, it will + // continue on to the next entry in the list. + ServerRPCEvents []serverRPCEvents `json:"server_rpc_events,omitempty"` +} + +type cloudMonitoring struct{} + +type cloudTrace struct { + // SamplingRate is the global setting that controls the probability of a RPC + // being traced. For example, 0.05 means there is a 5% chance for a RPC to + // be traced, 1.0 means trace every call, 0 means don’t start new traces. By + // default, the sampling_rate is 0. + SamplingRate float64 `json:"sampling_rate,omitempty"` +} + +type config struct { + // ProjectID is the destination GCP project identifier for uploading log + // entries. If empty, the gRPC Observability plugin will attempt to fetch + // the project_id from the GCP environment variables, or from the default + // credentials. If not found, the observability init functions will return + // an error. + ProjectID string `json:"project_id,omitempty"` + // CloudLogging defines the logging options. If not present, logging is disabled. 
+ CloudLogging *cloudLogging `json:"cloud_logging,omitempty"` + // CloudMonitoring determines whether or not metrics are enabled based on + // whether it is present or not. If present, monitoring will be enabled, if + // not present, monitoring is disabled. + CloudMonitoring *cloudMonitoring `json:"cloud_monitoring,omitempty"` + // CloudTrace defines the tracing options. When present, tracing is enabled + // with default configurations. When absent, the tracing is disabled. + CloudTrace *cloudTrace `json:"cloud_trace,omitempty"` + // Labels are applied to cloud logging, monitoring, and trace. + Labels map[string]string `json:"labels,omitempty"` +} diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go index 0c566f4b183c..73eac40b5ed0 100644 --- a/gcp/observability/exporting.go +++ b/gcp/observability/exporting.go @@ -20,12 +20,9 @@ package observability import ( "context" - "encoding/json" "fmt" gcplogging "cloud.google.com/go/logging" - grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" - "google.golang.org/protobuf/encoding/protojson" ) // loggingExporter is the interface of logging exporter for gRPC Observability. @@ -33,7 +30,7 @@ import ( // now, it exists for testing purposes. type loggingExporter interface { // EmitGrpcLogRecord writes a gRPC LogRecord to cache without blocking. - EmitGrpcLogRecord(*grpclogrecordpb.GrpcLogRecord) + EmitGcpLoggingEntry(entry gcplogging.Entry) // Close flushes all pending data and closes the exporter. 
Close() error } @@ -44,58 +41,23 @@ type cloudLoggingExporter struct { logger *gcplogging.Logger } -func newCloudLoggingExporter(ctx context.Context, config *config) (*cloudLoggingExporter, error) { - c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", config.DestinationProjectID)) +func newCloudLoggingExporter(ctx context.Context, config *config) (loggingExporter, error) { + c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", config.ProjectID)) if err != nil { return nil, fmt.Errorf("failed to create cloudLoggingExporter: %v", err) } defer logger.Infof("Successfully created cloudLoggingExporter") - if len(config.CustomTags) != 0 { - logger.Infof("Adding custom tags: %+v", config.CustomTags) + if len(config.Labels) != 0 { + logger.Infof("Adding labels: %+v", config.Labels) } return &cloudLoggingExporter{ - projectID: config.DestinationProjectID, + projectID: config.ProjectID, client: c, - logger: c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(config.CustomTags)), + logger: c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(config.Labels)), }, nil } -// mapLogLevelToSeverity maps the gRPC defined log level to Cloud Logging's -// Severity. The canonical definition can be found at -// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#LogSeverity. -var logLevelToSeverity = map[grpclogrecordpb.GrpcLogRecord_LogLevel]gcplogging.Severity{ - grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_UNKNOWN: 0, - grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_TRACE: 100, // Cloud Logging doesn't have a trace level, treated as DEBUG. 
- grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_DEBUG: 100, - grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_INFO: 200, - grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_WARN: 400, - grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_ERROR: 500, - grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_CRITICAL: 600, -} - -var protoToJSONOptions = &protojson.MarshalOptions{ - UseProtoNames: true, - UseEnumNumbers: false, -} - -func (cle *cloudLoggingExporter) EmitGrpcLogRecord(l *grpclogrecordpb.GrpcLogRecord) { - // Converts the log record content to a more readable format via protojson. - jsonBytes, err := protoToJSONOptions.Marshal(l) - if err != nil { - logger.Infof("Unable to marshal log record: %v", l) - return - } - var payload map[string]interface{} - err = json.Unmarshal(jsonBytes, &payload) - if err != nil { - logger.Infof("Unable to unmarshal bytes to JSON: %v", jsonBytes) - return - } - entry := gcplogging.Entry{ - Timestamp: l.Timestamp.AsTime(), - Severity: logLevelToSeverity[l.LogLevel], - Payload: payload, - } +func (cle *cloudLoggingExporter) EmitGcpLoggingEntry(entry gcplogging.Entry) { cle.logger.Log(entry) if logger.V(2) { logger.Infof("Uploading event to CloudLogging: %+v", entry) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index dcbfdaebb6e3..a164fa48c23c 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -5,12 +5,11 @@ go 1.14 require ( cloud.google.com/go/logging v1.4.2 contrib.go.opencensus.io/exporter/stackdriver v0.13.12 - github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.3.0 go.opencensus.io v0.23.0 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 google.golang.org/grpc v1.46.0 - google.golang.org/protobuf v1.27.1 ) // TODO(lidiz) remove the following line when we have a release containing the diff --git a/gcp/observability/internal/logging/logging.pb.go b/gcp/observability/internal/logging/logging.pb.go deleted file mode 100644 index 
a47c405759b6..000000000000 --- a/gcp/observability/internal/logging/logging.pb.go +++ /dev/null @@ -1,914 +0,0 @@ -// Copyright 2022 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: gcp/observability/internal/logging/logging.proto - -package logging - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - -// List of event types -type GrpcLogRecord_EventType int32 - -const ( - // Unknown event type - GrpcLogRecord_GRPC_CALL_UNKNOWN GrpcLogRecord_EventType = 0 - // Header sent from client to server - GrpcLogRecord_GRPC_CALL_REQUEST_HEADER GrpcLogRecord_EventType = 1 - // Header sent from server to client - GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER GrpcLogRecord_EventType = 2 - // Message sent from client to server - GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE GrpcLogRecord_EventType = 3 - // Message sent from server to client - GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE GrpcLogRecord_EventType = 4 - // Trailer indicates the end of the gRPC call - GrpcLogRecord_GRPC_CALL_TRAILER GrpcLogRecord_EventType = 5 - // A signal that client is done sending - GrpcLogRecord_GRPC_CALL_HALF_CLOSE GrpcLogRecord_EventType = 6 - // A signal that the rpc is canceled - GrpcLogRecord_GRPC_CALL_CANCEL GrpcLogRecord_EventType = 7 -) - -// Enum value maps for GrpcLogRecord_EventType. 
-var ( - GrpcLogRecord_EventType_name = map[int32]string{ - 0: "GRPC_CALL_UNKNOWN", - 1: "GRPC_CALL_REQUEST_HEADER", - 2: "GRPC_CALL_RESPONSE_HEADER", - 3: "GRPC_CALL_REQUEST_MESSAGE", - 4: "GRPC_CALL_RESPONSE_MESSAGE", - 5: "GRPC_CALL_TRAILER", - 6: "GRPC_CALL_HALF_CLOSE", - 7: "GRPC_CALL_CANCEL", - } - GrpcLogRecord_EventType_value = map[string]int32{ - "GRPC_CALL_UNKNOWN": 0, - "GRPC_CALL_REQUEST_HEADER": 1, - "GRPC_CALL_RESPONSE_HEADER": 2, - "GRPC_CALL_REQUEST_MESSAGE": 3, - "GRPC_CALL_RESPONSE_MESSAGE": 4, - "GRPC_CALL_TRAILER": 5, - "GRPC_CALL_HALF_CLOSE": 6, - "GRPC_CALL_CANCEL": 7, - } -) - -func (x GrpcLogRecord_EventType) Enum() *GrpcLogRecord_EventType { - p := new(GrpcLogRecord_EventType) - *p = x - return p -} - -func (x GrpcLogRecord_EventType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GrpcLogRecord_EventType) Descriptor() protoreflect.EnumDescriptor { - return file_gcp_observability_internal_logging_logging_proto_enumTypes[0].Descriptor() -} - -func (GrpcLogRecord_EventType) Type() protoreflect.EnumType { - return &file_gcp_observability_internal_logging_logging_proto_enumTypes[0] -} - -func (x GrpcLogRecord_EventType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GrpcLogRecord_EventType.Descriptor instead. -func (GrpcLogRecord_EventType) EnumDescriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 0} -} - -// The entity that generates the log entry -type GrpcLogRecord_EventLogger int32 - -const ( - GrpcLogRecord_LOGGER_UNKNOWN GrpcLogRecord_EventLogger = 0 - GrpcLogRecord_LOGGER_CLIENT GrpcLogRecord_EventLogger = 1 - GrpcLogRecord_LOGGER_SERVER GrpcLogRecord_EventLogger = 2 -) - -// Enum value maps for GrpcLogRecord_EventLogger. 
-var ( - GrpcLogRecord_EventLogger_name = map[int32]string{ - 0: "LOGGER_UNKNOWN", - 1: "LOGGER_CLIENT", - 2: "LOGGER_SERVER", - } - GrpcLogRecord_EventLogger_value = map[string]int32{ - "LOGGER_UNKNOWN": 0, - "LOGGER_CLIENT": 1, - "LOGGER_SERVER": 2, - } -) - -func (x GrpcLogRecord_EventLogger) Enum() *GrpcLogRecord_EventLogger { - p := new(GrpcLogRecord_EventLogger) - *p = x - return p -} - -func (x GrpcLogRecord_EventLogger) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GrpcLogRecord_EventLogger) Descriptor() protoreflect.EnumDescriptor { - return file_gcp_observability_internal_logging_logging_proto_enumTypes[1].Descriptor() -} - -func (GrpcLogRecord_EventLogger) Type() protoreflect.EnumType { - return &file_gcp_observability_internal_logging_logging_proto_enumTypes[1] -} - -func (x GrpcLogRecord_EventLogger) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GrpcLogRecord_EventLogger.Descriptor instead. -func (GrpcLogRecord_EventLogger) EnumDescriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 1} -} - -// The log severity level of the log entry -type GrpcLogRecord_LogLevel int32 - -const ( - GrpcLogRecord_LOG_LEVEL_UNKNOWN GrpcLogRecord_LogLevel = 0 - GrpcLogRecord_LOG_LEVEL_TRACE GrpcLogRecord_LogLevel = 1 - GrpcLogRecord_LOG_LEVEL_DEBUG GrpcLogRecord_LogLevel = 2 - GrpcLogRecord_LOG_LEVEL_INFO GrpcLogRecord_LogLevel = 3 - GrpcLogRecord_LOG_LEVEL_WARN GrpcLogRecord_LogLevel = 4 - GrpcLogRecord_LOG_LEVEL_ERROR GrpcLogRecord_LogLevel = 5 - GrpcLogRecord_LOG_LEVEL_CRITICAL GrpcLogRecord_LogLevel = 6 -) - -// Enum value maps for GrpcLogRecord_LogLevel. 
-var ( - GrpcLogRecord_LogLevel_name = map[int32]string{ - 0: "LOG_LEVEL_UNKNOWN", - 1: "LOG_LEVEL_TRACE", - 2: "LOG_LEVEL_DEBUG", - 3: "LOG_LEVEL_INFO", - 4: "LOG_LEVEL_WARN", - 5: "LOG_LEVEL_ERROR", - 6: "LOG_LEVEL_CRITICAL", - } - GrpcLogRecord_LogLevel_value = map[string]int32{ - "LOG_LEVEL_UNKNOWN": 0, - "LOG_LEVEL_TRACE": 1, - "LOG_LEVEL_DEBUG": 2, - "LOG_LEVEL_INFO": 3, - "LOG_LEVEL_WARN": 4, - "LOG_LEVEL_ERROR": 5, - "LOG_LEVEL_CRITICAL": 6, - } -) - -func (x GrpcLogRecord_LogLevel) Enum() *GrpcLogRecord_LogLevel { - p := new(GrpcLogRecord_LogLevel) - *p = x - return p -} - -func (x GrpcLogRecord_LogLevel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GrpcLogRecord_LogLevel) Descriptor() protoreflect.EnumDescriptor { - return file_gcp_observability_internal_logging_logging_proto_enumTypes[2].Descriptor() -} - -func (GrpcLogRecord_LogLevel) Type() protoreflect.EnumType { - return &file_gcp_observability_internal_logging_logging_proto_enumTypes[2] -} - -func (x GrpcLogRecord_LogLevel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GrpcLogRecord_LogLevel.Descriptor instead. -func (GrpcLogRecord_LogLevel) EnumDescriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 2} -} - -type GrpcLogRecord_Address_Type int32 - -const ( - GrpcLogRecord_Address_TYPE_UNKNOWN GrpcLogRecord_Address_Type = 0 - GrpcLogRecord_Address_TYPE_IPV4 GrpcLogRecord_Address_Type = 1 // in 1.2.3.4 form - GrpcLogRecord_Address_TYPE_IPV6 GrpcLogRecord_Address_Type = 2 // IPv6 canonical form (RFC5952 section 4) - GrpcLogRecord_Address_TYPE_UNIX GrpcLogRecord_Address_Type = 3 // UDS string -) - -// Enum value maps for GrpcLogRecord_Address_Type. 
-var ( - GrpcLogRecord_Address_Type_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "TYPE_IPV4", - 2: "TYPE_IPV6", - 3: "TYPE_UNIX", - } - GrpcLogRecord_Address_Type_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "TYPE_IPV4": 1, - "TYPE_IPV6": 2, - "TYPE_UNIX": 3, - } -) - -func (x GrpcLogRecord_Address_Type) Enum() *GrpcLogRecord_Address_Type { - p := new(GrpcLogRecord_Address_Type) - *p = x - return p -} - -func (x GrpcLogRecord_Address_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GrpcLogRecord_Address_Type) Descriptor() protoreflect.EnumDescriptor { - return file_gcp_observability_internal_logging_logging_proto_enumTypes[3].Descriptor() -} - -func (GrpcLogRecord_Address_Type) Type() protoreflect.EnumType { - return &file_gcp_observability_internal_logging_logging_proto_enumTypes[3] -} - -func (x GrpcLogRecord_Address_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GrpcLogRecord_Address_Type.Descriptor instead. -func (GrpcLogRecord_Address_Type) EnumDescriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 2, 0} -} - -type GrpcLogRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The timestamp of the log event - Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Uniquely identifies a call. The value must not be 0 in order to disambiguate - // from an unset value. - // Each call may have several log entries. They will all have the same rpc_id. - // Nothing is guaranteed about their value other than they are unique across - // different RPCs in the same gRPC process. 
- RpcId string `protobuf:"bytes,2,opt,name=rpc_id,json=rpcId,proto3" json:"rpc_id,omitempty"` - EventType GrpcLogRecord_EventType `protobuf:"varint,3,opt,name=event_type,json=eventType,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_EventType" json:"event_type,omitempty"` // one of the above EventType enum - EventLogger GrpcLogRecord_EventLogger `protobuf:"varint,4,opt,name=event_logger,json=eventLogger,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_EventLogger" json:"event_logger,omitempty"` // one of the above EventLogger enum - // the name of the service - ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - // the name of the RPC method - MethodName string `protobuf:"bytes,6,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` - LogLevel GrpcLogRecord_LogLevel `protobuf:"varint,7,opt,name=log_level,json=logLevel,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_LogLevel" json:"log_level,omitempty"` // one of the above LogLevel enum - // Peer address information. On client side, peer is logged on server - // header event or trailer event (if trailer-only). On server side, peer - // is always logged on the client header event. - PeerAddress *GrpcLogRecord_Address `protobuf:"bytes,8,opt,name=peer_address,json=peerAddress,proto3" json:"peer_address,omitempty"` - // the RPC timeout value - Timeout *durationpb.Duration `protobuf:"bytes,11,opt,name=timeout,proto3" json:"timeout,omitempty"` - // A single process may be used to run multiple virtual servers with - // different identities. - // The authority is the name of such a server identify. It is typically a - // portion of the URI in the form of or :. - Authority string `protobuf:"bytes,12,opt,name=authority,proto3" json:"authority,omitempty"` - // Size of the message or metadata, depending on the event type, - // regardless of whether the full message or metadata is being logged - // (i.e. 
could be truncated or omitted). - PayloadSize uint32 `protobuf:"varint,13,opt,name=payload_size,json=payloadSize,proto3" json:"payload_size,omitempty"` - // true if message or metadata field is either truncated or omitted due - // to config options - PayloadTruncated bool `protobuf:"varint,14,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` - // Used by header event or trailer event - Metadata *GrpcLogRecord_Metadata `protobuf:"bytes,15,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The entry sequence ID for this call. The first message has a value of 1, - // to disambiguate from an unset value. The purpose of this field is to - // detect missing entries in environments where durability or ordering is - // not guaranteed. - SequenceId uint64 `protobuf:"varint,16,opt,name=sequence_id,json=sequenceId,proto3" json:"sequence_id,omitempty"` - // Used by message event - Message []byte `protobuf:"bytes,17,opt,name=message,proto3" json:"message,omitempty"` - // The gRPC status code - StatusCode uint32 `protobuf:"varint,18,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` - // The gRPC status message - StatusMessage string `protobuf:"bytes,19,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` - // The value of the grpc-status-details-bin metadata key, if any. 
- // This is always an encoded google.rpc.Status message - StatusDetails []byte `protobuf:"bytes,20,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` -} - -func (x *GrpcLogRecord) Reset() { - *x = GrpcLogRecord{} - if protoimpl.UnsafeEnabled { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcLogRecord) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcLogRecord) ProtoMessage() {} - -func (x *GrpcLogRecord) ProtoReflect() protoreflect.Message { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcLogRecord.ProtoReflect.Descriptor instead. -func (*GrpcLogRecord) Descriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0} -} - -func (x *GrpcLogRecord) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *GrpcLogRecord) GetRpcId() string { - if x != nil { - return x.RpcId - } - return "" -} - -func (x *GrpcLogRecord) GetEventType() GrpcLogRecord_EventType { - if x != nil { - return x.EventType - } - return GrpcLogRecord_GRPC_CALL_UNKNOWN -} - -func (x *GrpcLogRecord) GetEventLogger() GrpcLogRecord_EventLogger { - if x != nil { - return x.EventLogger - } - return GrpcLogRecord_LOGGER_UNKNOWN -} - -func (x *GrpcLogRecord) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -func (x *GrpcLogRecord) GetMethodName() string { - if x != nil { - return x.MethodName - } - return "" -} - -func (x *GrpcLogRecord) GetLogLevel() GrpcLogRecord_LogLevel { - if x != nil { - return x.LogLevel - } - 
return GrpcLogRecord_LOG_LEVEL_UNKNOWN -} - -func (x *GrpcLogRecord) GetPeerAddress() *GrpcLogRecord_Address { - if x != nil { - return x.PeerAddress - } - return nil -} - -func (x *GrpcLogRecord) GetTimeout() *durationpb.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -func (x *GrpcLogRecord) GetAuthority() string { - if x != nil { - return x.Authority - } - return "" -} - -func (x *GrpcLogRecord) GetPayloadSize() uint32 { - if x != nil { - return x.PayloadSize - } - return 0 -} - -func (x *GrpcLogRecord) GetPayloadTruncated() bool { - if x != nil { - return x.PayloadTruncated - } - return false -} - -func (x *GrpcLogRecord) GetMetadata() *GrpcLogRecord_Metadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *GrpcLogRecord) GetSequenceId() uint64 { - if x != nil { - return x.SequenceId - } - return 0 -} - -func (x *GrpcLogRecord) GetMessage() []byte { - if x != nil { - return x.Message - } - return nil -} - -func (x *GrpcLogRecord) GetStatusCode() uint32 { - if x != nil { - return x.StatusCode - } - return 0 -} - -func (x *GrpcLogRecord) GetStatusMessage() string { - if x != nil { - return x.StatusMessage - } - return "" -} - -func (x *GrpcLogRecord) GetStatusDetails() []byte { - if x != nil { - return x.StatusDetails - } - return nil -} - -// A list of metadata pairs -type GrpcLogRecord_Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Entry []*GrpcLogRecord_MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` -} - -func (x *GrpcLogRecord_Metadata) Reset() { - *x = GrpcLogRecord_Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcLogRecord_Metadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcLogRecord_Metadata) 
ProtoMessage() {} - -func (x *GrpcLogRecord_Metadata) ProtoReflect() protoreflect.Message { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcLogRecord_Metadata.ProtoReflect.Descriptor instead. -func (*GrpcLogRecord_Metadata) Descriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *GrpcLogRecord_Metadata) GetEntry() []*GrpcLogRecord_MetadataEntry { - if x != nil { - return x.Entry - } - return nil -} - -// One metadata key value pair -type GrpcLogRecord_MetadataEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *GrpcLogRecord_MetadataEntry) Reset() { - *x = GrpcLogRecord_MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcLogRecord_MetadataEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcLogRecord_MetadataEntry) ProtoMessage() {} - -func (x *GrpcLogRecord_MetadataEntry) ProtoReflect() protoreflect.Message { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcLogRecord_MetadataEntry.ProtoReflect.Descriptor instead. 
-func (*GrpcLogRecord_MetadataEntry) Descriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *GrpcLogRecord_MetadataEntry) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *GrpcLogRecord_MetadataEntry) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -// Address information -type GrpcLogRecord_Address struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type GrpcLogRecord_Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.observability.logging.v1.GrpcLogRecord_Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` -} - -func (x *GrpcLogRecord_Address) Reset() { - *x = GrpcLogRecord_Address{} - if protoimpl.UnsafeEnabled { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcLogRecord_Address) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcLogRecord_Address) ProtoMessage() {} - -func (x *GrpcLogRecord_Address) ProtoReflect() protoreflect.Message { - mi := &file_gcp_observability_internal_logging_logging_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcLogRecord_Address.ProtoReflect.Descriptor instead. 
-func (*GrpcLogRecord_Address) Descriptor() ([]byte, []int) { - return file_gcp_observability_internal_logging_logging_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *GrpcLogRecord_Address) GetType() GrpcLogRecord_Address_Type { - if x != nil { - return x.Type - } - return GrpcLogRecord_Address_TYPE_UNKNOWN -} - -func (x *GrpcLogRecord_Address) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *GrpcLogRecord_Address) GetIpPort() uint32 { - if x != nil { - return x.IpPort - } - return 0 -} - -var File_gcp_observability_internal_logging_logging_proto protoreflect.FileDescriptor - -var file_gcp_observability_internal_logging_logging_proto_rawDesc = []byte{ - 0x0a, 0x30, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6c, 0x6f, 0x67, - 0x67, 0x69, 0x6e, 0x67, 0x2f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x1d, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xe5, 0x0d, 0x0a, 0x0d, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 
0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, - 0x0a, 0x06, 0x72, 0x70, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x72, 0x70, 0x63, 0x49, 0x64, 0x12, 0x55, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, - 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, - 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x5b, 0x0a, 0x0c, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x0b, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a, - 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 
0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4c, - 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x57, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, - 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, - 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0b, 0x70, - 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, - 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x21, 0x0a, - 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x51, 0x0a, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, - 
0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x13, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x1a, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x1a, 0xd2, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x4d, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, - 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, - 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x22, 0xe5, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, - 0x4c, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x47, - 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, - 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1d, 0x0a, 
0x19, 0x47, 0x52, 0x50, - 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, - 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, - 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x4d, 0x45, - 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x5f, - 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x4d, 0x45, - 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x52, 0x50, 0x43, 0x5f, - 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x05, 0x12, 0x18, - 0x0a, 0x14, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x48, 0x41, 0x4c, 0x46, - 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x52, 0x50, 0x43, - 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x10, 0x07, 0x22, 0x47, - 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, - 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x22, 0xa0, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, - 0x65, 0x76, 0x65, 0x6c, 0x12, 0x15, 0x0a, 0x11, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, - 0x4c, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, - 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x01, - 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x44, 0x45, - 0x42, 0x55, 0x47, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, - 0x45, 0x4c, 
0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x4f, 0x47, - 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, - 0x0f, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x4f, 0x47, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, - 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x06, 0x42, 0x77, 0x0a, 0x1d, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x2e, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x42, 0x19, 0x4f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, - 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, - 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6c, 0x6f, 0x67, 0x67, - 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_gcp_observability_internal_logging_logging_proto_rawDescOnce sync.Once - file_gcp_observability_internal_logging_logging_proto_rawDescData = file_gcp_observability_internal_logging_logging_proto_rawDesc -) - -func file_gcp_observability_internal_logging_logging_proto_rawDescGZIP() []byte { - file_gcp_observability_internal_logging_logging_proto_rawDescOnce.Do(func() { - file_gcp_observability_internal_logging_logging_proto_rawDescData = protoimpl.X.CompressGZIP(file_gcp_observability_internal_logging_logging_proto_rawDescData) - }) - return file_gcp_observability_internal_logging_logging_proto_rawDescData -} - -var file_gcp_observability_internal_logging_logging_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var 
file_gcp_observability_internal_logging_logging_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_gcp_observability_internal_logging_logging_proto_goTypes = []interface{}{ - (GrpcLogRecord_EventType)(0), // 0: grpc.observability.logging.v1.GrpcLogRecord.EventType - (GrpcLogRecord_EventLogger)(0), // 1: grpc.observability.logging.v1.GrpcLogRecord.EventLogger - (GrpcLogRecord_LogLevel)(0), // 2: grpc.observability.logging.v1.GrpcLogRecord.LogLevel - (GrpcLogRecord_Address_Type)(0), // 3: grpc.observability.logging.v1.GrpcLogRecord.Address.Type - (*GrpcLogRecord)(nil), // 4: grpc.observability.logging.v1.GrpcLogRecord - (*GrpcLogRecord_Metadata)(nil), // 5: grpc.observability.logging.v1.GrpcLogRecord.Metadata - (*GrpcLogRecord_MetadataEntry)(nil), // 6: grpc.observability.logging.v1.GrpcLogRecord.MetadataEntry - (*GrpcLogRecord_Address)(nil), // 7: grpc.observability.logging.v1.GrpcLogRecord.Address - (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 9: google.protobuf.Duration -} -var file_gcp_observability_internal_logging_logging_proto_depIdxs = []int32{ - 8, // 0: grpc.observability.logging.v1.GrpcLogRecord.timestamp:type_name -> google.protobuf.Timestamp - 0, // 1: grpc.observability.logging.v1.GrpcLogRecord.event_type:type_name -> grpc.observability.logging.v1.GrpcLogRecord.EventType - 1, // 2: grpc.observability.logging.v1.GrpcLogRecord.event_logger:type_name -> grpc.observability.logging.v1.GrpcLogRecord.EventLogger - 2, // 3: grpc.observability.logging.v1.GrpcLogRecord.log_level:type_name -> grpc.observability.logging.v1.GrpcLogRecord.LogLevel - 7, // 4: grpc.observability.logging.v1.GrpcLogRecord.peer_address:type_name -> grpc.observability.logging.v1.GrpcLogRecord.Address - 9, // 5: grpc.observability.logging.v1.GrpcLogRecord.timeout:type_name -> google.protobuf.Duration - 5, // 6: grpc.observability.logging.v1.GrpcLogRecord.metadata:type_name -> grpc.observability.logging.v1.GrpcLogRecord.Metadata - 
6, // 7: grpc.observability.logging.v1.GrpcLogRecord.Metadata.entry:type_name -> grpc.observability.logging.v1.GrpcLogRecord.MetadataEntry - 3, // 8: grpc.observability.logging.v1.GrpcLogRecord.Address.type:type_name -> grpc.observability.logging.v1.GrpcLogRecord.Address.Type - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name -} - -func init() { file_gcp_observability_internal_logging_logging_proto_init() } -func file_gcp_observability_internal_logging_logging_proto_init() { - if File_gcp_observability_internal_logging_logging_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_gcp_observability_internal_logging_logging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLogRecord); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_gcp_observability_internal_logging_logging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLogRecord_Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_gcp_observability_internal_logging_logging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLogRecord_MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_gcp_observability_internal_logging_logging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLogRecord_Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x 
struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_gcp_observability_internal_logging_logging_proto_rawDesc, - NumEnums: 4, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_gcp_observability_internal_logging_logging_proto_goTypes, - DependencyIndexes: file_gcp_observability_internal_logging_logging_proto_depIdxs, - EnumInfos: file_gcp_observability_internal_logging_logging_proto_enumTypes, - MessageInfos: file_gcp_observability_internal_logging_logging_proto_msgTypes, - }.Build() - File_gcp_observability_internal_logging_logging_proto = out.File - file_gcp_observability_internal_logging_logging_proto_rawDesc = nil - file_gcp_observability_internal_logging_logging_proto_goTypes = nil - file_gcp_observability_internal_logging_logging_proto_depIdxs = nil -} diff --git a/gcp/observability/internal/logging/logging.proto b/gcp/observability/internal/logging/logging.proto deleted file mode 100644 index 206d953a9ca8..000000000000 --- a/gcp/observability/internal/logging/logging.proto +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2022 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package grpc.observability.logging.v1; - -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; - -option java_package = "io.grpc.observability.logging"; -option java_multiple_files = true; -option java_outer_classname = "ObservabilityLoggingProto"; -option go_package = "google.golang.org/grpc/gcp/observability/internal/logging"; - -message GrpcLogRecord { - // List of event types - enum EventType { - // Unknown event type - GRPC_CALL_UNKNOWN = 0; - // Header sent from client to server - GRPC_CALL_REQUEST_HEADER = 1; - // Header sent from server to client - GRPC_CALL_RESPONSE_HEADER = 2; - // Message sent from client to server - GRPC_CALL_REQUEST_MESSAGE = 3; - // Message sent from server to client - GRPC_CALL_RESPONSE_MESSAGE = 4; - // Trailer indicates the end of the gRPC call - GRPC_CALL_TRAILER = 5; - // A signal that client is done sending - GRPC_CALL_HALF_CLOSE = 6; - // A signal that the rpc is canceled - GRPC_CALL_CANCEL = 7; - } - // The entity that generates the log entry - enum EventLogger { - LOGGER_UNKNOWN = 0; - LOGGER_CLIENT = 1; - LOGGER_SERVER = 2; - } - // The log severity level of the log entry - enum LogLevel { - LOG_LEVEL_UNKNOWN = 0; - LOG_LEVEL_TRACE = 1; - LOG_LEVEL_DEBUG = 2; - LOG_LEVEL_INFO = 3; - LOG_LEVEL_WARN = 4; - LOG_LEVEL_ERROR = 5; - LOG_LEVEL_CRITICAL = 6; - } - - // The timestamp of the log event - google.protobuf.Timestamp timestamp = 1; - - // Uniquely identifies a call. The value must not be 0 in order to disambiguate - // from an unset value. - // Each call may have several log entries. They will all have the same rpc_id. - // Nothing is guaranteed about their value other than they are unique across - // different RPCs in the same gRPC process. 
- string rpc_id = 2; - - EventType event_type = 3; // one of the above EventType enum - EventLogger event_logger = 4; // one of the above EventLogger enum - - // the name of the service - string service_name = 5; - // the name of the RPC method - string method_name = 6; - - LogLevel log_level = 7; // one of the above LogLevel enum - - // Peer address information. On client side, peer is logged on server - // header event or trailer event (if trailer-only). On server side, peer - // is always logged on the client header event. - Address peer_address = 8; - - // the RPC timeout value - google.protobuf.Duration timeout = 11; - - // A single process may be used to run multiple virtual servers with - // different identities. - // The authority is the name of such a server identify. It is typically a - // portion of the URI in the form of or :. - string authority = 12; - - // Size of the message or metadata, depending on the event type, - // regardless of whether the full message or metadata is being logged - // (i.e. could be truncated or omitted). - uint32 payload_size = 13; - - // true if message or metadata field is either truncated or omitted due - // to config options - bool payload_truncated = 14; - - // Used by header event or trailer event - Metadata metadata = 15; - - // The entry sequence ID for this call. The first message has a value of 1, - // to disambiguate from an unset value. The purpose of this field is to - // detect missing entries in environments where durability or ordering is - // not guaranteed. - uint64 sequence_id = 16; - - // Used by message event - bytes message = 17; - - // The gRPC status code - uint32 status_code = 18; - - // The gRPC status message - string status_message = 19; - - // The value of the grpc-status-details-bin metadata key, if any. 
- // This is always an encoded google.rpc.Status message - bytes status_details = 20; - - // A list of metadata pairs - message Metadata { - repeated MetadataEntry entry = 1; - } - - // One metadata key value pair - message MetadataEntry { - string key = 1; - bytes value = 2; - } - - // Address information - message Address { - enum Type { - TYPE_UNKNOWN = 0; - TYPE_IPV4 = 1; // in 1.2.3.4 form - TYPE_IPV6 = 2; // IPv6 canonical form (RFC5952 section 4) - TYPE_UNIX = 3; // UDS string - } - Type type = 1; - string address = 2; - // only for TYPE_IPV4 and TYPE_IPV6 - uint32 ip_port = 3; - } -} diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index 8f93564353cd..dee2656f7b84 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -19,325 +19,441 @@ package observability import ( + "bytes" "context" + "encoding/base64" "fmt" "strings" - "sync/atomic" - "unsafe" + "time" + gcplogging "cloud.google.com/go/logging" "github.com/google/uuid" + + "google.golang.org/grpc" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" iblog "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/grpcutil" ) +var lExporter loggingExporter + +var newLoggingExporter = newCloudLoggingExporter + // translateMetadata translates the metadata from Binary Logging format to -// its GrpcLogRecord equivalent. -func translateMetadata(m *binlogpb.Metadata) *grpclogrecordpb.GrpcLogRecord_Metadata { - var res grpclogrecordpb.GrpcLogRecord_Metadata - res.Entry = make([]*grpclogrecordpb.GrpcLogRecord_MetadataEntry, len(m.Entry)) - for i, e := range m.Entry { - res.Entry[i] = &grpclogrecordpb.GrpcLogRecord_MetadataEntry{ - Key: e.Key, - Value: e.Value, +// its GrpcLogEntry equivalent. 
+func translateMetadata(m *binlogpb.Metadata) map[string]string { + metadata := make(map[string]string) + for _, entry := range m.GetEntry() { + entryKey := entry.GetKey() + var newVal string + if strings.HasSuffix(entryKey, "-bin") { // bin header + newVal = base64.StdEncoding.EncodeToString(entry.GetValue()) + } else { // normal header + newVal = string(entry.GetValue()) } + var oldVal string + var ok bool + if oldVal, ok = metadata[entryKey]; !ok { + metadata[entryKey] = newVal + continue + } + metadata[entryKey] = oldVal + "," + newVal } - return &res + return metadata } -func setPeerIfPresent(binlogEntry *binlogpb.GrpcLogEntry, grpcLogRecord *grpclogrecordpb.GrpcLogRecord) { +func setPeerIfPresent(binlogEntry *binlogpb.GrpcLogEntry, grpcLogEntry *grpcLogEntry) { if binlogEntry.GetPeer() != nil { - grpcLogRecord.PeerAddress = &grpclogrecordpb.GrpcLogRecord_Address{ - Type: grpclogrecordpb.GrpcLogRecord_Address_Type(binlogEntry.Peer.Type), - Address: binlogEntry.Peer.Address, - IpPort: binlogEntry.Peer.IpPort, - } + grpcLogEntry.Peer.Type = addrType(binlogEntry.GetPeer().GetType()) + grpcLogEntry.Peer.Address = binlogEntry.GetPeer().GetAddress() + grpcLogEntry.Peer.IPPort = binlogEntry.GetPeer().GetIpPort() } } -var loggerTypeToEventLogger = map[binlogpb.GrpcLogEntry_Logger]grpclogrecordpb.GrpcLogRecord_EventLogger{ - binlogpb.GrpcLogEntry_LOGGER_UNKNOWN: grpclogrecordpb.GrpcLogRecord_LOGGER_UNKNOWN, - binlogpb.GrpcLogEntry_LOGGER_CLIENT: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - binlogpb.GrpcLogEntry_LOGGER_SERVER: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, +var loggerTypeToEventLogger = map[binlogpb.GrpcLogEntry_Logger]loggerType{ + binlogpb.GrpcLogEntry_LOGGER_UNKNOWN: loggerUnknown, + binlogpb.GrpcLogEntry_LOGGER_CLIENT: loggerClient, + binlogpb.GrpcLogEntry_LOGGER_SERVER: loggerServer, } -type binaryMethodLogger struct { - rpcID, serviceName, methodName string - originalMethodLogger iblog.MethodLogger - childMethodLogger iblog.MethodLogger - exporter 
loggingExporter -} +type eventType int + +const ( + // eventTypeUnknown is an unknown event type. + eventTypeUnknown eventType = iota + // eventTypeClientHeader is a header sent from client to server. + eventTypeClientHeader + // eventTypeServerHeader is a header sent from server to client. + eventTypeServerHeader + // eventTypeClientMessage is a message sent from client to server. + eventTypeClientMessage + // eventTypeServerMessage is a message sent from server to client. + eventTypeServerMessage + // eventTypeClientHalfClose is a signal that the loggerClient is done sending. + eventTypeClientHalfClose + // eventTypeServerTrailer indicated the end of a gRPC call. + eventTypeServerTrailer + // eventTypeCancel is a signal that the rpc is canceled. + eventTypeCancel +) -func (ml *binaryMethodLogger) Log(c iblog.LogEntryConfig) { - // Invoke the original MethodLogger to maintain backward compatibility - if ml.originalMethodLogger != nil { - ml.originalMethodLogger.Log(c) +func (t eventType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case eventTypeUnknown: + buffer.WriteString("EVENT_TYPE_UNKNOWN") + case eventTypeClientHeader: + buffer.WriteString("CLIENT_HEADER") + case eventTypeServerHeader: + buffer.WriteString("SERVER_HEADER") + case eventTypeClientMessage: + buffer.WriteString("CLIENT_MESSAGE") + case eventTypeServerMessage: + buffer.WriteString("SERVER_MESSAGE") + case eventTypeClientHalfClose: + buffer.WriteString("CLIENT_HALF_CLOSE") + case eventTypeServerTrailer: + buffer.WriteString("SERVER_TRAILER") + case eventTypeCancel: + buffer.WriteString("CANCEL") } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} - // Fetch the compiled binary logging log entry - if ml.childMethodLogger == nil { - logger.Info("No wrapped method logger found") - return +type loggerType int + +const ( + loggerUnknown loggerType = iota + loggerClient + loggerServer +) + +func (t loggerType) MarshalJSON() ([]byte, error) { + buffer := 
bytes.NewBufferString(`"`) + switch t { + case loggerUnknown: + buffer.WriteString("LOGGER_UNKNOWN") + case loggerClient: + buffer.WriteString("CLIENT") + case loggerServer: + buffer.WriteString("SERVER") } - var binlogEntry *binlogpb.GrpcLogEntry - o, ok := ml.childMethodLogger.(interface { - Build(iblog.LogEntryConfig) *binlogpb.GrpcLogEntry - }) - if !ok { - logger.Error("Failed to locate the Build method in wrapped method logger") - return + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +type payload struct { + Metadata map[string]string `json:"metadata,omitempty"` + // Timeout is the RPC timeout value. + Timeout time.Duration `json:"timeout,omitempty"` + // StatusCode is the gRPC status code. + StatusCode uint32 `json:"statusCode,omitempty"` + // StatusMessage is the gRPC status message. + StatusMessage string `json:"statusMessage,omitempty"` + // StatusDetails is the value of the grpc-status-details-bin metadata key, + // if any. This is always an encoded google.rpc.Status message. + StatusDetails []byte `json:"statusDetails,omitempty"` + // MessageLength is the length of the message. + MessageLength uint32 `json:"messageLength,omitempty"` + // Message is the message of this entry. This is populated in the case of a + // message event. 
+ Message []byte `json:"message,omitempty"` +} + +type addrType int + +const ( + typeUnknown addrType = iota // `json:"TYPE_UNKNOWN"` + typeIPv4 // `json:"TYPE_IPV4"` + typeIPv6 // `json:"TYPE_IPV6"` + typeUnix // `json:"TYPE_UNIX"` +) + +func (at addrType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch at { + case typeUnknown: + buffer.WriteString("TYPE_UNKNOWN") + case typeIPv4: + buffer.WriteString("TYPE_IPV4") + case typeIPv6: + buffer.WriteString("TYPE_IPV6") + case typeUnix: + buffer.WriteString("TYPE_UNIX") } - binlogEntry = o.Build(c) - - // Translate to GrpcLogRecord - grpcLogRecord := &grpclogrecordpb.GrpcLogRecord{ - Timestamp: binlogEntry.GetTimestamp(), - RpcId: ml.rpcID, - SequenceId: binlogEntry.GetSequenceIdWithinCall(), - EventLogger: loggerTypeToEventLogger[binlogEntry.Logger], - // Making DEBUG the default LogLevel - LogLevel: grpclogrecordpb.GrpcLogRecord_LOG_LEVEL_DEBUG, + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +type address struct { + // Type is the address type of the address of the peer of the RPC. + Type addrType `json:"type,omitempty"` + // Address is the address of the peer of the RPC. + Address string `json:"address,omitempty"` + // IPPort is the ip and port in string form. It is used only for addrType + // typeIPv4 and typeIPv6. + IPPort uint32 `json:"ipPort,omitempty"` +} + +type grpcLogEntry struct { + // CallID is a uuid which uniquely identifies a call. Each call may have + // several log entries. They will all have the same CallID. Nothing is + // guaranteed about their value other than they are unique across different + // RPCs in the same gRPC process. + CallID string `json:"callId,omitempty"` + // SequenceID is the entry sequence ID for this call. The first message has + // a value of 1, to disambiguate from an unset value. The purpose of this + // field is to detect missing entries in environments where durability or + // ordering is not guaranteed. 
+ SequenceID uint64 `json:"sequenceId,omitempty"` + // Type is the type of binary logging event being logged. + Type eventType `json:"type,omitempty"` + // Logger is the entity that generates the log entry. + Logger loggerType `json:"logger,omitempty"` + // Payload is the payload of this log entry. + Payload payload `json:"payload,omitempty"` + // PayloadTruncated is whether the message or metadata field is either + // truncated or emitted due to options specified in the configuration. + PayloadTruncated bool `json:"payloadTruncated,omitempty"` + // Peer is information about the Peer of the RPC. + Peer address `json:"peer,omitempty"` + // A single process may be used to run multiple virtual servers with + // different identities. + // Authority is the name of such a server identify. It is typically a + // portion of the URI in the form of or :. + Authority string `json:"authority,omitempty"` + // ServiceName is the name of the service. + ServiceName string `json:"serviceName,omitempty"` + // MethodName is the name of the RPC method. 
+ MethodName string `json:"methodName,omitempty"` +} + +type methodLoggerBuilder interface { + Build(iblog.LogEntryConfig) *binlogpb.GrpcLogEntry +} + +type binaryMethodLogger struct { + callID, serviceName, methodName, authority string + + mlb methodLoggerBuilder + exporter loggingExporter +} + +func (bml *binaryMethodLogger) Log(c iblog.LogEntryConfig) { + binLogEntry := bml.mlb.Build(c) + + grpcLogEntry := &grpcLogEntry{ + CallID: bml.callID, + SequenceID: binLogEntry.GetSequenceIdWithinCall(), + Logger: loggerTypeToEventLogger[binLogEntry.Logger], } - switch binlogEntry.GetType() { + switch binLogEntry.GetType() { case binlogpb.GrpcLogEntry_EVENT_TYPE_UNKNOWN: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_UNKNOWN + grpcLogEntry.Type = eventTypeUnknown case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_HEADER - if binlogEntry.GetClientHeader() != nil { - methodName := binlogEntry.GetClientHeader().MethodName + grpcLogEntry.Type = eventTypeClientHeader + if binLogEntry.GetClientHeader() != nil { + methodName := binLogEntry.GetClientHeader().MethodName // Example method name: /grpc.testing.TestService/UnaryCall if strings.Contains(methodName, "/") { tokens := strings.Split(methodName, "/") if len(tokens) == 3 { - // Record service name and method name for all events - ml.serviceName = tokens[1] - ml.methodName = tokens[2] + // Record service name and method name for all events. 
+ bml.serviceName = tokens[1] + bml.methodName = tokens[2] } else { logger.Infof("Malformed method name: %v", methodName) } } - grpcLogRecord.Timeout = binlogEntry.GetClientHeader().Timeout - grpcLogRecord.Authority = binlogEntry.GetClientHeader().Authority - grpcLogRecord.Metadata = translateMetadata(binlogEntry.GetClientHeader().Metadata) + bml.authority = binLogEntry.GetClientHeader().GetAuthority() + grpcLogEntry.Payload.Timeout = binLogEntry.GetClientHeader().GetTimeout().AsDuration() + grpcLogEntry.Payload.Metadata = translateMetadata(binLogEntry.GetClientHeader().GetMetadata()) } - grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() - setPeerIfPresent(binlogEntry, grpcLogRecord) + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + setPeerIfPresent(binLogEntry, grpcLogEntry) case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER - grpcLogRecord.Metadata = translateMetadata(binlogEntry.GetServerHeader().Metadata) - grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() - setPeerIfPresent(binlogEntry, grpcLogRecord) + grpcLogEntry.Type = eventTypeServerHeader + if binLogEntry.GetServerHeader() != nil { + grpcLogEntry.Payload.Metadata = translateMetadata(binLogEntry.GetServerHeader().GetMetadata()) + } + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + setPeerIfPresent(binLogEntry, grpcLogEntry) case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE - grpcLogRecord.Message = binlogEntry.GetMessage().GetData() - grpcLogRecord.PayloadSize = binlogEntry.GetMessage().GetLength() - grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() + grpcLogEntry.Type = eventTypeClientMessage + grpcLogEntry.Payload.Message = binLogEntry.GetMessage().GetData() + grpcLogEntry.Payload.MessageLength = binLogEntry.GetMessage().GetLength() + 
grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE - grpcLogRecord.Message = binlogEntry.GetMessage().GetData() - grpcLogRecord.PayloadSize = binlogEntry.GetMessage().GetLength() - grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() + grpcLogEntry.Type = eventTypeServerMessage + grpcLogEntry.Payload.Message = binLogEntry.GetMessage().GetData() + grpcLogEntry.Payload.MessageLength = binLogEntry.GetMessage().GetLength() + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_HALF_CLOSE + grpcLogEntry.Type = eventTypeClientHalfClose case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_TRAILER - grpcLogRecord.Metadata = translateMetadata(binlogEntry.GetTrailer().Metadata) - grpcLogRecord.StatusCode = binlogEntry.GetTrailer().GetStatusCode() - grpcLogRecord.StatusMessage = binlogEntry.GetTrailer().GetStatusMessage() - grpcLogRecord.StatusDetails = binlogEntry.GetTrailer().GetStatusDetails() - grpcLogRecord.PayloadTruncated = binlogEntry.GetPayloadTruncated() - setPeerIfPresent(binlogEntry, grpcLogRecord) + grpcLogEntry.Type = eventTypeServerTrailer + grpcLogEntry.Payload.Metadata = translateMetadata(binLogEntry.GetTrailer().Metadata) + grpcLogEntry.Payload.StatusCode = binLogEntry.GetTrailer().GetStatusCode() + grpcLogEntry.Payload.StatusMessage = binLogEntry.GetTrailer().GetStatusMessage() + grpcLogEntry.Payload.StatusDetails = binLogEntry.GetTrailer().GetStatusDetails() + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + setPeerIfPresent(binLogEntry, grpcLogEntry) case binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL: - grpcLogRecord.EventType = grpclogrecordpb.GrpcLogRecord_GRPC_CALL_CANCEL 
+ grpcLogEntry.Type = eventTypeCancel default: - logger.Infof("Unknown event type: %v", binlogEntry.Type) + logger.Infof("Unknown event type: %v", binLogEntry.Type) return } - grpcLogRecord.ServiceName = ml.serviceName - grpcLogRecord.MethodName = ml.methodName - ml.exporter.EmitGrpcLogRecord(grpcLogRecord) -} + grpcLogEntry.ServiceName = bml.serviceName + grpcLogEntry.MethodName = bml.methodName + grpcLogEntry.Authority = bml.authority -type binaryLogger struct { - // originalLogger is needed to ensure binary logging users won't be impacted - // by this plugin. Users are allowed to subscribe to a completely different - // set of methods. - originalLogger iblog.Logger - // exporter is a loggingExporter and the handle for uploading collected data - // to backends. - exporter unsafe.Pointer // loggingExporter - // logger is a iblog.Logger wrapped for reusing the pattern matching logic - // and the method logger creating logic. - logger unsafe.Pointer // iblog.Logger -} - -func (l *binaryLogger) loadExporter() loggingExporter { - ptrPtr := atomic.LoadPointer(&l.exporter) - if ptrPtr == nil { - return nil + gcploggingEntry := gcplogging.Entry{ + Timestamp: binLogEntry.GetTimestamp().AsTime(), + Severity: 100, + Payload: grpcLogEntry, } - exporterPtr := (*loggingExporter)(ptrPtr) - return *exporterPtr -} -func (l *binaryLogger) loadLogger() iblog.Logger { - ptrPtr := atomic.LoadPointer(&l.logger) - if ptrPtr == nil { - return nil - } - loggerPtr := (*iblog.Logger)(ptrPtr) - return *loggerPtr + bml.exporter.EmitGcpLoggingEntry(gcploggingEntry) } -func (l *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { - var ol iblog.MethodLogger +type eventConfig struct { + ServiceMethod map[string]bool + Services map[string]bool + MatchAll bool - if l.originalLogger != nil { - ol = l.originalLogger.GetMethodLogger(methodName) - } - - // Prevent logging from logging, traces, and metrics API calls. 
- if strings.HasPrefix(methodName, "/google.logging.v2.LoggingServiceV2/") || strings.HasPrefix(methodName, "/google.monitoring.v3.MetricService/") || - strings.HasPrefix(methodName, "/google.devtools.cloudtrace.v2.TraceService/") { - return ol - } - - // If no exporter is specified, there is no point creating a method - // logger. We don't have any chance to inject exporter after its - // creation. - exporter := l.loadExporter() - if exporter == nil { - return ol - } + // If true, won't log anything. + Exclude bool + HeaderBytes uint64 + MessageBytes uint64 +} - // If no logger is specified, e.g., during init period, do nothing. - binLogger := l.loadLogger() - if binLogger == nil { - return ol - } +type binaryLogger struct { + EventConfigs []eventConfig + exporter loggingExporter +} - // If this method is not picked by LoggerConfig, do nothing. - ml := binLogger.GetMethodLogger(methodName) - if ml == nil { - return ol +func (bl *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { + s, _, err := grpcutil.ParseMethod(methodName) + if err != nil { + logger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil } + for _, eventConfig := range bl.EventConfigs { + if eventConfig.MatchAll || eventConfig.ServiceMethod[methodName] || eventConfig.Services[s] { + if eventConfig.Exclude { + return nil + } - return &binaryMethodLogger{ - originalMethodLogger: ol, - childMethodLogger: ml, - rpcID: uuid.NewString(), - exporter: exporter, + return &binaryMethodLogger{ + exporter: bl.exporter, + mlb: iblog.NewTruncatingMethodLogger(eventConfig.HeaderBytes, eventConfig.MessageBytes), + callID: uuid.NewString(), + } + } } + return nil } -func (l *binaryLogger) Close() { - if l == nil { +func registerClientRPCEvents(clientRPCEvents []clientRPCEvents, exporter loggingExporter) { + if len(clientRPCEvents) == 0 { return } - ePtr := atomic.LoadPointer(&l.exporter) - if ePtr != nil { - exporter := (*loggingExporter)(ePtr) - if err := 
(*exporter).Close(); err != nil { - logger.Infof("Failed to close logging exporter: %v", err) + var eventConfigs []eventConfig + for _, clientRPCEvent := range clientRPCEvents { + eventConfig := eventConfig{ + Exclude: clientRPCEvent.Exclude, + HeaderBytes: uint64(clientRPCEvent.MaxMetadataBytes), + MessageBytes: uint64(clientRPCEvent.MaxMessageBytes), } + for _, method := range clientRPCEvent.Methods { + eventConfig.ServiceMethod = make(map[string]bool) + eventConfig.Services = make(map[string]bool) + if method == "*" { + eventConfig.MatchAll = true + continue + } + s, m, err := grpcutil.ParseMethod(method) + if err != nil { + continue + } + if m == "*" { + eventConfig.Services[s] = true + continue + } + eventConfig.ServiceMethod[method] = true + } + eventConfigs = append(eventConfigs, eventConfig) } -} - -func validateExistingMethodLoggerConfig(existing *iblog.MethodLoggerConfig, filter logFilter) bool { - // In future, we could add more validations. Currently, we only check if the - // new filter configs are different than the existing one, if so, we log a - // warning. - if existing != nil && (existing.Header != uint64(filter.HeaderBytes) || existing.Message != uint64(filter.MessageBytes)) { - logger.Warningf("Ignored log_filter config: %+v", filter) + clientSideLogger := &binaryLogger{ + EventConfigs: eventConfigs, + exporter: exporter, } - return existing == nil + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl binarylog.Logger) grpc.DialOption)(clientSideLogger)) } -func createBinaryLoggerConfig(filters []logFilter) iblog.LoggerConfig { - config := iblog.LoggerConfig{ - Services: make(map[string]*iblog.MethodLoggerConfig), - Methods: make(map[string]*iblog.MethodLoggerConfig), +func registerServerRPCEvents(serverRPCEvents []serverRPCEvents, exporter loggingExporter) { + if len(serverRPCEvents) == 0 { + return } - // Try matching the filters one by one, pick the first match. 
The - // correctness of the log filter pattern is ensured by config.go. - for _, filter := range filters { - if filter.Pattern == "*" { - // Match a "*" - if !validateExistingMethodLoggerConfig(config.All, filter) { + var eventConfigs []eventConfig + for _, serverRPCEvent := range serverRPCEvents { + eventConfig := eventConfig{ + Exclude: serverRPCEvent.Exclude, + HeaderBytes: uint64(serverRPCEvent.MaxMetadataBytes), + MessageBytes: uint64(serverRPCEvent.MaxMessageBytes), + } + for _, method := range serverRPCEvent.Methods { + eventConfig.ServiceMethod = make(map[string]bool) + eventConfig.Services = make(map[string]bool) + if method == "*" { + eventConfig.MatchAll = true continue } - config.All = &iblog.MethodLoggerConfig{Header: uint64(filter.HeaderBytes), Message: uint64(filter.MessageBytes)} - continue - } - tokens := strings.SplitN(filter.Pattern, "/", 2) - filterService := tokens[0] - filterMethod := tokens[1] - if filterMethod == "*" { - // Handle "p.s/*" case - if !validateExistingMethodLoggerConfig(config.Services[filterService], filter) { + s, m, err := grpcutil.ParseMethod(method) + if err != nil { // Shouldn't happen, already validated at this point. continue } - config.Services[filterService] = &iblog.MethodLoggerConfig{Header: uint64(filter.HeaderBytes), Message: uint64(filter.MessageBytes)} - continue - } - // Exact match like "p.s/m" - if !validateExistingMethodLoggerConfig(config.Methods[filter.Pattern], filter) { - continue - } - config.Methods[filter.Pattern] = &iblog.MethodLoggerConfig{Header: uint64(filter.HeaderBytes), Message: uint64(filter.MessageBytes)} - } - return config -} - -// start is the core logic for setting up the custom binary logging logger, and -// it's also useful for testing. 
-func (l *binaryLogger) start(config *config, exporter loggingExporter) error { - filters := config.LogFilters - if len(filters) == 0 || exporter == nil { - // Doing nothing is allowed - if exporter != nil { - // The exporter is owned by binaryLogger, so we should close it if - // we are not planning to use it. - exporter.Close() + if m == "*" { + eventConfig.Services[s] = true + continue + } + eventConfig.ServiceMethod[method] = true } - logger.Info("Skipping gRPC Observability logger: no config") - return nil + eventConfigs = append(eventConfigs, eventConfig) } - - binLogger := iblog.NewLoggerFromConfig(createBinaryLoggerConfig(filters)) - if binLogger != nil { - atomic.StorePointer(&l.logger, unsafe.Pointer(&binLogger)) + serverSideLogger := &binaryLogger{ + EventConfigs: eventConfigs, + exporter: exporter, } - atomic.StorePointer(&l.exporter, unsafe.Pointer(&exporter)) - logger.Info("Start gRPC Observability logger") - return nil + internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl binarylog.Logger) grpc.ServerOption)(serverSideLogger)) } -func (l *binaryLogger) Start(ctx context.Context, config *config) error { - if config == nil || !config.EnableCloudLogging { +func startLogging(ctx context.Context, config *config) error { + if config == nil || config.CloudLogging == nil { return nil } - if config.DestinationProjectID == "" { - return fmt.Errorf("failed to enable CloudLogging: empty destination_project_id") - } - exporter, err := newCloudLoggingExporter(ctx, config) + var err error + lExporter, err = newLoggingExporter(ctx, config) if err != nil { return fmt.Errorf("unable to create CloudLogging exporter: %v", err) } - l.start(config, exporter) + + cl := config.CloudLogging + registerClientRPCEvents(cl.ClientRPCEvents, lExporter) + registerServerRPCEvents(cl.ServerRPCEvents, lExporter) return nil } -func newBinaryLogger(iblogger iblog.Logger) *binaryLogger { - return &binaryLogger{ - originalLogger: iblogger, +func 
stopLogging() { + internal.ClearGlobalDialOptions() + internal.ClearGlobalServerOptions() + if lExporter != nil { + // This Close() call handles the flushing of the logging buffer. + lExporter.Close() } } - -var defaultLogger *binaryLogger - -func prepareLogging() { - defaultLogger = newBinaryLogger(iblog.GetLogger()) - iblog.SetLogger(defaultLogger) -} diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go new file mode 100644 index 000000000000..d91af4afc67f --- /dev/null +++ b/gcp/observability/logging_test.go @@ -0,0 +1,1098 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "sync" + "testing" + + gcplogging "cloud.google.com/go/logging" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/test/grpc_testing" +) + +func cmpLoggingEntryList(got []*grpcLogEntry, want []*grpcLogEntry) error { + if diff := cmp.Diff(got, want, + // For nondeterministic metadata iteration. 
+ cmp.Comparer(func(a map[string]string, b map[string]string) bool { + if len(a) > len(b) { + a, b = b, a + } + for k, v := range a { + if b[k] != v { + return false + } + } + return true + }), + cmpopts.IgnoreFields(grpcLogEntry{}, "CallID", "Peer"), + cmpopts.IgnoreFields(address{}, "IPPort", "Type"), + cmpopts.IgnoreFields(payload{}, "Timeout")); diff != "" { + return fmt.Errorf("got unexpected grpcLogEntry list, diff (-got, +want): %v", diff) + } + return nil +} + +type fakeLoggingExporter struct { + t *testing.T + + mu sync.Mutex + entries []*grpcLogEntry +} + +func (fle *fakeLoggingExporter) EmitGcpLoggingEntry(entry gcplogging.Entry) { + fle.mu.Lock() + defer fle.mu.Unlock() + if entry.Severity != 100 { + fle.t.Errorf("entry.Severity is not 100, this should be hardcoded") + } + grpcLogEntry, ok := entry.Payload.(*grpcLogEntry) + if !ok { + fle.t.Errorf("payload passed in isn't grpcLogEntry") + } + fle.entries = append(fle.entries, grpcLogEntry) +} + +func (fle *fakeLoggingExporter) Close() error { + return nil +} + +// setupObservabilitySystemWithConfig sets up the observability system with the +// specified config, and returns a function which cleans up the observability +// system. +func setupObservabilitySystemWithConfig(cfg *config) (func(), error) { + validConfigJSON, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("failed to convert config to JSON: %v", err) + } + oldObservabilityConfig := envconfig.ObservabilityConfig + envconfig.ObservabilityConfig = string(validConfigJSON) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err = Start(ctx) + if err != nil { + return nil, fmt.Errorf("error in Start: %v", err) + } + return func() { + End() + envconfig.ObservabilityConfig = oldObservabilityConfig + }, nil +} + +// TestClientRPCEventsLogAll tests the observability system configured with a +// client RPC event that logs every call. 
It performs a Unary and Bidirectional +// Streaming RPC, and expects certain grpcLogEntries to make it's way to the +// exporter. +func (s) TestClientRPCEventsLogAll(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + clientRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(clientRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability: %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + if err := stream.Send(&grpc_testing.StreamingOutputCallResponse{}); err != nil { + return err + } + if _, err := stream.Recv(); err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + 
Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + + fle.entries = nil + fle.mu.Unlock() + + // Make a streaming RPC. This should cause Log calls on the MethodLogger. 
+ stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + if err := stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send() failed: %v", err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("stream.CloseSend()() failed: %v", err) + } + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + grpcLogEntriesWant = []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 4, + Authority: ss.Address, + }, + { + Type: eventTypeClientHalfClose, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 5, + Authority: ss.Address, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 6, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err 
!= nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} + +func (s) TestServerRPCEventsLogAll(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + serverRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(serverRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + if err := stream.Send(&grpc_testing.StreamingOutputCallResponse{}); err != nil { + return err + } + if _, err := stream.Recv(); err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: 
map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + }, + { + Type: eventTypeServerHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.entries = nil + fle.mu.Unlock() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + if err := stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send() failed: %v", err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("stream.CloseSend()() failed: %v", err) + } + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + grpcLogEntriesWant = []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + 
Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 2, + Authority: ss.Address, + }, + { + Type: eventTypeServerHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 4, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeClientHalfClose, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 5, + Authority: ss.Address, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 6, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} + +// TestBothClientAndServerRPCEvents tests the scenario where you have both +// Client and Server RPC Events configured to log. Both sides should log and +// share the exporter, so the exporter should receive the collective amount of +// calls for both a client stream (corresponding to a Client RPC Event) and a +// server stream (corresponding ot a Server RPC Event). The specificity of the +// entries are tested in previous tests. 
+func (s) TestBothClientAndServerRPCEvents(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + serverRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + + cleanup, err := setupObservabilitySystemWithConfig(serverRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // Make a Unary RPC. Both client side and server side streams should log + // entries, which share the same exporter. The exporter should thus receive + // entries from both the client and server streams (the specificity of + // entries is checked in previous tests). 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + fle.mu.Lock() + if len(fle.entries) != 10 { + fle.mu.Unlock() + t.Fatalf("Unexpected length of entries %v, want 10 (collective of client and server)", len(fle.entries)) + } + fle.mu.Unlock() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + fle.mu.Lock() + if len(fle.entries) != 17 { + fle.mu.Unlock() + t.Fatalf("Unexpected length of entries %v, want 17 (collective of client and server)", len(fle.entries)) + } + fle.mu.Unlock() +} + +// TestClientRPCEventsLogAll tests the observability system configured with a +// client RPC event that logs every call and that truncates headers and +// messages. It performs a Unary RPC, and expects events with truncated payloads +// and payloadTruncated set to true, signifying the system properly truncated +// headers and messages logged. 
+func (s) TestClientRPCEventsTruncateHeaderAndMetadata(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + clientRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 10, + MaxMessageBytes: 2, + }, + }, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(clientRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability: %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + md := metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } + ctx = metadata.NewOutgoingContext(ctx, md) + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: []byte("00000")}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 
2, + Authority: ss.Address, + Payload: payload{ + MessageLength: 9, + Message: []uint8{ + 0x1a, + 0x07, + }, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + // Only one metadata entry should have been present in logging due to + // truncation. + if mdLen := len(fle.entries[0].Payload.Metadata); mdLen != 1 { + t.Fatalf("Metadata should have only 1 entry due to truncation, got %v", mdLen) + } + fle.mu.Unlock() +} + +// TestPrecedenceOrderingInConfiguration tests the scenario where the logging +// part of observability is configured with three client RPC events, the first +// two on specific methods in the service, the last one for any method within +// the service. This test sends three RPC's, one corresponding to each log +// entry. The logging logic dictated by that specific event should be what is +// used for emission. The second event will specify to exclude logging on RPC's, +// which should generate no log entries if an RPC gets to and matches that +// event. 
+func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + threeEventsConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"/grpc.testing.TestService/UnaryCall"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + { + Methods: []string{"/grpc.testing.TestService/EmptyCall"}, + Exclude: true, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + { + Methods: []string{"/grpc.testing.TestService/*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + + cleanup, err := setupObservabilitySystemWithConfig(threeEventsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *grpc_testing.Empty) (*grpc_testing.Empty, error) { + return &grpc_testing.Empty{}, nil + }, + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // A Unary RPC should match with first event and logs should correspond + // accordingly. The first event it matches to should be used for the + // configuration, even though it could potentially match to events in the + // future. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.entries = nil + fle.mu.Unlock() + + // A unary empty RPC should match with the second event, which has the exclude + // flag set. Thus, a unary empty RPC should cause no downstream logs. + if _, err := ss.Client.EmptyCall(ctx, &grpc_testing.Empty{}); err != nil { + t.Fatalf("Unexpected error from EmptyCall: %v", err) + } + // The exporter should have received no new log entries due to this call. 
+ fle.mu.Lock() + if len(fle.entries) != 0 { + fle.mu.Unlock() + t.Fatalf("Unexpected length of entries %v, want 0", len(fle.entries)) + } + fle.mu.Unlock() + + // A third RPC, a full duplex call, which doesn't match with first two and + // matches to last one, due to being a wildcard for every method in the + // service, should log accordingly to the last event's logic. + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + grpcLogEntriesWant = []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientHalfClose, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 2, + Authority: ss.Address, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 4, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} + +func (s) TestTranslateMetadata(t *testing.T) { + concatBinLogValue := base64.StdEncoding.EncodeToString([]byte("value1")) + "," + base64.StdEncoding.EncodeToString([]byte("value2")) + tests := []struct { + name 
string + binLogMD *binlogpb.Metadata + wantMD map[string]string + }{ + { + name: "two-entries-different-key", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1", + Value: []byte("value1"), + }, + { + Key: "header2", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1": "value1", + "header2": "value2", + }, + }, + { + name: "two-entries-same-key", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1", + Value: []byte("value1"), + }, + { + Key: "header1", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1": "value1,value2", + }, + }, + { + name: "two-entries-same-key-bin-header", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1-bin", + Value: []byte("value1"), + }, + { + Key: "header1-bin", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1-bin": concatBinLogValue, + }, + }, + { + name: "four-entries-two-keys", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1", + Value: []byte("value1"), + }, + { + Key: "header1", + Value: []byte("value2"), + }, + { + Key: "header1-bin", + Value: []byte("value1"), + }, + { + Key: "header1-bin", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1": "value1,value2", + "header1-bin": concatBinLogValue, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if gotMD := translateMetadata(test.binLogMD); !cmp.Equal(gotMD, test.wantMD) { + t.Fatalf("translateMetadata(%v) = %v, want %v", test.binLogMD, gotMD, test.wantMD) + } + }) + } +} + +func (s) TestMarshalJSON(t *testing.T) { + logEntry := &grpcLogEntry{ + CallID: "300-300-300", + SequenceID: 3, + Type: eventTypeUnknown, + Logger: loggerClient, + Payload: payload{ + Metadata: map[string]string{"header1": "value1"}, + Timeout: 20, + StatusCode: 3, + StatusMessage: "ok", + StatusDetails: 
[]byte("ok"), + MessageLength: 3, + Message: []byte("wow"), + }, + Peer: address{ + Type: typeIPv4, + Address: "localhost", + IPPort: 16000, + }, + PayloadTruncated: false, + Authority: "server", + ServiceName: "grpc-testing", + MethodName: "UnaryRPC", + } + if _, err := json.Marshal(logEntry); err != nil { + t.Fatalf("json.Marshal(%v) failed with error: %v", logEntry, err) + } +} diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go index 3855bc7ebaf0..1010f72f743c 100644 --- a/gcp/observability/observability.go +++ b/gcp/observability/observability.go @@ -34,10 +34,6 @@ import ( var logger = grpclog.Component("observability") -func init() { - prepareLogging() -} - // Start is the opt-in API for gRPC Observability plugin. This function should // be invoked in the main function, and before creating any gRPC clients or // servers, otherwise, they might not be instrumented. At high-level, this @@ -55,7 +51,7 @@ func Start(ctx context.Context) error { return err } if config == nil { - return fmt.Errorf("no ObservabilityConfig found, it can be set via env %s", envObservabilityConfig) + return fmt.Errorf("no ObservabilityConfig found") } // Set the project ID if it isn't configured manually. @@ -69,7 +65,7 @@ func Start(ctx context.Context) error { } // Logging is controlled by the config at methods level. - return defaultLogger.Start(ctx, config) + return startLogging(ctx, config) } // End is the clean-up API for gRPC Observability plugin. It is expected to be @@ -79,6 +75,6 @@ func Start(ctx context.Context) error { // // Note: this method should only be invoked once. 
func End() { - defaultLogger.Close() + stopLogging() stopOpenCensus() } diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index e9238f898abd..1ea0424b7f5e 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -19,12 +19,11 @@ package observability import ( - "bytes" "context" "encoding/json" "fmt" + "io" "io/ioutil" - "net" "os" "sync" "testing" @@ -32,18 +31,12 @@ import ( "go.opencensus.io/stats/view" "go.opencensus.io/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - grpclogrecordpb "google.golang.org/grpc/gcp/observability/internal/logging" - iblog "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" - testgrpc "google.golang.org/grpc/interop/grpc_testing" - testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" + "google.golang.org/grpc/test/grpc_testing" ) type s struct { @@ -75,275 +68,6 @@ var ( defaultRequestCount = 24 ) -type testServer struct { - testgrpc.UnimplementedTestServiceServer -} - -func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - if err := grpc.SendHeader(ctx, testHeaderMetadata); err != nil { - return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want ", testHeaderMetadata, err) - } - if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil { - return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata, err) - } - - if bytes.Equal(in.Payload.Body, testErrorPayload) { - return nil, fmt.Errorf(testErrorMessage) - } - - return &testpb.SimpleResponse{Payload: in.Payload}, 
nil -} - -type fakeLoggingExporter struct { - t *testing.T - clientEvents []*grpclogrecordpb.GrpcLogRecord - serverEvents []*grpclogrecordpb.GrpcLogRecord - isClosed bool - mu sync.Mutex -} - -func (fle *fakeLoggingExporter) EmitGrpcLogRecord(l *grpclogrecordpb.GrpcLogRecord) { - fle.mu.Lock() - defer fle.mu.Unlock() - switch l.EventLogger { - case grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT: - fle.clientEvents = append(fle.clientEvents, l) - case grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER: - fle.serverEvents = append(fle.serverEvents, l) - default: - fle.t.Fatalf("unexpected event logger: %v", l.EventLogger) - } - eventJSON, _ := protojson.Marshal(l) - fle.t.Logf("fakeLoggingExporter Emit: %s", eventJSON) -} - -func (fle *fakeLoggingExporter) Close() error { - fle.isClosed = true - return nil -} - -// test is an end-to-end test. It should be created with the newTest -// func, modified as needed, and then started with its startServer method. -// It should be cleaned up with the tearDown method. -type test struct { - t *testing.T - fle *fakeLoggingExporter - - testServer testgrpc.TestServiceServer // nil means none - // srv and srvAddr are set once startServer is called. - srv *grpc.Server - srvAddr string - - cc *grpc.ClientConn // nil until requested via clientConn -} - -func (te *test) tearDown() { - if te.cc != nil { - te.cc.Close() - te.cc = nil - } - te.srv.Stop() - End() - - if te.fle != nil && !te.fle.isClosed { - te.t.Fatalf("fakeLoggingExporter not closed!") - } -} - -// newTest returns a new test using the provided testing.T and -// environment. It is returned with default values. Tests should -// modify it before calling its startServer and clientConn methods. -func newTest(t *testing.T) *test { - return &test{ - t: t, - } -} - -// startServer starts a gRPC server listening. Callers should defer a -// call to te.tearDown to clean up. 
-func (te *test) startServer(ts testgrpc.TestServiceServer) { - te.testServer = ts - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - te.t.Fatalf("Failed to listen: %v", err) - } - var opts []grpc.ServerOption - s := grpc.NewServer(opts...) - te.srv = s - if te.testServer != nil { - testgrpc.RegisterTestServiceServer(s, te.testServer) - } - - go s.Serve(lis) - te.srvAddr = lis.Addr().String() -} - -func (te *test) clientConn() *grpc.ClientConn { - if te.cc != nil { - return te.cc - } - opts := []grpc.DialOption{ - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - grpc.WithUserAgent("test/0.0.1"), - } - var err error - te.cc, err = grpc.Dial(te.srvAddr, opts...) - if err != nil { - te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err) - } - return te.cc -} - -func (te *test) enablePluginWithConfig(config *config) { - // Injects the fake exporter for testing purposes - te.fle = &fakeLoggingExporter{t: te.t} - defaultLogger = newBinaryLogger(nil) - iblog.SetLogger(defaultLogger) - if err := defaultLogger.start(config, te.fle); err != nil { - te.t.Fatalf("Failed to start plugin: %v", err) - } -} - -func (te *test) enablePluginWithCaptureAll() { - te.enablePluginWithConfig(&config{ - EnableCloudLogging: true, - DestinationProjectID: "fake", - LogFilters: []logFilter{ - { - Pattern: "*", - HeaderBytes: infinitySizeBytes, - MessageBytes: infinitySizeBytes, - }, - }, - }) -} - -func (te *test) enableOpenCensus() { - defaultMetricsReportingInterval = time.Millisecond * 100 - config := &config{ - EnableCloudLogging: true, - EnableCloudTrace: true, - EnableCloudMonitoring: true, - GlobalTraceSamplingRate: 1.0, - } - startOpenCensus(config) -} - -func checkEventCommon(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord) { - if seen.RpcId == "" { - t.Fatalf("expect non-empty RpcId") - } - if seen.SequenceId == 0 { - t.Fatalf("expect non-zero SequenceId") - } -} - -func checkEventRequestHeader(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, 
want *grpclogrecordpb.GrpcLogRecord) { - checkEventCommon(t, seen) - if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_HEADER { - t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_REQUEST_HEADER", seen.EventType.String()) - } - if seen.EventLogger != want.EventLogger { - t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) - } - if want.Authority != "" && seen.Authority != want.Authority { - t.Fatalf("l.Authority = %v, want %v", seen.Authority, want.Authority) - } - if want.ServiceName != "" && seen.ServiceName != want.ServiceName { - t.Fatalf("l.ServiceName = %v, want %v", seen.ServiceName, want.ServiceName) - } - if want.MethodName != "" && seen.MethodName != want.MethodName { - t.Fatalf("l.MethodName = %v, want %v", seen.MethodName, want.MethodName) - } - if len(seen.Metadata.Entry) != 1 { - t.Fatalf("unexpected header size: %v != 1", len(seen.Metadata.Entry)) - } - if seen.Metadata.Entry[0].Key != "header" { - t.Fatalf("unexpected header: %v", seen.Metadata.Entry[0].Key) - } - if !bytes.Equal(seen.Metadata.Entry[0].Value, []byte(testHeaderMetadata["header"][0])) { - t.Fatalf("unexpected header value: %v", seen.Metadata.Entry[0].Value) - } -} - -func checkEventResponseHeader(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord) { - checkEventCommon(t, seen) - if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER { - t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_RESPONSE_HEADER", seen.EventType.String()) - } - if seen.EventLogger != want.EventLogger { - t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) - } - if len(seen.Metadata.Entry) != 1 { - t.Fatalf("unexpected header size: %v != 1", len(seen.Metadata.Entry)) - } - if seen.Metadata.Entry[0].Key != "header" { - t.Fatalf("unexpected header: %v", seen.Metadata.Entry[0].Key) - } - if !bytes.Equal(seen.Metadata.Entry[0].Value, []byte(testHeaderMetadata["header"][0])) { - t.Fatalf("unexpected 
header value: %v", seen.Metadata.Entry[0].Value) - } -} - -func checkEventRequestMessage(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord, payload []byte) { - checkEventCommon(t, seen) - if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE { - t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_REQUEST_MESSAGE", seen.EventType.String()) - } - if seen.EventLogger != want.EventLogger { - t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) - } - msg := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: payload}} - msgBytes, _ := proto.Marshal(msg) - if !bytes.Equal(seen.Message, msgBytes) { - t.Fatalf("unexpected payload: %v != %v", seen.Message, payload) - } -} - -func checkEventResponseMessage(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord, payload []byte) { - checkEventCommon(t, seen) - if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE { - t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_RESPONSE_MESSAGE", seen.EventType.String()) - } - if seen.EventLogger != want.EventLogger { - t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) - } - msg := &testpb.SimpleResponse{Payload: &testpb.Payload{Body: payload}} - msgBytes, _ := proto.Marshal(msg) - if !bytes.Equal(seen.Message, msgBytes) { - t.Fatalf("unexpected payload: %v != %v", seen.Message, payload) - } -} - -func checkEventTrailer(t *testing.T, seen *grpclogrecordpb.GrpcLogRecord, want *grpclogrecordpb.GrpcLogRecord) { - checkEventCommon(t, seen) - if seen.EventType != grpclogrecordpb.GrpcLogRecord_GRPC_CALL_TRAILER { - t.Fatalf("got %v, want GrpcLogRecord_GRPC_CALL_TRAILER", seen.EventType.String()) - } - if seen.EventLogger != want.EventLogger { - t.Fatalf("l.EventLogger = %v, want %v", seen.EventLogger, want.EventLogger) - } - if seen.StatusCode != want.StatusCode { - t.Fatalf("l.StatusCode = %v, want %v", seen.StatusCode, want.StatusCode) - 
} - if seen.StatusMessage != want.StatusMessage { - t.Fatalf("l.StatusMessage = %v, want %v", seen.StatusMessage, want.StatusMessage) - } - if !bytes.Equal(seen.StatusDetails, want.StatusDetails) { - t.Fatalf("l.StatusDetails = %v, want %v", seen.StatusDetails, want.StatusDetails) - } - if len(seen.Metadata.Entry) != 1 { - t.Fatalf("unexpected trailer size: %v != 1", len(seen.Metadata.Entry)) - } - if seen.Metadata.Entry[0].Key != "trailer" { - t.Fatalf("unexpected trailer: %v", seen.Metadata.Entry[0].Key) - } - if !bytes.Equal(seen.Metadata.Entry[0].Value, []byte(testTrailerMetadata["trailer"][0])) { - t.Fatalf("unexpected trailer value: %v", seen.Metadata.Entry[0].Value) - } -} - const ( TypeOpenCensusViewDistribution string = "distribution" TypeOpenCensusViewCount = "count" @@ -392,296 +116,67 @@ func (fe *fakeOpenCensusExporter) Close() error { return nil } -func (s) TestLoggingForOkCall(t *testing.T) { - te := newTest(t) - defer te.tearDown() - te.enablePluginWithCaptureAll() - te.startServer(&testServer{}) - tc := testgrpc.NewTestServiceClient(te.clientConn()) - - var ( - resp *testpb.SimpleResponse - req *testpb.SimpleRequest - err error - ) - req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} - tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) - if err != nil { - t.Fatalf("unary call failed: %v", err) - } - t.Logf("unary call passed: %v", resp) - - // Wait for the gRPC transport to gracefully close to ensure no lost event. 
- te.cc.Close() - te.srv.GracefulStop() - // Check size of events - if len(te.fle.clientEvents) != 5 { - t.Fatalf("expects 5 client events, got %d", len(te.fle.clientEvents)) - } - if len(te.fle.serverEvents) != 5 { - t.Fatalf("expects 5 server events, got %d", len(te.fle.serverEvents)) - } - // Client events - checkEventRequestHeader(te.t, te.fle.clientEvents[0], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - Authority: te.srvAddr, - ServiceName: "grpc.testing.TestService", - MethodName: "UnaryCall", - }) - checkEventRequestMessage(te.t, te.fle.clientEvents[1], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - }, testOkPayload) - checkEventResponseHeader(te.t, te.fle.clientEvents[2], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - }) - checkEventResponseMessage(te.t, te.fle.clientEvents[3], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - }, testOkPayload) - checkEventTrailer(te.t, te.fle.clientEvents[4], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - StatusCode: 0, - }) - // Server events - checkEventRequestHeader(te.t, te.fle.serverEvents[0], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }) - checkEventRequestMessage(te.t, te.fle.serverEvents[1], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }, testOkPayload) - checkEventResponseHeader(te.t, te.fle.serverEvents[2], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }) - checkEventResponseMessage(te.t, te.fle.serverEvents[3], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }, testOkPayload) - checkEventTrailer(te.t, te.fle.serverEvents[4], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: 
grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - StatusCode: 0, - }) -} - -func (s) TestLoggingForErrorCall(t *testing.T) { - te := newTest(t) - defer te.tearDown() - te.enablePluginWithCaptureAll() - te.startServer(&testServer{}) - tc := testgrpc.NewTestServiceClient(te.clientConn()) - - req := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testErrorPayload}} - tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - _, err := tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) - if err == nil { - t.Fatalf("unary call expected to fail, but passed") - } - - // Wait for the gRPC transport to gracefully close to ensure no lost event. - te.cc.Close() - te.srv.GracefulStop() - // Check size of events - if len(te.fle.clientEvents) != 4 { - t.Fatalf("expects 4 client events, got %d", len(te.fle.clientEvents)) - } - if len(te.fle.serverEvents) != 4 { - t.Fatalf("expects 4 server events, got %d", len(te.fle.serverEvents)) - } - // Client events - checkEventRequestHeader(te.t, te.fle.clientEvents[0], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - Authority: te.srvAddr, - ServiceName: "grpc.testing.TestService", - MethodName: "UnaryCall", - }) - checkEventRequestMessage(te.t, te.fle.clientEvents[1], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - }, testErrorPayload) - checkEventResponseHeader(te.t, te.fle.clientEvents[2], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - }) - checkEventTrailer(te.t, te.fle.clientEvents[3], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - StatusCode: 2, - StatusMessage: testErrorMessage, - }) - // Server events - checkEventRequestHeader(te.t, te.fle.serverEvents[0], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }) - checkEventRequestMessage(te.t, 
te.fle.serverEvents[1], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }, testErrorPayload) - checkEventResponseHeader(te.t, te.fle.serverEvents[2], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }) - checkEventTrailer(te.t, te.fle.serverEvents[3], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - StatusCode: 2, - StatusMessage: testErrorMessage, - }) -} - -func (s) TestEmptyConfig(t *testing.T) { - te := newTest(t) - defer te.tearDown() - te.enablePluginWithConfig(&config{}) - te.startServer(&testServer{}) - tc := testgrpc.NewTestServiceClient(te.clientConn()) - - req := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} - tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - resp, err := tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) - if err != nil { - t.Fatalf("unary call failed: %v", err) - } - t.Logf("unary call passed: %v", resp) - - // Wait for the gRPC transport to gracefully close to ensure no lost event. - te.cc.Close() - te.srv.GracefulStop() - // Check size of events - if len(te.fle.clientEvents) != 0 { - t.Fatalf("expects 0 client events, got %d", len(te.fle.clientEvents)) - } - if len(te.fle.serverEvents) != 0 { - t.Fatalf("expects 0 server events, got %d", len(te.fle.serverEvents)) - } -} - -func (s) TestOverrideConfig(t *testing.T) { - te := newTest(t) - defer te.tearDown() - // Setting 3 filters, expected to use the third filter, because it's the - // most specific one. The third filter allows message payload logging, and - // others disabling the message payload logging. We should observe this - // behavior latter. 
- te.enablePluginWithConfig(&config{ - EnableCloudLogging: true, - DestinationProjectID: "fake", - LogFilters: []logFilter{ - { - Pattern: "wont/match", - MessageBytes: 0, - }, - { - Pattern: "*", - MessageBytes: 0, - }, - { - Pattern: "grpc.testing.TestService/*", - MessageBytes: 4096, - }, - }, - }) - te.startServer(&testServer{}) - tc := testgrpc.NewTestServiceClient(te.clientConn()) - - var ( - resp *testpb.SimpleResponse - req *testpb.SimpleRequest - err error - ) - req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} - tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) - if err != nil { - t.Fatalf("unary call failed: %v", err) - } - t.Logf("unary call passed: %v", resp) - - // Wait for the gRPC transport to gracefully close to ensure no lost event. - te.cc.Close() - te.srv.GracefulStop() - // Check size of events - if len(te.fle.clientEvents) != 5 { - t.Fatalf("expects 5 client events, got %d", len(te.fle.clientEvents)) - } - if len(te.fle.serverEvents) != 5 { - t.Fatalf("expects 5 server events, got %d", len(te.fle.serverEvents)) - } - // Check Client message payloads - checkEventRequestMessage(te.t, te.fle.clientEvents[1], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - }, testOkPayload) - checkEventResponseMessage(te.t, te.fle.clientEvents[3], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_CLIENT, - }, testOkPayload) - // Check Server message payloads - checkEventRequestMessage(te.t, te.fle.serverEvents[1], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }, testOkPayload) - checkEventResponseMessage(te.t, te.fle.serverEvents[3], &grpclogrecordpb.GrpcLogRecord{ - EventLogger: grpclogrecordpb.GrpcLogRecord_LOGGER_SERVER, - }, testOkPayload) -} - -func (s) TestNoMatch(t *testing.T) 
{ - te := newTest(t) - defer te.tearDown() - // Setting 3 filters, expected to use the second filter. The second filter - // allows message payload logging, and others disabling the message payload - // logging. We should observe this behavior latter. - te.enablePluginWithConfig(&config{ - EnableCloudLogging: true, - DestinationProjectID: "fake", - LogFilters: []logFilter{ - { - Pattern: "wont/match", - MessageBytes: 0, +func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{":-)"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, }, }, - }) - te.startServer(&testServer{}) - tc := testgrpc.NewTestServiceClient(te.clientConn()) - - var ( - resp *testpb.SimpleResponse - req *testpb.SimpleRequest - err error - ) - req = &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} - tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - resp, err = tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) - if err != nil { - t.Fatalf("unary call failed: %v", err) } - t.Logf("unary call passed: %v", resp) - - // Wait for the gRPC transport to gracefully close to ensure no lost event. 
- te.cc.Close() - te.srv.GracefulStop() - // Check size of events - if len(te.fle.clientEvents) != 0 { - t.Fatalf("expects 0 client events, got %d", len(te.fle.clientEvents)) + invalidConfigJSON, err := json.Marshal(invalidConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) } - if len(te.fle.serverEvents) != 0 { - t.Fatalf("expects 0 server events, got %d", len(te.fle.serverEvents)) + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = string(invalidConfigJSON) + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() + // If there is at least one invalid pattern, which should not be silently tolerated. + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") } } -func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { - config := &config{ - EnableCloudLogging: true, - DestinationProjectID: "fake", - LogFilters: []logFilter{ - { - Pattern: ":-)", - }, - { - Pattern: "*", +// TestRefuseStartWithExcludeAndWildCardAll tests the sceanrio where an +// observability configuration is provided with client RPC event specifying to +// exclude, and which matches on the '*' wildcard (any). This should cause an +// error when trying to start the observability system. 
+func (s) TestRefuseStartWithExcludeAndWildCardAll(t *testing.T) { + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + Exclude: true, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, }, }, } - configJSON, err := json.Marshal(config) + invalidConfigJSON, err := json.Marshal(invalidConfig) if err != nil { t.Fatalf("failed to convert config to JSON: %v", err) } - os.Setenv(envObservabilityConfigJSON, "") - os.Setenv(envObservabilityConfig, string(configJSON)) + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = string(invalidConfigJSON) + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() // If there is at least one invalid pattern, which should not be silently tolerated. if err := Start(context.Background()); err == nil { t.Fatalf("Invalid patterns not triggering error") @@ -701,10 +196,11 @@ func createTmpConfigInFileSystem(rawJSON string) (func(), error) { if err != nil { return nil, fmt.Errorf("cannot write marshalled JSON: %v", err) } - os.Setenv(envObservabilityConfigJSON, configJSONFile.Name()) + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfigFile = configJSONFile.Name() return func() { configJSONFile.Close() - os.Setenv(envObservabilityConfigJSON, "") + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile }, nil } @@ -713,8 +209,7 @@ func createTmpConfigInFileSystem(rawJSON string) (func(), error) { // file path pointing to a JSON encoded config. 
func (s) TestJSONEnvVarSet(t *testing.T) { configJSON := `{ - "destination_project_id": "fake", - "log_filters":[{"pattern":"*","header_bytes":1073741824,"message_bytes":1073741824}] + "project_id": "fake" }` cleanup, err := createTmpConfigInFileSystem(configJSON) defer cleanup() @@ -736,24 +231,37 @@ func (s) TestJSONEnvVarSet(t *testing.T) { // configuration being invalid, even if the direct configuration environment // variable is set and valid. func (s) TestBothConfigEnvVarsSet(t *testing.T) { - configJSON := `{ - "destination_project_id":"fake", - "log_filters":[{"pattern":":-)"}, {"pattern_string":"*"}] - }` - cleanup, err := createTmpConfigInFileSystem(configJSON) + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{":-)"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + invalidConfigJSON, err := json.Marshal(invalidConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + cleanup, err := createTmpConfigInFileSystem(string(invalidConfigJSON)) defer cleanup() if err != nil { t.Fatalf("failed to create config in file system: %v", err) } // This configuration should be ignored, as precedence 2. 
validConfig := &config{ - EnableCloudLogging: true, - DestinationProjectID: "fake", - LogFilters: []logFilter{ - { - Pattern: "*", - HeaderBytes: infinitySizeBytes, - MessageBytes: infinitySizeBytes, + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, }, }, } @@ -761,7 +269,11 @@ func (s) TestBothConfigEnvVarsSet(t *testing.T) { if err != nil { t.Fatalf("failed to convert config to JSON: %v", err) } - os.Setenv(envObservabilityConfig, string(validConfigJSON)) + oldObservabilityConfig := envconfig.ObservabilityConfig + envconfig.ObservabilityConfig = string(validConfigJSON) + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + }() if err := Start(context.Background()); err == nil { t.Fatalf("Invalid patterns not triggering error") } @@ -772,16 +284,25 @@ func (s) TestBothConfigEnvVarsSet(t *testing.T) { // location in the file system for configuration, and this location doesn't have // a file (or valid configuration). 
func (s) TestErrInFileSystemEnvVar(t *testing.T) { - os.Setenv(envObservabilityConfigJSON, "/this-file/does-not-exist") - defer os.Setenv(envObservabilityConfigJSON, "") + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfigFile = "/this-file/does-not-exist" + defer func() { + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() if err := Start(context.Background()); err == nil { t.Fatalf("Invalid file system path not triggering error") } } func (s) TestNoEnvSet(t *testing.T) { - os.Setenv(envObservabilityConfigJSON, "") - os.Setenv(envObservabilityConfig, "") + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = "" + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() // If there is no observability config set at all, the Start should return an error. 
if err := Start(context.Background()); err == nil { t.Fatalf("Invalid patterns not triggering error") @@ -789,9 +310,8 @@ func (s) TestNoEnvSet(t *testing.T) { } func (s) TestOpenCensusIntegration(t *testing.T) { - te := newTest(t) - defer te.tearDown() - fe := &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: te.t} + defaultMetricsReportingInterval = time.Millisecond * 100 + fe := &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: t} defer func(ne func(config *config) (tracingMetricsExporter, error)) { newExporter = ne @@ -801,28 +321,58 @@ func (s) TestOpenCensusIntegration(t *testing.T) { return fe, nil } - te.enableOpenCensus() - te.startServer(&testServer{}) - tc := testgrpc.NewTestServiceClient(te.clientConn()) + openCensusOnConfig := &config{ + ProjectID: "fake", + CloudMonitoring: &cloudMonitoring{}, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(openCensusOnConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + } + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() for i := 0; i < defaultRequestCount; i++ { - req := &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}} - tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - _, err := tc.UnaryCall(metadata.NewOutgoingContext(tCtx, testHeaderMetadata), req) - if err != nil { - t.Fatalf("unary call failed: %v", err) + if _, 
err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) } } t.Logf("unary call passed count=%v", defaultRequestCount) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } - // Wait for the gRPC transport to gracefully close to ensure no lost event. - te.cc.Close() - te.srv.GracefulStop() + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } var errs []error - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() for ctx.Err() == nil { errs = nil fe.mu.RLock() @@ -855,7 +405,7 @@ func (s) TestCustomTagsTracingMetrics(t *testing.T) { }(newExporter) fe := &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: t} newExporter = func(config *config) (tracingMetricsExporter, error) { - ct := config.CustomTags + ct := config.Labels if len(ct) < 1 { t.Fatalf("less than 2 custom tags sent in") } @@ -871,12 +421,12 @@ func (s) TestCustomTagsTracingMetrics(t *testing.T) { // This configuration present in file system and it's defined custom tags should make it // to the created exporter. 
configJSON := `{ - "destination_project_id": "fake", - "enable_cloud_trace": true, - "enable_cloud_monitoring": true, - "global_trace_sampling_rate": 1.0, - "custom_tags":{"customtag1":"wow","customtag2":"nice"} + "project_id": "fake", + "cloud_trace": {}, + "cloud_monitoring": {"sampling_rate": 1.0}, + "labels":{"customtag1":"wow","customtag2":"nice"} }` + cleanup, err := createTmpConfigInFileSystem(configJSON) defer cleanup() @@ -888,3 +438,37 @@ func (s) TestCustomTagsTracingMetrics(t *testing.T) { t.Fatalf("Start() failed with err: %v", err) } } + +// TestStartErrorsThenEnd tests that an End call after Start errors works +// without problems, as this is a possible codepath in the public observability +// API. +func (s) TestStartErrorsThenEnd(t *testing.T) { + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{":-)"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + invalidConfigJSON, err := json.Marshal(invalidConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = string(invalidConfigJSON) + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } + End() +} diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 1bca322cc52c..08b853d2adc9 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -37,17 +37,17 @@ var ( defaultMetricsReportingInterval = time.Second * 30 ) -func tagsToMonitoringLabels(tags map[string]string) *stackdriver.Labels { - labels := &stackdriver.Labels{} - 
for k, v := range tags { - labels.Set(k, v, "") +func labelsToMonitoringLabels(labels map[string]string) *stackdriver.Labels { + sdLabels := &stackdriver.Labels{} + for k, v := range labels { + sdLabels.Set(k, v, "") } - return labels + return sdLabels } -func tagsToTraceAttributes(tags map[string]string) map[string]interface{} { - ta := make(map[string]interface{}, len(tags)) - for k, v := range tags { +func labelsToTraceAttributes(labels map[string]string) map[string]interface{} { + ta := make(map[string]interface{}, len(labels)) + for k, v := range labels { ta[k] = v } return ta @@ -62,7 +62,6 @@ type tracingMetricsExporter interface { var exporter tracingMetricsExporter -// global to stub out in tests var newExporter = newStackdriverExporter func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { @@ -71,10 +70,10 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { logger.Infof("Detected MonitoredResource:: %+v", mr) var err error exporter, err := stackdriver.NewExporter(stackdriver.Options{ - ProjectID: config.DestinationProjectID, + ProjectID: config.ProjectID, MonitoredResource: mr, - DefaultMonitoringLabels: tagsToMonitoringLabels(config.CustomTags), - DefaultTraceAttributes: tagsToTraceAttributes(config.CustomTags), + DefaultMonitoringLabels: labelsToMonitoringLabels(config.Labels), + DefaultTraceAttributes: labelsToTraceAttributes(config.Labels), }) if err != nil { return nil, fmt.Errorf("failed to create Stackdriver exporter: %v", err) @@ -87,7 +86,7 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { func startOpenCensus(config *config) error { // If both tracing and metrics are disabled, there's no point inject default // StatsHandler. 
- if config == nil || (!config.EnableCloudTrace && !config.EnableCloudMonitoring) { + if config == nil || (config.CloudTrace == nil && config.CloudMonitoring == nil) { return nil } @@ -98,17 +97,17 @@ func startOpenCensus(config *config) error { } var so trace.StartOptions - if config.EnableCloudTrace { - so.Sampler = trace.ProbabilitySampler(config.GlobalTraceSamplingRate) + if config.CloudTrace != nil { + so.Sampler = trace.ProbabilitySampler(config.CloudTrace.SamplingRate) trace.RegisterExporter(exporter.(trace.Exporter)) - logger.Infof("Start collecting and exporting trace spans with global_trace_sampling_rate=%.2f", config.GlobalTraceSamplingRate) + logger.Infof("Start collecting and exporting trace spans with global_trace_sampling_rate=%.2f", config.CloudTrace.SamplingRate) } - if config.EnableCloudMonitoring { - if err := view.Register(ocgrpc.DefaultClientViews...); err != nil { + if config.CloudMonitoring != nil { + if err := view.Register(ocgrpc.ClientCompletedRPCsView); err != nil { return fmt.Errorf("failed to register default client views: %v", err) } - if err := view.Register(ocgrpc.DefaultServerViews...); err != nil { + if err := view.Register(ocgrpc.ServerCompletedRPCsView); err != nil { return fmt.Errorf("failed to register default server views: %v", err) } view.SetReportingPeriod(defaultMetricsReportingInterval) @@ -130,7 +129,6 @@ func stopOpenCensus() { if exporter != nil { internal.ClearGlobalDialOptions() internal.ClearGlobalServerOptions() - // Call these unconditionally, doesn't matter if not registered, will be // a noop if not registered. trace.UnregisterExporter(exporter) diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index e3dfe204f9ae..809d73ccafb0 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -37,7 +37,7 @@ type Logger interface { // binLogger is the global binary logger for the binary. 
One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") @@ -56,11 +56,11 @@ func GetLogger() Logger { return binLogger } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { @@ -117,7 +117,7 @@ func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. +// New MethodLogger with same service overrides the old one. func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) @@ -131,7 +131,7 @@ func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. +// New MethodLogger with same method overrides the old one. func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) @@ -161,11 +161,11 @@ func (l *logger) setBlacklist(method string) error { return nil } -// getMethodLogger returns the methodLogger for the given methodName. 
+// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) @@ -174,16 +174,16 @@ func (l *logger) GetMethodLogger(methodName string) MethodLogger { return nil } if ml, ok := l.config.Methods[s+"/"+m]; ok { - return newMethodLogger(ml.Header, ml.Message) + return NewTruncatingMethodLogger(ml.Header, ml.Message) } if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } if ml, ok := l.config.Services[s]; ok { - return newMethodLogger(ml.Header, ml.Message) + return NewTruncatingMethodLogger(ml.Header, ml.Message) } if l.config.All == nil { return nil } - return newMethodLogger(l.config.All.Header, l.config.All.Message) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/internal/binarylog/binarylog_test.go b/internal/binarylog/binarylog_test.go index 617aeb2e83fb..05138f8f309f 100644 --- a/internal/binarylog/binarylog_test.go +++ b/internal/binarylog/binarylog_test.go @@ -93,7 +93,7 @@ func (s) TestGetMethodLogger(t *testing.T) { t.Errorf("in: %q, failed to create logger from config string", tc.in) continue } - ml := l.GetMethodLogger(tc.method).(*methodLogger) + ml := l.GetMethodLogger(tc.method).(*TruncatingMethodLogger) if ml == nil { t.Errorf("in: %q, method logger is nil, want non-nil", tc.in) continue diff --git a/internal/binarylog/env_config.go b/internal/binarylog/env_config.go index ab589a76bf96..c5579e65065f 100644 --- a/internal/binarylog/env_config.go +++ b/internal/binarylog/env_config.go @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates 
methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index 24df0a1a0c4e..179f4a26d135 100644 --- a/internal/binarylog/method_logger.go +++ b/internal/binarylog/method_logger.go @@ -52,7 +52,9 @@ type MethodLogger interface { Log(LogEntryConfig) } -type methodLogger struct { +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -61,8 +63,9 @@ type methodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *methodLogger { - return &methodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -75,8 +78,8 @@ func newMethodLogger(h, m uint64) *methodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic -// in methodLogger as possible. -func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -95,11 +98,11 @@ func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { } // Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *methodLogger) Log(c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -129,7 +132,7 @@ func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/internal/binarylog/method_logger_test.go b/internal/binarylog/method_logger_test.go index 31cc076343ff..ff87b6a2ec4e 100644 --- a/internal/binarylog/method_logger_test.go +++ b/internal/binarylog/method_logger_test.go @@ -34,7 +34,7 @@ import ( func (s) TestLog(t *testing.T) { idGen.reset() - ml := newMethodLogger(10, 10) + ml := NewTruncatingMethodLogger(10, 10) // Set sink to testing buffer. 
buf := bytes.NewBuffer(nil) ml.sink = newWriterSink(buf) @@ -350,11 +350,11 @@ func (s) TestLog(t *testing.T) { func (s) TestTruncateMetadataNotTruncated(t *testing.T) { testCases := []struct { - ml *methodLogger + ml *TruncatingMethodLogger mpPb *pb.Metadata }{ { - ml: newMethodLogger(maxUInt, maxUInt), + ml: NewTruncatingMethodLogger(maxUInt, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, @@ -362,7 +362,7 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { }, }, { - ml: newMethodLogger(2, maxUInt), + ml: NewTruncatingMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, @@ -370,7 +370,7 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { }, }, { - ml: newMethodLogger(1, maxUInt), + ml: NewTruncatingMethodLogger(1, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: nil}, @@ -378,7 +378,7 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { }, }, { - ml: newMethodLogger(2, maxUInt), + ml: NewTruncatingMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, @@ -386,7 +386,7 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { }, }, { - ml: newMethodLogger(2, maxUInt), + ml: NewTruncatingMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, @@ -397,7 +397,7 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { // "grpc-trace-bin" is kept in log but not counted towards the size // limit. 
{ - ml: newMethodLogger(1, maxUInt), + ml: NewTruncatingMethodLogger(1, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, @@ -417,13 +417,13 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { func (s) TestTruncateMetadataTruncated(t *testing.T) { testCases := []struct { - ml *methodLogger + ml *TruncatingMethodLogger mpPb *pb.Metadata entryLen int }{ { - ml: newMethodLogger(2, maxUInt), + ml: NewTruncatingMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1, 1, 1}}, @@ -432,7 +432,7 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { entryLen: 0, }, { - ml: newMethodLogger(2, maxUInt), + ml: NewTruncatingMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, @@ -443,7 +443,7 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { entryLen: 2, }, { - ml: newMethodLogger(2, maxUInt), + ml: NewTruncatingMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, @@ -453,7 +453,7 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { entryLen: 1, }, { - ml: newMethodLogger(2, maxUInt), + ml: NewTruncatingMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, @@ -478,23 +478,23 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { func (s) TestTruncateMessageNotTruncated(t *testing.T) { testCases := []struct { - ml *methodLogger + ml *TruncatingMethodLogger msgPb *pb.Message }{ { - ml: newMethodLogger(maxUInt, maxUInt), + ml: NewTruncatingMethodLogger(maxUInt, maxUInt), msgPb: &pb.Message{ Data: []byte{1}, }, }, { - ml: newMethodLogger(maxUInt, 3), + ml: NewTruncatingMethodLogger(maxUInt, 3), msgPb: &pb.Message{ Data: []byte{1, 1}, }, }, { - ml: newMethodLogger(maxUInt, 2), + ml: NewTruncatingMethodLogger(maxUInt, 2), msgPb: &pb.Message{ Data: []byte{1, 1}, }, @@ -511,13 +511,13 @@ func (s) TestTruncateMessageNotTruncated(t 
*testing.T) { func (s) TestTruncateMessageTruncated(t *testing.T) { testCases := []struct { - ml *methodLogger + ml *TruncatingMethodLogger msgPb *pb.Message oldLength uint32 }{ { - ml: newMethodLogger(maxUInt, 2), + ml: NewTruncatingMethodLogger(maxUInt, 2), msgPb: &pb.Message{ Length: 3, Data: []byte{1, 1, 1}, diff --git a/internal/envconfig/observability.go b/internal/envconfig/observability.go new file mode 100644 index 000000000000..821dd0a7c198 --- /dev/null +++ b/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. 
+ ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) From 9eba57430c09de0fc5fad54b90d3e4467661bf2c Mon Sep 17 00:00:00 2001 From: apolcyn Date: Wed, 12 Oct 2022 12:57:55 -0700 Subject: [PATCH 631/998] xds: de-experimentalize google c2p resolver (#5707) --- xds/googledirectpath/googlec2p.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index d759d25c8519..ef86f7b56c5f 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -47,7 +47,8 @@ import ( ) const ( - c2pScheme = "google-c2p-experimental" + c2pScheme = "google-c2p" + c2pExperimentalScheme = "google-c2p-experimental" tdURL = "dns:///directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second @@ -75,10 +76,18 @@ var ( ) func init() { - resolver.Register(c2pResolverBuilder{}) + resolver.Register(c2pResolverBuilder{ + scheme: c2pScheme, + }) + // TODO(apolcyn): remove this experimental scheme before the 1.52 release + resolver.Register(c2pResolverBuilder{ + scheme: c2pExperimentalScheme, + }) } -type c2pResolverBuilder struct{} +type c2pResolverBuilder struct { + scheme string +} func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { if !runDirectPath() { @@ -131,8 +140,8 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts }, nil } -func (c2pResolverBuilder) Scheme() string { - return c2pScheme +func (b c2pResolverBuilder) Scheme() string { + return b.scheme } type c2pResolver struct { From e163a9085f6c004e3083e3f31a562f875e0cf9ae Mon Sep 17 00:00:00 2001 From: Ernest Nguyen <58267404+erni27@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:15:09 +0200 Subject: [PATCH 632/998] xds/xdsclient: add EDS resource endpoint address duplication check (#5715) --- xds/internal/xdsclient/xdsresource/unmarshal_eds.go | 12 +++++++++--- 
.../xdsclient/xdsresource/unmarshal_eds_test.go | 11 +++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index 15b0d88f9f1f..e091d0ddea0f 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -91,7 +91,7 @@ func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropO } } -func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) ([]Endpoint, error) { +func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs map[string]bool) ([]Endpoint, error) { endpoints := make([]Endpoint, 0, len(lbEndpoints)) for _, lbEndpoint := range lbEndpoints { // If the load_balancing_weight field is specified, it must be set to a @@ -104,9 +104,14 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) ([]Endpoint, error) } weight = w.GetValue() } + addr := parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()) + if uniqueEndpointAddrs[addr] { + return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr) + } + uniqueEndpointAddrs[addr] = true endpoints = append(endpoints, Endpoint{ HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), + Address: addr, Weight: weight, }) } @@ -120,6 +125,7 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.Pr } priorities := make(map[uint32]map[string]bool) sumOfWeights := make(map[uint32]uint64) + uniqueEndpointAddrs := make(map[string]bool) for _, locality := range m.Endpoints { l := locality.GetLocality() if l == nil { @@ -150,7 +156,7 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.Pr return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) } 
localitiesWithPriority[lidStr] = true - endpoints, err := parseEndpoints(locality.GetLbEndpoints()) + endpoints, err := parseEndpoints(locality.GetLbEndpoints(), uniqueEndpointAddrs) if err != nil { return EndpointsUpdate{}, err } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index 02718e09ddaf..f89333c76e76 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -111,6 +111,17 @@ func (s) TestEDSParseRespProto(t *testing.T) { want: EndpointsUpdate{}, wantErr: true, }, + { + name: "duplicate endpoint address", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 1, []string{"addr:997"}, nil) + clab0.addLocality("locality-2", 1, 0, []string{"addr:997"}, nil) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, { name: "good", m: func() *v3endpointpb.ClusterLoadAssignment { From 00d1830c1958df6377b089bdd8a6b7f95dafd7d7 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 13 Oct 2022 13:24:11 -0400 Subject: [PATCH 633/998] Fix o11y typo (#5719) --- gcp/observability/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gcp/observability/config.go b/gcp/observability/config.go index 1920f086479f..0b6067d8e770 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -162,7 +162,7 @@ type clientRPCEvents struct { // here “goo” is the package name. // 2."goo.Foo/*" selects all methods from service "goo.Foo" // 3. "*" selects all methods from all services. - Methods []string `json:"method,omitempty"` + Methods []string `json:"methods,omitempty"` // Exclude represents whether the methods denoted by Methods should be // excluded from logging. 
The default value is false, meaning the methods // denoted by Methods are included in the logging. If Exclude is true, the @@ -200,7 +200,7 @@ type serverRPCEvents struct { // here “goo” is the package name. // 2."goo.Foo/*" selects all methods from service "goo.Foo" // 3. "*" selects all methods from all services. - Methods []string `json:"method,omitempty"` + Methods []string `json:"methods,omitempty"` // Exclude represents whether the methods denoted by Methods should be // excluded from logging. The default value is false, meaning the methods // denoted by Methods are included in the logging. If Exclude is true, the From f52b910b10bdbc81c7563440c50b70d476432dba Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 14 Oct 2022 14:48:39 -0400 Subject: [PATCH 634/998] o11y: Fixed o11y bug (#5720) --- server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.go b/server.go index e8143492c25e..f4dde72b41f8 100644 --- a/server.go +++ b/server.go @@ -74,7 +74,7 @@ func init() { srv.drainServerTransports(addr) } internal.AddGlobalServerOptions = func(opt ...ServerOption) { - extraServerOptions = opt + extraServerOptions = append(extraServerOptions, opt...) 
} internal.ClearGlobalServerOptions = func() { extraServerOptions = nil From 912765f7496aca3c30b7a09ee5c7b5deab741adb Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 17 Oct 2022 09:34:01 -0700 Subject: [PATCH 635/998] xds: move bootstrap config generating utility package to testutils (#5713) --- admin/test/utils.go | 13 +++++----- .../xds/bootstrap}/bootstrap.go | 26 +++++++++---------- .../xds/e2e/setup_management_server.go | 6 ++--- test/xds/xds_client_federation_test.go | 15 ++++++----- xds/csds/csds_test.go | 6 ++--- xds/internal/httpfilter/fault/fault_test.go | 6 ++--- xds/internal/test/e2e/controlplane.go | 6 ++--- 7 files changed, 39 insertions(+), 39 deletions(-) rename internal/{xds => testutils/xds/bootstrap}/bootstrap.go (87%) diff --git a/admin/test/utils.go b/admin/test/utils.go index 6540797ff2ba..e85ab9240e1a 100644 --- a/admin/test/utils.go +++ b/admin/test/utils.go @@ -26,16 +26,17 @@ import ( "testing" "time" - v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" - v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/admin" - channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/status" + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) const ( @@ -53,8 +54,8 @@ type ExpectedStatusCodes struct { // codes. 
func RunRegisterTests(t *testing.T, ec ExpectedStatusCodes) { nodeID := uuid.New().String() - bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ + Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: "no.need.for.a.server", }) diff --git a/internal/xds/bootstrap.go b/internal/testutils/xds/bootstrap/bootstrap.go similarity index 87% rename from internal/xds/bootstrap.go rename to internal/testutils/xds/bootstrap/bootstrap.go index 4f583d1f4fd3..ba7953bd05e2 100644 --- a/internal/xds/bootstrap.go +++ b/internal/testutils/xds/bootstrap/bootstrap.go @@ -16,9 +16,8 @@ * */ -// Package xds contains types that need to be shared between code under -// google.golang.org/grpc/xds/... and the rest of gRPC. -package xds +// Package bootstrap provides functionality to generate bootstrap configuration. +package bootstrap import ( "encoding/json" @@ -42,8 +41,8 @@ const ( TransportV3 ) -// BootstrapOptions wraps the parameters passed to SetupBootstrapFile. -type BootstrapOptions struct { +// Options wraps the parameters used to generate bootstrap configuration. +type Options struct { // Version is the xDS transport protocol version. Version TransportAPI // NodeID is the node identifier of the gRPC client/server node in the @@ -70,15 +69,15 @@ type BootstrapOptions struct { Authorities map[string]string } -// SetupBootstrapFile creates a temporary file with bootstrap contents, based on -// the passed in options, and updates the bootstrap environment variable to -// point to this file. +// CreateFile creates a temporary file with bootstrap contents, based on the +// passed in options, and updates the bootstrap environment variable to point to +// this file. // // Returns a cleanup function which will be non-nil if the setup process was // completed successfully. It is the responsibility of the caller to invoke the // cleanup function at the end of the test. 
-func SetupBootstrapFile(opts BootstrapOptions) (func(), error) { - bootstrapContents, err := BootstrapContents(opts) +func CreateFile(opts Options) (func(), error) { + bootstrapContents, err := Contents(opts) if err != nil { return nil, err } @@ -100,10 +99,9 @@ func SetupBootstrapFile(opts BootstrapOptions) (func(), error) { }, nil } -// BootstrapContents returns the contents to go into a bootstrap file, -// environment, or configuration passed to -// xds.NewXDSResolverWithConfigForTesting. -func BootstrapContents(opts BootstrapOptions) ([]byte, error) { +// Contents returns the contents to go into a bootstrap file, environment, or +// configuration passed to xds.NewXDSResolverWithConfigForTesting. +func Contents(opts Options) ([]byte, error) { cfg := &bootstrapConfig{ XdsServers: []server{ { diff --git a/internal/testutils/xds/e2e/setup_management_server.go b/internal/testutils/xds/e2e/setup_management_server.go index b5efa2bd175c..9ab76edc99a4 100644 --- a/internal/testutils/xds/e2e/setup_management_server.go +++ b/internal/testutils/xds/e2e/setup_management_server.go @@ -25,7 +25,7 @@ import ( "github.com/google/uuid" "google.golang.org/grpc/internal" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/resolver" ) @@ -78,8 +78,8 @@ func SetupManagementServer(t *testing.T, opts *ManagementServerOptions) (*Manage // Create a bootstrap file in a temporary directory. 
nodeID := uuid.New().String() - bootstrapContents, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ - Version: xdsinternal.TransportV3, + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: server.Address, CertificateProviders: cpc, diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index 29cf350337d7..1ae54b682389 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -23,18 +23,19 @@ import ( "fmt" "testing" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" - xdsinternal "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/resolver" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -69,8 +70,8 @@ func (s) TestClientSideFederation(t *testing.T) { // Create a bootstrap file in a temporary directory. 
nodeID := uuid.New().String() - bootstrapContents, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ - Version: xdsinternal.TransportV3, + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: serverDefaultAuth.Address, ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index c82eb601b661..42ba3da6b40c 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" - "google.golang.org/grpc/internal/xds" _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -242,8 +242,8 @@ func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.M } // Create a bootstrap file in a temporary directory. 
- bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ + Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: fs.Address, }) diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 904585fc4a6e..a17c6816f711 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -39,8 +39,8 @@ import ( "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" - "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/wrapperspb" @@ -106,8 +106,8 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { } // Create a bootstrap file in a temporary directory. 
- bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ + Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: fs.Address, ServerListenerResourceNameTemplate: "grpc/server", diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go index 0ad6fa201fb7..97bb38321fc4 100644 --- a/xds/internal/test/e2e/controlplane.go +++ b/xds/internal/test/e2e/controlplane.go @@ -21,8 +21,8 @@ import ( "fmt" "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" - xdsinternal "google.golang.org/grpc/internal/xds" ) type controlPlane struct { @@ -39,8 +39,8 @@ func newControlPlane() (*controlPlane, error) { } nodeID := uuid.New().String() - bootstrapContentBytes, err := xdsinternal.BootstrapContents(xdsinternal.BootstrapOptions{ - Version: xdsinternal.TransportV3, + bootstrapContentBytes, err := bootstrap.Contents(bootstrap.Options{ + Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: server.Address, ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, From bb3d739418ccd1153c7067c50ed274d4a0f3054f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 17 Oct 2022 09:38:52 -0700 Subject: [PATCH 636/998] fakeserver: add v3 support to the xDS fakeserver implementation (#5698) --- xds/internal/testutils/fakeserver/server.go | 169 +++++++++++++++++--- 1 file changed, 146 insertions(+), 23 deletions(-) diff --git a/xds/internal/testutils/fakeserver/server.go b/xds/internal/testutils/fakeserver/server.go index 94412171003e..1d7e6b482228 100644 --- a/xds/internal/testutils/fakeserver/server.go +++ b/xds/internal/testutils/fakeserver/server.go @@ -17,6 +17,10 @@ */ // Package fakeserver provides a fake implementation of the management server. 
+// +// This package is recommended only for scenarios which cannot be tested using +// the xDS management server (which uses envoy-go-control-plane) provided by the +// `internal/testutils/xds/e2e` package. package fakeserver import ( @@ -31,10 +35,14 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" - discoverypb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" - lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" + v2discoverypb "github.com/envoyproxy/go-control-plane/envoy/api/v2" + v2discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" + v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v2lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" + v2lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" ) const ( @@ -61,6 +69,10 @@ type Response struct { // Server is a fake implementation of xDS and LRS protocols. It listens on the // same port for both services and exposes a bunch of channels to send/receive // messages. +// +// This server is recommended only for scenarios which cannot be tested using +// the xDS management server (which uses envoy-go-control-plane) provided by the +// `internal/testutils/xds/e2e` package. 
type Server struct { // XDSRequestChan is a channel on which received xDS requests are made // available to the users of this Server. @@ -74,6 +86,12 @@ type Server struct { // LRSResponseChan is a channel on which the Server accepts the LRS // response to be sent to the client. LRSResponseChan chan *Response + // LRSStreamOpenChan is a channel on which the Server sends notifications + // when a new LRS stream is created. + LRSStreamOpenChan *testutils.Channel + // LRSStreamCloseChan is a channel on which the Server sends notifications + // when an existing LRS stream is closed. + LRSStreamCloseChan *testutils.Channel // NewConnChan is a channel on which the fake server notifies receipt of new // connection attempts. Tests can gate on this event before proceeding to // other actions which depend on a connection to the fake server being up. @@ -82,8 +100,10 @@ type Server struct { Address string // The underlying fake implementation of xDS and LRS. - xdsS *xdsServer - lrsS *lrsServer + xdsV2 *xdsServerV2 + xdsV3 *xdsServerV3 + lrsV2 *lrsServerV2 + lrsV3 *lrsServerV3 } type wrappedListener struct { @@ -110,34 +130,40 @@ func StartServer() (*Server, func(), error) { } s := &Server{ - XDSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - LRSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - NewConnChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - XDSResponseChan: make(chan *Response, defaultChannelBufferSize), - LRSResponseChan: make(chan *Response, 1), // The server only ever sends one response. - Address: lis.Addr().String(), + XDSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + LRSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + NewConnChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + XDSResponseChan: make(chan *Response, defaultChannelBufferSize), + LRSResponseChan: make(chan *Response, 1), // The server only ever sends one response. 
+ LRSStreamOpenChan: testutils.NewChannel(), + LRSStreamCloseChan: testutils.NewChannel(), + Address: lis.Addr().String(), } - s.xdsS = &xdsServer{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} - s.lrsS = &lrsServer{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan} + s.xdsV2 = &xdsServerV2{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} + s.xdsV3 = &xdsServerV3{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} + s.lrsV2 = &lrsServerV2{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan, streamOpenChan: s.LRSStreamOpenChan, streamCloseChan: s.LRSStreamCloseChan} + s.lrsV3 = &lrsServerV3{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan, streamOpenChan: s.LRSStreamOpenChan, streamCloseChan: s.LRSStreamCloseChan} wp := &wrappedListener{ Listener: lis, server: s, } server := grpc.NewServer() - lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsS) - adsgrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsS) + v2lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsV2) + v2discoverygrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsV2) + v3lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsV3) + v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsV3) go server.Serve(wp) return s, func() { server.Stop() }, nil } -type xdsServer struct { +type xdsServerV2 struct { reqChan *testutils.Channel respChan chan *Response } -func (xdsS *xdsServer) StreamAggregatedResources(s adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { +func (xdsS *xdsServerV2) StreamAggregatedResources(s v2discoverygrpc.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { errCh := make(chan error, 2) go func() { for { @@ -162,7 +188,7 @@ func (xdsS *xdsServer) StreamAggregatedResources(s adsgrpc.AggregatedDiscoverySe retErr = r.Err return } - if err := s.Send(r.Resp.(*discoverypb.DiscoveryResponse)); err != nil { + if err := s.Send(r.Resp.(*v2discoverypb.DiscoveryResponse)); err != nil { 
retErr = err return } @@ -179,16 +205,113 @@ func (xdsS *xdsServer) StreamAggregatedResources(s adsgrpc.AggregatedDiscoverySe return nil } -func (xdsS *xdsServer) DeltaAggregatedResources(adsgrpc.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { +func (xdsS *xdsServerV2) DeltaAggregatedResources(v2discoverygrpc.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { return status.Error(codes.Unimplemented, "") } -type lrsServer struct { +type xdsServerV3 struct { reqChan *testutils.Channel respChan chan *Response } -func (lrsS *lrsServer) StreamLoadStats(s lrsgrpc.LoadReportingService_StreamLoadStatsServer) error { +func (xdsS *xdsServerV3) StreamAggregatedResources(s v3discoverygrpc.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { + errCh := make(chan error, 2) + go func() { + for { + req, err := s.Recv() + if err != nil { + errCh <- err + return + } + xdsS.reqChan.Send(&Request{req, err}) + } + }() + go func() { + var retErr error + defer func() { + errCh <- retErr + }() + + for { + select { + case r := <-xdsS.respChan: + if r.Err != nil { + retErr = r.Err + return + } + if err := s.Send(r.Resp.(*v3discoverypb.DiscoveryResponse)); err != nil { + retErr = err + return + } + case <-s.Context().Done(): + retErr = s.Context().Err() + return + } + } + }() + + if err := <-errCh; err != nil { + return err + } + return nil +} + +func (xdsS *xdsServerV3) DeltaAggregatedResources(v3discoverygrpc.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { + return status.Error(codes.Unimplemented, "") +} + +type lrsServerV2 struct { + reqChan *testutils.Channel + respChan chan *Response + streamOpenChan *testutils.Channel + streamCloseChan *testutils.Channel +} + +func (lrsS *lrsServerV2) StreamLoadStats(s v2lrsgrpc.LoadReportingService_StreamLoadStatsServer) error { + lrsS.streamOpenChan.Send(nil) + defer lrsS.streamCloseChan.Send(nil) + + req, err := s.Recv() + lrsS.reqChan.Send(&Request{req, err}) + if err != nil { + 
return err + } + + select { + case r := <-lrsS.respChan: + if r.Err != nil { + return r.Err + } + if err := s.Send(r.Resp.(*v2lrspb.LoadStatsResponse)); err != nil { + return err + } + case <-s.Context().Done(): + return s.Context().Err() + } + + for { + req, err := s.Recv() + lrsS.reqChan.Send(&Request{req, err}) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + +type lrsServerV3 struct { + reqChan *testutils.Channel + respChan chan *Response + streamOpenChan *testutils.Channel + streamCloseChan *testutils.Channel +} + +func (lrsS *lrsServerV3) StreamLoadStats(s v3lrsgrpc.LoadReportingService_StreamLoadStatsServer) error { + lrsS.streamOpenChan.Send(nil) + defer lrsS.streamCloseChan.Send(nil) + req, err := s.Recv() lrsS.reqChan.Send(&Request{req, err}) if err != nil { @@ -200,7 +323,7 @@ func (lrsS *lrsServer) StreamLoadStats(s lrsgrpc.LoadReportingService_StreamLoad if r.Err != nil { return r.Err } - if err := s.Send(r.Resp.(*lrspb.LoadStatsResponse)); err != nil { + if err := s.Send(r.Resp.(*v3lrspb.LoadStatsResponse)); err != nil { return err } case <-s.Context().Done(): From eb8aa3192b883c42639a1f90c330d61ba1414785 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Mon, 17 Oct 2022 14:38:11 -0700 Subject: [PATCH 637/998] weightedtarget: return a more meaningful error when no child policy is reporting READY (#5391) --- .../weightedaggregator/aggregator.go | 15 +++--- .../weightedtarget/weightedtarget_test.go | 47 +++++++++++++------ .../balancer/clusterresolver/priority_test.go | 23 ++++++--- 3 files changed, 59 insertions(+), 26 deletions(-) diff --git a/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go index 38bd9b223f80..dffc539b85d5 100644 --- a/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -240,20 +240,23 @@ func (wbsa *Aggregator) BuildAndUpdate() { // Caller must hold wbsa.mu. 
func (wbsa *Aggregator) build() balancer.State { wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState) - m := wbsa.idToPickerState // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated // state. var readyN, connectingN, idleN int - readyPickerWithWeights := make([]weightedPickerState, 0, len(m)) - for _, ps := range m { + pickerN := len(wbsa.idToPickerState) + readyPickers := make([]weightedPickerState, 0, pickerN) + errorPickers := make([]weightedPickerState, 0, pickerN) + for _, ps := range wbsa.idToPickerState { switch ps.stateToAggregate { case connectivity.Ready: readyN++ - readyPickerWithWeights = append(readyPickerWithWeights, *ps) + readyPickers = append(readyPickers, *ps) case connectivity.Connecting: connectingN++ case connectivity.Idle: idleN++ + case connectivity.TransientFailure: + errorPickers = append(errorPickers, *ps) } } var aggregatedState connectivity.State @@ -272,11 +275,11 @@ func (wbsa *Aggregator) build() balancer.State { var picker balancer.Picker switch aggregatedState { case connectivity.TransientFailure: - picker = base.NewErrPicker(balancer.ErrTransientFailure) + picker = newWeightedPickerGroup(errorPickers, wbsa.newWRR) case connectivity.Connecting: picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) default: - picker = newWeightedPickerGroup(readyPickerWithWeights, wbsa.newWRR) + picker = newWeightedPickerGroup(readyPickers, wbsa.newWRR) } return balancer.State{ConnectivityState: aggregatedState, Picker: picker} } diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index cc9235264224..ea76ea1297cb 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -20,7 +20,9 @@ package weightedtarget import ( "encoding/json" + "errors" "fmt" + "strings" "testing" "time" @@ -569,7 +571,11 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { } // Turn sc1's 
connection down. - wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + scConnErr := errors.New("subConn connection error") + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scConnErr, + }) p = <-cc.NewPickerCh want = []balancer.SubConn{sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { @@ -586,11 +592,14 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { } // Turn all connections down. - wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scConnErr, + }) p = <-cc.NewPickerCh for i := 0; i < 5; i++ { - if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { + t.Fatalf("want pick error %q, got error %q", scConnErr, err) } } } @@ -793,7 +802,11 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { } // Move balancer 3 into transient failure. - wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + scConnErr := errors.New("subConn connection error") + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scConnErr, + }) <-cc.NewPickerCh // Remove the first balancer, while the third is transient failure. 
@@ -827,8 +840,8 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) } for i := 0; i < 5; i++ { - if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { + t.Fatalf("want pick error %q, got error %q", scConnErr, err) } } } @@ -1064,15 +1077,21 @@ func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *tes // Set both subconn to TransientFailure, this will put both sub-balancers in // transient failure. - wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + scConnErr := errors.New("subConn connection error") + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scConnErr, + }) <-cc.NewPickerCh - wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scConnErr, + }) p := <-cc.NewPickerCh for i := 0; i < 5; i++ { - r, err := p.Pick(balancer.PickInfo{}) - if err != balancer.ErrTransientFailure { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { + t.Fatalf("want pick error %q, got error %q", scConnErr, err) } } @@ -1086,8 +1105,8 @@ func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *tes for i := 0; i < 5; i++ { r, err := p.Pick(balancer.PickInfo{}) - if err != balancer.ErrTransientFailure { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) 
+ if err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { + t.Fatalf("want pick error %q, got result %v, err %q", scConnErr, r, err) } } } diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index b08b82089898..0ba5e1e80946 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -19,6 +19,7 @@ package clusterresolver import ( "context" + "errors" "fmt" "testing" "time" @@ -247,7 +248,11 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { } // Turn down 1, use 2 - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + scConnErr := errors.New("subConn connection error") + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scConnErr, + }) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -274,7 +279,8 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { } // Should get an update with 1's old picker, to override 2's old picker. - if err := testErrPickerFromCh(cc.NewPickerCh, balancer.ErrTransientFailure); err != nil { + want := errors.New("last connection error: subConn connection error") + if err := testErrPickerFromCh(cc.NewPickerCh, want); err != nil { t.Fatal(err) } @@ -305,10 +311,15 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { } sc1 := <-cc.NewSubConnCh // Turn down 1, pick should error. - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + scConnErr := errors.New("subConn connection error") + edsb.UpdateSubConnState(sc1, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scConnErr, + }) // Test pick failure. 
- if err := testErrPickerFromCh(cc.NewPickerCh, balancer.ErrTransientFailure); err != nil { + want := errors.New("last connection error: subConn connection error") + if err := testErrPickerFromCh(cc.NewPickerCh, want); err != nil { t.Fatal(err) } @@ -463,8 +474,8 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { // Add localities to existing priorities. // -// - start with 2 locality with p0 and p1 -// - add localities to existing p0 and p1 +// - start with 2 locality with p0 and p1 +// - add localities to existing p0 and p1 func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) defer cleanup() From 778860e606e35d096773133e2995fe9ecc470ba0 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 17 Oct 2022 15:04:34 -0700 Subject: [PATCH 638/998] testing: update Go to 1.19 (#5717) --- .github/workflows/testing.yml | 18 ++--- admin/admin.go | 2 +- attributes/attributes.go | 2 +- authz/rbac_translator.go | 2 +- backoff.go | 2 +- balancer/base/balancer.go | 4 +- balancer/conn_state_evaluator.go | 8 +-- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 + balancer/grpclb/grpclb.go | 24 ++++--- balancer/rls/balancer.go | 23 +++---- balancer/rls/config.go | 57 ++++++++-------- balancer/rls/helpers_test.go | 12 ++-- balancer/rls/internal/adaptive/adaptive.go | 30 ++++---- .../weightedroundrobin/weightedroundrobin.go | 4 +- benchmark/benchmain/main.go | 17 ++--- benchmark/benchresult/main.go | 8 ++- benchmark/server/main.go | 1 + binarylog/grpc_binarylog_v1/binarylog.pb.go | 13 ++-- channelz/channelz.go | 2 +- channelz/grpc_channelz_v1/channelz.pb.go | 4 ++ clientconn.go | 16 ++--- cmd/protoc-gen-go-grpc/main.go | 3 + .../internal/proto/grpc_gcp/handshaker.pb.go | 2 + credentials/google/xds.go | 1 + credentials/local/local.go | 2 +- credentials/sts/sts.go | 14 ++-- credentials/tls.go | 2 +- credentials/tls/certprovider/distributor.go | 10 +-- .../tls/certprovider/pemfile/watcher.go | 2 +- 
credentials/tls/certprovider/provider.go | 2 +- encoding/encoding.go | 2 +- encoding/gzip/gzip.go | 2 +- gcp/observability/observability.go | 2 +- grpclog/loggerv2.go | 2 +- grpclog/loggerv2_test.go | 3 +- internal/balancergroup/balancergroup.go | 24 +++---- internal/balancergroup/balancergroup_test.go | 5 +- internal/binarylog/env_config.go | 18 ++--- internal/channelz/types.go | 16 ++--- internal/grpclog/grpclog.go | 2 +- internal/grpctest/grpctest.go | 6 +- internal/grpcutil/method.go | 1 - internal/hierarchy/hierarchy.go | 31 +++++---- internal/profiling/goid_modified.go | 68 +++++++++---------- .../proto/grpc_lookup_v1/rls_config.pb.go | 11 +-- internal/serviceconfig/serviceconfig.go | 8 +-- internal/testutils/balancer.go | 8 +-- internal/transport/handler_server.go | 8 +-- internal/transport/http2_client.go | 14 ++-- internal/transport/transport_test.go | 2 +- interop/grpc_testing/control.pb.go | 4 ++ interop/grpc_testing/core/stats.pb.go | 1 + interop/grpc_testing/empty.pb.go | 7 +- interop/grpc_testing/payloads.pb.go | 1 + interop/xds/client/client.go | 3 +- metadata/metadata.go | 20 +++--- orca/orca.go | 2 +- preloader.go | 2 +- profiling/profiling.go | 2 +- profiling/service/service.go | 2 +- .../grpc_reflection_v1alpha/reflection.pb.go | 2 + .../grpc_testing_not_regenerate/testv3.pb.go | 2 + reflection/serverreflection.go | 10 +-- resolver/resolver.go | 14 ++-- rpc_util.go | 39 +++++------ security/advancedtls/advancedtls.go | 6 +- security/advancedtls/crl.go | 16 ++--- security/authorization/engine/engine.go | 17 +++-- serviceconfig/serviceconfig.go | 2 +- status/status.go | 12 ++-- stress/grpc_testing/metrics.pb.go | 1 + tap/tap.go | 2 +- test/xds/xds_server_integration_test.go | 30 ++++---- xds/bootstrap/bootstrap.go | 2 +- .../balancer/clusterresolver/configbuilder.go | 60 ++++++++-------- .../clusterresolver/e2e_test/eds_impl_test.go | 30 ++++---- .../balancer/priority/balancer_priority.go | 58 ++++++++++------ 
xds/internal/balancer/ringhash/ringhash.go | 15 ++-- xds/internal/httpfilter/fault/fault_test.go | 10 +-- xds/internal/server/conn_wrapper.go | 14 ++-- xds/internal/test/e2e/e2e_test.go | 1 + xds/internal/xdsclient/authority_test.go | 12 ++-- xds/internal/xdsclient/client_new.go | 5 +- .../xdsclient/controller/version/v2/client.go | 8 +-- .../xdsclient/controller/version/v3/client.go | 8 +-- .../xdsclient/e2e_test/lds_watchers_test.go | 42 ++++++------ .../xdsclient/watchers_cluster_test.go | 1 + .../xdsclient/watchers_federation_test.go | 32 ++++----- xds/internal/xdsclient/watchers_test.go | 1 + .../xdsclient/xdsresource/filter_chain.go | 10 +-- xds/internal/xdsclient/xdsresource/matcher.go | 25 ++++--- xds/server_options.go | 4 +- xds/server_test.go | 12 ++-- xds/xds.go | 4 +- 94 files changed, 570 insertions(+), 503 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 6d9571707bf8..0ebfcce44281 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -24,7 +24,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 - name: Checkout repo uses: actions/checkout@v2 @@ -44,31 +44,31 @@ jobs: matrix: include: - type: vet+tests - goversion: 1.18 + goversion: 1.19 - type: tests - goversion: 1.18 + goversion: 1.19 testflags: -race - type: tests - goversion: 1.18 + goversion: 1.19 goarch: 386 - type: tests - goversion: 1.18 + goversion: 1.19 goarch: arm64 - type: tests - goversion: 1.17 + goversion: 1.18 - type: tests - goversion: 1.16 + goversion: 1.17 - type: tests - goversion: 1.15 + goversion: 1.16 - type: extras - goversion: 1.18 + goversion: 1.19 steps: # Setup the environment. 
diff --git a/admin/admin.go b/admin/admin.go index 803a4b935340..41ae156b4d15 100644 --- a/admin/admin.go +++ b/admin/admin.go @@ -23,7 +23,7 @@ // // - CSDS: https://github.com/grpc/proposal/blob/master/A40-csds-support.md // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. diff --git a/attributes/attributes.go b/attributes/attributes.go index ae13ddac14e0..02f5dc531891 100644 --- a/attributes/attributes.go +++ b/attributes/attributes.go @@ -19,7 +19,7 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index 75bbdb44d497..b01fc2fcdb1d 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -16,7 +16,7 @@ // Package authz exposes methods to manage authorization within gRPC. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed // in a later release. diff --git a/backoff.go b/backoff.go index 542594f5cc51..29475e31c979 100644 --- a/backoff.go +++ b/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index e8dfc828aaac..3929c26d31e1 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. 
The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) diff --git a/balancer/conn_state_evaluator.go b/balancer/conn_state_evaluator.go index a87b6809af38..33ea9b5821dd 100644 --- a/balancer/conn_state_evaluator.go +++ b/balancer/conn_state_evaluator.go @@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct { // RecordTransition records state change happening in subConn and based on that // it evaluates what aggregated state should be. // -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. // // Shutdown is not considered. 
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index c393d7ffd3b2..bf4c3cb4449e 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -52,6 +52,7 @@ type LoadBalanceRequest struct { unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceRequestType: + // // *LoadBalanceRequest_InitialRequest // *LoadBalanceRequest_ClientStats LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` @@ -340,6 +341,7 @@ type LoadBalanceResponse struct { unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceResponseType: + // // *LoadBalanceResponse_InitialResponse // *LoadBalanceResponse_ServerList // *LoadBalanceResponse_FallbackResponse diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index 6c3402e36c60..dd15810d0aeb 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -19,7 +19,8 @@ // Package grpclb defines a grpclb balancer. // // To install grpclb balancer, import this package as: -// import _ "google.golang.org/grpc/balancer/grpclb" +// +// import _ "google.golang.org/grpc/balancer/grpclb" package grpclb import ( @@ -229,8 +230,9 @@ type lbBalancer struct { // regeneratePicker takes a snapshot of the balancer, and generates a picker from // it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does two layer roundrobin pick otherwise. +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// // Caller must hold lb.mu. 
func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { @@ -290,14 +292,14 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // fallback and grpclb). lb.scState contains states for all SubConns, including // those in cache (SubConns are cached for 10 seconds after remove). // -// The aggregated state is: -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; -// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, -// they start to connect immediately. But there's a race between the overall -// state is reported, and when the new SubConn state arrives. And SubConns -// never go back to IDLE. -// - Else the aggregated state is TransientFailure. +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; +// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, +// they start to connect immediately. But there's a race between the overall +// state is reported, and when the new SubConn state arrives. And SubConns +// never go back to IDLE. +// - Else the aggregated state is TransientFailure. func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { var numConnecting uint64 diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index b7a11e8851b9..036ad4454c71 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -426,7 +426,6 @@ func (b *rlsBalancer) ExitIdle() { // sendNewPickerLocked pushes a new picker on to the channel. 
// -// // Note that regardless of what connectivity state is reported, the policy will // return its own picker, and not a picker that unconditionally queues // (typically used for IDLE or CONNECTING) or a picker that unconditionally @@ -485,14 +484,14 @@ func (b *rlsBalancer) sendNewPicker() { } // The aggregated connectivity state reported is determined as follows: -// - If there is at least one child policy in state READY, the connectivity -// state is READY. -// - Otherwise, if there is at least one child policy in state CONNECTING, the -// connectivity state is CONNECTING. -// - Otherwise, if there is at least one child policy in state IDLE, the -// connectivity state is IDLE. -// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the -// connectivity state is TRANSIENT_FAILURE. +// - If there is at least one child policy in state READY, the connectivity +// state is READY. +// - Otherwise, if there is at least one child policy in state CONNECTING, the +// connectivity state is CONNECTING. +// - Otherwise, if there is at least one child policy in state IDLE, the +// connectivity state is IDLE. +// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the +// connectivity state is TRANSIENT_FAILURE. // // If the RLS policy has no child policies and no configured default target, // then we will report connectivity state IDLE. @@ -542,9 +541,9 @@ func (b *rlsBalancer) UpdateState(id string, state balancer.State) { // This method is invoked by the BalancerGroup whenever a child policy sends a // state update. 
We cache the child policy's connectivity state and picker for // two reasons: -// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states -// other than READY -// - to delegate picks to child policies +// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states +// other than READY +// - to delegate picks to child policies func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer.State) { b.stateMu.Lock() defer b.stateMu.Unlock() diff --git a/balancer/rls/config.go b/balancer/rls/config.go index 4cd0738eb695..77b6bdcd1cca 100644 --- a/balancer/rls/config.go +++ b/balancer/rls/config.go @@ -113,33 +113,36 @@ type lbConfigJSON struct { // ParseConfig parses the JSON load balancer config provided into an // internal form or returns an error if the config is invalid. // -// When parsing a config update, the following validations are performed: -// - routeLookupConfig: -// - grpc_keybuilders field: -// - must have at least one entry -// - must not have two entries with the same `Name` -// - within each entry: -// - must have at least one `Name` -// - must not have a `Name` with the `service` field unset or empty -// - within each `headers` entry: -// - must not have `required_match` set -// - must not have `key` unset or empty -// - across all `headers`, `constant_keys` and `extra_keys` fields: -// - must not have the same `key` specified twice -// - no `key` must be the empty string -// - `lookup_service` field must be set and must parse as a target URI -// - if `max_age` > 5m, it should be set to 5 minutes -// - if `stale_age` > `max_age`, ignore it -// - if `stale_age` is set, then `max_age` must also be set -// - ignore `valid_targets` field -// - `cache_size_bytes` field must have a value greater than 0, and if its -// value is greater than 5M, we cap it at 5M -// - routeLookupChannelServiceConfig: -// - if specified, must parse as valid service config -// - childPolicy: -// - must find a valid child 
policy with a valid config -// - childPolicyConfigTargetFieldName: -// - must be set and non-empty +// When parsing a config update, the following validations are performed: +// - routeLookupConfig: +// - grpc_keybuilders field: +// - must have at least one entry +// - must not have two entries with the same `Name` +// - within each entry: +// - must have at least one `Name` +// - must not have a `Name` with the `service` field unset or empty +// - within each `headers` entry: +// - must not have `required_match` set +// - must not have `key` unset or empty +// - across all `headers`, `constant_keys` and `extra_keys` fields: +// - must not have the same `key` specified twice +// - no `key` must be the empty string +// - `lookup_service` field must be set and must parse as a target URI +// - if `max_age` > 5m, it should be set to 5 minutes +// - if `stale_age` > `max_age`, ignore it +// - if `stale_age` is set, then `max_age` must also be set +// - ignore `valid_targets` field +// - `cache_size_bytes` field must have a value greater than 0, and if its +// value is greater than 5M, we cap it at 5M +// +// - routeLookupChannelServiceConfig: +// - if specified, must parse as valid service config +// +// - childPolicy: +// - must find a valid child policy with a valid config +// +// - childPolicyConfigTargetFieldName: +// - must be set and non-empty func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) cfgJSON := &lbConfigJSON{} diff --git a/balancer/rls/helpers_test.go b/balancer/rls/helpers_test.go index 5fca54a63ace..67ca18755ee9 100644 --- a/balancer/rls/helpers_test.go +++ b/balancer/rls/helpers_test.go @@ -220,12 +220,12 @@ func startManualResolverWithConfig(t *testing.T, rlsConfig *e2e.RLSConfig) *manu // // There are many instances where it can take a while before the attempted RPC // reaches the expected backend. 
Examples include, but are not limited to: -// - control channel is changed in a config update. The RLS LB policy creates a -// new control channel, and sends a new picker to gRPC. But it takes a while -// before gRPC actually starts using the new picker. -// - test is waiting for a cache entry to expire after which we expect a -// different behavior because we have configured the fake RLS server to return -// different backends. +// - control channel is changed in a config update. The RLS LB policy creates a +// new control channel, and sends a new picker to gRPC. But it takes a while +// before gRPC actually starts using the new picker. +// - test is waiting for a cache entry to expire after which we expect a +// different behavior because we have configured the fake RLS server to return +// different backends. // // Therefore, we do not return an error when the RPC fails. Instead, we wait for // the context to expire before failing. diff --git a/balancer/rls/internal/adaptive/adaptive.go b/balancer/rls/internal/adaptive/adaptive.go index 4adae1bde6b4..a3b0931b2955 100644 --- a/balancer/rls/internal/adaptive/adaptive.go +++ b/balancer/rls/internal/adaptive/adaptive.go @@ -45,21 +45,21 @@ const ( // The throttler has the following knobs for which we will use defaults for // now. If there is a need to make them configurable at a later point in time, // support for the same will be added. -// * Duration: amount of recent history that will be taken into account for -// making client-side throttling decisions. A default of 30 seconds is used. -// * Bins: number of bins to be used for bucketing historical data. A default -// of 100 is used. -// * RatioForAccepts: ratio by which accepts are multiplied, typically a value -// slightly larger than 1.0. 
This is used to make the throttler behave as if -// the backend had accepted more requests than it actually has, which lets us -// err on the side of sending to the backend more requests than we think it -// will accept for the sake of speeding up the propagation of state. A -// default of 2.0 is used. -// * RequestsPadding: is used to decrease the (client-side) throttling -// probability in the low QPS regime (to speed up propagation of state), as -// well as to safeguard against hitting a client-side throttling probability -// of 100%. The weight of this value decreases as the number of requests in -// recent history grows. A default of 8 is used. +// - Duration: amount of recent history that will be taken into account for +// making client-side throttling decisions. A default of 30 seconds is used. +// - Bins: number of bins to be used for bucketing historical data. A default +// of 100 is used. +// - RatioForAccepts: ratio by which accepts are multiplied, typically a value +// slightly larger than 1.0. This is used to make the throttler behave as if +// the backend had accepted more requests than it actually has, which lets us +// err on the side of sending to the backend more requests than we think it +// will accept for the sake of speeding up the propagation of state. A +// default of 2.0 is used. +// - RequestsPadding: is used to decrease the (client-side) throttling +// probability in the low QPS regime (to speed up propagation of state), as +// well as to safeguard against hitting a client-side throttling probability +// of 100%. The weight of this value decreases as the number of requests in +// recent history grows. A default of 8 is used. // // The adaptive throttler attempts to estimate the probability that a request // will be throttled using recent history. 
Server requests (both throttled and diff --git a/balancer/weightedroundrobin/weightedroundrobin.go b/balancer/weightedroundrobin/weightedroundrobin.go index d82b714e0701..6fc4d1910e67 100644 --- a/balancer/weightedroundrobin/weightedroundrobin.go +++ b/balancer/weightedroundrobin/weightedroundrobin.go @@ -45,7 +45,7 @@ func (a AddrInfo) Equal(o interface{}) bool { // SetAddrInfo returns a copy of addr in which the BalancerAttributes field is // updated with addrInfo. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -57,7 +57,7 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { // GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of // addr. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index d9707553a688..5e2caf5d204d 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -21,10 +21,10 @@ Package main provides benchmark with setting flags. 
An example to run some benchmarks with profiling enabled: -go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ - -compression=gzip -maxConcurrentCalls=1 -trace=off \ - -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ - -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result + go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ + -compression=gzip -maxConcurrentCalls=1 -trace=off \ + -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ + -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result As a suggestion, when creating a branch, you can run this benchmark and save the result file "-resultFile=basePerf", and later when you at the middle of the work or finish the @@ -32,10 +32,11 @@ work, you can get the benchmark result and compare it with the base anytime. Assume there are two result files names as "basePerf" and "curPerf" created by adding -resultFile=basePerf and -resultFile=curPerf. - To format the curPerf, run: - go run benchmark/benchresult/main.go curPerf - To observe how the performance changes based on a base result, run: - go run benchmark/benchresult/main.go basePerf curPerf + + To format the curPerf, run: + go run benchmark/benchresult/main.go curPerf + To observe how the performance changes based on a base result, run: + go run benchmark/benchresult/main.go basePerf curPerf */ package main diff --git a/benchmark/benchresult/main.go b/benchmark/benchresult/main.go index 587a0f6bda32..5bd9ce6ff891 100644 --- a/benchmark/benchresult/main.go +++ b/benchmark/benchresult/main.go @@ -18,12 +18,14 @@ /* To format the benchmark result: - go run benchmark/benchresult/main.go resultfile + + go run benchmark/benchresult/main.go resultfile To see the performance change based on a old result: - go run benchmark/benchresult/main.go resultfile_old resultfile -It will print the comparison result of intersection benchmarks between two files. 
+ go run benchmark/benchresult/main.go resultfile_old resultfile + +It will print the comparison result of intersection benchmarks between two files. */ package main diff --git a/benchmark/server/main.go b/benchmark/server/main.go index 5a82b1c78012..da23a0270bd6 100644 --- a/benchmark/server/main.go +++ b/benchmark/server/main.go @@ -20,6 +20,7 @@ Package main provides a server used for benchmarking. It launches a server which is listening on port 50051. An example to start the server can be found at: + go run benchmark/server/main.go -test_name=grpc_test After starting the server, the client can be run separately and used to test diff --git a/binarylog/grpc_binarylog_v1/binarylog.pb.go b/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf34..64a232f28111 100644 --- a/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -261,6 +261,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +695,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. 
To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because diff --git a/channelz/channelz.go b/channelz/channelz.go index a220c47c59a5..32b7fa5794e1 100644 --- a/channelz/channelz.go +++ b/channelz/channelz.go @@ -23,7 +23,7 @@ // https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by // the `internal/channelz` package. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index 4caf9e76e54c..b39fd928f432 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -514,6 +514,7 @@ type ChannelTraceEvent struct { // created. 
// // Types that are assignable to ChildRef: + // // *ChannelTraceEvent_ChannelRef // *ChannelTraceEvent_SubchannelRef ChildRef isChannelTraceEvent_ChildRef `protobuf_oneof:"child_ref"` @@ -1338,6 +1339,7 @@ type Address struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Address: + // // *Address_TcpipAddress // *Address_UdsAddress_ // *Address_OtherAddress_ @@ -1433,6 +1435,7 @@ type Security struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Model: + // // *Security_Tls_ // *Security_Other Model isSecurity_Model `protobuf_oneof:"model"` @@ -2908,6 +2911,7 @@ type Security_Tls struct { unknownFields protoimpl.UnknownFields // Types that are assignable to CipherSuite: + // // *Security_Tls_StandardName // *Security_Tls_OtherName CipherSuite isSecurity_Tls_CipherSuite `protobuf_oneof:"cipher_suite"` diff --git a/clientconn.go b/clientconn.go index 779b03bca1c3..9691444d0403 100644 --- a/clientconn.go +++ b/clientconn.go @@ -503,7 +503,7 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State { // the channel is idle. Does not wait for the connection attempts to begin // before returning. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. 
@@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -831,9 +831,9 @@ func equalAddresses(a, b []resolver.Address) bool { // // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() @@ -998,7 +998,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/cmd/protoc-gen-go-grpc/main.go b/cmd/protoc-gen-go-grpc/main.go index 58cde2eb6df2..e1943024ac53 100644 --- a/cmd/protoc-gen-go-grpc/main.go +++ b/cmd/protoc-gen-go-grpc/main.go @@ -19,14 +19,17 @@ // protoc-gen-go-grpc is a plugin for the Google protocol buffer compiler to // generate Go code. Install it by building this program and making it // accessible within your PATH with the name: +// // protoc-gen-go-grpc // // The 'go-grpc' suffix becomes part of the argument for the protocol compiler, // such that it can be invoked as: +// // protoc --go-grpc_out=. path/to/file.proto // // This generates Go service definitions for the protocol buffer defined by // file.proto. 
With that input, the output will be written to: +// // path/to/file_grpc.pb.go package main diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 40570e9bf2de..383c5fb97a77 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -216,6 +216,7 @@ type Identity struct { unknownFields protoimpl.UnknownFields // Types that are assignable to IdentityOneof: + // // *Identity_ServiceAccount // *Identity_Hostname IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` @@ -664,6 +665,7 @@ type HandshakerReq struct { unknownFields protoimpl.UnknownFields // Types that are assignable to ReqOneof: + // // *HandshakerReq_ClientStart // *HandshakerReq_ServerStart // *HandshakerReq_Next diff --git a/credentials/google/xds.go b/credentials/google/xds.go index e32edc0421c3..2c5c8b9eee13 100644 --- a/credentials/google/xds.go +++ b/credentials/google/xds.go @@ -40,6 +40,7 @@ const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", // use TLS // - otherwise, use ALTS +// // - else, do TLS // // On the server, ServerHandshake always does TLS. diff --git a/credentials/local/local.go b/credentials/local/local.go index f772bc1307b2..d5a3a8596056 100644 --- a/credentials/local/local.go +++ b/credentials/local/local.go @@ -23,7 +23,7 @@ // reported. If local credentials is not used in local connections // (local TCP or UDS), it will fail. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/credentials/sts/sts.go b/credentials/sts/sts.go index 981537ca1175..19ca7d0b3305 100644 --- a/credentials/sts/sts.go +++ b/credentials/sts/sts.go @@ -19,7 +19,7 @@ // Package sts implements call credentials using STS (Security Token Service) as // defined in https://tools.ietf.org/html/rfc8693. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be changed or // removed in a later release. @@ -245,12 +245,12 @@ func (c *callCreds) cachedMetadata() map[string]string { // constructRequest creates the STS request body in JSON based on the provided // options. -// - Contents of the subjectToken are read from the file specified in -// options. If we encounter an error here, we bail out. -// - Contents of the actorToken are read from the file specified in options. -// If we encounter an error here, we ignore this field because this is -// optional. -// - Most of the other fields in the request come directly from options. +// - Contents of the subjectToken are read from the file specified in +// options. If we encounter an error here, we bail out. +// - Contents of the actorToken are read from the file specified in options. +// If we encounter an error here, we ignore this field because this is +// optional. +// - Most of the other fields in the request come directly from options. // // A new HTTP request is created by calling http.NewRequestWithContext() and // passing the provided context, thereby enforcing any timeouts specified in diff --git a/credentials/tls.go b/credentials/tls.go index 784822d0560a..ce2bbc10a142 100644 --- a/credentials/tls.go +++ b/credentials/tls.go @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. 
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/credentials/tls/certprovider/distributor.go b/credentials/tls/certprovider/distributor.go index fdb38a663fe8..11fae92adace 100644 --- a/credentials/tls/certprovider/distributor.go +++ b/credentials/tls/certprovider/distributor.go @@ -31,11 +31,11 @@ import ( // // Provider implementations which choose to use a Distributor should do the // following: -// - create a new Distributor using the NewDistributor() function. -// - invoke the Set() method whenever they have new key material or errors to -// report. -// - delegate to the distributor when handing calls to KeyMaterial(). -// - invoke the Stop() method when they are done using the distributor. +// - create a new Distributor using the NewDistributor() function. +// - invoke the Set() method whenever they have new key material or errors to +// report. +// - delegate to the distributor when handing calls to KeyMaterial(). +// - invoke the Stop() method when they are done using the distributor. type Distributor struct { // mu protects the underlying key material. mu sync.Mutex diff --git a/credentials/tls/certprovider/pemfile/watcher.go b/credentials/tls/certprovider/pemfile/watcher.go index e154030cbe8a..3c62491f7be8 100644 --- a/credentials/tls/certprovider/pemfile/watcher.go +++ b/credentials/tls/certprovider/pemfile/watcher.go @@ -19,7 +19,7 @@ // Package pemfile provides a file watching certificate provider plugin // implementation which works for files with PEM contents. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. 
diff --git a/credentials/tls/certprovider/provider.go b/credentials/tls/certprovider/provider.go index 275c176afdf8..f24df7c5008b 100644 --- a/credentials/tls/certprovider/provider.go +++ b/credentials/tls/certprovider/provider.go @@ -18,7 +18,7 @@ // Package certprovider defines APIs for Certificate Providers in gRPC. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. diff --git a/encoding/encoding.go b/encoding/encoding.go index 9151eba26ac9..711763d54fb7 100644 --- a/encoding/encoding.go +++ b/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/encoding/gzip/gzip.go b/encoding/gzip/gzip.go index ce2f15ed288f..ca820bd47d44 100644 --- a/encoding/gzip/gzip.go +++ b/encoding/gzip/gzip.go @@ -19,7 +19,7 @@ // Package gzip implements and registers the gzip compressor // during the initialization. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go index 1010f72f743c..d8dde6404de0 100644 --- a/gcp/observability/observability.go +++ b/gcp/observability/observability.go @@ -19,7 +19,7 @@ // Package observability implements the tracing, metrics, and logging data // collection, and provides controlling knobs via a config file. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/grpclog/loggerv2.go b/grpclog/loggerv2.go index 7c1f66409034..b5560b47ec4b 100644 --- a/grpclog/loggerv2.go +++ b/grpclog/loggerv2.go @@ -242,7 +242,7 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/grpclog/loggerv2_test.go b/grpclog/loggerv2_test.go index 0b2c8b23d668..119cea4c6ecd 100644 --- a/grpclog/loggerv2_test.go +++ b/grpclog/loggerv2_test.go @@ -52,7 +52,8 @@ func TestLoggerV2Severity(t *testing.T) { } // check if b is in the format of: -// 2017/04/07 14:55:42 WARNING: WARNING +// +// 2017/04/07 14:55:42 WARNING: WARNING func checkLogForSeverity(s int, b []byte) error { expected := regexp.MustCompile(fmt.Sprintf(`^[0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s: %s\n$`, severityName[s], severityName[s])) if m := expected.Match(b); !m { diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index 70edbb17c0c0..ae17801fe2f0 100644 --- a/internal/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -185,19 +185,19 @@ func (sbc *subBalancerWrapper) stopBalancer() { // intended to be used directly as a balancer. It's expected to be used as a // sub-balancer manager by a high level balancer. 
// -// Updates from ClientConn are forwarded to sub-balancers -// - service config update -// - address update -// - subConn state change -// - find the corresponding balancer and forward +// Updates from ClientConn are forwarded to sub-balancers +// - service config update +// - address update +// - subConn state change +// - find the corresponding balancer and forward // -// Actions from sub-balances are forwarded to parent ClientConn -// - new/remove SubConn -// - picker update and health states change -// - sub-pickers are sent to an aggregator provided by the parent, which -// will group them into a group-picker. The aggregated connectivity state is -// also handled by the aggregator. -// - resolveNow +// Actions from sub-balances are forwarded to parent ClientConn +// - new/remove SubConn +// - picker update and health states change +// - sub-pickers are sent to an aggregator provided by the parent, which +// will group them into a group-picker. The aggregated connectivity state is +// also handled by the aggregator. +// - resolveNow // // Sub-balancers are only built when the balancer group is started. If the // balancer group is closed, the sub-balancers are also closed. And it's diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index 57ab28895f1c..566ffc386c04 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -167,8 +167,9 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { // into balancer group inline when it gets an update. // // The potential deadlock can happen if we -// - hold a lock and send updates to balancer (e.g. update resolved addresses) -// - the balancer calls back (NewSubConn or update picker) in line +// - hold a lock and send updates to balancer (e.g. 
update resolved addresses) +// - the balancer calls back (NewSubConn or update picker) in line +// // The callback will try to hold hte same lock again, which will cause a // deadlock. // diff --git a/internal/binarylog/env_config.go b/internal/binarylog/env_config.go index c5579e65065f..f9e80e27ab68 100644 --- a/internal/binarylog/env_config.go +++ b/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. diff --git a/internal/channelz/types.go b/internal/channelz/types.go index ad0ce4dabf06..7b2f350e2e64 100644 --- a/internal/channelz/types.go +++ b/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. 
// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return diff --git a/internal/grpclog/grpclog.go b/internal/grpclog/grpclog.go index 30a3b4258fc0..b68e26a36493 100644 --- a/internal/grpclog/grpclog.go +++ b/internal/grpclog/grpclog.go @@ -110,7 +110,7 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/internal/grpctest/grpctest.go b/internal/grpctest/grpctest.go index a4b49d5f9a5e..d0a2c533b855 100644 --- a/internal/grpctest/grpctest.go +++ b/internal/grpctest/grpctest.go @@ -79,10 +79,12 @@ func getTestFunc(t *testing.T, xv reflect.Value, name string) func(*testing.T) { // functions, respectively. // // For example usage, see example_test.go. Run it using: -// $ go test -v -run TestExample . +// +// $ go test -v -run TestExample . // // To run a specific test/subtest: -// $ go test -v -run 'TestExample/^Something$' . +// +// $ go test -v -run 'TestExample/^Something$' . func RunSubTests(t *testing.T, x interface{}) { xt := reflect.TypeOf(x) xv := reflect.ValueOf(x) diff --git a/internal/grpcutil/method.go b/internal/grpcutil/method.go index e9c4af64830c..ec62b4775e5b 100644 --- a/internal/grpcutil/method.go +++ b/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") diff --git a/internal/hierarchy/hierarchy.go b/internal/hierarchy/hierarchy.go index 341d3405dc6c..884ae22292dc 100644 --- a/internal/hierarchy/hierarchy.go +++ b/internal/hierarchy/hierarchy.go @@ -70,26 +70,29 @@ func Set(addr resolver.Address, path []string) resolver.Address { // // Input: // [ -// {addr0, path: [p0, wt0]} -// {addr1, path: [p0, wt1]} -// {addr2, path: [p1, wt2]} -// {addr3, path: [p1, wt3]} +// +// {addr0, path: [p0, wt0]} +// {addr1, path: [p0, wt1]} +// {addr2, path: [p1, wt2]} +// {addr3, path: [p1, wt3]} +// // ] // // Addresses will be split into p0/p1, and the p0/p1 will be removed from the // path. 
// // Output: -// { -// p0: [ -// {addr0, path: [wt0]}, -// {addr1, path: [wt1]}, -// ], -// p1: [ -// {addr2, path: [wt2]}, -// {addr3, path: [wt3]}, -// ], -// } +// +// { +// p0: [ +// {addr0, path: [wt0]}, +// {addr1, path: [wt1]}, +// ], +// p1: [ +// {addr2, path: [wt2]}, +// {addr3, path: [wt3]}, +// ], +// } // // If hierarchical path is not set, or has no path in it, the address is // dropped. diff --git a/internal/profiling/goid_modified.go b/internal/profiling/goid_modified.go index ff1a5f5933b7..f2bae99b2a95 100644 --- a/internal/profiling/goid_modified.go +++ b/internal/profiling/goid_modified.go @@ -33,48 +33,48 @@ import ( // // Several other approaches were considered before arriving at this: // -// 1. Using a CGO module: CGO usually has access to some things that regular -// Go does not. Till go1.4, CGO used to have access to the goroutine struct -// because the Go runtime was written in C. However, 1.5+ uses a native Go -// runtime; as a result, CGO does not have access to the goroutine structure -// anymore in modern Go. Besides, CGO interop wasn't fast enough (estimated -// to be ~170ns/op). This would also make building grpc require a C -// compiler, which isn't a requirement currently, breaking a lot of stuff. +// 1. Using a CGO module: CGO usually has access to some things that regular +// Go does not. Till go1.4, CGO used to have access to the goroutine struct +// because the Go runtime was written in C. However, 1.5+ uses a native Go +// runtime; as a result, CGO does not have access to the goroutine structure +// anymore in modern Go. Besides, CGO interop wasn't fast enough (estimated +// to be ~170ns/op). This would also make building grpc require a C +// compiler, which isn't a requirement currently, breaking a lot of stuff. // -// 2. 
Using runtime.Stack stacktrace: While this would remove the need for a -// modified Go runtime, this is ridiculously slow, thanks to the all the -// string processing shenanigans required to extract the goroutine ID (about -// ~2000ns/op). +// 2. Using runtime.Stack stacktrace: While this would remove the need for a +// modified Go runtime, this is ridiculously slow, thanks to the all the +// string processing shenanigans required to extract the goroutine ID (about +// ~2000ns/op). // -// 3. Using Go version-specific build tags: For any given Go version, the -// goroutine struct has a fixed structure. As a result, the goroutine ID -// could be extracted if we know the offset using some assembly. This would -// be faster then #1 and #2, but is harder to maintain. This would require -// special Go code that's both architecture-specific and go version-specific -// (a quadratic number of variants to maintain). +// 3. Using Go version-specific build tags: For any given Go version, the +// goroutine struct has a fixed structure. As a result, the goroutine ID +// could be extracted if we know the offset using some assembly. This would +// be faster then #1 and #2, but is harder to maintain. This would require +// special Go code that's both architecture-specific and go version-specific +// (a quadratic number of variants to maintain). // -// 4. This approach, which requires a simple modification [1] to the Go runtime -// to expose the current goroutine's ID. This is the chosen approach and it -// takes about ~2 ns/op, which is negligible in the face of the tens of -// microseconds that grpc takes to complete a RPC request. +// 4. This approach, which requires a simple modification [1] to the Go runtime +// to expose the current goroutine's ID. This is the chosen approach and it +// takes about ~2 ns/op, which is negligible in the face of the tens of +// microseconds that grpc takes to complete a RPC request. 
// // [1] To make the goroutine ID visible to Go programs apply the following // change to the runtime2.go file in your Go runtime installation: // -// diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go -// --- a/src/runtime/runtime2.go -// +++ b/src/runtime/runtime2.go -// @@ -392,6 +392,10 @@ type stack struct { -// hi uintptr -// } +// diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go +// --- a/src/runtime/runtime2.go +// +++ b/src/runtime/runtime2.go +// @@ -392,6 +392,10 @@ type stack struct { +// hi uintptr +// } // -// +func Goid() int64 { -// + return getg().goid -// +} -// + -// type g struct { -// // Stack parameters. -// // stack describes the actual stack memory: [stack.lo, stack.hi). +// +func Goid() int64 { +// + return getg().goid +// +} +// + +// type g struct { +// // Stack parameters. +// // stack describes the actual stack memory: [stack.lo, stack.hi). // // The exposed runtime.Goid() function will return a int64 goroutine ID. func goid() int64 { diff --git a/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go index 7e4c932e20ff..d481f8acb004 100644 --- a/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -204,8 +204,10 @@ func (x *GrpcKeyBuilder) GetConstantKeys() map[string]string { // // For a service where the project id can be expressed either as a subdomain or // in the path, separate HttpKeyBuilders must be used: -// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' -// host_pattern: '{id}.example.com' path_pattern: '/{object}/**' +// +// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' +// host_pattern: '{id}.example.com' path_pattern: '/{object}/**' +// // If the host is exactly 'example.com', the first path segment will be used as // the id and the second segment as the object. If the host has a subdomain, the // subdomain will be used as the id and the first segment as the object. 
If @@ -223,7 +225,7 @@ type HttpKeyBuilder struct { // - "*": Matches any single label. // - "**": Matches zero or more labels (first or last part of host only). // - "{=...}": One or more label capture, where "..." can be any - // template that does not include a capture. + // template that does not include a capture. // - "{}": A single label capture. Identical to {=*}. // // Examples: @@ -242,8 +244,9 @@ type HttpKeyBuilder struct { // - "*": Matches any single segment. // - "**": Matches zero or more segments (first or last part of path only). // - "{=...}": One or more segment capture, where "..." can be any - // template that does not include a capture. + // template that does not include a capture. // - "{}": A single segment capture. Identical to {=*}. + // // A custom method may also be specified by appending ":" and the custom // method name or "*" to indicate any custom method (including no custom // method). For example, "/*/projects/{project_id}/**:*" extracts diff --git a/internal/serviceconfig/serviceconfig.go b/internal/serviceconfig/serviceconfig.go index badbdbf597f3..51e733e495a3 100644 --- a/internal/serviceconfig/serviceconfig.go +++ b/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. 
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index 5983d75bafc2..d45486abd251 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -290,16 +290,16 @@ func (tcc *TestClientConn) WaitForPicker(ctx context.Context, f func(balancer.Pi // Step 1. the return values of f should form a permutation of all elements in // want, but not necessary in the same order. E.g. if want is {a,a,b}, the check // fails if f returns: -// - {a,a,a}: third a is returned before b -// - {a,b,b}: second b is returned before the second a +// - {a,a,a}: third a is returned before b +// - {a,b,b}: second b is returned before the second a // // If error is found in this step, the returned error contains only the first // iteration until where it goes wrong. // // Step 2. the return values of f should be repetitions of the same permutation. // E.g. if want is {a,a,b}, the check failes if f returns: -// - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not -// repeating the first iteration. +// - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not +// repeating the first iteration. // // If error is found in this step, the returned error contains the first // iteration + the second iteration until where it goes wrong. diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index 090120925bb4..fb272235d817 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -442,10 +442,10 @@ func (ht *serverHandlerTransport) Drain() { // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 5251e28d7349..256fcb71f47a 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -661,13 +661,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. It's possible this could succeed on another transport, even if -// it's unlikely, but do not transparently retry. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. 
type NewStreamError struct { Err error diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 760e1b64f358..8cd0d4174405 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -2005,7 +2005,7 @@ func (s) TestPingPong1MB(t *testing.T) { runPingPongTest(t, 1048576) } -//This is a stress-test of flow control logic. +// This is a stress-test of flow control logic. func runPingPongTest(t *testing.T, msgSize int) { server, client, cancel := setUp(t, 0, 0, pingpong) defer cancel() diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index ce60ba06f5a2..74e2a8612468 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -300,6 +300,7 @@ type LoadParams struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Load: + // // *LoadParams_ClosedLoop // *LoadParams_Poisson Load isLoadParams_Load `protobuf_oneof:"load"` @@ -445,6 +446,7 @@ type ChannelArg struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are assignable to Value: + // // *ChannelArg_StrValue // *ChannelArg_IntValue Value isChannelArg_Value `protobuf_oneof:"value"` @@ -834,6 +836,7 @@ type ClientArgs struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Argtype: + // // *ClientArgs_Setup // *ClientArgs_Mark Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"` @@ -1061,6 +1064,7 @@ type ServerArgs struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Argtype: + // // *ServerArgs_Setup // *ServerArgs_Mark Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"` diff --git a/interop/grpc_testing/core/stats.pb.go b/interop/grpc_testing/core/stats.pb.go index e5652dc78cc3..d25d27b26fb1 100644 --- a/interop/grpc_testing/core/stats.pb.go +++ b/interop/grpc_testing/core/stats.pb.go @@ -148,6 +148,7 @@ type Metric struct { Name string 
`protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are assignable to Value: + // // *Metric_Count // *Metric_Histogram Value isMetric_Value `protobuf_oneof:"value"` diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index 5b239de631b0..24ae8a4349f6 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -43,10 +43,9 @@ const _ = proto.ProtoPackageIsVersion4 // messages in your project. A typical example is to use it as argument or the // return value of a service API. For instance: // -// service Foo { -// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { }; -// }; -// +// service Foo { +// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { }; +// }; type Empty struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index 1db2725915d4..834de17b91b2 100644 --- a/interop/grpc_testing/payloads.pb.go +++ b/interop/grpc_testing/payloads.pb.go @@ -193,6 +193,7 @@ type PayloadConfig struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Payload: + // // *PayloadConfig_BytebufParams // *PayloadConfig_SimpleParams // *PayloadConfig_ComplexParams diff --git a/interop/xds/client/client.go b/interop/xds/client/client.go index 190c8a7d7866..f5e8469e72cb 100644 --- a/interop/xds/client/client.go +++ b/interop/xds/client/client.go @@ -340,7 +340,8 @@ type rpcConfig struct { } // parseRPCMetadata turns EmptyCall:key1:value1 into -// {typ: emptyCall, md: {key1:value1}}. +// +// {typ: emptyCall, md: {key1:value1}}. 
func parseRPCMetadata(rpcMetadataStr string, rpcs []string) []*rpcConfig { rpcMetadataSplit := strings.Split(rpcMetadataStr, ",") rpcsToMD := make(map[string][]string) diff --git a/metadata/metadata.go b/metadata/metadata.go index 98d62e0675f6..fb4a88f59bd3 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -41,10 +41,11 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -196,7 +198,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Key must be lower-case. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/orca/orca.go b/orca/orca.go index 676c66e2829b..bacc4a89ab0b 100644 --- a/orca/orca.go +++ b/orca/orca.go @@ -20,7 +20,7 @@ // Envoy) on the data plane. 
In a proxyless world with gRPC enabled // applications, aggregation of such reports will be done by the gRPC client. // -// Experimental +// # Experimental // // Notice: All APIs is this package are EXPERIMENTAL and may be changed or // removed in a later release. diff --git a/preloader.go b/preloader.go index 0a1e975ad916..cd45547854f0 100644 --- a/preloader.go +++ b/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/profiling/profiling.go b/profiling/profiling.go index 7112ef2e6a42..869054ea794a 100644 --- a/profiling/profiling.go +++ b/profiling/profiling.go @@ -18,7 +18,7 @@ // Package profiling exposes methods to manage profiling within gRPC. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/profiling/service/service.go b/profiling/service/service.go index 5b034372842e..c0234987392c 100644 --- a/profiling/service/service.go +++ b/profiling/service/service.go @@ -21,7 +21,7 @@ // queried by a client to remotely manage the gRPC profiling behaviour of an // application. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index 1f859f764881..c22f9a52db4e 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -53,6 +53,7 @@ type ServerReflectionRequest struct { // defined field and then handles them using corresponding methods. 
// // Types that are assignable to MessageRequest: + // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol // *ServerReflectionRequest_FileContainingExtension @@ -263,6 +264,7 @@ type ServerReflectionResponse struct { // message_request in the request. // // Types that are assignable to MessageResponse: + // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse diff --git a/reflection/grpc_testing_not_regenerate/testv3.pb.go b/reflection/grpc_testing_not_regenerate/testv3.pb.go index a99a4c9f1ba2..8a690963ec10 100644 --- a/reflection/grpc_testing_not_regenerate/testv3.pb.go +++ b/reflection/grpc_testing_not_regenerate/testv3.pb.go @@ -23,9 +23,11 @@ Package grpc_testing_not_regenerate is a generated protocol buffer package. It is generated from these files: + testv3.proto It has these top-level messages: + SearchResponseV3 SearchRequestV3 */ diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index 81344abd77da..0b41783aa533 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -23,6 +23,7 @@ The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" s := grpc.NewServer() @@ -32,7 +33,6 @@ To register server reflection on a gRPC server: reflection.Register(s) s.Serve(lis) - */ package reflection // import "google.golang.org/grpc/reflection" @@ -74,7 +74,7 @@ func Register(s GRPCServer) { // for a custom implementation to return zero values for the // grpc.ServiceInfo values in the map. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -85,7 +85,7 @@ type ServiceInfoProvider interface { // ExtensionResolver is the interface used to query details about extensions. // This interface is satisfied by protoregistry.GlobalTypes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -96,7 +96,7 @@ type ExtensionResolver interface { // ServerOptions represents the options used to construct a reflection server. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -120,7 +120,7 @@ type ServerOptions struct { // This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // -// Experimental +// # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/resolver/resolver.go b/resolver/resolver.go index ca2e35a3596f..967cbc7373ab 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -96,7 +96,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -236,12 +236,12 @@ type ClientConn interface { // // Examples: // -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // Deprecated: use URL.Scheme instead. Scheme string diff --git a/rpc_util.go b/rpc_util.go index 5d407b004b0e..934fc1aa015e 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -328,7 +328,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +351,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +369,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +379,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -416,7 +416,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -444,7 +444,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +455,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +480,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +497,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +508,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -548,10 +548,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. diff --git a/security/advancedtls/advancedtls.go b/security/advancedtls/advancedtls.go index 1892c5ed7661..9a33bd583f6e 100644 --- a/security/advancedtls/advancedtls.go +++ b/security/advancedtls/advancedtls.go @@ -449,9 +449,9 @@ func (c *advancedTLSCreds) OverrideServerName(serverNameOverride string) error { // and possibly custom verification check. // We have to build our own verification function here because current // tls module: -// 1. does not have a good support on root cert reloading. -// 2. will ignore basic certificate check when setting InsecureSkipVerify -// to true. +// 1. does not have a good support on root cert reloading. +// 2. will ignore basic certificate check when setting InsecureSkipVerify +// to true. func buildVerifyFunc(c *advancedTLSCreds, serverName string, rawConn net.Conn) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go index b54c1c571e6a..4812435881ac 100644 --- a/security/advancedtls/crl.go +++ b/security/advancedtls/crl.go @@ -150,12 +150,12 @@ func x509NameHash(r pkix.RDNSequence) string { // CheckRevocation checks the connection for revoked certificates based on RFC5280. // This implementation has the following major limitations: -// * Indirect CRL files are not supported. -// * CRL loading is only supported from directories in the X509_LOOKUP_hash_dir format. 
-// * OnlySomeReasons is not supported. -// * Delta CRL files are not supported. -// * Certificate CRLDistributionPoint must be URLs, but are then ignored and converted into a file path. -// * CRL checks are done after path building, which goes against RFC4158. +// - Indirect CRL files are not supported. +// - CRL loading is only supported from directories in the X509_LOOKUP_hash_dir format. +// - OnlySomeReasons is not supported. +// - Delta CRL files are not supported. +// - Certificate CRLDistributionPoint must be URLs, but are then ignored and converted into a file path. +// - CRL checks are done after path building, which goes against RFC4158. func CheckRevocation(conn tls.ConnectionState, cfg RevocationConfig) error { return CheckChainRevocation(conn.VerifiedChains, cfg) } @@ -359,8 +359,8 @@ type authKeyID struct { // indirectCRL [4] BOOLEAN DEFAULT FALSE, // onlyContainsAttributeCerts [5] BOOLEAN DEFAULT FALSE } -// -- at most one of onlyContainsUserCerts, onlyContainsCACerts, -// -- and onlyContainsAttributeCerts may be set to TRUE. +// -- at most one of onlyContainsUserCerts, onlyContainsCACerts, +// -- and onlyContainsAttributeCerts may be set to TRUE. type issuingDistributionPoint struct { DistributionPoint asn1.RawValue `asn1:"optional,tag:0"` OnlyContainsUserCerts bool `asn1:"optional,tag:1"` diff --git a/security/authorization/engine/engine.go b/security/authorization/engine/engine.go index 596382e49b8d..970c560af722 100644 --- a/security/authorization/engine/engine.go +++ b/security/authorization/engine/engine.go @@ -253,13 +253,16 @@ func getDecision(engine *policyEngine, match bool) Decision { return DecisionDeny } -// Returns the authorization decision of a single policy engine based on activation. -// If any policy matches, the decision matches the engine's action, and the first -// matching policy name will be returned. 
-// Else if any policy is missing attributes, the decision is unknown, and the list of -// policy names that can't be evaluated due to missing attributes will be returned. -// Else, the decision is the opposite of the engine's action, i.e. an ALLOW engine -// will return DecisionDeny, and vice versa. +// Returns the authorization decision of a single policy engine based on +// activation. If any policy matches, the decision matches the engine's +// action, and the first matching policy name will be returned. +// +// Else if any policy is missing attributes, the decision is unknown, and the +// list of policy names that can't be evaluated due to missing attributes will +// be returned. +// +// Else, the decision is the opposite of the engine's action, i.e. an ALLOW +// engine will return DecisionDeny, and vice versa. func (engine *policyEngine) evaluate(activation interpreter.Activation) (Decision, []string) { unknownPolicyNames := []string{} for policyName, program := range engine.programs { diff --git a/serviceconfig/serviceconfig.go b/serviceconfig/serviceconfig.go index 73a2f926613e..35e7a20a04ba 100644 --- a/serviceconfig/serviceconfig.go +++ b/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/status/status.go b/status/status.go index 6d163b6e3842..623be39f26ba 100644 --- a/status/status.go +++ b/status/status.go @@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. 
// -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/stress/grpc_testing/metrics.pb.go b/stress/grpc_testing/metrics.pb.go index f8e95188fec0..f9d359bf1041 100644 --- a/stress/grpc_testing/metrics.pb.go +++ b/stress/grpc_testing/metrics.pb.go @@ -54,6 +54,7 @@ type GaugeResponse struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are assignable to Value: + // // *GaugeResponse_LongValue // *GaugeResponse_DoubleValue // *GaugeResponse_StringValue diff --git a/tap/tap.go b/tap/tap.go index dbf34e6bb5f5..bfa5dfa40e4d 100644 --- a/tap/tap.go +++ b/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go index 3da983e1e89c..ac041ecd6849 100644 --- a/test/xds/xds_server_integration_test.go +++ b/test/xds/xds_server_integration_test.go @@ -51,9 +51,9 @@ func (*testService) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.S } // setupGRPCServer performs the following: -// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and -// register the test service on it -// - create a local TCP listener and start serving on it +// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and +// register the test service on it +// - create a local TCP listener and start serving on it // // Returns the following: // - local listener on which the xDS-enabled gRPC server is serving on @@ -118,12 +118,12 @@ func hostPortFromListener(lis net.Listener) (string, uint32, error) { // fallback functionality. // // The following sequence of events happen as part of this test: -// - An xDS-enabled gRPC server is created and xDS credentials are configured. -// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS -// credentials are configured. -// - Control plane is configured to not send any security configuration to both -// the client and the server. This results in both of them using the -// configured fallback credentials (which is insecure creds in this case). +// - An xDS-enabled gRPC server is created and xDS credentials are configured. +// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. +// - Control plane is configured to not send any security configuration to both +// the client and the server. This results in both of them using the +// configured fallback credentials (which is insecure creds in this case). 
func (s) TestServerSideXDS_Fallback(t *testing.T) { managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) defer cleanup1() @@ -185,12 +185,12 @@ func (s) TestServerSideXDS_Fallback(t *testing.T) { // credentials with file watcher certificate provider. // // The following sequence of events happen as part of this test: -// - An xDS-enabled gRPC server is created and xDS credentials are configured. -// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS -// credentials are configured. -// - Control plane is configured to send security configuration to both the -// client and the server, pointing to the file watcher certificate provider. -// We verify both TLS and mTLS scenarios. +// - An xDS-enabled gRPC server is created and xDS credentials are configured. +// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. +// - Control plane is configured to send security configuration to both the +// client and the server, pointing to the file watcher certificate provider. +// We verify both TLS and mTLS scenarios. func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { tests := []struct { name string diff --git a/xds/bootstrap/bootstrap.go b/xds/bootstrap/bootstrap.go index db6c7d6754ac..fcb99bdfd967 100644 --- a/xds/bootstrap/bootstrap.go +++ b/xds/bootstrap/bootstrap.go @@ -19,7 +19,7 @@ // Package bootstrap provides the functionality to register possible options // for aspects of the xDS client through the bootstrap file. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed // in a later release. 
diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index a29658ec3141..b76a40355cc8 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -66,41 +66,41 @@ type priorityConfig struct { // If xds lb policy is ROUND_ROBIN, the children will be weighted_target for // locality picking, and round_robin for endpoint picking. // -// ┌────────┐ -// │priority│ -// └┬──────┬┘ -// │ │ -// ┌───────────▼┐ ┌▼───────────┐ -// │cluster_impl│ │cluster_impl│ -// └─┬──────────┘ └──────────┬─┘ -// │ │ -// ┌──────────────▼─┐ ┌─▼──────────────┐ -// │locality_picking│ │locality_picking│ -// └┬──────────────┬┘ └┬──────────────┬┘ -// │ │ │ │ -// ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ -// │LRS│ │LRS│ │LRS│ │LRS│ -// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘ -// │ │ │ │ -// ┌──────────▼─────┐ ┌─────▼──────────┐ ┌──────────▼─────┐ ┌─────▼──────────┐ -// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ -// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘ +// ┌────────┐ +// │priority│ +// └┬──────┬┘ +// │ │ +// ┌───────────▼┐ ┌▼───────────┐ +// │cluster_impl│ │cluster_impl│ +// └─┬──────────┘ └──────────┬─┘ +// │ │ +// ┌──────────────▼─┐ ┌─▼──────────────┐ +// │locality_picking│ │locality_picking│ +// └┬──────────────┬┘ └┬──────────────┬┘ +// │ │ │ │ +// ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ +// │LRS│ │LRS│ │LRS│ │LRS│ +// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘ +// │ │ │ │ +// ┌──────────▼─────┐ ┌─────▼──────────┐ ┌──────────▼─────┐ ┌─────▼──────────┐ +// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ +// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘ // // If xds lb policy is RING_HASH, the children will be just a ring_hash policy. // The endpoints from all localities will be flattened to one addresses list, // and the ring_hash policy will pick endpoints from it. 
// -// ┌────────┐ -// │priority│ -// └┬──────┬┘ -// │ │ -// ┌──────────▼─┐ ┌─▼──────────┐ -// │cluster_impl│ │cluster_impl│ -// └──────┬─────┘ └─────┬──────┘ -// │ │ -// ┌──────▼─────┐ ┌─────▼──────┐ -// │ ring_hash │ │ ring_hash │ -// └────────────┘ └────────────┘ +// ┌────────┐ +// │priority│ +// └┬──────┬┘ +// │ │ +// ┌──────────▼─┐ ┌─▼──────────┐ +// │cluster_impl│ │cluster_impl│ +// └──────┬─────┘ └─────┬──────┘ +// │ │ +// ┌──────▼─────┐ ┌─────▼──────┐ +// │ ring_hash │ │ ring_hash │ +// └────────────┘ └────────────┘ // // If endpointPickingPolicy is nil, roundrobin will be used. // diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 6742675ed6ce..f9fe74dd5401 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -168,11 +168,11 @@ func clientEndpointsResource(nodeID, edsServiceName string, localities []localit // TestEDS_OneLocality tests the cluster_resolver LB policy using an EDS // resource with one locality. The following scenarios are tested: -// 1. Single backend. Test verifies that RPCs reach this backend. -// 2. Add a backend. Test verifies that RPCs are roundrobined across the two -// backends. -// 3. Remove one backend. Test verifies that all RPCs reach the other backend. -// 4. Replace the backend. Test verifies that all RPCs reach the new backend. +// 1. Single backend. Test verifies that RPCs reach this backend. +// 2. Add a backend. Test verifies that RPCs are roundrobined across the two +// backends. +// 3. Remove one backend. Test verifies that all RPCs reach the other backend. +// 4. Replace the backend. Test verifies that all RPCs reach the new backend. func (s) TestEDS_OneLocality(t *testing.T) { // Spin up a management server to receive xDS resources from. 
managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) @@ -262,16 +262,16 @@ func (s) TestEDS_OneLocality(t *testing.T) { // TestEDS_MultipleLocalities tests the cluster_resolver LB policy using an EDS // resource with multiple localities. The following scenarios are tested: -// 1. Two localities, each with a single backend. Test verifies that RPCs are -// weighted roundrobined across these two backends. -// 2. Add another locality, with a single backend. Test verifies that RPCs are -// weighted roundrobined across all the backends. -// 3. Remove one locality. Test verifies that RPCs are weighted roundrobined -// across backends from the remaining localities. -// 4. Add a backend to one locality. Test verifies that RPCs are weighted -// roundrobined across localities. -// 5. Change the weight of one of the localities. Test verifies that RPCs are -// weighted roundrobined across the localities. +// 1. Two localities, each with a single backend. Test verifies that RPCs are +// weighted roundrobined across these two backends. +// 2. Add another locality, with a single backend. Test verifies that RPCs are +// weighted roundrobined across all the backends. +// 3. Remove one locality. Test verifies that RPCs are weighted roundrobined +// across backends from the remaining localities. +// 4. Add a backend to one locality. Test verifies that RPCs are weighted +// roundrobined across localities. +// 5. Change the weight of one of the localities. Test verifies that RPCs are +// weighted roundrobined across the localities. 
// // In our LB policy tree, one of the descendents of the "cluster_resolver" LB // policy is the "weighted_target" LB policy which performs weighted roundrobin diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index ca6f118c5cc0..916f94ec0c02 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -41,30 +41,44 @@ var ( // from a priority to another). // // It's guaranteed that after this function returns: -// - If some child is READY, it is childInUse, and all lower priorities are -// closed. -// - If some child is newly started(in Connecting for the first time), it is -// childInUse, and all lower priorities are closed. -// - Otherwise, the lowest priority is childInUse (none of the children is -// ready, and the overall state is not ready). +// +// If some child is READY, it is childInUse, and all lower priorities are +// closed. +// +// If some child is newly started(in Connecting for the first time), it is +// childInUse, and all lower priorities are closed. +// +// Otherwise, the lowest priority is childInUse (none of the children is +// ready, and the overall state is not ready). // // Steps: -// - If all priorities were deleted, unset childInUse (to an empty string), and -// set parent ClientConn to TransientFailure -// - Otherwise, Scan all children from p0, and check balancer stats: -// - For any of the following cases: -// - If balancer is not started (not built), this is either a new child with -// high priority, or a new builder for an existing child. -// - If balancer is Connecting and has non-nil initTimer (meaning it -// transitioned from Ready or Idle to connecting, not from TF, so we -// should give it init-time to connect). 
-// - If balancer is READY or IDLE -// - If this is the lowest priority -// - do the following: -// - if this is not the old childInUse, override picker so old picker is no -// longer used. -// - switch to it (because all higher priorities are neither new or Ready) -// - forward the new addresses and config +// +// If all priorities were deleted, unset childInUse (to an empty string), and +// set parent ClientConn to TransientFailure +// +// Otherwise, Scan all children from p0, and check balancer stats: +// +// For any of the following cases: +// +// If balancer is not started (not built), this is either a new child with +// high priority, or a new builder for an existing child. +// +// If balancer is Connecting and has non-nil initTimer (meaning it +// transitioned from Ready or Idle to connecting, not from TF, so we +// should give it init-time to connect). +// +// If balancer is READY or IDLE +// +// If this is the lowest priority +// +// do the following: +// +// if this is not the old childInUse, override picker so old picker is no +// longer used. +// +// switch to it (because all higher priorities are neither new or Ready) +// +// forward the new addresses and config // // Caller must hold b.mu. func (b *priorityBalancer) syncPriority(childUpdating string) { diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index eaa4d2638dd9..6f91ff303317 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -324,13 +324,14 @@ func (b *ringhashBalancer) ResolverError(err error) { // UpdateSubConnState updates the per-SubConn state stored in the ring, and also // the aggregated state. 
// -// It triggers an update to cc when: -// - the new state is TransientFailure, to update the error message -// - it's possible that this is a noop, but sending an extra update is easier -// than comparing errors -// - the aggregated state is changed -// - the same picker will be sent again, but this update may trigger a re-pick -// for some RPCs. +// It triggers an update to cc when: +// - the new state is TransientFailure, to update the error message +// - it's possible that this is a noop, but sending an extra update is easier +// than comparing errors +// +// - the aggregated state is changed +// - the same picker will be sent again, but this update may trigger a re-pick +// for some RPCs. func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index a17c6816f711..3df3281c7fa3 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -92,11 +92,11 @@ func (*testService) FullDuplexCall(stream testpb.TestService_FullDuplexCallServe // - create a local TCP listener and start serving on it // // Returns the following: -// - the management server: tests use this to configure resources -// - nodeID expected by the management server: this is set in the Node proto -// sent by the xdsClient for queries. -// - the port the server is listening on -// - cleanup function to be invoked by the tests when done +// - the management server: tests use this to configure resources +// - nodeID expected by the management server: this is set in the Node proto +// sent by the xdsClient for queries. +// - the port the server is listening on +// - cleanup function to be invoked by the tests when done func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { // Spin up a xDS management server on a local port. 
nodeID := uuid.New().String() diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index f1ee06e7b553..ec6da32fad18 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -32,13 +32,13 @@ import ( // connWrapper is a thin wrapper around a net.Conn returned by Accept(). It // provides the following additional functionality: -// 1. A way to retrieve the configured deadline. This is required by the -// ServerHandshake() method of the xdsCredentials when it attempts to read -// key material from the certificate providers. -// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to -// retrieve the configured certificate providers. -// 3. xDS filter_chain matching logic to select appropriate security -// configuration for the incoming connection. +// 1. A way to retrieve the configured deadline. This is required by the +// ServerHandshake() method of the xdsCredentials when it attempts to read +// key material from the certificate providers. +// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to +// retrieve the configured certificate providers. +// 3. xDS filter_chain matching logic to select appropriate security +// configuration for the incoming connection. 
type connWrapper struct { net.Conn diff --git a/xds/internal/test/e2e/e2e_test.go b/xds/internal/test/e2e/e2e_test.go index 309c58010cf3..be8af2b0a26a 100644 --- a/xds/internal/test/e2e/e2e_test.go +++ b/xds/internal/test/e2e/e2e_test.go @@ -123,6 +123,7 @@ func TestPingPong(t *testing.T) { // - verify that // - all RPCs with the same metadata value are sent to the same backend // - only one backend is Ready +// // - send more RPCs with different metadata values until a new backend is picked, and verify that // - only two backends are in Ready func TestAffinity(t *testing.T) { diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index f55d076f8488..5aa70a525038 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -163,9 +163,9 @@ func (s) TestAuthorityNoneDefaultAuthority(t *testing.T) { } // TestAuthorityShare covers that -// - watch with the same authority name doesn't create new authority -// - watch with different authority name but same authority config doesn't -// create new authority +// - watch with the same authority name doesn't create new authority +// - watch with different authority name but same authority config doesn't +// create new authority func (s) TestAuthorityShare(t *testing.T) { overrideFedEnvVar(t) ctrlCh := overrideNewController(t) @@ -210,9 +210,9 @@ func (s) TestAuthorityShare(t *testing.T) { } // TestAuthorityIdle covers that -// - authorities are put in a timeout cache when the last watch is canceled -// - idle authorities are not immediately closed. They will be closed after a -// timeout. +// - authorities are put in a timeout cache when the last watch is canceled +// - idle authorities are not immediately closed. They will be closed after a +// timeout. 
func (s) TestAuthorityIdleTimeout(t *testing.T) { overrideFedEnvVar(t) ctrlCh := overrideNewController(t) diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 5f422c84fd62..792c17d7e1fa 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -72,7 +72,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i // NewWithConfigForTesting returns an xDS client for the specified bootstrap // config, separate from the global singleton. // -// Testing Only +// # Testing Only // // This function should ONLY be used for testing purposes. func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, authorityIdleTimeout time.Duration) (XDSClient, error) { @@ -86,8 +86,7 @@ func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, autho // NewWithBootstrapContentsForTesting returns an xDS client for this config, // separate from the global singleton. // -// -// Testing Only +// # Testing Only // // This function should ONLY be used for testing purposes. func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) { diff --git a/xds/internal/xdsclient/controller/version/v2/client.go b/xds/internal/xdsclient/controller/version/v2/client.go index ae3ae559e5dd..968947b0669e 100644 --- a/xds/internal/xdsclient/controller/version/v2/client.go +++ b/xds/internal/xdsclient/controller/version/v2/client.go @@ -79,10 +79,10 @@ func (v2c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.Cli // rType, on the provided stream. // // version is the ack version to be sent with the request -// - If this is the new request (not an ack/nack), version will be empty. -// - If this is an ack, version will be the version from the response. -// - If this is a nack, version will be the previous acked version (from -// versionMap). If there was no ack before, it will be empty. 
+// - If this is the new request (not an ack/nack), version will be empty. +// - If this is an ack, version will be the version from the response. +// - If this is a nack, version will be the previous acked version (from +// versionMap). If there was no ack before, it will be empty. func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error { stream, ok := s.(adsStream) if !ok { diff --git a/xds/internal/xdsclient/controller/version/v3/client.go b/xds/internal/xdsclient/controller/version/v3/client.go index 1c7f11ad2527..4cacd94dd19b 100644 --- a/xds/internal/xdsclient/controller/version/v3/client.go +++ b/xds/internal/xdsclient/controller/version/v3/client.go @@ -81,10 +81,10 @@ func (v3c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.Cli // rType, on the provided stream. // // version is the ack version to be sent with the request -// - If this is the new request (not an ack/nack), version will be empty. -// - If this is an ack, version will be the version from the response. -// - If this is a nack, version will be the previous acked version (from -// versionMap). If there was no ack before, it will be empty. +// - If this is the new request (not an ack/nack), version will be empty. +// - If this is an ack, version will be the version from the response. +// - If this is a nack, version will be the previous acked version (from +// versionMap). If there was no ack before, it will be empty. 
func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error { stream, ok := s.(adsStream) if !ok { diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index 1994cb1e3ba6..d104090925a4 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -144,13 +144,13 @@ func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, want // TestLDSWatch covers the case where a single watcher exists for a single // listener resource. The test verifies the following scenarios: -// 1. An update from the management server containing the resource being -// watched should result in the invocation of the watch callback. -// 2. An update from the management server containing a resource *not* being -// watched should not result in the invocation of the watch callback. -// 3. After the watch is cancelled, an update from the management server -// containing the resource that was being watched should not result in the -// invocation of the watch callback. +// 1. An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. // // The test is run for old and new style names. func (s) TestLDSWatch(t *testing.T) { @@ -263,14 +263,14 @@ func (s) TestLDSWatch(t *testing.T) { // TestLDSWatch_TwoWatchesForSameResourceName covers the case where two watchers // exist for a single listener resource. The test verifies the following // scenarios: -// 1. 
An update from the management server containing the resource being -// watched should result in the invocation of both watch callbacks. -// 2. After one of the watches is cancelled, a redundant update from the -// management server should not result in the invocation of either of the -// watch callbacks. -// 3. An update from the management server containing the resource being -// watched should result in the invocation of the un-cancelled watch -// callback. +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. An update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. // // The test is run for old and new style names. func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { @@ -680,12 +680,12 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // TestLDSWatch_ResourceRemoved covers the cases where a resource being watched // is removed from the management server. The test verifies the following // scenarios: -// 1. Removing a resource should trigger the watch callback with a resource -// removed error. It should not trigger the watch callback for an unrelated -// resource. -// 2. An update to another resource should result in the invocation of the watch -// callback associated with that resource. It should not result in the -// invocation of the watch callback associated with the deleted resource. +// 1. Removing a resource should trigger the watch callback with a resource +// removed error. It should not trigger the watch callback for an unrelated +// resource. +// 2. 
An update to another resource should result in the invocation of the watch +// callback associated with that resource. It should not result in the +// invocation of the watch callback associated with the deleted resource. // // The test is run with both old and new style names. func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go index 52c6d42d340b..955bbe099b1f 100644 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ b/xds/internal/xdsclient/watchers_cluster_test.go @@ -105,6 +105,7 @@ func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { // - an update is received after a watch() // - another update is received, with one resource removed // - this should trigger callback with resource removed error +// // - one more update without the removed resource // - the callback (above) shouldn't receive any update func (s) TestClusterResourceRemoved(t *testing.T) { diff --git a/xds/internal/xdsclient/watchers_federation_test.go b/xds/internal/xdsclient/watchers_federation_test.go index 527999ebc8ae..302623e19945 100644 --- a/xds/internal/xdsclient/watchers_federation_test.go +++ b/xds/internal/xdsclient/watchers_federation_test.go @@ -57,19 +57,19 @@ func testFedTwoWatchDifferentContextParameterOrder(t *testing.T, typ xdsresource } // TestLDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. 
func (s) TestLDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) { testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}) } // TestRDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. func (s) TestRDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) { testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ VirtualHosts: []*xdsresource.VirtualHost{ @@ -82,19 +82,19 @@ func (s) TestRDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) { } // TestClusterFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. 
func (s) TestClusterFedTwoWatchDifferentContextParameterOrder(t *testing.T) { testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}) } // TestEndpointsFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. +// - Two watches with the same query string, but in different order. The two +// watches should watch the same resource. +// - The response has the same query string, but in different order. The watch +// should still be notified. func (s) TestEndpointsFedTwoWatchDifferentContextParameterOrder(t *testing.T) { testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}) } diff --git a/xds/internal/xdsclient/watchers_test.go b/xds/internal/xdsclient/watchers_test.go index 2405bd684a05..5e2cdb123eff 100644 --- a/xds/internal/xdsclient/watchers_test.go +++ b/xds/internal/xdsclient/watchers_test.go @@ -428,6 +428,7 @@ func testWatchAfterCache(t *testing.T, typ xdsresource.ResourceType, update inte // - an update is received after a watch() // - another update is received, with one resource removed // - this should trigger callback with resource removed error +// // - one more update without the removed resource // - the callback (above) shouldn't receive any update func testResourceRemoved(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) { diff --git a/xds/internal/xdsclient/xdsresource/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go index 78b2a56e8939..20cd40879555 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain.go +++ 
b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -158,11 +158,11 @@ const ( // filter chains in a single Listener resource. It also contains the default // filter chain specified in the Listener resource. It provides two important // pieces of functionality: -// 1. Validate the filter chains in an incoming Listener resource to make sure -// that there aren't filter chains which contain the same match criteria. -// 2. As part of performing the above validation, it builds an internal data -// structure which will if used to look up the matching filter chain at -// connection time. +// 1. Validate the filter chains in an incoming Listener resource to make sure +// that there aren't filter chains which contain the same match criteria. +// 2. As part of performing the above validation, it builds an internal data +// structure which will if used to look up the matching filter chain at +// connection time. // // The logic specified in the documentation around the xDS FilterChainMatch // proto mentions 8 criteria to match on. diff --git a/xds/internal/xdsclient/xdsresource/matcher.go b/xds/internal/xdsclient/xdsresource/matcher.go index d7da32a750e0..6a056235f3bd 100644 --- a/xds/internal/xdsclient/xdsresource/matcher.go +++ b/xds/internal/xdsclient/xdsresource/matcher.go @@ -208,18 +208,21 @@ func match(domain, host string) (domainMatchType, bool) { // FindBestMatchingVirtualHost returns the virtual host whose domains field best // matches host // -// The domains field support 4 different matching pattern types: -// - Exact match -// - Suffix match (e.g. “*ABC”) -// - Prefix match (e.g. “ABC*) -// - Universal match (e.g. 
“*”) +// The domains field support 4 different matching pattern types: // -// The best match is defined as: -// - A match is better if it’s matching pattern type is better -// - Exact match > suffix match > prefix match > universal match -// - If two matches are of the same pattern type, the longer match is better -// - This is to compare the length of the matching pattern, e.g. “*ABCDE” > -// “*ABC” +// - Exact match +// - Suffix match (e.g. “*ABC”) +// - Prefix match (e.g. “ABC*) +// - Universal match (e.g. “*”) +// +// The best match is defined as: +// - A match is better if it’s matching pattern type is better. +// * Exact match > suffix match > prefix match > universal match. +// +// - If two matches are of the same pattern type, the longer match is +// better. +// * This is to compare the length of the matching pattern, e.g. “*ABCDE” > +// “*ABC” func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHost { // Maybe move this crap to client var ( matchVh *VirtualHost diff --git a/xds/server_options.go b/xds/server_options.go index 9bfa41c41a84..9b9700cf3b33 100644 --- a/xds/server_options.go +++ b/xds/server_options.go @@ -62,12 +62,12 @@ type ServingModeChangeArgs struct { // to inject a bootstrap configuration used by only this server, instead of the // global configuration from the environment variables. // -// Testing Only +// # Testing Only // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/xds/server_test.go b/xds/server_test.go index 4ad86879df07..78e4725304c0 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -375,12 +375,12 @@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, // TestServeSuccess tests the successful case of calling Serve(). 
// The following sequence of events happen: -// 1. Create a new GRPCServer and call Serve() in a goroutine. -// 2. Make sure an xdsClient is created, and an LDS watch is registered. -// 3. Push an error response from the xdsClient, and make sure that Serve() does -// not exit. -// 4. Push a good response from the xdsClient, and make sure that Serve() on the -// underlying grpc.Server is called. +// 1. Create a new GRPCServer and call Serve() in a goroutine. +// 2. Make sure an xdsClient is created, and an LDS watch is registered. +// 3. Push an error response from the xdsClient, and make sure that Serve() does +// not exit. +// 4. Push a good response from the xdsClient, and make sure that Serve() on the +// underlying grpc.Server is called. func (s) TestServeSuccess(t *testing.T) { fs, clientCh, cleanup := setupOverrides() defer cleanup() diff --git a/xds/xds.go b/xds/xds.go index 2fbce34663c0..7c479f5f8a86 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -81,12 +81,12 @@ func init() { // the supported environment variables. The resolver.Builder is meant to be // used in conjunction with the grpc.WithResolvers DialOption. // -// Testing Only +// # Testing Only // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
From 79ccdd8f8e4545e1bde9c55cbae30fedeb19ff27 Mon Sep 17 00:00:00 2001 From: Fu Wei Date: Wed, 19 Oct 2022 00:01:08 +0800 Subject: [PATCH 639/998] clientconn: go idle if conn closed after preface received (#5714) --- clientconn.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/clientconn.go b/clientconn.go index 9691444d0403..b75d6d72e0f5 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1325,9 +1325,15 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne return nil case <-connClosed.Done(): // The transport has already closed. If we received the preface, too, - // this is not an error. + // this is not an error and go idle. select { case <-prefaceReceived.Done(): + ac.mu.Lock() + defer ac.mu.Unlock() + + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } return nil default: return errors.New("connection closed before server preface received") From dbb8e2bf901b880186ca69c8157b43c17b5caf22 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 18 Oct 2022 12:20:31 -0700 Subject: [PATCH 640/998] xdsclient: improve CDS watchers test (#5693) --- .../xdsclient/e2e_test/cds_watchers_test.go | 868 ++++++++++++++++++ .../xdsclient/e2e_test/lds_watchers_test.go | 4 + .../xdsclient/watchers_cluster_test.go | 142 --- 3 files changed, 872 insertions(+), 142 deletions(-) create mode 100644 xds/internal/xdsclient/e2e_test/cds_watchers_test.go delete mode 100644 xds/internal/xdsclient/watchers_cluster_test.go diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go new file mode 100644 index 000000000000..b399687527a9 --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go @@ -0,0 +1,868 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +// badClusterResource returns a cluster resource for the given name which +// contains a config_source_specifier for the `lrs_server` field which is not +// set to `self`, and hence is expected to be NACKed by the client. 
+func badClusterResource(clusterName, edsServiceName string, secLevel e2e.SecurityLevel) *v3clusterpb.Cluster { + cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel) + cluster.LrsServer = &v3corepb.ConfigSource{ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{}} + return cluster +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing a cluster created using `badClusterResource`. +const wantClusterNACKErr = "unsupported config_source_specifier" + +// verifyClusterUpdate waits for an update to be received on the provided update +// channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. +func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ClusterUpdateErrTuple) error { + u, err := updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("timeout when waiting for a cluster resource from the management server: %v", err) + } + got := u.(xdsresource.ClusterUpdateErrTuple) + if wantUpdate.Err != nil { + if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { + return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) + } + } + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw")} + if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { + return fmt.Errorf("received unepected diff in the cluster resource update: (-want, got):\n%s", diff) + } + return nil +} + +// verifyNoClusterUpdate verifies that no cluster update is received on the +// provided update channel, and returns an error if an update is received. +// +// A very short deadline is used while waiting for the update, as this function +// is intended to be used when an update is not expected. 
+func verifyNoClusterUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("received unexpected ClusterUpdate when expecting none: %v", u) + } + return nil +} + +// TestCDSWatch covers the case where a single watcher exists for a single +// cluster resource. The test verifies the following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. +// +// The test is run for old and new style names. +func (s) TestCDSWatch(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3clusterpb.Cluster // The resource being watched. + updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update. + notWatchedResource *v3clusterpb.Cluster // A resource which is not being watched. 
+ wantUpdate xdsresource.ClusterUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: cdsName, + watchedResource: e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone), + notWatchedResource: e2e.DefaultCluster("unsubscribed-cds-resource", edsName, e2e.SecurityLevelNone), + wantUpdate: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + }, + }, + { + desc: "new style resource", + resourceName: cdsNameNewStyle, + watchedResource: e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone), + notWatchedResource: e2e.DefaultCluster("unsubscribed-cds-resource", edsNameNewStyle, e2e.SecurityLevelNone), + wantUpdate: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: edsNameNewStyle, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a cluster resource and have the watch + // callback push the received update on to a channel. 
+ updateCh := testutils.NewChannel() + cdsCancel := client.WatchCluster(test.resourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single cluster + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyClusterUpdate(ctx, updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional cluster + // resource, one that we are not interested in. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. 
+ cdsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestCDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single cluster resource. The test verifies the following +// scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. A new update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3clusterpb.Cluster // The resource being watched. + updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update. 
+ wantUpdateV1 xdsresource.ClusterUpdateErrTuple + wantUpdateV2 xdsresource.ClusterUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: cdsName, + watchedResource: e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone), + wantUpdateV1: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + }, + wantUpdateV2: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: "new-eds-resource", + }, + }, + }, + { + desc: "new style resource", + resourceName: cdsNameNewStyle, + watchedResource: e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone), + wantUpdateV1: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: edsNameNewStyle, + }, + }, + wantUpdateV2: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: "new-eds-resource", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for the same cluster resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(test.resourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(test.resourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single cluster + // resource, corresponding to the one we registered watches for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyClusterUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. + cdsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyClusterUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestCDSWatch_ThreeWatchesForDifferentResourceNames covers the case where +// three watchers (two watchers for one resource, and the third watcher for +// another resource) exist across two cluster resources (one with an old style +// name and one with a new style name). The test verifies that an update from +// the management server containing both resources results in the invocation of +// all watch callbacks. +func (s) TestCDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for the same cluster resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Register the third watch for a different cluster resource, and push the + // received updates onto a channel. + updateCh3 := testutils.NewChannel() + cdsCancel3 := client.WatchCluster(cdsNameNewStyle, func(u xdsresource.ClusterUpdate, err error) { + updateCh3.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel3() + + // Configure the management server to return two cluster resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the all watchers. 
+ wantUpdate12 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + } + wantUpdate3 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: edsNameNewStyle, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh3, wantUpdate3); err != nil { + t.Fatal(err) + } +} + +// TestCDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. +func (s) TestCDSWatch_ResourceCaching(t *testing.T) { + overrideFedEnvVar(t) + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a cluster resource and have the watch + // callback push the received update on to a channel. + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + + // Configure the management server to return a single cluster + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. 
+ updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + // No request should get sent out as part of this watch. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestCDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an CDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + // No need to spin up a management server since we don't want the client to + // receive a response for the watch being registered by the test. + + // Create an xDS client talking to a non-existent management server. + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for a resource which is expected to be invoked with an + // error after the watch expiry timer fires. 
+ updateCh := testutils.NewChannel() + cdsCancel := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel() + + // Wait for the watch expiry timer to fire. + <-time.After(defaultTestWatchExpiryTimeout) + + // Verify that an empty update with the expected error is received. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantErr := fmt.Errorf("watch for resource %q of type Cluster timed out", cdsName) + if err := verifyClusterUpdate(ctx, updateCh, xdsresource.ClusterUpdateErrTuple{Err: wantErr}); err != nil { + t.Fatal(err) + } +} + +// TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the +// client receives a valid LDS response for the request that it sends. The test +// verifies that the behavior associated with the expiry timer (i.e, callback +// invocation with error) does not take place. +func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(nil) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for a cluster resource and have the watch + // callback push the received update on to a channel. 
+ updateCh := testutils.NewChannel() + cdsCancel := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel() + + // Configure the management server to return a single cluster resource, + // corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + } + if err := verifyClusterUpdate(ctx, updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Wait for the watch expiry timer to fire, and verify that the callback is + // not invoked. + <-time.After(defaultTestWatchExpiryTimeout) + if err := verifyNoClusterUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } +} + +// TestCDSWatch_ResourceRemoved covers the cases where two watchers exists for +// two different resources (one with an old style name and one with a new style +// name). One of these resources being watched is removed from the management +// server. The test verifies the following scenarios: +// 1. Removing a resource should trigger the watch callback associated with that +// resource with a resource removed error. It should not trigger the watch +// callback for an unrelated resource. +// 2. An update to other resource should result in the invocation of the watch +// callback associated with that resource. 
It should not result in the
+// invocation of the watch callback associated with the deleted resource.
+func (s) TestCDSWatch_ResourceRemoved(t *testing.T) {
+	overrideFedEnvVar(t)
+	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil)
+	defer cleanup()
+
+	// Create an xDS client with the above bootstrap contents.
+	client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
+	if err != nil {
+		t.Fatalf("Failed to create xDS client: %v", err)
+	}
+	defer client.Close()
+
+	// Register two watches for two cluster resources and have the
+	// callbacks push the received updates on to a channel.
+	resourceName1 := cdsName
+	updateCh1 := testutils.NewChannel()
+	cdsCancel1 := client.WatchCluster(resourceName1, func(u xdsresource.ClusterUpdate, err error) {
+		updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err})
+	})
+	defer cdsCancel1()
+	resourceName2 := cdsNameNewStyle
+	updateCh2 := testutils.NewChannel()
+	cdsCancel2 := client.WatchCluster(resourceName2, func(u xdsresource.ClusterUpdate, err error) {
+		updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err})
+	})
+	defer cdsCancel2()
+
+	// Configure the management server to return two cluster resources,
+	// corresponding to the registered watches.
+	resources := e2e.UpdateOptions{
+		NodeID: nodeID,
+		Clusters: []*v3clusterpb.Cluster{
+			e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone),
+			e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone),
+		},
+		SkipValidation: true,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if err := mgmtServer.Update(ctx, resources); err != nil {
+		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
+	}
+
+	// Verify the contents of the received update for both watchers.
+ wantUpdate1 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName1, + EDSServiceName: edsName, + }, + } + wantUpdate2 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName2, + EDSServiceName: edsNameNewStyle, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate1); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Remove the first cluster resource on the management server. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // The first watcher should receive a resource removed error, while the + // second watcher should not receive an update. + if err := verifyClusterUpdate(ctx, updateCh1, xdsresource.ClusterUpdateErrTuple{Err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "")}); err != nil { + t.Fatal(err) + } + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update the second cluster resource on the management server. The first + // watcher should not receive an update, while the second watcher should. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, "new-eds-resource", e2e.SecurityLevelNone)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName2, + EDSServiceName: "new-eds-resource", + }, + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestCDSWatch_NACKError covers the case where an update from the management +// server is NACK'ed by the xdsclient. The test verifies that the error is +// propagated to the watcher. +func (s) TestCDSWatch_NACKError(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a cluster resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + cdsCancel := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel() + + // Configure the management server to return a single cluster resource + // which is expected to be NACK'ed by the client. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{badClusterResource(cdsName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher. + u, err := updateCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ClusterUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr) + } +} + +// TestCDSWatch_PartialValid covers the case where a response from the +// management server contains both valid and invalid resources and is expected +// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding +// to the valid resource receive the update, while watchers corresponding to the +// invalid resource receive an error. +func (s) TestCDSWatch_PartialValid(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for cluster resources. The first watch is expected + // to receive an error because the received resource is NACK'ed. The second + // watch is expected to get a good update. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + badResourceName := cdsName + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(badResourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.SendContext(ctx, xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + goodResourceName := cdsNameNewStyle + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(goodResourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.SendContext(ctx, xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server with two cluster resources. One of these + // is a bad resource causing the update to be NACKed. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + badClusterResource(badResourceName, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(goodResourceName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which is + // watching the bad resource. + u, err := updateCh1.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ClusterUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr) + } + + // Verify that the watcher watching the good resource receives a good + // update. 
+ wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: goodResourceName, + EDSServiceName: edsName, + }, + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index d104090925a4..59589f6393da 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -73,8 +73,12 @@ const ( ldsName = "xdsclient-test-lds-resource" rdsName = "xdsclient-test-rds-resource" + cdsName = "xdsclient-test-cds-resource" + edsName = "xdsclient-test-eds-resource" ldsNameNewStyle = "xdstp:///envoy.config.listener.v3.Listener/xdsclient-test-lds-resource" rdsNameNewStyle = "xdstp:///envoy.config.route.v3.RouteConfiguration/xdsclient-test-rds-resource" + cdsNameNewStyle = "xdstp:///envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource" + edsNameNewStyle = "xdstp:///envoy.config.endpoint.v3.ClusterLoadAssignment/xdsclient-test-eds-resource" ) // badListenerResource returns a listener resource for the given name which does diff --git a/xds/internal/xdsclient/watchers_cluster_test.go b/xds/internal/xdsclient/watchers_cluster_test.go deleted file mode 100644 index 955bbe099b1f..000000000000 --- a/xds/internal/xdsclient/watchers_cluster_test.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// TestClusterWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name -// - an update is received after cancel() -func (s) TestClusterWatch(t *testing.T) { - testWatch(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) -} - -// TestClusterTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. -func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { - testTwoWatchSameResourceName(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) -} - -// TestClusterThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. -func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { - testThreeWatchDifferentResourceName(t, xdsresource.ClusterResource, - xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1", - xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2", - ) -} - -// TestClusterWatchAfterCache covers the case where watch is called after the update -// is in cache. -func (s) TestClusterWatchAfterCache(t *testing.T) { - testWatchAfterCache(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) -} - -// TestClusterWatchExpiryTimer tests the case where the client does not receive -// an CDS response for the request that it sends out. We want the watch callback -// to be invoked with an error once the watchExpiryTimer fires. 
-func (s) TestClusterWatchExpiryTimer(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, _ := testClientSetup(t, true) - clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName) - - u, err := clusterUpdateCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for cluster update: %v", err) - } - gotUpdate := u.(xdsresource.ClusterUpdateErrTuple) - if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, xdsresource.ClusterUpdate{}) { - t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) - } -} - -// TestClusterWatchExpiryTimerStop tests the case where the client does receive -// an CDS response for the request that it sends out. We want no error even -// after expiry timeout. -func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, true) - clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName) - - wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} - updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ - testCDSName: {Update: wantUpdate}, - }, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // Wait for an error, the error should never happen. 
- sCtx, sCancel := context.WithTimeout(ctx, defaultTestWatchExpiryTimeout) - defer sCancel() - if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestClusterResourceRemoved covers the cases: -// - an update is received after a watch() -// - another update is received, with one resource removed -// - this should trigger callback with resource removed error -// -// - one more update without the removed resource -// - the callback (above) shouldn't receive any update -func (s) TestClusterResourceRemoved(t *testing.T) { - testResourceRemoved(t, xdsresource.ClusterResource, - xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1", - xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2", - ) -} - -// TestClusterWatchNACKError covers the case that an update is NACK'ed, and the -// watcher should also receive the error. -func (s) TestClusterWatchNACKError(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName) - - wantError := fmt.Errorf("testing error") - updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: { - Err: wantError, - }}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, xdsresource.ClusterUpdate{}, wantError); err != nil { - t.Fatal(err) - } -} - -// TestClusterWatchPartialValid covers the case that a response contains both -// valid and invalid resources. This response will be NACK'ed by the xdsclient. 
-// But the watchers with valid resources should receive the update, those with -// invalida resources should receive an error. -func (s) TestClusterWatchPartialValid(t *testing.T) { - testWatchPartialValid(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName) -} From 439221d85ac92c5cfa7bb8768bed373436e225ed Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 18 Oct 2022 15:44:48 -0700 Subject: [PATCH 641/998] xdsclient: add a convenience type to synchronize execution of callbacks (#5702) --- xds/internal/xdsclient/callback_serializer.go | 65 +++++++ .../xdsclient/callback_serializer_test.go | 184 ++++++++++++++++++ 2 files changed, 249 insertions(+) create mode 100644 xds/internal/xdsclient/callback_serializer.go create mode 100644 xds/internal/xdsclient/callback_serializer_test.go diff --git a/xds/internal/xdsclient/callback_serializer.go b/xds/internal/xdsclient/callback_serializer.go new file mode 100644 index 000000000000..4c799e21638c --- /dev/null +++ b/xds/internal/xdsclient/callback_serializer.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "context" + + "google.golang.org/grpc/internal/buffer" +) + +// callbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. 
New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type callbackSerializer struct { + callbacks *buffer.Unbounded +} + +// newCallbackSerializer returns a new callbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the callbackSerializer. It is guaranteed that no +// callbacks will be executed once this context is canceled. +func newCallbackSerializer(ctx context.Context) *callbackSerializer { + t := &callbackSerializer{callbacks: buffer.NewUnbounded()} + go t.run(ctx) + return t +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +func (t *callbackSerializer) Schedule(f func(ctx context.Context)) { + t.callbacks.Put(f) +} + +func (t *callbackSerializer) run(ctx context.Context) { + for ctx.Err() == nil { + select { + case <-ctx.Done(): + return + case callback := <-t.callbacks.Get(): + t.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } +} diff --git a/xds/internal/xdsclient/callback_serializer_test.go b/xds/internal/xdsclient/callback_serializer_test.go new file mode 100644 index 000000000000..bf5339971be5 --- /dev/null +++ b/xds/internal/xdsclient/callback_serializer_test.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +// TestCallbackSerializer_Schedule_FIFO verifies that callbacks are executed in +// the same order in which they were scheduled. +func (s) TestCallbackSerializer_Schedule_FIFO(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + cs := newCallbackSerializer(ctx) + defer cancel() + + // We have two channels, one to record the order of scheduling, and the + // other to record the order of execution. We spawn a bunch of goroutines + // which record the order of scheduling and call the actual Schedule() + // method as well. The callbacks record the order of execution. + // + // We need to grab a lock to record order of scheduling to guarantee that + // the act of recording and the act of calling Schedule() happen atomically. + const numCallbacks = 100 + var mu sync.Mutex + scheduleOrderCh := make(chan int, numCallbacks) + executionOrderCh := make(chan int, numCallbacks) + for i := 0; i < numCallbacks; i++ { + go func(id int) { + mu.Lock() + defer mu.Unlock() + scheduleOrderCh <- id + cs.Schedule(func(ctx context.Context) { + select { + case <-ctx.Done(): + return + case executionOrderCh <- id: + } + }) + }(i) + } + + // Spawn a couple of goroutines to capture the order or scheduling and the + // order of execution. 
+ scheduleOrder := make([]int, numCallbacks) + executionOrder := make([]int, numCallbacks) + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + for i := 0; i < numCallbacks; i++ { + select { + case <-ctx.Done(): + return + case id := <-scheduleOrderCh: + scheduleOrder[i] = id + } + } + }() + go func() { + defer wg.Done() + for i := 0; i < numCallbacks; i++ { + select { + case <-ctx.Done(): + return + case id := <-executionOrderCh: + executionOrder[i] = id + } + } + }() + wg.Wait() + + if diff := cmp.Diff(executionOrder, scheduleOrder); diff != "" { + t.Fatalf("Callbacks are not executed in scheduled order. diff(-want, +got):\n%s", diff) + } +} + +// TestCallbackSerializer_Schedule_Concurrent verifies that all concurrently +// scheduled callbacks get executed. +func (s) TestCallbackSerializer_Schedule_Concurrent(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + cs := newCallbackSerializer(ctx) + defer cancel() + + // Schedule callbacks concurrently by calling Schedule() from goroutines. + // The execution of the callbacks call Done() on the waitgroup, which + // eventually unblocks the test and allows it to complete. + const numCallbacks = 100 + var wg sync.WaitGroup + wg.Add(numCallbacks) + for i := 0; i < numCallbacks; i++ { + go func() { + cs.Schedule(func(context.Context) { + wg.Done() + }) + }() + } + + // We call Wait() on the waitgroup from a goroutine so that we can select on + // the Wait() being unblocked and the overall test deadline expiring. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-ctx.Done(): + t.Fatal("Timeout waiting for all scheduled callbacks to be executed") + case <-done: + } +} + +// TestCallbackSerializer_Schedule_Close verifies that callbacks in the queue +// are not executed once Close() returns. 
+func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	cs := newCallbackSerializer(ctx)
+
+	// Schedule a callback which blocks until the context passed to it is
+	// canceled. It also closes a couple of channels to signal that it started
+	// and finished respectively.
+	firstCallbackStartedCh := make(chan struct{})
+	firstCallbackFinishCh := make(chan struct{})
+	cs.Schedule(func(ctx context.Context) {
+		close(firstCallbackStartedCh)
+		<-ctx.Done()
+		close(firstCallbackFinishCh)
+	})
+
+	// Wait for the first callback to start before scheduling the others.
+	<-firstCallbackStartedCh
+
+	// Schedule a bunch of callbacks. These should not be executed since the first
+	// one started earlier is blocked.
+	const numCallbacks = 10
+	errCh := make(chan error, numCallbacks)
+	for i := 0; i < numCallbacks; i++ {
+		cs.Schedule(func(_ context.Context) {
+			errCh <- fmt.Errorf("callback %d executed when not expected to", i)
+		})
+	}
+
+	// Ensure that none of the newer callbacks are executed at this point.
+	select {
+	case <-time.After(defaultTestShortTimeout):
+	case err := <-errCh:
+		t.Fatal(err)
+	}
+
+	// Cancel the context which will unblock the first callback. None of the
+	// other callbacks (which have not started executing at this point) should
+	// be executed after this.
+	cancel()
+	<-firstCallbackFinishCh
+
+	// Ensure that the newer callbacks are not executed.
+ select { + case <-time.After(defaultTestShortTimeout): + case err := <-errCh: + t.Fatal(err) + } +} From f88cc6594102e5e1ff0da696abc47ae231be302a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 18 Oct 2022 16:54:04 -0700 Subject: [PATCH 642/998] xdsclient: improve EDS watchers test (#5694) --- .../xdsclient/e2e_test/eds_watchers_test.go | 821 ++++++++++++++++++ .../xdsclient/watchers_endpoints_test.go | 119 --- .../xdsclient/watchers_federation_test.go | 18 + 3 files changed, 839 insertions(+), 119 deletions(-) create mode 100644 xds/internal/xdsclient/e2e_test/eds_watchers_test.go delete mode 100644 xds/internal/xdsclient/watchers_endpoints_test.go diff --git a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go new file mode 100644 index 000000000000..790e83778492 --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go @@ -0,0 +1,821 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package e2e_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +const ( + edsHost1 = "1.foo.bar.com" + edsHost2 = "2.foo.bar.com" + edsHost3 = "3.foo.bar.com" + edsPort1 = 1 + edsPort2 = 2 + edsPort3 = 3 +) + +// badEndpointsResource returns a endpoints resource for the given +// edsServiceName which contains an endpoint with a load_balancing weight of +// `0`. This is expected to be NACK'ed by the xDS client. +func badEndpointsResource(edsServiceName string, host string, ports []uint32) *v3endpointpb.ClusterLoadAssignment { + e := e2e.DefaultEndpoint(edsServiceName, host, ports) + e.Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + return e +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing an endpoints resource created using +// `badEndpointsResource`. 
+const wantEndpointsNACKErr = "EDS response contains an endpoint with zero weight" + +// verifyEndpointsUpdate waits for an update to be received on the provided +// update channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. +func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.EndpointsUpdateErrTuple) error { + u, err := updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("timeout when waiting for a endpoints resource from the management server: %v", err) + } + got := u.(xdsresource.EndpointsUpdateErrTuple) + if wantUpdate.Err != nil { + if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { + return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) + } + } + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw")} + if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { + return fmt.Errorf("received unepected diff in the endpoints resource update: (-want, got):\n%s", diff) + } + return nil +} + +// verifyNoEndpointsUpdate verifies that no endpoints update is received on the +// provided update channel, and returns an error if an update is received. +// +// A very short deadline is used while waiting for the update, as this function +// is intended to be used when an update is not expected. 
+func verifyNoEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("unexpected EndpointsUpdate: %v", u) + } + return nil +} + +// TestEDSWatch covers the case where a single endpoint exists for a single +// endpoints resource. The test verifies the following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. +// +// The test is run for old and new style names. +func (s) TestEDSWatch(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3endpointpb.ClusterLoadAssignment // The resource being watched. + updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update. + notWatchedResource *v3endpointpb.ClusterLoadAssignment // A resource which is not being watched. 
+ wantUpdate xdsresource.EndpointsUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: edsName, + watchedResource: e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}), + notWatchedResource: e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}), + wantUpdate: xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: edsNameNewStyle, + watchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}), + notWatchedResource: e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}), + wantUpdate: xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a endpoint resource and have the watch + // callback push the received update on to a channel. 
+ updateCh := testutils.NewChannel() + edsCancel := client.WatchEndpoints(test.resourceName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single endpoint + // resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyEndpointsUpdate(ctx, updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional endpoint + // resource, one that we are not interested in. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoEndpointsUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. 
+ edsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoEndpointsUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestEDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single endpoint resource. The test verifies the following +// scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. An update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3endpointpb.ClusterLoadAssignment // The resource being watched. + updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update. 
+ wantUpdateV1 xdsresource.EndpointsUpdateErrTuple + wantUpdateV2 xdsresource.EndpointsUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: edsName, + watchedResource: e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}), + wantUpdateV1: xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + wantUpdateV2: xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: edsNameNewStyle, + watchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}), + wantUpdateV1: xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + wantUpdateV2: xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + 
overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for the same endpoint resource and have the + // callbacks push the received updates on to a channel. + updateCh1 := testutils.NewChannel() + edsCancel1 := client.WatchEndpoints(test.resourceName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel1() + updateCh2 := testutils.NewChannel() + edsCancel2 := client.WatchEndpoints(test.resourceName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single endpoint + // resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyEndpointsUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. 
+ edsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoEndpointsUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + if err := verifyNoEndpointsUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyEndpointsUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoEndpointsUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestEDSWatch_ThreeWatchesForDifferentResourceNames covers the case with three +// watchers (two watchers for one resource, and the third watcher for another +// resource), exist across two endpoint configuration resources. The test verifies +// that an update from the management server containing both resources results +// in the invocation of all watch callbacks. +// +// The test is run with both old and new style names. +func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for the same endpoint resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + edsCancel1 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel1() + updateCh2 := testutils.NewChannel() + edsCancel2 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel2() + + // Register the third watch for a different endpoint resource. + updateCh3 := testutils.NewChannel() + edsCancel3 := client.WatchEndpoints(edsNameNewStyle, func(u xdsresource.EndpointsUpdate, err error) { + updateCh3.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel3() + + // Configure the management server to return two endpoint resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), + e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the all watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for all the watchers. 
+ wantUpdate := xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + } + if err := verifyEndpointsUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, updateCh3, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestEDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. +func (s) TestEDSWatch_ResourceCaching(t *testing.T) { + overrideFedEnvVar(t) + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for an endpoint resource and have the watch callback + // push the received update on to a channel. + updateCh1 := testutils.NewChannel() + edsCancel1 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel1() + + // Configure the management server to return a single endpoint resource, + // corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + } + if err := verifyEndpointsUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. 
+ updateCh2 := testutils.NewChannel() + edsCancel2 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel2() + if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + + // No request should get sent out as part of this watch. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestEDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an EDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + // No need to spin up a management server since we don't want the client to + // receive a response for the watch being registered by the test. + + // Create an xDS client talking to a non-existent management server. + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for a resource which is expected to fail with an error + // after the watch expiry timer fires. 
+ updateCh := testutils.NewChannel() + edsCancel := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel() + + // Wait for the watch expiry timer to fire. + <-time.After(defaultTestWatchExpiryTimeout) + + // Verify that an empty update with the expected error is received. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantErr := fmt.Errorf("watch for resource %q of type ClusterLoadAssignment timed out", rdsName) + if err := verifyEndpointsUpdate(ctx, updateCh, xdsresource.EndpointsUpdateErrTuple{Err: wantErr}); err != nil { + t.Fatal(err) + } +} + +// TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the +// client receives a valid EDS response for the request that it sends. The test +// verifies that the behavior associated with the expiry timer (i.e, callback +// invocation with error) does not take place. +func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(nil) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for an endpoint resource and have the watch callback + // push the received update on to a channel. 
+ updateCh := testutils.NewChannel() + edsCancel := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel() + + // Configure the management server to return a single endpoint resource, + // corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + } + if err := verifyEndpointsUpdate(ctx, updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Wait for the watch expiry timer to fire, and verify that the callback is + // not invoked. + <-time.After(defaultTestWatchExpiryTimeout) + if err := verifyNoEndpointsUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } +} + +// TestEDSWatch_NACKError covers the case where an update from the management +// server is NACK'ed by the xdsclient. The test verifies that the error is +// propagated to the watcher. +func (s) TestEDSWatch_NACKError(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
+ if err != nil {
+ t.Fatalf("Failed to create xDS client: %v", err)
+ }
+ defer client.Close()
+
+ // Register a watch for an endpoint resource and have the watch
+ // callback push the received update on to a channel.
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+ defer cancel()
+ updateCh := testutils.NewChannel()
+ edsCancel := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) {
+ updateCh.SendContext(ctx, xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err})
+ })
+ defer edsCancel()
+
+ // Configure the management server to return a single endpoint
+ // resource which is expected to be NACKed by the client.
+ resources := e2e.UpdateOptions{
+ NodeID: nodeID,
+ Endpoints: []*v3endpointpb.ClusterLoadAssignment{badEndpointsResource(edsName, edsHost1, []uint32{edsPort1})},
+ SkipValidation: true,
+ }
+ if err := mgmtServer.Update(ctx, resources); err != nil {
+ t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
+ }
+
+ // Verify that the expected error is propagated to the watcher.
+ u, err := updateCh.Receive(ctx)
+ if err != nil {
+ t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err)
+ }
+ gotErr := u.(xdsresource.EndpointsUpdateErrTuple).Err
+ if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) {
+ t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr)
+ }
+}
+
+// TestEDSWatch_PartialValid covers the case where a response from the
+// management server contains both valid and invalid resources and is expected
+// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding
+// to the valid resource receive the update, while watchers corresponding to the
+// invalid resource receive an error.
+func (s) TestEDSWatch_PartialValid(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for two endpoint resources. The first watch is + // expected to receive an error because the received resource is NACKed. + // The second watch is expected to get a good update. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + badResourceName := rdsName + updateCh1 := testutils.NewChannel() + edsCancel1 := client.WatchEndpoints(badResourceName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh1.SendContext(ctx, xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel1() + goodResourceName := ldsNameNewStyle + updateCh2 := testutils.NewChannel() + edsCancel2 := client.WatchEndpoints(goodResourceName, func(u xdsresource.EndpointsUpdate, err error) { + updateCh2.SendContext(ctx, xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer edsCancel2() + + // Configure the management server to return two endpoints resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + badEndpointsResource(badResourceName, edsHost1, []uint32{edsPort1}), + e2e.DefaultEndpoint(goodResourceName, edsHost1, []uint32{edsPort1}), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which + // requested for the bad resource. 
+ u, err := updateCh1.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err) + } + gotErr := u.(xdsresource.EndpointsUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr) + } + + // Verify that the watcher watching the good resource receives an update. + wantUpdate := xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{SubZone: "subzone"}, + Priority: 0, + Weight: 1, + }, + }, + }, + } + if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/watchers_endpoints_test.go b/xds/internal/xdsclient/watchers_endpoints_test.go deleted file mode 100644 index b0e84edcf487..000000000000 --- a/xds/internal/xdsclient/watchers_endpoints_test.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package xdsclient - -import ( - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - - "google.golang.org/grpc/xds/internal" -) - -var ( - testLocalities = []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - } -) - -// TestEndpointsWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name (which doesn't trigger callback) -// - an update is received after cancel() -func (s) TestEndpointsWatch(t *testing.T) { - testWatch(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) -} - -// TestEndpointsTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. -func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { - testTwoWatchSameResourceName(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) -} - -// TestEndpointsThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. -func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { - testThreeWatchDifferentResourceName(t, xdsresource.EndpointsResource, - xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName+"1", - xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[1]}}, testCDSName+"2", - ) -} - -// TestEndpointsWatchAfterCache covers the case where watch is called after the update -// is in cache. 
-func (s) TestEndpointsWatchAfterCache(t *testing.T) { - testWatchAfterCache(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) -} - -// TestEndpointsWatchExpiryTimer tests the case where the client does not receive -// an CDS response for the request that it sends out. We want the watch callback -// to be invoked with an error once the watchExpiryTimer fires. -func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, _ := testClientSetup(t, true) - endpointsUpdateCh, _ := newWatch(t, client, xdsresource.EndpointsResource, testCDSName) - - u, err := endpointsUpdateCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for endpoints update: %v", err) - } - gotUpdate := u.(xdsresource.EndpointsUpdateErrTuple) - if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, xdsresource.EndpointsUpdate{}) { - t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.Update, gotUpdate.Err) - } -} - -// TestEndpointsWatchNACKError covers the case that an update is NACK'ed, and -// the watcher should also receive the error. 
-func (s) TestEndpointsWatchNACKError(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - endpointsUpdateCh, _ := newWatch(t, client, xdsresource.EndpointsResource, testCDSName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.EndpointsResource, testCDSName) - - wantError := fmt.Errorf("testing error") - updateHandler.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{testCDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, xdsresource.EndpointsUpdate{}, wantError); err != nil { - t.Fatal(err) - } -} - -// TestEndpointsWatchPartialValid covers the case that a response contains both -// valid and invalid resources. This response will be NACK'ed by the xdsclient. -// But the watchers with valid resources should receive the update, those with -// invalida resources should receive an error. 
-func (s) TestEndpointsWatchPartialValid(t *testing.T) { - testWatchPartialValid(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}, testCDSName) -} diff --git a/xds/internal/xdsclient/watchers_federation_test.go b/xds/internal/xdsclient/watchers_federation_test.go index 302623e19945..1e3dbf7f1cf3 100644 --- a/xds/internal/xdsclient/watchers_federation_test.go +++ b/xds/internal/xdsclient/watchers_federation_test.go @@ -22,10 +22,28 @@ import ( "testing" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) +var ( + testLocalities = []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + } +) + func overrideFedEnvVar(t *testing.T) { oldFed := envconfig.XDSFederation envconfig.XDSFederation = true From 28fae96c98813777d02f25b2aa2d701b468b41c0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 18 Oct 2022 17:25:47 -0700 Subject: [PATCH 643/998] xdsclient: improve federation watchers test (#5696) --- xds/internal/xdsclient/authority_test.go | 7 + .../e2e_test/federation_watchers_test.go | 353 ++++++++++++++++++ .../xdsclient/watchers_federation_test.go | 118 ------ 3 files changed, 360 insertions(+), 118 deletions(-) create mode 100644 xds/internal/xdsclient/e2e_test/federation_watchers_test.go delete mode 100644 xds/internal/xdsclient/watchers_federation_test.go diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 5aa70a525038..b8704a9c0c7a 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ 
-25,6 +25,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -64,6 +65,12 @@ var ( } ) +func overrideFedEnvVar(t *testing.T) { + oldFed := envconfig.XDSFederation + envconfig.XDSFederation = true + t.Cleanup(func() { envconfig.XDSFederation = oldFed }) +} + // watchAndFetchNewController starts a CDS watch on the client for the given // resourceName, and tries to receive a new controller from the ctrlCh. // diff --git a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go new file mode 100644 index 000000000000..16db9da3b6a9 --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go @@ -0,0 +1,353 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package e2e_test + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" +) + +const testNonDefaultAuthority = "non-default-authority" + +// setupForFederationWatchersTest spins up two management servers, one for the +// default (empty) authority and another for a non-default authority. +// +// Returns the management server associated with the non-default authority, the +// nodeID to use, and the xDS client. +func setupForFederationWatchersTest(t *testing.T) (*e2e.ManagementServer, string, xdsclient.XDSClient) { + overrideFedEnvVar(t) + + // Start a management server as the default authority. + serverDefaultAuthority, err := e2e.StartManagementServer(nil) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(serverDefaultAuthority.Stop) + + // Start another management server as the other authority. 
+ serverNonDefaultAuthority, err := e2e.StartManagementServer(nil) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(serverNonDefaultAuthority.Stop) + + nodeID := uuid.New().String() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + Version: bootstrap.TransportV3, + NodeID: nodeID, + ServerURI: serverDefaultAuthority.Address, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + // Specify the address of the non-default authority. + Authorities: map[string]string{testNonDefaultAuthority: serverNonDefaultAuthority.Address}, + }) + if err != nil { + t.Fatalf("Failed to create bootstrap file: %v", err) + } + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + return serverNonDefaultAuthority, nodeID, client +} + +// TestFederation_ListenerResourceContextParamOrder covers the case of watching +// a Listener resource with the new style resource name and context parameters. +// The test registers watches for two resources which differ only in the order +// of context parameters in their URI. The server is configured to respond with +// a single resource with canonicalized context parameters. The test verifies +// that both watchers are notified. +func (s) TestFederation_ListenerResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + defer client.Close() + + var ( + // Two resource names only differ in context parameter order. 
+ resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.listener.v3.Listener/xdsclient-test-lds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.listener.v3.Listener/xdsclient-test-lds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for listener resources with the same query string, + // but context parameters in different order. + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(resourceName1, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management server for the non-default authority to return a + // single listener resource, corresponding to the watches registered above. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(resourceName1, "rds-resource")}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + // Verify the contents of the received update. 
+ if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestFederation_RouteConfigResourceContextParamOrder covers the case of +// watching a RouteConfiguration resource with the new style resource name and +// context parameters. The test registers watches for two resources which +// differ only in the order of context parameters in their URI. The server is +// configured to respond with a single resource with canonicalized context +// parameters. The test verifies that both watchers are notified. +func (s) TestFederation_RouteConfigResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + defer client.Close() + + var ( + // Two resource names only differ in context parameter order. + resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/xdsclient-test-rds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/xdsclient-test-rds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for route configuration resources with the same + // query string, but context parameters in different order. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(resourceName1, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(resourceName2, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + + // Configure the management server for the non-default authority to return a + // single route config resource, corresponding to the watches registered. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(resourceName1, "listener-resource", "cluster-resource")}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{"listener-resource"}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-resource": {Weight: 1}}, + }, + }, + }, + }, + }, + } + // Verify the contents of the received update. + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestFederation_ClusterResourceContextParamOrder covers the case of watching a +// Cluster resource with the new style resource name and context parameters. +// The test registers watches for two resources which differ only in the order +// of context parameters in their URI. The server is configured to respond with +// a single resource with canonicalized context parameters. The test verifies +// that both watchers are notified. +func (s) TestFederation_ClusterResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + defer client.Close() + + var ( + // Two resource names only differ in context parameter order. 
+ resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for cluster resources with the same query string, + // but context parameters in different order. + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(resourceName1, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(resourceName2, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server for the non-default authority to return a + // single cluster resource, corresponding to the watches registered. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName1, "eds-service-name", e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: "xdstp://non-default-authority/envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource?a=1&b=2", + EDSServiceName: "eds-service-name", + }, + } + // Verify the contents of the received update. 
+ if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestFederation_EndpointsResourceContextParamOrder covers the case of watching +// an Endpoints resource with the new style resource name and context parameters. +// The test registers watches for two resources which differ only in the order +// of context parameters in their URI. The server is configured to respond with +// a single resource with canonicalized context parameters. The test verifies +// that both watchers are notified. +func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + defer client.Close() + + var ( + // Two resource names only differ in context parameter order. + resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.endpoint.v3.ClusterLoadAssignment/xdsclient-test-eds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.endpoint.v3.ClusterLoadAssignment/xdsclient-test-eds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for endpoint resources with the same query string, + // but context parameters in different order. + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchEndpoints(resourceName1, func(u xdsresource.EndpointsUpdate, err error) { + updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchEndpoints(resourceName2, func(u xdsresource.EndpointsUpdate, err error) { + updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server for the non-default authority to return a + // single endpoints resource, corresponding to the watches registered. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(resourceName1, "localhost", []uint32{666})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := xdsresource.EndpointsUpdateErrTuple{ + Update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: "localhost:666", Weight: 1}}, + Weight: 1, + ID: internal.LocalityID{SubZone: "subzone"}, + }, + }, + }, + } + // Verify the contents of the received update. + if err := verifyEndpointsUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +func newStringP(s string) *string { + return &s +} + +// verifyRouteConfigUpdate waits for an update to be received on the provided +// update channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. 
+func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.RouteConfigUpdateErrTuple) error { + u, err := updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("timeout when waiting for a route configuration resource from the management server: %v", err) + } + got := u.(xdsresource.RouteConfigUpdateErrTuple) + if wantUpdate.Err != nil { + if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { + return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) + } + } + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw")} + if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { + return fmt.Errorf("received unepected diff in the route configuration resource update: (-want, got):\n%s", diff) + } + return nil +} diff --git a/xds/internal/xdsclient/watchers_federation_test.go b/xds/internal/xdsclient/watchers_federation_test.go deleted file mode 100644 index 1e3dbf7f1cf3..000000000000 --- a/xds/internal/xdsclient/watchers_federation_test.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package xdsclient - -import ( - "context" - "testing" - - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -var ( - testLocalities = []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{{Address: "addr1:314"}}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []xdsresource.Endpoint{{Address: "addr2:159"}}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - } -) - -func overrideFedEnvVar(t *testing.T) { - oldFed := envconfig.XDSFederation - envconfig.XDSFederation = true - t.Cleanup(func() { envconfig.XDSFederation = oldFed }) -} - -func testFedTwoWatchDifferentContextParameterOrder(t *testing.T, typ xdsresource.ResourceType, update interface{}) { - overrideFedEnvVar(t) - var ( - // Two resource names only differ in context parameter __order__. - resourceName1 = testutils.BuildResourceName(typ, testAuthority, "test-resource-name", nil) + "?a=1&b=2" - resourceName2 = testutils.BuildResourceName(typ, testAuthority, "test-resource-name", nil) + "?b=2&a=1" - ) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - updateCh, _ := newWatch(t, client, typ, resourceName1) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, resourceName1) - newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) - - // Start a watch on the second resource name. - updateCh2, _ := newWatchF(client, resourceName2) - - // Send an update on the first resoruce, both watchers should be updated. 
- newUpdateF(updateHandler, map[string]interface{}{resourceName1: update}) - verifyUpdateF(ctx, t, updateCh, update, nil) - verifyUpdateF(ctx, t, updateCh2, update, nil) -} - -// TestLDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. -func (s) TestLDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) { - testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ListenerResource, xdsresource.ListenerUpdate{RouteConfigName: testRDSName}) -} - -// TestRDSFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. -func (s) TestRDSFedTwoWatchDifferentContextParameterOrder(t *testing.T) { - testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - }) -} - -// TestClusterFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. 
-func (s) TestClusterFedTwoWatchDifferentContextParameterOrder(t *testing.T) { - testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}) -} - -// TestEndpointsFedTwoWatchDifferentContextParameterOrder covers the case with new style resource name -// - Two watches with the same query string, but in different order. The two -// watches should watch the same resource. -// - The response has the same query string, but in different order. The watch -// should still be notified. -func (s) TestEndpointsFedTwoWatchDifferentContextParameterOrder(t *testing.T) { - testFedTwoWatchDifferentContextParameterOrder(t, xdsresource.EndpointsResource, xdsresource.EndpointsUpdate{Localities: []xdsresource.Locality{testLocalities[0]}}) -} From 7c16802641d369c50267b7f018106630cdb5d807 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 19 Oct 2022 12:29:23 -0700 Subject: [PATCH 644/998] tests: refactor tests to use testutils helper functions (#5728) --- .../weightedtarget/weightedtarget_test.go | 93 +++++++++++-------- internal/balancergroup/balancergroup_test.go | 21 ++--- internal/testutils/balancer.go | 10 ++ .../balancer/clusterresolver/priority_test.go | 82 +++++++++------- .../balancer/clusterresolver/testutil_test.go | 71 -------------- 5 files changed, 119 insertions(+), 158 deletions(-) diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index ea76ea1297cb..c7c04f0da232 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -19,6 +19,7 @@ package weightedtarget import ( + "context" "encoding/json" "errors" "fmt" @@ -40,6 +41,10 @@ import ( "google.golang.org/grpc/serviceconfig" ) +const ( + defaultTestTimeout = 5 * time.Second +) + type s struct { grpctest.Tester } @@ -58,6 +63,20 @@ func newTestConfigBalancerBuilder() *testConfigBalancerBuilder { } } +// pickAndCheckError returns a 
function which takes a picker, invokes the Pick() method +// multiple times and ensures that the error returned by the picker matches the provided error. +func pickAndCheckError(want error) func(balancer.Picker) error { + const rpcCount = 5 + return func(p balancer.Picker) error { + for i := 0; i < rpcCount; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), want.Error()) { + return fmt.Errorf("picker.Pick() returned error: %v, want: %v", err, want) + } + } + return nil + } +} + func (t *testConfigBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { rr := t.Builder.Build(cc, opts) return &testConfigBalancer{ @@ -289,13 +308,6 @@ func (s) TestWeightedTarget(t *testing.T) { } } -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - // TestWeightedTarget_OneSubBalancer_AddRemoveBackend tests the case where we // have a weighted target balancer will one sub-balancer, and we add and remove // backends from the subBalancer. @@ -366,7 +378,7 @@ func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { // Test round robin pick. want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -455,7 +467,7 @@ func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) { // Test roundrobin on the last picker. 
want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } } @@ -536,7 +548,7 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { // Test roundrobin on the last picker. RPCs should be sent equally to all // backends. want := []balancer.SubConn{sc1, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -544,7 +556,7 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) p = <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc1, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -566,19 +578,19 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) p = <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Turn sc1's connection down. 
- scConnErr := errors.New("subConn connection error") + wantSubConnErr := errors.New("subConn connection error") wtb.UpdateSubConnState(sc1, balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, - ConnectionError: scConnErr, + ConnectionError: wantSubConnErr, }) p = <-cc.NewPickerCh want = []balancer.SubConn{sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -594,13 +606,13 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { // Turn all connections down. wtb.UpdateSubConnState(sc4, balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, - ConnectionError: scConnErr, + ConnectionError: wantSubConnErr, }) - p = <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { - t.Fatalf("want pick error %q, got error %q", scConnErr, err) - } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil { + t.Fatal(err) } } @@ -680,7 +692,7 @@ func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *test // Test roundrobin on the last picker. Twice the number of RPCs should be // sent to cluster_1 when compared to cluster_2. 
want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } } @@ -757,7 +769,7 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { p := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -797,15 +809,15 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scRemoved) } want = []balancer.SubConn{sc1, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Move balancer 3 into transient failure. - scConnErr := errors.New("subConn connection error") + wantSubConnErr := errors.New("subConn connection error") wtb.UpdateSubConnState(sc3, balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, - ConnectionError: scConnErr, + ConnectionError: wantSubConnErr, }) <-cc.NewPickerCh @@ -833,16 +845,16 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { // Removing a subBalancer causes the weighted target LB policy to push a new // picker which ensures that the removed subBalancer is not picked for RPCs. 
- p = <-cc.NewPickerCh scRemoved = <-cc.RemoveSubConnCh if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) } - for i := 0; i < 5; i++ { - if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { - t.Fatalf("want pick error %q, got error %q", scConnErr, err) - } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil { + t.Fatal(err) } } @@ -922,7 +934,7 @@ func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing // Test roundrobin on the last picker. Twice the number of RPCs should be // sent to cluster_1 when compared to cluster_2. want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -958,7 +970,7 @@ func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing // Weight change causes a new picker to be pushed to the channel. p = <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { t.Fatalf("want %v, got %v", want, err) } } @@ -1077,21 +1089,21 @@ func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *tes // Set both subconn to TransientFailure, this will put both sub-balancers in // transient failure. 
- scConnErr := errors.New("subConn connection error") + wantSubConnErr := errors.New("subConn connection error") wtb.UpdateSubConnState(sc1, balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, - ConnectionError: scConnErr, + ConnectionError: wantSubConnErr, }) <-cc.NewPickerCh wtb.UpdateSubConnState(sc2, balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, - ConnectionError: scConnErr, + ConnectionError: wantSubConnErr, }) p := <-cc.NewPickerCh for i := 0; i < 5; i++ { - if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { - t.Fatalf("want pick error %q, got error %q", scConnErr, err) + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), wantSubConnErr.Error()) { + t.Fatalf("picker.Pick() returned error: %v, want: %v", err, wantSubConnErr) } } @@ -1104,9 +1116,8 @@ func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *tes } for i := 0; i < 5; i++ { - r, err := p.Pick(balancer.PickInfo{}) - if err == nil || !strings.Contains(err.Error(), scConnErr.Error()) { - t.Fatalf("want pick error %q, got result %v, err %q", scConnErr, r, err) + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), wantSubConnErr.Error()) { + t.Fatalf("picker.Pick() returned error: %v, want: %v", err, wantSubConnErr) } } } diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index 566ffc386c04..1228f85ad986 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -67,13 +67,6 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - // Create a new balancer group, add balancer and backends, but not start. 
// - b1, weight 2, backends [0,1] // - b2, weight 1, backends [2,3] @@ -116,7 +109,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -158,7 +151,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[1]], m2[testBackendAddrs[2]], } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } } @@ -241,7 +234,7 @@ func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregat m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -263,7 +256,7 @@ func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregat want = []balancer.SubConn{ m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -304,7 +297,7 @@ func (s) TestBalancerGroup_locality_caching(t *testing.T) { // addr2 is down, b2 only has addr3 in READY state. 
addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]], } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } @@ -452,7 +445,7 @@ func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *test addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]], } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } } @@ -583,7 +576,7 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) { want := []balancer.SubConn{ m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index d45486abd251..95ec79616eff 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -339,6 +339,16 @@ func IsRoundRobin(want []balancer.SubConn, f func() balancer.SubConn) error { return nil } +// SubConnFromPicker returns a function which returns a SubConn by calling the +// Pick() method of the provided picker. There is no caching of SubConns here. +// Every invocation of the returned function results in a new pick. +func SubConnFromPicker(p balancer.Picker) func() balancer.SubConn { + return func() balancer.SubConn { + scst, _ := p.Pick(balancer.PickInfo{}) + return scst.SubConn + } +} + // ErrTestConstPicker is error returned by test const picker. 
var ErrTestConstPicker = fmt.Errorf("const picker error") diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 0ba5e1e80946..ce3df8fdfad3 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -117,7 +117,9 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err) } @@ -139,8 +141,8 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { select { case p := <-cc.NewPickerCh: // If we do get a new picker, ensure it is still a p1 picker. - if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, subConnFromPicker(p)); err != nil { - t.Fatal(err.Error()) + if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, testutils.SubConnFromPicker(p)); err != nil { + t.Fatal(err) } default: // No new picker; we were previously using p1 and should still be using @@ -165,8 +167,8 @@ func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { select { case p := <-cc.NewPickerCh: // If we do get a new picker, ensure it is still a p1 picker. 
- if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, subConnFromPicker(p)); err != nil { - t.Fatal(err.Error()) + if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, testutils.SubConnFromPicker(p)); err != nil { + t.Fatal(err) } default: // No new picker; we were previously using p1 and should still be using @@ -201,7 +203,9 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { t.Fatal(err) } @@ -216,7 +220,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err) } @@ -238,8 +242,8 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { select { case p := <-cc.NewPickerCh: // If we do get a new picker, ensure it is still a p1 picker. - if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, subConnFromPicker(p)); err != nil { - t.Fatal(err.Error()) + if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, testutils.SubConnFromPicker(p)); err != nil { + t.Fatal(err) } default: // No new picker; we were previously using p1 and should still be using @@ -262,7 +266,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. 
- if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { t.Fatal(err) } @@ -280,7 +284,7 @@ func (s) TestEDSPriority_SwitchPriority(t *testing.T) { // Should get an update with 1's old picker, to override 2's old picker. want := errors.New("last connection error: subConn connection error") - if err := testErrPickerFromCh(cc.NewPickerCh, want); err != nil { + if err := cc.WaitForPickerWithErr(ctx, want); err != nil { t.Fatal(err) } @@ -318,8 +322,10 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { }) // Test pick failure. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() want := errors.New("last connection error: subConn connection error") - if err := testErrPickerFromCh(cc.NewPickerCh, want); err != nil { + if err := cc.WaitForPickerWithErr(ctx, want); err != nil { t.Fatal(err) } @@ -338,7 +344,7 @@ func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { t.Fatal(err) } @@ -380,7 +386,9 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { t.Fatal(err) } @@ -415,7 +423,7 @@ func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { } // Test pick with 0. 
- if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { t.Fatal(err) } } @@ -467,7 +475,9 @@ func (s) TestEDSPriority_InitTimeout(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err) } } @@ -493,7 +503,9 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { t.Fatal(err) } @@ -509,7 +521,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err) } @@ -522,7 +534,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { } // Test roundrobin with only p0 subconns. 
- if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { t.Fatal(err) } @@ -542,7 +554,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only two p0 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0, sc2}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc0, sc2); err != nil { t.Fatal(err) } @@ -558,7 +570,7 @@ func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc3, sc4}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc3, sc4); err != nil { t.Fatal(err) } } @@ -590,7 +602,9 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { t.Fatal(err) } @@ -606,7 +620,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { // time.Sleep(time.Second) // Test pick return TransientFailure. 
- if err := testErrPickerFromCh(cc.NewPickerCh, priority.ErrAllPrioritiesRemoved); err != nil { + if err := cc.WaitForPickerWithErr(ctx, priority.ErrAllPrioritiesRemoved); err != nil { t.Fatal(err) } @@ -635,7 +649,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { edsb.UpdateSubConnState(sc11, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc11}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc11); err != nil { t.Fatal(err) } @@ -651,7 +665,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { } // Test pick return TransientFailure. - if err := testErrPickerFromCh(cc.NewPickerCh, balancer.ErrNoSubConnAvailable); err != nil { + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { t.Fatal(err) } @@ -661,7 +675,7 @@ func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { edsb.UpdateSubConnState(sc01, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc01}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc01); err != nil { t.Fatal(err) } @@ -697,7 +711,9 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err) } @@ -722,7 +738,7 @@ func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { t.Fatal(err) } } @@ -748,7 +764,9 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err) } @@ -775,7 +793,7 @@ func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc2}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { t.Fatal(err) } } @@ -865,7 +883,7 @@ func (s) TestFallbackToDNS(t *testing.T) { edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc0}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { t.Fatal(err) } @@ -893,7 +911,7 @@ func (s) TestFallbackToDNS(t *testing.T) { edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - if err := testRoundRobinPickerFromCh(cc.NewPickerCh, []balancer.SubConn{sc1}); err != nil { + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err) } diff --git a/xds/internal/balancer/clusterresolver/testutil_test.go b/xds/internal/balancer/clusterresolver/testutil_test.go index 9c51db49f921..2792f802b258 100644 --- a/xds/internal/balancer/clusterresolver/testutil_test.go +++ b/xds/internal/balancer/clusterresolver/testutil_test.go @@ -19,16 +19,12 @@ package clusterresolver import ( "fmt" "net" - "reflect" "strconv" - "time" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" typepb "github.com/envoyproxy/go-control-plane/envoy/type" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -115,70 +111,3 @@ func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsresource.Endpoint } return endpoints } - -// testPickerFromCh receives pickers from the channel, and check if their -// behaviors are as expected (that the given function returns nil err). -// -// It returns nil if one picker has the correct behavior. -// -// It returns error when there's no picker from channel after 1 second timeout, -// and the error returned is the mismatch error from the previous picker. 
-func testPickerFromCh(ch chan balancer.Picker, f func(balancer.Picker) error) error { - var ( - p balancer.Picker - err error - ) - for { - select { - case p = <-ch: - case <-time.After(defaultTestTimeout): - // TODO: this function should take a context, and use the context - // here, instead of making a new timer. - return fmt.Errorf("timeout waiting for picker with expected behavior, error from previous picker: %v", err) - } - - err = f(p) - if err == nil { - return nil - } - } -} - -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - -// testRoundRobinPickerFromCh receives pickers from the channel, and check if -// their behaviors are round-robin of want. -// -// It returns nil if one picker has the correct behavior. -// -// It returns error when there's no picker from channel after 1 second timeout, -// and the error returned is the mismatch error from the previous picker. -func testRoundRobinPickerFromCh(ch chan balancer.Picker, want []balancer.SubConn) error { - return testPickerFromCh(ch, func(p balancer.Picker) error { - return testutils.IsRoundRobin(want, subConnFromPicker(p)) - }) -} - -// testErrPickerFromCh receives pickers from the channel, and check if they -// return the wanted error. -// -// It returns nil if one picker has the correct behavior. -// -// It returns error when there's no picker from channel after 1 second timeout, -// and the error returned is the mismatch error from the previous picker. 
-func testErrPickerFromCh(ch chan balancer.Picker, want error) error { - return testPickerFromCh(ch, func(p balancer.Picker) error { - for i := 0; i < 5; i++ { - _, err := p.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(err, want) { - return fmt.Errorf("picker.Pick, got err %q, want err %q", err, want) - } - } - return nil - }) -} From f51d21267df53c7f4e3533e18fdf5b61c76e005d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 19 Oct 2022 13:31:56 -0700 Subject: [PATCH 645/998] xdsclient: improve RDS watchers test (#5692) --- .../e2e_test/federation_watchers_test.go | 25 - .../xdsclient/e2e_test/rds_watchers_test.go | 863 ++++++++++++++++++ xds/internal/xdsclient/watchers_route_test.go | 122 --- 3 files changed, 863 insertions(+), 147 deletions(-) create mode 100644 xds/internal/xdsclient/e2e_test/rds_watchers_test.go delete mode 100644 xds/internal/xdsclient/watchers_route_test.go diff --git a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go index 16db9da3b6a9..91c92c6fd01a 100644 --- a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go @@ -22,8 +22,6 @@ import ( "fmt" "testing" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/bootstrap" @@ -328,26 +326,3 @@ func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { func newStringP(s string) *string { return &s } - -// verifyRouteConfigUpdate waits for an update to be received on the provided -// update channel and verifies that it matches the expected update. -// -// Returns an error if no update is received before the context deadline expires -// or the received update does not match the expected one. 
-func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.RouteConfigUpdateErrTuple) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for a route configuration resource from the management server: %v", err) - } - got := u.(xdsresource.RouteConfigUpdateErrTuple) - if wantUpdate.Err != nil { - if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { - return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) - } - } - cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw")} - if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { - return fmt.Errorf("received unepected diff in the route configuration resource update: (-want, got):\n%s", diff) - } - return nil -} diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go new file mode 100644 index 000000000000..3f4dd419f96c --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go @@ -0,0 +1,863 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package e2e_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +// badRouteConfigResource returns a RouteConfiguration resource for the given +// routeName which contains a retry config with num_retries set to `0`. This is +// expected to be NACK'ed by the xDS client. 
+func badRouteConfigResource(routeName, ldsTarget, clusterName string) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}}, + RetryPolicy: &v3routepb.RetryPolicy{ + NumRetries: &wrapperspb.UInt32Value{Value: 0}, + }, + }}, + } +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing a route configuration resource created using +// `badRouteConfigResource`. +const wantRouteConfigNACKErr = "received route is invalid: retry_policy.num_retries = 0; must be >= 1" + +// verifyRouteConfigUpdate waits for an update to be received on the provided +// update channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. 
+func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.RouteConfigUpdateErrTuple) error { + u, err := updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("timeout when waiting for a route configuration resource from the management server: %v", err) + } + got := u.(xdsresource.RouteConfigUpdateErrTuple) + if wantUpdate.Err != nil { + if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { + return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) + } + } + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw")} + if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { + return fmt.Errorf("received unepected diff in the route configuration resource update: (-want, got):\n%s", diff) + } + return nil +} + +// verifyNoRouteConfigUpdate verifies that no route configuration update is +// received on the provided update channel, and returns an error if an update is +// received. +// +// A very short deadline is used while waiting for the update, as this function +// is intended to be used when an update is not expected. +func verifyNoRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("unexpected RouteConfigUpdate: %v", u) + } + return nil +} + +// TestRDSWatch covers the case where a single watcher exists for a single route +// configuration resource. The test verifies the following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. 
An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. +// +// The test is run for old and new style names. +func (s) TestRDSWatch(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3routepb.RouteConfiguration // The resource being watched. + updatedWatchedResource *v3routepb.RouteConfiguration // The watched resource after an update. + notWatchedResource *v3routepb.RouteConfiguration // A resource which is not being watched. + wantUpdate xdsresource.RouteConfigUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: rdsName, + watchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, "new-cds-resource"), + notWatchedResource: e2e.DefaultRouteConfig("unsubscribed-rds-resource", ldsName, cdsName), + wantUpdate: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: rdsNameNewStyle, + watchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, cdsNameNewStyle), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, "new-cds-resource"), + notWatchedResource: e2e.DefaultRouteConfig("unsubscribed-rds-resource", ldsNameNewStyle, cdsNameNewStyle), + wantUpdate: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: 
[]*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 1}}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a route configuration resource and have the + // watch callback push the received update on to a channel. + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(test.resourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single route + // configuration resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyRouteConfigUpdate(ctx, updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional route + // configuration resource, one that we are not interested in. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. + rdsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestRDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single route configuration resource. The test verifies the +// following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. An update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3routepb.RouteConfiguration // The resource being watched. 
+ updatedWatchedResource *v3routepb.RouteConfiguration // The watched resource after an update. + wantUpdateV1 xdsresource.RouteConfigUpdateErrTuple + wantUpdateV2 xdsresource.RouteConfigUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: rdsName, + watchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, "new-cds-resource"), + wantUpdateV1: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + }, + wantUpdateV2: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 1}}, + }, + }, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: rdsNameNewStyle, + watchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, cdsNameNewStyle), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, "new-cds-resource"), + wantUpdateV1: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 1}}, + }, + }, + }, + }, + }, + }, + wantUpdateV2: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + 
VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 1}}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for the same route configuration resource + // and have the callbacks push the received updates on to a channel. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(test.resourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(test.resourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single route + // configuration resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. 
+ if err := verifyRouteConfigUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. + rdsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestRDSWatch_ThreeWatchesForDifferentResourceNames covers the case with three +// watchers (two watchers for one resource, and the third watcher for another +// resource), exist across two route configuration resources. The test verifies +// that an update from the management server containing both resources results +// in the invocation of all watch callbacks. +// +// The test is run with both old and new style names. 
+func (s) TestRDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for the same route configuration resource + // and have the callbacks push the received updates on to a channel. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + + // Register the third watch for a different route configuration resource. + updateCh3 := testutils.NewChannel() + rdsCancel3 := client.WatchRouteConfig(rdsNameNewStyle, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh3.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel3() + + // Configure the management server to return two route configuration + // resources, corresponding to the registered watches. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{ + e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + e2e.DefaultRouteConfig(rdsNameNewStyle, ldsName, cdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the all watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for all the watchers. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh3, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestRDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. 
+func (s) TestRDSWatch_ResourceCaching(t *testing.T) { + overrideFedEnvVar(t) + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a route configuration resource and have the watch + // callback push the received update on to a channel. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + + // Configure the management server to return a single route configuration + // resource, corresponding to the one we registered a watch for. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, cdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + // No request should get sent out as part of this watch. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestRDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an RDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + // No need to spin up a management server since we don't want the client to + // receive a response for the watch being registered by the test. + + // Create an xDS client talking to a non-existent management server. + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for a resource which is expected to fail with an error + // after the watch expiry timer fires. + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel() + + // Wait for the watch expiry timer to fire. + <-time.After(defaultTestWatchExpiryTimeout) + + // Verify that an empty update with the expected error is received. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantErr := fmt.Errorf("watch for resource %q of type RouteConfiguration timed out", rdsName) + if err := verifyRouteConfigUpdate(ctx, updateCh, xdsresource.RouteConfigUpdateErrTuple{Err: wantErr}); err != nil { + t.Fatal(err) + } +} + +// TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the +// client receives a valid RDS response for the request that it sends. The test +// verifies that the behavior associated with the expiry timer (i.e, callback +// invocation with error) does not take place. +func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(nil) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + + // Register a watch for a route configuration resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel() + + // Configure the management server to return a single route configuration + // resource, corresponding to the one we registered a watch for. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, cdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Wait for the watch expiry timer to fire, and verify that the callback is + // not invoked. + <-time.After(defaultTestWatchExpiryTimeout) + if err := verifyNoRouteConfigUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } +} + +// TestRDSWatch_NACKError covers the case where an update from the management +// server is NACK'ed by the xdsclient. The test verifies that the error is +// propagated to the watcher. +func (s) TestRDSWatch_NACKError(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register a watch for a route configuration resource and have the watch + // callback push the received update on to a channel. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.SendContext(ctx, xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel() + + // Configure the management server to return a single route configuration + // resource which is expected to be NACKed by the client. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{badRouteConfigResource(rdsName, ldsName, cdsName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher. + u, err := updateCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a route configuration resource from the management server: %v", err) + } + gotErr := u.(xdsresource.RouteConfigUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantRouteConfigNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantRouteConfigNACKErr) + } +} + +// TestRDSWatch_PartialValid covers the case where a response from the +// management server contains both valid and invalid resources and is expected +// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding +// to the valid resource receive the update, while watchers corresponding to the +// invalid resource receive an error. +func (s) TestRDSWatch_PartialValid(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for route configuration resources. The first watch + // is expected to receive an error because the received resource is NACKed. + // The second watch is expected to get a good update. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + badResourceName := rdsName + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(badResourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.SendContext(ctx, xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + goodResourceName := rdsNameNewStyle + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(goodResourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.SendContext(ctx, xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + + // Configure the management server to return two route configuration + // resources, corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{ + badRouteConfigResource(badResourceName, ldsName, cdsName), + e2e.DefaultRouteConfig(goodResourceName, ldsName, cdsName), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which + // requested for the bad resource. 
+ u, err := updateCh1.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a route configuration resource from the management server: %v", err) + } + gotErr := u.(xdsresource.RouteConfigUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantRouteConfigNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantRouteConfigNACKErr) + } + + // Verify that the watcher watching the good resource receives a good + // update. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/watchers_route_test.go b/xds/internal/xdsclient/watchers_route_test.go deleted file mode 100644 index 669785084c27..000000000000 --- a/xds/internal/xdsclient/watchers_route_test.go +++ /dev/null @@ -1,122 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package xdsclient - -import ( - "context" - "fmt" - "testing" - - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// TestRDSWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name (which doesn't trigger callback) -// - an update is received after cancel() -func (s) TestRDSWatch(t *testing.T) { - testWatch(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - }, testRDSName) -} - -// TestRDSTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. -func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { - testTwoWatchSameResourceName(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - }, testRDSName) -} - -// TestRDSThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. 
-func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { - testThreeWatchDifferentResourceName(t, xdsresource.RouteConfigResource, - xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "1": {Weight: 1}}}}, - }, - }, - }, testRDSName+"1", - xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName + "2": {Weight: 1}}}}, - }, - }, - }, testRDSName+"2", - ) -} - -// TestRDSWatchAfterCache covers the case where watch is called after the update -// is in cache. -func (s) TestRDSWatchAfterCache(t *testing.T) { - testWatchAfterCache(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - }, testRDSName) -} - -// TestRouteWatchNACKError covers the case that an update is NACK'ed, and the -// watcher should also receive the error. 
-func (s) TestRouteWatchNACKError(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - rdsUpdateCh, _ := newWatch(t, client, xdsresource.RouteConfigResource, testRDSName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.RouteConfigResource, testRDSName) - - wantError := fmt.Errorf("testing error") - updateHandler.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{testRDSName: {Err: wantError}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, xdsresource.RouteConfigUpdate{}, wantError); err != nil { - t.Fatal(err) - } -} - -// TestRouteWatchPartialValid covers the case that a response contains both -// valid and invalid resources. This response will be NACK'ed by the xdsclient. -// But the watchers with valid resources should receive the update, those with -// invalida resources should receive an error. 
-func (s) TestRouteWatchPartialValid(t *testing.T) { - testWatchPartialValid(t, xdsresource.RouteConfigResource, xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - }, testRDSName) -} From 9127159caf5a3879dad56b795938fde3bc0a7eaa Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 20 Oct 2022 09:29:17 -0700 Subject: [PATCH 646/998] client: synchronously verify server preface in newClientTransport (#5731) --- clientconn.go | 119 +++++++++----------------- internal/grpcsync/oncefunc.go | 32 +++++++ internal/grpcsync/oncefunc_test.go | 53 ++++++++++++ internal/transport/http2_client.go | 120 +++++++++++++++------------ internal/transport/transport.go | 4 +- internal/transport/transport_test.go | 75 +++++++++++++++-- test/end2end_test.go | 86 ------------------- 7 files changed, 266 insertions(+), 223 deletions(-) create mode 100644 internal/grpcsync/oncefunc.go create mode 100644 internal/grpcsync/oncefunc_test.go diff --git a/clientconn.go b/clientconn.go index b75d6d72e0f5..4a5dd561d345 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1228,38 +1228,33 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // address was not successfully connected, or updates ac appropriately with the // new transport. func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - // TODO: Delete prefaceReceived and move the logic to wait for it into the - // transport. 
- prefaceReceived := grpcsync.NewEvent() - connClosed := grpcsync.NewEvent() - addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - hcStarted := false // protected by ac.mu - onClose := func() { + onClose := grpcsync.OnceFunc(func() { ac.mu.Lock() defer ac.mu.Unlock() - defer connClosed.Fire() - defer hcancel() - if !hcStarted || hctx.Err() != nil { - // We didn't start the health check or set the state to READY, so - // no need to do anything else here. - // - // OR, we have already cancelled the health check context, meaning - // we have already called onClose once for this transport. In this - // case it would be dangerous to clear the transport and update the - // state, since there may be a new transport in this addrConn. + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. return } ac.transport = nil - // Refresh the name resolver + // Refresh the name resolver on any connection loss. ac.cc.resolveNow(resolver.ResolveNowOptions{}) - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - } - + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. 
+ ac.updateConnectivityState(connectivity.Idle, nil) + }) onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) @@ -1271,7 +1266,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. hcancel() @@ -1279,66 +1274,34 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne return err } - select { - case <-connectCtx.Done(): - // We didn't get the preface in time. + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. - newTr.Close(transport.ErrConnClosing) - if connectCtx.Err() == context.DeadlineExceeded { - err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) - return err - } + go newTr.Close(transport.ErrConnClosing) return nil - case <-prefaceReceived.Done(): - // We got the preface - huzzah! things are good. - ac.mu.Lock() - defer ac.mu.Unlock() - if connClosed.HasFired() { - // onClose called first; go idle but do nothing else. 
- if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - } - if ac.state == connectivity.Shutdown { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. - go newTr.Close(transport.ErrConnClosing) - return nil - } - ac.curAddr = addr - ac.transport = newTr - hcStarted = true - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) return nil - case <-connClosed.Done(): - // The transport has already closed. If we received the preface, too, - // this is not an error and go idle. - select { - case <-prefaceReceived.Done(): - ac.mu.Lock() - defer ac.mu.Unlock() - - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - default: - return errors.New("connection closed before server preface received") - } } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. 
+ return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health diff --git a/internal/grpcsync/oncefunc.go b/internal/grpcsync/oncefunc.go new file mode 100644 index 000000000000..6635f7bca96d --- /dev/null +++ b/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/internal/grpcsync/oncefunc_test.go b/internal/grpcsync/oncefunc_test.go new file mode 100644 index 000000000000..2b0db8d3eaa3 --- /dev/null +++ b/internal/grpcsync/oncefunc_test.go @@ -0,0 +1,53 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" + "sync/atomic" + "testing" + "time" +) + +// TestOnceFunc tests that a OnceFunc is executed only once even with multiple +// simultaneous callers of it. +func (s) TestOnceFunc(t *testing.T) { + var v int32 + inc := OnceFunc(func() { atomic.AddInt32(&v, 1) }) + + const numWorkers = 100 + var wg sync.WaitGroup // Blocks until all workers have called inc. + wg.Add(numWorkers) + + block := NewEvent() // Signal to worker goroutines to call inc + + for i := 0; i < numWorkers; i++ { + go func() { + <-block.Done() // Wait for a signal. + inc() // Call the OnceFunc. + wg.Done() + }() + } + time.Sleep(time.Millisecond) // Allow goroutines to get to the block. + block.Fire() // Unblock them. + wg.Wait() // Wait for them to complete. + if v != 1 { + t.Fatalf("OnceFunc() called %v times; want 1", v) + } +} diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 256fcb71f47a..d518b07e16f7 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" @@ -100,10 +101,6 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. 
- onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 @@ -196,7 +193,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -218,12 +215,35 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. 
+ if connectCtx.Err() != nil { + // connectCtx expired before exiting the function. Hard close the connection. + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -255,15 +275,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -318,16 +330,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts kp: kp, statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -366,21 +377,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. 
Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -400,14 +422,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -907,19 +927,15 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. 
-// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. t.onClose() t.state = closing streams := t.activeStreams @@ -1509,33 +1525,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) 
- if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 6c3ba8515940..e21587b5321c 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -573,8 +573,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 8cd0d4174405..16bbf8c8ac3f 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -452,7 +452,7 @@ func setUpWithOptions(t *testing.T, port int, sc *ServerConfig, ht hType, copts copts.ChannelzParentID = channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil) connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func() {}, func(GoAwayReason) {}, func() {}) + ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}, func() {}) if connErr != nil { cancel() // Do not cancel in success path. 
t.Fatalf("failed to create transport: %v", connErr) @@ -474,10 +474,16 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.C close(connCh) return } + framer := http2.NewFramer(conn, conn) + if err := framer.WriteSettings(); err != nil { + t.Errorf("Error at server-side while writing settings: %v", err) + close(connCh) + return + } connCh <- conn }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func() {}, func(GoAwayReason) {}, func() {}) + tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) if err != nil { cancel() // Do not cancel in success path. // Server clean-up. @@ -1248,6 +1254,59 @@ func (s) TestServerWithMisbehavedClient(t *testing.T) { } } +func (s) TestClientHonorsConnectContext(t *testing.T) { + // Create a server that will not send a preface. + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error while listening: %v", err) + } + defer lis.Close() + go func() { // Launch the misbehaving server. + sconn, err := lis.Accept() + if err != nil { + t.Errorf("Error while accepting: %v", err) + return + } + defer sconn.Close() + if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil { + t.Errorf("Error while reading client preface: %v", err) + return + } + sfr := http2.NewFramer(sconn, sconn) + // Do not write a settings frame, but read from the conn forever. + for { + if _, err := sfr.ReadFrame(); err != nil { + return + } + } + }() + + // Test context cancelation. 
+ timeBefore := time.Now() + connectCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + time.AfterFunc(100*time.Millisecond, cancel) + + copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)} + _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) + if err == nil { + t.Fatalf("NewClientTransport() returned successfully; wanted error") + } + t.Logf("NewClientTransport() = _, %v", err) + if time.Now().Sub(timeBefore) > 3*time.Second { + t.Fatalf("NewClientTransport returned > 2.9s after context cancelation") + } + + // Test context deadline. + timeBefore = time.Now() + connectCtx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) + if err == nil { + t.Fatalf("NewClientTransport() returned successfully; wanted error") + } + t.Logf("NewClientTransport() = _, %v", err) +} + func (s) TestClientWithMisbehavedServer(t *testing.T) { // Create a misbehaving server. 
lis, err := net.Listen("tcp", "localhost:0") @@ -1266,10 +1325,14 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { } defer sconn.Close() if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil { - t.Errorf("Error while reading clieng preface: %v", err) + t.Errorf("Error while reading client preface: %v", err) return } sfr := http2.NewFramer(sconn, sconn) + if err := sfr.WriteSettings(); err != nil { + t.Errorf("Error while writing settings: %v", err) + return + } if err := sfr.WriteSettingsAck(); err != nil { t.Errorf("Error while writing settings: %v", err) return @@ -1316,7 +1379,7 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { defer cancel() copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)} - ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func() {}, func(GoAwayReason) {}, func() {}) + ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) if err != nil { t.Fatalf("Error while creating client transport: %v", err) } @@ -2217,7 +2280,7 @@ func (s) TestClientHandshakeInfo(t *testing.T) { TransportCredentials: creds, ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), } - tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func() {}, func(GoAwayReason) {}, func() {}) + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}, func() {}) if err != nil { t.Fatalf("NewClientTransport(): %v", err) } @@ -2258,7 +2321,7 @@ func (s) TestClientHandshakeInfoDialer(t *testing.T) { Dialer: dialer, ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), } - tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func() {}, func(GoAwayReason) {}, 
func() {}) + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}, func() {}) if err != nil { t.Fatalf("NewClientTransport(): %v", err) } diff --git a/test/end2end_test.go b/test/end2end_test.go index 165cf19b9877..438b43ca82f4 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7798,92 +7798,6 @@ func (s) TestClientSettingsFloodCloseConn(t *testing.T) { timer.Stop() } -// TestDeadlineSetOnConnectionOnClientCredentialHandshake tests that there is a deadline -// set on the net.Conn when a credential handshake happens in http2_client. -func (s) TestDeadlineSetOnConnectionOnClientCredentialHandshake(t *testing.T) { - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Failed to listen: %v", err) - } - connCh := make(chan net.Conn, 1) - go func() { - defer close(connCh) - conn, err := lis.Accept() - if err != nil { - t.Errorf("Error accepting connection: %v", err) - return - } - connCh <- conn - }() - defer func() { - conn := <-connCh - if conn != nil { - conn.Close() - } - }() - deadlineCh := testutils.NewChannel() - cvd := &credentialsVerifyDeadline{ - deadlineCh: deadlineCh, - } - dOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { - conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", addr) - if err != nil { - return nil, err - } - return &infoConn{Conn: conn}, nil - }) - cc, err := grpc.Dial(lis.Addr().String(), dOpt, grpc.WithTransportCredentials(cvd)) - if err != nil { - t.Fatalf("Failed to dial: %v", err) - } - defer cc.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - deadline, err := deadlineCh.Receive(ctx) - if err != nil { - t.Fatalf("Error receiving from credsInvoked: %v", err) - } - // Default connection timeout is 20 seconds, so if the deadline exceeds now - // + 18 seconds it should be valid. 
- if !deadline.(time.Time).After(time.Now().Add(time.Second * 18)) { - t.Fatalf("Connection did not have deadline set.") - } -} - -type infoConn struct { - net.Conn - deadline time.Time -} - -func (c *infoConn) SetDeadline(t time.Time) error { - c.deadline = t - return c.Conn.SetDeadline(t) -} - -type credentialsVerifyDeadline struct { - deadlineCh *testutils.Channel -} - -func (cvd *credentialsVerifyDeadline) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - return rawConn, nil, nil -} - -func (cvd *credentialsVerifyDeadline) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - cvd.deadlineCh.Send(rawConn.(*infoConn).deadline) - return rawConn, nil, nil -} - -func (cvd *credentialsVerifyDeadline) Info() credentials.ProtocolInfo { - return credentials.ProtocolInfo{} -} -func (cvd *credentialsVerifyDeadline) Clone() credentials.TransportCredentials { - return cvd -} -func (cvd *credentialsVerifyDeadline) OverrideServerName(s string) error { - return nil -} - func unaryInterceptorVerifyConn(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { conn := transport.GetConnection(ctx) if conn == nil { From 26071c24f30ed6b9ba8a512f26e9f266ec8b083e Mon Sep 17 00:00:00 2001 From: apolcyn Date: Mon, 24 Oct 2022 14:21:47 -0700 Subject: [PATCH 647/998] google-c2p resolver: add authority entry to bootstrap config (#5680) --- xds/googledirectpath/googlec2p.go | 18 ++++++++++++------ xds/googledirectpath/googlec2p_test.go | 16 +++++++++++----- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index ef86f7b56c5f..335251241db7 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -110,14 +110,20 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts if balancerName == "" { balancerName 
= tdURL } + serverConfig := &bootstrap.ServerConfig{ + ServerURI: balancerName, + Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), + TransportAPI: version.TransportV3, + NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), + } config := &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: balancerName, - Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), - TransportAPI: version.TransportV3, - NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), - }, + XDSServer: serverConfig, ClientDefaultListenerResourceNameTemplate: "%s", + Authorities: map[string]*bootstrap.Authority{ + "traffic-director-c2p.xds.googleapis.com": { + XDSServer: serverConfig, + }, + }, } // Create singleton xds client with this config. The xds client will be diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 00f8aa0b21be..f32eafbf26cc 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -211,13 +211,19 @@ func TestBuildXDS(t *testing.T) { }, } } + serverConfig := &bootstrap.ServerConfig{ + ServerURI: tdURL, + TransportAPI: version.TransportV3, + NodeProto: wantNode, + } wantConfig := &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: tdURL, - TransportAPI: version.TransportV3, - NodeProto: wantNode, - }, + XDSServer: serverConfig, ClientDefaultListenerResourceNameTemplate: "%s", + Authorities: map[string]*bootstrap.Authority{ + "traffic-director-c2p.xds.googleapis.com": { + XDSServer: serverConfig, + }, + }, } if tt.tdURI != "" { wantConfig.XDSServer.ServerURI = tt.tdURI From 3fd80b0c5298a18271823a8b63e56b4c4df9d1e9 Mon Sep 17 00:00:00 2001 From: andremissaglia Date: Tue, 25 Oct 2022 14:56:33 -0300 Subject: [PATCH 648/998] Fix flaky test MultipleClientStatsHandler (#5739) --- stats/stats_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stats/stats_test.go b/stats/stats_test.go index 9a1a6c11253c..0b7276e48cb7 100644 
--- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -1393,7 +1393,7 @@ func (s) TestMultipleClientStatsHandler(t *testing.T) { for start := time.Now(); time.Since(start) < defaultTestTimeout; { h.mu.Lock() - if _, ok := h.gotRPC[len(h.gotRPC)-1].s.(*stats.End); ok { + if _, ok := h.gotRPC[len(h.gotRPC)-1].s.(*stats.End); ok && len(h.gotRPC) == 12 { h.mu.Unlock() break } @@ -1403,7 +1403,7 @@ func (s) TestMultipleClientStatsHandler(t *testing.T) { for start := time.Now(); time.Since(start) < defaultTestTimeout; { h.mu.Lock() - if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { + if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok && len(h.gotConn) == 4 { h.mu.Unlock() break } From 3c09650e05246fb05f0447b13873caf48d253a60 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 26 Oct 2022 11:33:49 -0700 Subject: [PATCH 649/998] balancer/weightedtarget: use ConnectivityStateEvaluator (#5734) --- balancer/conn_state_evaluator.go | 4 + .../weightedaggregator/aggregator.go | 101 ++++++++---------- balancer/weightedtarget/weightedtarget.go | 16 +-- internal/balancergroup/balancergroup_test.go | 1 - 4 files changed, 50 insertions(+), 72 deletions(-) diff --git a/balancer/conn_state_evaluator.go b/balancer/conn_state_evaluator.go index 33ea9b5821dd..c33413581091 100644 --- a/balancer/conn_state_evaluator.go +++ b/balancer/conn_state_evaluator.go @@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numIdle += updateVal } } + return cse.CurrentState() +} +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { // Evaluate. 
if cse.numReady > 0 { return connectivity.Ready diff --git a/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go index dffc539b85d5..176d3ec0c448 100644 --- a/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -57,6 +57,8 @@ type Aggregator struct { logger *grpclog.PrefixLogger newWRR func() wrr.WRR + csEvltr *balancer.ConnectivityStateEvaluator + mu sync.Mutex // If started is false, no updates should be sent to the parent cc. A closed // sub-balancer could still send pickers to this aggregator. This makes sure @@ -81,11 +83,12 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr cc: cc, logger: logger, newWRR: newWRR, + csEvltr: &balancer.ConnectivityStateEvaluator{}, idToPickerState: make(map[string]*weightedPickerState), } } -// Start starts the aggregator. It can be called after Close to restart the +// Start starts the aggregator. It can be called after Stop to restart the // aggretator. func (wbsa *Aggregator) Start() { wbsa.mu.Lock() @@ -93,7 +96,7 @@ func (wbsa *Aggregator) Start() { wbsa.started = true } -// Stop stops the aggregator. When the aggregator is closed, it won't call +// Stop stops the aggregator. When the aggregator is stopped, it won't call // parent ClientConn to update balancer state. func (wbsa *Aggregator) Stop() { wbsa.mu.Lock() @@ -118,6 +121,9 @@ func (wbsa *Aggregator) Add(id string, weight uint32) { }, stateToAggregate: connectivity.Connecting, } + wbsa.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Connecting) + + wbsa.buildAndUpdateLocked() } // Remove removes the sub-balancer state. 
Future updates from this sub-balancer, @@ -128,9 +134,14 @@ func (wbsa *Aggregator) Remove(id string) { if _, ok := wbsa.idToPickerState[id]; !ok { return } + // Setting the state of the deleted sub-balancer to Shutdown will get csEvltr + // to remove the previous state for any aggregated state evaluations. + // transitions to and from connectivity.Shutdown are ignored by csEvltr. + wbsa.csEvltr.RecordTransition(wbsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown) // Remove id and picker from picker map. This also results in future updates // for this ID to be ignored. delete(wbsa.idToPickerState, id) + wbsa.buildAndUpdateLocked() } // UpdateWeight updates the weight for the given id. Note that this doesn't @@ -180,6 +191,9 @@ func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { // it's either removed, or never existed. return } + + wbsa.csEvltr.RecordTransition(oldState.stateToAggregate, newState.ConnectivityState) + if !(oldState.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) { // If old state is TransientFailure, and new state is Connecting, don't // update the state, to prevent the aggregated state from being always @@ -189,18 +203,7 @@ func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { } oldState.state = newState - if !wbsa.started { - return - } - - if wbsa.pauseUpdateState { - // If updates are paused, do not call UpdateState, but remember that we - // need to call it when they are resumed. - wbsa.needUpdateStateOnResume = true - return - } - - wbsa.cc.UpdateState(wbsa.build()) + wbsa.buildAndUpdateLocked() } // clearState Reset everything to init state (Connecting) but keep the entry in @@ -217,11 +220,11 @@ func (wbsa *Aggregator) clearStates() { } } -// BuildAndUpdate combines the sub-state from each sub-balancer into one state, -// and update it to parent ClientConn. 
-func (wbsa *Aggregator) BuildAndUpdate() { - wbsa.mu.Lock() - defer wbsa.mu.Unlock() +// buildAndUpdateLocked aggregates the connectivity states of the sub-balancers, +// builds a new picker and sends an update to the parent ClientConn. +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) buildAndUpdateLocked() { if !wbsa.started { return } @@ -240,48 +243,34 @@ func (wbsa *Aggregator) BuildAndUpdate() { // Caller must hold wbsa.mu. func (wbsa *Aggregator) build() balancer.State { wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState) - // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated - // state. - var readyN, connectingN, idleN int - pickerN := len(wbsa.idToPickerState) - readyPickers := make([]weightedPickerState, 0, pickerN) - errorPickers := make([]weightedPickerState, 0, pickerN) - for _, ps := range wbsa.idToPickerState { - switch ps.stateToAggregate { - case connectivity.Ready: - readyN++ - readyPickers = append(readyPickers, *ps) - case connectivity.Connecting: - connectingN++ - case connectivity.Idle: - idleN++ - case connectivity.TransientFailure: - errorPickers = append(errorPickers, *ps) - } - } - var aggregatedState connectivity.State - switch { - case readyN > 0: - aggregatedState = connectivity.Ready - case connectingN > 0: - aggregatedState = connectivity.Connecting - case idleN > 0: - aggregatedState = connectivity.Idle - default: - aggregatedState = connectivity.TransientFailure - } // Make sure picker's return error is consistent with the aggregatedState. 
- var picker balancer.Picker - switch aggregatedState { - case connectivity.TransientFailure: - picker = newWeightedPickerGroup(errorPickers, wbsa.newWRR) + pickers := make([]weightedPickerState, 0, len(wbsa.idToPickerState)) + + switch aggState := wbsa.csEvltr.CurrentState(); aggState { case connectivity.Connecting: - picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) + return balancer.State{ + ConnectivityState: aggState, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)} + case connectivity.TransientFailure: + // this means that all sub-balancers are now in TransientFailure. + for _, ps := range wbsa.idToPickerState { + pickers = append(pickers, *ps) + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} default: - picker = newWeightedPickerGroup(readyPickers, wbsa.newWRR) + for _, ps := range wbsa.idToPickerState { + if ps.stateToAggregate == connectivity.Ready { + pickers = append(pickers, *ps) + } + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} } - return balancer.State{ConnectivityState: aggregatedState, Picker: picker} + } type weightedPickerGroup struct { diff --git a/balancer/weightedtarget/weightedtarget.go b/balancer/weightedtarget/weightedtarget.go index 2582c84c5488..83bb7d701f19 100644 --- a/balancer/weightedtarget/weightedtarget.go +++ b/balancer/weightedtarget/weightedtarget.go @@ -88,8 +88,6 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) - var rebuildStateAndPicker bool - b.stateAggregator.PauseStateUpdates() defer b.stateAggregator.ResumeStateUpdates() @@ -98,9 +96,6 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat if _, ok := newConfig.Targets[name]; !ok { b.stateAggregator.Remove(name) b.bg.Remove(name) - // Trigger a state/picker update, because we don't want 
`ClientConn` - // to pick this sub-balancer anymore. - rebuildStateAndPicker = true } } @@ -119,21 +114,15 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat // Not trigger a state/picker update. Wait for the new sub-balancer // to send its updates. } else if newT.ChildPolicy.Name != oldT.ChildPolicy.Name { - // If the child policy name is differet, remove from balancer group + // If the child policy name is different, remove from balancer group // and re-add. b.stateAggregator.Remove(name) b.bg.Remove(name) b.stateAggregator.Add(name, newT.Weight) b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) - // Trigger a state/picker update, because we don't want `ClientConn` - // to pick this sub-balancer anymore. - rebuildStateAndPicker = true } else if newT.Weight != oldT.Weight { // If this is an existing sub-balancer, update weight if necessary. b.stateAggregator.UpdateWeight(name, newT.Weight) - // Trigger a state/picker update, because we don't want `ClientConn` - // should do picks with the new weights now. - rebuildStateAndPicker = true } // Forwards all the update: @@ -154,9 +143,6 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat b.targets = newConfig.Targets - if rebuildStateAndPicker { - b.stateAggregator.BuildAndUpdate() - } return nil } diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index 1228f85ad986..90c5c20f4158 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -240,7 +240,6 @@ func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregat gator.Remove(testBalancerIDs[1]) bg.Remove(testBalancerIDs[1]) - gator.BuildAndUpdate() // Don't wait for SubConns to be removed after close, because they are only // removed after close timeout. 
for i := 0; i < 10; i++ { From 64df65262ec80268846a0c9b1dec9d9f2a5fcb32 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Mon, 31 Oct 2022 14:00:44 -0700 Subject: [PATCH 650/998] google-c2p: include federation env var in the logic which determines when to use directpath (#5745) --- xds/googledirectpath/googlec2p.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 335251241db7..af28229d8099 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -189,7 +189,10 @@ func newNode(zone string, ipv6Capable bool) *v3corepb.Node { // runDirectPath returns whether this resolver should use direct path. // // direct path is enabled if this client is running on GCE, and the normal xDS -// is not used (bootstrap env vars are not set). +// is not used (bootstrap env vars are not set) or federation is enabled. func runDirectPath() bool { - return envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" && onGCE() + if !onGCE() { + return false + } + return envconfig.XDSFederation || envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" } From c858a770aae22eb79c5cdd96c80fdc4bad3109d2 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Mon, 31 Oct 2022 14:58:05 -0700 Subject: [PATCH 651/998] balancer/weightedtarget: fix ConnStateEvltr to ignore transition from TF to Connecting (#5747) --- .../weightedaggregator/aggregator.go | 11 +++-- .../weightedtarget/weightedtarget_test.go | 40 +++++++++++++++++++ 2 files changed, 45 insertions(+), 6 deletions(-) diff --git a/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go index 176d3ec0c448..37fc41c16885 100644 --- a/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -185,23 +185,22 @@ func (wbsa *Aggregator) ResumeStateUpdates() { 
func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { wbsa.mu.Lock() defer wbsa.mu.Unlock() - oldState, ok := wbsa.idToPickerState[id] + state, ok := wbsa.idToPickerState[id] if !ok { // All state starts with an entry in pickStateMap. If ID is not in map, // it's either removed, or never existed. return } - wbsa.csEvltr.RecordTransition(oldState.stateToAggregate, newState.ConnectivityState) - - if !(oldState.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) { + if !(state.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) { // If old state is TransientFailure, and new state is Connecting, don't // update the state, to prevent the aggregated state from being always // CONNECTING. Otherwise, stateToAggregate is the same as // state.ConnectivityState. - oldState.stateToAggregate = newState.ConnectivityState + wbsa.csEvltr.RecordTransition(state.stateToAggregate, newState.ConnectivityState) + state.stateToAggregate = newState.ConnectivityState } - oldState.state = newState + state.state = newState wbsa.buildAndUpdateLocked() } diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index c7c04f0da232..a20cb0dc1ce4 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -1249,6 +1249,46 @@ func (s) TestInitialIdle(t *testing.T) { } } +// TestIgnoreSubBalancerStateTransitions covers the case that if the child reports a +// transition from TF to Connecting, the overall state will still be TF. 
+func (s) TestIgnoreSubBalancerStateTransitions(t *testing.T) { + cc := &tcc{TestClientConn: testutils.NewTestClientConn(t)} + + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addr := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr, []string{"cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + sc := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + + // Verify that the SubConnState update from TF to Connecting is ignored. + if len(cc.states) != 2 || cc.states[0].ConnectivityState != connectivity.Connecting || cc.states[1].ConnectivityState != connectivity.TransientFailure { + t.Fatalf("cc.states = %v; want [Connecting, TransientFailure]", cc.states) + } +} + // tcc wraps a testutils.TestClientConn but stores all state transitions in a // slice. 
type tcc struct { From aa44ccaf84ff5369dff76266b7083ffcca397d78 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Mon, 31 Oct 2022 15:36:43 -0700 Subject: [PATCH 652/998] google-c2p: use new-style resource name for LDS subscription (#5743) --- xds/googledirectpath/googlec2p.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index af28229d8099..1789334fd32f 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -27,6 +27,7 @@ package googledirectpath import ( "fmt" + "net/url" "time" "google.golang.org/grpc" @@ -49,6 +50,7 @@ import ( const ( c2pScheme = "google-c2p" c2pExperimentalScheme = "google-c2p-experimental" + c2pAuthority = "traffic-director-c2p.xds.googleapis.com" tdURL = "dns:///directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second @@ -93,6 +95,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts if !runDirectPath() { // If not xDS, fallback to DNS. t.Scheme = dnsName + t.URL.Scheme = dnsName return resolver.Get(dnsName).Build(t, cc, opts) } @@ -120,7 +123,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts XDSServer: serverConfig, ClientDefaultListenerResourceNameTemplate: "%s", Authorities: map[string]*bootstrap.Authority{ - "traffic-director-c2p.xds.googleapis.com": { + c2pAuthority: { XDSServer: serverConfig, }, }, @@ -135,6 +138,16 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts // Create and return an xDS resolver. 
t.Scheme = xdsName + t.URL.Scheme = xdsName + if envconfig.XDSFederation { + t = resolver.Target{ + URL: url.URL{ + Scheme: xdsName, + Host: c2pAuthority, + Path: t.URL.Path, + }, + } + } xdsR, err := resolver.Get(xdsName).Build(t, cc, opts) if err != nil { xdsC.Close() From fdcc01b8c12bf76885cfe93e2c3dd6080a655d81 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 31 Oct 2022 16:50:41 -0700 Subject: [PATCH 653/998] transport/test: implement staticcheck suggestion (#5752) --- internal/transport/transport_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 16bbf8c8ac3f..d494ed514b0d 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1297,7 +1297,6 @@ func (s) TestClientHonorsConnectContext(t *testing.T) { } // Test context deadline. - timeBefore = time.Now() connectCtx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) From 0d6481fb85c03d658e45bf3a2f7beb9041ec4549 Mon Sep 17 00:00:00 2001 From: littlejian <17816869670@163.com> Date: Wed, 2 Nov 2022 02:08:00 +0800 Subject: [PATCH 654/998] target: replace parsedTarget.Scheme to parsedTarget.URL.Scheme (#5750) --- clientconn.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clientconn.go b/clientconn.go index 4a5dd561d345..422639c79dbf 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1552,7 +1552,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { 
cc.parsedTarget = parsedTarget return rb, nil @@ -1573,9 +1573,9 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget return rb, nil From 040b795b51ea6e3f29ee273acf80505ca495f798 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 1 Nov 2022 17:08:43 -0700 Subject: [PATCH 655/998] xdsclient/e2e_test: use SendContext() where appropriate (#5729) --- xds/internal/xdsclient/e2e_test/cds_watchers_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go index b399687527a9..22efe2ccb699 100644 --- a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go @@ -766,8 +766,10 @@ func (s) TestCDSWatch_NACKError(t *testing.T) { // Register a watch for a cluster resource and have the watch // callback push the received update on to a channel. 
updateCh := testutils.NewChannel() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() cdsCancel := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { - updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + updateCh.SendContext(ctx, xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) }) defer cdsCancel() @@ -778,8 +780,6 @@ func (s) TestCDSWatch_NACKError(t *testing.T) { Clusters: []*v3clusterpb.Cluster{badClusterResource(cdsName, edsName, e2e.SecurityLevelNone)}, SkipValidation: true, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } From fcb8bdf7219c76d2b608d45317427cbbd6e69d6c Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 2 Nov 2022 13:11:13 -0700 Subject: [PATCH 656/998] xds/google-c2p: validate url for no authorities (#5756) --- xds/googledirectpath/googlec2p.go | 4 ++++ xds/googledirectpath/googlec2p_test.go | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 1789334fd32f..669c4fc1c483 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -92,6 +92,10 @@ type c2pResolverBuilder struct { } func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if t.URL.Host != "" { + return nil, fmt.Errorf("google-c2p URI scheme does not support authorities") + } + if !runDirectPath() { // If not xDS, fallback to DNS. 
t.Scheme = dnsName diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index f32eafbf26cc..f9357e9bf3aa 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -20,12 +20,14 @@ package googledirectpath import ( "strconv" + "strings" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient" @@ -251,3 +253,20 @@ func TestBuildXDS(t *testing.T) { }) } } + +// TestDialFailsWhenTargetContainsAuthority attempts to Dial a target URI of +// google-c2p scheme with a non-empty authority and verifies that it fails with +// an expected error. +func TestBuildFailsWhenCalledWithAuthority(t *testing.T) { + uri := "google-c2p://an-authority/resource" + cc, err := grpc.Dial(uri, grpc.WithTransportCredentials(insecure.NewCredentials())) + defer func() { + if cc != nil { + cc.Close() + } + }() + wantErr := "google-c2p URI scheme does not support authorities" + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("grpc.Dial(%s) returned error: %v, want: %v", uri, err, wantErr) + } +} From 36d14dbf6665119337650b37629beced691661c4 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 2 Nov 2022 19:46:50 -0400 Subject: [PATCH 657/998] Fix binary logging bug which logs a server header on a trailers only response (#5763) --- gcp/observability/logging_test.go | 17 +++-------------- internal/transport/transport.go | 10 ++++++++++ stream.go | 12 ++++++++++-- test/end2end_test.go | 4 ++-- 4 files changed, 25 insertions(+), 18 deletions(-) diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index d91af4afc67f..03638a9b49d7 100644 --- 
a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -608,9 +608,9 @@ func (s) TestBothClientAndServerRPCEvents(t *testing.T) { t.Fatalf("unexpected error: %v, expected an EOF error", err) } fle.mu.Lock() - if len(fle.entries) != 17 { + if len(fle.entries) != 16 { fle.mu.Unlock() - t.Fatalf("Unexpected length of entries %v, want 17 (collective of client and server)", len(fle.entries)) + t.Fatalf("Unexpected length of entries %v, want 16 (collective of client and server)", len(fle.entries)) } fle.mu.Unlock() } @@ -936,24 +936,13 @@ func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { SequenceID: 2, Authority: ss.Address, }, - { - Type: eventTypeServerHeader, - Logger: loggerClient, - ServiceName: "grpc.testing.TestService", - MethodName: "FullDuplexCall", - SequenceID: 3, - Authority: ss.Address, - Payload: payload{ - Metadata: map[string]string{}, - }, - }, { Type: eventTypeServerTrailer, Logger: loggerClient, ServiceName: "grpc.testing.TestService", MethodName: "FullDuplexCall", Authority: ss.Address, - SequenceID: 4, + SequenceID: 3, Payload: payload{ Metadata: map[string]string{}, }, diff --git a/internal/transport/transport.go b/internal/transport/transport.go index e21587b5321c..2e615ee20cc5 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -43,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. 
+var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } diff --git a/stream.go b/stream.go index b10ab1ab6324..960c3e33dfd3 100644 --- a/stream.go +++ b/stream.go @@ -752,17 +752,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. 
logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, diff --git a/test/end2end_test.go b/test/end2end_test.go index 438b43ca82f4..8e50ca70e569 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -8259,8 +8259,8 @@ func (s) TestGlobalBinaryLoggingOptions(t *testing.T) { t.Fatalf("unexpected error: %v, expected an EOF error", err) } - if csbl.mml.events != 9 { - t.Fatalf("want 9 client side binary logging events, got %v", csbl.mml.events) + if csbl.mml.events != 8 { + t.Fatalf("want 8 client side binary logging events, got %v", csbl.mml.events) } if ssbl.mml.events != 8 { t.Fatalf("want 8 server side binary logging events, got %v", ssbl.mml.events) From e41e8940c0c481d954a7c23973cd5440b2f0d138 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 3 Nov 2022 10:27:40 -0700 Subject: [PATCH 658/998] orca: create ORCA producer for LB policies to use to receive OOB load reports (#5669) --- balancer/balancer.go | 23 + balancer/base/balancer_test.go | 4 + balancer_conn_wrappers.go | 71 ++- internal/testutils/balancer.go | 5 + orca/internal/internal.go | 7 + orca/producer.go | 221 +++++++ orca/producer_test.go | 549 ++++++++++++++++++ .../clusterresolver/clusterresolver_test.go | 3 + 8 files changed, 880 insertions(+), 3 deletions(-) create mode 100644 orca/producer.go create mode 100644 orca/producer_test.go diff --git a/balancer/balancer.go b/balancer/balancer.go index f4f9408f3852..392b21fb2d8e 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -110,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. 
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -371,3 +376,21 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { +} diff --git a/balancer/base/balancer_test.go b/balancer/base/balancer_test.go index 3a3ccd6ba71a..b50abf8526e6 100644 --- a/balancer/base/balancer_test.go +++ b/balancer/base/balancer_test.go @@ -44,6 +44,10 @@ func (sc *testSubConn) UpdateAddresses(addresses []resolver.Address) {} func (sc *testSubConn) Connect() {} +func (sc *testSubConn) GetOrBuildProducer(balancer.ProducerBuilder) (balancer.Producer, func()) { + return nil, nil +} + // testPickBuilder creates balancer.Picker for test. 
type testPickBuilder struct { validate func(info PickerBuildInfo) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index b1c23eaae0db..0359956d36fa 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -19,17 +19,20 @@ package grpc import ( + "context" "fmt" "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() @@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { @@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { defer acbw.mu.Unlock() return acbw.ac } + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. 
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. 
+ unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index 95ec79616eff..8927823d09da 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -68,6 +68,11 @@ func (tsc *TestSubConn) Connect() { } } +// GetOrBuildProducer is a no-op. +func (tsc *TestSubConn) GetOrBuildProducer(balancer.ProducerBuilder) (balancer.Producer, func()) { + return nil, nil +} + // String implements stringer to print human friendly error message. func (tsc *TestSubConn) String() string { return tsc.id diff --git a/orca/internal/internal.go b/orca/internal/internal.go index 882fd8287a9b..865d94d86945 100644 --- a/orca/internal/internal.go +++ b/orca/internal/internal.go @@ -20,8 +20,15 @@ // avoid polluting the godoc of the top-level orca package. package internal +import ibackoff "google.golang.org/grpc/internal/backoff" + // AllowAnyMinReportingInterval prevents clamping of the MinReportingInterval // configured via ServiceOptions, to a minimum of 30s. // // For testing purposes only. var AllowAnyMinReportingInterval interface{} // func(*ServiceOptions) + +// DefaultBackoffFunc is used by the producer to control its backoff behavior. +// +// For testing purposes only. +var DefaultBackoffFunc = ibackoff.DefaultExponential.Backoff diff --git a/orca/producer.go b/orca/producer.go new file mode 100644 index 000000000000..559033116667 --- /dev/null +++ b/orca/producer.go @@ -0,0 +1,221 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orca + +import ( + "context" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" + "google.golang.org/protobuf/types/known/durationpb" +) + +type producerBuilder struct{} + +// Build constructs and returns a producer and its cleanup function +func (*producerBuilder) Build(cci interface{}) (balancer.Producer, func()) { + ctx, cancel := context.WithCancel(context.Background()) + p := &producer{ + client: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)), + closed: grpcsync.NewEvent(), + intervals: make(map[time.Duration]int), + listeners: make(map[OOBListener]struct{}), + backoff: internal.DefaultBackoffFunc, + } + go p.run(ctx) + return p, func() { + cancel() + <-p.closed.Done() // Block until stream stopped. + } +} + +var producerBuilderSingleton = &producerBuilder{} + +// OOBListener is used to receive out-of-band load reports as they arrive. +type OOBListener interface { + // OnLoadReport is called when a load report is received. + OnLoadReport(*v3orcapb.OrcaLoadReport) +} + +// OOBListenerOptions contains options to control how an OOBListener is called. 
+type OOBListenerOptions struct { + // ReportInterval specifies how often to request the server to provide a + // load report. May be provided less frequently if the server requires a + // longer interval, or may be provided more frequently if another + // subscriber requests a shorter interval. + ReportInterval time.Duration +} + +// RegisterOOBListener registers an out-of-band load report listener on sc. +// Any OOBListener may only be registered once per subchannel at a time. The +// returned stop function must be called when no longer needed. Do not +// register a single OOBListener more than once per SubConn. +func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) { + pr, close := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*producer) + p.registerListener(l, opts.ReportInterval) + + // TODO: When we can register for SubConn state updates, don't call run() + // until READY and automatically call stop() on SHUTDOWN. + + // If stop is called multiple times, prevent it from having any effect on + // subsequent calls. + return grpcsync.OnceFunc(func() { + p.unregisterListener(l, opts.ReportInterval) + close() + }) +} + +type producer struct { + client v3orcaservicegrpc.OpenRcaServiceClient + + closed *grpcsync.Event // fired when closure completes + // backoff is called between stream attempts to determine how long to delay + // to avoid overloading a server experiencing problems. The attempt count + // is incremented when stream errors occur and is reset when the stream + // reports a result. + backoff func(int) time.Duration + + mu sync.Mutex + intervals map[time.Duration]int // map from interval time to count of listeners requesting that time + listeners map[OOBListener]struct{} // set of registered listeners +} + +// registerListener adds the listener and its requested report interval to the +// producer. 
+func (p *producer) registerListener(l OOBListener, interval time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + p.listeners[l] = struct{}{} + p.intervals[interval]++ +} + +// registerListener removes the listener and its requested report interval to +// the producer. +func (p *producer) unregisterListener(l OOBListener, interval time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + delete(p.listeners, l) + p.intervals[interval]-- + if p.intervals[interval] == 0 { + delete(p.intervals, interval) + } +} + +// minInterval returns the smallest key in p.intervals. +func (p *producer) minInterval() time.Duration { + p.mu.Lock() + defer p.mu.Unlock() + var min time.Duration + first := true + for t := range p.intervals { + if t < min || first { + min = t + first = false + } + } + return min +} + +// run manages the ORCA OOB stream on the subchannel. +func (p *producer) run(ctx context.Context) { + defer p.closed.Fire() + backoffAttempt := 0 + backoffTimer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-backoffTimer.C: + case <-ctx.Done(): + return + } + + resetBackoff, err := p.runStream(ctx) + + if resetBackoff { + backoffTimer.Reset(0) + backoffAttempt = 0 + } else { + backoffTimer.Reset(p.backoff(backoffAttempt)) + backoffAttempt++ + } + + switch { + case err == nil: + // No error was encountered; restart the stream. + case ctx.Err() != nil: + // Producer was stopped; exit immediately and without logging an + // error. + return + case status.Code(err) == codes.Unimplemented: + // Unimplemented; do not retry. + logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") + return + case status.Code(err) == codes.Unavailable: + // The SubConn is not currently ready; backoff silently. 
+ // + // TODO: don't attempt the stream until the state is READY to + // minimize the chances of this case and to avoid using the + // exponential backoff mechanism, as we should know it's safe to + // retry when the state is READY again. + default: + // Log all other errors. + logger.Error("Received unexpected stream error:", err) + } + } +} + +// runStream runs a single stream on the subchannel and returns the resulting +// error, if any, and whether or not the run loop should reset the backoff +// timer to zero or advance it. +func (p *producer) runStream(ctx context.Context) (resetBackoff bool, err error) { + interval := p.minInterval() + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := p.client.StreamCoreMetrics(streamCtx, &v3orcaservicepb.OrcaLoadReportRequest{ + ReportInterval: durationpb.New(interval), + }) + if err != nil { + return false, err + } + + for { + report, err := stream.Recv() + if err != nil { + return resetBackoff, err + } + resetBackoff = true + p.mu.Lock() + for l := range p.listeners { + l.OnLoadReport(report) + } + p.mu.Unlock() + if interval != p.minInterval() { + // restart stream to use new interval + return true, nil + } + } +} diff --git a/orca/producer_test.go b/orca/producer_test.go new file mode 100644 index 000000000000..f15317995dec --- /dev/null +++ b/orca/producer_test.go @@ -0,0 +1,549 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package orca_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" +) + +// customLBB wraps a round robin LB policy but provides a ClientConn wrapper to +// add an ORCA OOB report producer for all created SubConns. +type customLBB struct{} + +func (customLBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return balancer.Get(roundrobin.Name).Build(&ccWrapper{ClientConn: cc}, opts) +} + +func (customLBB) Name() string { return "customLB" } + +func init() { + balancer.Register(customLBB{}) +} + +type ccWrapper struct { + balancer.ClientConn +} + +func (w *ccWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + panic(fmt.Sprintf("got addrs=%v; want len(addrs) == 1", addrs)) + } + sc, err := w.ClientConn.NewSubConn(addrs, opts) + if err != nil { + return sc, err + } + l := getListenerInfo(addrs[0]) + l.listener.cleanup = orca.RegisterOOBListener(sc, l.listener, l.opts) + l.sc = sc + return sc, nil +} + +// listenerInfo is stored in an address's attributes to allow ORCA +// listeners to be registered on subconns created for that address. 
+type listenerInfo struct { + listener *testOOBListener + opts orca.OOBListenerOptions + sc balancer.SubConn // Set by the LB policy +} + +type listenerInfoKey struct{} + +func setListenerInfo(addr resolver.Address, l *listenerInfo) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(listenerInfoKey{}, l) + return addr +} + +func getListenerInfo(addr resolver.Address) *listenerInfo { + return addr.Attributes.Value(listenerInfoKey{}).(*listenerInfo) +} + +// testOOBListener is a simple listener that pushes load reports to a channel. +type testOOBListener struct { + cleanup func() + loadReportCh chan *v3orcapb.OrcaLoadReport +} + +func newTestOOBListener() *testOOBListener { + return &testOOBListener{cleanup: func() {}, loadReportCh: make(chan *v3orcapb.OrcaLoadReport)} +} + +func (t *testOOBListener) Stop() { t.cleanup() } + +func (t *testOOBListener) OnLoadReport(r *v3orcapb.OrcaLoadReport) { + t.loadReportCh <- r +} + +// TestProducer is a basic, end-to-end style test of an LB policy with an +// OOBListener communicating with a server with an ORCA service. +func (s) TestProducer(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Use a fixed backoff for stream recreation. + oldBackoff := internal.DefaultBackoffFunc + internal.DefaultBackoffFunc = func(int) time.Duration { return 10 * time.Millisecond } + defer func() { internal.DefaultBackoffFunc = oldBackoff }() + + // Initialize listener for our ORCA server. + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Register the OpenRCAService with a very short metrics reporting interval. 
+ const shortReportingInterval = 50 * time.Millisecond + opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval} + internal.AllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&opts) + s := grpc.NewServer() + orcaSrv, err := orca.Register(s, opts) + if err != nil { + t.Fatalf("orca.Register failed: %v", err) + } + go s.Serve(lis) + defer s.Stop() + + // Create our client with an OOB listener in the LB policy it selects. + r := manual.NewBuilderWithScheme("whatever") + oobLis := newTestOOBListener() + + lisOpts := orca.OOBListenerOptions{ReportInterval: 50 * time.Millisecond} + li := &listenerInfo{listener: oobLis, opts: lisOpts} + addr := setListenerInfo(resolver.Address{Addr: lis.Addr().String()}, li) + r.InitialState(resolver.State{Addresses: []resolver.Address{addr}}) + cc, err := grpc.Dial("whatever:///whatever", grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"customLB":{}}]}`), grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial failed: %v", err) + } + defer cc.Close() + + // Ensure the OOB listener is stopped before the client is closed to avoid + // a potential irrelevant error in the logs. + defer oobLis.Stop() + + // Set a few metrics and wait for them on the client side. + orcaSrv.SetCPUUtilization(10) + orcaSrv.SetMemoryUtilization(100) + orcaSrv.SetUtilization("bob", 555) + loadReportWant := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 10, + MemUtilization: 100, + Utilization: map[string]float64{"bob": 555}, + } + +testReport: + for { + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! + break testReport + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } + } + + // Change and add metrics and wait for them on the client side. 
+ orcaSrv.SetCPUUtilization(50) + orcaSrv.SetMemoryUtilization(200) + orcaSrv.SetUtilization("mary", 321) + loadReportWant = &v3orcapb.OrcaLoadReport{ + CpuUtilization: 50, + MemUtilization: 200, + Utilization: map[string]float64{"bob": 555, "mary": 321}, + } + + for { + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! + return + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } + } +} + +// fakeORCAService is a simple implementation of an ORCA service that pushes +// requests it receives from clients to a channel and sends responses from a +// channel back. This allows tests to verify the client is sending requests +// and processing responses properly. +type fakeORCAService struct { + v3orcaservicegrpc.UnimplementedOpenRcaServiceServer + + reqCh chan *v3orcaservicepb.OrcaLoadReportRequest + respCh chan interface{} // either *v3orcapb.OrcaLoadReport or error +} + +func newFakeORCAService() *fakeORCAService { + return &fakeORCAService{ + reqCh: make(chan *v3orcaservicepb.OrcaLoadReportRequest), + respCh: make(chan interface{}), + } +} + +func (f *fakeORCAService) close() { + close(f.respCh) +} + +func (f *fakeORCAService) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + f.reqCh <- req + for resp := range f.respCh { + if err, ok := resp.(error); ok { + return err + } + if err := stream.Send(resp.(*v3orcapb.OrcaLoadReport)); err != nil { + // In the event that a stream error occurs, a new stream will have + // been created that was waiting for this response message. Push + // it back onto the channel and return. + // + // This happens because we range over respCh. 
If we changed to + // instead select on respCh + stream.Context(), the same situation + // could still occur due to a race between noticing the two events, + // so such a workaround would still be needed to prevent flakiness. + f.respCh <- resp + return err + } + } + return nil +} + +// TestProducerBackoff verifies that the ORCA producer applies the proper +// backoff after stream failures. +func (s) TestProducerBackoff(t *testing.T) { + grpctest.TLogger.ExpectErrorN("injected error", 4) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Provide a convenient way to expect backoff calls and return a minimal + // value. + const backoffShouldNotBeCalled = 9999 // Use to assert backoff function is not called. + const backoffAllowAny = -1 // Use to ignore any backoff calls. + expectedBackoff := backoffAllowAny + oldBackoff := internal.DefaultBackoffFunc + internal.DefaultBackoffFunc = func(got int) time.Duration { + if expectedBackoff == backoffShouldNotBeCalled { + t.Errorf("Unexpected backoff call; parameter = %v", got) + } else if expectedBackoff != backoffAllowAny { + if got != expectedBackoff { + t.Errorf("Unexpected backoff received; got %v want %v", got, expectedBackoff) + } + } + return time.Millisecond + } + defer func() { internal.DefaultBackoffFunc = oldBackoff }() + + // Initialize listener for our ORCA server. + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Register our fake ORCA service. + s := grpc.NewServer() + fake := newFakeORCAService() + defer fake.close() + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, fake) + go s.Serve(lis) + defer s.Stop() + + // Define the report interval and a function to wait for it to be sent to + // the server. 
+ const reportInterval = 123 * time.Second + awaitRequest := func(interval time.Duration) { + select { + case req := <-fake.reqCh: + if got := req.GetReportInterval().AsDuration(); got != interval { + t.Errorf("Unexpected report interval; got %v want %v", got, interval) + } + case <-ctx.Done(): + t.Fatalf("Did not receive client request") + } + } + + // Create our client with an OOB listener in the LB policy it selects. + r := manual.NewBuilderWithScheme("whatever") + oobLis := newTestOOBListener() + + lisOpts := orca.OOBListenerOptions{ReportInterval: reportInterval} + li := &listenerInfo{listener: oobLis, opts: lisOpts} + r.InitialState(resolver.State{Addresses: []resolver.Address{setListenerInfo(resolver.Address{Addr: lis.Addr().String()}, li)}}) + cc, err := grpc.Dial("whatever:///whatever", grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"customLB":{}}]}`), grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial failed: %v", err) + } + defer cc.Close() + + // Ensure the OOB listener is stopped before the client is closed to avoid + // a potential irrelevant error in the logs. + defer oobLis.Stop() + + // Define a load report to send and expect the client to see. + loadReportWant := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 10, + MemUtilization: 100, + Utilization: map[string]float64{"bob": 555}, + } + + // Unblock the fake. + awaitRequest(reportInterval) + fake.respCh <- loadReportWant + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! + break + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } + + // The next request should be immediate, since there was a message + // received. 
+ expectedBackoff = backoffShouldNotBeCalled + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + + // The next requests will need to backoff. + expectedBackoff = 0 + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + expectedBackoff = 1 + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + expectedBackoff = 2 + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + // The next request should be immediate, since there was a message + // received. + expectedBackoff = backoffShouldNotBeCalled + + // Send another valid response and wait for it on the client. + fake.respCh <- loadReportWant + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! + break + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } +} + +// TestProducerMultipleListeners tests that multiple listeners works as +// expected in a producer: requesting the proper interval and delivering the +// update to all listeners. +func (s) TestProducerMultipleListeners(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Provide a convenient way to expect backoff calls and return a minimal + // value. + oldBackoff := internal.DefaultBackoffFunc + internal.DefaultBackoffFunc = func(got int) time.Duration { + return time.Millisecond + } + defer func() { internal.DefaultBackoffFunc = oldBackoff }() + + // Initialize listener for our ORCA server. + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Register our fake ORCA service. 
+ s := grpc.NewServer() + fake := newFakeORCAService() + defer fake.close() + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, fake) + go s.Serve(lis) + defer s.Stop() + + // Define the report interval and a function to wait for it to be sent to + // the server. + const reportInterval1 = 123 * time.Second + const reportInterval2 = 234 * time.Second + const reportInterval3 = 56 * time.Second + awaitRequest := func(interval time.Duration) { + select { + case req := <-fake.reqCh: + if got := req.GetReportInterval().AsDuration(); got != interval { + t.Errorf("Unexpected report interval; got %v want %v", got, interval) + } + case <-ctx.Done(): + t.Fatalf("Did not receive client request") + } + } + + // Create our client with an OOB listener in the LB policy it selects. + r := manual.NewBuilderWithScheme("whatever") + oobLis1 := newTestOOBListener() + lisOpts1 := orca.OOBListenerOptions{ReportInterval: reportInterval1} + li := &listenerInfo{listener: oobLis1, opts: lisOpts1} + r.InitialState(resolver.State{Addresses: []resolver.Address{setListenerInfo(resolver.Address{Addr: lis.Addr().String()}, li)}}) + cc, err := grpc.Dial("whatever:///whatever", grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"customLB":{}}]}`), grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial failed: %v", err) + } + defer cc.Close() + + // Ensure the OOB listener is stopped before the client is closed to avoid + // a potential irrelevant error in the logs. + defer oobLis1.Stop() + + oobLis2 := newTestOOBListener() + lisOpts2 := orca.OOBListenerOptions{ReportInterval: reportInterval2} + + oobLis3 := newTestOOBListener() + lisOpts3 := orca.OOBListenerOptions{ReportInterval: reportInterval3} + + // Define a load report to send and expect the client to see. 
+ loadReportWant := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 10, + MemUtilization: 100, + Utilization: map[string]float64{"bob": 555}, + } + + // Receive reports and update counts for the three listeners. + var reportsMu sync.Mutex + var reportsReceived1, reportsReceived2, reportsReceived3 int + go func() { + for { + select { + case r := <-oobLis1.loadReportCh: + t.Log("Load report 1 received: ", r) + if !proto.Equal(r, loadReportWant) { + t.Errorf("Unexpected report received: %+v", r) + } + reportsMu.Lock() + reportsReceived1++ + reportsMu.Unlock() + case r := <-oobLis2.loadReportCh: + t.Log("Load report 2 received: ", r) + if !proto.Equal(r, loadReportWant) { + t.Errorf("Unexpected report received: %+v", r) + } + reportsMu.Lock() + reportsReceived2++ + reportsMu.Unlock() + case r := <-oobLis3.loadReportCh: + t.Log("Load report 3 received: ", r) + if !proto.Equal(r, loadReportWant) { + t.Errorf("Unexpected report received: %+v", r) + } + reportsMu.Lock() + reportsReceived3++ + reportsMu.Unlock() + case <-ctx.Done(): + // Test has ended; exit + return + } + } + }() + + // checkReports is a helper function to check the report counts for the three listeners. + checkReports := func(r1, r2, r3 int) { + t.Helper() + for ctx.Err() == nil { + reportsMu.Lock() + if r1 == reportsReceived1 && r2 == reportsReceived2 && r3 == reportsReceived3 { + // Success! + reportsMu.Unlock() + return + } + if reportsReceived1 > r1 || reportsReceived2 > r2 || reportsReceived3 > r3 { + reportsMu.Unlock() + t.Fatalf("received excess reports. got %v %v %v; want %v %v %v", reportsReceived1, reportsReceived2, reportsReceived3, r1, r2, r3) + return + } + reportsMu.Unlock() + time.Sleep(10 * time.Millisecond) + } + t.Fatalf("timed out waiting for reports received. got %v %v %v; want %v %v %v", reportsReceived1, reportsReceived2, reportsReceived3, r1, r2, r3) + } + + // Only 1 listener; expect reportInterval1 to be used and expect the report + // to be sent to the listener. 
+ awaitRequest(reportInterval1) + fake.respCh <- loadReportWant + checkReports(1, 0, 0) + + // Register listener 2 with a less frequent interval; no need to recreate + // stream. Report should go to both listeners. + oobLis2.cleanup = orca.RegisterOOBListener(li.sc, oobLis2, lisOpts2) + fake.respCh <- loadReportWant + checkReports(2, 1, 0) + + // Register listener 3 with a more frequent interval; stream is recreated + // with this interval after the next report is received. The first report + // will go to all three listeners. + oobLis3.cleanup = orca.RegisterOOBListener(li.sc, oobLis3, lisOpts3) + fake.respCh <- loadReportWant + checkReports(3, 2, 1) + awaitRequest(reportInterval3) + + // Another report without a change in listeners should go to all three listeners. + fake.respCh <- loadReportWant + checkReports(4, 3, 2) + + // Stop listener 2. This does not affect the interval as listener 3 is + // still the shortest. The next update goes to listeners 1 and 3. + oobLis2.Stop() + fake.respCh <- loadReportWant + checkReports(5, 3, 3) + + // Stop listener 3. This makes the interval longer, with stream recreation + // delayed until the next report is received. Reports should only go to + // listener 1 now. + oobLis3.Stop() + fake.respCh <- loadReportWant + checkReports(6, 3, 3) + awaitRequest(reportInterval1) + // Another report without a change in listeners should go to the first listener. 
+ fake.respCh <- loadReportWant + checkReports(7, 3, 3) +} diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 1973e1549188..c2a5729e3bcb 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -190,6 +190,9 @@ type fakeSubConn struct{} func (*fakeSubConn) UpdateAddresses([]resolver.Address) { panic("implement me") } func (*fakeSubConn) Connect() { panic("implement me") } +func (*fakeSubConn) GetOrBuildProducer(balancer.ProducerBuilder) (balancer.Producer, func()) { + panic("implement me") +} // waitForNewChildLB makes sure that a new child LB is created by the top-level // clusterResolverBalancer. From b597a8e1d0ce3f63ef8a7b62a23ca1fcc3a60678 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 4 Nov 2022 10:59:28 -0700 Subject: [PATCH 659/998] xdsclient: improve authority watchers test (#5700) --- xds/internal/xdsclient/authority_test.go | 360 ------------------ .../xdsclient/e2e_test/authority_test.go | 315 +++++++++++++++ xds/internal/xdsclient/watchers_test.go | 7 + 3 files changed, 322 insertions(+), 360 deletions(-) delete mode 100644 xds/internal/xdsclient/authority_test.go create mode 100644 xds/internal/xdsclient/e2e_test/authority_test.go diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go deleted file mode 100644 index b8704a9c0c7a..000000000000 --- a/xds/internal/xdsclient/authority_test.go +++ /dev/null @@ -1,360 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package xdsclient - -import ( - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/testutils" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/testing/protocmp" -) - -var ( - serverConfigs = []*bootstrap.ServerConfig{ - { - ServerURI: testXDSServer + "0", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "creds-0", - TransportAPI: version.TransportV2, - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - { - ServerURI: testXDSServer + "1", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "creds-1", - TransportAPI: version.TransportV3, - NodeProto: xdstestutils.EmptyNodeProtoV3, - }, - { - ServerURI: testXDSServer + "2", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "creds-2", - TransportAPI: version.TransportV2, - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - } - - serverConfigCmpOptions = cmp.Options{ - cmpopts.IgnoreFields(bootstrap.ServerConfig{}, "Creds"), - protocmp.Transform(), - } -) - -func overrideFedEnvVar(t *testing.T) { - oldFed := envconfig.XDSFederation - envconfig.XDSFederation 
= true - t.Cleanup(func() { envconfig.XDSFederation = oldFed }) -} - -// watchAndFetchNewController starts a CDS watch on the client for the given -// resourceName, and tries to receive a new controller from the ctrlCh. -// -// It returns false if there's no controller in the ctrlCh. -func watchAndFetchNewController(t *testing.T, client *clientImpl, resourceName string, ctrlCh *testutils.Channel) (*testController, bool, func()) { - updateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(resourceName, func(update xdsresource.ClusterUpdate, err error) { - updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - - // Clear the item in the watch channel, otherwise the next watch will block. - authority := xdsresource.ParseName(resourceName).Authority - var config *bootstrap.ServerConfig - if authority == "" { - config = client.config.XDSServer - } else { - authConfig, ok := client.config.Authorities[authority] - if !ok { - t.Fatalf("failed to find authority %q", authority) - } - config = authConfig.XDSServer - } - a := client.authorities[config.String()] - if a == nil { - t.Fatalf("authority for %q is not created", authority) - } - ctrlTemp := a.controller.(*testController) - // Clear the channel so the next watch on this controller can proceed. - ctrlTemp.addWatches[xdsresource.ClusterResource].ReceiveOrFail() - - cancelWatchRet := func() { - cancelWatch() - ctrlTemp.removeWatches[xdsresource.ClusterResource].ReceiveOrFail() - } - - // Try to receive a new controller. - c, ok := ctrlCh.ReceiveOrFail() - if !ok { - return nil, false, cancelWatchRet - } - ctrl := c.(*testController) - return ctrl, true, cancelWatchRet -} - -// TestAuthorityDefaultAuthority covers that a watch for an old style resource -// name (one without authority) builds a controller using the top level server -// config. 
-func (s) TestAuthorityDefaultAuthority(t *testing.T) { - overrideFedEnvVar(t) - ctrlCh := overrideNewController(t) - - client, err := newWithConfig(&bootstrap.Config{ - XDSServer: serverConfigs[0], - Authorities: map[string]*bootstrap.Authority{testAuthority: {XDSServer: serverConfigs[1]}}, - }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - t.Cleanup(client.Close) - - ctrl, ok, _ := watchAndFetchNewController(t, client, testCDSName, ctrlCh) - if !ok { - t.Fatalf("want a new controller to be built, got none") - } - // Want the default server config. - wantConfig := serverConfigs[0] - if diff := cmp.Diff(ctrl.config, wantConfig, serverConfigCmpOptions); diff != "" { - t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff) - } -} - -// TestAuthorityNoneDefaultAuthority covers that a watch with a new style -// resource name creates a controller with the corresponding server config. -func (s) TestAuthorityNoneDefaultAuthority(t *testing.T) { - overrideFedEnvVar(t) - ctrlCh := overrideNewController(t) - - client, err := newWithConfig(&bootstrap.Config{ - XDSServer: serverConfigs[0], - Authorities: map[string]*bootstrap.Authority{testAuthority: {XDSServer: serverConfigs[1]}}, - }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - t.Cleanup(client.Close) - - resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) - ctrl, ok, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh) - if !ok { - t.Fatalf("want a new controller to be built, got none") - } - // Want the server config for this authority. 
- wantConfig := serverConfigs[1] - if diff := cmp.Diff(ctrl.config, wantConfig, serverConfigCmpOptions); diff != "" { - t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff) - } -} - -// TestAuthorityShare covers that -// - watch with the same authority name doesn't create new authority -// - watch with different authority name but same authority config doesn't -// create new authority -func (s) TestAuthorityShare(t *testing.T) { - overrideFedEnvVar(t) - ctrlCh := overrideNewController(t) - - client, err := newWithConfig(&bootstrap.Config{ - XDSServer: serverConfigs[0], - Authorities: map[string]*bootstrap.Authority{ - testAuthority: {XDSServer: serverConfigs[1]}, - testAuthority2: {XDSServer: serverConfigs[1]}, // Another authority name, but with the same config. - }, - }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - t.Cleanup(client.Close) - - resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) - ctrl1, ok1, _ := watchAndFetchNewController(t, client, resourceName, ctrlCh) - if !ok1 { - t.Fatalf("want a new controller to be built, got none") - } - // Want the server config for this authority. - wantConfig := serverConfigs[1] - if diff := cmp.Diff(ctrl1.config, wantConfig, serverConfigCmpOptions); diff != "" { - t.Fatalf("controller is built with unexpected config, diff (-got +want): %v", diff) - } - - // Call the watch with the same authority name. This shouldn't create a new - // controller. - resourceNameSameAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) - ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh) - if ok2 { - t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) - } - - // Call the watch with a different authority name, but the same server - // config. 
This shouldn't create a new controller. - resourceNameSameConfig := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority2, testCDSName+"1", nil) - if ctrl, ok, _ := watchAndFetchNewController(t, client, resourceNameSameConfig, ctrlCh); ok { - t.Fatalf("an unexpected controller is built with config: %v", ctrl.config) - } -} - -// TestAuthorityIdle covers that -// - authorities are put in a timeout cache when the last watch is canceled -// - idle authorities are not immediately closed. They will be closed after a -// timeout. -func (s) TestAuthorityIdleTimeout(t *testing.T) { - overrideFedEnvVar(t) - ctrlCh := overrideNewController(t) - - const idleTimeout = 50 * time.Millisecond - - client, err := newWithConfig(&bootstrap.Config{ - XDSServer: serverConfigs[0], - Authorities: map[string]*bootstrap.Authority{ - testAuthority: {XDSServer: serverConfigs[1]}, - }, - }, defaultWatchExpiryTimeout, idleTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - t.Cleanup(client.Close) - - resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) - ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) - if !ok1 { - t.Fatalf("want a new controller to be built, got none") - } - - var cancelWatch2 func() - // Call the watch with the same authority name. This shouldn't create a new - // controller. - resourceNameSameAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) - ctrl2, ok2, cancelWatch2 := watchAndFetchNewController(t, client, resourceNameSameAuthority, ctrlCh) - if ok2 { - t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) - } - - cancelWatch1() - if ctrl1.done.HasFired() { - t.Fatalf("controller is closed immediately when the watch is canceled, wanted to be put in the idle cache") - } - - // Cancel the second watch, should put controller in the idle cache. 
- cancelWatch2() - if ctrl1.done.HasFired() { - t.Fatalf("controller is closed when the second watch is closed") - } - - time.Sleep(idleTimeout * 2) - if !ctrl1.done.HasFired() { - t.Fatalf("controller is not closed after idle timeout") - } -} - -// TestAuthorityClientClose covers that the authorities in use and in idle cache -// are all closed when the client is closed. -func (s) TestAuthorityClientClose(t *testing.T) { - overrideFedEnvVar(t) - ctrlCh := overrideNewController(t) - - client, err := newWithConfig(&bootstrap.Config{ - XDSServer: serverConfigs[0], - Authorities: map[string]*bootstrap.Authority{ - testAuthority: {XDSServer: serverConfigs[1]}, - }, - }, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - t.Cleanup(client.Close) - - resourceName := testCDSName - ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) - if !ok1 { - t.Fatalf("want a new controller to be built, got none") - } - - resourceNameWithAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) - ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh) - if !ok2 { - t.Fatalf("want a new controller to be built, got none") - } - - cancelWatch1() - if ctrl1.done.HasFired() { - t.Fatalf("controller is closed immediately when the watch is canceled, wanted to be put in the idle cache") - } - - // Close the client while watch2 is not canceled. ctrl1 is in the idle - // cache, ctrl2 is in use. Both should be closed. - client.Close() - - if !ctrl1.done.HasFired() { - t.Fatalf("controller in idle cache is not closed after client is closed") - } - if !ctrl2.done.HasFired() { - t.Fatalf("controller in use is not closed after client is closed") - } -} - -// TestAuthorityRevive covers that the authorities in the idle cache is revived -// when a new watch is started on this authority. 
-func (s) TestAuthorityRevive(t *testing.T) { - overrideFedEnvVar(t) - ctrlCh := overrideNewController(t) - - const idleTimeout = 50 * time.Millisecond - - client, err := newWithConfig(&bootstrap.Config{ - XDSServer: serverConfigs[0], - Authorities: map[string]*bootstrap.Authority{ - testAuthority: {XDSServer: serverConfigs[1]}, - }, - }, defaultWatchExpiryTimeout, idleTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - t.Cleanup(client.Close) - - // Start a watch on the authority, and cancel it. This puts the authority in - // the idle cache. - resourceName := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName, nil) - ctrl1, ok1, cancelWatch1 := watchAndFetchNewController(t, client, resourceName, ctrlCh) - if !ok1 { - t.Fatalf("want a new controller to be built, got none") - } - cancelWatch1() - - // Start another watch on this authority, it should retrieve the authority - // from the cache, instead of creating a new one. - resourceNameWithAuthority := xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority, testCDSName+"1", nil) - ctrl2, ok2, _ := watchAndFetchNewController(t, client, resourceNameWithAuthority, ctrlCh) - if ok2 { - t.Fatalf("an unexpected controller is built with config: %v", ctrl2.config) - } - - // Wait for double the idle timeout, the controller shouldn't be closed, - // since it was revived. - time.Sleep(idleTimeout * 2) - if ctrl1.done.HasFired() { - t.Fatalf("controller that was revived is closed after timeout, want not closed") - } -} diff --git a/xds/internal/xdsclient/e2e_test/authority_test.go b/xds/internal/xdsclient/e2e_test/authority_test.go new file mode 100644 index 000000000000..93b63636dc68 --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/authority_test.go @@ -0,0 +1,315 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e_test + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +const ( + testAuthority1 = "test-authority1" + testAuthority2 = "test-authority2" + testAuthority3 = "test-authority3" +) + +var ( + // These two resources use `testAuthority1`, which contains an empty server + // config in the bootstrap file, and therefore will use the default + // management server. 
+ authorityTestResourceName11 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority1, cdsName+"1", nil) + authorityTestResourceName12 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority1, cdsName+"2", nil) + // This resource uses `testAuthority2`, which contains an empty server + // config in the bootstrap file, and therefore will use the default + // management server. + authorityTestResourceName2 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority2, cdsName+"3", nil) + // This resource uses `testAuthority3`, which contains a non-empty server + // config in the bootstrap file, and therefore will use the non-default + // management server. + authorityTestResourceName3 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority3, cdsName+"3", nil) +) + +// setupForAuthorityTests spins up two management servers, one to act as the +// default and the other to act as the non-default. It also generates a +// bootstrap configuration with three authorities (the first two pointing to the +// default and the third one pointing to the non-default). +// +// Returns two listeners used by the default and non-default management servers +// respectively, and the xDS client. +func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time.Duration) (*testutils.ListenerWrapper, *testutils.ListenerWrapper, xdsclient.XDSClient) { + overrideFedEnvVar(t) + + // Create listener wrappers which notify on to a channel whenever a new + // connection is accepted. We use this to track the number of transports + // used by the xDS client. + lisDefault := testutils.NewListenerWrapper(t, nil) + lisNonDefault := testutils.NewListenerWrapper(t, nil) + + // Start a management server to act as the default authority. 
+ defaultAuthorityServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{Listener: lisDefault}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(func() { defaultAuthorityServer.Stop() }) + + // Start a management server to act as the non-default authority. + nonDefaultAuthorityServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{Listener: lisNonDefault}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(func() { nonDefaultAuthorityServer.Stop() }) + + // Create a bootstrap configuration with two non-default authorities which + // have empty server configs, and therefore end up using the default server + // config, which points to the above management server. + nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: defaultAuthorityServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + Authorities: map[string]*bootstrap.Authority{ + testAuthority1: {}, + testAuthority2: {}, + testAuthority3: { + XDSServer: &bootstrap.ServerConfig{ + ServerURI: nonDefaultAuthorityServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, + }, + }, defaultTestWatchExpiryTimeout, idleTimeout) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + t.Cleanup(func() { client.Close() }) + + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + e2e.DefaultCluster(authorityTestResourceName11, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(authorityTestResourceName12, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(authorityTestResourceName2, edsName, e2e.SecurityLevelNone), + 
e2e.DefaultCluster(authorityTestResourceName3, edsName, e2e.SecurityLevelNone), + }, + SkipValidation: true, + } + if err := defaultAuthorityServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + return lisDefault, lisNonDefault, client +} + +// TestAuthorityShare tests the authority sharing logic. The test verifies the +// following scenarios: +// - A watch for a resource name with an authority matching an existing watch +// should not result in a new transport being created. +// - A watch for a resource name with different authority name but same +// authority config as an existing watch should not result in a new transport +// being created. +func (s) TestAuthorityShare(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lis, _, client := setupForAuthorityTests(ctx, t, time.Duration(0)) + + // Verify that no connection is established to the management server at this + // point. A transport is created only when a resource (which belongs to that + // authority) is requested. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Request the first resource. Verify that a new transport is created. + cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel1() + if _, err := lis.NewConnCh.Receive(ctx); err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + + // Request the second resource. Verify that no new transport is created. 
+ cdsCancel2 := client.WatchCluster(authorityTestResourceName12, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel2() + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Request the third resource. Verify that no new transport is created. + cdsCancel3 := client.WatchCluster(authorityTestResourceName2, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel3() + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } +} + +// TestAuthorityIdle test the authority idle timeout logic. The test verifies +// that the xDS client does not close authorities immediately after the last +// watch is canceled, but waits for the configured idle timeout to expire before +// closing them. +func (s) TestAuthorityIdleTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lis, _, client := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + + // Request the first resource. Verify that a new transport is created. + cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + val, err := lis.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Request the second resource. Verify that no new transport is created. 
+ cdsCancel2 := client.WatchCluster(authorityTestResourceName12, func(u xdsresource.ClusterUpdate, err error) {}) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Cancel both watches, and verify that the connection to the management + // server is not closed immediately. + cdsCancel1() + cdsCancel2() + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Connection to management server closed unexpectedly") + } + + // Wait for the authority idle timeout to fire. + time.Sleep(2 * defaultTestIdleAuthorityTimeout) + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != nil { + t.Fatal("Connection to management server not closed after idle timeout expiry") + } +} + +// TestAuthorityClientClose verifies that authorities in use and in the idle +// cache are all closed when the client is closed. +func (s) TestAuthorityClientClose(t *testing.T) { + // Set the authority idle timeout to twice the defaultTestTimeout. This will + // ensure that idle authorities stay in the cache for the duration of this + // test, until explicitly closed. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lisDefault, lisNonDefault, client := setupForAuthorityTests(ctx, t, time.Duration(2*defaultTestTimeout)) + + // Request the first resource. Verify that a new transport is created to the + // default management server. 
+ cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + val, err := lisDefault.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + connDefault := val.(*testutils.ConnWrapper) + + // Request another resource which is served by the non-default authority. + // Verify that a new transport is created to the non-default management + // server. + client.WatchCluster(authorityTestResourceName3, func(u xdsresource.ClusterUpdate, err error) {}) + val, err = lisNonDefault.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + connNonDefault := val.(*testutils.ConnWrapper) + + // Cancel the first watch. This should move the default authority to the + // idle cache, but the connection should not be closed yet, because the idle + // timeout would not have fired. + cdsCancel1() + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := connDefault.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Connection to management server closed unexpectedly") + } + + // Closing the xDS client should close the connection to both management + // servers, even though we have an open watch to one of them. + client.Close() + if _, err := connDefault.CloseCh.Receive(ctx); err != nil { + t.Fatal("Connection to management server not closed after client close") + } + if _, err := connNonDefault.CloseCh.Receive(ctx); err != nil { + t.Fatal("Connection to management server not closed after client close") + } +} + +// TestAuthorityRevive verifies that an authority in the idle cache is revived +// when a new watch is started on this authority. 
+func (s) TestAuthorityRevive(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lis, _, client := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + + // Request the first resource. Verify that a new transport is created. + cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + val, err := lis.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Cancel the above watch. This should move the authority to the idle cache. + cdsCancel1() + + // Request the second resource. Verify that no new transport is created. + // This should move the authority out of the idle cache. + cdsCancel2 := client.WatchCluster(authorityTestResourceName12, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel2() + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Wait for double the idle timeout, and the connection to the management + // server should not be closed, since it was revived from the idle cache. 
+ time.Sleep(2 * defaultTestIdleAuthorityTimeout) + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Connection to management server closed unexpectedly") + } +} diff --git a/xds/internal/xdsclient/watchers_test.go b/xds/internal/xdsclient/watchers_test.go index 5e2cdb123eff..87789d11ebc9 100644 --- a/xds/internal/xdsclient/watchers_test.go +++ b/xds/internal/xdsclient/watchers_test.go @@ -22,6 +22,7 @@ import ( "fmt" "testing" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -30,6 +31,12 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) +func overrideFedEnvVar(t *testing.T) { + oldFed := envconfig.XDSFederation + envconfig.XDSFederation = true + t.Cleanup(func() { envconfig.XDSFederation = oldFed }) +} + // testClientSetup sets up the client and controller for the test. It returns a // newly created client, and a channel where new controllers will be sent to. 
func testClientSetup(t *testing.T, overrideWatchExpiryTimeout bool) (*clientImpl, *testutils.Channel) { From 32f969e8f3f94359b589d85d27a8dd5cbd5c003b Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 4 Nov 2022 18:03:17 -0400 Subject: [PATCH 660/998] o11y: Added started rpc metric in o11y plugin (#5768) --- gcp/observability/go.mod | 2 +- gcp/observability/go.sum | 12 ++++++++++-- gcp/observability/observability_test.go | 7 +++++++ gcp/observability/opencensus.go | 4 ++-- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index a164fa48c23c..f0f34ac8ef58 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -7,7 +7,7 @@ require ( contrib.go.opencensus.io/exporter/stackdriver v0.13.12 github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.3.0 - go.opencensus.io v0.23.0 + go.opencensus.io v0.24.0 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 google.golang.org/grpc v1.46.0 ) diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index ff0b941c7c48..50f860099c78 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -68,8 +68,9 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -179,9 +180,14 @@ github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -193,8 +199,9 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -541,6 +548,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 1ea0424b7f5e..b4ef9774282a 100644 --- 
a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -376,6 +376,13 @@ func (s) TestOpenCensusIntegration(t *testing.T) { for ctx.Err() == nil { errs = nil fe.mu.RLock() + if value := fe.SeenViews["grpc.io/client/started_rpcs"]; value != TypeOpenCensusViewCount { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/started_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + if value := fe.SeenViews["grpc.io/server/started_rpcs"]; value != TypeOpenCensusViewCount { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/started_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + if value := fe.SeenViews["grpc.io/client/completed_rpcs"]; value != TypeOpenCensusViewCount { errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) } diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 08b853d2adc9..482c8d4f0078 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -104,10 +104,10 @@ func startOpenCensus(config *config) error { } if config.CloudMonitoring != nil { - if err := view.Register(ocgrpc.ClientCompletedRPCsView); err != nil { + if err := view.Register(ocgrpc.ServerStartedRPCsView, ocgrpc.ClientCompletedRPCsView); err != nil { return fmt.Errorf("failed to register default client views: %v", err) } - if err := view.Register(ocgrpc.ServerCompletedRPCsView); err != nil { + if err := view.Register(ocgrpc.ClientStartedRPCsView, ocgrpc.ServerCompletedRPCsView); err != nil { return fmt.Errorf("failed to register default server views: %v", err) } view.SetReportingPeriod(defaultMetricsReportingInterval) From 7f23df022299ea52c9cd00ebe77f5f5cccbb85dc Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 4 Nov 2022 15:13:52 -0700 Subject: [PATCH 661/998] xdsclient: switch xdsclient watch deadlock test to e2e style (#5697) --- internal/testutils/xds/e2e/server.go 
| 15 +- xds/internal/xdsclient/client_test.go | 44 ------ .../xdsclient/e2e_test/misc_watchers_test.go | 133 ++++++++++++++++++ 3 files changed, 146 insertions(+), 46 deletions(-) create mode 100644 xds/internal/xdsclient/e2e_test/misc_watchers_test.go diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go index efe68be299b5..38eb01adea8d 100644 --- a/internal/testutils/xds/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -60,6 +60,14 @@ type ManagementServerOptions struct { // will be created and used. Listener net.Listener + // AllowResourceSubSet allows the management server to respond to requests + // before all configured resources are explicitly named in the request. The + // default behavior that we want is for the management server to wait for + // all configured resources to be requested before responding to any of + // them, since this is how we have run our tests historically, and should be + // set to true only for tests which explicitly require the other behavior. + AllowResourceSubset bool + // The callbacks defined below correspond to the state of the world (sotw) // version of the xDS API on the management server. @@ -97,8 +105,11 @@ type ManagementServerOptions struct { // logic. When the test is done, it should call the Stop() method to cleanup // resources allocated by the management server. func StartManagementServer(opts *ManagementServerOptions) (*ManagementServer, error) { - // Create a snapshot cache. - cache := v3cache.NewSnapshotCache(true, v3cache.IDHash{}, serverLogger{}) + // Create a snapshot cache. The first parameter to NewSnapshotCache() + // controls whether the server should wait for all resources to be + // explicitly named in the request before responding to any of them. 
+ wait := opts == nil || !opts.AllowResourceSubset + cache := v3cache.NewSnapshotCache(wait, v3cache.IDHash{}, serverLogger{}) logger.Infof("Created new snapshot cache...") var lis net.Listener diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index d496aa59adc3..9501a154c018 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/pubsub" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -148,49 +147,6 @@ func (c *testController) Close() { c.done.Fire() } -// TestWatchCallAnotherWatch covers the case where watch() is called inline by a -// callback. It makes sure it doesn't cause a deadlock. -func (s) TestWatchCallAnotherWatch(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Start a watch for some resource, so that the controller and update - // handlers are built for this authority. The test needs these to make an - // inline watch in a callback. - client, ctrlCh := testClientSetup(t, false) - newWatch(t, client, xdsresource.ClusterResource, "doesnot-matter") - controller, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, "doesnot-matter") - - clusterUpdateCh := testutils.NewChannel() - firstTime := true - client.WatchCluster(testCDSName, func(update xdsresource.ClusterUpdate, err error) { - clusterUpdateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - // Calls another watch inline, to ensure there's deadlock. 
- client.WatchCluster("another-random-name", func(xdsresource.ClusterUpdate, error) {}) - - if _, err := controller.addWatches[xdsresource.ClusterResource].Receive(ctx); firstTime && err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - firstTime = false - }) - if _, err := controller.addWatches[xdsresource.ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName} - updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate}}, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil { - t.Fatal(err) - } - - // The second update needs to be different in the underlying resource proto - // for the watch callback to be invoked. - wantUpdate2 := xdsresource.ClusterUpdate{ClusterName: testEDSName + "2", Raw: &anypb.Any{}} - updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {Update: wantUpdate2}}, xdsresource.UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2, nil); err != nil { - t.Fatal(err) - } -} - func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ListenerUpdate, wantErr error) error { u, err := updateCh.Receive(ctx) if err != nil { diff --git a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go new file mode 100644 index 000000000000..a22970ccdab3 --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e_test + +import ( + "context" + "testing" + + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" +) + +// TestWatchCallAnotherWatch tests the scenario where a watch is registered for +// a resource, and more watches are registered from the first watch's callback. +// The test verifies that this scenario does not lead to a deadlock. +func (s) TestWatchCallAnotherWatch(t *testing.T) { + overrideFedEnvVar(t) + + // Start an xDS management server and set the option to allow it to respond + // to requests which only specify a subset of the configured resources. + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Configure the management server to respond with route config resources. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{ + e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, cdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Start a watch for one route configuration resource. From the watch + // callback of the first resource, register two more watches (one for the + // same resource name, which would be satisfied from the cache, and another + // for a different resource name, which would be satisfied from the server). + updateCh1 := testutils.NewChannel() + updateCh2 := testutils.NewChannel() + updateCh3 := testutils.NewChannel() + var rdsCancel2, rdsCancel3 func() + rdsCancel1 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + // Watch for the same resource name. + rdsCancel2 = client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + t.Cleanup(rdsCancel2) + // Watch for a different resource name. + rdsCancel3 = client.WatchRouteConfig(rdsNameNewStyle, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh3.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + rdsCancel3() + }) + t.Cleanup(rdsCancel3) + }) + // defer rdsCancel1() + t.Cleanup(rdsCancel1) + + // Verify the contents of the received update for the all watchers. 
+ wantUpdate12 := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + } + wantUpdate3 := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh3, wantUpdate3); err != nil { + t.Fatal(err) + } +} From 824f44910d8c300989893d0b3a8ddbea6bee9c8f Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 7 Nov 2022 07:51:22 -0800 Subject: [PATCH 662/998] go.mod: upgrade x/text to v0.4 to address CVE (#5769) --- examples/go.sum | 24 +++++++++++++++++++++--- gcp/observability/go.mod | 4 +--- gcp/observability/go.sum | 20 ++++++++++++++++---- go.mod | 6 +++--- go.sum | 24 +++++++++++++++++++++--- security/advancedtls/examples/go.sum | 26 ++++++++++++++++++++++---- security/advancedtls/go.mod | 2 +- security/advancedtls/go.sum | 26 ++++++++++++++++++++++---- 8 files changed, 107 insertions(+), 25 deletions(-) diff --git a/examples/go.sum b/examples/go.sum index 8508780ea80f..670eaca43abb 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -49,34 +49,52 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index f0f34ac8ef58..396622b97d81 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -9,9 +9,7 @@ require ( github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8 - google.golang.org/grpc v1.46.0 + google.golang.org/grpc v1.50.1 ) -// TODO(lidiz) remove the following line when we have a release containing the -// necessary internal binary logging changes replace google.golang.org/grpc => ../../ diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 50f860099c78..fbee489030a5 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -193,6 +193,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -208,6 +209,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -242,6 +244,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -275,8 +278,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -304,8 +308,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -346,15 +351,19 @@ golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -362,8 +371,10 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -417,6 +428,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/go.mod b/go.mod index 3ad611bf2d9a..45960178706a 100644 --- a/go.mod +++ b/go.mod @@ -11,9 +11,9 @@ require ( github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20201021035429-f5854403a974 + golang.org/x/net v0.0.0-20220722155237-a158d28d115b golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 google.golang.org/protobuf v1.27.1 ) @@ -22,7 +22,7 @@ require ( cloud.google.com/go v0.34.0 // indirect 
github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect - golang.org/x/text v0.3.3 // indirect + golang.org/x/text v0.4.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index be33e4b1e355..d3d81e7fbcd8 100644 --- a/go.sum +++ b/go.sum @@ -63,22 +63,28 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -86,21 +92,33 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 2918eee66e0f..238fb5ce652a 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -40,34 +40,52 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index b8904e21484e..65ac445d405b 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -4,7 +4,7 @@ go 1.14 require ( github.com/hashicorp/golang-lru v0.5.4 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 google.golang.org/grpc v1.38.0 
google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 2918eee66e0f..238fb5ce652a 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -40,34 +40,52 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 0ae33e69dc6542a4e7a92f30e335376431d2ea4d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 7 Nov 2022 09:52:52 -0800 Subject: [PATCH 663/998] xdsclient: remove unused test code (#5772) --- xds/internal/xdsclient/client_test.go | 177 -------- xds/internal/xdsclient/watchers_test.go | 554 ------------------------ 2 files changed, 731 deletions(-) diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 9501a154c018..b5073d76250a 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -19,27 +19,10 @@ package xdsclient import ( - "context" - "fmt" - "strings" "testing" "time" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/protobuf/testing/protocmp" ) type s struct { @@ -54,167 +37,7 @@ const ( testXDSServer = "xds-server" testXDSServerAuthority = "xds-server-authority" - testAuthority = "test-authority" - testAuthority2 = "test-authority-2" - testLDSName = "test-lds" - testRDSName = "test-rds" - testCDSName = "test-cds" - testEDSName = "test-eds" - defaultTestWatchExpiryTimeout = 500 * 
time.Millisecond defaultTestTimeout = 5 * time.Second defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. ) - -func newStringP(s string) *string { - return &s -} - -func clientOpts() *bootstrap.Config { - return &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - Authorities: map[string]*bootstrap.Authority{ - testAuthority: { - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServerAuthority, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - }, - }, - } -} - -type testController struct { - // config is the config this controller is created with. - config *bootstrap.ServerConfig - - done *grpcsync.Event - addWatches map[xdsresource.ResourceType]*testutils.Channel - removeWatches map[xdsresource.ResourceType]*testutils.Channel -} - -func overrideNewController(t *testing.T) *testutils.Channel { - origNewController := newController - ch := testutils.NewChannel() - newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, _ func(int) time.Duration) (controllerInterface, error) { - ret := newTestController(config) - ch.Send(ret) - return ret, nil - } - t.Cleanup(func() { newController = origNewController }) - return ch -} - -func newTestController(config *bootstrap.ServerConfig) *testController { - addWatches := map[xdsresource.ResourceType]*testutils.Channel{ - xdsresource.ListenerResource: testutils.NewChannel(), - xdsresource.RouteConfigResource: testutils.NewChannel(), - xdsresource.ClusterResource: testutils.NewChannel(), - xdsresource.EndpointsResource: testutils.NewChannel(), - } - removeWatches := map[xdsresource.ResourceType]*testutils.Channel{ - xdsresource.ListenerResource: testutils.NewChannel(), - 
xdsresource.RouteConfigResource: testutils.NewChannel(), - xdsresource.ClusterResource: testutils.NewChannel(), - xdsresource.EndpointsResource: testutils.NewChannel(), - } - return &testController{ - config: config, - done: grpcsync.NewEvent(), - addWatches: addWatches, - removeWatches: removeWatches, - } -} - -func (c *testController) AddWatch(resourceType xdsresource.ResourceType, resourceName string) { - c.addWatches[resourceType].Send(resourceName) -} - -func (c *testController) RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) { - c.removeWatches[resourceType].Send(resourceName) -} - -func (c *testController) ReportLoad(server string) (*load.Store, func()) { - panic("ReportLoad is not implemented") -} - -func (c *testController) Close() { - c.done.Fire() -} - -func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ListenerUpdate, wantErr error) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for listener update: %v", err) - } - gotUpdate := u.(xdsresource.ListenerUpdateErrTuple) - if wantErr != nil { - if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { - return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) - } - return nil - } - if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, protocmp.Transform()) { - return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) - } - return nil -} - -func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.RouteConfigUpdate, wantErr error) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for route configuration update: %v", err) - } - gotUpdate := u.(xdsresource.RouteConfigUpdateErrTuple) - if wantErr != nil { - if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { - return 
fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) - } - return nil - } - if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, protocmp.Transform()) { - return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) - } - return nil -} - -func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ClusterUpdate, wantErr error) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for cluster update: %v", err) - } - gotUpdate := u.(xdsresource.ClusterUpdateErrTuple) - if wantErr != nil { - if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { - return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) - } - return nil - } - if !cmp.Equal(gotUpdate.Update, wantUpdate, protocmp.Transform()) { - return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) - } - return nil -} - -func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.EndpointsUpdate, wantErr error) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for endpoints update: %v", err) - } - gotUpdate := u.(xdsresource.EndpointsUpdateErrTuple) - if wantErr != nil { - if !strings.Contains(gotUpdate.Err.Error(), wantErr.Error()) { - return fmt.Errorf("unexpected error: %v, want %v", gotUpdate.Err, wantErr) - } - return nil - } - if gotUpdate.Err != nil || !cmp.Equal(gotUpdate.Update, wantUpdate, cmpopts.EquateEmpty(), protocmp.Transform()) { - return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.Update, gotUpdate.Err, wantUpdate) - } - return nil -} diff --git a/xds/internal/xdsclient/watchers_test.go b/xds/internal/xdsclient/watchers_test.go index 87789d11ebc9..36409821ab0d 100644 --- a/xds/internal/xdsclient/watchers_test.go 
+++ b/xds/internal/xdsclient/watchers_test.go @@ -18,74 +18,12 @@ package xdsclient import ( - "context" - "fmt" "testing" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/testutils" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/pubsub" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" ) -func overrideFedEnvVar(t *testing.T) { - oldFed := envconfig.XDSFederation - envconfig.XDSFederation = true - t.Cleanup(func() { envconfig.XDSFederation = oldFed }) -} - -// testClientSetup sets up the client and controller for the test. It returns a -// newly created client, and a channel where new controllers will be sent to. -func testClientSetup(t *testing.T, overrideWatchExpiryTimeout bool) (*clientImpl, *testutils.Channel) { - t.Helper() - ctrlCh := overrideNewController(t) - - watchExpiryTimeout := defaultWatchExpiryTimeout - if overrideWatchExpiryTimeout { - watchExpiryTimeout = defaultTestWatchExpiryTimeout - } - - client, err := newWithConfig(clientOpts(), watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - t.Cleanup(client.Close) - return client, ctrlCh -} - -// newWatch starts a new watch on the client. -func newWatch(t *testing.T, client *clientImpl, typ xdsresource.ResourceType, resourceName string) (updateCh *testutils.Channel, cancelWatch func()) { - newWatchF, _, _ := typeToTestFuncs(typ) - updateCh, cancelWatch = newWatchF(client, resourceName) - t.Cleanup(cancelWatch) - - if u, ok := updateCh.ReceiveOrFail(); ok { - t.Fatalf("received unexpected update immediately after watch: %+v", u) - } - return -} - -// getControllerAndPubsub returns the controller and pubsub for the given -// type+resourceName from the client. 
-func getControllerAndPubsub(ctx context.Context, t *testing.T, client *clientImpl, ctrlCh *testutils.Channel, typ xdsresource.ResourceType, resourceName string) (*testController, pubsub.UpdateHandler) { - c, err := ctrlCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - ctrl := c.(*testController) - - if _, err := ctrl.addWatches[typ].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - updateHandler := findPubsubForTest(t, client, xdsresource.ParseName(resourceName).Authority) - - return ctrl, updateHandler -} - // findPubsubForTest returns the pubsub for the given authority, to send updates // to. If authority is "", the default is returned. If the authority is not // found, the test will fail. @@ -107,495 +45,3 @@ func findPubsubForTest(t *testing.T, c *clientImpl, authority string) pubsub.Upd } return a.pubsub } - -var ( - newLDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { - updateCh := testutils.NewChannel() - cancelLastWatch := client.WatchListener(resourceName, func(update xdsresource.ListenerUpdate, err error) { - updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: update, Err: err}) - }) - return updateCh, cancelLastWatch - } - newLDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { - wantUpdates := map[string]xdsresource.ListenerUpdateErrTuple{} - for n, u := range updates { - wantUpdate := u.(xdsresource.ListenerUpdate) - wantUpdates[n] = xdsresource.ListenerUpdateErrTuple{Update: wantUpdate} - } - updateHandler.NewListeners(wantUpdates, xdsresource.UpdateMetadata{}) - } - verifyLDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { - t.Helper() - wantUpdate := update.(xdsresource.ListenerUpdate) - if err := verifyListenerUpdate(ctx, updateCh, wantUpdate, err); err != nil { - t.Fatal(err) - } - } - - newRDSWatchF = 
func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { - updateCh := testutils.NewChannel() - cancelLastWatch := client.WatchRouteConfig(resourceName, func(update xdsresource.RouteConfigUpdate, err error) { - updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: update, Err: err}) - }) - return updateCh, cancelLastWatch - } - newRDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { - wantUpdates := map[string]xdsresource.RouteConfigUpdateErrTuple{} - for n, u := range updates { - wantUpdate := u.(xdsresource.RouteConfigUpdate) - wantUpdates[n] = xdsresource.RouteConfigUpdateErrTuple{Update: wantUpdate} - } - updateHandler.NewRouteConfigs(wantUpdates, xdsresource.UpdateMetadata{}) - } - verifyRDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { - t.Helper() - wantUpdate := update.(xdsresource.RouteConfigUpdate) - if err := verifyRouteConfigUpdate(ctx, updateCh, wantUpdate, err); err != nil { - t.Fatal(err) - } - } - - newCDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { - updateCh := testutils.NewChannel() - cancelLastWatch := client.WatchCluster(resourceName, func(update xdsresource.ClusterUpdate, err error) { - updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: update, Err: err}) - }) - return updateCh, cancelLastWatch - } - newCDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { - wantUpdates := map[string]xdsresource.ClusterUpdateErrTuple{} - for n, u := range updates { - wantUpdate := u.(xdsresource.ClusterUpdate) - wantUpdates[n] = xdsresource.ClusterUpdateErrTuple{Update: wantUpdate} - } - updateHandler.NewClusters(wantUpdates, xdsresource.UpdateMetadata{}) - } - verifyCDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { - t.Helper() - wantUpdate := update.(xdsresource.ClusterUpdate) - if err := 
verifyClusterUpdate(ctx, updateCh, wantUpdate, err); err != nil { - t.Fatal(err) - } - } - - newEDSWatchF = func(client *clientImpl, resourceName string) (*testutils.Channel, func()) { - updateCh := testutils.NewChannel() - cancelLastWatch := client.WatchEndpoints(resourceName, func(update xdsresource.EndpointsUpdate, err error) { - updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: update, Err: err}) - }) - return updateCh, cancelLastWatch - } - newEDSUpdateF = func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}) { - wantUpdates := map[string]xdsresource.EndpointsUpdateErrTuple{} - for n, u := range updates { - wantUpdate := u.(xdsresource.EndpointsUpdate) - wantUpdates[n] = xdsresource.EndpointsUpdateErrTuple{Update: wantUpdate} - } - updateHandler.NewEndpoints(wantUpdates, xdsresource.UpdateMetadata{}) - } - verifyEDSUpdateF = func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error) { - t.Helper() - wantUpdate := update.(xdsresource.EndpointsUpdate) - if err := verifyEndpointsUpdate(ctx, updateCh, wantUpdate, err); err != nil { - t.Fatal(err) - } - } -) - -func typeToTestFuncs(typ xdsresource.ResourceType) ( - newWatchF func(client *clientImpl, resourceName string) (*testutils.Channel, func()), - newUpdateF func(updateHandler pubsub.UpdateHandler, updates map[string]interface{}), - verifyUpdateF func(ctx context.Context, t *testing.T, updateCh *testutils.Channel, update interface{}, err error), -) { - switch typ { - case xdsresource.ListenerResource: - newWatchF = newLDSWatchF - newUpdateF = newLDSUpdateF - verifyUpdateF = verifyLDSUpdateF - case xdsresource.RouteConfigResource: - newWatchF = newRDSWatchF - newUpdateF = newRDSUpdateF - verifyUpdateF = verifyRDSUpdateF - case xdsresource.ClusterResource: - newWatchF = newCDSWatchF - newUpdateF = newCDSUpdateF - verifyUpdateF = verifyCDSUpdateF - case xdsresource.EndpointsResource: - newWatchF = newEDSWatchF - newUpdateF = newEDSUpdateF - 
verifyUpdateF = verifyEDSUpdateF - } - return -} - -// TestClusterWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name -// - an update is received after cancel() -func testWatch(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { - overrideFedEnvVar(t) - for _, rName := range []string{resourceName, xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil)} { - t.Run(rName, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - updateCh, cancelWatch := newWatch(t, client, typ, rName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName) - _, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) - - // Send an update, and check the result. - newUpdateF(updateHandler, map[string]interface{}{rName: update}) - verifyUpdateF(ctx, t, updateCh, update, nil) - - // Push an update, with an extra resource for a different resource name. - // Specify a non-nil raw proto in the original resource to ensure that the - // new update is not considered equal to the old one. - var newUpdate interface{} - switch typ { - case xdsresource.ListenerResource: - newU := update.(xdsresource.ListenerUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.RouteConfigResource: - newU := update.(xdsresource.RouteConfigUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.ClusterResource: - newU := update.(xdsresource.ClusterUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.EndpointsResource: - newU := update.(xdsresource.EndpointsUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - } - newUpdateF(updateHandler, map[string]interface{}{rName: newUpdate}) - verifyUpdateF(ctx, t, updateCh, newUpdate, nil) - - // Cancel watch, and send update again. 
- cancelWatch() - newUpdateF(updateHandler, map[string]interface{}{rName: update}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err) - } - }) - } -} - -// testClusterTwoWatchSameResourceName covers the case where an update is -// received after two watch() for the same resource name. -func testTwoWatchSameResourceName(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { - overrideFedEnvVar(t) - for _, rName := range []string{resourceName, xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil)} { - t.Run(rName, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - updateCh, _ := newWatch(t, client, typ, resourceName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, resourceName) - newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) - - updateChs := []*testutils.Channel{updateCh} - var cancelLastWatch func() - const count = 1 - for i := 0; i < count; i++ { - var updateCh *testutils.Channel - updateCh, cancelLastWatch = newWatchF(client, resourceName) - updateChs = append(updateChs, updateCh) - } - - newUpdateF(updateHandler, map[string]interface{}{resourceName: update}) - for i := 0; i < count+1; i++ { - verifyUpdateF(ctx, t, updateChs[i], update, nil) - } - - // Cancel the last watch, and send update again. None of the watchers should - // be notified because one has been cancelled, and the other is receiving - // the same update. 
- cancelLastWatch() - newUpdateF(updateHandler, map[string]interface{}{resourceName: update}) - for i := 0; i < count+1; i++ { - func() { - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := updateChs[i].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err) - } - }() - } - - // Push a new update and make sure the uncancelled watcher is invoked. - // Specify a non-nil raw proto to ensure that the new update is not - // considered equal to the old one. - var newUpdate interface{} - switch typ { - case xdsresource.ListenerResource: - newU := update.(xdsresource.ListenerUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.RouteConfigResource: - newU := update.(xdsresource.RouteConfigUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.ClusterResource: - newU := update.(xdsresource.ClusterUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.EndpointsResource: - newU := update.(xdsresource.EndpointsUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - } - newUpdateF(updateHandler, map[string]interface{}{resourceName: newUpdate}) - verifyUpdateF(ctx, t, updateCh, newUpdate, nil) - }) - } -} - -// testThreeWatchDifferentResourceName starts two watches for name1, and one -// watch for name2. This test verifies that two watches for name1 receive the -// same update, and name2 watch receives a different update. 
-func testThreeWatchDifferentResourceName(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) { - overrideFedEnvVar(t) - for _, rName := range [][]string{ - {resourceName1, resourceName2}, - {xdstestutils.BuildResourceName(typ, testAuthority, resourceName1, nil), xdstestutils.BuildResourceName(typ, testAuthority, resourceName2, nil)}, - } { - t.Run(rName[0], func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - updateCh, _ := newWatch(t, client, typ, rName[0]) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0]) - newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) - - // Two watches for the same name. - updateChs := []*testutils.Channel{updateCh} - const count = 1 - for i := 0; i < count; i++ { - var updateCh *testutils.Channel - updateCh, _ = newWatchF(client, rName[0]) - updateChs = append(updateChs, updateCh) - } - // Third watch for a different name. - updateCh2, _ := newWatchF(client, rName[1]) - - newUpdateF(updateHandler, map[string]interface{}{ - rName[0]: update1, - rName[1]: update2, - }) - - // The first several watches for the same resource should all - // receive the first update. - for i := 0; i < count+1; i++ { - verifyUpdateF(ctx, t, updateChs[i], update1, nil) - } - // The last watch for the different resource should receive the - // second update. - verifyUpdateF(ctx, t, updateCh2, update2, nil) - }) - } -} - -// testWatchAfterCache covers the case where watch is called after the update is -// in cache. 
-func testWatchAfterCache(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { - overrideFedEnvVar(t) - for _, rName := range []string{resourceName, xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil)} { - t.Run(rName, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - updateCh, _ := newWatch(t, client, typ, rName) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName) - newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) - - newUpdateF(updateHandler, map[string]interface{}{rName: update}) - verifyUpdateF(ctx, t, updateCh, update, nil) - - // Another watch for the resource in cache. - updateCh2, _ := newWatchF(client, rName) - - // New watch should receive the update. - verifyUpdateF(ctx, t, updateCh2, update, nil) - - // Old watch should see nothing. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected update: %v, %v, want channel recv timeout", u, err) - } - }) - } -} - -// testResourceRemoved covers the cases: -// - an update is received after a watch() -// - another update is received, with one resource removed -// - this should trigger callback with resource removed error -// -// - one more update without the removed resource -// - the callback (above) shouldn't receive any update -func testResourceRemoved(t *testing.T, typ xdsresource.ResourceType, update1 interface{}, resourceName1 string, update2 interface{}, resourceName2 string) { - overrideFedEnvVar(t) - for _, rName := range [][]string{ - {resourceName1, resourceName2}, - {xdstestutils.BuildResourceName(typ, testAuthority, resourceName1, nil), xdstestutils.BuildResourceName(typ, testAuthority, resourceName2, nil)}, - } { - t.Run(rName[0], func(t *testing.T) { - ctx, cancel := 
context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - updateCh, _ := newWatch(t, client, typ, rName[0]) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0]) - newWatchF, newUpdateF, verifyUpdateF := typeToTestFuncs(typ) - - // Another watch for a different name. - updateCh2, _ := newWatchF(client, rName[1]) - - newUpdateF(updateHandler, map[string]interface{}{ - rName[0]: update1, - rName[1]: update2, - }) - verifyUpdateF(ctx, t, updateCh, update1, nil) - verifyUpdateF(ctx, t, updateCh2, update2, nil) - - // Send another update to remove resource 1. - newUpdateF(updateHandler, map[string]interface{}{ - rName[1]: update2, - }) - - // Watcher 1 should get an error. - if u, err := updateCh.Receive(ctx); err != nil { - t.Errorf("failed to receive update: %v", err) - } else { - var gotErr error - switch typ { - case xdsresource.ListenerResource: - newU := u.(xdsresource.ListenerUpdateErrTuple) - gotErr = newU.Err - case xdsresource.RouteConfigResource: - newU := u.(xdsresource.RouteConfigUpdateErrTuple) - gotErr = newU.Err - case xdsresource.ClusterResource: - newU := u.(xdsresource.ClusterUpdateErrTuple) - gotErr = newU.Err - case xdsresource.EndpointsResource: - newU := u.(xdsresource.EndpointsUpdateErrTuple) - gotErr = newU.Err - } - if xdsresource.ErrType(gotErr) != xdsresource.ErrorTypeResourceNotFound { - t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) - } - } - - // Watcher 2 should not see an update since the resource has not changed. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := updateCh2.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ClusterUpdate: %v, want receiving from channel timeout", u) - } - - // Send another update with resource 2 modified. 
Specify a non-nil raw proto - // to ensure that the new update is not considered equal to the old one. - var newUpdate interface{} - switch typ { - case xdsresource.ListenerResource: - newU := update2.(xdsresource.ListenerUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.RouteConfigResource: - newU := update2.(xdsresource.RouteConfigUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.ClusterResource: - newU := update2.(xdsresource.ClusterUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - case xdsresource.EndpointsResource: - newU := update2.(xdsresource.EndpointsUpdate) - newU.Raw = &anypb.Any{} - newUpdate = newU - } - newUpdateF(updateHandler, map[string]interface{}{ - rName[1]: newUpdate, - }) - - // Watcher 1 should not see an update. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected Cluster: %v, want receiving from channel timeout", u) - } - - // Watcher 2 should get the update. - verifyUpdateF(ctx, t, updateCh2, newUpdate, nil) - }) - } -} - -// testWatchPartialValid covers the case that a response contains both -// valid and invalid resources. This response will be NACK'ed by the xdsclient. -// But the watchers with valid resources should receive the update, those with -// invalid resources should receive an error. 
-func testWatchPartialValid(t *testing.T, typ xdsresource.ResourceType, update interface{}, resourceName string) { - overrideFedEnvVar(t) - const badResourceName = "bad-resource" - - for _, rName := range [][]string{ - {resourceName, badResourceName}, - {xdstestutils.BuildResourceName(typ, testAuthority, resourceName, nil), xdstestutils.BuildResourceName(typ, testAuthority, badResourceName, nil)}, - } { - t.Run(rName[0], func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - client, ctrlCh := testClientSetup(t, false) - updateCh, _ := newWatch(t, client, typ, rName[0]) - _, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, typ, rName[0]) - newWatchF, _, verifyUpdateF := typeToTestFuncs(typ) - - updateChs := map[string]*testutils.Channel{ - rName[0]: updateCh, - } - - for _, name := range []string{rName[1]} { - updateChT, _ := newWatchF(client, rName[1]) - updateChs[name] = updateChT - } - - wantError := fmt.Errorf("testing error") - wantError2 := fmt.Errorf("individual error") - - switch typ { - case xdsresource.ListenerResource: - updateHandler.NewListeners(map[string]xdsresource.ListenerUpdateErrTuple{ - rName[0]: {Update: update.(xdsresource.ListenerUpdate)}, - rName[1]: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - case xdsresource.RouteConfigResource: - updateHandler.NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple{ - rName[0]: {Update: update.(xdsresource.RouteConfigUpdate)}, - rName[1]: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - case xdsresource.ClusterResource: - updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{ - rName[0]: {Update: update.(xdsresource.ClusterUpdate)}, - rName[1]: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - case 
xdsresource.EndpointsResource: - updateHandler.NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple{ - rName[0]: {Update: update.(xdsresource.EndpointsUpdate)}, - rName[1]: {Err: wantError2}, - }, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}}) - } - - // The valid resource should be sent to the watcher. - verifyUpdateF(ctx, t, updateChs[rName[0]], update, nil) - - // The failed watcher should receive an error. - verifyUpdateF(ctx, t, updateChs[rName[1]], update, wantError2) - }) - } -} From 72812fe3aa93756aca9382ff07d0a3a54eff0b96 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 7 Nov 2022 18:32:07 -0500 Subject: [PATCH 664/998] gcp/observability: filter logging from cloud ops endpoints calls (#5765) --- gcp/observability/logging.go | 5 +++ gcp/observability/logging_test.go | 60 +++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index dee2656f7b84..dcd7bf848fd7 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -338,6 +338,11 @@ type binaryLogger struct { } func (bl *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { + // Prevent logging from logging, traces, and metrics API calls. 
+ if strings.HasPrefix(methodName, "/google.logging.v2.LoggingServiceV2/") || strings.HasPrefix(methodName, "/google.monitoring.v3.MetricService/") || + strings.HasPrefix(methodName, "/google.devtools.cloudtrace.v2.TraceService/") { + return nil + } s, _, err := grpcutil.ParseMethod(methodName) if err != nil { logger.Infof("binarylogging: failed to parse %q: %v", methodName, err) diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index 03638a9b49d7..91c109678818 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -166,6 +166,7 @@ func (s) TestClientRPCEventsLogAll(t *testing.T) { if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } + grpcLogEntriesWant := []*grpcLogEntry{ { Type: eventTypeClientHeader, @@ -1056,6 +1057,65 @@ func (s) TestTranslateMetadata(t *testing.T) { } } +// TestCloudLoggingAPICallsFiltered tests that the observability plugin does not +// emit logs for cloud logging API calls. 
+func (s) TestCloudLoggingAPICallsFiltered(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + configLogAll := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(configLogAll) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{} + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Any of the three cloud logging API calls should not cause any logs to be + // emitted, even though the configuration specifies to log any rpc + // regardless of method. + req := &grpc_testing.SimpleRequest{} + resp := &grpc_testing.SimpleResponse{} + + ss.CC.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/some-method", req, resp) + ss.CC.Invoke(ctx, "/google.monitoring.v3.MetricService/some-method", req, resp) + ss.CC.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/some-method", req, resp) + // The exporter should have received no new log entries due to these three + // calls, as they should be filtered out. 
+ fle.mu.Lock() + defer fle.mu.Unlock() + if len(fle.entries) != 0 { + t.Fatalf("Unexpected length of entries %v, want 0", len(fle.entries)) + } +} + func (s) TestMarshalJSON(t *testing.T) { logEntry := &grpcLogEntry{ CallID: "300-300-300", From 81db25066bf395f3f49a8a8125eb199547d03469 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 8 Nov 2022 18:44:59 -0500 Subject: [PATCH 665/998] Change version to 1.52.0-dev (#5784) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 4a717069f310..02de108d42f0 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.51.0-dev" +const Version = "1.52.0-dev" From 5331dbd3ab12ba6a10242d03ef20c3de8a14204a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 9 Nov 2022 15:01:44 -0800 Subject: [PATCH 666/998] outlierdetection: remove an unused variable in a test (#5778) --- xds/internal/balancer/outlierdetection/balancer_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go index 15e85fdb4661..8b86ebbb19f2 100644 --- a/xds/internal/balancer/outlierdetection/balancer_test.go +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -725,8 +725,6 @@ func (s) TestDurationOfInterval(t *testing.T) { }, }) - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() // No timer should have been started. 
sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer cancel() From 689d061d46c76f52f6a965ef621f988f15655200 Mon Sep 17 00:00:00 2001 From: littlejian <17816869670@163.com> Date: Thu, 10 Nov 2022 15:06:01 +0800 Subject: [PATCH 667/998] Cleanup usages of resolver.Target's Scheme and Authority (#5761) --- internal/resolver/dns/dns_resolver.go | 4 ++-- internal/resolver/dns/dns_resolver_test.go | 8 +++++++- internal/resolver/unix/unix.go | 4 ++-- stream.go | 2 +- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 75301c514913..b08ac30adfef 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 69307a981cf2..6bfcf299b33c 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "net" + "net/url" "os" "reflect" "strings" @@ -1439,7 +1440,12 @@ func TestCustomAuthority(t *testing.T) { b := NewBuilder() cc := &testClientConn{target: "foo.bar.com", errChan: make(chan error, 1)} - r, err := b.Build(resolver.Target{Endpoint: "foo.bar.com", Authority: a.authority}, cc, resolver.BuildOptions{}) + target := resolver.Target{ + Endpoint: "foo.bar.com", + Authority: a.authority, + URL: url.URL{Host: a.authority}, + } + r, err := b.Build(target, cc, resolver.BuildOptions{}) if err == nil { r.Close() diff --git 
a/internal/resolver/unix/unix.go b/internal/resolver/unix/unix.go index 7f1a702cacbe..160911687738 100644 --- a/internal/resolver/unix/unix.go +++ b/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/stream.go b/stream.go index 960c3e33dfd3..0f8e6c0149da 100644 --- a/stream.go +++ b/stream.go @@ -416,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( From 457c2f5481f38d6b50a5e9ea8809770ab01ac3fb Mon Sep 17 00:00:00 2001 From: Antoine Tollenaere Date: Thu, 10 Nov 2022 22:56:40 +0100 Subject: [PATCH 668/998] benchmark: use default buffer sizes (#5762) --- benchmark/benchmark.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index a8ae40fa6ada..e75f8f833188 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -198,8 +198,6 @@ type ServerInfo struct { // StartServer starts a gRPC server serving a benchmark service according to info. // It returns a function to stop the server. func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() { - opts = append(opts, grpc.WriteBufferSize(128*1024)) - opts = append(opts, grpc.ReadBufferSize(128*1024)) s := grpc.NewServer(opts...) 
switch info.Type { case "protobuf": @@ -278,8 +276,6 @@ func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { // NewClientConnWithContext creates a gRPC client connection to addr using ctx. func NewClientConnWithContext(ctx context.Context, addr string, opts ...grpc.DialOption) *grpc.ClientConn { - opts = append(opts, grpc.WithWriteBufferSize(128*1024)) - opts = append(opts, grpc.WithReadBufferSize(128*1024)) conn, err := grpc.DialContext(ctx, addr, opts...) if err != nil { logger.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) From 56ac86fa0f3940cb79946ce2c6e56f7ee7ecae84 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 10 Nov 2022 16:36:19 -0800 Subject: [PATCH 669/998] xdsclient: wait for underlying transport to close (#5775) --- .../xdsclient/controller/controller.go | 19 +++++++++++++++++++ .../xdsclient/controller/transport.go | 12 ++++++++++-- .../xdsclient/e2e_test/misc_watchers_test.go | 1 - 3 files changed, 29 insertions(+), 3 deletions(-) diff --git a/xds/internal/xdsclient/controller/controller.go b/xds/internal/xdsclient/controller/controller.go index 4b07dc8d6ac5..520da06a103d 100644 --- a/xds/internal/xdsclient/controller/controller.go +++ b/xds/internal/xdsclient/controller/controller.go @@ -57,6 +57,11 @@ type Controller struct { cc *grpc.ClientConn // Connection to the management server. vClient version.VersionedClient stopRunGoroutine context.CancelFunc + // The run goroutine closes this channel when it exits, and we block on this + // channel in Close(). This ensures that when Close() returns, the + // underlying transport is closed, and we can guarantee that we will not + // process any subsequent responses from the management server. + runDoneCh chan struct{} backoff func(int) time.Duration streamCh chan grpc.ClientStream @@ -77,6 +82,7 @@ type Controller struct { versionMap map[xdsresource.ResourceType]string // nonceMap contains the nonce from the most recent received response. 
nonceMap map[xdsresource.ResourceType]string + closed bool // Changes to map lrsClients and the lrsClient inside the map need to be // protected by lrsMu. @@ -127,6 +133,7 @@ func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, val config: config, updateValidator: validator, updateHandler: updateHandler, + runDoneCh: make(chan struct{}), backoff: boff, streamCh: make(chan grpc.ClientStream, 1), @@ -170,6 +177,14 @@ func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, val // Close closes the controller. func (t *Controller) Close() { + t.mu.Lock() + if t.closed { + t.mu.Unlock() + return + } + t.closed = true + t.mu.Unlock() + // Note that Close needs to check for nils even if some of them are always // set in the constructor. This is because the constructor defers Close() in // error cases, and the fields might not be set when the error happens. @@ -179,4 +194,8 @@ func (t *Controller) Close() { if t.cc != nil { t.cc.Close() } + // Wait on the run goroutine to be done only if it was started. + if t.stopRunGoroutine != nil { + <-t.runDoneCh + } } diff --git a/xds/internal/xdsclient/controller/transport.go b/xds/internal/xdsclient/controller/transport.go index 28641dc874a4..526aefae29b0 100644 --- a/xds/internal/xdsclient/controller/transport.go +++ b/xds/internal/xdsclient/controller/transport.go @@ -54,7 +54,13 @@ func (t *Controller) RemoveWatch(rType xdsresource.ResourceType, resourceName st // stream failed without receiving a single reply) and runs the sender and // receiver routines to send and receive data from the stream respectively. func (t *Controller) run(ctx context.Context) { - go t.send(ctx) + sendDoneCh := make(chan struct{}) + defer func() { + <-sendDoneCh + close(t.runDoneCh) + }() + go t.send(ctx, sendDoneCh) + // TODO: start a goroutine monitoring ClientConn's connectivity state, and // report error (and log) when stats is transient failure. 
@@ -109,7 +115,9 @@ func (t *Controller) run(ctx context.Context) { // Note that this goroutine doesn't do anything to the old stream when there's a // new one. In fact, there should be only one stream in progress, and new one // should only be created when the old one fails (recv returns an error). -func (t *Controller) send(ctx context.Context) { +func (t *Controller) send(ctx context.Context, doneCh chan struct{}) { + defer func() { close(doneCh) }() + var stream grpc.ClientStream for { select { diff --git a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go index a22970ccdab3..414fb249b9a7 100644 --- a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go @@ -85,7 +85,6 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { }) t.Cleanup(rdsCancel3) }) - // defer rdsCancel1() t.Cleanup(rdsCancel1) // Verify the contents of the received update for the all watchers. From 817c1e8c417e41bc4e697dae9c94645cbd059639 Mon Sep 17 00:00:00 2001 From: Huang Chong Date: Thu, 17 Nov 2022 02:02:07 +0800 Subject: [PATCH 670/998] passthrough: return error if endpoint is empty and opt.Dialer is nil when building resolver (#5732) --- clientconn_parsed_target_test.go | 8 +++++++- internal/resolver/passthrough/passthrough.go | 9 ++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go index 0993998efc9d..8f832a2c7cb4 100644 --- a/clientconn_parsed_target_test.go +++ b/clientconn_parsed_target_test.go @@ -40,7 +40,6 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { wantParsed resolver.Target }{ // No scheme is specified. 
- {target: "", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ""}}, {target: "://", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://"}}, {target: ":///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///"}}, {target: "://a/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/"}}, @@ -110,6 +109,7 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { func (s) TestParsedTarget_Failure_WithoutCustomDialer(t *testing.T) { targets := []string{ + "", "unix://a/b/c", "unix://authority", "unix-abstract://authority/a/b/c", @@ -179,6 +179,12 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address"}, wantDialerAddress: "/unix/socket/address", }, + { + target: "", + badScheme: true, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ""}, + wantDialerAddress: "", + }, { target: "passthrough://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "a.server.com", Endpoint: "google.com"}, diff --git a/internal/resolver/passthrough/passthrough.go b/internal/resolver/passthrough/passthrough.go index 520d9229e1ed..c6e08221ff64 100644 --- a/internal/resolver/passthrough/passthrough.go +++ b/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, From 0238b6e1cec37b55820b461d3d30652c54efe2c4 Mon Sep 17 00:00:00 2001 From: wby Date: Sat, 19 Nov 2022 00:57:37 +0800 Subject: [PATCH 671/998] transport: new stream with actual server name (#5748) --- internal/transport/http2_client.go | 27 +++++++++++++++++++----- test/authority_test.go | 34 ++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index d518b07e16f7..23d6ec6bc497 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -59,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. 
+ address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -314,6 +318,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -702,6 +707,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} diff --git a/test/authority_test.go b/test/authority_test.go index c841c64736fb..ccc99b18dba0 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -36,6 +36,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -205,3 +207,35 @@ func (s) TestColonPortAuthority(t *testing.T) { t.Errorf("us.client.EmptyCall(_, _) = _, %v; want _, nil", err) } } + +// TestAuthorityReplacedWithResolverAddress tests the scenario where the resolver +// returned address contains a 
ServerName override. The test verifies that the the +// :authority header value sent to the server as part of the http/2 HEADERS frame +// is set to the value specified in the resolver returned address. +func (s) TestAuthorityReplacedWithResolverAddress(t *testing.T) { + const expectedAuthority = "test.server.name" + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + return authorityChecker(ctx, expectedAuthority) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address, ServerName: expectedAuthority}}}) + cc, err := grpc.Dial(r.Scheme()+":///whatever", grpc.WithInsecure(), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial(%q) = %v", ss.Address, err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err = testpb.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() rpc failed: %v", err) + } +} From ff146806d2ecf50ca84697eba34b89a47e98cbad Mon Sep 17 00:00:00 2001 From: apolcyn Date: Fri, 18 Nov 2022 10:22:08 -0800 Subject: [PATCH 672/998] Cap min and max ring size to 4K (#5801) --- xds/internal/balancer/clusterresolver/config_test.go | 2 +- xds/internal/balancer/ringhash/config.go | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index 6e2d8624050b..cca1bd8eadc1 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -249,7 +249,7 @@ func TestParseConfig(t *testing.T) { }, XDSLBPolicy: &internalserviceconfig.BalancerConfig{ Name: ringhash.Name, - Config: 
&ringhash.LBConfig{MinRingSize: 1024, MaxRingSize: 8388608}, // Ringhash LB config with default min and max. + Config: &ringhash.LBConfig{MinRingSize: 1024, MaxRingSize: 4096}, // Ringhash LB config with default min and max. }, }, wantErr: false, diff --git a/xds/internal/balancer/ringhash/config.go b/xds/internal/balancer/ringhash/config.go index 5cb4aab3d9c9..4278b0636c7d 100644 --- a/xds/internal/balancer/ringhash/config.go +++ b/xds/internal/balancer/ringhash/config.go @@ -35,7 +35,9 @@ type LBConfig struct { const ( defaultMinSize = 1024 - defaultMaxSize = 8 * 1024 * 1024 // 8M + defaultMaxSize = 4096 + // TODO(apolcyn): make makeRingSizeCap configurable, with either a dial option or global setting + maxRingSizeCap = 4096 ) func parseConfig(c json.RawMessage) (*LBConfig, error) { @@ -49,6 +51,12 @@ func parseConfig(c json.RawMessage) (*LBConfig, error) { if cfg.MaxRingSize == 0 { cfg.MaxRingSize = defaultMaxSize } + if cfg.MinRingSize > maxRingSizeCap { + cfg.MinRingSize = maxRingSizeCap + } + if cfg.MaxRingSize > maxRingSizeCap { + cfg.MaxRingSize = maxRingSizeCap + } if cfg.MinRingSize > cfg.MaxRingSize { return nil, fmt.Errorf("min %v is greater than max %v", cfg.MinRingSize, cfg.MaxRingSize) } From 50be6ae2f92c064771c39e8730bf4bdbd5df3cfb Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 18 Nov 2022 10:56:02 -0800 Subject: [PATCH 673/998] go.mod: update all dependencies (#5803) --- .github/workflows/testing.yml | 3 - cmd/protoc-gen-go-grpc/go.mod | 4 +- cmd/protoc-gen-go-grpc/go.sum | 4 +- examples/go.mod | 25 +- examples/go.sum | 874 +++++++++++++++++++++++++- gcp/observability/go.mod | 31 +- gcp/observability/go.sum | 481 +++++++++++++- go.mod | 20 +- go.sum | 899 ++++++++++++++++++++++++++- security/advancedtls/examples/go.mod | 14 +- security/advancedtls/examples/go.sum | 862 ++++++++++++++++++++++++- security/advancedtls/go.mod | 15 +- security/advancedtls/go.sum | 862 ++++++++++++++++++++++++- security/authorization/go.mod | 11 +- 
security/authorization/go.sum | 1 - 15 files changed, 4012 insertions(+), 94 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 0ebfcce44281..c351b0942de6 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -64,9 +64,6 @@ jobs: - type: tests goversion: 1.17 - - type: tests - goversion: 1.16 - - type: extras goversion: 1.19 diff --git a/cmd/protoc-gen-go-grpc/go.mod b/cmd/protoc-gen-go-grpc/go.mod index e0f9440a65f7..7d0b7794dcad 100644 --- a/cmd/protoc-gen-go-grpc/go.mod +++ b/cmd/protoc-gen-go-grpc/go.mod @@ -1,5 +1,5 @@ module google.golang.org/grpc/cmd/protoc-gen-go-grpc -go 1.9 +go 1.17 -require google.golang.org/protobuf v1.27.1 +require google.golang.org/protobuf v1.28.1 diff --git a/cmd/protoc-gen-go-grpc/go.sum b/cmd/protoc-gen-go-grpc/go.sum index 03b1917b5a42..00f5993c956c 100644 --- a/cmd/protoc-gen-go-grpc/go.sum +++ b/cmd/protoc-gen-go-grpc/go.sum @@ -4,5 +4,5 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/examples/go.mod b/examples/go.mod index e168fbdf2150..2e6a7c299504 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -1,13 +1,28 @@ module google.golang.org/grpc/examples -go 1.14 +go 1.17 require ( github.com/golang/protobuf v1.5.2 - 
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 - google.golang.org/grpc v1.36.0 - google.golang.org/protobuf v1.27.1 + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 + google.golang.org/grpc v1.50.1 + google.golang.org/protobuf v1.28.1 +) + +require ( + cloud.google.com/go/compute v1.12.1 // indirect + cloud.google.com/go/compute/metadata v0.2.1 // indirect + github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect + github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect + github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + golang.org/x/net v0.2.0 // indirect + golang.org/x/sys v0.2.0 // indirect + golang.org/x/text v0.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect ) replace google.golang.org/grpc => ../ diff --git a/examples/go.sum b/examples/go.sum index 670eaca43abb..942564bff6fa 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1,109 +1,939 @@ -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod 
h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= 
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= 
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= 
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= 
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights 
v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex 
v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod 
h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod 
h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod 
h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod 
h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= 
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod 
h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run 
v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= 
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= 
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 
v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane 
v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod 
h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= 
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= 
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod 
h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b 
h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= 
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto 
v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto 
v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -116,11 +946,23 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod 
index 396622b97d81..cac2d1d78ff2 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -1,15 +1,40 @@ module google.golang.org/grpc/gcp/observability -go 1.14 +go 1.17 require ( cloud.google.com/go/logging v1.4.2 contrib.go.opencensus.io/exporter/stackdriver v0.13.12 - github.com/google/go-cmp v0.5.6 + github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 google.golang.org/grpc v1.50.1 ) +require ( + cloud.google.com/go v0.105.0 // indirect + cloud.google.com/go/compute v1.12.1 // indirect + cloud.google.com/go/compute/metadata v0.2.1 // indirect + cloud.google.com/go/longrunning v0.3.0 // indirect + cloud.google.com/go/monitoring v1.8.0 // indirect + cloud.google.com/go/trace v1.4.0 // indirect + github.com/aws/aws-sdk-go v1.37.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.6.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/prometheus/prometheus v2.5.0+incompatible // indirect + golang.org/x/net v0.2.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.2.0 // indirect + golang.org/x/text v0.4.0 // indirect + google.golang.org/api v0.102.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) + replace google.golang.org/grpc => ../../ diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum 
index fbee489030a5..50272f7c7734 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -23,32 +23,351 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= 
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= 
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod 
h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute 
v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= 
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod 
h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod 
h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= 
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= cloud.google.com/go/logging v1.4.2 h1:Mu2Q75VBDQlW1HlBMjTX4X84UFR73G1TiLlRYc/b7tA= cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo= -cloud.google.com/go/monitoring v1.1.0 h1:ZnyNdf/XRcynMmKzRSNTOdOyYPs6G7do1l2D2hIvIKo= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/longrunning 
v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= 
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= 
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= 
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod 
h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= 
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/trace v1.0.0 h1:laKx2y7IWMjguCe5zZx6n7qLtREk4kyE69SXVC0VSN8= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= 
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0 h1:qO9eLn2esajC9sxpqp1YKX37nXC3L4BfGnPS0Cx9dYo= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod 
h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -108,7 +427,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -123,13 +441,14 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -149,11 +468,21 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= 
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -166,10 +495,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -279,8 +606,20 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net 
v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -297,8 +636,17 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= 
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -309,8 +657,11 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -359,11 +710,27 
@@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -373,6 +740,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -432,8 +800,11 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -465,8 +836,29 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= -google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod 
h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -512,6 +904,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= @@ -534,8 +927,55 @@ google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2 h1:CUp93KYgL06Y/PdI8aRJaFiAHevPIGWQmijSqaUhue8= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto 
v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -549,10 +989,11 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/go.mod b/go.mod index 45960178706a..9964600f921e 100644 --- a/go.mod +++ b/go.mod @@ -9,20 +9,20 @@ require ( github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.5.2 - github.com/google/go-cmp v0.5.6 - github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20220722155237-a158d28d115b - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.27.1 + github.com/google/go-cmp v0.5.9 + github.com/google/uuid v1.3.0 + golang.org/x/net v0.2.0 + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 + golang.org/x/sys v0.2.0 + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 + google.golang.org/protobuf v1.28.1 ) require ( - cloud.google.com/go v0.34.0 // indirect + cloud.google.com/go/compute v1.12.1 // indirect + cloud.google.com/go/compute/metadata v0.2.1 // indirect 
github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect golang.org/x/text v0.4.0 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect ) diff --git a/go.sum b/go.sum index d3d81e7fbcd8..cdcf905cf85a 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,389 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= 
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod 
h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod 
h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod 
h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute 
v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog 
v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod 
h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod 
h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= 
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= 
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod 
h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod 
h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod 
h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod 
h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= 
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod 
h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod 
h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 
h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= @@ -21,18 +393,39 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs 
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -42,36 +435,141 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -79,63 +577,431 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= 
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= 
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto 
v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto 
v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= 
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -144,15 +1010,28 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 
v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index 20ed81e24d38..aa41ccbd92cd 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -1,13 +1,23 @@ module google.golang.org/grpc/security/advancedtls/examples -go 1.15 +go 1.17 require ( - google.golang.org/grpc v1.38.0 + google.golang.org/grpc v1.51.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b google.golang.org/grpc/security/advancedtls v0.0.0-20201112215255-90f1b3ee835b ) +require ( + github.com/golang/protobuf v1.5.2 // indirect + 
golang.org/x/crypto v0.3.0 // indirect + golang.org/x/net v0.2.0 // indirect + golang.org/x/sys v0.2.0 // indirect + golang.org/x/text v0.4.0 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) + replace google.golang.org/grpc => ../../.. replace google.golang.org/grpc/examples => ../../../examples diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 238fb5ce652a..b838ed1b97b1 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,7 +1,372 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod 
h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod 
h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod 
h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/billing v1.4.0/go.mod 
h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute 
v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= 
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod 
h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod 
h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= 
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/mediatranslation v0.5.0/go.mod 
h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= 
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= 
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod 
h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod 
h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= 
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod 
h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod 
h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go 
v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -10,90 +375,557 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod 
h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= 
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod 
h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod 
h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod 
h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine 
v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= 
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto 
v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= 
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -106,9 +938,23 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 65ac445d405b..fe75f27fc6c3 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -1,14 +1,23 @@ module google.golang.org/grpc/security/advancedtls -go 1.14 +go 1.17 require ( github.com/hashicorp/golang-lru v0.5.4 - golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 - google.golang.org/grpc v1.38.0 + golang.org/x/crypto v0.3.0 + google.golang.org/grpc v1.51.0 
google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) +require ( + github.com/golang/protobuf v1.5.2 // indirect + golang.org/x/net v0.2.0 // indirect + golang.org/x/sys v0.2.0 // indirect + golang.org/x/text v0.4.0 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) + replace google.golang.org/grpc => ../../ replace google.golang.org/grpc/examples => ../../examples diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 238fb5ce652a..b838ed1b97b1 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,7 +1,372 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod 
h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= 
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= 
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/billing 
v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= 
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod 
h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= 
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore 
v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.3.0/go.mod 
h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/mediatranslation 
v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= 
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= 
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod 
h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod 
h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= 
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod 
h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod 
h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go 
v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -10,90 +375,557 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod 
h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= 
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod 
h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod 
h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod 
h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine 
v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= 
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto 
v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= 
+google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -106,9 +938,23 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/security/authorization/go.mod b/security/authorization/go.mod index fbaa0ada0974..0413d91056af 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/security/authorization -go 1.14 +go 1.17 require ( github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 @@ -10,3 +10,12 @@ require ( google.golang.org/grpc v1.40.0 google.golang.org/protobuf v1.27.1 ) + +require ( + 
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e // indirect + github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed // indirect + github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + golang.org/x/text v0.3.7 // indirect +) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index bcd03a229247..db688f8fd778 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -157,7 +157,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= From fefb3ec0c095f5bf30d3c4ae1ff5911abfc2a989 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 18 Nov 2022 11:26:37 -0800 Subject: [PATCH 674/998] test/tools: update everything to latest versions except staticcheck (#5805) --- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 7 +- binarylog/grpc_binarylog_v1/binarylog.pb.go | 7 +- channelz/grpc_channelz_v1/channelz.pb.go | 7 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 7 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 7 +- .../grpc_gcp/transport_security_common.pb.go | 7 +- examples/features/proto/echo/echo.pb.go | 7 +- .../helloworld/helloworld/helloworld.pb.go | 7 +- 
.../route_guide/routeguide/route_guide.pb.go | 7 +- health/grpc_health_v1/health.pb.go | 7 +- internal/proto/grpc_lookup_v1/rls.pb.go | 7 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 7 +- interop/grpc_testing/benchmark_service.pb.go | 7 +- interop/grpc_testing/control.pb.go | 7 +- interop/grpc_testing/core/stats.pb.go | 9 +- interop/grpc_testing/empty.pb.go | 7 +- interop/grpc_testing/messages.pb.go | 7 +- interop/grpc_testing/payloads.pb.go | 7 +- .../report_qps_scenario_service.pb.go | 7 +- interop/grpc_testing/stats.pb.go | 7 +- interop/grpc_testing/test.pb.go | 7 +- interop/grpc_testing/worker_service.pb.go | 7 +- profiling/proto/service.pb.go | 7 +- .../grpc_reflection_v1alpha/reflection.pb.go | 7 +- reflection/grpc_testing/proto2.pb.go | 7 +- reflection/grpc_testing/proto2_ext.pb.go | 7 +- reflection/grpc_testing/proto2_ext2.pb.go | 7 +- reflection/grpc_testing/test.pb.go | 7 +- regenerate.sh | 4 - stress/grpc_testing/metrics.pb.go | 7 +- test/codec_perf/perf.pb.go | 7 +- test/grpc_testing/test.pb.go | 7 +- test/tools/go.mod | 8 +- test/tools/go.sum | 105 ++++++++---------- 34 files changed, 82 insertions(+), 254 deletions(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index bf4c3cb4449e..428672be2b2e 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -42,10 +41,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type LoadBalanceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/binarylog/grpc_binarylog_v1/binarylog.pb.go b/binarylog/grpc_binarylog_v1/binarylog.pb.go index 64a232f28111..dcd0b2609ac9 100644 --- a/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. 
diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index b39fd928f432..6dd71f9ebdce 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -21,14 +21,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/channelz/v1/channelz.proto package grpc_channelz_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -46,10 +45,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ChannelConnectivityState_State int32 const ( diff --git a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 703b48da753b..e4d5610efb45 100644 --- a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/gcp/altscontext.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type AltsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 383c5fb97a77..d3c56877362e 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/gcp/handshaker.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HandshakeProtocol int32 const ( diff --git a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 4fc3c79d6a39..0fecf5c0bae4 100644 --- a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/gcp/transport_security_common.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The security level of the created channel. The list is sorted in increasing // level of security. This order must always be maintained. type SecurityLevel int32 diff --git a/examples/features/proto/echo/echo.pb.go b/examples/features/proto/echo/echo.pb.go index 5af638d5280e..00156e5f09b3 100644 --- a/examples/features/proto/echo/echo.pb.go +++ b/examples/features/proto/echo/echo.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: examples/features/proto/echo/echo.proto package echo import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // EchoRequest is the request for echo. 
type EchoRequest struct { state protoimpl.MessageState diff --git a/examples/helloworld/helloworld/helloworld.pb.go b/examples/helloworld/helloworld/helloworld.pb.go index 2d5cbf5d7805..b2c31770a428 100644 --- a/examples/helloworld/helloworld/helloworld.pb.go +++ b/examples/helloworld/helloworld/helloworld.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: examples/helloworld/helloworld/helloworld.proto package helloworld import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The request message containing the user's name. type HelloRequest struct { state protoimpl.MessageState diff --git a/examples/route_guide/routeguide/route_guide.pb.go b/examples/route_guide/routeguide/route_guide.pb.go index 85c3033c7cd2..9229f7e66041 100644 --- a/examples/route_guide/routeguide/route_guide.pb.go +++ b/examples/route_guide/routeguide/route_guide.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: examples/route_guide/routeguide/route_guide.proto package routeguide import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). // Latitudes should be in the range +/- 90 degrees and longitude should be in diff --git a/health/grpc_health_v1/health.pb.go b/health/grpc_health_v1/health.pb.go index a66024d23e30..b907eebf3663 100644 --- a/health/grpc_health_v1/health.pb.go +++ b/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/internal/proto/grpc_lookup_v1/rls.pb.go b/internal/proto/grpc_lookup_v1/rls.pb.go index 437dff2201c9..e0d1a641efa3 100644 --- a/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Possible reasons for making a request. type RouteLookupRequest_Reason int32 diff --git a/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go index d481f8acb004..f842e7efb93c 100644 --- a/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/lookup/v1/rls_config.proto package grpc_lookup_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - // Extract a key based on a given name (e.g. header name or query parameter // name). The name must match one of the names listed in the "name" field. If // the "required_match" field is true, one of the specified names must be diff --git a/interop/grpc_testing/benchmark_service.pb.go b/interop/grpc_testing/benchmark_service.pb.go index c528619fe6dd..86e320377d1c 100644 --- a/interop/grpc_testing/benchmark_service.pb.go +++ b/interop/grpc_testing/benchmark_service.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/benchmark_service.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_benchmark_service_proto protoreflect.FileDescriptor var file_grpc_testing_benchmark_service_proto_rawDesc = []byte{ diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index 74e2a8612468..88dd59a4117e 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/control.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ClientType int32 const ( diff --git a/interop/grpc_testing/core/stats.pb.go b/interop/grpc_testing/core/stats.pb.go index d25d27b26fb1..e5b170a2d0bf 100644 --- a/interop/grpc_testing/core/stats.pb.go +++ b/interop/grpc_testing/core/stats.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/core/stats.proto -package grpc_core +package core import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type Bucket struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index 24ae8a4349f6..fa25aa424fc6 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/empty.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // An empty message that you can re-use to avoid defining duplicated empty // messages in your project. A typical example is to use it as argument or the // return value of a service API. For instance: diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index 2ccd4d9ab388..7df5c638864a 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -16,14 +16,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/messages.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The type of payload that should be returned. 
type PayloadType int32 diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index 834de17b91b2..e8719673b5c7 100644 --- a/interop/grpc_testing/payloads.pb.go +++ b/interop/grpc_testing/payloads.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/payloads.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ByteBufferParams struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/interop/grpc_testing/report_qps_scenario_service.pb.go b/interop/grpc_testing/report_qps_scenario_service.pb.go index f007d47fcda3..a96426a16474 100644 --- a/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_report_qps_scenario_service_proto protoreflect.FileDescriptor var file_grpc_testing_report_qps_scenario_service_proto_rawDesc = []byte{ diff --git a/interop/grpc_testing/stats.pb.go b/interop/grpc_testing/stats.pb.go index ace41211c00f..a414c71e96ae 100644 --- a/interop/grpc_testing/stats.pb.go +++ b/interop/grpc_testing/stats.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/stats.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" core "google.golang.org/grpc/interop/grpc_testing/core" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ServerStats struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/interop/grpc_testing/test.pb.go b/interop/grpc_testing/test.pb.go index 742935c97aee..b29395f901fc 100644 --- a/interop/grpc_testing/test.pb.go +++ b/interop/grpc_testing/test.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/test.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_test_proto protoreflect.FileDescriptor var file_grpc_testing_test_proto_rawDesc = []byte{ diff --git a/interop/grpc_testing/worker_service.pb.go b/interop/grpc_testing/worker_service.pb.go index 12b2f13e100d..09d290f343c6 100644 --- a/interop/grpc_testing/worker_service.pb.go +++ b/interop/grpc_testing/worker_service.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: grpc/testing/worker_service.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_worker_service_proto protoreflect.FileDescriptor var file_grpc_testing_worker_service_proto_rawDesc = []byte{ diff --git a/profiling/proto/service.pb.go b/profiling/proto/service.pb.go index 22bc4bc47f48..0bc942db4999 100644 --- a/profiling/proto/service.pb.go +++ b/profiling/proto/service.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: profiling/proto/service.proto package proto import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - // EnableRequest defines the fields in a /Profiling/Enable method request to // toggle profiling on and off within a gRPC program. type EnableRequest struct { diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index c22f9a52db4e..7f9ef9f6b6fa 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -16,14 +16,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: reflection/grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The message sent by the client when calling ServerReflectionInfo method. type ServerReflectionRequest struct { state protoimpl.MessageState diff --git a/reflection/grpc_testing/proto2.pb.go b/reflection/grpc_testing/proto2.pb.go index 9a8f643adb17..39a211e57555 100644 --- a/reflection/grpc_testing/proto2.pb.go +++ b/reflection/grpc_testing/proto2.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: reflection/grpc_testing/proto2.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ToBeExtended struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/reflection/grpc_testing/proto2_ext.pb.go b/reflection/grpc_testing/proto2_ext.pb.go index 4fe2b2a17d86..8b5524e54594 100644 --- a/reflection/grpc_testing/proto2_ext.pb.go +++ b/reflection/grpc_testing/proto2_ext.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: reflection/grpc_testing/proto2_ext.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type Extension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/reflection/grpc_testing/proto2_ext2.pb.go b/reflection/grpc_testing/proto2_ext2.pb.go index e84c44f22c96..3d4b069dfa3c 100644 --- a/reflection/grpc_testing/proto2_ext2.pb.go +++ b/reflection/grpc_testing/proto2_ext2.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: reflection/grpc_testing/proto2_ext2.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type AnotherExtension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/reflection/grpc_testing/test.pb.go b/reflection/grpc_testing/test.pb.go index 6740b629d767..398c5dd7bc0b 100644 --- a/reflection/grpc_testing/test.pb.go +++ b/reflection/grpc_testing/test.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: reflection/grpc_testing/test.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type SearchResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/regenerate.sh b/regenerate.sh index 99db79fafcfb..91ad1cedbca2 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -119,8 +119,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/stress/grpc_testing/metrics.pb.go b/stress/grpc_testing/metrics.pb.go index f9d359bf1041..b2002e54706e 100644 --- a/stress/grpc_testing/metrics.pb.go +++ b/stress/grpc_testing/metrics.pb.go @@ -21,14 +21,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: stress/grpc_testing/metrics.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -42,10 +41,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Response message containing the gauge name and value type GaugeResponse struct { state protoimpl.MessageState diff --git a/test/codec_perf/perf.pb.go b/test/codec_perf/perf.pb.go index d09ce726b63f..f7a8a9c647b2 100644 --- a/test/codec_perf/perf.pb.go +++ b/test/codec_perf/perf.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: test/codec_perf/perf.proto package codec_perf import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Buffer is a message that contains a body of bytes that is used to exercise // encoding and decoding overheads. type Buffer struct { diff --git a/test/grpc_testing/test.pb.go b/test/grpc_testing/test.pb.go index 89ebb3420f24..10231bb5c975 100644 --- a/test/grpc_testing/test.pb.go +++ b/test/grpc_testing/test.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.26.0 // protoc v3.14.0 // source: test/grpc_testing/test.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The type of payload that should be returned. 
type PayloadType int32 diff --git a/test/tools/go.mod b/test/tools/go.mod index 9c964971413e..2aec0151b3b8 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -3,10 +3,10 @@ module google.golang.org/grpc/test/tools go 1.14 require ( + github.com/BurntSushi/toml v0.3.1 // indirect github.com/client9/misspell v0.3.4 - github.com/golang/protobuf v1.4.1 - golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 - golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 - google.golang.org/protobuf v1.25.0 // indirect + github.com/golang/protobuf v1.5.2 + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/tools v0.3.0 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc ) diff --git a/test/tools/go.sum b/test/tools/go.sum index 09acda10d25c..d5ce24608159 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -1,71 +1,58 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 3011eaf70e31cfde5c5a8e63248f460ea2bbcf95 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 18 Nov 2022 13:51:43 -0800 Subject: [PATCH 675/998] test/tools: update staticcheck version to latest (#5806) --- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- binarylog/grpc_binarylog_v1/binarylog.pb.go | 2 +- channelz/grpc_channelz_v1/channelz.pb.go | 2 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 2 +- .../grpc_gcp/transport_security_common.pb.go | 2 +- examples/features/proto/echo/echo.pb.go | 2 +- .../helloworld/helloworld/helloworld.pb.go | 2 +- .../route_guide/routeguide/route_guide.pb.go | 2 +- health/grpc_health_v1/health.pb.go | 2 +- internal/proto/grpc_lookup_v1/rls.pb.go | 2 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 2 +- internal/transport/transport_test.go | 2 +- 
interop/grpc_testing/benchmark_service.pb.go | 2 +- interop/grpc_testing/control.pb.go | 2 +- interop/grpc_testing/core/stats.pb.go | 2 +- interop/grpc_testing/empty.pb.go | 2 +- interop/grpc_testing/messages.pb.go | 2 +- interop/grpc_testing/payloads.pb.go | 2 +- .../report_qps_scenario_service.pb.go | 2 +- interop/grpc_testing/stats.pb.go | 2 +- interop/grpc_testing/test.pb.go | 2 +- interop/grpc_testing/worker_service.pb.go | 2 +- profiling/proto/service.pb.go | 2 +- .../grpc_reflection_v1alpha/reflection.pb.go | 2 +- reflection/grpc_testing/proto2.pb.go | 12 +--------- reflection/grpc_testing/proto2_ext.pb.go | 2 +- reflection/grpc_testing/proto2_ext2.pb.go | 2 +- reflection/grpc_testing/test.pb.go | 2 +- stress/grpc_testing/metrics.pb.go | 2 +- test/codec_perf/perf.pb.go | 2 +- test/grpc_testing/test.pb.go | 2 +- test/tools/go.mod | 6 +++-- test/tools/go.sum | 22 ++++++++++++++----- vet.sh | 3 ++- 35 files changed, 55 insertions(+), 50 deletions(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 428672be2b2e..1205aff23f79 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/binarylog/grpc_binarylog_v1/binarylog.pb.go b/binarylog/grpc_binarylog_v1/binarylog.pb.go index dcd0b2609ac9..66d141fce707 100644 --- a/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index 6dd71f9ebdce..0bf728630e6b 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/channelz/v1/channelz.proto diff --git a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index e4d5610efb45..1a40e17e8d3f 100644 --- a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/altscontext.proto diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index d3c56877362e..50eefa53830e 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/handshaker.proto diff --git a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 0fecf5c0bae4..b07412f18585 100644 --- a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/transport_security_common.proto diff --git a/examples/features/proto/echo/echo.pb.go b/examples/features/proto/echo/echo.pb.go index 00156e5f09b3..f30af166bd1c 100644 --- a/examples/features/proto/echo/echo.pb.go +++ b/examples/features/proto/echo/echo.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: examples/features/proto/echo/echo.proto diff --git a/examples/helloworld/helloworld/helloworld.pb.go b/examples/helloworld/helloworld/helloworld.pb.go index b2c31770a428..d75336b204e2 100644 --- a/examples/helloworld/helloworld/helloworld.pb.go +++ b/examples/helloworld/helloworld/helloworld.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: examples/helloworld/helloworld/helloworld.proto diff --git a/examples/route_guide/routeguide/route_guide.pb.go b/examples/route_guide/routeguide/route_guide.pb.go index 9229f7e66041..9c42c50b004e 100644 --- a/examples/route_guide/routeguide/route_guide.pb.go +++ b/examples/route_guide/routeguide/route_guide.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: examples/route_guide/routeguide/route_guide.proto diff --git a/health/grpc_health_v1/health.pb.go b/health/grpc_health_v1/health.pb.go index b907eebf3663..8e29a62f164f 100644 --- a/health/grpc_health_v1/health.pb.go +++ b/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/health/v1/health.proto diff --git a/internal/proto/grpc_lookup_v1/rls.pb.go b/internal/proto/grpc_lookup_v1/rls.pb.go index e0d1a641efa3..21b6429d6521 100644 --- a/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/lookup/v1/rls.proto diff --git a/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go index f842e7efb93c..05a307092a80 100644 --- a/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/lookup/v1/rls_config.proto diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index d494ed514b0d..45257ba0c579 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1292,7 +1292,7 @@ func (s) TestClientHonorsConnectContext(t *testing.T) { t.Fatalf("NewClientTransport() returned successfully; wanted error") } t.Logf("NewClientTransport() = _, %v", err) - if time.Now().Sub(timeBefore) > 3*time.Second { + if time.Since(timeBefore) > 3*time.Second { t.Fatalf("NewClientTransport returned > 2.9s after context cancelation") } diff --git a/interop/grpc_testing/benchmark_service.pb.go b/interop/grpc_testing/benchmark_service.pb.go index 86e320377d1c..3df10459c572 100644 --- a/interop/grpc_testing/benchmark_service.pb.go +++ b/interop/grpc_testing/benchmark_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/benchmark_service.proto diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index 88dd59a4117e..5bd5aaffb5b5 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/control.proto diff --git a/interop/grpc_testing/core/stats.pb.go b/interop/grpc_testing/core/stats.pb.go index e5b170a2d0bf..d2649da373d7 100644 --- a/interop/grpc_testing/core/stats.pb.go +++ b/interop/grpc_testing/core/stats.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/core/stats.proto diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index fa25aa424fc6..d7671accfffd 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/empty.proto diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index 7df5c638864a..3e09a8aa55c9 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/messages.proto diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index e8719673b5c7..2b8388c7e7ed 100644 --- a/interop/grpc_testing/payloads.pb.go +++ b/interop/grpc_testing/payloads.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/payloads.proto diff --git a/interop/grpc_testing/report_qps_scenario_service.pb.go b/interop/grpc_testing/report_qps_scenario_service.pb.go index a96426a16474..118dfd720ecc 100644 --- a/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/report_qps_scenario_service.proto diff --git a/interop/grpc_testing/stats.pb.go b/interop/grpc_testing/stats.pb.go index a414c71e96ae..8ae929219109 100644 --- a/interop/grpc_testing/stats.pb.go +++ b/interop/grpc_testing/stats.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/stats.proto diff --git a/interop/grpc_testing/test.pb.go b/interop/grpc_testing/test.pb.go index b29395f901fc..266bed6f64b4 100644 --- a/interop/grpc_testing/test.pb.go +++ b/interop/grpc_testing/test.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/test.proto diff --git a/interop/grpc_testing/worker_service.pb.go b/interop/grpc_testing/worker_service.pb.go index 09d290f343c6..bc045a3a0577 100644 --- a/interop/grpc_testing/worker_service.pb.go +++ b/interop/grpc_testing/worker_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/testing/worker_service.proto diff --git a/profiling/proto/service.pb.go b/profiling/proto/service.pb.go index 0bc942db4999..fc2d51bed131 100644 --- a/profiling/proto/service.pb.go +++ b/profiling/proto/service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: profiling/proto/service.proto diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index 7f9ef9f6b6fa..606462e8e78a 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: reflection/grpc_reflection_v1alpha/reflection.proto diff --git a/reflection/grpc_testing/proto2.pb.go b/reflection/grpc_testing/proto2.pb.go index 39a211e57555..c475c197be98 100644 --- a/reflection/grpc_testing/proto2.pb.go +++ b/reflection/grpc_testing/proto2.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: reflection/grpc_testing/proto2.proto @@ -22,7 +22,6 @@ package grpc_testing import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" @@ -76,15 +75,6 @@ func (*ToBeExtended) Descriptor() ([]byte, []int) { return file_reflection_grpc_testing_proto2_proto_rawDescGZIP(), []int{0} } -var extRange_ToBeExtended = []protoiface.ExtensionRangeV1{ - {Start: 10, End: 30}, -} - -// Deprecated: Use ToBeExtended.ProtoReflect.Descriptor.ExtensionRanges instead. 
-func (*ToBeExtended) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ToBeExtended -} - func (x *ToBeExtended) GetFoo() int32 { if x != nil && x.Foo != nil { return *x.Foo diff --git a/reflection/grpc_testing/proto2_ext.pb.go b/reflection/grpc_testing/proto2_ext.pb.go index 8b5524e54594..bd33e953bbfc 100644 --- a/reflection/grpc_testing/proto2_ext.pb.go +++ b/reflection/grpc_testing/proto2_ext.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: reflection/grpc_testing/proto2_ext.proto diff --git a/reflection/grpc_testing/proto2_ext2.pb.go b/reflection/grpc_testing/proto2_ext2.pb.go index 3d4b069dfa3c..c54e5f76ecbf 100644 --- a/reflection/grpc_testing/proto2_ext2.pb.go +++ b/reflection/grpc_testing/proto2_ext2.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: reflection/grpc_testing/proto2_ext2.proto diff --git a/reflection/grpc_testing/test.pb.go b/reflection/grpc_testing/test.pb.go index 398c5dd7bc0b..09408daf9410 100644 --- a/reflection/grpc_testing/test.pb.go +++ b/reflection/grpc_testing/test.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: reflection/grpc_testing/test.proto diff --git a/stress/grpc_testing/metrics.pb.go b/stress/grpc_testing/metrics.pb.go index b2002e54706e..fdcaf94fbb70 100644 --- a/stress/grpc_testing/metrics.pb.go +++ b/stress/grpc_testing/metrics.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: stress/grpc_testing/metrics.proto diff --git a/test/codec_perf/perf.pb.go b/test/codec_perf/perf.pb.go index f7a8a9c647b2..7ef3b7a58242 100644 --- a/test/codec_perf/perf.pb.go +++ b/test/codec_perf/perf.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: test/codec_perf/perf.proto diff --git a/test/grpc_testing/test.pb.go b/test/grpc_testing/test.pb.go index 10231bb5c975..3ee33a8e74c3 100644 --- a/test/grpc_testing/test.pb.go +++ b/test/grpc_testing/test.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: test/grpc_testing/test.proto diff --git a/test/tools/go.mod b/test/tools/go.mod index 2aec0151b3b8..f0be567a7d4a 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -3,10 +3,12 @@ module google.golang.org/grpc/test/tools go 1.14 require ( - github.com/BurntSushi/toml v0.3.1 // indirect + github.com/BurntSushi/toml v1.2.1 // indirect github.com/client9/misspell v0.3.4 github.com/golang/protobuf v1.5.2 + golang.org/x/exp/typeparams v0.0.0-20221114191408-850992195362 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/tools v0.3.0 - honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc + google.golang.org/protobuf v1.28.1 // indirect + honnef.co/go/tools v0.3.3 ) diff --git a/test/tools/go.sum b/test/tools/go.sum index d5ce24608159..2a7e36966de0 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -1,5 +1,6 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml 
v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -7,10 +8,14 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20221114191408-850992195362 h1:rUI77tLrgYyDn6S179yfuWoysXhXQTHvvlRfOOS9ffw= +golang.org/x/exp/typeparams v0.0.0-20221114191408-850992195362/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint 
v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -20,16 +25,20 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= @@ -39,11 +48,13 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= @@ -52,7 +63,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= +honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= diff --git a/vet.sh b/vet.sh index bd8e0cdb33c8..1d03c0914810 100755 --- a/vet.sh +++ b/vet.sh @@ -121,8 +121,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. 
From 0abb6f9b69710f9a98ef04b3c4b6308eadce4e56 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 21 Nov 2022 12:42:50 -0800 Subject: [PATCH 676/998] xdsclient: resource type agnostic WatchResource() API (#5777) --- xds/internal/xdsclient/client.go | 14 ++ xds/internal/xdsclient/client_new.go | 5 + xds/internal/xdsclient/clientimpl.go | 25 ++- xds/internal/xdsclient/clientimpl_watchers.go | 68 +++++++ .../xdsresource/cluster_resource_type.go | 149 ++++++++++++++ .../xdsresource/endpoints_resource_type.go | 144 ++++++++++++++ .../xdsresource/listener_resource_type.go | 181 ++++++++++++++++++ .../xdsclient/xdsresource/resource_type.go | 158 +++++++++++++++ .../xdsresource/route_config_resource_type.go | 145 ++++++++++++++ 9 files changed, 875 insertions(+), 14 deletions(-) create mode 100644 xds/internal/xdsclient/xdsresource/cluster_resource_type.go create mode 100644 xds/internal/xdsclient/xdsresource/endpoints_resource_type.go create mode 100644 xds/internal/xdsclient/xdsresource/listener_resource_type.go create mode 100644 xds/internal/xdsclient/xdsresource/resource_type.go create mode 100644 xds/internal/xdsclient/xdsresource/route_config_resource_type.go diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 8ae7301fb7e3..976b86fe34d7 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -36,6 +36,20 @@ type XDSClient interface { WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) + // WatchResource uses xDS to discover the resource associated with the + // provided resource name. The resource type implementation determines how + // xDS requests are sent out and how responses are deserialized and + // validated. Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. + // + // Most callers will not have a need to use this API directly. 
They will + // instead use a resource-type-specific wrapper API provided by the relevant + // resource type implementation. + // + // TODO: Once this generic client API is fully implemented and integrated, + // delete the resource type specific watch APIs on this interface. + WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) + DumpLDS() map[string]xdsresource.UpdateWithMD DumpRDS() map[string]xdsresource.UpdateWithMD DumpCDS() map[string]xdsresource.UpdateWithMD diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 792c17d7e1fa..4b2b8cfd39eb 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -20,6 +20,7 @@ package xdsclient import ( "bytes" + "context" "encoding/json" "fmt" "sync" @@ -55,10 +56,14 @@ func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { // newWithConfig returns a new xdsClient with the given config. func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { + ctx, cancel := context.WithCancel(context.Background()) c := &clientImpl{ done: grpcsync.NewEvent(), config: config, watchExpiryTimeout: watchExpiryTimeout, + serializer: newCallbackSerializer(ctx), + serializerClose: cancel, + resourceTypes: newResourceTypeRegistry(), authorities: make(map[string]*authority), idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), } diff --git a/xds/internal/xdsclient/clientimpl.go b/xds/internal/xdsclient/clientimpl.go index 800ae91fa311..e9224cb1a31b 100644 --- a/xds/internal/xdsclient/clientimpl.go +++ b/xds/internal/xdsclient/clientimpl.go @@ -32,14 +32,14 @@ var _ XDSClient = &clientImpl{} // clientImpl is the real implementation of the xds client. The exported Client // is a wrapper of this struct with a ref count. -// -// Implements UpdateHandler interface. 
-// TODO(easwars): Make a wrapper struct which implements this interface in the -// style of ccBalancerWrapper so that the Client type does not implement these -// exported methods. type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config + done *grpcsync.Event + config *bootstrap.Config + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration + serializer *callbackSerializer + serializerClose func() + resourceTypes *resourceTypeRegistry // authorityMu protects the authority fields. It's necessary because an // authority is created when it's used. @@ -60,9 +60,6 @@ type clientImpl struct { // An authority is either in authorities, or idleAuthorities, // never both. idleAuthorities *cache.TimeoutCache - - logger *grpclog.PrefixLogger - watchExpiryTimeout time.Duration } // BootstrapConfig returns the configuration read from the bootstrap file. @@ -72,6 +69,9 @@ func (c *clientImpl) BootstrapConfig() *bootstrap.Config { } // Close closes the gRPC connection to the management server. +// +// TODO: ensure that all underlying transports are closed before this function +// returns. func (c *clientImpl) Close() { if c.done.HasFired() { return @@ -80,16 +80,13 @@ func (c *clientImpl) Close() { // TODO: Should we invoke the registered callbacks here with an error that // the client is closed? - // Note that Close needs to check for nils even if some of them are always - // set in the constructor. This is because the constructor defers Close() in - // error cases, and the fields might not be set when the error happens. 
- c.authorityMu.Lock() for _, a := range c.authorities { a.close() } c.idleAuthorities.Clear(true) c.authorityMu.Unlock() + c.serializerClose() c.logger.Infof("Shutdown") } diff --git a/xds/internal/xdsclient/clientimpl_watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go index 0c9f87aa7dc0..7095a8394487 100644 --- a/xds/internal/xdsclient/clientimpl_watchers.go +++ b/xds/internal/xdsclient/clientimpl_watchers.go @@ -18,6 +18,10 @@ package xdsclient import ( + "context" + "fmt" + "sync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -103,3 +107,67 @@ func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.Endp unref() } } + +// WatchResource uses xDS to discover the resource associated with the provided +// resource name. The resource type implementation determines how xDS requests +// are sent out and how responses are deserialized and validated. Upon receipt +// of a response from the management server, an appropriate callback on the +// watcher is invoked. +func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) { + // Return early if the client is already closed. + // + // The client returned from the top-level API is a ref-counted client which + // contains a pointer to `clientImpl`. When all references are released, the + // ref-counted client sets its pointer to `nil`. And if any watch APIs are + // made on such a closed client, we will get here with a `nil` receiver. 
+ if c == nil || c.done.HasFired() { + logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeEnum().String(), resourceName) + return func() {} + } + + if err := c.resourceTypes.maybeRegister(rType); err != nil { + c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + return func() {} + } + + // TODO: replace this with the code does the following when we have + // implemented generic watch API on the authority: + // - Parse the resource name and extract the authority. + // - Locate the corresponding authority object and acquire a reference to + // it. If the authority is not found, error out. + // - Call the watchResource() method on the authority. + // - Return a cancel function to cancel the watch on the authority and to + // release the reference. + return func() {} +} + +// A registry of xdsresource.Type implementations indexed by their corresponding +// type URLs. Registration of an xdsresource.Type happens the first time a watch +// for a resource of that type is invoked. +type resourceTypeRegistry struct { + mu sync.Mutex + types map[string]xdsresource.Type +} + +func newResourceTypeRegistry() *resourceTypeRegistry { + return &resourceTypeRegistry{types: make(map[string]xdsresource.Type)} +} + +func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { + r.mu.Lock() + defer r.mu.Unlock() + + urls := []string{rType.V2TypeURL(), rType.V3TypeURL()} + for _, u := range urls { + if u == "" { + // Silently ignore unsupported versions of the resource. 
+ continue + } + typ, ok := r.types[u] + if ok && typ != rType { + return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeEnum()) + } + r.types[u] = rType + } + return nil +} diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go new file mode 100644 index 000000000000..a2529793392b --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -0,0 +1,149 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +var ( + // Compile time interface checks. + _ Type = clusterResourceType{} + _ ResourceData = &ClusterResourceData{} + + // Singleton instantiation of the resource type implementation. + clusterType = clusterResourceType{ + resourceTypeState: resourceTypeState{ + v2TypeURL: "type.googleapis.com/envoy.api.v2.Cluster", + v3TypeURL: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + typeEnum: ClusterResource, + allResourcesRequiredInSotW: true, + }, + } +) + +// clusterResourceType provides the resource-type specific functionality for a +// Cluster resource. +// +// Implements the Type interface. 
+type clusterResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (clusterResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, cluster, err := unmarshalClusterResource(resource, nil, opts.Logger) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + // Perform extra validation here. + if err := securityConfigValidator(opts.BootstrapConfig, cluster.SecurityCfg); err != nil { + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: cluster}}, nil + +} + +// ClusterResourceData wraps the configuration of a Cluster resource as received +// from the management server. +// +// Implements the ResourceData interface. +type ClusterResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ClusterUpdate +} + +// Equal returns true if other is equal to r. +func (c *ClusterResourceData) Equal(other ResourceData) bool { + if c == nil && other == nil { + return true + } + if (c == nil) != (other == nil) { + return false + } + return proto.Equal(c.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (c *ClusterResourceData) ToJSON() string { + return pretty.ToJSON(c.Resource) +} + +// Raw returns the underlying raw protobuf form of the cluster resource. 
+func (c *ClusterResourceData) Raw() *anypb.Any { + return c.Resource.Raw +} + +// ClusterWatcher wraps the callbacks to be invoked for different events +// corresponding to the cluster resource being watched. +type ClusterWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*ClusterResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +type delegatingClusterWatcher struct { + watcher ClusterWatcher +} + +func (d *delegatingClusterWatcher) OnUpdate(data ResourceData) { + c := data.(*ClusterResourceData) + d.watcher.OnUpdate(c) +} + +func (d *delegatingClusterWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingClusterWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchCluster uses xDS to discover the configuration associated with the +// provided cluster resource name. +func WatchCluster(p Producer, name string, w ClusterWatcher) (cancel func()) { + delegator := &delegatingClusterWatcher{watcher: w} + return p.WatchResource(clusterType, name, delegator) +} diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go new file mode 100644 index 000000000000..2ba7e494aeca --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +var ( + // Compile time interface checks. + _ Type = endpointsResourceType{} + _ ResourceData = &EndpointsResourceData{} + + // Singleton instantiation of the resource type implementation. + endpointsType = endpointsResourceType{ + resourceTypeState: resourceTypeState{ + v2TypeURL: "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment", + v3TypeURL: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + typeEnum: EndpointsResource, + allResourcesRequiredInSotW: false, + }, + } +) + +// endpointsResourceType provides the resource-type specific functionality for a +// ClusterLoadAssignment (or Endpoints) resource. +// +// Implements the Type interface. +type endpointsResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (endpointsResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalEndpointsResource(resource, opts.Logger) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. 
+ return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: EndpointsUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: rc}}, nil + +} + +// EndpointsResourceData wraps the configuration of an Endpoints resource as +// received from the management server. +// +// Implements the ResourceData interface. +type EndpointsResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource EndpointsUpdate +} + +// Equal returns true if other is equal to r. +func (e *EndpointsResourceData) Equal(other ResourceData) bool { + if e == nil && other == nil { + return true + } + if (e == nil) != (other == nil) { + return false + } + return proto.Equal(e.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (e *EndpointsResourceData) ToJSON() string { + return pretty.ToJSON(e.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (e *EndpointsResourceData) Raw() *anypb.Any { + return e.Resource.Raw +} + +// EndpointsWatcher wraps the callbacks to be invoked for different +// events corresponding to the endpoints resource being watched. +type EndpointsWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. 
+ OnUpdate(*EndpointsResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +type delegatingEndpointsWatcher struct { + watcher EndpointsWatcher +} + +func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData) { + e := data.(*EndpointsResourceData) + d.watcher.OnUpdate(e) +} + +func (d *delegatingEndpointsWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchEndpoints uses xDS to discover the configuration associated with the +// provided endpoints resource name. +func WatchEndpoints(p Producer, name string, w EndpointsWatcher) (cancel func()) { + delegator := &delegatingEndpointsWatcher{watcher: w} + return p.WatchResource(endpointsType, name, delegator) +} diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go new file mode 100644 index 000000000000..442389f1cc18 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -0,0 +1,181 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +var ( + // Compile time interface checks. + _ Type = listenerResourceType{} + _ ResourceData = &ListenerResourceData{} + + // Singleton instantiation of the resource type implementation. + listenerType = listenerResourceType{ + resourceTypeState: resourceTypeState{ + v2TypeURL: "type.googleapis.com/envoy.api.v2.Listener", + v3TypeURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + typeEnum: ListenerResource, + allResourcesRequiredInSotW: true, + }, + } +) + +// listenerResourceType provides the resource-type specific functionality for a +// Listener resource. +// +// Implements the Type interface. 
+type listenerResourceType struct { + resourceTypeState +} + +func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { + if sc == nil { + return nil + } + if sc.IdentityInstanceName != "" { + if _, ok := bc.CertProviderConfigs[sc.IdentityInstanceName]; !ok { + return fmt.Errorf("identitiy certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) + } + } + if sc.RootInstanceName != "" { + if _, ok := bc.CertProviderConfigs[sc.RootInstanceName]; !ok { + return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) + } + } + return nil +} + +func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { + if lis.InboundListenerCfg == nil || lis.InboundListenerCfg.FilterChains == nil { + return nil + } + return lis.InboundListenerCfg.FilterChains.Validate(func(fc *FilterChain) error { + if fc == nil { + return nil + } + return securityConfigValidator(bc, fc.SecurityCfg) + }) +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (listenerResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, listener, err := unmarshalListenerResource(resource, nil, opts.Logger) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + // Perform extra validation here. 
+ if err := listenerValidator(opts.BootstrapConfig, listener); err != nil { + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: listener}}, nil + +} + +// ListenerResourceData wraps the configuration of a Listener resource as +// received from the management server. +// +// Implements the ResourceData interface. +type ListenerResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ListenerUpdate +} + +// Equal returns true if other is equal to l. +func (l *ListenerResourceData) Equal(other ResourceData) bool { + if l == nil && other == nil { + return true + } + if (l == nil) != (other == nil) { + return false + } + return proto.Equal(l.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (l *ListenerResourceData) ToJSON() string { + return pretty.ToJSON(l.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (l *ListenerResourceData) Raw() *anypb.Any { + return l.Resource.Raw +} + +// ListenerWatcher wraps the callbacks to be invoked for different +// events corresponding to the listener resource being watched. +type ListenerWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*ListenerResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. 
+ OnResourceDoesNotExist() +} + +type delegatingListenerWatcher struct { + watcher ListenerWatcher +} + +func (d *delegatingListenerWatcher) OnUpdate(data ResourceData) { + l := data.(*ListenerResourceData) + d.watcher.OnUpdate(l) +} + +func (d *delegatingListenerWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingListenerWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchListener uses xDS to discover the configuration associated with the +// provided listener resource name. +func WatchListener(p Producer, name string, w ListenerWatcher) (cancel func()) { + delegator := &delegatingListenerWatcher{watcher: w} + return p.WatchResource(listenerType, name, delegator) +} diff --git a/xds/internal/xdsclient/xdsresource/resource_type.go b/xds/internal/xdsclient/xdsresource/resource_type.go new file mode 100644 index 000000000000..6946c5647fc0 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/resource_type.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/protobuf/types/known/anypb" +) + +// Producer contains a single method to discover resource configuration from a +// remote management server using xDS APIs. 
+// +// The xdsclient package provides a concrete implementation of this interface. +type Producer interface { + // WatchResource uses xDS to discover the resource associated with the + // provided resource name. The resource type implementation determines how + // xDS requests are sent out and how responses are deserialized and + // validated. Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. + WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) +} + +// ResourceWatcher wraps the callbacks to be invoked for different events +// corresponding to the resource being watched. +type ResourceWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + // The ResourceData parameter needs to be type asserted to the appropriate + // type for the resource being watched. + OnUpdate(ResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +// TODO: Once the implementation is complete, rename this interface as +// ResourceType and get rid of the existing ResourceType enum. + +// Type wraps all resource-type specific functionality. Each supported resource +// type will provide an implementation of this interface. +type Type interface { + // V2TypeURL is the xDS type URL of this resource type for v2 transport. + V2TypeURL() string + + // V3TypeURL is the xDS type URL of this resource type for v3 transport. 
+ V3TypeURL() string + + // TypeEnum is an enumerated value for this resource type. This can be used + // for logging/debugging purposes, as well in cases where the resource type + // is to be uniquely identified but the actual functionality provided by the + // resource type is not required. + // + // TODO: once Type is renamed to ResourceType, rename ResourceType to + // ResourceTypeEnum. + TypeEnum() ResourceType + + // AllResourcesRequiredInSotW indicates whether this resource type requires + // that all resources be present in every SotW response from the server. If + // true, a response that does not include a previously seen resource will be + // interpreted as a deletion of that resource. + AllResourcesRequiredInSotW() bool + + // Decode deserializes and validates an xDS resource serialized inside the + // provided `Any` proto, as received from the xDS management server. + // + // If protobuf deserialization fails or resource validation fails, + // returns a non-nil error. Otherwise, returns a fully populated + // DecodeResult. + Decode(*DecodeOptions, *anypb.Any) (*DecodeResult, error) +} + +// ResourceData contains the configuration data sent by the xDS management +// server, associated with the resource being watched. Every resource type must +// provide an implementation of this interface to represent the configuration +// received from the xDS management server. +type ResourceData interface { + isResourceData() + + // Equal returns true if the passed in resource data is equal to that of the + // receiver. + Equal(ResourceData) bool + + // ToJSON returns a JSON string representation of the resource data. + ToJSON() string + + Raw() *anypb.Any +} + +// DecodeOptions wraps the options required by ResourceType implementation for +// decoding configuration received from the xDS management server. +type DecodeOptions struct { + // BootstrapConfig contains the bootstrap configuration passed to the + // top-level xdsClient. 
This contains useful data for resource validation. + BootstrapConfig *bootstrap.Config + // Logger is to be used for emitting logs during the Decode operation. + Logger *grpclog.PrefixLogger +} + +// DecodeResult is the result of a decode operation. +type DecodeResult struct { + // Name is the name of the resource being watched. + Name string + // Resource contains the configuration associated with the resource being + // watched. + Resource ResourceData +} + +// resourceTypeState wraps the static state associated with concrete resource +// type implementations, which can then embed this struct and get the methods +// implemented here for free. +type resourceTypeState struct { + v2TypeURL string + v3TypeURL string + typeEnum ResourceType + allResourcesRequiredInSotW bool +} + +func (r resourceTypeState) V2TypeURL() string { + return r.v2TypeURL +} + +func (r resourceTypeState) V3TypeURL() string { + return r.v3TypeURL +} + +func (r resourceTypeState) TypeEnum() ResourceType { + return r.typeEnum +} + +func (r resourceTypeState) AllResourcesRequiredInSotW() bool { + return r.allResourcesRequiredInSotW +} diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go new file mode 100644 index 000000000000..9c7465fd7de0 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -0,0 +1,145 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +var ( + // Compile time interface checks. + _ Type = routeConfigResourceType{} + _ ResourceData = &RouteConfigResourceData{} + + // Singleton instantiation of the resource type implementation. + routeConfigType = routeConfigResourceType{ + resourceTypeState: resourceTypeState{ + v2TypeURL: "type.googleapis.com/envoy.api.v2.RouteConfiguration", + v3TypeURL: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + typeEnum: RouteConfigResource, + allResourcesRequiredInSotW: false, + }, + } +) + +// routeConfigResourceType provides the resource-type specific functionality for +// a RouteConfiguration resource. +// +// Implements the Type interface. +type routeConfigResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (routeConfigResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalRouteConfigResource(resource, opts.Logger) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: RouteConfigUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: rc}}, nil + +} + +// RouteConfigResourceData wraps the configuration of a RouteConfiguration +// resource as received from the management server. +// +// Implements the ResourceData interface. 
+type RouteConfigResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource RouteConfigUpdate +} + +// Equal returns true if other is equal to r. +func (r *RouteConfigResourceData) Equal(other ResourceData) bool { + if r == nil && other == nil { + return true + } + if (r == nil) != (other == nil) { + return false + } + return proto.Equal(r.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (r *RouteConfigResourceData) ToJSON() string { + return pretty.ToJSON(r.Resource) +} + +// Raw returns the underlying raw protobuf form of the route configuration +// resource. +func (r *RouteConfigResourceData) Raw() *anypb.Any { + return r.Resource.Raw +} + +// RouteConfigWatcher wraps the callbacks to be invoked for different +// events corresponding to the route configuration resource being watched. +type RouteConfigWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*RouteConfigResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. 
+ OnResourceDoesNotExist() +} + +type delegatingRouteConfigWatcher struct { + watcher RouteConfigWatcher +} + +func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData) { + rc := data.(*RouteConfigResourceData) + d.watcher.OnUpdate(rc) +} + +func (d *delegatingRouteConfigWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchRouteConfig uses xDS to discover the configuration associated with the +// provided route configuration resource name. +func WatchRouteConfig(p Producer, name string, w RouteConfigWatcher) (cancel func()) { + delegator := &delegatingRouteConfigWatcher{watcher: w} + return p.WatchResource(routeConfigType, name, delegator) +} From 6e43203eb47aaede137e6a4e5abed5e6a98fb2c4 Mon Sep 17 00:00:00 2001 From: Theodore Salvo Date: Mon, 21 Nov 2022 18:48:12 -0500 Subject: [PATCH 677/998] reflection: generate protobuf files from grpc-proto (#5799) --- .../grpc_reflection_v1/reflection.pb.go | 958 ++++++++++++++++++ .../grpc_reflection_v1/reflection_grpc.pb.go | 160 +++ .../grpc_reflection_v1alpha/reflection.pb.go | 353 +++---- .../grpc_reflection_v1alpha/reflection.proto | 138 --- .../reflection_grpc.pb.go | 10 +- regenerate.sh | 3 +- 6 files changed, 1305 insertions(+), 317 deletions(-) create mode 100644 reflection/grpc_reflection_v1/reflection.pb.go create mode 100644 reflection/grpc_reflection_v1/reflection_grpc.pb.go delete mode 100644 reflection/grpc_reflection_v1alpha/reflection.proto diff --git a/reflection/grpc_reflection_v1/reflection.pb.go b/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 000000000000..ae1ec9c589b9 --- /dev/null +++ b/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,958 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The message sent by the client when calling ServerReflectionInfo method. 
+type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] 
or .). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. 
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. 
+func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with 
transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. 
+type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. 
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. 
+ ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 
0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 
0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: 
grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { 
file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 
000000000000..2382d205cf5e --- /dev/null +++ b/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,160 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo", opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. 
+ ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index 606462e8e78a..ee4b04caf0da 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 gRPC authors. +// Copyright 2016 The gRPC Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - // Service exported by server reflection +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 // protoc v3.14.0 -// source: reflection/grpc_reflection_v1alpha/reflection.proto +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
package grpc_reflection_v1alpha @@ -60,7 +62,7 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -73,7 +75,7 @@ func (x *ServerReflectionRequest) String() string { func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -86,7 +88,7 @@ func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} } func (x *ServerReflectionRequest) GetHost() string { @@ -204,7 +206,7 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -217,7 +219,7 @@ func (x *ExtensionRequest) String() string { func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -230,7 +232,7 @@ func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} } func (x *ExtensionRequest) GetContainingType() string { @@ -255,8 +257,8 @@ type ServerReflectionResponse struct { ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server sets one of the following fields according to the - // message_request in the request. 
+ // The server set one of the following fields according to the message_request + // in the request. // // Types that are assignable to MessageResponse: // @@ -270,7 +272,7 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -283,7 +285,7 @@ func (x *ServerReflectionResponse) String() string { func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -296,7 +298,7 @@ func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} } func (x *ServerReflectionResponse) GetValidHost() string { @@ -354,8 +356,8 @@ type isServerReflectionResponse_MessageResponse interface { type ServerReflectionResponse_FileDescriptorResponse struct { // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a + // file_containing_extension requests with transitive dependencies. 
As + // the repeated label is not allowed in oneof fields, we use a // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. @@ -363,12 +365,12 @@ type ServerReflectionResponse_FileDescriptorResponse struct { } type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requests. + // This message is used to answer all_extension_numbers_of_type requst. AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services requests. + // This message is used to answer list_services request. ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } @@ -404,7 +406,7 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -417,7 +419,7 @@ func (x *FileDescriptorResponse) String() string { func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -430,7 +432,7 @@ func (x *FileDescriptorResponse) ProtoReflect() 
protoreflect.Message { // Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} } func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { @@ -456,7 +458,7 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -469,7 +471,7 @@ func (x *ExtensionNumberResponse) String() string { func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -482,7 +484,7 @@ func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. 
func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} } func (x *ExtensionNumberResponse) GetBaseTypeName() string { @@ -513,7 +515,7 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -526,7 +528,7 @@ func (x *ListServiceResponse) String() string { func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -539,7 +541,7 @@ func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. 
func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} } func (x *ListServiceResponse) GetService() []*ServiceResponse { @@ -564,7 +566,7 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -577,7 +579,7 @@ func (x *ServiceResponse) String() string { func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -590,7 +592,7 @@ func (x *ServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. 
func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} } func (x *ServiceResponse) GetName() string { @@ -614,7 +616,7 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -627,7 +629,7 @@ func (x *ErrorResponse) String() string { func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -640,7 +642,7 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} } func (x *ErrorResponse) GetErrorCode() int32 { @@ -657,136 +659,139 @@ func (x *ErrorResponse) GetErrorMessage() string { return "" } -var File_reflection_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor - -var file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, - 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x22, 0xf8, - 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, - 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, - 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, - 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, - 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, - 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 
- 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, - 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, - 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 
0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x5b, 0x0a, - 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x18, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 
0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, - 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 
0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, - 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 
0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x42, 0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, - 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 
0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, - 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x93, 0x01, 0x0a, - 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 
0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 
0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc ) -func file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData) +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) }) - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest 
(*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse @@ -796,7 +801,7 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interfa (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse @@ -813,13 +818,13 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 0, // [0:7] is the sub-list for field type_name } -func init() { file_reflection_grpc_reflection_v1alpha_reflection_proto_init() } -func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { - if File_reflection_grpc_reflection_v1alpha_reflection_proto != nil { +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -831,7 +836,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - 
file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -843,7 +848,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -855,7 +860,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -867,7 +872,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -879,7 +884,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -891,7 +896,7 @@ func 
file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -903,7 +908,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -916,14 +921,14 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { } } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), @@ -933,18 +938,18 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc, + 
RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, NumEnums: 0, NumMessages: 8, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes, - DependencyIndexes: file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs, - MessageInfos: file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, }.Build() - File_reflection_grpc_reflection_v1alpha_reflection_proto = out.File - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil - file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = nil - file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil } diff --git a/reflection/grpc_reflection_v1alpha/reflection.proto b/reflection/grpc_reflection_v1alpha/reflection.proto deleted file mode 100644 index ee2b82c0a5b3..000000000000 --- a/reflection/grpc_reflection_v1alpha/reflection.proto +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection - -syntax = "proto3"; - -option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; - -package grpc.reflection.v1alpha; - -service ServerReflection { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - rpc ServerReflectionInfo(stream ServerReflectionRequest) - returns (stream ServerReflectionResponse); -} - -// The message sent by the client when calling ServerReflectionInfo method. -message ServerReflectionRequest { - string host = 1; - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. - oneof message_request { - // Find a proto file by the file name. - string file_by_filename = 3; - - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. .[.] or .). - string file_containing_symbol = 4; - - // Find the proto file which defines an extension extending the given - // message type with the given field number. - ExtensionRequest file_containing_extension = 5; - - // Finds the tag numbers used by all known extensions of extendee_type, and - // appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // . - string all_extension_numbers_of_type = 6; - - // List the full names of registered services. The content will not be - // checked. 
- string list_services = 7; - } -} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -message ExtensionRequest { - // Fully-qualified type name. The format should be . - string containing_type = 1; - int32 extension_number = 2; -} - -// The message sent by the server to answer ServerReflectionInfo method. -message ServerReflectionResponse { - string valid_host = 1; - ServerReflectionRequest original_request = 2; - // The server sets one of the following fields according to the - // message_request in the request. - oneof message_response { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. - FileDescriptorResponse file_descriptor_response = 4; - - // This message is used to answer all_extension_numbers_of_type requests. - ExtensionNumberResponse all_extension_numbers_response = 5; - - // This message is used to answer list_services requests. - ListServiceResponse list_services_response = 6; - - // This message is used when an error occurs. - ErrorResponse error_response = 7; - } -} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -message FileDescriptorResponse { - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - repeated bytes file_descriptor_proto = 1; -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. 
-message ExtensionNumberResponse { - // Full name of the base type, including the package name. The format - // is . - string base_type_name = 1; - repeated int32 extension_number = 2; -} - -// A list of ServiceResponse sent by the server answering list_services request. -message ListServiceResponse { - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - repeated ServiceResponse service = 1; -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -message ServiceResponse { - // Full name of a registered service, including its package name. The format - // is . - string name = 1; -} - -// The error code and error message sent by the server when an error occurs. -message ErrorResponse { - // This field uses the error codes defined in grpc::StatusCode. - int32 error_code = 1; - string error_message = 2; -} diff --git a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index b8e76a87dca5..ed54ab1378eb 100644 --- a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 gRPC authors. +// Copyright 2016 The gRPC Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - // Service exported by server reflection +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 -// source: reflection/grpc_reflection_v1alpha/reflection.proto +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -151,5 +153,5 @@ var ServerReflection_ServiceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", + Metadata: "grpc/reflection/v1alpha/reflection.proto", } diff --git a/regenerate.sh b/regenerate.sh index 91ad1cedbca2..a6f26c8ab0f0 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols From 6f96f961f30f547b9a19ad4e5e2c5d8e39b20cd6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Nov 2022 08:58:26 -0800 Subject: [PATCH 678/998] reflection: update proto (#5809) --- reflection/grpc_reflection_v1/reflection.pb.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/reflection/grpc_reflection_v1/reflection.pb.go b/reflection/grpc_reflection_v1/reflection.pb.go index ae1ec9c589b9..4e7cbd5dcf55 100644 --- a/reflection/grpc_reflection_v1/reflection.pb.go +++ b/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,14 +21,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/reflection/v1/reflection.proto package grpc_reflection_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -42,10 +41,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The message sent by the client when calling ServerReflectionInfo method. type ServerReflectionRequest struct { state protoimpl.MessageState From e0a9f1112a1ec04c1daf9618184becf82af85917 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Nov 2022 10:40:31 -0800 Subject: [PATCH 679/998] reflection: split grpc and pb imports (#5810) --- reflection/serverreflection.go | 66 ++++++++++---------- reflection/serverreflection_test.go | 94 +++++++++++++++-------------- 2 files changed, 82 insertions(+), 78 deletions(-) diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index 0b41783aa533..e2f9ebfbbce8 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -42,12 +42,14 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" + + v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. 
It is implemented by @@ -63,7 +65,7 @@ var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. func Register(s GRPCServer) { svr := NewServer(ServerOptions{Services: s}) - rpb.RegisterServerReflectionServer(s, svr) + v1alphagrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -124,7 +126,7 @@ type ServerOptions struct { // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServer(opts ServerOptions) rpb.ServerReflectionServer { +func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -139,7 +141,7 @@ func NewServer(opts ServerOptions) rpb.ServerReflectionServer { } type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer + v1alphagrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -213,11 +215,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &rpb.ServiceResponse{Name: svc}) + resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -226,7 +228,7 @@ func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. 
-func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -237,79 +239,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio return err } - out := &rpb.ServerReflectionResponse{ + out := &v1alphapb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *rpb.ServerReflectionRequest_FileByFilename: + case *v1alphapb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingSymbol: + case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: 
int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingExtension: + case *v1alphapb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - 
AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *rpb.ServerReflectionRequest_ListServices: - out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &rpb.ListServiceResponse{ + case *v1alphapb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphapb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index 3aeac2be0722..04e7ba1dbfbd 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -30,15 +30,17 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - pb "google.golang.org/grpc/reflection/grpc_testing" - pbv3 "google.golang.org/grpc/reflection/grpc_testing_not_regenerate" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/descriptorpb" "google.golang.org/protobuf/types/dynamicpb" + + v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + pb "google.golang.org/grpc/reflection/grpc_testing" + pbv3 "google.golang.org/grpc/reflection/grpc_testing_not_regenerate" ) var ( @@ -216,7 +218,7 @@ func (x) TestReflectionEnd2end(t *testing.T) { } defer conn.Close() - c := rpb.NewServerReflectionClient(conn) + c := v1alphagrpc.NewServerReflectionClient(conn) ctx, cancel := 
context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() stream, err := c.ServerReflectionInfo(ctx, grpc.WaitForReady(true)) @@ -239,10 +241,10 @@ func (x) TestReflectionEnd2end(t *testing.T) { s.Stop() } -func testFileByFilenameTransitiveClosure(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { +func testFileByFilenameTransitiveClosure(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { filename := "reflection/grpc_testing/proto2_ext2.proto" - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_FileByFilename{ FileByFilename: filename, }, }); err != nil { @@ -254,7 +256,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream rpb.ServerReflecti t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) } @@ -272,7 +274,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream rpb.ServerReflecti } } -func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilename(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { filename string want []byte @@ -282,8 +284,8 @@ func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflecti {"reflection/grpc_testing/proto2_ext.proto", fdProto2ExtByte}, {"dynamic.proto", fdDynamicByte}, } { - if err := 
stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_FileByFilename{ FileByFilename: test.filename, }, }); err != nil { @@ -296,7 +298,7 @@ func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflecti } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", test.filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -306,14 +308,14 @@ func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflecti } } -func testFileByFilenameError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilenameError(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "test.poto", "proo2.proto", "proto2_et.proto", } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_FileByFilename{ FileByFilename: test, }, }); err != nil { @@ -326,14 +328,14 @@ func testFileByFilenameError(t *testing.T, stream rpb.ServerReflection_ServerRef } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ErrorResponse: + case *v1alphapb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileByFilename(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbol(t *testing.T, stream 
v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { symbol string want []byte @@ -359,8 +361,8 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe {"grpc.testing.DynamicReq", fdDynamicByte}, {"grpc.testing.DynamicRes", fdDynamicByte}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test.symbol, }, }); err != nil { @@ -373,7 +375,7 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingSymbol(%v)\nreceived: %q,\nwant: %q", test.symbol, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -383,15 +385,15 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe } } -func testFileContainingSymbolError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbolError(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.SerchService", "grpc.testing.SearchService.SearchE", "grpc.tesing.SearchResponse", "gpc.testing.ToBeExtended", } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test, }, }); err != nil { @@ -404,14 +406,14 @@ func testFileContainingSymbolError(t *testing.T, stream 
rpb.ServerReflection_Ser } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ErrorResponse: + case *v1alphapb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingSymbol(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtension(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 @@ -423,9 +425,9 @@ func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_Serve {"grpc.testing.ToBeExtended", 23, fdProto2Ext2Byte}, {"grpc.testing.ToBeExtended", 29, fdProto2Ext2Byte}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &rpb.ExtensionRequest{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphapb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -440,7 +442,7 @@ func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_Serve } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingExtension(%v, %v)\nreceived: %q,\nwant: %q", test.typeName, test.extNum, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -450,7 +452,7 @@ func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_Serve } } -func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtensionError(t *testing.T, stream 
v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 @@ -458,9 +460,9 @@ func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ {"grpc.testing.ToBExtended", 17}, {"grpc.testing.ToBeExtended", 15}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &rpb.ExtensionRequest{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphapb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -475,14 +477,14 @@ func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ErrorResponse: + case *v1alphapb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) } } } -func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfType(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string want []int32 @@ -490,8 +492,8 @@ func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_Ser {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, {"grpc.testing.DynamicReq", nil}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test.typeName, }, }); err != nil { @@ -504,7 +506,7 @@ func testAllExtensionNumbersOfType(t *testing.T, stream 
rpb.ServerReflection_Ser } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_AllExtensionNumbersResponse: + case *v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse: extNum := r.GetAllExtensionNumbersResponse().ExtensionNumber sort.Sort(intArray(extNum)) if r.GetAllExtensionNumbersResponse().BaseTypeName != test.typeName || @@ -517,12 +519,12 @@ func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_Ser } } -func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.ToBeExtendedE", } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test, }, }); err != nil { @@ -535,16 +537,16 @@ func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflectio } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ErrorResponse: + case *v1alphapb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test, r.MessageResponse) } } } -func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_ListServices{}, +func testListServices(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { + if err := stream.Send(&v1alphapb.ServerReflectionRequest{ + MessageRequest: &v1alphapb.ServerReflectionRequest_ListServices{}, }); err != nil { t.Fatalf("failed to send request: %v", err) } @@ -555,7 +557,7 @@ func testListServices(t *testing.T, 
stream rpb.ServerReflection_ServerReflection } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ListServicesResponse: + case *v1alphapb.ServerReflectionResponse_ListServicesResponse: services := r.GetListServicesResponse().Service want := []string{ "grpc.testingv3.SearchServiceV3", From adfb9155e49e6f01daa587e16cfbe9050eb6c545 Mon Sep 17 00:00:00 2001 From: Yimin Chen Date: Tue, 22 Nov 2022 12:58:04 -0800 Subject: [PATCH 680/998] server: fix ChainUnaryInterceptor and ChainStreamInterceptor to allow retrying handlers (#5666) --- server.go | 50 ++++++++++++++++++++------------------------------ server_test.go | 29 +++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 30 deletions(-) diff --git a/server.go b/server.go index f4dde72b41f8..2ed550c91e57 100644 --- a/server.go +++ b/server.go @@ -1150,21 +1150,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
- var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1470,21 +1465,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
- var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } diff --git a/server_test.go b/server_test.go index 7d4cf7bfc21e..85a8f5bf72eb 100644 --- a/server_test.go +++ b/server_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -130,6 +131,34 @@ func (s) TestGetServiceInfo(t *testing.T) { } } +func (s) TestRetryChainedInterceptor(t *testing.T) { + var records []int + i1 := func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) { + records = append(records, 1) + // call handler twice to simulate a retry here. 
+ handler(ctx, req) + return handler(ctx, req) + } + i2 := func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) { + records = append(records, 2) + return handler(ctx, req) + } + i3 := func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) { + records = append(records, 3) + return handler(ctx, req) + } + + ii := chainUnaryInterceptors([]UnaryServerInterceptor{i1, i2, i3}) + + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, nil + } + ii(context.Background(), nil, nil, handler) + if !cmp.Equal(records, []int{1, 2, 3, 2, 3}) { + t.Fatalf("retry failed on chained interceptors: %v", records) + } +} + func (s) TestStreamContext(t *testing.T) { expectedStream := &transport.Stream{} ctx := NewContextWithServerTransportStream(context.Background(), expectedStream) From 09fc1a349826f04420292e0915fd11a5ce3dc347 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 22 Nov 2022 14:16:13 -0800 Subject: [PATCH 681/998] interop: update Go version in docker container used for psm interop (#5811) --- interop/xds/client/Dockerfile | 2 +- interop/xds/server/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/interop/xds/client/Dockerfile b/interop/xds/client/Dockerfile index 533bb6adb3ed..0d2c44a521af 100644 --- a/interop/xds/client/Dockerfile +++ b/interop/xds/client/Dockerfile @@ -16,7 +16,7 @@ # following command from grpc-go directory: # docker build -t -f interop/xds/client/Dockerfile . -FROM golang:1.16-alpine as build +FROM golang:1.19-alpine as build # Make a grpc-go directory and copy the repo into it. 
WORKDIR /go/src/grpc-go diff --git a/interop/xds/server/Dockerfile b/interop/xds/server/Dockerfile index 21dadb918810..db5b2940953d 100644 --- a/interop/xds/server/Dockerfile +++ b/interop/xds/server/Dockerfile @@ -16,7 +16,7 @@ # following command from grpc-go directory: # docker build -t -f interop/xds/server/Dockerfile . -FROM golang:1.16-alpine as build +FROM golang:1.19-alpine as build # Make a grpc-go directory and copy the repo into it. WORKDIR /go/src/grpc-go From 0fe49e823fcd9904afba6cd5e5980da4390d1899 Mon Sep 17 00:00:00 2001 From: Theodore Salvo Date: Mon, 28 Nov 2022 13:17:00 -0500 Subject: [PATCH 682/998] grpc: Improve documentation of read/write buffer size server and dial options (#5800) Fixes https://github.com/grpc/grpc-go/issues/5798 --- dialoptions.go | 10 ++++++---- server.go | 18 +++++++++--------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 9372dc322e80..8f5b536f11eb 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -116,8 +116,9 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +128,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. 
Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/server.go b/server.go index 2ed550c91e57..8c8a1915008d 100644 --- a/server.go +++ b/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. 
The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s From 9f97673ba41adb25a579d5002f2c22e4695b2c4d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 29 Nov 2022 10:08:03 -0800 Subject: [PATCH 683/998] test: move e2e goaway tests to goaway_test.go (#5820) --- test/end2end_test.go | 611 ------------------------------------------ test/goaway_test.go | 621 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 621 insertions(+), 611 deletions(-) diff --git a/test/end2end_test.go b/test/end2end_test.go index 8e50ca70e569..61907e4ca903 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -64,7 +64,6 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -997,338 +996,6 @@ func testServerGracefulStopIdempotent(t *testing.T, e env) { } } -func (s) TestServerGoAway(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testServerGoAway(t, e) - } -} - -func testServerGoAway(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - // Finish an RPC to make sure the connection is good. 
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) - } - ch := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch) - }() - // Loop until the server side GoAway signal is propagated to the client. - for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded { - cancel() - break - } - cancel() - } - // A new RPC should fail. - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal) - } - <-ch - awaitNewConnLogOutput() -} - -func (s) TestServerGoAwayPendingRPC(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testServerGoAwayPendingRPC(t, e) - } -} - -func testServerGoAwayPendingRPC(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - // Finish an RPC to 
make sure the connection is good. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) - } - ch := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch) - }() - // Loop until the server side GoAway signal is propagated to the client. - start := time.Now() - errored := false - for time.Since(start) < time.Second { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) - cancel() - if err != nil { - errored = true - break - } - } - if !errored { - t.Fatalf("GoAway never received by client") - } - respParam := []*testpb.ResponseParameters{{Size: 1}} - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) - if err != nil { - t.Fatal(err) - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - // The existing RPC should be still good to proceed. - if err := stream.Send(req); err != nil { - t.Fatalf("%v.Send(_) = %v, want ", stream, err) - } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) - } - // The RPC will run until canceled. 
- cancel() - <-ch - awaitNewConnLogOutput() -} - -func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testServerMultipleGoAwayPendingRPC(t, e) - } -} - -func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithCancel(context.Background()) - stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - // Finish an RPC to make sure the connection is good. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) - } - ch1 := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch1) - }() - ch2 := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch2) - }() - // Loop until the server side GoAway signal is propagated to the client. 
- for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - cancel() - break - } - cancel() - } - select { - case <-ch1: - t.Fatal("GracefulStop() terminated early") - case <-ch2: - t.Fatal("GracefulStop() terminated early") - default: - } - respParam := []*testpb.ResponseParameters{ - { - Size: 1, - }, - } - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) - if err != nil { - t.Fatal(err) - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - // The existing RPC should be still good to proceed. - if err := stream.Send(req); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) - } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) - } - if err := stream.CloseSend(); err != nil { - t.Fatalf("%v.CloseSend() = %v, want ", stream, err) - } - <-ch1 - <-ch2 - cancel() - awaitNewConnLogOutput() -} - -func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testConcurrentClientConnCloseAndServerGoAway(t, e) - } -} - -func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := 
tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) - } - ch := make(chan struct{}) - // Close ClientConn and Server concurrently. - go func() { - te.srv.GracefulStop() - close(ch) - }() - go func() { - cc.Close() - }() - <-ch -} - -func (s) TestConcurrentServerStopAndGoAway(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testConcurrentServerStopAndGoAway(t, e) - } -} - -func testConcurrentServerStopAndGoAway(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - - // Finish an RPC to make sure the connection is good. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) - } - - ch := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch) - }() - // Loop until the server side GoAway signal is propagated to the client. - for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - cancel() - break - } - cancel() - } - // Stop the server and close all the connections. 
- te.srv.Stop() - respParam := []*testpb.ResponseParameters{ - { - Size: 1, - }, - } - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) - if err != nil { - t.Fatal(err) - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - sendStart := time.Now() - for { - if err := stream.Send(req); err == io.EOF { - // stream.Send should eventually send io.EOF - break - } else if err != nil { - // Send should never return a transport-level error. - t.Fatalf("stream.Send(%v) = %v; want ", req, err) - } - if time.Since(sendStart) > 2*time.Second { - t.Fatalf("stream.Send(_) did not return io.EOF after 2s") - } - time.Sleep(time.Millisecond) - } - if _, err := stream.Recv(); err == nil || err == io.EOF { - t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) - } - <-ch - awaitNewConnLogOutput() -} - func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { rpcStartedOnServer := make(chan struct{}) rpcDoneOnClient := make(chan struct{}) @@ -1376,120 +1043,6 @@ func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { close(rpcDoneOnClient) } -func (s) TestDetailedGoawayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) { - rpcDoneOnClient := make(chan struct{}) - ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { - <-rpcDoneOnClient - return status.Error(codes.Internal, "arbitrary status") - }, - } - sopts := []grpc.ServerOption{ - grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionAge: time.Millisecond * 100, - MaxConnectionAgeGrace: time.Millisecond, - }), - } - if err := ss.Start(sopts); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - stream, err := ss.Client.FullDuplexCall(ctx) - if err != nil { - 
t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) - } - const expectedErrorMessageSubstring = "received prior goaway: code: NO_ERROR" - _, err = stream.Recv() - close(rpcDoneOnClient) - if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { - t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q", stream, err, expectedErrorMessageSubstring) - } -} - -func (s) TestDetailedGoawayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) { - // set the min keepalive time very low so that this test can take - // a reasonable amount of time - prev := internal.KeepaliveMinPingTime - internal.KeepaliveMinPingTime = time.Millisecond - defer func() { internal.KeepaliveMinPingTime = prev }() - - rpcDoneOnClient := make(chan struct{}) - ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { - <-rpcDoneOnClient - return status.Error(codes.Internal, "arbitrary status") - }, - } - sopts := []grpc.ServerOption{ - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: time.Second * 1000, /* arbitrary, large value */ - }), - } - dopts := []grpc.DialOption{ - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: time.Millisecond, /* should trigger "too many pings" error quickly */ - Timeout: time.Second * 1000, /* arbitrary, large value */ - PermitWithoutStream: false, - }), - } - if err := ss.Start(sopts, dopts...); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - stream, err := ss.Client.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) - } - const expectedErrorMessageSubstring = `received prior goaway: code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"` - _, err = stream.Recv() - close(rpcDoneOnClient) - if err == nil || !strings.Contains(err.Error(), 
expectedErrorMessageSubstring) { - t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: |%v|", stream, err, expectedErrorMessageSubstring) - } -} - -func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testClientConnCloseAfterGoAwayWithActiveStream(t, e) - } -} - -func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { - te := newTest(t, e) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - if _, err := tc.FullDuplexCall(ctx); err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) - } - done := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(done) - }() - time.Sleep(50 * time.Millisecond) - cc.Close() - timeout := time.NewTimer(time.Second) - select { - case <-done: - case <-timeout.C: - t.Fatalf("Test timed-out.") - } -} - func (s) TestFailFast(t *testing.T) { for _, e := range listTestEnv() { testFailFast(t, e) @@ -7114,115 +6667,6 @@ func testLargeTimeout(t *testing.T, e env) { } } -// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This -// test ensures that the connection is re-created after GO_AWAY and not affected by the -// subsequent (old) connection closure. -func (s) TestGoAwayThenClose(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - - lis1, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error while listening. 
Err: %v", err) - } - s1 := grpc.NewServer() - defer s1.Stop() - ts := &funcServer{ - unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil - }, - fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { - if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { - t.Errorf("unexpected error from send: %v", err) - return err - } - // Wait forever. - _, err := stream.Recv() - if err == nil { - t.Error("expected to never receive any message") - } - return err - }, - } - testpb.RegisterTestServiceServer(s1, ts) - go s1.Serve(lis1) - - conn2Established := grpcsync.NewEvent() - lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established) - if err != nil { - t.Fatalf("Error while listening. Err: %v", err) - } - s2 := grpc.NewServer() - defer s2.Stop() - testpb.RegisterTestServiceServer(s2, ts) - - r := manual.NewBuilderWithScheme("whatever") - r.InitialState(resolver.State{Addresses: []resolver.Address{ - {Addr: lis1.Addr().String()}, - {Addr: lis2.Addr().String()}, - }}) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("Error creating client: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - - // We make a streaming RPC and do an one-message-round-trip to make sure - // it's created on connection 1. - // - // We use a long-lived RPC because it will cause GracefulStop to send - // GO_AWAY, but the connection doesn't get closed until the server stops and - // the client receives the error. - stream, err := client.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err) - } - if _, err = stream.Recv(); err != nil { - t.Fatalf("unexpected error from first recv: %v", err) - } - - go s2.Serve(lis2) - - // Send GO_AWAY to connection 1. 
- go s1.GracefulStop() - - // Wait for the ClientConn to enter IDLE state. - state := cc.GetState() - for ; state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() { - } - if state != connectivity.Idle { - t.Fatalf("timed out waiting for IDLE channel state; last state = %v", state) - } - - // Initiate another RPC to create another connection. - if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { - t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) - } - - // Assert that connection 2 has been established. - <-conn2Established.Done() - - // Close the listener for server2 to prevent it from allowing new connections. - lis2.Close() - - // Close connection 1. - s1.Stop() - - // Wait for client to close. - if _, err = stream.Recv(); err == nil { - t.Fatal("expected the stream to die, but got a successful Recv") - } - - // Do a bunch of RPCs, make sure it stays stable. These should go to connection 2. - for i := 0; i < 10; i++ { - if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { - t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) - } - } -} - func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) { lis, err := net.Listen(network, address) if err != nil { @@ -8122,61 +7566,6 @@ func (s) TestRecvWhileReturningStatus(t *testing.T) { } } -// TestGoAwayStreamIDSmallerThanCreatedStreams tests the scenario where a server -// sends a goaway with a stream id that is smaller than some created streams on -// the client, while the client is simultaneously creating new streams. This -// should not induce a deadlock. 
-func (s) TestGoAwayStreamIDSmallerThanCreatedStreams(t *testing.T) { - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("error listening: %v", err) - } - - ctCh := testutils.NewChannel() - go func() { - conn, err := lis.Accept() - if err != nil { - t.Errorf("error in lis.Accept(): %v", err) - } - ct := newClientTester(t, conn) - ctCh.Send(ct) - }() - - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("error dialing: %v", err) - } - defer cc.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - val, err := ctCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout waiting for client transport (should be given after http2 creation)") - } - ct := val.(*clientTester) - - tc := testpb.NewTestServiceClient(cc) - someStreamsCreated := grpcsync.NewEvent() - goAwayWritten := grpcsync.NewEvent() - go func() { - for i := 0; i < 20; i++ { - if i == 10 { - <-goAwayWritten.Done() - } - tc.FullDuplexCall(ctx) - if i == 4 { - someStreamsCreated.Fire() - } - } - }() - - <-someStreamsCreated.Done() - ct.writeGoAway(1, http2.ErrCodeNo, []byte{}) - goAwayWritten.Fire() -} - type mockBinaryLogger struct { mml *mockMethodLogger } diff --git a/test/goaway_test.go b/test/goaway_test.go index 1b5a3b7a04e7..92551dbf2a4c 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -20,14 +20,25 @@ package test import ( "context" + "io" "net" + "strings" "testing" "time" + "golang.org/x/net/http2" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + 
"google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -74,3 +85,613 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { cancel() } } + +func (s) TestDetailedGoAwayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) { + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, + } + sopts := []grpc.ServerOption{ + grpc.KeepaliveParams(keepalive.ServerParameters{ + MaxConnectionAge: time.Millisecond * 100, + MaxConnectionAgeGrace: time.Millisecond, + }), + } + if err := ss.Start(sopts); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) + } + const expectedErrorMessageSubstring = "received prior goaway: code: NO_ERROR" + _, err = stream.Recv() + close(rpcDoneOnClient) + if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { + t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q", stream, err, expectedErrorMessageSubstring) + } +} + +func (s) TestDetailedGoAwayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) { + // set the min keepalive time very low so that this test can take + // a reasonable amount of time + prev := internal.KeepaliveMinPingTime + internal.KeepaliveMinPingTime = time.Millisecond + defer func() { internal.KeepaliveMinPingTime = prev }() + + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, + } + sopts := []grpc.ServerOption{ + 
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: time.Second * 1000, /* arbitrary, large value */ + }), + } + dopts := []grpc.DialOption{ + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: time.Millisecond, /* should trigger "too many pings" error quickly */ + Timeout: time.Second * 1000, /* arbitrary, large value */ + PermitWithoutStream: false, + }), + } + if err := ss.Start(sopts, dopts...); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) + } + const expectedErrorMessageSubstring = `received prior goaway: code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"` + _, err = stream.Recv() + close(rpcDoneOnClient) + if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { + t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: |%v|", stream, err, expectedErrorMessageSubstring) + } +} + +func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testClientConnCloseAfterGoAwayWithActiveStream(t, e) + } +} + +func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if _, err := tc.FullDuplexCall(ctx); err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) + } + done := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(done) + }() + time.Sleep(50 * time.Millisecond) + cc.Close() + timeout := time.NewTimer(time.Second) + select { + case <-done: + case 
<-timeout.C: + t.Fatalf("Test timed-out.") + } +} + +func (s) TestServerGoAway(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerGoAway(t, e) + } +} + +func testServerGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + // Finish an RPC to make sure the connection is good. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded { + cancel() + break + } + cancel() + } + // A new RPC should fail. 
+ ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal) + } + <-ch + awaitNewConnLogOutput() +} + +func (s) TestServerGoAwayPendingRPC(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerGoAwayPendingRPC(t, e) + } +} + +func testServerGoAwayPendingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + // Finish an RPC to make sure the connection is good. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. 
+ start := time.Now() + errored := false + for time.Since(start) < time.Second { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) + cancel() + if err != nil { + errored = true + break + } + } + if !errored { + t.Fatalf("GoAway never received by client") + } + respParam := []*testpb.ResponseParameters{{Size: 1}} + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + // The existing RPC should be still good to proceed. + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(_) = %v, want ", stream, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + // The RPC will run until canceled. + cancel() + <-ch + awaitNewConnLogOutput() +} + +func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerMultipleGoAwayPendingRPC(t, e) + } +} + +func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithCancel(context.Background()) + stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + // Finish an RPC to make sure the 
connection is good. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch1 := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch1) + }() + ch2 := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch2) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + cancel() + break + } + cancel() + } + select { + case <-ch1: + t.Fatal("GracefulStop() terminated early") + case <-ch2: + t.Fatal("GracefulStop() terminated early") + default: + } + respParam := []*testpb.ResponseParameters{ + { + Size: 1, + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + // The existing RPC should be still good to proceed. 
+ if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() = %v, want ", stream, err) + } + <-ch1 + <-ch2 + cancel() + awaitNewConnLogOutput() +} + +func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testConcurrentClientConnCloseAndServerGoAway(t, e) + } +} + +func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch := make(chan struct{}) + // Close ClientConn and Server concurrently. 
+ go func() { + te.srv.GracefulStop() + close(ch) + }() + go func() { + cc.Close() + }() + <-ch +} + +func (s) TestConcurrentServerStopAndGoAway(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testConcurrentServerStopAndGoAway(t, e) + } +} + +func testConcurrentServerStopAndGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + + // Finish an RPC to make sure the connection is good. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + cancel() + break + } + cancel() + } + // Stop the server and close all the connections. 
+ te.srv.Stop() + respParam := []*testpb.ResponseParameters{ + { + Size: 1, + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + sendStart := time.Now() + for { + if err := stream.Send(req); err == io.EOF { + // stream.Send should eventually send io.EOF + break + } else if err != nil { + // Send should never return a transport-level error. + t.Fatalf("stream.Send(%v) = %v; want ", req, err) + } + if time.Since(sendStart) > 2*time.Second { + t.Fatalf("stream.Send(_) did not return io.EOF after 2s") + } + time.Sleep(time.Millisecond) + } + if _, err := stream.Recv(); err == nil || err == io.EOF { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + <-ch + awaitNewConnLogOutput() +} + +// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This +// test ensures that the connection is re-created after GO_AWAY and not affected by the +// subsequent (old) connection closure. +func (s) TestGoAwayThenClose(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + lis1, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error while listening. Err: %v", err) + } + s1 := grpc.NewServer() + defer s1.Stop() + ts := &funcServer{ + unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { + t.Errorf("unexpected error from send: %v", err) + return err + } + // Wait forever. 
+ _, err := stream.Recv() + if err == nil { + t.Error("expected to never receive any message") + } + return err + }, + } + testpb.RegisterTestServiceServer(s1, ts) + go s1.Serve(lis1) + + conn2Established := grpcsync.NewEvent() + lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established) + if err != nil { + t.Fatalf("Error while listening. Err: %v", err) + } + s2 := grpc.NewServer() + defer s2.Stop() + testpb.RegisterTestServiceServer(s2, ts) + + r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{Addresses: []resolver.Address{ + {Addr: lis1.Addr().String()}, + {Addr: lis2.Addr().String()}, + }}) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Error creating client: %v", err) + } + defer cc.Close() + + client := testpb.NewTestServiceClient(cc) + + // We make a streaming RPC and do an one-message-round-trip to make sure + // it's created on connection 1. + // + // We use a long-lived RPC because it will cause GracefulStop to send + // GO_AWAY, but the connection doesn't get closed until the server stops and + // the client receives the error. + stream, err := client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err) + } + if _, err = stream.Recv(); err != nil { + t.Fatalf("unexpected error from first recv: %v", err) + } + + go s2.Serve(lis2) + + // Send GO_AWAY to connection 1. + go s1.GracefulStop() + + // Wait for the ClientConn to enter IDLE state. + state := cc.GetState() + for ; state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if state != connectivity.Idle { + t.Fatalf("timed out waiting for IDLE channel state; last state = %v", state) + } + + // Initiate another RPC to create another connection. 
+ if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) + } + + // Assert that connection 2 has been established. + <-conn2Established.Done() + + // Close the listener for server2 to prevent it from allowing new connections. + lis2.Close() + + // Close connection 1. + s1.Stop() + + // Wait for client to close. + if _, err = stream.Recv(); err == nil { + t.Fatal("expected the stream to die, but got a successful Recv") + } + + // Do a bunch of RPCs, make sure it stays stable. These should go to connection 2. + for i := 0; i < 10; i++ { + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) + } + } +} + +// TestGoAwayStreamIDSmallerThanCreatedStreams tests the scenario where a server +// sends a goaway with a stream id that is smaller than some created streams on +// the client, while the client is simultaneously creating new streams. This +// should not induce a deadlock. 
+func (s) TestGoAwayStreamIDSmallerThanCreatedStreams(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + + ctCh := testutils.NewChannel() + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("error in lis.Accept(): %v", err) + } + ct := newClientTester(t, conn) + ctCh.Send(ct) + }() + + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + val, err := ctCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for client transport (should be given after http2 creation)") + } + ct := val.(*clientTester) + + tc := testpb.NewTestServiceClient(cc) + someStreamsCreated := grpcsync.NewEvent() + goAwayWritten := grpcsync.NewEvent() + go func() { + for i := 0; i < 20; i++ { + if i == 10 { + <-goAwayWritten.Done() + } + tc.FullDuplexCall(ctx) + if i == 4 { + someStreamsCreated.Fire() + } + } + }() + + <-someStreamsCreated.Done() + ct.writeGoAway(1, http2.ErrCodeNo, []byte{}) + goAwayWritten.Fire() +} From be202a26011e646189b1ee85c42b7be9f7675704 Mon Sep 17 00:00:00 2001 From: Yash Handa Date: Wed, 30 Nov 2022 00:06:32 +0530 Subject: [PATCH 684/998] examples: add an example to illustrate the usage of stats handler (#5657) --- examples/features/stats_monitoring/README.md | 58 ++++++++++++ .../features/stats_monitoring/client/main.go | 60 ++++++++++++ .../features/stats_monitoring/server/main.go | 62 +++++++++++++ .../stats_monitoring/statshandler/handler.go | 93 +++++++++++++++++++ 4 files changed, 273 insertions(+) create mode 100644 examples/features/stats_monitoring/README.md create mode 100644 examples/features/stats_monitoring/client/main.go create mode 100644 examples/features/stats_monitoring/server/main.go create mode 100644 
examples/features/stats_monitoring/statshandler/handler.go diff --git a/examples/features/stats_monitoring/README.md b/examples/features/stats_monitoring/README.md new file mode 100644 index 000000000000..079b6b4f1ee6 --- /dev/null +++ b/examples/features/stats_monitoring/README.md @@ -0,0 +1,58 @@ +# Stats Monitoring Handler + +This example demonstrates the use of the [`stats`](https://pkg.go.dev/google.golang.org/grpc/stats) package for reporting various +network and RPC stats. +_Note that all fields are READ-ONLY and the APIs of the `stats` package are +experimental_. + +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +gRPC provides a mechanism to hook on to various events (phases) of the +request-response network cycle through the [`stats.Handler`](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface. To access +these events, a concrete type that implements `stats.Handler` should be passed to +`grpc.WithStatsHandler()` on the client side and `grpc.StatsHandler()` on the +server side. + +The `HandleRPC(context.Context, RPCStats)` method on `stats.Handler` is called +multiple times during a request-response cycle, and various event stats are +passed to its `RPCStats` parameter (an interface). The concrete types that +implement this interface are: `*stats.Begin`, `*stats.InHeader`, `*stats.InPayload`, +`*stats.InTrailer`, `*stats.OutHeader`, `*stats.OutPayload`, `*stats.OutTrailer`, and +`*stats.End`. The order of these events differs on client and server. + +Similarly, the `HandleConn(context.Context, ConnStats)` method on `stats.Handler` +is called twice, once at the beginning of the connection with `*stats.ConnBegin` +and once at the end with `*stats.ConnEnd`. + +The [`stats.Handler`](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface also provides +`TagRPC(context.Context, *RPCTagInfo) context.Context` and +`TagConn(context.Context, *ConnTagInfo) context.Context` methods. 
These methods +are mainly used to attach network related information to the given context. + +The `TagRPC(context.Context, *RPCTagInfo) context.Context` method returns a +context from which the context used for the rest lifetime of the RPC will be +derived. This behavior is consistent between the gRPC client and server. + +The context returned from +`TagConn(context.Context, *ConnTagInfo) context.Context` has varied lifespan: + +- In the gRPC client: + The context used for the rest lifetime of the RPC will NOT be derived from + this context. Hence the information attached to this context can only be + consumed by `HandleConn(context.Context, ConnStats)` method. +- In the gRPC server: + The context used for the rest lifetime of the RPC will be derived from + this context. + +NOTE: The [stats](https://pkg.go.dev/google.golang.org/grpc/stats) package should only be used for network monitoring purposes, +and not as an alternative to [interceptors](https://github.com/grpc/grpc-go/blob/master/examples/features/metadata). diff --git a/examples/features/stats_monitoring/client/main.go b/examples/features/stats_monitoring/client/main.go new file mode 100644 index 000000000000..0fb820d11c63 --- /dev/null +++ b/examples/features/stats_monitoring/client/main.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Binary client is an example client to illustrate the use of the stats handler. +package main + +import ( + "context" + "flag" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + echogrpc "google.golang.org/grpc/examples/features/proto/echo" + echopb "google.golang.org/grpc/examples/features/proto/echo" + "google.golang.org/grpc/examples/features/stats_monitoring/statshandler" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func main() { + flag.Parse() + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithStatsHandler(statshandler.New()), + } + conn, err := grpc.Dial(*addr, opts...) + if err != nil { + log.Fatalf("failed to connect to server %q: %v", *addr, err) + } + defer conn.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + c := echogrpc.NewEchoClient(conn) + + resp, err := c.UnaryEcho(ctx, &echopb.EchoRequest{Message: "stats handler demo"}) + if err != nil { + log.Fatalf("unexpected error from UnaryEcho: %v", err) + } + log.Printf("RPC response: %s", resp.Message) +} diff --git a/examples/features/stats_monitoring/server/main.go b/examples/features/stats_monitoring/server/main.go new file mode 100644 index 000000000000..a460522c29db --- /dev/null +++ b/examples/features/stats_monitoring/server/main.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server to illustrate the use of the stats handler. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + "time" + + "google.golang.org/grpc" + + echogrpc "google.golang.org/grpc/examples/features/proto/echo" + echopb "google.golang.org/grpc/examples/features/proto/echo" + "google.golang.org/grpc/examples/features/stats_monitoring/statshandler" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +type server struct { + echogrpc.UnimplementedEchoServer +} + +func (s *server) UnaryEcho(ctx context.Context, req *echopb.EchoRequest) (*echopb.EchoResponse, error) { + time.Sleep(2 * time.Second) + return &echopb.EchoResponse{Message: req.Message}, nil +} + +func main() { + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen on port %d: %v", *port, err) + } + log.Printf("server listening at %v\n", lis.Addr()) + + s := grpc.NewServer(grpc.StatsHandler(statshandler.New())) + echogrpc.RegisterEchoServer(s, &server{}) + + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/examples/features/stats_monitoring/statshandler/handler.go b/examples/features/stats_monitoring/statshandler/handler.go new file mode 100644 index 000000000000..85688b8c3856 --- /dev/null +++ b/examples/features/stats_monitoring/statshandler/handler.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package statshandler is an example pkg to illustrate the use of the stats handler. +package statshandler + +import ( + "context" + "log" + "net" + "path/filepath" + + "google.golang.org/grpc/stats" +) + +// Handler implements [stats.Handler](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface. +type Handler struct{} + +type connStatCtxKey struct{} + +// TagConn can attach some information to the given context. +// The context used in HandleConn for this connection will be derived from the context returned. +// In the gRPC client: +// The context used in HandleRPC for RPCs on this connection will be the user's context and NOT derived from the context returned here. +// In the gRPC server: +// The context used in HandleRPC for RPCs on this connection will be derived from the context returned here. +func (st *Handler) TagConn(ctx context.Context, stat *stats.ConnTagInfo) context.Context { + log.Printf("[TagConn] [%T]: %+[1]v", stat) + return context.WithValue(ctx, connStatCtxKey{}, stat) +} + +// HandleConn processes the Conn stats. +func (st *Handler) HandleConn(ctx context.Context, stat stats.ConnStats) { + var rAddr net.Addr + if s, ok := ctx.Value(connStatCtxKey{}).(*stats.ConnTagInfo); ok { + rAddr = s.RemoteAddr + } + + if stat.IsClient() { + log.Printf("[server addr: %s] [HandleConn] [%T]: %+[2]v", rAddr, stat) + } else { + log.Printf("[client addr: %s] [HandleConn] [%T]: %+[2]v", rAddr, stat) + } +} + +type rpcStatCtxKey struct{} + +// TagRPC can attach some information to the given context. 
+// The context used for the rest lifetime of the RPC will be derived from the returned context. +func (st *Handler) TagRPC(ctx context.Context, stat *stats.RPCTagInfo) context.Context { + log.Printf("[TagRPC] [%T]: %+[1]v", stat) + return context.WithValue(ctx, rpcStatCtxKey{}, stat) +} + +// HandleRPC processes the RPC stats. Note: All stat fields are read-only. +func (st *Handler) HandleRPC(ctx context.Context, stat stats.RPCStats) { + var sMethod string + if s, ok := ctx.Value(rpcStatCtxKey{}).(*stats.RPCTagInfo); ok { + sMethod = filepath.Base(s.FullMethodName) + } + + var cAddr net.Addr + // for gRPC clients, key connStatCtxKey{} will not be present in HandleRPC's context. + if s, ok := ctx.Value(connStatCtxKey{}).(*stats.ConnTagInfo); ok { + cAddr = s.RemoteAddr + } + + if stat.IsClient() { + log.Printf("[server method: %s] [HandleRPC] [%T]: %+[2]v", sMethod, stat) + } else { + log.Printf("[client addr: %s] [HandleRPC] [%T]: %+[2]v", cAddr, stat) + } +} + +// New returns a new implementation of [stats.Handler](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface. +func New() *Handler { + return &Handler{} +} From dd123b7f866d8a8aa075393f94f4e34369a727a4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 29 Nov 2022 12:03:36 -0800 Subject: [PATCH 685/998] testutils/pickfirst: move helper function to testutils (#5822) --- internal/testutils/pickfirst/pickfirst.go | 72 +++++++++++++++++++++++ test/balancer_switching_test.go | 17 +++--- test/pickfirst_test.go | 61 ++++--------------- 3 files changed, 94 insertions(+), 56 deletions(-) create mode 100644 internal/testutils/pickfirst/pickfirst.go diff --git a/internal/testutils/pickfirst/pickfirst.go b/internal/testutils/pickfirst/pickfirst.go new file mode 100644 index 000000000000..ee5bff0d88fb --- /dev/null +++ b/internal/testutils/pickfirst/pickfirst.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirst contains helper functions to check for pickfirst load +// balancing of RPCs in tests. +package pickfirst + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +// CheckRPCsToBackend makes a bunch of RPCs on the given ClientConn and verifies +// if the RPCs are routed to a peer matching wantAddr. +// +// Returns a non-nil error if context deadline expires before all RPCs begin to +// be routed to the peer matching wantAddr, or if the backend returns RPC errors. +func CheckRPCsToBackend(ctx context.Context, cc *grpc.ClientConn, wantAddr resolver.Address) error { + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + // Make sure the RPC reaches the expected backend once. + for { + time.Sleep(time.Millisecond) + if ctx.Err() != nil { + return fmt.Errorf("timeout waiting for RPC to be routed to %s", wantAddr.Addr) + } + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + // Some tests remove backends and check if pick_first is happening across + // the remaining backends. In such cases, RPCs can initially fail on the + // connection using the removed backend. 
Just keep retrying and eventually + // the connection using the removed backend will shutdown and will be + // removed. + continue + } + if peer.Addr.String() == wantAddr.Addr { + break + } + } + // Make sure subsequent RPCs are all routed to the same backend. + for i := 0; i < 10; i++ { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + return fmt.Errorf("EmptyCall() = %v, want ", err) + } + if gotAddr := peer.Addr.String(); gotAddr != wantAddr.Addr { + return fmt.Errorf("rpc sent to peer %q, want peer %q", gotAddr, wantAddr) + } + } + return nil +} diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go index 94ac796558bc..0337cd19014a 100644 --- a/test/balancer_switching_test.go +++ b/test/balancer_switching_test.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils/fakegrpclb" + "google.golang.org/grpc/internal/testutils/pickfirst" rrutil "google.golang.org/grpc/internal/testutils/roundrobin" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" @@ -127,7 +128,7 @@ func (s) TestBalancerSwitch_Basic(t *testing.T) { r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } @@ -146,7 +147,7 @@ func (s) TestBalancerSwitch_Basic(t *testing.T) { Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, pickFirstServiceConfig), }) - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } } @@ -191,7 +192,7 @@ func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) { // returned by the "grpclb" balancer. 
So, we should see RPCs going to the // newly configured backends, as part of the balancer switch. r.UpdateState(resolver.State{Addresses: addrs[1:]}) - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } } @@ -216,7 +217,7 @@ func (s) TestBalancerSwitch_pickFirstToGRPCLB(t *testing.T) { r.UpdateState(resolver.State{Addresses: addrs[1:]}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } @@ -238,7 +239,7 @@ func (s) TestBalancerSwitch_pickFirstToGRPCLB(t *testing.T) { // Switch to "pick_first" again by sending no grpclb server addresses. r.UpdateState(resolver.State{Addresses: addrs[1:]}) - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } } @@ -332,7 +333,7 @@ func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) { r.UpdateState(resolver.State{Addresses: addrs}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } @@ -371,7 +372,7 @@ func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing r.UpdateState(resolver.State{Addresses: addrs[1:]}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } @@ -609,7 +610,7 @@ func (s) TestBalancerSwitch_Graceful(t *testing.T) { // underlying "pick_first" balancer which will result in a healthy picker // being reported to the 
channel. RPCs should start using the new balancer. close(waitToProceed) - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } } diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index 00a40055ea77..129693944475 100644 --- a/test/pickfirst_test.go +++ b/test/pickfirst_test.go @@ -20,7 +20,6 @@ package test import ( "context" - "fmt" "testing" "time" @@ -30,10 +29,11 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/stubserver" - "google.golang.org/grpc/peer" + "google.golang.org/grpc/internal/testutils/pickfirst" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" + testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" ) @@ -93,41 +93,6 @@ func setupPickFirst(t *testing.T, backendCount int, opts ...grpc.DialOption) (*g return cc, r, backends } -// checkPickFirst makes a bunch of RPCs on the given ClientConn and verifies if -// the RPCs are routed to a peer matching wantAddr. -func checkPickFirst(ctx context.Context, cc *grpc.ClientConn, wantAddr string) error { - client := testgrpc.NewTestServiceClient(cc) - peer := &peer.Peer{} - // Make sure the RPC reaches the expected backend once. - for { - time.Sleep(time.Millisecond) - if ctx.Err() != nil { - return fmt.Errorf("timeout waiting for RPC to be routed to %q", wantAddr) - } - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { - // Some tests remove backends and check if pick_first is happening across - // the remaining backends. In such cases, RPCs can initially fail on the - // connection using the removed backend. Just keep retrying and eventually - // the connection using the removed backend will shutdown and will be - // removed. 
- continue - } - if peer.Addr.String() == wantAddr { - break - } - } - // Make sure subsequent RPCs are all routed to the same backend. - for i := 0; i < 10; i++ { - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { - return fmt.Errorf("EmptyCall() = %v, want ", err) - } - if gotAddr := peer.Addr.String(); gotAddr != wantAddr { - return fmt.Errorf("rpc sent to peer %q, want peer %q", gotAddr, wantAddr) - } - } - return nil -} - // stubBackendsToResolverAddrs converts from a set of stub server backends to // resolver addresses. Useful when pushing addresses to the manual resolver. func stubBackendsToResolverAddrs(backends []*stubserver.StubServer) []resolver.Address { @@ -148,7 +113,7 @@ func (s) TestPickFirst_OneBackend(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } } @@ -163,7 +128,7 @@ func (s) TestPickFirst_MultipleBackends(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } } @@ -179,14 +144,14 @@ func (s) TestPickFirst_OneServerDown(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } // Stop the backend which is currently being used. RPCs should get routed to // the next backend in the list. 
backends[0].Stop() - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } } @@ -202,7 +167,7 @@ func (s) TestPickFirst_AllServersDown(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } @@ -233,35 +198,35 @@ func (s) TestPickFirst_AddressesRemoved(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } // Remove the first backend from the list of addresses originally pushed. // RPCs should get routed to the first backend in the new list. r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[1], addrs[2]}}) - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } // Append the backend that we just removed to the end of the list. // Nothing should change. r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[1], addrs[2], addrs[0]}}) - if err := checkPickFirst(ctx, cc, addrs[1].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { t.Fatal(err) } // Remove the first backend from the existing list of addresses. // RPCs should get routed to the first backend in the new list. r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[2], addrs[0]}}) - if err := checkPickFirst(ctx, cc, addrs[2].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[2]); err != nil { t.Fatal(err) } // Remove the first backend from the existing list of addresses. 
// RPCs should get routed to the first backend in the new list. r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0]}}) - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } } @@ -278,7 +243,7 @@ func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if err := checkPickFirst(ctx, cc, addrs[0].Addr); err != nil { + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { t.Fatal(err) } From 087387ca181346d01a0b0bb6bc1bde40f6c02f05 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 29 Nov 2022 17:48:52 -0500 Subject: [PATCH 686/998] Deflake Outlier Detection xDS e2e test (#5819) --- test/xds/xds_client_outlier_detection_test.go | 128 ++++++++++++------ .../e2e_test/outlierdetection_test.go | 4 +- 2 files changed, 87 insertions(+), 45 deletions(-) diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go index fe47bc9b828a..424012700770 100644 --- a/test/xds/xds_client_outlier_detection_test.go +++ b/test/xds/xds_client_outlier_detection_test.go @@ -22,7 +22,6 @@ import ( "context" "errors" "fmt" - "strings" "testing" "time" @@ -30,10 +29,13 @@ import ( v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" testgrpc 
"google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/protobuf/types/known/durationpb" @@ -107,52 +109,91 @@ func clusterWithOutlierDetection(clusterName, edsServiceName string, secLevel e2 MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 1}, FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 50}, EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 100}, - FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 1}, - FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 1}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 8}, + FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 3}, } return cluster } +// checkRoundRobinRPCs verifies that EmptyCall RPCs on the given ClientConn, +// connected to a server exposing the test.grpc_testing.TestService, are +// roundrobined across the given backend addresses. +// +// Returns a non-nil error if context deadline expires before RPCs start to get +// roundrobined across the given backends. +func checkRoundRobinRPCs(ctx context.Context, client testpb.TestServiceClient, addrs []resolver.Address) error { + wantAddrCount := make(map[string]int) + for _, addr := range addrs { + wantAddrCount[addr.Addr]++ + } + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + // Perform 3 iterations. + var iterations [][]string + for i := 0; i < 3; i++ { + iteration := make([]string, len(addrs)) + for c := 0; c < len(addrs); c++ { + var peer peer.Peer + client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)) + if peer.Addr != nil { + iteration[c] = peer.Addr.String() + } + } + iterations = append(iterations, iteration) + } + // Ensure the the first iteration contains all addresses in addrs. 
+ gotAddrCount := make(map[string]int) + for _, addr := range iterations[0] { + gotAddrCount[addr]++ + } + if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" { + continue + } + // Ensure all three iterations contain the same addresses. + if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) { + continue + } + return nil + } + return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) +} + // TestOutlierDetectionWithOutlier tests the Outlier Detection Balancer e2e. It // spins up three backends, one which consistently errors, and configures the // ClientConn using xDS to connect to all three of those backends. The Outlier // Detection Balancer should eject the connection to the backend which -// constantly errors, and thus RPC's should mainly go to backend 1 and 2. +// constantly errors, causing RPC's to not be routed to that upstream, and only +// be Round Robined across the two healthy upstreams. Other than the intervals +// the unhealthy upstream is ejected, RPC's should regularly round robin across +// all three upstreams. func (s) TestOutlierDetectionWithOutlier(t *testing.T) { - managementServer, nodeID, _, resolver, cleanup := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, nil) defer cleanup() - // Counters for how many times backends got called. - var count1, count2, count3 int - // Working backend 1. - port1, cleanup1 := startTestService(t, &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - count1++ + backend1 := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - Address: "localhost:0", - }) + } + port1, cleanup1 := startTestService(t, backend1) defer cleanup1() // Working backend 2. 
- port2, cleanup2 := startTestService(t, &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - count2++ + backend2 := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - Address: "localhost:0", - }) + } + port2, cleanup2 := startTestService(t, backend2) defer cleanup2() // Backend 3 that will always return an error and eventually ejected. - port3, cleanup3 := startTestService(t, &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - count3++ + backend3 := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return nil, errors.New("some error") }, - Address: "localhost:0", - }) + } + port3, cleanup3 := startTestService(t, backend3) defer cleanup3() const serviceName = "my-service-client-side-xds" @@ -168,34 +209,35 @@ func (s) TestOutlierDetectionWithOutlier(t *testing.T) { t.Fatal(err) } - cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } defer cc.Close() client := testgrpc.NewTestServiceClient(cc) - for i := 0; i < 2000; i++ { - // Can either error or not depending on the backend called. - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil && !strings.Contains(err.Error(), "some error") { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } - time.Sleep(time.Millisecond) - } - // Backend 1 should've gotten more than 1/3rd of the load as backend 3 - // should get ejected, leaving only 1 and 2. 
- if count1 < 700 { - t.Fatalf("backend 1 should've gotten more than 1/3rd of the load") + fullAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, + {Addr: backend3.Address}, } - // Backend 2 should've gotten more than 1/3rd of the load as backend 3 - // should get ejected, leaving only 1 and 2. - if count2 < 700 { - t.Fatalf("backend 2 should've gotten more than 1/3rd of the load") + // At first, due to no statistics on each of the backends, the 3 + // upstreams should all be round robined across. + if err = checkRoundRobinRPCs(ctx, client, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, } - // Backend 3 should've gotten less than 1/3rd of the load since it gets - // ejected. - if count3 > 650 { - t.Fatalf("backend 1 should've gotten more than 1/3rd of the load") + // After calling the three upstreams, one of them constantly error + // and should eventually be ejected for a period of time. This + // period of time should cause the RPC's to be round robined only + // across the two that are healthy. + if err = checkRoundRobinRPCs(ctx, client, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) } } diff --git a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go index 75e084723fce..0dfcc19d245e 100644 --- a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go +++ b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go @@ -140,9 +140,9 @@ func checkRoundRobinRPCs(ctx context.Context, client testpb.TestServiceClient, a // Balancer is configured as the top level LB Policy of the channel with a Round // Robin child, and connects to three upstreams. 
Two of the upstreams are healthy and // one is unhealthy. The two algorithms should at some point eject the failing -// upstream, causing RPC's to not be routed to those two upstreams, and only be +// upstream, causing RPC's to not be routed to that upstream, and only be // Round Robined across the two healthy upstreams. Other than the intervals the -// two unhealthy upstreams are ejected, RPC's should regularly round robin +// unhealthy upstream is ejected, RPC's should regularly round robin // across all three upstreams. func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { tests := []struct { From 94f0e7fa77bc92cf8c17647d00efc7df7de25160 Mon Sep 17 00:00:00 2001 From: Antoine Tollenaere Date: Wed, 30 Nov 2022 17:52:40 +0100 Subject: [PATCH 687/998] benchmark: add a feature for read and write buffer sizes (#5774) * benchmark: add a feature for read and write buffer sizes --- benchmark/benchmain/main.go | 97 +++++++++++++++++++++++++------------ benchmark/stats/stats.go | 27 ++++++++++- 2 files changed, 92 insertions(+), 32 deletions(-) diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 5e2caf5d204d..d9d1e6d6a8cf 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -106,6 +106,10 @@ var ( useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O") enableKeepalive = flag.Bool("enable_keepalive", false, "Enable client keepalive. \n"+ "Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.") + clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a a comma-separated list") + clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. 
If negative, use the default - may be a a comma-separated list") + serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a a comma-separated list") + serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a a comma-separated list") logger = grpclog.Component("benchmark") ) @@ -306,6 +310,19 @@ func makeClient(bf stats.Features) (testgrpc.BenchmarkServiceClient, func()) { }), ) } + if bf.ClientReadBufferSize >= 0 { + opts = append(opts, grpc.WithReadBufferSize(bf.ClientReadBufferSize)) + } + if bf.ClientWriteBufferSize >= 0 { + opts = append(opts, grpc.WithWriteBufferSize(bf.ClientWriteBufferSize)) + } + if bf.ServerReadBufferSize >= 0 { + sopts = append(sopts, grpc.ReadBufferSize(bf.ServerReadBufferSize)) + } + if bf.ServerWriteBufferSize >= 0 { + sopts = append(sopts, grpc.WriteBufferSize(bf.ServerWriteBufferSize)) + } + sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(bf.MaxConcurrentCalls+1))) opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) @@ -496,18 +513,22 @@ type benchOpts struct { // features through command line flags. We generate all possible combinations // for the provided values and run the benchmarks for each combination. 
type featureOpts struct { - enableTrace []bool - readLatencies []time.Duration - readKbps []int - readMTU []int - maxConcurrentCalls []int - reqSizeBytes []int - respSizeBytes []int - reqPayloadCurves []*stats.PayloadCurve - respPayloadCurves []*stats.PayloadCurve - compModes []string - enableChannelz []bool - enablePreloader []bool + enableTrace []bool + readLatencies []time.Duration + readKbps []int + readMTU []int + maxConcurrentCalls []int + reqSizeBytes []int + respSizeBytes []int + reqPayloadCurves []*stats.PayloadCurve + respPayloadCurves []*stats.PayloadCurve + compModes []string + enableChannelz []bool + enablePreloader []bool + clientReadBufferSize []int + clientWriteBufferSize []int + serverReadBufferSize []int + serverWriteBufferSize []int } // makeFeaturesNum returns a slice of ints of size 'maxFeatureIndex' where each @@ -544,6 +565,14 @@ func makeFeaturesNum(b *benchOpts) []int { featuresNum[i] = len(b.features.enableChannelz) case stats.EnablePreloaderIndex: featuresNum[i] = len(b.features.enablePreloader) + case stats.ClientReadBufferSize: + featuresNum[i] = len(b.features.clientReadBufferSize) + case stats.ClientWriteBufferSize: + featuresNum[i] = len(b.features.clientWriteBufferSize) + case stats.ServerReadBufferSize: + featuresNum[i] = len(b.features.serverReadBufferSize) + case stats.ServerWriteBufferSize: + featuresNum[i] = len(b.features.serverWriteBufferSize) default: log.Fatalf("Unknown feature index %v in generateFeatures. maxFeatureIndex is %v", i, stats.MaxFeatureIndex) } @@ -598,14 +627,18 @@ func (b *benchOpts) generateFeatures(featuresNum []int) []stats.Features { EnableKeepalive: b.enableKeepalive, BenchTime: b.benchTime, // These features can potentially change for each iteration. 
- EnableTrace: b.features.enableTrace[curPos[stats.EnableTraceIndex]], - Latency: b.features.readLatencies[curPos[stats.ReadLatenciesIndex]], - Kbps: b.features.readKbps[curPos[stats.ReadKbpsIndex]], - MTU: b.features.readMTU[curPos[stats.ReadMTUIndex]], - MaxConcurrentCalls: b.features.maxConcurrentCalls[curPos[stats.MaxConcurrentCallsIndex]], - ModeCompressor: b.features.compModes[curPos[stats.CompModesIndex]], - EnableChannelz: b.features.enableChannelz[curPos[stats.EnableChannelzIndex]], - EnablePreloader: b.features.enablePreloader[curPos[stats.EnablePreloaderIndex]], + EnableTrace: b.features.enableTrace[curPos[stats.EnableTraceIndex]], + Latency: b.features.readLatencies[curPos[stats.ReadLatenciesIndex]], + Kbps: b.features.readKbps[curPos[stats.ReadKbpsIndex]], + MTU: b.features.readMTU[curPos[stats.ReadMTUIndex]], + MaxConcurrentCalls: b.features.maxConcurrentCalls[curPos[stats.MaxConcurrentCallsIndex]], + ModeCompressor: b.features.compModes[curPos[stats.CompModesIndex]], + EnableChannelz: b.features.enableChannelz[curPos[stats.EnableChannelzIndex]], + EnablePreloader: b.features.enablePreloader[curPos[stats.EnablePreloaderIndex]], + ClientReadBufferSize: b.features.clientReadBufferSize[curPos[stats.ClientReadBufferSize]], + ClientWriteBufferSize: b.features.clientWriteBufferSize[curPos[stats.ClientWriteBufferSize]], + ServerReadBufferSize: b.features.serverReadBufferSize[curPos[stats.ServerReadBufferSize]], + ServerWriteBufferSize: b.features.serverWriteBufferSize[curPos[stats.ServerWriteBufferSize]], } if len(b.features.reqPayloadCurves) == 0 { f.ReqSizeBytes = b.features.reqSizeBytes[curPos[stats.ReqSizeBytesIndex]] @@ -662,16 +695,20 @@ func processFlags() *benchOpts { useBufconn: *useBufconn, enableKeepalive: *enableKeepalive, features: &featureOpts{ - enableTrace: setToggleMode(*traceMode), - readLatencies: append([]time.Duration(nil), *readLatency...), - readKbps: append([]int(nil), *readKbps...), - readMTU: append([]int(nil), *readMTU...), - 
maxConcurrentCalls: append([]int(nil), *maxConcurrentCalls...), - reqSizeBytes: append([]int(nil), *readReqSizeBytes...), - respSizeBytes: append([]int(nil), *readRespSizeBytes...), - compModes: setCompressorMode(*compressorMode), - enableChannelz: setToggleMode(*channelzOn), - enablePreloader: setToggleMode(*preloaderMode), + enableTrace: setToggleMode(*traceMode), + readLatencies: append([]time.Duration(nil), *readLatency...), + readKbps: append([]int(nil), *readKbps...), + readMTU: append([]int(nil), *readMTU...), + maxConcurrentCalls: append([]int(nil), *maxConcurrentCalls...), + reqSizeBytes: append([]int(nil), *readReqSizeBytes...), + respSizeBytes: append([]int(nil), *readRespSizeBytes...), + compModes: setCompressorMode(*compressorMode), + enableChannelz: setToggleMode(*channelzOn), + enablePreloader: setToggleMode(*preloaderMode), + clientReadBufferSize: append([]int(nil), *clientReadBufferSize...), + clientWriteBufferSize: append([]int(nil), *clientWriteBufferSize...), + serverReadBufferSize: append([]int(nil), *serverReadBufferSize...), + serverWriteBufferSize: append([]int(nil), *serverWriteBufferSize...), }, } diff --git a/benchmark/stats/stats.go b/benchmark/stats/stats.go index 6275c3c3a71c..7f4db236277e 100644 --- a/benchmark/stats/stats.go +++ b/benchmark/stats/stats.go @@ -52,6 +52,10 @@ const ( CompModesIndex EnableChannelzIndex EnablePreloaderIndex + ClientReadBufferSize + ClientWriteBufferSize + ServerReadBufferSize + ServerWriteBufferSize // MaxFeatureIndex is a place holder to indicate the total number of feature // indices we have. Any new feature indices should be added above this. @@ -109,6 +113,14 @@ type Features struct { EnableChannelz bool // EnablePreloader indicates if preloading was turned on. EnablePreloader bool + // ClientReadBufferSize is the size of the client read buffer in bytes. If negative, use the default buffer size. + ClientReadBufferSize int + // ClientWriteBufferSize is the size of the client write buffer in bytes. 
If negative, use the default buffer size. + ClientWriteBufferSize int + // ServerReadBufferSize is the size of the server read buffer in bytes. If negative, use the default buffer size. + ServerReadBufferSize int + // ServerWriteBufferSize is the size of the server write buffer in bytes. If negative, use the default buffer size. + ServerWriteBufferSize int } // String returns all the feature values as a string. @@ -126,10 +138,13 @@ func (f Features) String() string { } return fmt.Sprintf("networkMode_%v-bufConn_%v-keepalive_%v-benchTime_%v-"+ "trace_%v-latency_%v-kbps_%v-MTU_%v-maxConcurrentCalls_%v-%s-%s-"+ - "compressor_%v-channelz_%v-preloader_%v", + "compressor_%v-channelz_%v-preloader_%v-clientReadBufferSize_%v-"+ + "clientWriteBufferSize_%v-serverReadBufferSize_%v-serverWriteBufferSize_%v-", f.NetworkMode, f.UseBufConn, f.EnableKeepalive, f.BenchTime, f.EnableTrace, f.Latency, f.Kbps, f.MTU, f.MaxConcurrentCalls, reqPayloadString, - respPayloadString, f.ModeCompressor, f.EnableChannelz, f.EnablePreloader) + respPayloadString, f.ModeCompressor, f.EnableChannelz, f.EnablePreloader, + f.ClientReadBufferSize, f.ClientWriteBufferSize, f.ServerReadBufferSize, + f.ServerWriteBufferSize) } // SharedFeatures returns the shared features as a pretty printable string. 
@@ -193,6 +208,14 @@ func (f Features) partialString(b *bytes.Buffer, wantFeatures []bool, sep, delim b.WriteString(fmt.Sprintf("Channelz%v%v%v", sep, f.EnableChannelz, delim)) case EnablePreloaderIndex: b.WriteString(fmt.Sprintf("Preloader%v%v%v", sep, f.EnablePreloader, delim)) + case ClientReadBufferSize: + b.WriteString(fmt.Sprintf("ClientReadBufferSize%v%v%v", sep, f.ClientWriteBufferSize, delim)) + case ClientWriteBufferSize: + b.WriteString(fmt.Sprintf("ClientWriteBufferSize%v%v%v", sep, f.ClientWriteBufferSize, delim)) + case ServerReadBufferSize: + b.WriteString(fmt.Sprintf("ServerReadBufferSize%v%v%v", sep, f.ServerReadBufferSize, delim)) + case ServerWriteBufferSize: + b.WriteString(fmt.Sprintf("ServerWriteBufferSize%v%v%v", sep, f.ServerWriteBufferSize, delim)) default: log.Fatalf("Unknown feature index %v. maxFeatureIndex is %v", i, MaxFeatureIndex) } From c91396d4e1406768fa8b43bb5aa7c2e8e4eca79e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 30 Nov 2022 08:57:17 -0800 Subject: [PATCH 688/998] pickfirst: do not return initial subconn while connecting (#5825) Fixes https://github.com/grpc/grpc-go/issues/5293 --- clientconn.go | 6 ++++++ pickfirst.go | 4 ++-- test/goaway_test.go | 36 ++++++++++++++++++++++++------------ 3 files changed, 32 insertions(+), 14 deletions(-) diff --git a/clientconn.go b/clientconn.go index 422639c79dbf..78c81a108ed0 100644 --- a/clientconn.go +++ b/clientconn.go @@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } diff --git a/pickfirst.go b/pickfirst.go index fb7a99e0a273..b3a55481b944 100644 --- 
a/pickfirst.go +++ b/pickfirst.go @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/test/goaway_test.go b/test/goaway_test.go index 92551dbf2a4c..7f44937d703d 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -536,7 +536,7 @@ func testConcurrentServerStopAndGoAway(t *testing.T, e env) { // test ensures that the connection is re-created after GO_AWAY and not affected by the // subsequent (old) connection closure. func (s) TestGoAwayThenClose(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() lis1, err := net.Listen("tcp", "localhost:0") @@ -587,12 +587,21 @@ func (s) TestGoAwayThenClose(t *testing.T) { client := testpb.NewTestServiceClient(cc) + t.Log("Waiting for the ClientConn to enter READY state.") + state := cc.GetState() + for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if ctx.Err() != nil { + t.Fatalf("timed out waiting for READY channel state; last state = %v", state) + } + // We make a streaming RPC and do an one-message-round-trip to make sure // it's created on connection 1. // // We use a long-lived RPC because it will cause GracefulStop to send - // GO_AWAY, but the connection doesn't get closed until the server stops and + // GO_AWAY, but the connection won't get closed until the server stops and // the client receives the error. 
+ t.Log("Creating first streaming RPC to server 1.") stream, err := client.FullDuplexCall(ctx) if err != nil { t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err) @@ -603,37 +612,40 @@ func (s) TestGoAwayThenClose(t *testing.T) { go s2.Serve(lis2) - // Send GO_AWAY to connection 1. + t.Log("Gracefully stopping server 1.") go s1.GracefulStop() - // Wait for the ClientConn to enter IDLE state. - state := cc.GetState() + t.Log("Waiting for the ClientConn to enter IDLE state.") for ; state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() { } - if state != connectivity.Idle { + if ctx.Err() != nil { t.Fatalf("timed out waiting for IDLE channel state; last state = %v", state) } - // Initiate another RPC to create another connection. + t.Log("Performing another RPC to create a connection to server 2.") if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) } - // Assert that connection 2 has been established. - <-conn2Established.Done() + t.Log("Waiting for a connection to server 2.") + select { + case <-conn2Established.Done(): + case <-ctx.Done(): + t.Fatalf("timed out waiting for connection 2 to be established") + } // Close the listener for server2 to prevent it from allowing new connections. lis2.Close() - // Close connection 1. + t.Log("Hard closing connection 1.") s1.Stop() - // Wait for client to close. + t.Log("Waiting for the first stream to error.") if _, err = stream.Recv(); err == nil { t.Fatal("expected the stream to die, but got a successful Recv") } - // Do a bunch of RPCs, make sure it stays stable. These should go to connection 2. 
+ t.Log("Ensuring connection 2 is stable.") for i := 0; i < 10; i++ { if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) From 110ed9e6ccbd391a691b24b6b918ce0991f7588f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 30 Nov 2022 11:34:19 -0800 Subject: [PATCH 689/998] xdsclient: resource-type-agnostic transport layer (#5808) --- .../xdsclient/transport/loadreport.go | 260 ++++++++ .../xdsclient/transport/loadreport_test.go | 193 ++++++ xds/internal/xdsclient/transport/transport.go | 573 ++++++++++++++++++ .../transport/transport_ack_nack_test.go | 525 ++++++++++++++++ .../transport/transport_backoff_test.go | 467 ++++++++++++++ .../xdsclient/transport/transport_new_test.go | 118 ++++ .../transport/transport_resource_test.go | 228 +++++++ .../xdsclient/transport/transport_test.go | 88 +++ xds/internal/xdsclient/xdsresource/errors.go | 3 + 9 files changed, 2455 insertions(+) create mode 100644 xds/internal/xdsclient/transport/loadreport.go create mode 100644 xds/internal/xdsclient/transport/loadreport_test.go create mode 100644 xds/internal/xdsclient/transport/transport.go create mode 100644 xds/internal/xdsclient/transport/transport_ack_nack_test.go create mode 100644 xds/internal/xdsclient/transport/transport_backoff_test.go create mode 100644 xds/internal/xdsclient/transport/transport_new_test.go create mode 100644 xds/internal/xdsclient/transport/transport_resource_test.go create mode 100644 xds/internal/xdsclient/transport/transport_test.go diff --git a/xds/internal/xdsclient/transport/loadreport.go b/xds/internal/xdsclient/transport/loadreport.go new file mode 100644 index 000000000000..a683afd57938 --- /dev/null +++ b/xds/internal/xdsclient/transport/loadreport.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/protobuf/proto" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +type lrsStream = v3lrsgrpc.LoadReportingService_StreamLoadStatsClient + +// ReportLoad starts reporting loads to the management server the transport is +// configured to use. +// +// It returns a Store for the user to report loads and a function to cancel the +// load reporting. +func (t *Transport) ReportLoad() (*load.Store, func()) { + t.lrsStartStream() + return t.lrsStore, grpcsync.OnceFunc(func() { t.lrsStopStream() }) +} + +// lrsStartStream starts an LRS stream to the server, if none exists. +func (t *Transport) lrsStartStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount++ + if t.lrsRefCount != 1 { + // Return early if the stream has already been started. 
+ return + } + + ctx, cancel := context.WithCancel(context.Background()) + t.lrsCancelStream = cancel + go t.lrsRunner(ctx) +} + +// lrsStopStream closes the LRS stream, if this is the last user of the stream. +func (t *Transport) lrsStopStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount-- + if t.lrsRefCount != 0 { + // Return early if the stream has other references. + return + } + + t.lrsCancelStream() + t.logger.Infof("Stopping LRS stream") + <-t.lrsRunnerDoneCh +} + +// lrsRunner starts an LRS stream to report load data to the management server. +// It reports load at constant intervals (as configured by the management +// server) until the context is cancelled. +func (t *Transport) lrsRunner(ctx context.Context) { + defer close(t.lrsRunnerDoneCh) + + // This feature indicates that the client supports the + // LoadStatsResponse.send_all_clusters field in the LRS response. + node := proto.Clone(t.nodeProto).(*v3corepb.Node) + node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") + + backoffAttempt := 0 + backoffTimer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-backoffTimer.C: + case <-ctx.Done(): + backoffTimer.Stop() + return + } + + // We reset backoff state when we successfully receive at least one + // message from the server. + resetBackoff := func() bool { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. 
+ streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) + if err != nil { + t.logger.Warningf("Failed to create LRS stream: %v", err) + return false + } + t.logger.Infof("Created LRS stream to server: %s", t.serverURI) + + if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { + t.logger.Warningf("Failed to send first LRS request: %v", err) + return false + } + + clusters, interval, err := t.recvFirstLoadStatsResponse(stream) + if err != nil { + t.logger.Warningf("Failed to read from LRS stream: %v", err) + return false + } + + t.sendLoads(streamCtx, stream, clusters, interval) + return true + }() + + if resetBackoff { + backoffTimer.Reset(0) + backoffAttempt = 0 + } else { + backoffTimer.Reset(t.backoff(backoffAttempt)) + backoffAttempt++ + } + } +} + +func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) { + tick := time.NewTicker(interval) + defer tick.Stop() + for { + select { + case <-tick.C: + case <-ctx.Done(): + return + } + if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil { + t.logger.Warningf("Failed to write to LRS stream: %v", err) + return + } + } +} + +func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error { + req := &v3lrspb.LoadStatsRequest{Node: node} + t.logger.Debugf("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time.Duration, error) { + resp, err := stream.Recv() + if err != nil { + return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err) + } + t.logger.Debugf("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) + + interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) + if 
err != nil { + return nil, 0, fmt.Errorf("invalid load_reporting_interval: %v", err) + } + + if resp.ReportEndpointGranularity { + // TODO(easwars): Support per endpoint loads. + return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") + } + + clusters := resp.Clusters + if resp.SendAllClusters { + // Return nil to send stats for all clusters. + clusters = nil + } + + return clusters, interval, nil +} + +func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) error { + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) + for _, sd := range loads { + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) + for category, count := range sd.Drops { + droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ + Category: category, + DroppedCount: count, + }) + } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) + for l, localityData := range sd.LocalityStats { + lid, err := internal.LocalityIDFromString(l) + if err != nil { + return err + } + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) + for name, loadData := range localityData.LoadStats { + loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ + MetricName: name, + NumRequestsFinishedWithMetric: loadData.Count, + TotalMetricValue: loadData.Sum, + }) + } + localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ + Locality: &v3corepb.Locality{ + Region: lid.Region, + Zone: lid.Zone, + SubZone: lid.SubZone, + }, + TotalSuccessfulRequests: localityData.RequestStats.Succeeded, + TotalRequestsInProgress: localityData.RequestStats.InProgress, + TotalErrorRequests: localityData.RequestStats.Errored, + LoadMetricStats: loadMetricStats, + UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. 
+ }) + } + + clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ + ClusterName: sd.Cluster, + ClusterServiceName: sd.Service, + UpstreamLocalityStats: localityStats, + TotalDroppedRequests: sd.TotalDrops, + DroppedRequests: droppedReqs, + LoadReportInterval: ptypes.DurationProto(sd.ReportInterval), + }) + } + + req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} + t.logger.Debugf("Sending LRS loads: %s", pretty.ToJSON(req)) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } +} diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go new file mode 100644 index 000000000000..f6203c9b4425 --- /dev/null +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -0,0 +1,193 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package transport_test + +import ( + "context" + "testing" + "time" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/xds/internal/testutils/fakeserver" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +func (s) TestReportLoad(t *testing.T) { + // Create a fake xDS management server listening on a local port. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Construct the server config to represent the management server. + nodeProto := &v3corepb.Node{Id: uuid.New().String()} + serverCfg := bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: nodeProto, + } + + // Create a transport to the fake server. + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No ADS validation. + StreamErrorHandler: func(error) {}, // No ADS stream error handling. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. 
+ }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Ensure that a new connection is made to the management server. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := mgmtServer.NewConnChan.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for a new connection to the management server: %v", err) + } + + // Call the load reporting API, and ensure that an LRS stream is created. + store1, cancelLRS1 := tr.ReportLoad() + if err != nil { + t.Fatalf("Failed to start LRS load reporting: %v", err) + } + if _, err := mgmtServer.LRSStreamOpenChan.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for LRS stream to be created: %v", err) + } + + // Push some loads on the received store. + store1.PerCluster("cluster1", "eds1").CallDropped("test") + + // Ensure the initial request is received. + req, err := mgmtServer.LRSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for initial LRS request: %v", err) + } + gotInitialReq := req.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest) + nodeProto.ClientFeatures = []string{"envoy.lrs.supports_send_all_clusters"} + wantInitialReq := &v3lrspb.LoadStatsRequest{Node: nodeProto} + if diff := cmp.Diff(gotInitialReq, wantInitialReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in initial LRS request (-got, +want):\n%s", diff) + } + + // Send a response from the server with a small deadline. + mgmtServer.LRSResponseChan <- &fakeserver.Response{ + Resp: &v3lrspb.LoadStatsResponse{ + SendAllClusters: true, + LoadReportingInterval: &durationpb.Duration{Nanos: 50000000}, // 50ms + }, + } + + // Ensure that loads are seen on the server. 
+ req, err = mgmtServer.LRSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for LRS request with loads: %v", err) + } + gotLoad := req.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest).ClusterStats + if l := len(gotLoad); l != 1 { + t.Fatalf("Received load for %d clusters, want 1", l) + } + // This field is set by the client to indicate the actual time elapsed since + // the last report was sent. We cannot deterministically compare this, and + // we cannot use the cmpopts.IgnoreFields() option on proto structs, since + // we already use the protocmp.Transform() which marshals the struct into + // another message. Hence setting this field to nil is the best option here. + gotLoad[0].LoadReportInterval = nil + wantLoad := &v3endpointpb.ClusterStats{ + ClusterName: "cluster1", + ClusterServiceName: "eds1", + TotalDroppedRequests: 1, + DroppedRequests: []*v3endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}}, + } + if diff := cmp.Diff(wantLoad, gotLoad[0], protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in LRS request (-got, +want):\n%s", diff) + } + + // Make another call to the load reporting API, and ensure that a new LRS + // stream is not created. + store2, cancelLRS2 := tr.ReportLoad() + if err != nil { + t.Fatalf("Failed to start LRS load reporting: %v", err) + } + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := mgmtServer.LRSStreamOpenChan.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("New LRS stream created when expected to use an existing one") + } + + // Push more loads. + store2.PerCluster("cluster2", "eds2").CallDropped("test") + + // Ensure that loads are seen on the server. We need a loop here because + // there could have been some requests from the client in the time between + // us reading the first request and now. Those would have been queued in the + // request channel that we read out of. 
+ for { + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for new loads to be seen on the server") + } + + req, err = mgmtServer.LRSRequestChan.Receive(ctx) + if err != nil { + continue + } + gotLoad = req.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest).ClusterStats + if l := len(gotLoad); l != 1 { + continue + } + gotLoad[0].LoadReportInterval = nil + wantLoad := &v3endpointpb.ClusterStats{ + ClusterName: "cluster2", + ClusterServiceName: "eds2", + TotalDroppedRequests: 1, + DroppedRequests: []*v3endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}}, + } + if diff := cmp.Diff(wantLoad, gotLoad[0], protocmp.Transform()); diff != "" { + t.Logf("Unexpected diff in LRS request (-got, +want):\n%s", diff) + continue + } + break + } + + // Cancel the first load reporting call, and ensure that the stream does not + // close (because we have aother call open). + cancelLRS1() + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := mgmtServer.LRSStreamCloseChan.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("LRS stream closed when expected to stay open") + } + + // Cancel the second load reporting call, and ensure the stream is closed. + cancelLRS2() + if _, err := mgmtServer.LRSStreamCloseChan.Receive(ctx); err != nil { + t.Fatal("Timeout waiting for LRS stream to close") + } +} diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go new file mode 100644 index 000000000000..e0b9807c1648 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport.go @@ -0,0 +1,573 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transport implements the xDS transport protocol functionality +// required by the xdsclient. +package transport + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient + +// Transport provides a resource-type agnostic implementation of the xDS +// transport protocol. At this layer, resource contents are supposed to be +// opaque blobs which should be be meaningful only to the xDS data model layer +// which is implemented by the `xdsresource` package. 
+// +// Under the hood, it owns the gRPC connection to a single management server and +// manages the lifecycle of ADS/LRS streams. It uses the xDS v3 transport +// protocol version. +type Transport struct { + // These fields are initialized at creation time and are read-only afterwards. + cc *grpc.ClientConn // ClientConn to the mangement server. + serverURI string // URI of the management server. + updateHandler UpdateHandlerFunc // Resource update handler. xDS data model layer. + adsStreamErrHandler func(error) // To report underlying stream errors. + lrsStore *load.Store // Store returned to user for pushing loads. + backoff func(int) time.Duration // Backoff after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + logger *grpclog.PrefixLogger // Prefix logger for transport logs. + adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. + adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. + lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. + + // These channels enable synchronization amongst the different goroutines + // spawned by the transport, and between asynchorous events resulting from + // receipt of responses from the management server. + adsStreamCh chan adsStream // New ADS streams are pushed here. + adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. + + // mu guards the following runtime state maintained by the transport. + mu sync.Mutex + // resources is map from resource type URL to the set of resource names + // being requested for that type. When the ADS stream is restarted, the + // transport requests all these resources again from the management server. + resources map[string]map[string]bool + // versions is a map from resource type URL to the most recently ACKed + // version for that resource. Resource versions are a property of the + // resource type and not the stream, and will not be reset upon stream + // restarts. 
+ versions map[string]string + // nonces is a map from resource type URL to the most recently received + // nonce for that resource type. Nonces are a property of the ADS stream and + // will be reset upon stream restarts. + nonces map[string]string + + lrsMu sync.Mutex // Protects all LRS state. + lrsCancelStream context.CancelFunc // CancelFunc for the LRS stream. + lrsRefCount int // Reference count on the load store. +} + +// UpdateHandlerFunc is the implementation at the xDS data model layer, which +// determines if the configuration received from the management server can be +// applied locally or not. +// +// A nil error is returned from this function when the data model layer believes +// that the received configuration is good and can be applied locally. This will +// cause the transport layer to send an ACK to the management server. A non-nil +// error is returned from this function when the data model layer believes +// otherwise, and this will cause the transport layer to send a NACK. +type UpdateHandlerFunc func(update ResourceUpdate) error + +// ResourceUpdate is a representation of the configuration update received from +// the management server. It only contains fields which are useful to the data +// model layer, and layers above it. +type ResourceUpdate struct { + // Resources is the list of resources received from the management server. + Resources []*anypb.Any + // URL is the resource type URL for the above resources. + URL string + // Version is the resource version, for the above resources, as specified by + // the management server. + Version string +} + +// Options specifies configuration knobs used when creating a new Transport. +type Options struct { + // ServerCfg contains all the configuration required to connect to the xDS + // management server. + ServerCfg bootstrap.ServerConfig + // UpdateHandler is the component which makes ACK/NACK decisions based on + // the received resources. 
+ // + // Invoked inline and implementations must not block. + UpdateHandler UpdateHandlerFunc + // StreamErrorHandler provides a way for the transport layer to report + // underlying stream errors. These can be bubbled all the way up to the user + // of the xdsClient. + // + // Invoked inline and implementations must not block. + StreamErrorHandler func(error) + // Backoff controls the amount of time to backoff before recreating failed + // ADS streams. If unspecified, a default exponential backoff implementation + // is used. For more details, see: + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. + Backoff func(retries int) time.Duration + // Logger does logging with a prefix. + Logger *grpclog.PrefixLogger +} + +// For overriding in unit tests. +var grpcDial = grpc.Dial + +// New creates a new Transport. +func New(opts Options) (*Transport, error) { + switch { + case opts.ServerCfg.ServerURI == "": + return nil, errors.New("missing server URI when creating a new transport") + case opts.ServerCfg.Creds == nil: + return nil, errors.New("missing credentials when creating a new transport") + case opts.UpdateHandler == nil: + return nil, errors.New("missing update handler when creating a new transport") + case opts.StreamErrorHandler == nil: + return nil, errors.New("missing stream error handler when creating a new transport") + } + + node, ok := opts.ServerCfg.NodeProto.(*v3corepb.Node) + if !ok { + return nil, fmt.Errorf("unexpected type %T for NodeProto, want %T", opts.ServerCfg.NodeProto, &v3corepb.Node{}) + } + + // Dial the xDS management with the passed in credentials. + dopts := []grpc.DialOption{ + opts.ServerCfg.Creds, + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + // We decided to use these sane defaults in all languages, and + // kicked the can down the road as far making these configurable. 
+ Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }), + } + cc, err := grpcDial(opts.ServerCfg.ServerURI, dopts...) + if err != nil { + // An error from a non-blocking dial indicates something serious. + return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI, err) + } + + boff := opts.Backoff + if boff == nil { + boff = backoff.DefaultExponential.Backoff + } + ret := &Transport{ + cc: cc, + serverURI: opts.ServerCfg.ServerURI, + updateHandler: opts.UpdateHandler, + adsStreamErrHandler: opts.StreamErrorHandler, + lrsStore: load.NewStore(), + backoff: boff, + nodeProto: node, + logger: opts.Logger, + + adsStreamCh: make(chan adsStream, 1), + adsRequestCh: buffer.NewUnbounded(), + resources: make(map[string]map[string]bool), + versions: make(map[string]string), + nonces: make(map[string]string), + adsRunnerDoneCh: make(chan struct{}), + lrsRunnerDoneCh: make(chan struct{}), + } + + // This context is used for sending and receiving RPC requests and + // responses. It is also used by all the goroutines spawned by this + // Transport. Therefore, cancelling this context when the transport is + // closed will essentially cancel any pending RPCs, and cause the goroutines + // to terminate. + ctx, cancel := context.WithCancel(context.Background()) + ret.adsRunnerCancel = cancel + go ret.adsRunner(ctx) + + ret.logger.Infof("Created transport to server %q", ret.serverURI) + return ret, nil +} + +// resourceRequest wraps the resource type url and the resource names requested +// by the user of this transport. +type resourceRequest struct { + resources []string + url string +} + +// SendRequest sends out an ADS request for the provided resources of the +// specified resource type. +// +// The request is sent out asynchronously. If no valid stream exists at the time +// of processing this request, it is queued and will be sent out once a valid +// stream exists. 
+// +// If a successful response is received, the update handler callback provided at +// creation time is invoked. If an error is encountered, the stream error +// handler callback provided at creation time is invoked. +func (t *Transport) SendRequest(url string, resources []string) { + t.adsRequestCh.Put(&resourceRequest{ + url: url, + resources: resources, + }) +} + +func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + // The transport retries the stream with an exponential backoff whenever the + // stream breaks. But if the channel is broken, we don't want the backoff + // logic to continuously retry the stream. Setting WaitForReady() blocks the + // stream creation until the channel is READY. + // + // TODO(easwars): Make changes required to comply with A57: + // https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true)) +} + +func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { + req := &v3discoverypb.DiscoveryRequest{ + Node: t.nodeProto, + TypeUrl: resourceURL, + ResourceNames: resourceNames, + VersionInfo: version, + ResponseNonce: nonce, + } + if nackErr != nil { + req.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), Message: nackErr.Error(), + } + } + if err := stream.Send(req); err != nil { + return fmt.Errorf("sending ADS request %s failed: %v", pretty.ToJSON(req), err) + } + t.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) + return nil +} + +func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { + resp, err := stream.Recv() + if err != nil { + return nil, "", "", "", fmt.Errorf("failed to read ADS 
response: %v", err) + } + t.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) + t.logger.Debugf("ADS response received: %v", pretty.ToJSON(resp)) + return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil +} + +// adsRunner starts an ADS stream (and backs off exponentially, if the previous +// stream failed without receiving a single reply) and runs the sender and +// receiver routines to send and receive data from the stream respectively. +func (t *Transport) adsRunner(ctx context.Context) { + defer close(t.adsRunnerDoneCh) + + go t.send(ctx) + + // TODO: start a goroutine monitoring ClientConn's connectivity state, and + // report error (and log) when stats is transient failure. + + backoffAttempt := 0 + backoffTimer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-backoffTimer.C: + case <-ctx.Done(): + backoffTimer.Stop() + return + } + + // We reset backoff state when we successfully receive at least one + // message from the server. + resetBackoff := func() bool { + stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) + if err != nil { + t.adsStreamErrHandler(err) + t.logger.Warningf("ADS stream creation failed: %v", err) + return false + } + t.logger.Infof("ADS stream created") + + select { + case <-t.adsStreamCh: + default: + } + t.adsStreamCh <- stream + return t.recv(stream) + }() + + if resetBackoff { + backoffTimer.Reset(0) + backoffAttempt = 0 + } else { + backoffTimer.Reset(t.backoff(backoffAttempt)) + backoffAttempt++ + } + } +} + +// send is a separate goroutine for sending resource requests on the ADS stream. +// +// For every new stream received on the stream channel, all existing resources +// are re-requested from the management server. +// +// For every new resource request received on the resources channel, the +// resources map is updated (this ensures that resend will pick them up when +// there are new streams) and the appropriate request is sent out. 
+func (t *Transport) send(ctx context.Context) { + var stream adsStream + for { + select { + case <-ctx.Done(): + return + case stream = <-t.adsStreamCh: + if !t.sendExisting(stream) { + // Send failed, clear the current stream. Attempt to resend will + // only be made after a new stream is created. + stream = nil + } + case u := <-t.adsRequestCh.Get(): + t.adsRequestCh.Load() + + var ( + resources []string + url, version, nonce string + send bool + nackErr error + ) + switch update := u.(type) { + case *resourceRequest: + resources, url, version, nonce = t.processResourceRequest(update) + case *ackRequest: + resources, url, version, nonce, send = t.processAckRequest(update, stream) + if !send { + continue + } + nackErr = update.nackErr + } + if stream == nil { + // There's no stream yet. Skip the request. This request + // will be resent to the new streams. If no stream is + // created, the watcher will timeout (same as server not + // sending response back). + continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, resources, url, version, nonce, nackErr); err != nil { + t.logger.Warningf("ADS request for {resources: %q, url: %v, version: %q, nonce: %q} failed: %v", resources, url, version, nonce, err) + // Send failed, clear the current stream. + stream = nil + } + } + } +} + +// sendExisting sends out xDS requests for existing resources when recovering +// from a broken stream. +// +// We call stream.Send() here with the lock being held. It should be OK to do +// that here because the stream has just started and Send() usually returns +// quickly (once it pushes the message onto the transport layer) and is only +// ever blocked if we don't have enough flow control quota. +func (t *Transport) sendExisting(stream adsStream) bool { + t.mu.Lock() + defer t.mu.Unlock() + + // Reset only the nonces map when the stream restarts. + // + // xDS spec says the following. 
See section: + // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version + // + // Note that the version for a resource type is not a property of an + // individual xDS stream but rather a property of the resources themselves. If + // the stream becomes broken and the client creates a new stream, the client’s + // initial request on the new stream should indicate the most recent version + // seen by the client on the previous stream + t.nonces = make(map[string]string) + + for url, resources := range t.resources { + if err := t.sendAggregatedDiscoveryServiceRequest(stream, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { + t.logger.Warningf("ADS request failed: %v", err) + return false + } + } + + return true +} + +// recv receives xDS responses on the provided ADS stream and branches out to +// message specific handlers. Returns true if at least one message was +// successfully received. +func (t *Transport) recv(stream adsStream) bool { + msgReceived := false + for { + resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) + if err != nil { + t.adsStreamErrHandler(err) + t.logger.Warningf("ADS stream is closed with error: %v", err) + return msgReceived + } + msgReceived = true + + err = t.updateHandler(ResourceUpdate{ + Resources: resources, + URL: url, + Version: rVersion, + }) + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + t.logger.Warningf("%v", err) + continue + } + // If the data model layer returned an error, we need to NACK the + // response in which case we need to set the version to the most + // recently accepted version of this resource type. 
+ if err != nil { + t.mu.Lock() + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: t.versions[url], + nackErr: err, + }) + t.mu.Unlock() + t.logger.Warningf("Sending NACK for resource type: %v, version: %v, nonce: %v, reason: %v", url, rVersion, nonce, err) + continue + } + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: rVersion, + }) + t.logger.Infof("Sending ACK for resource type: %v, version: %v, nonce: %v", url, rVersion, nonce) + } +} + +func mapToSlice(m map[string]bool) []string { + ret := make([]string, 0, len(m)) + for i := range m { + ret = append(ret, i) + } + return ret +} + +func sliceToMap(ss []string) map[string]bool { + ret := make(map[string]bool, len(ss)) + for _, s := range ss { + ret[s] = true + } + return ret +} + +// processResourceRequest pulls the fields needed to send out an ADS request. +// The resource type and the list of resources to request are provided by the +// user, while the version and nonce are maintained internally. +// +// The resources map, which keeps track of the resources being requested, is +// updated here. Any subsequent stream failure will re-request resources stored +// in this map. +// +// Returns the list of resources, resource type url, version and nonce. +func (t *Transport) processResourceRequest(req *resourceRequest) ([]string, string, string, string) { + t.mu.Lock() + defer t.mu.Unlock() + + resources := sliceToMap(req.resources) + t.resources[req.url] = resources + return req.resources, req.url, t.versions[req.url], t.nonces[req.url] +} + +type ackRequest struct { + url string // Resource type URL. + version string // NACK if version is an empty string. + nonce string + nackErr error // nil for ACK, non-nil for NACK. + // ACK/NACK are tagged with the stream it's for. When the stream is down, + // all the ACK/NACK for this stream will be dropped, and the version/nonce + // won't be updated. 
+ stream grpc.ClientStream +} + +// processAckRequest pulls the fields needed to send out an ADS ACK. The nonces +// and versions map is updated. +// +// Returns the list of resources, resource type url, version, nonce, and an +// indication of whether an ACK should be sent on the wire or not. +func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) ([]string, string, string, string, bool) { + if ack.stream != stream { + // If ACK's stream isn't the current sending stream, this means the ACK + // was pushed to queue before the old stream broke, and a new stream has + // been started since. Return immediately here so we don't update the + // nonce for the new stream. + return nil, "", "", "", false + } + + t.mu.Lock() + defer t.mu.Unlock() + + // Update the nonce irrespective of whether we send the ACK request on wire. + // An up-to-date nonce is required for the next request. + nonce := ack.nonce + t.nonces[ack.url] = nonce + + s, ok := t.resources[ack.url] + if !ok || len(s) == 0 { + // We don't send the ACK request if there are no resources of this type + // in our resources map. This can be either when the server sends + // responses before any request, or the resources are removed while the + // ackRequest was in queue). If we send a request with an empty + // resource name list, the server may treat it as a wild card and send + // us everything. + return nil, "", "", "", false + } + resources := mapToSlice(s) + + // Update the versions map only when we plan to send an ACK. + if ack.nackErr == nil { + t.versions[ack.url] = ack.version + } + + return resources, ack.url, ack.version, nonce, true +} + +// Close closes the Transport and frees any associated resources. +func (t *Transport) Close() { + t.adsRunnerCancel() + <-t.adsRunnerDoneCh + t.cc.Close() +} + +// ChannelConnectivityStateForTesting returns the connectivity state of the gRPC +// channel to the management server. +// +// Only for testing purposes. 
+func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { + return t.cc.GetState() +} diff --git a/xds/internal/xdsclient/transport/transport_ack_nack_test.go b/xds/internal/xdsclient/transport/transport_ack_nack_test.go new file mode 100644 index 000000000000..b9eeb02e85f2 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_ack_nack_test.go @@ -0,0 +1,525 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb 
"github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +var ( + errWantNack = errors.New("unsupported field 'use_original_dst' is present and set to true") + + // A simple update handler for listener resources which validates only the + // `use_original_dst` field. + dataModelValidator = func(update transport.ResourceUpdate) error { + for _, r := range update.Resources { + inner := &v3discoverypb.Resource{} + if err := proto.Unmarshal(r.GetValue(), inner); err != nil { + return fmt.Errorf("failed to unmarshal DiscoveryResponse: %v", err) + } + lis := &v3listenerpb.Listener{} + if err := proto.Unmarshal(r.GetValue(), lis); err != nil { + return fmt.Errorf("failed to unmarshal DiscoveryResponse: %v", err) + } + if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { + return errWantNack + } + } + return nil + } +) + +// TestSimpleAckAndNack tests simple ACK and NACK scenarios. +// 1. When the data model layer likes a received response, the test verifies +// that an ACK is sent matching the version and nonce from the response. +// 2. When a subsequent response is disliked by the data model layer, the test +// verifies that a NACK is sent matching the previously ACKed version and +// current nonce from the response. +// 3. When a subsequent response is liked by the data model layer, the test +// verifies that an ACK is sent matching the version and nonce from the +// current response. +func (s) TestSimpleAckAndNack(t *testing.T) { + // Create an xDS management server listening on a local port. 
Configure the + // request and response handlers to push on channels which are inspected by + // the test goroutine to verify ack version and nonce. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) + mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + streamRequestCh <- req + return nil + }, + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + streamResponseCh <- resp + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Configure the management server with appropriate resources. + apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName = "resource name 1" + listenerResource := &v3listenerpb.Listener{ + Name: resourceName, + ApiListener: apiListener, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // Construct the server config to represent the management server. 
+ serverCfg := bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + } + + // Create a new transport. + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + UpdateHandler: dataModelValidator, + StreamErrorHandler: func(err error) {}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Capture the version and nonce from the response. + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the ACK contains the appropriate version and nonce. 
+ wantReq.VersionInfo = gotResp.GetVersionInfo() + wantReq.ResponseNonce = gotResp.GetNonce() + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Update the management server's copy of the resource to include a field + // which will cause the resource to be NACKed. + badListener := proto.Clone(listenerResource).(*v3listenerpb.Listener) + badListener.UseOriginalDst = &wrapperspb.BoolValue{Value: true} + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{badListener}, + SkipValidation: true, + }) + + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the NACK contains the appropriate version, nonce and error. + // We expect the version to not change as this is a NACK. + wantReq.ResponseNonce = gotResp.GetNonce() + wantReq.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), + Message: errWantNack.Error(), + } + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Update the management server to send a good resource again. + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // The envoy-go-control-plane management server keeps resending the same + // resource as long as we keep NACK'ing it. 
So, we will see the bad resource + // sent to us a few times here, before receiving the good resource. + for { + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the ACK contains the appropriate version and nonce. + wantReq.VersionInfo = gotResp.GetVersionInfo() + wantReq.ResponseNonce = gotResp.GetNonce() + wantReq.ErrorDetail = nil + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)) + if diff == "" { + break + } + t.Logf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestInvalidFirstResponse tests the case where the first response is invalid. +// The test verifies that the NACK contains an empty version string. +func (s) TestInvalidFirstResponse(t *testing.T) { + // Create an xDS management server listening on a local port. Configure the + // request and response handlers to push on channels which are inspected by + // the test goroutine to verify ack version and nonce. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) + mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + streamRequestCh <- req + return nil + }, + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + select { + case streamResponseCh <- resp: + default: + } + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Configure the management server with appropriate resources. 
+ apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName = "resource name 1" + listenerResource := &v3listenerpb.Listener{ + Name: resourceName, + ApiListener: apiListener, + UseOriginalDst: &wrapperspb.BoolValue{Value: true}, // This will cause the resource to be NACKed. + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // Construct the server config to represent the management server. + serverCfg := bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + } + + // Create a new transport. + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + UpdateHandler: dataModelValidator, + StreamErrorHandler: func(err error) {}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the initial discovery request matches expectation. 
+ var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // NACK should contain the appropriate error, nonce, but empty version. + wantReq.VersionInfo = "" + wantReq.ResponseNonce = gotResp.GetNonce() + wantReq.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), + Message: errWantNack.Error(), + } + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestResourceIsNotRequestedAnymore tests the scenario where the xDS client is +// no longer interested in a resource. The following sequence of events are +// tested: +// 1. A resource is requested and a good response is received. The test verifies +// that an ACK is sent for this resource. +// 2. The previously requested resource is no longer requested. The test +// verifies that a request with no resource names is sent out. +// 3. The same resource is requested again. The test verifies that the request +// is sent with the previously ACKed version. 
+func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { + // Create an xDS management server listening on a local port. Configure the + // request and response handlers to push on channels which are inspected by + // the test goroutine to verify ack version and nonce. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) + mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + streamRequestCh <- req + return nil + }, + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + streamResponseCh <- resp + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Configure the management server with appropriate resources. + apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName = "resource name 1" + listenerResource := &v3listenerpb.Listener{ + Name: resourceName, + ApiListener: apiListener, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // Construct the server config to represent the management server. 
+ serverCfg := bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + } + + // Create a new transport. + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + UpdateHandler: dataModelValidator, + StreamErrorHandler: func(err error) {}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Capture the version and nonce from the response. + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the ACK contains the appropriate version and nonce. 
+ wantReq.VersionInfo = gotResp.GetVersionInfo() + wantReq.ResponseNonce = gotResp.GetNonce() + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Send a discovery request with no resource names. + tr.SendRequest(version.V3ListenerURL, []string{}) + + // Verify that the discovery request matches expectation. + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq.ResourceNames = nil + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Send a discovery request for the same resource requested earlier. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the discovery request contains the version from the + // previously received response. + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq.ResourceNames = []string{resourceName} + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} diff --git a/xds/internal/xdsclient/transport/transport_backoff_test.go b/xds/internal/xdsclient/transport/transport_backoff_test.go new file mode 100644 index 000000000000..e7c44e1873e4 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_backoff_test.go @@ -0,0 +1,467 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport_test + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +var strSort = func(s1, s2 string) bool { return s1 < s2 } + +// TestTransport_BackoffAfterStreamFailure tests the case where the management +// server returns an error in the ADS streaming RPC. The test verifies the +// following: +// 1. 
Initial discovery request matches expectation. +// 2. RPC error is propagated via the stream error handler. +// 3. When the stream is closed, the transport backs off. +// 4. The same discovery request is sent on the newly created stream. +func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { + // Channels used for verifying different events in the test. + streamCloseCh := make(chan struct{}, 1) // ADS stream is closed. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) // Discovery request is received. + backoffCh := make(chan struct{}, 1) // Transport backoff after stream failure. + streamErrCh := make(chan error, 1) // Stream error seen by the transport. + + // Create an xDS management server listening on a local port. + streamErr := errors.New("ADS stream error") + mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + // Push on a channel whenever the stream is closed. + OnStreamClosed: func(int64) { + select { + case streamCloseCh <- struct{}{}: + default: + } + }, + + // Return an error every time a request is sent on the stream. This + // should cause the transport to back off before attempting to recreate + // the stream. + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + default: + } + return streamErr + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Override the backoff implementation to push on a channel that is read by + // the test goroutine. + transportBackoff := func(v int) time.Duration { + select { + case backoffCh <- struct{}{}: + default: + } + return 0 + } + + // Construct the server config to represent the management server. 
+ nodeID := uuid.New().String() + serverCfg := bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + } + + // Create a new transport. Since we are only testing backoff behavior here, + // we can pass a no-op data model layer implementation. + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + StreamErrorHandler: func(err error) { + select { + case streamErrCh <- err: + default: + } + }, + Backoff: transportBackoff, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + const resourceName = "resource name" + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Verify that the received stream error is reported to the user. 
+ var gotErr error + select { + case gotErr = <-streamErrCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for stream error to be reported to the user") + } + if !strings.Contains(gotErr.Error(), streamErr.Error()) { + t.Fatalf("Received stream error: %v, wantErr: %v", gotErr, streamErr) + } + + // Verify that the stream is closed. + select { + case <-streamCloseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for stream to be closed after an error") + } + + // Verify that the transport backs off before recreating the stream. + select { + case <-backoffCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for transport to backoff after stream failure") + } + + // Verify that the same discovery request is resent on the new stream. + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestTransport_RetriesAfterBrokenStream tests the case where a stream breaks +// because the server goes down. The test verifies the following: +// 1. Initial discovery request matches expectation. +// 2. Good response from the server leads to an ACK with appropriate version. +// 3. Management server going down, leads to stream failure. +// 4. Once the management server comes back up, the same resources are +// re-requested, this time with an empty nonce. +func (s) TestTransport_RetriesAfterBrokenStream(t *testing.T) { + // Channels used for verifying different events in the test. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) // Discovery request is received. + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) // Discovery response is received. + streamErrCh := make(chan error, 1) // Stream error seen by the transport. + + // Create an xDS management server listening on a local port. 
+ l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("Failed to create a local listener for the xDS management server: %v", err) + } + lis := testutils.NewRestartableListener(l) + mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + Listener: lis, + // Push the received request on to a channel for the test goroutine to + // verify that it matches expectations. + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + default: + } + return nil + }, + // Push the response that the management server is about to send on to a + // channel. The test goroutine uses this to extract the version and + // nonce, expected on subsequent requests. + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + select { + case streamResponseCh <- resp: + default: + } + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", lis.Addr().String()) + + // Configure the management server with appropriate resources. 
+ apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName1 = "resource name 1" + const resourceName2 = "resource name 2" + listenerResource1 := &v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: apiListener, + } + listenerResource2 := &v3listenerpb.Listener{ + Name: resourceName2, + ApiListener: apiListener, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource1, listenerResource2}, + SkipValidation: true, + }) + + // Construct the server config to represent the management server. + serverCfg := bootstrap.ServerConfig{ + ServerURI: lis.Addr().String(), + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + } + + // Create a new transport. Since we are only testing backoff behavior here, + // we can pass a no-op data model layer implementation. + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + StreamErrorHandler: func(err error) { + select { + case streamErrCh <- err: + default: + } + }, + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. 
+ tr.SendRequest(version.V3ListenerURL, []string{resourceName1, resourceName2}) + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName1, resourceName2}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Capture the version and nonce from the response. + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + version := gotResp.GetVersionInfo() + nonce := gotResp.GetNonce() + + // Verify that the ACK contains the appropriate version and nonce. + wantReq.VersionInfo = version + wantReq.ResponseNonce = nonce + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Bring down the management server to simulate a broken stream. + lis.Stop() + + // We don't care about the exact error here and it can vary based on which + // error gets reported first, the Recv() failure or the new stream creation + // failure. So, all we check here is whether we get an error or not. 
+ select { + case <-streamErrCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for stream error to be reported to the user") + } + + // Bring up the connection to the management server. + lis.Restart() + + // Verify that the transport creates a new stream and sends out a new + // request which contains the previously acked version, but an empty nonce. + wantReq.ResponseNonce = "" + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestTransport_ResourceRequestedBeforeStreamCreation tests the case where a +// resource is requested before the transport has a valid stream. Verifies that +// the transport sends out the request once it has a valid stream. +func (s) TestTransport_ResourceRequestedBeforeStreamCreation(t *testing.T) { + // Channels used for verifying different events in the test. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) // Discovery request is received. + + // Create an xDS management server listening on a local port. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("Failed to create a local listener for the xDS management server: %v", err) + } + lis := testutils.NewRestartableListener(l) + streamErr := errors.New("ADS stream error") + + mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + Listener: lis, + + // Return an error every time a request is sent on the stream. This + // should cause the transport to back off before attempting to recreate + // the stream. 
+ OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + default: + } + return streamErr + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", lis.Addr().String()) + + // Bring down the management server before creating the transport. This + // allows us to test the case where SendRequest() is called when there is no + // stream to the management server. + lis.Stop() + + // Construct the server config to represent the management server. + nodeID := uuid.New().String() + serverCfg := bootstrap.ServerConfig{ + ServerURI: lis.Addr().String(), + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + } + + // Create a new transport. Since we are only testing backoff behavior here, + // we can pass a no-op data model layer implementation. + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + StreamErrorHandler: func(error) {}, // No stream error handling. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + const resourceName = "resource name" + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Wait until the transport has attempted to connect to the management + // server and has seen the connection fail. In this case, since the + // connection is down, and the transport creates streams with WaitForReady() + // set to true, stream creation will never fail (unless the context + // expires), and therefore we cannot rely on the stream error handler. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if tr.ChannelConnectivityStateForTesting() == connectivity.TransientFailure { + break + } + } + + lis.Restart() + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} diff --git a/xds/internal/xdsclient/transport/transport_new_test.go b/xds/internal/xdsclient/transport/transport_new_test.go new file mode 100644 index 000000000000..60286d9eb156 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_new_test.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package transport_test + +import ( + "strings" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// TestNew covers that New() returns an error if the input *ServerConfig +// contains invalid content. +func (s) TestNew(t *testing.T) { + tests := []struct { + name string + opts transport.Options + wantErrStr string + }{ + { + name: "missing server URI", + opts: transport.Options{ServerCfg: bootstrap.ServerConfig{}}, + wantErrStr: "missing server URI when creating a new transport", + }, + { + name: "missing credentials", + opts: transport.Options{ServerCfg: bootstrap.ServerConfig{ServerURI: "server-address"}}, + wantErrStr: "missing credentials when creating a new transport", + }, + { + name: "missing update handler", + opts: transport.Options{ServerCfg: bootstrap.ServerConfig{ + ServerURI: "server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: &v3corepb.Node{}, + }}, + wantErrStr: "missing update handler when creating a new transport", + }, + { + name: "missing stream error handler", + opts: transport.Options{ + ServerCfg: bootstrap.ServerConfig{ + ServerURI: "server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: &v3corepb.Node{}, + }, + UpdateHandler: func(transport.ResourceUpdate) error { return nil }, + }, + wantErrStr: "missing stream error handler when creating a new transport", + }, + { + name: "node proto version mismatch for v3", + opts: transport.Options{ + ServerCfg: bootstrap.ServerConfig{ + ServerURI: "server-address", + Creds: 
grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: &v2corepb.Node{}, + TransportAPI: version.TransportV3, + }, + UpdateHandler: func(transport.ResourceUpdate) error { return nil }, + StreamErrorHandler: func(error) {}, + }, + wantErrStr: "unexpected type *core.Node for NodeProto, want *corev3.Node", + }, + { + name: "happy case", + opts: transport.Options{ + ServerCfg: bootstrap.ServerConfig{ + ServerURI: "server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: &v3corepb.Node{}, + TransportAPI: version.TransportV3, + }, + UpdateHandler: func(transport.ResourceUpdate) error { return nil }, + StreamErrorHandler: func(error) {}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := transport.New(test.opts) + defer func() { + if c != nil { + c.Close() + } + }() + if (err != nil) != (test.wantErrStr != "") { + t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErrStr) + } + if err != nil && !strings.Contains(err.Error(), test.wantErrStr) { + t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErrStr) + } + }) + } +} diff --git a/xds/internal/xdsclient/transport/transport_resource_test.go b/xds/internal/xdsclient/transport/transport_resource_test.go new file mode 100644 index 000000000000..62149fa4bf0b --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_resource_test.go @@ -0,0 +1,228 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transport_test contains e2e style tests for the xDS transport +// implementation. It uses the envoy-go-control-plane as the management server. +package transport_test + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/fakeserver" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +// startFakeManagementServer starts a fake xDS management server and returns a +// cleanup function to close the fake server. 
+func startFakeManagementServer(t *testing.T) (*fakeserver.Server, func()) { + t.Helper() + fs, sCleanup, err := fakeserver.StartServer() + if err != nil { + t.Fatalf("Failed to start fake xDS server: %v", err) + } + return fs, sCleanup +} + +// resourcesWithTypeURL wraps resources and type URL received from server. +type resourcesWithTypeURL struct { + resources []*anypb.Any + url string +} + +// TestHandleResponseFromManagementServer covers different scenarios of the +// transport receiving a response from the management server. In all scenarios, +// the transport is expected to pass the received responses as-is to the data +// model layer for validation and not perform any validation on its own. +func (s) TestHandleResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + var ( + badlyMarshaledResource = &anypb.Any{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Value: []byte{1, 2, 3, 4}, + } + apiListener = &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + resource1 = &v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: apiListener, + } + resource2 = &v3listenerpb.Listener{ + Name: resourceName2, + ApiListener: apiListener, + } + ) + + tests := []struct { + desc string + resourceNamesToRequest []string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantURL string + wantResources []*anypb.Any + }{ + { + desc: "badly marshaled response", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: 
"type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{badlyMarshaledResource}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{badlyMarshaledResource}, + }, + { + desc: "empty response", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{}, + wantURL: "", + wantResources: nil, + }, + { + desc: "one good resource", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + { + desc: "two good resources", + resourceNamesToRequest: []string{resourceName1, resourceName2}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + { + desc: "two resources when we requested one", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set 
it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Construct the server config to represent the management server. + serverCfg := bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: uuid.New().String()}, + } + + // Create a new transport. + resourcesCh := testutils.NewChannel() + tr, err := transport.New(transport.Options{ + ServerCfg: serverCfg, + // No validation. Simply push received resources on a channel. + UpdateHandler: func(update transport.ResourceUpdate) error { + resourcesCh.Send(&resourcesWithTypeURL{ + resources: update.Resources, + url: update.URL, + // Ignore resource version here. + }) + return nil + }, + StreamErrorHandler: func(error) {}, // No stream error handling. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send the request, and validate that the response sent by the + // management server is propagated to the data model layer. 
+ tr.SendRequest(version.V3ListenerURL, test.resourceNamesToRequest) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + v, err := resourcesCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive resources at the data model layer: %v", err) + } + gotURL := v.(*resourcesWithTypeURL).url + gotResources := v.(*resourcesWithTypeURL).resources + if gotURL != test.wantURL { + t.Fatalf("Received resource URL in response: %s, want %s", gotURL, test.wantURL) + } + if diff := cmp.Diff(gotResources, test.wantResources, protocmp.Transform()); diff != "" { + t.Fatalf("Received unexpected resources. Diff (-got, +want):\n%s", diff) + } + }) + } +} diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go new file mode 100644 index 000000000000..41ebc1b4fe55 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_test.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package transport + +import ( + "testing" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestNewWithGRPCDial(t *testing.T) { + // Override the dialer with a custom one. + customDialerCalled := false + customDialer := func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + customDialerCalled = true + return grpc.Dial(target, opts...) + } + oldDial := grpcDial + grpcDial = customDialer + defer func() { grpcDial = oldDial }() + + // Create a new transport and ensure that the custom dialer was called. + opts := Options{ + ServerCfg: bootstrap.ServerConfig{ + ServerURI: "server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + NodeProto: &v3corepb.Node{}, + TransportAPI: version.TransportV3, + }, + UpdateHandler: func(ResourceUpdate) error { return nil }, + StreamErrorHandler: func(error) {}, + } + c, err := New(opts) + if err != nil { + t.Fatalf("New(%v) failed: %v", opts, err) + } + defer c.Close() + + if !customDialerCalled { + t.Fatalf("New(%+v) custom dialer called = false, want true", opts) + } + customDialerCalled = false + + // Reset the dialer, create a new transport and ensure that our custom + // dialer is no longer called. 
+ grpcDial = grpc.Dial + c, err = New(opts) + defer func() { + if c != nil { + c.Close() + } + }() + if err != nil { + t.Fatalf("New(%v) failed: %v", opts, err) + } + + if customDialerCalled { + t.Fatalf("New(%+v) custom dialer called = true, want false", opts) + } +} diff --git a/xds/internal/xdsclient/xdsresource/errors.go b/xds/internal/xdsclient/xdsresource/errors.go index cfaf63b30396..2d1b179db130 100644 --- a/xds/internal/xdsclient/xdsresource/errors.go +++ b/xds/internal/xdsclient/xdsresource/errors.go @@ -34,6 +34,9 @@ const ( // response. It's typically returned if the resource is removed in the xds // server. ErrorTypeResourceNotFound + // ErrorTypeResourceTypeUnsupported indicates the receipt of a message from + // the management server with resources of an unsupported resource type. + ErrorTypeResourceTypeUnsupported ) type xdsClientError struct { From 20c937eebe8aec29b3e0a83cb4a53363cbe7578a Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 30 Nov 2022 17:07:48 -0800 Subject: [PATCH 690/998] transport: limit AccountCheck tests to fewer streams and iterations to avoid flakes (#5828) Fixes https://github.com/grpc/grpc-go/issues/5283 --- internal/transport/transport_test.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 45257ba0c579..6cd2201b5919 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1532,9 +1532,10 @@ func (s) TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) { } func (s) TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) { + // These settings disable dynamic window sizes based on BDP estimation; + // must be at least defaultWindowSize or the setting is ignored. wc := windowSizeConfig{ serverStream: defaultWindowSize, - // Note this is smaller than initialConnWindowSize which is the current default. 
serverConn: defaultWindowSize, clientStream: defaultWindowSize, clientConn: defaultWindowSize, @@ -1576,10 +1577,11 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) for k := range server.conns { st = k.(*http2Server) } + server.mu.Unlock() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - server.mu.Unlock() - const numStreams = 10 + const numStreams = 5 clientStreams := make([]*Stream, numStreams) for i := 0; i < numStreams; i++ { var err error @@ -1599,26 +1601,27 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) binary.BigEndian.PutUint32(buf[1:], uint32(msgSize)) opts := Options{} header := make([]byte, 5) - for i := 1; i <= 10; i++ { + for i := 1; i <= 5; i++ { if err := client.Write(stream, nil, buf, &opts); err != nil { - t.Errorf("Error on client while writing message: %v", err) + t.Errorf("Error on client while writing message %v on stream %v: %v", i, stream.id, err) return } if _, err := stream.Read(header); err != nil { - t.Errorf("Error on client while reading data frame header: %v", err) + t.Errorf("Error on client while reading data frame header %v on stream %v: %v", i, stream.id, err) return } sz := binary.BigEndian.Uint32(header[1:]) recvMsg := make([]byte, int(sz)) if _, err := stream.Read(recvMsg); err != nil { - t.Errorf("Error on client while reading data: %v", err) + t.Errorf("Error on client while reading data %v on stream %v: %v", i, stream.id, err) return } if len(recvMsg) != msgSize { - t.Errorf("Length of message received by client: %v, want: %v", len(recvMsg), msgSize) + t.Errorf("Length of message %v received by client on stream %v: %v, want: %v", i, stream.id, len(recvMsg), msgSize) return } } + t.Logf("stream %v done with pingpongs", stream.id) }(stream) } wg.Wait() From 99ba98231e6e65d7a806f49dd935531c09cf466e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 1 Dec 2022 09:02:41 -0800 Subject: [PATCH 691/998] 
transport/server: flush GOAWAY before closing conn due to max age (#5821) Fixes https://github.com/grpc/grpc-go/issues/4859 --- internal/transport/controlbuf.go | 18 ++++++++++++++++++ internal/transport/http2_server.go | 2 +- test/goaway_test.go | 2 +- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 409769f48fdc..0e221af728bc 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -817,6 +825,14 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + l.framer.writer.Flush() + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. 
+ return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +861,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 3dd15647bc84..6c8aa403539e 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -1153,7 +1153,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return diff --git a/test/goaway_test.go b/test/goaway_test.go index 7f44937d703d..bcd13ae7da66 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -97,7 +97,7 @@ func (s) TestDetailedGoAwayErrorOnGracefulClosePropagatesToRPCError(t *testing.T sopts := []grpc.ServerOption{ grpc.KeepaliveParams(keepalive.ServerParameters{ MaxConnectionAge: time.Millisecond * 100, - MaxConnectionAgeGrace: time.Millisecond, + MaxConnectionAgeGrace: time.Nanosecond, // ~instantaneously, but non-zero to avoid default }), } if err := ss.Start(sopts); err != nil { From fa99649f0dc51b331ed6b23be483ed3b6cc194b9 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 1 Dec 2022 10:25:30 -0800 Subject: [PATCH 692/998] xdsclient: deflake new transport ack/nack tests (#5830) --- .../transport/transport_ack_nack_test.go | 48 +++++++++++++------ 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/xds/internal/xdsclient/transport/transport_ack_nack_test.go b/xds/internal/xdsclient/transport/transport_ack_nack_test.go index b9eeb02e85f2..3d5740905b82 100644 --- 
a/xds/internal/xdsclient/transport/transport_ack_nack_test.go +++ b/xds/internal/xdsclient/transport/transport_ack_nack_test.go @@ -79,18 +79,27 @@ var ( // verifies that an ACK is sent matching the version and nonce from the // current response. func (s) TestSimpleAckAndNack(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Create an xDS management server listening on a local port. Configure the // request and response handlers to push on channels which are inspected by // the test goroutine to verify ack version and nonce. streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ - OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { - streamRequestCh <- req + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + case <-ctx.Done(): + } return nil }, OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { - streamResponseCh <- resp + select { + case streamResponseCh <- resp: + case <-ctx.Done(): + } }, }) if err != nil { @@ -119,8 +128,6 @@ func (s) TestSimpleAckAndNack(t *testing.T) { Name: resourceName, ApiListener: apiListener, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() nodeID := uuid.New().String() mgmtServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, @@ -258,20 +265,26 @@ func (s) TestSimpleAckAndNack(t *testing.T) { // TestInvalidFirstResponse tests the case where the first response is invalid. // The test verifies that the NACK contains an empty version string. 
func (s) TestInvalidFirstResponse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Create an xDS management server listening on a local port. Configure the // request and response handlers to push on channels which are inspected by // the test goroutine to verify ack version and nonce. streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ - OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { - streamRequestCh <- req + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + case <-ctx.Done(): + } return nil }, OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { select { case streamResponseCh <- resp: - default: + case <-ctx.Done(): } }, }) @@ -302,8 +315,6 @@ func (s) TestInvalidFirstResponse(t *testing.T) { ApiListener: apiListener, UseOriginalDst: &wrapperspb.BoolValue{Value: true}, // This will cause the resource to be NACKed. } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() nodeID := uuid.New().String() mgmtServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, @@ -384,18 +395,27 @@ func (s) TestInvalidFirstResponse(t *testing.T) { // 3. The same resource is requested again. The test verifies that the request // is sent with the previously ACKed version. func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Create an xDS management server listening on a local port. Configure the // request and response handlers to push on channels which are inspected by // the test goroutine to verify ack version and nonce. 
streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ - OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { - streamRequestCh <- req + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + case <-ctx.Done(): + } return nil }, OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { - streamResponseCh <- resp + select { + case streamResponseCh <- resp: + case <-ctx.Done(): + } }, }) if err != nil { @@ -424,8 +444,6 @@ func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { Name: resourceName, ApiListener: apiListener, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() nodeID := uuid.New().String() mgmtServer.Update(ctx, e2e.UpdateOptions{ NodeID: nodeID, From ef51864f488369b4c4511540f930a9251f6edc06 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 1 Dec 2022 10:52:58 -0800 Subject: [PATCH 693/998] grpclb: improve grpclb tests (#5826) Fixes https://github.com/grpc/grpc-go/issues/4392 --- balancer/grpclb/grpclb.go | 2 +- balancer/grpclb/grpclb_test.go | 625 ++++++++---------- .../grpclb/{state => grpclbstate}/state.go | 8 +- internal/resolver/dns/dns_resolver.go | 2 +- internal/resolver/dns/dns_resolver_test.go | 2 +- 5 files changed, 296 insertions(+), 343 deletions(-) rename balancer/grpclb/{state => grpclbstate}/state.go (87%) diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index dd15810d0aeb..b0dd72fce141 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + 
"google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index ed5297684dcd..f8fbfe03d5d1 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -36,15 +36,19 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/pickfirst" + "google.golang.org/grpc/internal/testutils/roundrobin" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" durationpb "github.com/golang/protobuf/ptypes/duration" @@ -318,7 +322,7 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ return nil } -func startBackends(sn string, fallback bool, lis ...net.Listener) (servers []*grpc.Server) { +func startBackends(t *testing.T, sn string, fallback bool, lis ...net.Listener) (servers []*grpc.Server) { for _, l := range lis { creds := &serverNameCheckCreds{ sn: sn, @@ -329,6 +333,7 @@ func startBackends(sn string, fallback bool, lis ...net.Listener) (servers []*gr go func(s *grpc.Server, l net.Listener) { s.Serve(l) }(s, l) + t.Logf("Started backend server listening on %s", l.Addr().String()) } return } @@ -351,7 +356,7 @@ type testServers struct { beListeners []net.Listener } -func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent string, statsChan chan *lbpb.ClientStats) (tss *testServers, cleanup func(), err 
error) { +func startBackendsAndRemoteLoadBalancer(t *testing.T, numberOfBackends int, customUserAgent string, statsChan chan *lbpb.ClientStats) (tss *testServers, cleanup func(), err error) { var ( beListeners []net.Listener ls *remoteBalancer @@ -360,7 +365,6 @@ func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent st bePorts []int ) for i := 0; i < numberOfBackends; i++ { - // Start a backend. beLis, e := net.Listen("tcp", "localhost:0") if e != nil { err = fmt.Errorf("failed to listen %v", err) @@ -371,9 +375,8 @@ func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent st beListeners = append(beListeners, testutils.NewRestartableListener(beLis)) } - backends := startBackends(beServerName, false, beListeners...) + backends := startBackends(t, beServerName, false, beListeners...) - // Start a load balancer. lbLis, err := net.Listen("tcp", "localhost:0") if err != nil { err = fmt.Errorf("failed to create the listener for the load balancer %v", err) @@ -389,6 +392,7 @@ func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent st go func() { lb.Serve(lbLis) }() + t.Logf("Started remote load balancer server listening on %s", lbLis.Addr().String()) tss = &testServers{ lbAddr: net.JoinHostPort(fakeName, strconv.Itoa(lbLis.Addr().(*net.TCPAddr).Port)), @@ -411,15 +415,16 @@ func startBackendsAndRemoteLoadBalancer(numberOfBackends int, customUserAgent st return } -func (s) TestGRPCLB(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, testUserAgent, nil) +// TestGRPCLB_Basic tests the basic case of a channel being configured with +// grpclb as the load balancing policy. 
+func (s) TestGRPCLB_Basic(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, testUserAgent, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() + // Push the test backend address to the remote balancer. tss.ls.sls <- &lbpb.ServerList{ Servers: []*lbpb.Server{ { @@ -430,36 +435,49 @@ func (s) TestGRPCLB(t *testing.T) { }, } - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the remote balancer + // address specified via attributes. + r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, + } + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) + + // Connect to the test backend. + dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), grpc.WithContextDialer(fakeNameDialer), - grpc.WithUserAgent(testUserAgent)) + grpc.WithUserAgent(testUserAgent), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{{ - Addr: tss.lbAddr, - ServerName: lbServerName, - }}}) - r.UpdateState(rs) + // Make one successful RPC. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() + testC := testpb.NewTestServiceClient(cc) if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } } -// The remote balancer sends response with duplicates to grpclb client. -func (s) TestGRPCLBWeighted(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(2, "", nil) +// TestGRPCLB_Weighted tests weighted roundrobin. The remote balancer is +// configured to send a response with duplicate backend addresses (to simulate +// weights) to the grpclb client. The test verifies that RPCs are weighted +// roundrobin-ed across these backends. +func (s) TestGRPCLB_Weighted(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 2, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -474,59 +492,67 @@ func (s) TestGRPCLBWeighted(t *testing.T) { Port: int32(tss.bePorts[1]), LoadBalanceToken: lbToken, }} - portsToIndex := make(map[int]int) - for i := range beServers { - portsToIndex[tss.bePorts[i]] = i + + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the remote balancer + // address specified via attributes. + r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, } + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + // Connect to test backends. 
+ dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), - grpc.WithContextDialer(fakeNameDialer)) + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{{ - Addr: tss.lbAddr, - ServerName: lbServerName, - }}}) - r.UpdateState(rs) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - sequences := []string{"00101", "00011"} + // Sequence represents the sequence of backends to be returned from the + // remote load balancer. + sequences := [][]int{ + {0, 0, 1, 0, 1}, + {0, 0, 0, 1, 1}, + } for _, seq := range sequences { - var ( - bes []*lbpb.Server - p peer.Peer - result string - ) + // Push the configured sequence of backend to the remote balancer, and + // compute the expected addresses to which RPCs should be routed. + var backends []*lbpb.Server + var wantAddrs []resolver.Address for _, s := range seq { - bes = append(bes, beServers[s-'0']) + backends = append(backends, beServers[s]) + wantAddrs = append(wantAddrs, resolver.Address{Addr: tss.beListeners[s].Addr().String()}) } - tss.ls.sls <- &lbpb.ServerList{Servers: bes} + tss.ls.sls <- &lbpb.ServerList{Servers: backends} - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) - } - // The generated result will be in format of "0010100101". 
- if !strings.Contains(result, strings.Repeat(seq, 2)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + testC := testpb.NewTestServiceClient(cc) + if err := roundrobin.CheckWeightedRoundRobinRPCs(ctx, testC, wantAddrs); err != nil { + t.Fatal(err) } } } -func (s) TestDropRequest(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(2, "", nil) +// TestGRPCLB_DropRequest tests grpclb support for dropping requests based on +// configuration received from the remote balancer. +// +// TODO: Rewrite this test to verify drop behavior using the +// ClientStats.CallsFinishedWithDrop field instead. +func (s) TestGRPCLB_DropRequest(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 2, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -547,23 +573,34 @@ func (s) TestDropRequest(t *testing.T) { }}, } - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the remote balancer + // address specified via attributes. + r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, + } + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) + + // Connect to test backends. + dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), - grpc.WithContextDialer(fakeNameDialer)) + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) 
if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{{ - Addr: tss.lbAddr, - ServerName: lbServerName, - }}}) - r.UpdateState(rs) - var ( i int p peer.Peer @@ -675,16 +712,17 @@ func (s) TestDropRequest(t *testing.T) { } } -// When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list. -func (s) TestBalancerDisconnects(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - +// TestGRPCLB_BalancerDisconnects tests the case where the remote balancer in +// use disconnects. The test verifies that grpclb connects to the next remote +// balancer address specified in attributes, and RPCs get routed to the backends +// returned by the new balancer. +func (s) TestGRPCLB_BalancerDisconnects(t *testing.T) { var ( tests []*testServers lbs []*grpc.Server ) for i := 0; i < 2; i++ { - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -704,18 +742,12 @@ func (s) TestBalancerDisconnects(t *testing.T) { lbs = append(lbs, tss.lb) } - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, - grpc.WithResolvers(r), - grpc.WithTransportCredentials(&serverNameCheckCreds{}), - grpc.WithContextDialer(fakeNameDialer)) - if err != nil { - t.Fatalf("Failed to dial to the backend %v", err) - } - defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{ + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the 
remote balancer + // addresses specified via attributes. + r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ { Addr: tests[0].lbAddr, ServerName: lbServerName, @@ -724,42 +756,50 @@ func (s) TestBalancerDisconnects(t *testing.T) { Addr: tests[1].lbAddr, ServerName: lbServerName, }, - }}) - r.UpdateState(rs) + }, + } + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) + + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - var p peer.Peer - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port != tests[0].bePorts[0] { - t.Fatalf("got peer: %v, want peer port: %v", p.Addr, tests[0].bePorts[0]) + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tests[0].beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } - lbs[0].Stop() // Stop balancer[0], balancer[1] should be used by grpclb. // Check peer address to see if that happened. 
- for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tests[1].bePorts[0] { - return - } - time.Sleep(time.Millisecond) + lbs[0].Stop() + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tests[1].beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } - t.Fatalf("No RPC sent to second backend after 1 second") } -func (s) TestFallback(t *testing.T) { +// TestGRPCLB_Fallback tests the following fallback scenarios: +// - when the remote balancer address specified in attributes is invalid, the +// test verifies that RPCs are routed to the fallback backend. +// - when the remote balancer address specified in attributes is changed to a +// valid one, the test verifies that RPCs are routed to the backend returned +// by the remote balancer. +// - when the configured remote balancer goes down, the test verifies that +// RPCs are routed to the fallback backend. +func (s) TestGRPCLB_Fallback(t *testing.T) { balancer.Register(newLBBuilderWithFallbackTimeout(100 * time.Millisecond)) defer balancer.Register(newLBBuilder()) - r := manual.NewBuilderWithScheme("whatever") - // Start a remote balancer and a backend. Push the backend address to the - // remote balancer. - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -773,6 +813,7 @@ func (s) TestFallback(t *testing.T) { }, }, } + // Push the backend address to the remote balancer. tss.ls.sls <- sl // Start a standalone backend for fallback. 
@@ -781,13 +822,16 @@ func (s) TestFallback(t *testing.T) { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() - standaloneBEs := startBackends(beServerName, true, beLis) + standaloneBEs := startBackends(t, beServerName, true, beLis) defer stopBackends(standaloneBEs) - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), - grpc.WithContextDialer(fakeNameDialer)) + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } @@ -807,12 +851,8 @@ func (s) TestFallback(t *testing.T) { // Make an RPC and verify that it got routed to the fallback backend. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - var p peer.Peer - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - if p.Addr.String() != beLis.Addr().String() { - t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr()) + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: beLis.Addr().String()}}); err != nil { + t.Fatal(err) } // Push another update to the resolver, this time with a valid balancer @@ -830,73 +870,31 @@ func (s) TestFallback(t *testing.T) { } // Wait for RPCs to get routed to the backend behind the remote balancer. 
- var backendUsed bool - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 1 second") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } // Close backend and remote balancer connections, should use fallback. tss.beListeners[0].(*testutils.RestartableListener).Stop() tss.lbListener.(*testutils.RestartableListener).Stop() - - var fallbackUsed bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - // Because we are hard-closing the connection, above, it's possible - // for the first RPC attempt to be sent on the old connection, - // which will lead to an Unavailable error when it is closed. - // Ignore unavailable errors. - if status.Code(err) != codes.Unavailable { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - } - if p.Addr.String() == beLis.Addr().String() { - fallbackUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !fallbackUsed { - t.Fatalf("No RPC sent to fallback after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: beLis.Addr().String()}}); err != nil { + t.Fatal(err) } // Restart backend and remote balancer, should not use fallback backend. 
tss.beListeners[0].(*testutils.RestartableListener).Restart() tss.lbListener.(*testutils.RestartableListener).Restart() tss.ls.sls <- sl - - var backendUsed2 bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed2 = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed2 { - t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } } -func (s) TestExplicitFallback(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - // Start a remote balancer and a backend. Push the backend address to the - // remote balancer. - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) +// TestGRPCLB_ExplicitFallback tests the case where the remote balancer sends an +// explicit fallback signal to the grpclb client, and the test verifies that +// RPCs are routed to the fallback backend. +func (s) TestGRPCLB_ExplicitFallback(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -910,6 +908,7 @@ func (s) TestExplicitFallback(t *testing.T) { }, }, } + // Push the backend address to the remote balancer. tss.ls.sls <- sl // Start a standalone backend for fallback. 
@@ -918,82 +917,55 @@ func (s) TestExplicitFallback(t *testing.T) { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() - standaloneBEs := startBackends(beServerName, true, beLis) + standaloneBEs := startBackends(t, beServerName, true, beLis) defer stopBackends(standaloneBEs) - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the address of the + // fallback backend. The remote balancer address is specified via + // attributes. + r := manual.NewBuilderWithScheme("whatever") + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.InitialState(rs) + + dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), - grpc.WithContextDialer(fakeNameDialer)) + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) 
if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) - rs := resolver.State{ - Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, - ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), - } - rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) - r.UpdateState(rs) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - var p peer.Peer - var backendUsed bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } // Send fallback signal from remote balancer; should use fallback. tss.ls.fallbackNow() - - var fallbackUsed bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.String() == beLis.Addr().String() { - fallbackUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !fallbackUsed { - t.Fatalf("No RPC sent to fallback after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: beLis.Addr().String()}}); err != nil { + t.Fatal(err) } // Send another server list; should use backends again. 
tss.ls.sls <- sl - - backendUsed = false - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } } -func (s) TestFallBackWithNoServerAddress(t *testing.T) { +// TestGRPCLB_FallBackWithNoServerAddress tests the fallback case where no +// backend addresses are returned by the remote balancer. +func (s) TestGRPCLB_FallBackWithNoServerAddress(t *testing.T) { resolveNowCh := testutils.NewChannel() r := manual.NewBuilderWithScheme("whatever") r.ResolveNowCallback = func(resolver.ResolveNowOptions) { @@ -1004,9 +976,9 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { } } - // Start a remote balancer and a backend. Push the backend address to the - // remote balancer yet. - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) + // Start a remote balancer and a backend. Don't push the backend address to + // the remote balancer yet. 
+ tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1027,13 +999,15 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() - standaloneBEs := startBackends(beServerName, true, beLis) + standaloneBEs := startBackends(t, beServerName, true, beLis) defer stopBackends(standaloneBEs) - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), - grpc.WithContextDialer(fakeNameDialer)) + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } @@ -1086,27 +1060,17 @@ func (s) TestFallBackWithNoServerAddress(t *testing.T) { case <-tss.ls.balanceLoadCh: } - var backendUsed bool - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 1 second") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } } } -func (s) TestGRPCLBPickFirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(3, "", nil) +// TestGRPCLB_PickFirst configures grpclb with pick_first as the child policy. +// The test changes the list of backend addresses returned by the remote +// balancer and verifies that RPCs are sent to the first address returned. 
+func (s) TestGRPCLB_PickFirst(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 3, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1125,105 +1089,87 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { Port: int32(tss.bePorts[2]), LoadBalanceToken: lbToken, }} - portsToIndex := make(map[int]int) - for i := range beServers { - portsToIndex[tss.bePorts[i]] = i + beServerAddrs := []resolver.Address{} + for _, lis := range tss.beListeners { + beServerAddrs = append(beServerAddrs, resolver.Address{Addr: lis.Addr().String()}) } - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + // Connect to the test backends. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), - grpc.WithContextDialer(fakeNameDialer)) + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - var ( - p peer.Peer - result string - ) - tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} - // Start with sub policy pick_first. + // Push a service config with grpclb as the load balancing policy and + // configure pick_first as its child policy. rs := resolver.State{ServiceConfig: r.CC.ParseServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)} + + // Push a resolver update with the remote balancer address specified via + // attributes. r.UpdateState(grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}})) + // Push all three backend addresses to the remote balancer, and verify that + // RPCs are routed to the first backend. 
+ tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) - } - if seq := "00000"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, beServerAddrs[0]); err != nil { + t.Fatal(err) } + // Update the address list with the remote balancer and verify pick_first + // behavior based on the new backends. tss.ls.sls <- &lbpb.ServerList{Servers: beServers[2:]} - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) - } - if seq := "22222"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, beServerAddrs[2]); err != nil { + t.Fatal(err) } + // Update the address list with the remote balancer and verify pick_first + // behavior based on the new backends. Since the currently connected backend + // is in the new list (even though it is not the first one on the list), + // pick_first will continue to use it. 
tss.ls.sls <- &lbpb.ServerList{Servers: beServers[1:]} - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) - } - if seq := "22222"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, beServerAddrs[2]); err != nil { + t.Fatal(err) } - // Switch sub policy to roundrobin. - rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{{ - Addr: tss.lbAddr, - ServerName: lbServerName, - }}}) - r.UpdateState(rs) - - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) + // Switch child policy to roundrobin. 
+ s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, } - if seq := "121212"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, s) + r.UpdateState(rs) + testC := testpb.NewTestServiceClient(cc) + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, beServerAddrs[1:]); err != nil { + t.Fatal(err) } tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) - } - if seq := "012012012"; !strings.Contains(result, strings.Repeat(seq, 2)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, beServerAddrs[0:3]); err != nil { + t.Fatal(err) } } -func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { +// TestGRPCLB_BackendConnectionErrorPropagation tests the case where grpclb +// falls back to a backend which returns an error and the test verifies that the +// error is propagated to the RPC. +func (s) TestGRPCLB_BackendConnectionErrorPropagation(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") // Start up an LB which will tells the client to fall back right away. 
- tss, cleanup, err := startBackendsAndRemoteLoadBalancer(0, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 0, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1237,7 +1183,7 @@ func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() - standaloneBEs := startBackends("arbitrary.invalid.name", true, beLis) + standaloneBEs := startBackends(t, "arbitrary.invalid.name", true, beLis) defer stopBackends(standaloneBEs) cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, @@ -1275,9 +1221,7 @@ func (s) TestGRPCLBBackendConnectionErrorPropagation(t *testing.T) { } func testGRPCLBEmptyServerList(t *testing.T, svcfg string) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1289,13 +1233,15 @@ func testGRPCLBEmptyServerList(t *testing.T, svcfg string) { LoadBalanceToken: lbToken, }} - creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), - grpc.WithContextDialer(fakeNameDialer)) + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, dopts...) 
if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } @@ -1304,11 +1250,15 @@ func testGRPCLBEmptyServerList(t *testing.T, svcfg string) { tss.ls.sls <- &lbpb.ServerList{Servers: beServers} - rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(svcfg)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{{ - Addr: tss.lbAddr, - ServerName: lbServerName, - }}}) + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, + } + rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(svcfg)}, s) r.UpdateState(rs) t.Log("Perform an initial RPC and expect it to succeed...") if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { @@ -1317,7 +1267,7 @@ func testGRPCLBEmptyServerList(t *testing.T, svcfg string) { t.Log("Now send an empty server list. Wait until we see an RPC failure to make sure the client got it...") tss.ls.sls <- &lbpb.ServerList{} gotError := false - for i := 0; i < 100; i++ { + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { gotError = true break @@ -1344,9 +1294,7 @@ func (s) TestGRPCLBEmptyServerListPickFirst(t *testing.T) { func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - // Start a remote balancer and a backend. Push the backend address to the - // remote balancer. - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1360,6 +1308,7 @@ func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { }, }, } + // Push the backend address to the remote balancer. 
tss.ls.sls <- sl cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, @@ -1405,11 +1354,15 @@ func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { // Push a resolver update with grpclb configuration containing the // target_name field. Our fake remote balancer has been updated above to expect the newServerName in the initial request. lbCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"grpclb": {"serviceName": "%s"}}]}`, newServerName) - rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(lbCfg)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{{ - Addr: tss.lbAddr, - ServerName: lbServerName, - }}}) + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, + } + rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(lbCfg)}, s) r.UpdateState(rs) select { case <-ctx.Done(): @@ -1445,7 +1398,7 @@ func checkStats(stats, expected *rpcStats) error { func runAndCheckStats(t *testing.T, drop bool, statsChan chan *lbpb.ClientStats, runRPCs func(*grpc.ClientConn), statsWant *rpcStats) error { r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := startBackendsAndRemoteLoadBalancer(1, "", statsChan) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", statsChan) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } diff --git a/balancer/grpclb/state/state.go b/balancer/grpclb/grpclbstate/state.go similarity index 87% rename from balancer/grpclb/state/state.go rename to balancer/grpclb/grpclbstate/state.go index 4ecfa1c21511..cece046be343 100644 --- a/balancer/grpclb/state/state.go +++ b/balancer/grpclb/grpclbstate/state.go @@ -16,9 +16,9 @@ * */ -// Package state declares grpclb types to be set by resolvers wishing to pass -// information to grpclb via resolver.State Attributes. 
-package state +// Package grpclbstate declares grpclb types to be set by resolvers wishing to +// pass information to grpclb via resolver.State Attributes. +package grpclbstate import ( "google.golang.org/grpc/resolver" @@ -27,7 +27,7 @@ import ( // keyType is the key to use for storing State in Attributes. type keyType string -const key = keyType("grpc.grpclb.state") +const key = keyType("grpc.grpclb.grpclbstate") // State contains gRPCLB-relevant data passed from the name resolver. type State struct { diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index b08ac30adfef..d51302e65c62 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,7 @@ import ( "sync" "time" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 6bfcf299b33c..bfed6a74ff38 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -34,7 +34,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/testutils" From 617d6c8a6cb089aa7b049c88710961c183ac474c Mon Sep 17 00:00:00 2001 From: Gregory Cooke Date: Thu, 1 Dec 2022 14:09:57 -0500 Subject: [PATCH 694/998] security/advancedtls: add test for crl cache expiration behavior (#5749) * Add test for cache reloading * cleanup * swap to using nil for no revoked certs * Add 
description for new test --- security/advancedtls/crl_test.go | 40 ++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/security/advancedtls/crl_test.go b/security/advancedtls/crl_test.go index 1f026b1d2885..153074a28c5a 100644 --- a/security/advancedtls/crl_test.go +++ b/security/advancedtls/crl_test.go @@ -731,3 +731,43 @@ func TestIssuerNonPrintableString(t *testing.T) { t.Fatalf("fetchCRL failed: %s", err) } } + +// TestCRLCacheExpirationReloading tests the basic expiration and reloading of a +// cached CRL. The setup places an empty CRL in the cache, and a corresponding +// CRL with a revocation in the CRL directory. We then validate the certificate +// to verify that the certificate is not revoked. Then, we modify the +// NextUpdate time to be in the past so that when we next check for revocation, +// the existing cache entry should be seen as expired, and the CRL in the +// directory showing `revokedInt.pem` as revoked will be loaded, resulting in +// the check returning `RevocationRevoked`. 
+func TestCRLCacheExpirationReloading(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("Creating cache failed") + } + + var certs = makeChain(t, testdata.Path("crl/revokedInt.pem")) + // Certs[1] has the same issuer as the revoked cert + rawIssuer := certs[1].RawIssuer + + // `3.crl`` revokes `revokedInt.pem` + crl := loadCRL(t, testdata.Path("crl/3.crl")) + // Modify the crl so that the cert is NOT revoked and add it to the cache + crl.CertList.TBSCertList.RevokedCertificates = nil + crl.CertList.TBSCertList.NextUpdate = time.Now().Add(time.Hour) + cache.Add(hex.EncodeToString(rawIssuer), crl) + var cfg = RevocationConfig{RootDir: testdata.Path("crl"), Cache: cache} + revocationStatus := checkChain(certs, cfg) + if revocationStatus != RevocationUnrevoked { + t.Fatalf("Certificate check should be RevocationUnrevoked, was %v", revocationStatus) + } + + // Modify the entry in the cache so that the cache will be refreshed + crl.CertList.TBSCertList.NextUpdate = time.Now() + cache.Add(hex.EncodeToString(rawIssuer), crl) + + revocationStatus = checkChain(certs, cfg) + if revocationStatus != RevocationRevoked { + t.Fatalf("A certificate should have been `RevocationRevoked` but was %v", revocationStatus) + } +} From 736197138d20463a205671717be299ac634a7c8c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 1 Dec 2022 11:59:34 -0800 Subject: [PATCH 695/998] rls: use a regex for the expected error string (#5827) --- balancer/rls/control_channel_test.go | 32 ++++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go index ccba8e36f976..eb9a802039bd 100644 --- a/balancer/rls/control_channel_test.go +++ b/balancer/rls/control_channel_test.go @@ -25,7 +25,7 @@ import ( "errors" "fmt" "io/ioutil" - "strings" + "regexp" "testing" "time" @@ -350,7 +350,7 @@ func (s) TestControlChannelCredsSuccess(t *testing.T) { } } -func 
testControlChannelCredsFailure(t *testing.T, sopts []grpc.ServerOption, bopts balancer.BuildOptions, wantCode codes.Code, wantErr string) { +func testControlChannelCredsFailure(t *testing.T, sopts []grpc.ServerOption, bopts balancer.BuildOptions, wantCode codes.Code, wantErrRegex *regexp.Regexp) { // StartFakeRouteLookupServer a fake server. // // Start an RLS server and set the throttler to never throttle requests. The @@ -369,8 +369,8 @@ func testControlChannelCredsFailure(t *testing.T, sopts []grpc.ServerOption, bop // Perform the lookup and expect the callback to be invoked with an error. errCh := make(chan error) ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(_ []string, _ string, err error) { - if st, ok := status.FromError(err); !ok || st.Code() != wantCode || !strings.Contains(st.String(), wantErr) { - errCh <- fmt.Errorf("rlsClient.lookup() returned error: %v, wantCode: %v, wantErr: %s", err, wantCode, wantErr) + if st, ok := status.FromError(err); !ok || st.Code() != wantCode || !wantErrRegex.MatchString(st.String()) { + errCh <- fmt.Errorf("rlsClient.lookup() returned error: %v, wantCode: %v, wantErr: %s", err, wantCode, wantErrRegex.String()) return } errCh <- nil @@ -393,11 +393,11 @@ func (s) TestControlChannelCredsFailure(t *testing.T) { clientCreds := makeTLSCreds(t, "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") tests := []struct { - name string - sopts []grpc.ServerOption - bopts balancer.BuildOptions - wantCode codes.Code - wantErr string + name string + sopts []grpc.ServerOption + bopts balancer.BuildOptions + wantCode codes.Code + wantErrRegex *regexp.Regexp }{ { name: "transport creds authority mismatch", @@ -406,8 +406,8 @@ func (s) TestControlChannelCredsFailure(t *testing.T) { DialCreds: clientCreds, Authority: "authority-mismatch", }, - wantCode: codes.Unavailable, - wantErr: "transport: authentication handshake failed: x509: certificate is valid for *.test.example.com, not 
authority-mismatch", + wantCode: codes.Unavailable, + wantErrRegex: regexp.MustCompile(`transport: authentication handshake failed: .* \*.test.example.com.*authority-mismatch`), }, { name: "transport creds handshake failure", @@ -416,8 +416,8 @@ func (s) TestControlChannelCredsFailure(t *testing.T) { DialCreds: clientCreds, Authority: "x.test.example.com", }, - wantCode: codes.Unavailable, - wantErr: "transport: authentication handshake failed: tls: first record does not look like a TLS handshake", + wantCode: codes.Unavailable, + wantErrRegex: regexp.MustCompile("transport: authentication handshake failed: .*"), }, { name: "call creds mismatch", @@ -432,13 +432,13 @@ func (s) TestControlChannelCredsFailure(t *testing.T) { }, Authority: "x.test.example.com", }, - wantCode: codes.PermissionDenied, - wantErr: "didn't find call creds", + wantCode: codes.PermissionDenied, + wantErrRegex: regexp.MustCompile("didn't find call creds"), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - testControlChannelCredsFailure(t, test.sopts, test.bopts, test.wantCode, test.wantErr) + testControlChannelCredsFailure(t, test.sopts, test.bopts, test.wantCode, test.wantErrRegex) }) } } From 001d234e1f2dc942e65d0b58baa9561d3103d74a Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 1 Dec 2022 21:09:18 -0500 Subject: [PATCH 696/998] rls: Fix regex in rls test (#5834) --- balancer/rls/control_channel_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go index eb9a802039bd..86342f7d5b44 100644 --- a/balancer/rls/control_channel_test.go +++ b/balancer/rls/control_channel_test.go @@ -407,7 +407,7 @@ func (s) TestControlChannelCredsFailure(t *testing.T) { Authority: "authority-mismatch", }, wantCode: codes.Unavailable, - wantErrRegex: regexp.MustCompile(`transport: authentication handshake failed: .* 
\*.test.example.com.*authority-mismatch`), + wantErrRegex: regexp.MustCompile(`transport: authentication handshake failed: .* \*\.test\.example\.com.*authority-mismatch`), }, { name: "transport creds handshake failure", From a2054471ce480ee4d1003d46ed73b2edd915f656 Mon Sep 17 00:00:00 2001 From: richzw <1590890+richzw@users.noreply.github.com> Date: Wed, 7 Dec 2022 00:57:50 +0800 Subject: [PATCH 697/998] examples: add new example to show updating metadata in interceptors (#5788) --- Documentation/grpc-metadata.md | 5 + examples/examples_test.sh | 3 + .../features/metadata_interceptor/README.md | 70 +++++++++ .../metadata_interceptor/client/main.go | 86 +++++++++++ .../metadata_interceptor/server/main.go | 140 ++++++++++++++++++ 5 files changed, 304 insertions(+) create mode 100644 examples/features/metadata_interceptor/README.md create mode 100644 examples/features/metadata_interceptor/client/main.go create mode 100644 examples/features/metadata_interceptor/server/main.go diff --git a/Documentation/grpc-metadata.md b/Documentation/grpc-metadata.md index ff4de6e71de3..06b36f4ac171 100644 --- a/Documentation/grpc-metadata.md +++ b/Documentation/grpc-metadata.md @@ -223,3 +223,8 @@ func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) erro stream.SetTrailer(trailer) } ``` + +## Updating metadata from a server interceptor + +An example for updating metadata from a server interceptor is +available [here](../examples/features/metadata_interceptor/server/main.go). 
diff --git a/examples/examples_test.sh b/examples/examples_test.sh index bde2837f659b..0c919a1a0969 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -59,6 +59,7 @@ EXAMPLES=( "features/interceptor" "features/load_balancing" "features/metadata" + "features/metadata_interceptor" "features/multiplex" "features/name_resolving" "features/unix_abstract" @@ -105,6 +106,7 @@ declare -A EXPECTED_SERVER_OUTPUT=( ["features/interceptor"]="unary echoing message \"hello world\"" ["features/load_balancing"]="serving on :50051" ["features/metadata"]="message:\"this is examples/metadata\", sending echo" + ["features/metadata_interceptor"]="key1 from metadata: " ["features/multiplex"]=":50051" ["features/name_resolving"]="serving on localhost:50051" ["features/unix_abstract"]="serving on @abstract-unix-socket" @@ -121,6 +123,7 @@ declare -A EXPECTED_CLIENT_OUTPUT=( ["features/interceptor"]="UnaryEcho: hello world" ["features/load_balancing"]="calling helloworld.Greeter/SayHello with pick_first" ["features/metadata"]="this is examples/metadata" + ["features/metadata_interceptor"]="BidiStreaming Echo: hello world" ["features/multiplex"]="Greeting: Hello multiplex" ["features/name_resolving"]="calling helloworld.Greeter/SayHello to \"example:///resolver.example.grpc.io\"" ["features/unix_abstract"]="calling echo.Echo/UnaryEcho to unix-abstract:abstract-unix-socket" diff --git a/examples/features/metadata_interceptor/README.md b/examples/features/metadata_interceptor/README.md new file mode 100644 index 000000000000..93a6925d79ef --- /dev/null +++ b/examples/features/metadata_interceptor/README.md @@ -0,0 +1,70 @@ +# Metadata interceptor example + +This example shows how to update metadata from unary and streaming interceptors on the server. +Please see +[grpc-metadata.md](https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) +for more information. 
+ +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +#### Unary interceptor + +The interceptor can read existing metadata from the RPC context passed to it. +Since Go contexts are immutable, the interceptor will have to create a new context +with updated metadata and pass it to the provided handler. + +```go +func SomeInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // Get the incoming metadata from the RPC context, and add a new + // key-value pair to it. + md, ok := metadata.FromIncomingContext(ctx) + md.Append("key1", "value1") + + // Create a context with the new metadata and pass it to handler. + ctx = metadata.NewIncomingContext(ctx, md) + return handler(ctx, req) +} +``` + +#### Streaming interceptor + +`grpc.ServerStream` does not provide a way to modify its RPC context. The streaming +interceptor therefore needs to implement the `grpc.ServerStream` interface and return +a context with updated metadata. + +The easiest way to do this would be to create a type which embeds the `grpc.ServerStream` +interface and overrides only the `Context()` method to return a context with updated +metadata. The streaming interceptor would then pass this wrapped stream to the provided handler. + +```go +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (s *wrappedStream) Context() context.Context { + return s.ctx +} + +func SomeStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // Get the incoming metadata from the RPC context, and add a new + // key-value pair to it. + md, ok := metadata.FromIncomingContext(ctx) + md.Append("key1", "value1") + + // Create a context with the new metadata and pass it to handler. 
+	ctx := metadata.NewIncomingContext(ss.Context(), md) + + return handler(srv, &wrappedStream{ss, ctx}) +} +``` \ No newline at end of file diff --git a/examples/features/metadata_interceptor/client/main.go b/examples/features/metadata_interceptor/client/main.go new file mode 100644 index 000000000000..a6ad804d726a --- /dev/null +++ b/examples/features/metadata_interceptor/client/main.go @@ -0,0 +1,86 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client. 
+package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func callUnaryEcho(ctx context.Context, client pb.EchoClient) { + resp, err := client.UnaryEcho(ctx, &pb.EchoRequest{Message: "hello world"}) + if err != nil { + log.Fatalf("UnaryEcho %v", err) + } + fmt.Println("UnaryEcho: ", resp.Message) +} + +func callBidiStreamingEcho(ctx context.Context, client pb.EchoClient) { + c, err := client.BidirectionalStreamingEcho(ctx) + if err != nil { + log.Fatalf("BidiStreamingEcho %v", err) + } + + if err := c.Send(&pb.EchoRequest{Message: "hello world"}); err != nil { + log.Fatalf("Sending echo request: %v", err) + } + c.CloseSend() + + for { + resp, err := c.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("Receiving echo response: %v", err) + } + fmt.Println("BidiStreaming Echo: ", resp.Message) + } +} + +func main() { + flag.Parse() + + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("grpc.Dial(%q): %v", *addr, err) + } + defer conn.Close() + + ec := pb.NewEchoClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + callUnaryEcho(ctx, ec) + + callBidiStreamingEcho(ctx, ec) +} diff --git a/examples/features/metadata_interceptor/server/main.go b/examples/features/metadata_interceptor/server/main.go new file mode 100644 index 000000000000..8f0dc5bfe6d4 --- /dev/null +++ b/examples/features/metadata_interceptor/server/main.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. +package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +var errMissingMetadata = status.Errorf(codes.InvalidArgument, "no incoming metadata in rpc context") + +type server struct { + pb.UnimplementedEchoServer +} + +func unaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMissingMetadata + } + + md.Append("key1", "value1") + ctx = metadata.NewIncomingContext(ctx, md) + + return handler(ctx, req) +} + +func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { + fmt.Printf("--- UnaryEcho ---\n") + + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Internal, "UnaryEcho: missing incoming metadata in rpc context") + } + + // Read and print metadata added by the interceptor. + if v, ok := md["key1"]; ok { + fmt.Printf("key1 from metadata: \n") + for i, e := range v { + fmt.Printf(" %d. 
%s\n", i, e) + } + } + + return &pb.EchoResponse{Message: in.Message}, nil +} + +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (s *wrappedStream) Context() context.Context { + return s.ctx +} + +func streamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return errMissingMetadata + } + + md.Append("key1", "value1") + ctx := metadata.NewIncomingContext(ss.Context(), md) + + return handler(srv, &wrappedStream{ss, ctx}) +} + +func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error { + fmt.Printf("--- BidirectionalStreamingEcho ---\n") + + md, ok := metadata.FromIncomingContext(stream.Context()) + if !ok { + return status.Errorf(codes.Internal, "BidirectionalStreamingEcho: missing incoming metadata in rpc context") + } + + // Read and print metadata added by the interceptor. + if v, ok := md["key1"]; ok { + fmt.Printf("key1 from metadata: \n") + for i, e := range v { + fmt.Printf(" %d. %s\n", i, e) + } + } + + // Read requests and send responses. 
+ for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + if err = stream.Send(&pb.EchoResponse{Message: in.Message}); err != nil { + return err + } + } +} + +func main() { + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("net.Listen() failed: %v", err) + } + fmt.Printf("Server listening at %v\n", lis.Addr()) + + s := grpc.NewServer(grpc.UnaryInterceptor(unaryInterceptor), grpc.StreamInterceptor(streamInterceptor)) + pb.RegisterEchoServer(s, &server{}) + s.Serve(lis) +} From f7c110af15b7fa3bd3b710f184a2a5218578ed46 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 6 Dec 2022 10:27:30 -0800 Subject: [PATCH 698/998] test: remove use of deprecated WithInsecure() API (#5836) --- test/authority_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/authority_test.go b/test/authority_test.go index ccc99b18dba0..ef0f56656b24 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -227,7 +227,7 @@ func (s) TestAuthorityReplacedWithResolverAddress(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address, ServerName: expectedAuthority}}}) - cc, err := grpc.Dial(r.Scheme()+":///whatever", grpc.WithInsecure(), grpc.WithResolvers(r)) + cc, err := grpc.Dial(r.Scheme()+":///whatever", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", ss.Address, err) } From 19490352e8c1c7de044844858d4e04b0362c3c2f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 6 Dec 2022 11:59:35 -0800 Subject: [PATCH 699/998] ringhash: add logs to surface information about ring creation (#5832) Fixes https://github.com/grpc/grpc-go/issues/5781 --- xds/internal/balancer/ringhash/ring.go | 7 ++++++- xds/internal/balancer/ringhash/ring_test.go | 6 +++--- 
xds/internal/balancer/ringhash/ringhash.go | 2 +- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/xds/internal/balancer/ringhash/ring.go b/xds/internal/balancer/ringhash/ring.go index 71d31eaeb8b0..3e35556d8a73 100644 --- a/xds/internal/balancer/ringhash/ring.go +++ b/xds/internal/balancer/ringhash/ring.go @@ -24,6 +24,7 @@ import ( "strconv" xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" ) @@ -65,9 +66,12 @@ type ringEntry struct { // and first item with hash >= given hash will be returned. // // Must be called with a non-empty subConns map. -func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64) *ring { +func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring { + logger.Debugf("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 normalizedWeights, minWeight := normalizeWeights(subConns) + logger.Debugf("newRing: normalized subConn weights is %v", normalizedWeights) // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. @@ -78,6 +82,7 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64) *ri scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) ringSize := math.Ceil(scale) items := make([]*ringEntry, 0, int(ringSize)) + logger.Debugf("newRing: creating new ring of size %v", ringSize) // For each entry, scale*weight nodes are generated in the ring. 
// diff --git a/xds/internal/balancer/ringhash/ring_test.go b/xds/internal/balancer/ringhash/ring_test.go index b1d987609903..9c6eb0c242ff 100644 --- a/xds/internal/balancer/ringhash/ring_test.go +++ b/xds/internal/balancer/ringhash/ring_test.go @@ -52,7 +52,7 @@ func (s) TestRingNew(t *testing.T) { for _, min := range []uint64{3, 4, 6, 8} { for _, max := range []uint64{20, 8} { t.Run(fmt.Sprintf("size-min-%v-max-%v", min, max), func(t *testing.T) { - r := newRing(testSubConnMap, min, max) + r := newRing(testSubConnMap, min, max, nil) totalCount := len(r.items) if totalCount < int(min) || totalCount > int(max) { t.Fatalf("unexpect size %v, want min %v, max %v", totalCount, min, max) @@ -82,7 +82,7 @@ func equalApproximately(x, y float64) bool { } func (s) TestRingPick(t *testing.T) { - r := newRing(testSubConnMap, 10, 20) + r := newRing(testSubConnMap, 10, 20, nil) for _, h := range []uint64{xxhash.Sum64String("1"), xxhash.Sum64String("2"), xxhash.Sum64String("3"), xxhash.Sum64String("4")} { t.Run(fmt.Sprintf("picking-hash-%v", h), func(t *testing.T) { e := r.pick(h) @@ -100,7 +100,7 @@ func (s) TestRingPick(t *testing.T) { } func (s) TestRingNext(t *testing.T) { - r := newRing(testSubConnMap, 10, 20) + r := newRing(testSubConnMap, 10, 20, nil) for _, e := range r.items { ne := r.next(e) diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index 6f91ff303317..b9caefa63a2d 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -293,7 +293,7 @@ func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) err if regenerateRing { // Ring creation is guaranteed to not fail because we call newRing() // with a non-empty subConns map. 
- b.ring = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) + b.ring = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize, b.logger) b.regeneratePicker() b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } From 22c1fd2e10d2eebb48710237c49635779fd1f7af Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 7 Dec 2022 10:52:31 -0800 Subject: [PATCH 700/998] deps: update golang.org/x/net to latest in all modules (#5847) --- examples/go.mod | 6 +++--- examples/go.sum | 13 +++++++------ gcp/observability/go.mod | 6 +++--- gcp/observability/go.sum | 13 +++++++------ go.mod | 6 +++--- go.sum | 13 +++++++------ security/advancedtls/examples/go.mod | 6 +++--- security/advancedtls/examples/go.sum | 10 +++++++--- security/advancedtls/go.mod | 6 +++--- security/advancedtls/go.sum | 10 +++++++--- security/authorization/go.mod | 3 ++- security/authorization/go.sum | 21 ++++++++++++++++++--- test/tools/go.mod | 1 + test/tools/go.sum | 3 ++- 14 files changed, 73 insertions(+), 44 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index 2e6a7c299504..cb3f5ad8a70e 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -19,9 +19,9 @@ require ( github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect - golang.org/x/net v0.2.0 // indirect - golang.org/x/sys v0.2.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/examples/go.sum b/examples/go.sum index 942564bff6fa..b894c7be2049 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -591,8 +591,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net 
v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -693,11 +693,11 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod 
h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -708,8 +708,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index cac2d1d78ff2..fb4828fb8345 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -27,10 +27,10 @@ require ( github.com/googleapis/gax-go/v2 v2.6.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect - golang.org/x/net v0.2.0 // indirect + golang.org/x/net v0.4.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.2.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect 
google.golang.org/api v0.102.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 50272f7c7734..1967b291fa34 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -618,8 +618,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -726,11 +726,11 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 
h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -741,8 +741,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/go.mod b/go.mod index 9964600f921e..355d1937167e 100644 --- a/go.mod +++ b/go.mod @@ -11,9 +11,9 @@ require ( github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.9 
github.com/google/uuid v1.3.0 - golang.org/x/net v0.2.0 + golang.org/x/net v0.4.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - golang.org/x/sys v0.2.0 + golang.org/x/sys v0.3.0 google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 google.golang.org/protobuf v1.28.1 ) @@ -23,6 +23,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.1 // indirect github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/text v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/go.sum b/go.sum index cdcf905cf85a..07842338cc91 100644 --- a/go.sum +++ b/go.sum @@ -618,8 +618,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -723,11 +723,11 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -738,8 +738,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index aa41ccbd92cd..b5f9fe48c78e 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -11,9 +11,9 @@ require ( require ( github.com/golang/protobuf v1.5.2 // indirect golang.org/x/crypto v0.3.0 // indirect - golang.org/x/net v0.2.0 // indirect - golang.org/x/sys v0.2.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index b838ed1b97b1..0f3640bf7e4a 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -585,8 +585,9 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -686,11 +687,13 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -701,8 +704,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index fe75f27fc6c3..4df7ab38c2ae 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -11,9 +11,9 @@ require ( require ( github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/net v0.2.0 // indirect - golang.org/x/sys v0.2.0 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index b838ed1b97b1..0f3640bf7e4a 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -585,8 +585,9 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= 
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -686,11 +687,13 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -701,8 +704,9 @@ 
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/security/authorization/go.mod b/security/authorization/go.mod index 0413d91056af..7596d57e207b 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -17,5 +17,6 @@ require ( github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/text v0.5.0 // indirect ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index db688f8fd778..fc168047bc1f 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -63,10 +63,12 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -74,6 +76,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -83,9 +86,12 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn 
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -93,6 +99,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -102,16 +109,23 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -120,6 +134,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/test/tools/go.mod b/test/tools/go.mod index f0be567a7d4a..d61d0e14551f 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -8,6 +8,7 @@ require ( github.com/golang/protobuf v1.5.2 golang.org/x/exp/typeparams v0.0.0-20221114191408-850992195362 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/sys v0.3.0 // indirect golang.org/x/tools v0.3.0 google.golang.org/protobuf v1.28.1 // indirect honnef.co/go/tools v0.3.3 diff --git 
a/test/tools/go.sum b/test/tools/go.sum index 2a7e36966de0..ca57c49e5c5d 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -41,8 +41,9 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= From 638141fbb9c9f6a40b23fe073a582aaaddb43b4f Mon Sep 17 00:00:00 2001 From: richzw <1590890+richzw@users.noreply.github.com> Date: Thu, 8 Dec 2022 02:52:57 +0800 Subject: [PATCH 701/998] examples: add feature/cancellation retry to example test script (#5846) --- examples/examples_test.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/examples/examples_test.sh b/examples/examples_test.sh index 0c919a1a0969..e9985d678096 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -52,6 +52,7 @@ EXAMPLES=( "helloworld" "route_guide" "features/authentication" + "features/cancellation" "features/compression" "features/deadline" "features/encryption/TLS" @@ -62,6 +63,7 @@ EXAMPLES=( "features/metadata_interceptor" "features/multiplex" "features/name_resolving" + "features/retry" "features/unix_abstract" ) 
@@ -99,6 +101,7 @@ declare -A EXPECTED_SERVER_OUTPUT=( ["helloworld"]="Received: world" ["route_guide"]="" ["features/authentication"]="server starting on port 50051..." + ["features/cancellation"]="server: error receiving from stream: rpc error: code = Canceled desc = context canceled" ["features/compression"]="UnaryEcho called with message \"compress\"" ["features/deadline"]="" ["features/encryption/TLS"]="" @@ -109,6 +112,7 @@ declare -A EXPECTED_SERVER_OUTPUT=( ["features/metadata_interceptor"]="key1 from metadata: " ["features/multiplex"]=":50051" ["features/name_resolving"]="serving on localhost:50051" + ["features/retry"]="request succeeded count: 4" ["features/unix_abstract"]="serving on @abstract-unix-socket" ) @@ -116,6 +120,7 @@ declare -A EXPECTED_CLIENT_OUTPUT=( ["helloworld"]="Greeting: Hello world" ["route_guide"]="Feature: name: \"\", point:(416851321, -742674555)" ["features/authentication"]="UnaryEcho: hello world" + ["features/cancellation"]="cancelling context" ["features/compression"]="UnaryEcho call returned \"compress\", " ["features/deadline"]="wanted = DeadlineExceeded, got = DeadlineExceeded" ["features/encryption/TLS"]="UnaryEcho: hello world" @@ -126,6 +131,7 @@ declare -A EXPECTED_CLIENT_OUTPUT=( ["features/metadata_interceptor"]="BidiStreaming Echo: hello world" ["features/multiplex"]="Greeting: Hello multiplex" ["features/name_resolving"]="calling helloworld.Greeter/SayHello to \"example:///resolver.example.grpc.io\"" + ["features/retry"]="UnaryEcho reply: message:\"Try and Success\"" ["features/unix_abstract"]="calling echo.Echo/UnaryEcho to unix-abstract:abstract-unix-socket" ) From aba03e1ab1a506953566b436d658a40c2b2a506d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 8 Dec 2022 16:26:21 -0800 Subject: [PATCH 702/998] xds: pass options by value to helper routines which setup the management server in tests (#5833) --- internal/testutils/xds/e2e/server.go | 23 ++++++++----------- .../xds/e2e/setup_management_server.go | 
2 +- test/xds/xds_client_ack_nack_test.go | 2 +- test/xds/xds_client_affinity_test.go | 2 +- test/xds/xds_client_federation_test.go | 4 ++-- test/xds/xds_client_integration_test.go | 2 +- test/xds/xds_client_outlier_detection_test.go | 4 ++-- test/xds/xds_client_retry_test.go | 2 +- .../xds_rls_clusterspecifier_plugin_test.go | 2 +- test/xds/xds_security_config_nack_test.go | 4 ++-- test/xds/xds_server_integration_test.go | 6 ++--- test/xds/xds_server_rbac_test.go | 8 +++---- test/xds/xds_server_serving_mode_test.go | 4 ++-- xds/csds/csds_test.go | 2 +- .../clusterresolver/e2e_test/eds_impl_test.go | 8 +++---- xds/internal/httpfilter/fault/fault_test.go | 2 +- xds/internal/test/e2e/controlplane.go | 2 +- .../xdsclient/e2e_test/authority_test.go | 4 ++-- .../xdsclient/e2e_test/cds_watchers_test.go | 16 ++++++------- .../xdsclient/e2e_test/eds_watchers_test.go | 14 +++++------ .../e2e_test/federation_watchers_test.go | 4 ++-- .../xdsclient/e2e_test/lds_watchers_test.go | 16 ++++++------- .../xdsclient/e2e_test/misc_watchers_test.go | 2 +- .../xdsclient/e2e_test/rds_watchers_test.go | 14 +++++------ .../transport/transport_ack_nack_test.go | 6 ++--- .../transport/transport_backoff_test.go | 6 ++--- 26 files changed, 78 insertions(+), 83 deletions(-) diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go index 38eb01adea8d..dac74e3cebce 100644 --- a/internal/testutils/xds/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -104,18 +104,16 @@ type ManagementServerOptions struct { // resource snapshot held by the management server, as required by the test // logic. When the test is done, it should call the Stop() method to cleanup // resources allocated by the management server. -func StartManagementServer(opts *ManagementServerOptions) (*ManagementServer, error) { +func StartManagementServer(opts ManagementServerOptions) (*ManagementServer, error) { // Create a snapshot cache. 
The first parameter to NewSnapshotCache() // controls whether the server should wait for all resources to be // explicitly named in the request before responding to any of them. - wait := opts == nil || !opts.AllowResourceSubset + wait := !opts.AllowResourceSubset cache := v3cache.NewSnapshotCache(wait, v3cache.IDHash{}, serverLogger{}) logger.Infof("Created new snapshot cache...") - var lis net.Listener - if opts != nil && opts.Listener != nil { - lis = opts.Listener - } else { + lis := opts.Listener + if lis == nil { var err error lis, err = net.Listen("tcp", "localhost:0") if err != nil { @@ -126,14 +124,11 @@ func StartManagementServer(opts *ManagementServerOptions) (*ManagementServer, er // Cancelling the context passed to the server is the only way of stopping it // at the end of the test. ctx, cancel := context.WithCancel(context.Background()) - callbacks := v3server.CallbackFuncs{} - if opts != nil { - callbacks = v3server.CallbackFuncs{ - StreamOpenFunc: opts.OnStreamOpen, - StreamClosedFunc: opts.OnStreamClosed, - StreamRequestFunc: opts.OnStreamRequest, - StreamResponseFunc: opts.OnStreamResponse, - } + callbacks := v3server.CallbackFuncs{ + StreamOpenFunc: opts.OnStreamOpen, + StreamClosedFunc: opts.OnStreamClosed, + StreamRequestFunc: opts.OnStreamRequest, + StreamResponseFunc: opts.OnStreamResponse, } // Create an xDS management server and register the ADS implementation diff --git a/internal/testutils/xds/e2e/setup_management_server.go b/internal/testutils/xds/e2e/setup_management_server.go index 9ab76edc99a4..5ba730722401 100644 --- a/internal/testutils/xds/e2e/setup_management_server.go +++ b/internal/testutils/xds/e2e/setup_management_server.go @@ -41,7 +41,7 @@ import ( // - bootstrap contents to be used by the client // - xDS resolver builder to be used by the client // - a cleanup function to be invoked at the end of the test -func SetupManagementServer(t *testing.T, opts *ManagementServerOptions) (*ManagementServer, string, []byte, 
resolver.Builder, func()) { +func SetupManagementServer(t *testing.T, opts ManagementServerOptions) (*ManagementServer, string, []byte, resolver.Builder, func()) { t.Helper() // Spin up an xDS management server on a local port. diff --git a/test/xds/xds_client_ack_nack_test.go b/test/xds/xds_client_ack_nack_test.go index ca954f8a34fc..4ba750a05025 100644 --- a/test/xds/xds_client_ack_nack_test.go +++ b/test/xds/xds_client_ack_nack_test.go @@ -81,7 +81,7 @@ func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { // Map from stream id to a map of resource type to resource version. ackVersionsMap := make(map[int64]map[string]string) - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ Listener: lis, OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { // Return early under the following circumstances: diff --git a/test/xds/xds_client_affinity_test.go b/test/xds/xds_client_affinity_test.go index 94666af3b472..69ae3fc147c2 100644 --- a/test/xds/xds_client_affinity_test.go +++ b/test/xds/xds_client_affinity_test.go @@ -88,7 +88,7 @@ func (s) TestClientSideAffinitySanityCheck(t *testing.T) { return func() { envconfig.XDSRingHash = old } }()() - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() port, cleanup2 := startTestService(t, nil) diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index 1ae54b682389..13566a5db7dc 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -54,7 +54,7 @@ func (s) TestClientSideFederation(t *testing.T) { defer func() { envconfig.XDSFederation = oldXDSFederation }() // Start a management 
server as the default authority. - serverDefaultAuth, err := e2e.StartManagementServer(nil) + serverDefaultAuth, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } @@ -62,7 +62,7 @@ func (s) TestClientSideFederation(t *testing.T) { // Start another management server as the other authority. const nonDefaultAuth = "non-default-auth" - serverAnotherAuth, err := e2e.StartManagementServer(nil) + serverAnotherAuth, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go index a0bafb987a73..492972b7cc51 100644 --- a/test/xds/xds_client_integration_test.go +++ b/test/xds/xds_client_integration_test.go @@ -74,7 +74,7 @@ func startTestService(t *testing.T, server *stubserver.StubServer) (uint32, func } func (s) TestClientSideXDS(t *testing.T) { - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() port, cleanup2 := startTestService(t, nil) diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go index 424012700770..a2753c3ea8ca 100644 --- a/test/xds/xds_client_outlier_detection_test.go +++ b/test/xds/xds_client_outlier_detection_test.go @@ -49,7 +49,7 @@ import ( // Detection balancer. This test verifies that an RPC is able to proceed // normally with this configuration. 
func (s) TestOutlierDetection_NoopConfig(t *testing.T) { - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() port, cleanup2 := startTestService(t, nil) @@ -166,7 +166,7 @@ func checkRoundRobinRPCs(ctx context.Context, client testpb.TestServiceClient, a // the unhealthy upstream is ejected, RPC's should regularly round robin across // all three upstreams. func (s) TestOutlierDetectionWithOutlier(t *testing.T) { - managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Working backend 1. diff --git a/test/xds/xds_client_retry_test.go b/test/xds/xds_client_retry_test.go index 46eb8f34f3d0..fa3f4b865f6e 100644 --- a/test/xds/xds_client_retry_test.go +++ b/test/xds/xds_client_retry_test.go @@ -49,7 +49,7 @@ func (s) TestClientSideRetry(t *testing.T) { }, } - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() port, cleanup2 := startTestService(t, ss) diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index f5ab17bca7ee..13d5ba43670c 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -130,7 +130,7 @@ func testRLSinxDS(t *testing.T, lbPolicy e2e.LoadBalancingPolicy) { // Set up all components and configuration necessary - management server, // xDS resolver, fake RLS Server, and xDS configuration which specifies an // RLS Balancer that communicates to this set up fake RLS Server. 
- managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() port, cleanup2 := startTestService(t, nil) defer cleanup2() diff --git a/test/xds/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go index 750fff039155..8b292bc006d3 100644 --- a/test/xds/xds_security_config_nack_test.go +++ b/test/xds/xds_security_config_nack_test.go @@ -41,7 +41,7 @@ func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { missingIdentityProviderInstance = "missing-identity-provider-instance" missingRootProviderInstance = "missing-root-provider-instance" ) - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -324,7 +324,7 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { // SetupManagementServer() sets up a bootstrap file with certificate // provider instance names: `e2e.ServerSideCertProviderInstance` and // `e2e.ClientSideCertProviderInstance`. - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() port, cleanup2 := startTestService(t, nil) diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go index ac041ecd6849..cf0acccec51e 100644 --- a/test/xds/xds_server_integration_test.go +++ b/test/xds/xds_server_integration_test.go @@ -125,7 +125,7 @@ func hostPortFromListener(lis net.Listener) (string, uint32, error) { // the client and the server. 
This results in both of them using the // configured fallback credentials (which is insecure creds in this case). func (s) TestServerSideXDS_Fallback(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -207,7 +207,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -277,7 +277,7 @@ func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { // configuration pointing to the use of the file_watcher plugin and we verify // that the same client is now able to successfully make an RPC. 
func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) diff --git a/test/xds/xds_server_rbac_test.go b/test/xds/xds_server_rbac_test.go index b1058bda9677..3841d061c375 100644 --- a/test/xds/xds_server_rbac_test.go +++ b/test/xds/xds_server_rbac_test.go @@ -60,7 +60,7 @@ func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { defer func() { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -605,7 +605,7 @@ func (s) TestRBACHTTPFilter(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { func() { - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -790,7 +790,7 @@ func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { envconfig.XDSRBAC = oldRBAC }() - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) @@ -847,7 +847,7 @@ func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { envconfig.XDSRBAC = oldRBAC }() - 
managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() lis, cleanup2 := setupGRPCServer(t, bootstrapContents) diff --git a/test/xds/xds_server_serving_mode_test.go b/test/xds/xds_server_serving_mode_test.go index 6f730948c128..bf890133141a 100644 --- a/test/xds/xds_server_serving_mode_test.go +++ b/test/xds/xds_server_serving_mode_test.go @@ -43,7 +43,7 @@ import ( // change callback is not invoked and client connections to the server are not // recycled. func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) @@ -163,7 +163,7 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { // xDS enabled gRPC servers. It verifies that appropriate mode changes happen in // the server, and also verifies behavior of clientConns under these modes. func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Configure xDS credentials to be used on the server-side. diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 42ba3da6b40c..6772389c6f7f 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -236,7 +236,7 @@ func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.M // Spin up a xDS management server on a local port. 
nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer(nil) + fs, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatal(err) } diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index f9fe74dd5401..ee6b029d8c47 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -175,7 +175,7 @@ func clientEndpointsResource(nodeID, edsServiceName string, localities []localit // 4. Replace the backend. Test verifies that all RPCs reach the new backend. func (s) TestEDS_OneLocality(t *testing.T) { // Spin up a management server to receive xDS resources from. - managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() // Start backend servers which provide an implementation of the TestService. @@ -280,7 +280,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { // weighted roundrobined across them. func (s) TestEDS_MultipleLocalities(t *testing.T) { // Spin up a management server to receive xDS resources from. - managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() // Start backend servers which provide an implementation of the TestService. @@ -403,7 +403,7 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { // traffic is routed only to backends deemed capable of receiving traffic. func (s) TestEDS_EndpointsHealth(t *testing.T) { // Spin up a management server to receive xDS resources from. 
- managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() // Start backend servers which provide an implementation of the TestService. @@ -483,7 +483,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { // removed" error. func (s) TestEDS_EmptyUpdate(t *testing.T) { // Spin up a management server to receive xDS resources from. - managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, nil) + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() // Start backend servers which provide an implementation of the TestService. diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 3df3281c7fa3..6ae345552f21 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -100,7 +100,7 @@ func (*testService) FullDuplexCall(stream testpb.TestService_FullDuplexCallServe func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { // Spin up a xDS management server on a local port. nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer(nil) + fs, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatal(err) } diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go index 97bb38321fc4..eee2b13819ea 100644 --- a/xds/internal/test/e2e/controlplane.go +++ b/xds/internal/test/e2e/controlplane.go @@ -33,7 +33,7 @@ type controlPlane struct { func newControlPlane() (*controlPlane, error) { // Spin up an xDS management server on a local port. 
- server, err := e2e.StartManagementServer(nil) + server, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { return nil, fmt.Errorf("failed to spin up the xDS management server: %v", err) } diff --git a/xds/internal/xdsclient/e2e_test/authority_test.go b/xds/internal/xdsclient/e2e_test/authority_test.go index 93b63636dc68..17cdf674a0e6 100644 --- a/xds/internal/xdsclient/e2e_test/authority_test.go +++ b/xds/internal/xdsclient/e2e_test/authority_test.go @@ -77,14 +77,14 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. lisNonDefault := testutils.NewListenerWrapper(t, nil) // Start a management server to act as the default authority. - defaultAuthorityServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{Listener: lisDefault}) + defaultAuthorityServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{Listener: lisDefault}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } t.Cleanup(func() { defaultAuthorityServer.Stop() }) // Start a management server to act as the non-default authority. 
- nonDefaultAuthorityServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{Listener: lisNonDefault}) + nonDefaultAuthorityServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{Listener: lisNonDefault}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go index 22efe2ccb699..8a95c087dd29 100644 --- a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go @@ -144,7 +144,7 @@ func (s) TestCDSWatch(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -274,7 +274,7 @@ func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -359,7 +359,7 @@ func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) { // all watch callbacks. func (s) TestCDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. 
@@ -440,7 +440,7 @@ func (s) TestCDSWatch_ResourceCaching(t *testing.T) { firstAckReceived := grpcsync.NewEvent() secondRequestReceived := grpcsync.NewEvent() - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { // The first request has an empty version string. if !firstRequestReceived && req.GetVersionInfo() == "" { @@ -571,7 +571,7 @@ func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // invocation with error) does not take place. func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, err := e2e.StartManagementServer(nil) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } @@ -644,7 +644,7 @@ func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // invocation of the watch callback associated with the deleted resource. func (s) TesCDSWatch_ResourceRemoved(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -753,7 +753,7 @@ func (s) TesCDSWatch_ResourceRemoved(t *testing.T) { // propagated to the watcher. func (s) TestCDSWatch_NACKError(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. 
@@ -802,7 +802,7 @@ func (s) TestCDSWatch_NACKError(t *testing.T) { // invalid resource receive an error. func (s) TestCDSWatch_PartialValid(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. diff --git a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go index 790e83778492..d8a515233438 100644 --- a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go @@ -168,7 +168,7 @@ func (s) TestEDSWatch(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -322,7 +322,7 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -408,7 +408,7 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { // The test is run with both old and new style names. 
func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -490,7 +490,7 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { firstAckReceived := grpcsync.NewEvent() secondRequestReceived := grpcsync.NewEvent() - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { // The first request has an empty version string. if !firstRequestReceived && req.GetVersionInfo() == "" { @@ -628,7 +628,7 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // invocation with error) does not take place. func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, err := e2e.StartManagementServer(nil) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } @@ -700,7 +700,7 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // propagated to the watcher. func (s) TestEDSWatch_NACKError(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -749,7 +749,7 @@ func (s) TestEDSWatch_NACKError(t *testing.T) { // invalid resource receive an error. 
func (s) TestEDSWatch_PartialValid(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. diff --git a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go index 91c92c6fd01a..640ee6fac737 100644 --- a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go @@ -47,14 +47,14 @@ func setupForFederationWatchersTest(t *testing.T) (*e2e.ManagementServer, string overrideFedEnvVar(t) // Start a management server as the default authority. - serverDefaultAuthority, err := e2e.StartManagementServer(nil) + serverDefaultAuthority, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } t.Cleanup(serverDefaultAuthority.Stop) // Start another management server as the other authority. 
- serverNonDefaultAuthority, err := e2e.StartManagementServer(nil) + serverNonDefaultAuthority, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index 59589f6393da..9510c33ad68e 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -197,7 +197,7 @@ func (s) TestLDSWatch(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -327,7 +327,7 @@ func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -413,7 +413,7 @@ func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { // The test is run with both old and new style names. func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. 
@@ -489,7 +489,7 @@ func (s) TestLDSWatch_ResourceCaching(t *testing.T) { firstAckReceived := grpcsync.NewEvent() secondRequestReceived := grpcsync.NewEvent() - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { // The first request has an empty version string. if !firstRequestReceived && req.GetVersionInfo() == "" { @@ -620,7 +620,7 @@ func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // invocation with error) does not take place. func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, err := e2e.StartManagementServer(nil) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } @@ -694,7 +694,7 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // The test is run with both old and new style names. func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -802,7 +802,7 @@ func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { // propagated to the watcher. func (s) TestLDSWatch_NACKError(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. 
@@ -851,7 +851,7 @@ func (s) TestLDSWatch_NACKError(t *testing.T) { // invalid resource receive an error. func (s) TestLDSWatch_PartialValid(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. diff --git a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go index 414fb249b9a7..05e7083cd150 100644 --- a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go @@ -38,7 +38,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { // Start an xDS management server and set the option to allow it to respond // to requests which only specify a subset of the configured resources. - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{AllowResourceSubset: true}) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) defer cleanup() // Create an xDS client with the above bootstrap contents. 
diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go index 3f4dd419f96c..e8f38d41922c 100644 --- a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go @@ -178,7 +178,7 @@ func (s) TestRDSWatch(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -348,7 +348,7 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -434,7 +434,7 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { // The test is run with both old and new style names. func (s) TestRDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. 
@@ -520,7 +520,7 @@ func (s) TestRDSWatch_ResourceCaching(t *testing.T) { firstAckReceived := grpcsync.NewEvent() secondRequestReceived := grpcsync.NewEvent() - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, &e2e.ManagementServerOptions{ + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { // The first request has an empty version string. if !firstRequestReceived && req.GetVersionInfo() == "" { @@ -661,7 +661,7 @@ func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // invocation with error) does not take place. func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, err := e2e.StartManagementServer(nil) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatalf("Failed to spin up the xDS management server: %v", err) } @@ -737,7 +737,7 @@ func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // propagated to the watcher. func (s) TestRDSWatch_NACKError(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. @@ -786,7 +786,7 @@ func (s) TestRDSWatch_NACKError(t *testing.T) { // invalid resource receive an error. func (s) TestRDSWatch_PartialValid(t *testing.T) { overrideFedEnvVar(t) - mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, nil) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup() // Create an xDS client with the above bootstrap contents. 
diff --git a/xds/internal/xdsclient/transport/transport_ack_nack_test.go b/xds/internal/xdsclient/transport/transport_ack_nack_test.go index 3d5740905b82..30b56674ed03 100644 --- a/xds/internal/xdsclient/transport/transport_ack_nack_test.go +++ b/xds/internal/xdsclient/transport/transport_ack_nack_test.go @@ -87,7 +87,7 @@ func (s) TestSimpleAckAndNack(t *testing.T) { // the test goroutine to verify ack version and nonce. streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) - mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { select { case streamRequestCh <- req: @@ -273,7 +273,7 @@ func (s) TestInvalidFirstResponse(t *testing.T) { // the test goroutine to verify ack version and nonce. streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) - mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { select { case streamRequestCh <- req: @@ -403,7 +403,7 @@ func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { // the test goroutine to verify ack version and nonce. 
streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) - mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { select { case streamRequestCh <- req: diff --git a/xds/internal/xdsclient/transport/transport_backoff_test.go b/xds/internal/xdsclient/transport/transport_backoff_test.go index e7c44e1873e4..2753e459c366 100644 --- a/xds/internal/xdsclient/transport/transport_backoff_test.go +++ b/xds/internal/xdsclient/transport/transport_backoff_test.go @@ -62,7 +62,7 @@ func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { // Create an xDS management server listening on a local port. streamErr := errors.New("ADS stream error") - mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ // Push on a channel whenever the stream is closed. OnStreamClosed: func(int64) { select { @@ -206,7 +206,7 @@ func (s) TestTransport_RetriesAfterBrokenStream(t *testing.T) { t.Fatalf("Failed to create a local listener for the xDS management server: %v", err) } lis := testutils.NewRestartableListener(l) - mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ Listener: lis, // Push the received request on to a channel for the test goroutine to // verify that it matches expectations. 
@@ -380,7 +380,7 @@ func (s) TestTransport_ResourceRequestedBeforeStreamCreation(t *testing.T) { lis := testutils.NewRestartableListener(l) streamErr := errors.New("ADS stream error") - mgmtServer, err := e2e.StartManagementServer(&e2e.ManagementServerOptions{ + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ Listener: lis, // Return an error everytime a request is sent on the stream. This From a9709c3f8cfe5f22703b8cfebd83d3996f8fbb33 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 8 Dec 2022 19:44:23 -0500 Subject: [PATCH 703/998] Added logs for reasons causing connection and transport close (#5840) --- clientconn.go | 3 ++ internal/transport/controlbuf.go | 28 +++-------- internal/transport/handler_server.go | 15 +++--- internal/transport/http2_client.go | 19 +++++--- internal/transport/http2_server.go | 69 +++++++++++++--------------- internal/transport/transport.go | 2 +- internal/transport/transport_test.go | 8 ++-- server.go | 6 +-- 8 files changed, 71 insertions(+), 79 deletions(-) diff --git a/clientconn.go b/clientconn.go index 78c81a108ed0..045668904519 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1274,6 +1274,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 0e221af728bc..aaa9c859a349 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -416,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -527,18 +527,6 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil - } - }() for { it, err := l.cbuf.get(true) if err != nil { @@ -582,7 +570,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -670,11 +657,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err + if err == errStreamDrain { // errStreamDrain need not close transport + return nil } - // Other errors(errStreamDrain) need not close transport. 
- return nil + return err } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err @@ -772,7 +758,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -807,7 +793,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index fb272235d817..0c6ada99274d 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -141,12 +141,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +239,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +349,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 
23d6ec6bc497..3e582a2853c6 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -242,8 +242,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if connectCtx.Err() != nil { + if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } conn.Close() } }(conn) @@ -445,10 +448,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -951,6 +952,9 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. 
t.onClose() @@ -1003,11 +1007,14 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 6c8aa403539e..7f1b08f628ef 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -293,7 +294,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +332,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +344,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +362,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -453,7 +451,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } if !isGRPC || headerError { @@ -463,7 +461,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil } // "If :authority is missing, Host must be renamed to :authority." 
- A41 @@ -503,7 +501,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +512,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +528,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +548,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +595,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +628,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -886,10 +881,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
- if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1169,10 +1161,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1188,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1319,19 +1311,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. 
t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 2e615ee20cc5..6cff20c8e022 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -701,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 6cd2201b5919..517cd4015da9 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -343,7 +343,7 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT s.mu.Lock() if s.conns == nil { s.mu.Unlock() - transport.Close() + transport.Close(errors.New("s.conns is nil")) return } s.conns[transport] = true @@ -421,7 +421,7 @@ func (s *server) stop() { s.lis.Close() s.mu.Lock() for c := range s.conns { - c.Close() + c.Close(errors.New("server Stop called")) } s.conns = nil s.mu.Unlock() @@ -1649,8 +1649,8 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) } // Close down both server and client so that their internals can be read without data // races. 
- client.Close(fmt.Errorf("closed manually by test")) - st.Close() + client.Close(errors.New("closed manually by test")) + st.Close(errors.New("closed manually by test")) <-st.readerDone <-st.writerDone <-client.readerDone diff --git a/server.go b/server.go index 8c8a1915008d..7456d6d32bc3 100644 --- a/server.go +++ b/server.go @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1046,7 +1046,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1809,7 +1809,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { From f54bba9af73267dcc87fcc4841dfa1e0de39d332 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 8 Dec 2022 19:21:56 -0800 Subject: [PATCH 704/998] test/xds: minor cleanup in xDS e2e test (#5843) --- test/xds/xds_security_config_nack_test.go | 64 +++++++++++------------ 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/test/xds/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go index 8b292bc006d3..2c5f5aea611f 100644 --- a/test/xds/xds_security_config_nack_test.go +++ b/test/xds/xds_security_config_nack_test.go @@ -41,29 +41,6 @@ func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { missingIdentityProviderInstance = "missing-identity-provider-instance" missingRootProviderInstance = "missing-root-provider-instance" ) - managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, 
e2e.ManagementServerOptions{}) - defer cleanup1() - - lis, cleanup2 := setupGRPCServer(t, bootstrapContents) - defer cleanup2() - - // Grab the host and port of the server and create client side xDS - // resources corresponding to it. - host, port, err := hostPortFromListener(lis) - if err != nil { - t.Fatalf("failed to retrieve host and port of server: %v", err) - } - - // Create xDS resources to be consumed on the client side. This - // includes the listener, route configuration, cluster (with - // security configuration) and endpoint resources. - resources := e2e.DefaultClientResources(e2e.ResourceParams{ - DialTarget: serviceName, - NodeID: nodeID, - Host: host, - Port: port, - SecLevel: e2e.SecurityLevelMTLS, - }) tests := []struct { name string @@ -164,22 +141,41 @@ func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { }, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() for _, test := range tests { t.Run(test.name, func(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS + // resources corresponding to it. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + + // Create xDS resources to be consumed on the client side. This + // includes the listener, route configuration, cluster (with + // security configuration) and endpoint resources. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) + // Create an inbound xDS listener resource for the server side. 
inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) for _, fc := range inboundLis.GetFilterChains() { fc.TransportSocket = test.securityConfig } + resources.Listeners = append(resources.Listeners, inboundLis) - // Setup the management server with client and server resources. - if len(resources.Listeners) == 1 { - resources.Listeners = append(resources.Listeners, inboundLis) - } else { - resources.Listeners[1] = inboundLis - } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -203,10 +199,10 @@ func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { if test.wantErr { timeout = defaultTestShortTimeout } - ctx2, cancel2 := context.WithTimeout(ctx, timeout) - defer cancel2() + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() client := testgrpc.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx2, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { t.Fatalf("EmptyCall() returned err: %v, wantErr %v", err, test.wantErr) } }) From 3e27f89917e8092469db2709c5d726acaaa2396e Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 9 Dec 2022 16:58:12 -0500 Subject: [PATCH 705/998] binarylog: Account for key in metadata truncation (#5851) --- gcp/observability/logging_test.go | 128 ++++++++++++++++++++++++++++ internal/binarylog/method_logger.go | 2 +- 2 files changed, 129 insertions(+), 1 deletion(-) diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index 91c109678818..1489a60ea22e 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -44,6 +44,9 @@ func cmpLoggingEntryList(got []*grpcLogEntry, want []*grpcLogEntry) error { if len(a) > 
len(b) { a, b = b, a } + if len(a) == 0 && len(a) != len(b) { // No metadata for one and the other comparator wants metadata. + return false + } for k, v := range a { if b[k] != v { return false @@ -1145,3 +1148,128 @@ func (s) TestMarshalJSON(t *testing.T) { t.Fatalf("json.Marshal(%v) failed with error: %v", logEntry, err) } } + +// TestMetadataTruncationAccountsKey tests that the metadata truncation takes +// into account both the key and value of metadata. It configures an +// observability system with a maximum byte length for metadata, which is +// greater than just the byte length of the metadata value but less than the +// byte length of the metadata key + metadata value. Thus, in the ClientHeader +// logging event, no metadata should be logged. +func (s) TestMetadataTruncationAccountsKey(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + const mdValue = "value" + configMetadataLimit := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: len(mdValue) + 1, + }, + }, + }, + } + + cleanup, err := setupObservabilitySystemWithConfig(configMetadataLimit) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // the set config MaxMetdataBytes is in between 
len(mdValue) and len("key") + // + len(mdValue), and thus shouldn't log this metadata entry. + md := metadata.MD{ + "key": []string{mdValue}, + } + ctx = metadata.NewOutgoingContext(ctx, md) + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: []byte("00000")}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + MessageLength: 9, + Message: []uint8{}, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index 179f4a26d135..85e3ff2816ae 100644 --- a/internal/binarylog/method_logger.go +++ 
b/internal/binarylog/method_logger.go @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } From 5003029eb684a60a7a84aad3ad498d7bf1e44628 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 13 Dec 2022 10:01:03 -0800 Subject: [PATCH 706/998] testutils: do a better job of verifying pick_first in tests (#5850) --- internal/testutils/pickfirst/pickfirst.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/internal/testutils/pickfirst/pickfirst.go b/internal/testutils/pickfirst/pickfirst.go index ee5bff0d88fb..6ed93948e389 100644 --- a/internal/testutils/pickfirst/pickfirst.go +++ b/internal/testutils/pickfirst/pickfirst.go @@ -41,7 +41,12 @@ import ( func CheckRPCsToBackend(ctx context.Context, cc *grpc.ClientConn, wantAddr resolver.Address) error { client := testgrpc.NewTestServiceClient(cc) peer := &peer.Peer{} - // Make sure the RPC reaches the expected backend once. + // Make sure that 20 RPCs in a row reach the expected backend. Some + // tests switch from round_robin back to pick_first and call this + // function. None of our tests spin up more than 10 backends. So, + // waiting for 20 RPCs to reach a single backend would a decent + // indicator of having switched to pick_first. + count := 0 for { time.Sleep(time.Millisecond) if ctx.Err() != nil { @@ -55,7 +60,12 @@ func CheckRPCsToBackend(ctx context.Context, cc *grpc.ClientConn, wantAddr resol // removed. 
continue } - if peer.Addr.String() == wantAddr.Addr { + if peer.Addr.String() != wantAddr.Addr { + count = 0 + continue + } + count++ + if count > 20 { break } } From 2f413c454850afdb19a685fdee95c5d7701891ab Mon Sep 17 00:00:00 2001 From: Sean Barag Date: Tue, 13 Dec 2022 11:31:23 -0800 Subject: [PATCH 707/998] transport/http2: use HTTP 400 for bad requests instead of 500 (#5804) --- internal/transport/handler_server.go | 30 ++++++++++++++++------- internal/transport/handler_server_test.go | 4 +-- server.go | 3 ++- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index 0c6ada99274d..ebe8bfe330a5 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed time-out: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } diff --git a/internal/transport/handler_server_test.go b/internal/transport/handler_server_test.go index b08dcaaf3c4b..82b4baca58b6 100644 --- a/internal/transport/handler_server_test.go +++ b/internal/transport/handler_server_test.go @@ -63,7 +63,7 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { Method: "GET", Header: http.Header{}, }, - wantErr: "invalid gRPC request method", + wantErr: `invalid gRPC request method "GET"`, }, { 
name: "bad content type", @@ -74,7 +74,7 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { "Content-Type": {"application/foo"}, }, }, - wantErr: "invalid gRPC request content-type", + wantErr: `invalid gRPC request content-type "application/foo"`, }, { name: "not flusher", diff --git a/server.go b/server.go index 7456d6d32bc3..2808b7c83e80 100644 --- a/server.go +++ b/server.go @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { From 9373e5cb26f0e34c1b5fa54f1ba712979b54d661 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 13 Dec 2022 15:44:03 -0500 Subject: [PATCH 708/998] transport: Fix closing a closed channel panic in handlePing (#5854) --- internal/transport/http2_server.go | 25 ++++++------- test/goaway_test.go | 58 ++++++++++++++++++++++++++++++ test/servertester.go | 6 ++++ 3 files changed, 77 insertions(+), 12 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 7f1b08f628ef..37e089bc8433 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -102,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. 
- // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -838,8 +839,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. 
@@ -1287,10 +1288,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1346,7 +1347,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/test/goaway_test.go b/test/goaway_test.go index bcd13ae7da66..48b7f0f7c7ac 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -363,6 +363,7 @@ func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { close(ch2) }() // Loop until the server side GoAway signal is propagated to the client. + for { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { @@ -402,6 +403,7 @@ func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() = %v, want ", stream, err) } + <-ch1 <-ch2 cancel() @@ -707,3 +709,59 @@ func (s) TestGoAwayStreamIDSmallerThanCreatedStreams(t *testing.T) { ct.writeGoAway(1, http2.ErrCodeNo, []byte{}) goAwayWritten.Fire() } + +// TestTwoGoAwayPingFrames tests the scenario where you get two go away ping +// frames from the client during graceful shutdown. This should not crash the +// server. 
+func (s) TestTwoGoAwayPingFrames(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + defer lis.Close() + s := grpc.NewServer() + defer s.Stop() + go s.Serve(lis) + + conn, err := net.DialTimeout("tcp", lis.Addr().String(), defaultTestTimeout) + if err != nil { + t.Fatalf("Failed to dial: %v", err) + } + + st := newServerTesterFromConn(t, conn) + st.greet() + pingReceivedClientSide := testutils.NewChannel() + go func() { + for { + f, err := st.readFrame() + if err != nil { + return + } + switch f.(type) { + case *http2.GoAwayFrame: + case *http2.PingFrame: + pingReceivedClientSide.Send(nil) + default: + t.Errorf("server tester received unexpected frame type %T", f) + } + } + }() + gsDone := testutils.NewChannel() + go func() { + s.GracefulStop() + gsDone.Send(nil) + }() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := pingReceivedClientSide.Receive(ctx); err != nil { + t.Fatalf("Error waiting for ping frame client side from graceful shutdown: %v", err) + } + // Write two goaway pings here. + st.writePing(true, [8]byte{1, 6, 1, 8, 0, 3, 3, 9}) + st.writePing(true, [8]byte{1, 6, 1, 8, 0, 3, 3, 9}) + // Close the conn to finish up the Graceful Shutdown process. 
+ conn.Close() + if _, err := gsDone.Receive(ctx); err != nil { + t.Fatalf("Error waiting for graceful shutdown of the server: %v", err) + } +} diff --git a/test/servertester.go b/test/servertester.go index 9758e8eb6cf8..bf7bd8b214e6 100644 --- a/test/servertester.go +++ b/test/servertester.go @@ -273,3 +273,9 @@ func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) { st.t.Fatalf("Error writing RST_STREAM: %v", err) } } + +func (st *serverTester) writePing(ack bool, data [8]byte) { + if err := st.fr.WritePing(ack, data); err != nil { + st.t.Fatalf("Error writing PING: %v", err) + } +} From e53d28f5eb19a6816d4bb5c3db1b793ac2ba3e49 Mon Sep 17 00:00:00 2001 From: Mohan Li <67390330+mohanli-ml@users.noreply.github.com> Date: Wed, 14 Dec 2022 09:05:38 -0800 Subject: [PATCH 709/998] xdsclient: log node ID with verbosity INFO (#5860) --- xds/internal/xdsclient/singleton.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index c07dd4323f76..408a27cf6279 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -90,6 +90,11 @@ func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { singletonClient.clientImpl = c singletonClient.refCount++ singletonClientImplCreateHook() + nodeID := "" + if node, ok := config.XDSServer.NodeProto.(interface{ GetId() string }); ok { + nodeID = node.GetId() + } + logger.Infof("xDS node ID: %s", nodeID) return &onceClosingClient{XDSClient: singletonClient}, nil } From ae86ff40e723905d1d7f084405a7ed6e9a5265e1 Mon Sep 17 00:00:00 2001 From: Antoine Tollenaere Date: Thu, 15 Dec 2022 18:49:58 +0100 Subject: [PATCH 710/998] benchmark: fix typo in ClientReadBufferSize feature name (#5867) --- benchmark/stats/stats.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/stats/stats.go b/benchmark/stats/stats.go index 7f4db236277e..f5d8666648df 100644 --- 
a/benchmark/stats/stats.go +++ b/benchmark/stats/stats.go @@ -209,7 +209,7 @@ func (f Features) partialString(b *bytes.Buffer, wantFeatures []bool, sep, delim case EnablePreloaderIndex: b.WriteString(fmt.Sprintf("Preloader%v%v%v", sep, f.EnablePreloader, delim)) case ClientReadBufferSize: - b.WriteString(fmt.Sprintf("ClientReadBufferSize%v%v%v", sep, f.ClientWriteBufferSize, delim)) + b.WriteString(fmt.Sprintf("ClientReadBufferSize%v%v%v", sep, f.ClientReadBufferSize, delim)) case ClientWriteBufferSize: b.WriteString(fmt.Sprintf("ClientWriteBufferSize%v%v%v", sep, f.ClientWriteBufferSize, delim)) case ServerReadBufferSize: From a0e8eb9dc4111cd4d8dc52bc89cd733e5630fc0c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 16 Dec 2022 09:45:36 -0800 Subject: [PATCH 711/998] test: rename race.go to race_test.go (#5869) --- test/{race.go => race_test.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/{race.go => race_test.go} (100%) diff --git a/test/race.go b/test/race_test.go similarity index 100% rename from test/race.go rename to test/race_test.go From 357d7afc43fe0df74205b797525818fca8cd8f53 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 16 Dec 2022 13:26:17 -0800 Subject: [PATCH 712/998] Change version to 1.53.0-dev (#5872) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 02de108d42f0..39c349fcbcd4 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.52.0-dev" +const Version = "1.53.0-dev" From 81ad1b550fc4d50baec7dd4df98c66681469c687 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 16 Dec 2022 15:04:12 -0800 Subject: [PATCH 713/998] *: update all dependencies (#5874) --- gcp/observability/go.mod | 20 +++++++-------- gcp/observability/go.sum | 53 ++++++++++++++++++++++++++++------------ test/tools/go.mod | 5 ++-- test/tools/go.sum | 15 ++++++------ 4 files changed, 57 insertions(+), 36 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index fb4828fb8345..0428a7bf5869 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -3,37 +3,37 @@ module google.golang.org/grpc/gcp/observability go 1.17 require ( - cloud.google.com/go/logging v1.4.2 + cloud.google.com/go/logging v1.6.1 contrib.go.opencensus.io/exporter/stackdriver v0.13.12 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - google.golang.org/grpc v1.50.1 + google.golang.org/grpc v1.51.0 ) require ( - cloud.google.com/go v0.105.0 // indirect - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect + cloud.google.com/go v0.107.0 // indirect + cloud.google.com/go/compute v1.14.0 // indirect + cloud.google.com/go/compute/metadata v0.2.2 // indirect cloud.google.com/go/longrunning v0.3.0 // indirect cloud.google.com/go/monitoring v1.8.0 // indirect cloud.google.com/go/trace v1.4.0 // indirect - github.com/aws/aws-sdk-go v1.37.0 // indirect + github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 
// indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.6.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect golang.org/x/net v0.4.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect - google.golang.org/api v0.102.0 // indirect + google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 1967b291fa34..d10bc57e88a5 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -25,18 +25,21 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0 
h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= @@ -79,6 +82,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= @@ -107,11 +111,14 @@ cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLq cloud.google.com/go/compute v1.7.0/go.mod 
h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= +cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= @@ -140,6 +147,7 @@ cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4c cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= cloud.google.com/go/datastream v1.2.0/go.mod 
h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= @@ -161,12 +169,14 @@ cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1 cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= @@ -186,6 +196,7 @@ cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZ cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= cloud.google.com/go/gsuiteaddons v1.4.0/go.mod 
h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= @@ -196,6 +207,7 @@ cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+ cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= @@ -204,8 +216,8 @@ cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiP cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/logging v1.4.2 h1:Mu2Q75VBDQlW1HlBMjTX4X84UFR73G1TiLlRYc/b7tA= -cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo= +cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0 
h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= @@ -261,6 +273,9 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -315,6 +330,7 @@ cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5 cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= @@ -374,8 +390,9 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= +github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -399,8 +416,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -480,8 +498,9 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -618,6 +637,7 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod 
h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -630,7 +650,6 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -698,7 +717,6 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -726,10 +744,12 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -747,6 +767,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -827,7 +849,6 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= @@ -858,8 +879,9 @@ google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -907,9 
+929,7 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -975,8 +995,11 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto 
v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd h1:OjndDrsik+Gt+e6fs45z9AxiewiKyLKYpA45W5Kpkks= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/test/tools/go.mod b/test/tools/go.mod index d61d0e14551f..2e4cc207cd31 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -6,10 +6,9 @@ require ( github.com/BurntSushi/toml v1.2.1 // indirect github.com/client9/misspell v0.3.4 github.com/golang/protobuf v1.5.2 - golang.org/x/exp/typeparams v0.0.0-20221114191408-850992195362 // indirect + golang.org/x/exp/typeparams v0.0.0-20221215174704-0915cd710c24 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/sys v0.3.0 // indirect - golang.org/x/tools v0.3.0 + golang.org/x/tools v0.4.0 google.golang.org/protobuf v1.28.1 // indirect honnef.co/go/tools v0.3.3 ) diff --git a/test/tools/go.sum b/test/tools/go.sum index ca57c49e5c5d..23431a9dc1bd 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -14,8 +14,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20221114191408-850992195362 
h1:rUI77tLrgYyDn6S179yfuWoysXhXQTHvvlRfOOS9ffw= -golang.org/x/exp/typeparams v0.0.0-20221114191408-850992195362/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20221215174704-0915cd710c24 h1:+iZuikSm1jIhtO1dsw9jQcYCoGFEDDVXp236qRsnqK4= +golang.org/x/exp/typeparams v0.0.0-20221215174704-0915cd710c24/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -27,7 +27,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -41,24 +41,23 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= From 70617b11fae57b9d9f4a405c8d2ca783ca5a1707 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 16 Dec 2022 16:37:31 -0800 Subject: [PATCH 714/998] vet & github: run vet separately from tests; make vet-proto only check protos (#5873) --- .github/workflows/testing.yml | 11 ++++++++--- vet.sh | 18 +++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index c351b0942de6..cf831894ec63 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -19,6 +19,8 @@ jobs: vet-proto: runs-on: ubuntu-latest timeout-minutes: 20 + env: + VET_ONLY_PROTO: 1 steps: # Setup the environment. - name: Setup Go @@ -43,7 +45,10 @@ jobs: fail-fast: false matrix: include: - - type: vet+tests + - type: vet + goversion: 1.19 + + - type: tests goversion: 1.19 - type: tests @@ -93,13 +98,13 @@ jobs: # Only run vet for 'vet' runs. - name: Run vet.sh - if: startsWith(matrix.type, 'vet') + if: matrix.type == 'vet' run: ./vet.sh -install && ./vet.sh # Main tests run for everything except when testing "extras" # (where we run a reduced set of tests). - name: Run tests - if: contains(matrix.type, 'tests') + if: matrix.type == 'tests' run: | go version go test ${{ matrix.testflags }} -cpu 1,4 -timeout 7m google.golang.org/grpc/... diff --git a/vet.sh b/vet.sh index 1d03c0914810..2f0850f845da 100755 --- a/vet.sh +++ b/vet.sh @@ -66,6 +66,17 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. 
+if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. # (Done in two parts because Darwin "git grep" has broken support for compound # exclusion matches.) @@ -93,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*. misspell -error . -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - # - gofmt, goimports, golint (with exceptions for generated code), go vet, # go mod tidy. # Perform these checks on each module inside gRPC. From 54b7d03e0ff0adee3b18ea65af9ac8f849accf47 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 16 Dec 2022 20:02:04 -0500 Subject: [PATCH 715/998] grpc: Add join Dial Option (#5861) --- default_dial_option_server_option_test.go | 36 +++++++++++++++++++++++ dialoptions.go | 15 ++++++++++ internal/internal.go | 3 ++ 3 files changed, 54 insertions(+) diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go index eecd6b846f28..c6cdd7c84838 100644 --- a/default_dial_option_server_option_test.go +++ b/default_dial_option_server_option_test.go @@ -80,3 +80,39 @@ func (s) TestAddExtraServerOptions(t *testing.T) { t.Fatalf("Unexpected len of extraServerOptions: %d != 0", len(extraServerOptions)) } } + +// TestJoinDialOption tests the join dial option. It configures a joined dial +// option with three individual dial options, and verifies that all three are +// successfully applied. 
+func (s) TestJoinDialOption(t *testing.T) { + const maxRecvSize = 998765 + const initialWindowSize = 100 + jdo := newJoinDialOption(WithTransportCredentials(insecure.NewCredentials()), WithReadBufferSize(maxRecvSize), WithInitialWindowSize(initialWindowSize)) + cc, err := Dial("fake", jdo) + if err != nil { + t.Fatalf("Dialing with insecure credentials failed: %v", err) + } + defer cc.Close() + if cc.dopts.copts.ReadBufferSize != maxRecvSize { + t.Fatalf("Unexpected cc.dopts.copts.ReadBufferSize: %d != %d", cc.dopts.copts.ReadBufferSize, maxRecvSize) + } + if cc.dopts.copts.InitialWindowSize != initialWindowSize { + t.Fatalf("Unexpected cc.dopts.copts.InitialWindowSize: %d != %d", cc.dopts.copts.InitialWindowSize, initialWindowSize) + } +} + +// TestJoinDialOption tests the join server option. It configures a joined +// server option with three individual server options, and verifies that all +// three are successfully applied. +func (s) TestJoinServerOption(t *testing.T) { + const maxRecvSize = 998765 + const initialWindowSize = 100 + jso := newJoinServerOption(Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize), InitialWindowSize(initialWindowSize)) + s := NewServer(jso) + if s.opts.maxReceiveMessageSize != maxRecvSize { + t.Fatalf("Unexpected s.opts.maxReceiveMessageSize: %d != %d", s.opts.maxReceiveMessageSize, maxRecvSize) + } + if s.opts.initialWindowSize != initialWindowSize { + t.Fatalf("Unexpected s.opts.initialWindowSize: %d != %d", s.opts.initialWindowSize, initialWindowSize) + } +} diff --git a/dialoptions.go b/dialoptions.go index 8f5b536f11eb..4866da101c60 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -44,6 +44,7 @@ func init() { extraDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption } // dialOptions configure a Dial call. 
dialOptions are set by the DialOption @@ -111,6 +112,20 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is diff --git a/internal/internal.go b/internal/internal.go index fd0ee3dcaf1e..0a76d9de6e02 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -77,6 +77,9 @@ var ( // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption From b2d4d5dbae052c23959f5adb63eae94e6bd6a893 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Mon, 19 Dec 2022 12:57:49 -0600 Subject: [PATCH 716/998] test: fix raceyness check to deflake test http server (#5866) Fixes https://github.com/grpc/grpc-go/issues/4990 --- test/end2end_test.go | 104 +++++++++++++++++++++++++++++-------------- 1 file changed, 71 insertions(+), 33 deletions(-) diff --git a/test/end2end_test.go b/test/end2end_test.go index 61907e4ca903..ae536520fc09 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -888,6 +888,32 @@ type lazyConn struct { beLazy int32 } +// possible conn closed errors. 
+const possibleConnResetMsg = "connection reset by peer" +const possibleEOFMsg = "error reading from server: EOF" + +// isConnClosedErr checks the error msg for possible conn closed messages. There +// is a raceyness in the timing of when TCP packets are sent from client to +// server, and when we tell the server to stop, so we need to check for both of +// these possible error messages: +// 1. If the call to ss.S.Stop() causes the server's sockets to close while +// there's still in-fight data from the client on the TCP connection, then +// the kernel can send an RST back to the client (also see +// https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c). +// Note that while this condition is expected to be rare due to the +// test httpServer start synchronization, in theory it should be possible, +// e.g. if the client sends a BDP ping at the right time. +// 2. If, for example, the call to ss.S.Stop() happens after the RPC headers +// have been received at the server, then the TCP connection can shutdown +// gracefully when the server's socket closes. +// 3. If there is an actual io.EOF received because the client stopped the stream. 
+func isConnClosedErr(err error) bool { + errContainsConnResetMsg := strings.Contains(err.Error(), possibleConnResetMsg) + errContainsEOFMsg := strings.Contains(err.Error(), possibleEOFMsg) + + return errContainsConnResetMsg || errContainsEOFMsg || err == io.EOF +} + func (l *lazyConn) Write(b []byte) (int, error) { if atomic.LoadInt32(&(l.beLazy)) == 1 { time.Sleep(time.Second) @@ -1013,18 +1039,7 @@ func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // The precise behavior of this test is subject to raceyness around the timing of when TCP packets - // are sent from client to server, and when we tell the server to stop, so we need to account for both - // of these possible error messages: - // 1) If the call to ss.S.Stop() causes the server's sockets to close while there's still in-fight - // data from the client on the TCP connection, then the kernel can send an RST back to the client (also - // see https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c). Note that while this - // condition is expected to be rare due to the rpcStartedOnServer synchronization, in theory it should - // be possible, e.g. if the client sends a BDP ping at the right time. - // 2) If, for example, the call to ss.S.Stop() happens after the RPC headers have been received at the - // server, then the TCP connection can shutdown gracefully when the server's socket closes. - const possibleConnResetMsg = "connection reset by peer" - const possibleEOFMsg = "error reading from server: EOF" + // Start an RPC. Then, while the RPC is still being accepted or handled at the server, abruptly // stop the server, killing the connection. The RPC error message should include details about the specific // connection error that was encountered. 
@@ -1037,7 +1052,10 @@ func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { // the RPC has been started on it. <-rpcStartedOnServer ss.S.Stop() - if _, err := stream.Recv(); err == nil || (!strings.Contains(err.Error(), possibleConnResetMsg) && !strings.Contains(err.Error(), possibleEOFMsg)) { + // The precise behavior of this test is subject to raceyness around the timing + // of when TCP packets are sent from client to server, and when we tell the + // server to stop, so we need to account for both possible error messages. + if _, err := stream.Recv(); err == io.EOF || !isConnClosedErr(err) { t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg) } close(rpcDoneOnClient) @@ -6739,31 +6757,37 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) { // Non-gRPC content-type fallback path. for httpCode := range transport.HTTPStatusConvTab { - doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ + if err := doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ ":status", fmt.Sprintf("%d", httpCode), "content-type", "text/html", // non-gRPC content type to switch to HTTP mode. "grpc-status", "1", // Make up a gRPC status error "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }) + }); err != nil { + t.Error(err) + } } // Missing content-type fallback path. for httpCode := range transport.HTTPStatusConvTab { - doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ + if err := doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ ":status", fmt.Sprintf("%d", httpCode), // Omitting content type to switch to HTTP mode. 
"grpc-status", "1", // Make up a gRPC status error "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }) + }); err != nil { + t.Error(err) + } } // Malformed HTTP status when fallback. - doHTTPHeaderTest(t, codes.Internal, []string{ + if err := doHTTPHeaderTest(t, codes.Internal, []string{ ":status", "abc", // Omitting content type to switch to HTTP mode. "grpc-status", "1", // Make up a gRPC status error "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }) + }); err != nil { + t.Error(err) + } } // Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame). @@ -6809,18 +6833,22 @@ func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { errCode: codes.Unavailable, }, } { - doHTTPHeaderTest(t, test.errCode, test.header) + if err := doHTTPHeaderTest(t, test.errCode, test.header); err != nil { + t.Error(err) + } } } // Testing non-Trailers-only Trailers (delivered in second HEADERS frame) func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { - for _, test := range []struct { + tests := []struct { + name string responseHeader []string trailer []string errCode codes.Code }{ { + name: "trailer missing grpc-status", responseHeader: []string{ ":status", "200", "content-type", "application/grpc", @@ -6832,6 +6860,7 @@ func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { errCode: codes.Unavailable, }, { + name: "malformed grpc-status-details-bin field with status 404", responseHeader: []string{ ":status", "404", "content-type", "application/grpc", @@ -6844,6 +6873,7 @@ func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { errCode: codes.Unimplemented, }, { + name: "malformed grpc-status-details-bin field with status 200", responseHeader: []string{ ":status", "200", "content-type", "application/grpc", @@ -6855,8 +6885,14 @@ func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { }, errCode: codes.Internal, }, - } { - 
doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer) + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer); err != nil { + t.Error(err) + } + }) + } } @@ -6865,7 +6901,9 @@ func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { ":status", "200", "content-type", "application/grpc", } - doHTTPHeaderTest(t, codes.Internal, header, header, header) + if err := doHTTPHeaderTest(t, codes.Internal, header, header, header); err != nil { + t.Fatal(err) + } } type httpServerResponse struct { @@ -6930,14 +6968,14 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { } writer.Flush() // necessary since client is expecting preface before declaring connection fully setup. var sid uint32 - // Loop until conn is closed and framer returns io.EOF + // Loop until framer returns possible conn closed errors. for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) { // Read frames until a header is received. for { frame, err := framer.ReadFrame() if err != nil { - if err != io.EOF { - t.Errorf("Error at server-side while reading frame. Err: %v", err) + if !isConnClosedErr(err) { + t.Errorf("Error at server-side while reading frame. got: %q, want: rpc error containing substring %q OR %q", err, possibleConnResetMsg, possibleEOFMsg) } return } @@ -6994,11 +7032,10 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { }() } -func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) { - t.Helper() +func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) error { lis, err := net.Listen("tcp", "localhost:0") if err != nil { - t.Fatalf("Failed to listen. 
Err: %v", err) + return fmt.Errorf("listening on %q: %v", "localhost:0", err) } defer lis.Close() server := &httpServer{ @@ -7007,7 +7044,7 @@ func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string server.start(t, lis) cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { - t.Fatalf("failed to dial due to err: %v", err) + return fmt.Errorf("dial(%q): %v", lis.Addr().String(), err) } defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -7015,11 +7052,12 @@ func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string client := testpb.NewTestServiceClient(cc) stream, err := client.FullDuplexCall(ctx) if err != nil { - t.Fatalf("error creating stream due to err: %v", err) + return fmt.Errorf("creating FullDuplex stream: %v", err) } if _, err := stream.Recv(); err == nil || status.Code(err) != errCode { - t.Fatalf("stream.Recv() = _, %v, want error code: %v", err, errCode) + return fmt.Errorf("stream.Recv() = %v, want error code: %v", err, errCode) } + return nil } func (s) TestClientCancellationPropagatesUnary(t *testing.T) { From 4f16fbe410f7abb12190c349950ccb8d41873f92 Mon Sep 17 00:00:00 2001 From: Theodore Salvo Date: Mon, 19 Dec 2022 19:34:28 -0500 Subject: [PATCH 717/998] examples: update server reflection tutorial (#5824) Fixes https://github.com/grpc/grpc-go/issues/4593 --- Documentation/server-reflection-tutorial.md | 100 ++++++++++++-------- 1 file changed, 61 insertions(+), 39 deletions(-) diff --git a/Documentation/server-reflection-tutorial.md b/Documentation/server-reflection-tutorial.md index 9f26656f22b2..6c7dc6cd6a5f 100644 --- a/Documentation/server-reflection-tutorial.md +++ b/Documentation/server-reflection-tutorial.md @@ -2,8 +2,9 @@ gRPC Server Reflection provides information about publicly-accessible gRPC services on a server, and assists clients at runtime to construct RPC requests -and 
responses without precompiled service information. It is used by gRPC CLI, -which can be used to introspect server protos and send/receive test RPCs. +and responses without precompiled service information. It is used by +[gRPCurl](https://github.com/fullstorydev/grpcurl), which can be used to +introspect server protos and send/receive test RPCs. ## Enable Server Reflection @@ -39,36 +40,41 @@ make the following changes: An example server with reflection registered can be found at `examples/features/reflection/server`. -## gRPC CLI +## gRPCurl -After enabling Server Reflection in a server application, you can use gRPC CLI -to check its services. gRPC CLI is only available in c++. Instructions on how to -build and use gRPC CLI can be found at -[command_line_tool.md](https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md). +After enabling Server Reflection in a server application, you can use gRPCurl +to check its services. gRPCurl is built with Go and has packages available. +Instructions on how to install and use gRPCurl can be found at +[gRPCurl Installation](https://github.com/fullstorydev/grpcurl#installation). -## Use gRPC CLI to check services +## Use gRPCurl to check services First, start the helloworld server in grpc-go directory: ```sh -$ cd -$ go run examples/features/reflection/server/main.go +$ cd /examples +$ go run features/reflection/server/main.go ``` -Open a new terminal and make sure you are in the directory where grpc_cli lives: - +output: ```sh -$ cd /bins/opt +server listening at [::]:50051 ``` -### List services +After installing gRPCurl, open a new terminal and run the commands from the new +terminal. + +**NOTE:** gRPCurl expects a TLS-encrypted connection by default. For all of +the commands below, use the `-plaintext` flag to use an unencrypted connection. 
-`grpc_cli ls` command lists services and methods exposed at a given port: +### List services and methods + +The `list` command lists services exposed at a given port: - List all the services exposed at a given port ```sh - $ ./grpc_cli ls localhost:50051 + $ grpcurl -plaintext localhost:50051 list ``` output: @@ -78,72 +84,88 @@ $ cd /bins/opt helloworld.Greeter ``` -- List one service with details +- List all the methods of a service - `grpc_cli ls` command inspects a service given its full name (in the format of - \.\). It can print information with a long listing format - when `-l` flag is set. This flag can be used to get more details about a - service. + The `list` command lists methods given the full service name (in the format of + \.\). ```sh - $ ./grpc_cli ls localhost:50051 helloworld.Greeter -l + $ grpcurl -plaintext localhost:50051 list helloworld.Greeter ``` output: ```sh - filename: helloworld.proto - package: helloworld; - service Greeter { - rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} - } + helloworld.Greeter.SayHello + ``` + +### Describe services and methods + +- Describe all services + The `describe` command inspects a service given its full name (in the format + of \.\). + + ```sh + $ grpcurl -plaintext localhost:50051 describe helloworld.Greeter ``` -### List methods + output: + ```sh + helloworld.Greeter is a service: + service Greeter { + rpc SayHello ( .helloworld.HelloRequest ) returns ( .helloworld.HelloReply ); + } + ``` -- List one method with details +- Describe all methods of a service - `grpc_cli ls` command also inspects a method given its full name (in the - format of \.\.\). + The `describe` command inspects a method given its full name (in the format of + \.\.\). 
```sh - $ ./grpc_cli ls localhost:50051 helloworld.Greeter.SayHello -l + $ grpcurl -plaintext localhost:50051 describe helloworld.Greeter.SayHello ``` output: ```sh - rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + helloworld.Greeter.SayHello is a method: + rpc SayHello ( .helloworld.HelloRequest ) returns ( .helloworld.HelloReply ); ``` ### Inspect message types -We can use`grpc_cli type` command to inspect request/response types given the +We can use the `describe` command to inspect request/response types given the full name of the type (in the format of \.\). - Get information about the request type ```sh - $ ./grpc_cli type localhost:50051 helloworld.HelloRequest + $ grpcurl -plaintext localhost:50051 describe helloworld.HelloRequest ``` output: ```sh + helloworld.HelloRequest is a message: message HelloRequest { - optional string name = 1[json_name = "name"]; + string name = 1; } ``` ### Call a remote method -We can send RPCs to a server and get responses using `grpc_cli call` command. +We can send RPCs to a server and get responses using the full method name (in +the format of \.\.\). The `-d ` flag +represents the request data and the `-format text` flag indicates that the +request data is in text format. 
- Call a unary method ```sh - $ ./grpc_cli call localhost:50051 SayHello "name: 'gRPC CLI'" + $ grpcurl -plaintext -format text -d 'name: "gRPCurl"' \ + localhost:50051 helloworld.Greeter.SayHello ``` output: ```sh - message: "Hello gRPC CLI" + message: "Hello gRPCurl" ``` From 68b388b26f1004c4d17b867cf223f4b7a96cf2fd Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 20 Dec 2022 15:13:02 -0800 Subject: [PATCH 718/998] balancer: support injection of per-call metadata from LB policies (#5853) --- balancer/balancer.go | 8 +++ clientconn.go | 2 +- picker_wrapper.go | 28 +++++---- stream.go | 37 +++++++++--- test/balancer_test.go | 136 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 190 insertions(+), 21 deletions(-) diff --git a/balancer/balancer.go b/balancer/balancer.go index 392b21fb2d8e..09d61dd1b55b 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -279,6 +279,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metatada metadata.MD } // TransientFailureError returns e. 
It exists for backward compatibility and diff --git a/clientconn.go b/clientconn.go index 045668904519..8402c19e3e9f 100644 --- a/clientconn.go +++ b/clientconn.go @@ -934,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, diff --git a/picker_wrapper.go b/picker_wrapper.go index a5d5516ee060..c525dc070fc6 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -58,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { acw.mu.Lock() ac := acw.ac acw.mu.Unlock() ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -82,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. 
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error @@ -90,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -111,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -125,7 +131,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue @@ -136,7 +141,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, nil, dropError{error: err} + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -144,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -154,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/stream.go b/stream.go index 0f8e6c0149da..175aee9583ea 100644 --- a/stream.go +++ b/stream.go @@ -438,7 +438,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -455,6 +455,25 @@ func (a *csAttempt) getTransport() error { func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metatada != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. 
+ // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metatada) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) @@ -529,12 +548,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -1103,12 +1122,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, diff --git a/test/balancer_test.go b/test/balancer_test.go index c919f1e0f7c4..bd782ffa6e4f 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -866,3 +866,139 @@ func (s) TestAuthorityInBuildOptions(t *testing.T) { }) } } + +// wrappedPickFirstBalancerBuilder builds a custom balancer which wraps an +// underlying pick_first balancer. 
+type wrappedPickFirstBalancerBuilder struct { + name string +} + +func (*wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, + } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb +} + +func (wbb *wrappedPickFirstBalancerBuilder) Name() string { + return wbb.name +} + +// wrappedPickFirstBalancer contains a pick_first balancer and forwards all +// calls from the ClientConn to it. For state updates from the pick_first +// balancer, it creates a custom picker which injects arbitrary metadata on a +// per-call basis. +type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + state.Picker = &wrappedPicker{p: state.Picker} + wb.ClientConn.UpdateState(state) +} + +const ( + metadataHeaderInjectedByBalancer = "metadata-header-injected-by-balancer" + metadataHeaderInjectedByApplication = "metadata-header-injected-by-application" + metadataValueInjectedByBalancer = "metadata-value-injected-by-balancer" + metadataValueInjectedByApplication = "metadata-value-injected-by-application" +) + +// wrappedPicker wraps the picker returned by the pick_first +type wrappedPicker struct { + p balancer.Picker +} + +func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + res, err := wp.p.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + + if res.Metatada == nil { + res.Metatada = metadata.Pairs(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) + } else { + res.Metatada.Append(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) + } + return res, nil +} + +// TestMetadataInPickResult tests the scenario where an LB policy inject +// arbitrary metadata on a per-call basis and verifies that the injected +// metadata makes it 
all the way to the server RPC handler. +func (s) TestMetadataInPickResult(t *testing.T) { + t.Log("Starting test backend...") + mdChan := make(chan metadata.MD, 1) + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + select { + case mdChan <- md: + case <-ctx.Done(): + return nil, ctx.Err() + } + return &testpb.Empty{}, nil + }, + } + if err := ss.StartServer(); err != nil { + t.Fatalf("Starting test backend: %v", err) + } + defer ss.Stop() + t.Logf("Started test backend at %q", ss.Address) + + name := t.Name() + "wrappedPickFirstBalancer" + t.Logf("Registering test balancer with name %q...", name) + b := &wrappedPickFirstBalancerBuilder{name: t.Name() + "wrappedPickFirstBalancer"} + balancer.Register(b) + + t.Log("Creating ClientConn to test backend...") + r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address}}}) + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, b.Name())), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) 
+ if err != nil { + t.Fatalf("grpc.Dial(): %v", err) + } + defer cc.Close() + tc := testpb.NewTestServiceClient(cc) + + t.Log("Making EmptyCall() RPC with custom metadata...") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + md := metadata.Pairs(metadataHeaderInjectedByApplication, metadataValueInjectedByApplication) + ctx = metadata.NewOutgoingContext(ctx, md) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() RPC: %v", err) + } + t.Log("EmptyCall() RPC succeeded") + + t.Log("Waiting for custom metadata to be received at the test backend...") + var gotMD metadata.MD + select { + case gotMD = <-mdChan: + case <-ctx.Done(): + t.Fatalf("Timed out waiting for custom metadata to be received at the test backend") + } + + t.Log("Verifying custom metadata added by the client application is received at the test backend...") + wantMDVal := []string{metadataValueInjectedByApplication} + gotMDVal := gotMD.Get(metadataHeaderInjectedByApplication) + if !cmp.Equal(gotMDVal, wantMDVal) { + t.Fatalf("Mismatch in custom metadata received at test backend, got: %v, want %v", gotMDVal, wantMDVal) + } + + t.Log("Verifying custom metadata added by the LB policy is received at the test backend...") + wantMDVal = []string{metadataValueInjectedByBalancer} + gotMDVal = gotMD.Get(metadataHeaderInjectedByBalancer) + if !cmp.Equal(gotMDVal, wantMDVal) { + t.Fatalf("Mismatch in custom metadata received at test backend, got: %v, want %v", gotMDVal, wantMDVal) + } +} From f94594d587d5442778b970bccc478f347b51810f Mon Sep 17 00:00:00 2001 From: apolcyn Date: Tue, 20 Dec 2022 15:43:18 -0800 Subject: [PATCH 719/998] interop: add test client for use in xDS federation e2e tests (#5878) --- interop/test_utils.go | 15 ++-- interop/xds_federation/client.go | 126 +++++++++++++++++++++++++++++++ 2 files changed, 133 insertions(+), 8 deletions(-) create mode 100644 interop/xds_federation/client.go diff --git 
a/interop/test_utils.go b/interop/test_utils.go index 8a2baceb80c0..50bd2010ffa4 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -746,29 +746,28 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D if p.Addr != nil { addrStr = p.Addr.String() } - fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s failed: %s\n", i, latencyMs, addrStr, err) + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s server_uri: %s failed: %s\n", i, latencyMs, addrStr, serverAddr, err) <-earliestNextStart continue } if latency > perIterationMaxAcceptableLatency { totalFailures++ - fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s exceeds max acceptable latency: %d\n", i, latencyMs, p.Addr.String(), perIterationMaxAcceptableLatency.Milliseconds()) + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s server_uri: %s exceeds max acceptable latency: %d\n", i, latencyMs, p.Addr.String(), serverAddr, perIterationMaxAcceptableLatency.Milliseconds()) <-earliestNextStart continue } - fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s succeeded\n", i, latencyMs, p.Addr.String()) + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s server_uri: %s succeeded\n", i, latencyMs, p.Addr.String(), serverAddr) <-earliestNextStart } var b bytes.Buffer h.Print(&b) - fmt.Fprintln(os.Stderr, "Histogram of per-iteration latencies in milliseconds:") - fmt.Fprintln(os.Stderr, b.String()) - fmt.Fprintf(os.Stderr, "soak test ran: %d / %d iterations. total failures: %d. max failures threshold: %d. See breakdown above for which iterations succeeded, failed, and why for more info.\n", iterationsDone, soakIterations, totalFailures, maxFailures) + fmt.Fprintf(os.Stderr, "(server_uri: %s) histogram of per-iteration latencies in milliseconds: %s\n", serverAddr, b.String()) + fmt.Fprintf(os.Stderr, "(server_uri: %s) soak test ran: %d / %d iterations. total failures: %d. 
max failures threshold: %d. See breakdown above for which iterations succeeded, failed, and why for more info.\n", serverAddr, iterationsDone, soakIterations, totalFailures, maxFailures) if iterationsDone < soakIterations { - logger.Fatalf("soak test consumed all %f seconds of time and quit early, only having ran %d out of desired %d iterations.", overallDeadline.Sub(start).Seconds(), iterationsDone, soakIterations) + logger.Fatalf("(server_uri: %s) soak test consumed all %f seconds of time and quit early, only having ran %d out of desired %d iterations.", serverAddr, overallDeadline.Sub(start).Seconds(), iterationsDone, soakIterations) } if totalFailures > maxFailures { - logger.Fatalf("soak test total failures: %d exceeds max failures threshold: %d.", totalFailures, maxFailures) + logger.Fatalf("(server_uri: %s) soak test total failures: %d exceeds max failures threshold: %d.", serverAddr, totalFailures, maxFailures) } } diff --git a/interop/xds_federation/client.go b/interop/xds_federation/client.go new file mode 100644 index 000000000000..31ec9bba7a36 --- /dev/null +++ b/interop/xds_federation/client.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an interop client. 
+package main + +import ( + "flag" + "strings" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + + _ "google.golang.org/grpc/balancer/grpclb" // Register the grpclb load balancing policy. + _ "google.golang.org/grpc/balancer/rls" // Register the RLS load balancing policy. + _ "google.golang.org/grpc/xds/googledirectpath" // Register xDS resolver required for c2p directpath. + + testgrpc "google.golang.org/grpc/interop/grpc_testing" +) + +const ( + computeEngineCredsName = "compute_engine_channel_creds" + insecureCredsName = "INSECURE_CREDENTIALS" +) + +var ( + serverURIs = flag.String("server_uris", "", "Comma-separated list of sever URIs to make RPCs to") + credentialsTypes = flag.String("credentials_types", "", "Comma-separated list of credentials, each entry is used for the server of the corresponding index in server_uris. Supported values: compute_engine_channel_creds, INSECURE_CREDENTIALS") + soakIterations = flag.Int("soak_iterations", 10, "The number of iterations to use for the two soak tests: rpc_soak and channel_soak") + soakMaxFailures = flag.Int("soak_max_failures", 0, "The number of iterations in soak tests that are allowed to fail (either due to non-OK status code or exceeding the per-iteration max acceptable latency).") + soakPerIterationMaxAcceptableLatencyMs = flag.Int("soak_per_iteration_max_acceptable_latency_ms", 1000, "The number of milliseconds a single iteration in the two soak tests (rpc_soak and channel_soak) should take.") + soakOverallTimeoutSeconds = flag.Int("soak_overall_timeout_seconds", 10, "The overall number of seconds after which a soak test should stop and fail, if the desired number of iterations have not yet completed.") + soakMinTimeMsBetweenRPCs = flag.Int("soak_min_time_ms_between_rpcs", 0, "The minimum time in milliseconds between consecutive RPCs in a soak test 
(rpc_soak or channel_soak), useful for limiting QPS") + testCase = flag.String("test_case", "rpc_soak", + `Configure different test cases. Valid options are: + rpc_soak: sends --soak_iterations large_unary RPCs; + channel_soak: sends --soak_iterations RPCs, rebuilding the channel each time`) + + logger = grpclog.Component("interop") +) + +type clientConfig struct { + tc testgrpc.TestServiceClient + opts []grpc.DialOption + uri string +} + +func main() { + flag.Parse() + // validate flags + uris := strings.Split(*serverURIs, ",") + creds := strings.Split(*credentialsTypes, ",") + if len(uris) != len(creds) { + logger.Fatalf("Number of entries in --server_uris (%d) != number of entries in --credentials_types (%d)", len(uris), len(creds)) + } + for _, c := range creds { + if c != computeEngineCredsName && c != insecureCredsName { + logger.Fatalf("Unsupported credentials type: %v", c) + } + } + var resetChannel bool + switch *testCase { + case "rpc_soak": + resetChannel = false + case "channel_soak": + resetChannel = true + default: + logger.Fatal("Unsupported test case: ", *testCase) + } + + // create clients as specified in flags + var clients []clientConfig + for i := range uris { + var opts []grpc.DialOption + switch creds[i] { + case computeEngineCredsName: + opts = append(opts, grpc.WithCredentialsBundle(google.NewComputeEngineCredentials())) + case insecureCredsName: + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + cc, err := grpc.Dial(uris[i], opts...) 
+ if err != nil { + logger.Fatalf("Fail to dial %v: %v", uris[i], err) + } + defer cc.Close() + clients = append(clients, clientConfig{ + tc: testgrpc.NewTestServiceClient(cc), + opts: opts, + uri: uris[i], + }) + } + + // run soak tests with the different clients + logger.Infof("Clients running with test case %q", *testCase) + var wg sync.WaitGroup + for i := range clients { + wg.Add(1) + go func(c clientConfig) { + interop.DoSoakTest(c.tc, c.uri, c.opts, resetChannel, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Duration(*soakMinTimeMsBetweenRPCs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + logger.Infof("%s test done for server: %s", *testCase, c.uri) + wg.Done() + }(clients[i]) + } + wg.Wait() + logger.Infoln("All clients done!") +} From 5ff7dfcd799e086f54828b62c45adfb83e85e011 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 21 Dec 2022 13:18:52 -0800 Subject: [PATCH 720/998] rls: propagate headers received in RLS response to backends (#5883) --- balancer/rls/picker.go | 17 ++++++++-- balancer/rls/picker_test.go | 65 ++++++++++++++++++++++++++++++++++++- 2 files changed, 79 insertions(+), 3 deletions(-) diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go index f73fe7b1028f..bd5985ad9e7c 100644 --- a/balancer/rls/picker.go +++ b/balancer/rls/picker.go @@ -167,13 +167,26 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // // Caller must hold at least a read-lock on p.lb.cacheMu. 
func (p *rlsPicker) delegateToChildPolicies(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { + const rlsDataHeaderName = "x-google-rls-data" for i, cpw := range dcEntry.childPolicyWrappers { state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if - // it the last one (which handles the case of delegating to the last + // it is the last one (which handles the case of delegating to the last // child picker if all child polcies are in TRANSIENT_FAILURE). if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { - return state.Picker.Pick(info) + // Any header data received from the RLS server is stored in the + // cache entry and needs to be sent to the actual backend in the + // X-Google-RLS-Data header. + res, err := state.Picker.Pick(info) + if err != nil { + return res, err + } + if res.Metatada == nil { + res.Metatada = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) + } else { + res.Metatada.Append(rlsDataHeaderName, dcEntry.headerData) + } + return res, nil } } // In the unlikely event that we have a cache entry with no targets, we end up diff --git a/balancer/rls/picker_test.go b/balancer/rls/picker_test.go index 11c91055d68c..87c4af471a0a 100644 --- a/balancer/rls/picker_test.go +++ b/balancer/rls/picker_test.go @@ -20,16 +20,22 @@ package rls import ( "context" + "fmt" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" - rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/internal/stubserver" rlstest "google.golang.org/grpc/internal/testutils/rls" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" + + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb 
"google.golang.org/grpc/test/grpc_testing" ) // Test verifies the scenario where there is no matching entry in the data cache @@ -241,6 +247,63 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ValidEntry(t *testing.T) { verifyRLSRequest(t, rlsReqCh, false) } +// Test verifies the scenario where there is a matching entry in the data cache +// which is valid and there is no pending request. The pick is expected to be +// delegated to the child policy. +func (s) TestPick_DataCacheHit_NoPendingEntry_ValidEntry_WithHeaderData(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build the RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Start a test backend which expects the header data contents sent from the + // RLS server to be part of RPC metadata as X-Google-RLS-Data header. + const headerDataContents = "foo,bar,baz" + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + gotHeaderData := metadata.ValueFromIncomingContext(ctx, "x-google-rls-data") + if len(gotHeaderData) != 1 || gotHeaderData[0] != headerDataContents { + return nil, fmt.Errorf("got metadata in `X-Google-RLS-Data` is %v, want %s", gotHeaderData, headerDataContents) + } + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + defer backend.Stop() + + // Setup the fake RLS server to return the above backend as a target in the + // RLS response. Also, populate the header data field in the response. 
+ rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{ + Targets: []string{backend.Address}, + HeaderData: headerDataContents, + }} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend with the header + // data sent by the RLS server. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := testgrpc.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() RPC: %v", err) + } +} + // Test verifies the scenario where there is a matching entry in the data cache // which is stale and there is no pending request. The pick is expected to be // delegated to the child policy with a proactive cache refresh. 
From 07ac97c355970d6210cfa5341f2ba0850ec2757b Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 21 Dec 2022 15:44:31 -0600 Subject: [PATCH 721/998] transport: simplify httpClient by moving onGoAway func to onClose (#5885) --- clientconn.go | 12 ++++-------- internal/transport/http2_client.go | 17 ++++++++++------- internal/transport/transport.go | 4 ++-- internal/transport/transport_test.go | 14 +++++++------- 4 files changed, 23 insertions(+), 24 deletions(-) diff --git a/clientconn.go b/clientconn.go index 8402c19e3e9f..26166b89becd 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1237,9 +1237,11 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - onClose := grpcsync.OnceFunc(func() { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() + // adjust params based on GoAwayReason + ac.adjustParams(r) if ac.state == connectivity.Shutdown { // Already shut down. tearDown() already cleared the transport and // canceled hctx via ac.ctx, and we expected this connection to be @@ -1260,19 +1262,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // Always go idle and wait for the LB policy to initiate a new // connection attempt. 
ac.updateConnectivityState(connectivity.Idle, nil) - }) - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - ac.mu.Unlock() - onClose() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 3e582a2853c6..8cf2a7a11039 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -140,8 +140,7 @@ type http2Client struct { channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool @@ -197,7 +196,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -343,7 +342,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, @@ -957,7 +955,9 @@ func (t *http2Client) Close(err error) { } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. - t.onClose() + if t.state != draining { + t.onClose(GoAwayInvalid) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1010,6 +1010,7 @@ func (t *http2Client) GracefulClose() { if logger.V(logLevel) { logger.Infof("transport: GracefulClose called") } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() @@ -1290,8 +1291,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. 
diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 6cff20c8e022..0ac77ea4f8c7 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -583,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 517cd4015da9..b41378b0024d 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -452,7 +452,7 @@ func setUpWithOptions(t *testing.T, port int, sc *ServerConfig, ht hType, copts copts.ChannelzParentID = channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil) connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}, func() {}) + ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}) if connErr != nil { cancel() // Do not cancel in success path. 
t.Fatalf("failed to create transport: %v", connErr) @@ -483,7 +483,7 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.C connCh <- conn }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) + tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err != nil { cancel() // Do not cancel in success path. // Server clean-up. @@ -1287,7 +1287,7 @@ func (s) TestClientHonorsConnectContext(t *testing.T) { time.AfterFunc(100*time.Millisecond, cancel) copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)} - _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) + _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err == nil { t.Fatalf("NewClientTransport() returned successfully; wanted error") } @@ -1299,7 +1299,7 @@ func (s) TestClientHonorsConnectContext(t *testing.T) { // Test context deadline. 
connectCtx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() - _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) + _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err == nil { t.Fatalf("NewClientTransport() returned successfully; wanted error") } @@ -1378,7 +1378,7 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { defer cancel() copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)} - ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}, func() {}) + ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err != nil { t.Fatalf("Error while creating client transport: %v", err) } @@ -2282,7 +2282,7 @@ func (s) TestClientHandshakeInfo(t *testing.T) { TransportCredentials: creds, ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), } - tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}, func() {}) + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}) if err != nil { t.Fatalf("NewClientTransport(): %v", err) } @@ -2323,7 +2323,7 @@ func (s) TestClientHandshakeInfoDialer(t *testing.T) { Dialer: dialer, ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), } - tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}, func() {}) + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}) if err != nil { t.Fatalf("NewClientTransport(): %v", err) } From 
08479c5e2ecb7d30f5210fc74127179745ffca46 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 21 Dec 2022 13:53:03 -0800 Subject: [PATCH 722/998] xdsclient: resource agnostic API implementation (#5776) --- xds/csds/csds.go | 122 +- xds/csds/csds_e2e_test.go | 434 ++++++ xds/csds/csds_test.go | 518 +------ .../clusterresolver/clusterresolver_test.go | 2 - .../clusterresolver/e2e_test/eds_impl_test.go | 3 +- xds/internal/httpfilter/fault/fault_test.go | 5 +- xds/internal/xdsclient/authority.go | 430 +++++- xds/internal/xdsclient/client.go | 11 +- xds/internal/xdsclient/client_new.go | 3 +- .../xdsclient/clientimpl_authority.go | 54 +- xds/internal/xdsclient/clientimpl_dump.go | 50 +- .../xdsclient/clientimpl_loadreport.go | 2 +- .../xdsclient/clientimpl_validator.go | 67 - xds/internal/xdsclient/clientimpl_watchers.go | 196 ++- .../xdsclient/controller/controller.go | 201 --- .../xdsclient/controller/controller_test.go | 154 -- .../xdsclient/controller/loadreport.go | 145 -- .../xdsclient/controller/transport.go | 443 ------ .../xdsclient/controller/v2_ack_test.go | 483 ------- .../xdsclient/controller/v2_cds_test.go | 186 --- .../xdsclient/controller/v2_client_test.go | 212 --- .../xdsclient/controller/v2_eds_test.go | 200 --- .../xdsclient/controller/v2_lds_test.go | 198 --- .../xdsclient/controller/v2_rds_test.go | 203 --- .../xdsclient/controller/v2_testutils_test.go | 470 ------ .../xdsclient/controller/version/v2/client.go | 155 -- .../controller/version/v2/loadreport.go | 170 --- .../xdsclient/controller/version/v3/client.go | 157 -- .../controller/version/v3/loadreport.go | 169 --- .../xdsclient/controller/version/version.go | 123 -- xds/internal/xdsclient/dump_test.go | 511 ------- xds/internal/xdsclient/e2e_test/dump_test.go | 255 ++++ .../xdsclient/e2e_test/misc_watchers_test.go | 7 +- .../e2e_test/resource_update_test.go | 1161 +++++++++++++++ xds/internal/xdsclient/loadreport_test.go | 27 +- xds/internal/xdsclient/pubsub/dump.go | 87 -- 
xds/internal/xdsclient/pubsub/interface.go | 39 - xds/internal/xdsclient/pubsub/pubsub.go | 186 --- xds/internal/xdsclient/pubsub/update.go | 318 ----- xds/internal/xdsclient/pubsub/watch.go | 239 ---- xds/internal/xdsclient/watchers_test.go | 47 - xds/internal/xdsclient/xdsclient_test.go | 1 - .../xdsresource/cluster_resource_type.go | 6 +- .../xdsresource/endpoints_resource_type.go | 3 +- .../xdsresource/listener_resource_type.go | 5 +- .../xdsclient/xdsresource/resource_type.go | 18 +- .../xdsresource/route_config_resource_type.go | 3 +- .../xdsclient/xdsresource/test_utils_test.go | 7 - xds/internal/xdsclient/xdsresource/type.go | 54 + .../xdsclient/xdsresource/unmarshal.go | 114 +- .../xdsclient/xdsresource/unmarshal_cds.go | 16 +- .../xdsresource/unmarshal_cds_test.go | 270 +--- .../xdsclient/xdsresource/unmarshal_eds.go | 9 - .../xdsresource/unmarshal_eds_test.go | 236 +--- .../xdsclient/xdsresource/unmarshal_lds.go | 16 +- .../xdsresource/unmarshal_lds_test.go | 1257 +++++++---------- .../xdsclient/xdsresource/unmarshal_rds.go | 10 - .../xdsresource/unmarshal_rds_test.go | 320 ++--- xds/xds.go | 20 +- 59 files changed, 3312 insertions(+), 7496 deletions(-) create mode 100644 xds/csds/csds_e2e_test.go delete mode 100644 xds/internal/xdsclient/clientimpl_validator.go delete mode 100644 xds/internal/xdsclient/controller/controller.go delete mode 100644 xds/internal/xdsclient/controller/controller_test.go delete mode 100644 xds/internal/xdsclient/controller/loadreport.go delete mode 100644 xds/internal/xdsclient/controller/transport.go delete mode 100644 xds/internal/xdsclient/controller/v2_ack_test.go delete mode 100644 xds/internal/xdsclient/controller/v2_cds_test.go delete mode 100644 xds/internal/xdsclient/controller/v2_client_test.go delete mode 100644 xds/internal/xdsclient/controller/v2_eds_test.go delete mode 100644 xds/internal/xdsclient/controller/v2_lds_test.go delete mode 100644 xds/internal/xdsclient/controller/v2_rds_test.go delete mode 100644 
xds/internal/xdsclient/controller/v2_testutils_test.go delete mode 100644 xds/internal/xdsclient/controller/version/v2/client.go delete mode 100644 xds/internal/xdsclient/controller/version/v2/loadreport.go delete mode 100644 xds/internal/xdsclient/controller/version/v3/client.go delete mode 100644 xds/internal/xdsclient/controller/version/v3/loadreport.go delete mode 100644 xds/internal/xdsclient/controller/version/version.go delete mode 100644 xds/internal/xdsclient/dump_test.go create mode 100644 xds/internal/xdsclient/e2e_test/dump_test.go create mode 100644 xds/internal/xdsclient/e2e_test/resource_update_test.go delete mode 100644 xds/internal/xdsclient/pubsub/dump.go delete mode 100644 xds/internal/xdsclient/pubsub/interface.go delete mode 100644 xds/internal/xdsclient/pubsub/pubsub.go delete mode 100644 xds/internal/xdsclient/pubsub/update.go delete mode 100644 xds/internal/xdsclient/pubsub/watch.go delete mode 100644 xds/internal/xdsclient/watchers_test.go diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 0d71f8f8577a..15039793216c 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -25,55 +25,58 @@ package csds import ( "context" + "fmt" "io" + "sync" - v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" - v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/timestamppb" - _ 
"google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register v2 xds_client. - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register v3 xds_client. + v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -var ( - logger = grpclog.Component("xds") - newXDSClient = func() xdsclient.XDSClient { - c, err := xdsclient.New() - if err != nil { - logger.Warningf("failed to create xds client: %v", err) - return nil - } - return c - } -) +var logger = grpclog.Component("xds") -const ( - listenerTypeURL = "envoy.config.listener.v3.Listener" - routeConfigTypeURL = "envoy.config.route.v3.RouteConfiguration" - clusterTypeURL = "envoy.config.cluster.v3.Cluster" - endpointsTypeURL = "envoy.config.endpoint.v3.ClusterLoadAssignment" -) +const prefix = "[csds-server %p] " -// ClientStatusDiscoveryServer implementations interface ClientStatusDiscoveryServiceServer. +func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, s)) +} + +// ClientStatusDiscoveryServer provides an implementation of the Client Status +// Discovery Service (CSDS) for exposing the xDS config of a given client. See +// https://github.com/envoyproxy/envoy/blob/main/api/envoy/service/status/v3/csds.proto. +// +// For more details about the gRPC implementation of CSDS, refer to gRPC A40 at: +// https://github.com/grpc/proposal/blob/master/A40-csds-support.md. type ClientStatusDiscoveryServer struct { - // xdsClient will always be the same in practice. 
But we keep a copy in each - // server instance for testing. + logger *internalgrpclog.PrefixLogger + + mu sync.Mutex xdsClient xdsclient.XDSClient } -// NewClientStatusDiscoveryServer returns an implementation of the CSDS server that can be -// registered on a gRPC server. +// NewClientStatusDiscoveryServer returns an implementation of the CSDS server +// that can be registered on a gRPC server. func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - return &ClientStatusDiscoveryServer{xdsClient: newXDSClient()}, nil + c, err := xdsclient.New() + if err != nil { + logger.Warningf("Failed to create xDS client: %v", err) + } + s := &ClientStatusDiscoveryServer{xdsClient: c} + s.logger = prefixLogger(s) + s.logger.Infof("Created CSDS server, with xdsClient %p", c) + return s, nil } // StreamClientStatus implementations interface ClientStatusDiscoveryServiceServer. @@ -106,6 +109,9 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req * // // If it returns an error, the error is a status error. func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.xdsClient == nil { return &v3statuspb.ClientStatusResponse{}, nil } @@ -115,21 +121,12 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) } - lds := dumpToGenericXdsConfig(listenerTypeURL, s.xdsClient.DumpLDS) - rds := dumpToGenericXdsConfig(routeConfigTypeURL, s.xdsClient.DumpRDS) - cds := dumpToGenericXdsConfig(clusterTypeURL, s.xdsClient.DumpCDS) - eds := dumpToGenericXdsConfig(endpointsTypeURL, s.xdsClient.DumpEDS) - configs := make([]*v3statuspb.ClientConfig_GenericXdsConfig, 0, len(lds)+len(rds)+len(cds)+len(eds)) - configs = append(configs, lds...) 
- configs = append(configs, rds...) - configs = append(configs, cds...) - configs = append(configs, eds...) - + dump := s.xdsClient.DumpResources() ret := &v3statuspb.ClientStatusResponse{ Config: []*v3statuspb.ClientConfig{ { - Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().XDSServer.NodeProto), - GenericXdsConfigs: configs, + Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().XDSServer.NodeProto, s.logger), + GenericXdsConfigs: dumpToGenericXdsConfig(dump), }, }, } @@ -138,9 +135,11 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp // Close cleans up the resources. func (s *ClientStatusDiscoveryServer) Close() { + s.mu.Lock() if s.xdsClient != nil { s.xdsClient.Close() } + s.mu.Unlock() } // nodeProtoToV3 converts the given proto into a v3.Node. n is from bootstrap @@ -153,7 +152,7 @@ func (s *ClientStatusDiscoveryServer) Close() { // The default case (not v2 or v3) is nil, instead of error, because the // resources in the response are more important than the node. The worst case is // that the user will receive no Node info, but will still get resources. 
-func nodeProtoToV3(n proto.Message) *v3corepb.Node { +func nodeProtoToV3(n proto.Message, logger *internalgrpclog.PrefixLogger) *v3corepb.Node { var node *v3corepb.Node switch nn := n.(type) { case *v3corepb.Node: @@ -174,26 +173,27 @@ func nodeProtoToV3(n proto.Message) *v3corepb.Node { return node } -func dumpToGenericXdsConfig(typeURL string, dumpF func() map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig { - dump := dumpF() - ret := make([]*v3statuspb.ClientConfig_GenericXdsConfig, 0, len(dump)) - for name, d := range dump { - config := &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: typeURL, - Name: name, - VersionInfo: d.MD.Version, - XdsConfig: d.Raw, - LastUpdated: timestamppb.New(d.MD.Timestamp), - ClientStatus: serviceStatusToProto(d.MD.Status), - } - if errState := d.MD.ErrState; errState != nil { - config.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, +func dumpToGenericXdsConfig(dump map[string]map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig { + var ret []*v3statuspb.ClientConfig_GenericXdsConfig + for typeURL, updates := range dump { + for name, update := range updates { + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, + Name: name, + VersionInfo: update.MD.Version, + XdsConfig: update.Raw, + LastUpdated: timestamppb.New(update.MD.Timestamp), + ClientStatus: serviceStatusToProto(update.MD.Status), + } + if errState := update.MD.ErrState; errState != nil { + config.ErrorState = &v3adminpb.UpdateFailureState{ + LastUpdateAttempt: timestamppb.New(errState.Timestamp), + Details: errState.Err.Error(), + VersionInfo: errState.Version, + } } + ret = append(ret, config) } - ret = append(ret, config) } return ret } diff --git a/xds/csds/csds_e2e_test.go b/xds/csds/csds_e2e_test.go new file mode 100644 index 000000000000..55ae772741b2 --- /dev/null 
+++ b/xds/csds/csds_e2e_test.go @@ -0,0 +1,434 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package csds_test + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/csds" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspbgrpc 
"github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter +) + +const defaultTestTimeout = 5 * time.Second + +var cmpOpts = cmp.Options{ + cmp.Transformer("sort", func(in []*v3statuspb.ClientConfig_GenericXdsConfig) []*v3statuspb.ClientConfig_GenericXdsConfig { + out := append([]*v3statuspb.ClientConfig_GenericXdsConfig(nil), in...) + sort.Slice(out, func(i, j int) bool { + a, b := out[i], out[j] + if a == nil { + return true + } + if b == nil { + return false + } + if strings.Compare(a.TypeUrl, b.TypeUrl) == 0 { + return strings.Compare(a.Name, b.Name) < 0 + } + return strings.Compare(a.TypeUrl, b.TypeUrl) < 0 + }) + return out + }), + protocmp.Transform(), + protocmp.IgnoreFields((*v3statuspb.ClientConfig_GenericXdsConfig)(nil), "last_updated"), + protocmp.IgnoreFields((*v3adminpb.UpdateFailureState)(nil), "last_update_attempt", "details"), +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestCSDS(t *testing.T) { + // Spin up a xDS management server on a local port. + nodeID := uuid.New().String() + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap file in a temporary directory. + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ + Version: bootstrap.TransportV3, + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer bootstrapCleanup() + + // Create an xDS client. This will end up using the same singleton as used + // by the CSDS service. + xdsC, err := xdsclient.New() + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer xdsC.Close() + + // Initialize an gRPC server and register CSDS on it. 
+ server := grpc.NewServer() + csdss, err := csds.NewClientStatusDiscoveryServer() + if err != nil { + t.Fatal(err) + } + v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) + defer func() { + server.Stop() + csdss.Close() + }() + + // Create a local listener and pass it to Serve(). + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Create a client to the CSDS server. + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Failed to dial CSDS server %q: %v", lis.Addr().String(), err) + } + c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("Failed to create a stream for CSDS: %v", err) + } + defer conn.Close() + + // Verify that the xDS client reports an empty config. + if err := checkClientStatusResponse(stream, nil); err != nil { + t.Fatal(err) + } + + // Initialize the xDS resources to be used in this test. 
+ ldsTargets := []string{"lds.target.good:0000", "lds.target.good:1111"} + rdsTargets := []string{"route-config-0", "route-config-1"} + cdsTargets := []string{"cluster-0", "cluster-1"} + edsTargets := []string{"endpoints-0", "endpoints-1"} + listeners := make([]*v3listenerpb.Listener, len(ldsTargets)) + listenerAnys := make([]*anypb.Any, len(ldsTargets)) + for i := range ldsTargets { + listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) + listenerAnys[i] = testutils.MarshalAny(listeners[i]) + } + routes := make([]*v3routepb.RouteConfiguration, len(rdsTargets)) + routeAnys := make([]*anypb.Any, len(rdsTargets)) + for i := range rdsTargets { + routes[i] = e2e.DefaultRouteConfig(rdsTargets[i], ldsTargets[i], cdsTargets[i]) + routeAnys[i] = testutils.MarshalAny(routes[i]) + } + clusters := make([]*v3clusterpb.Cluster, len(cdsTargets)) + clusterAnys := make([]*anypb.Any, len(cdsTargets)) + for i := range cdsTargets { + clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i], e2e.SecurityLevelNone) + clusterAnys[i] = testutils.MarshalAny(clusters[i]) + } + endpoints := make([]*v3endpointpb.ClusterLoadAssignment, len(edsTargets)) + endpointAnys := make([]*anypb.Any, len(edsTargets)) + ips := []string{"0.0.0.0", "1.1.1.1"} + ports := []uint32{123, 456} + for i := range edsTargets { + endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i:i+1]) + endpointAnys[i] = testutils.MarshalAny(endpoints[i]) + } + + // Register watches on the xDS client for two resources of each type. 
+ for _, target := range ldsTargets { + xdsC.WatchListener(target, func(xdsresource.ListenerUpdate, error) {}) + } + for _, target := range rdsTargets { + xdsC.WatchRouteConfig(target, func(xdsresource.RouteConfigUpdate, error) {}) + } + for _, target := range cdsTargets { + xdsC.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) + } + for _, target := range edsTargets { + xdsC.WatchEndpoints(target, func(xdsresource.EndpointsUpdate, error) {}) + } + + // Verify that the xDS client reports the resources as being in "Requested" + // state. + want := []*v3statuspb.ClientConfig_GenericXdsConfig{} + for i := range ldsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.listener.v3.Listener", ldsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for i := range rdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", rdsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for i := range cdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.cluster.v3.Cluster", cdsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for i := range edsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", edsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for resources in \"Requested\" state: %v", err) + } + if err := checkClientStatusResponse(stream, want); err == nil { + break + } + time.Sleep(time.Millisecond * 100) + } + + // Configure the management server with two resources of each type, + // corresponding to the watches registered above. 
+ if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + }); err != nil { + t.Fatal(err) + } + + // Verify that the xDS client reports the resources as being in "ACKed" + // state, and in version "1". + want = nil + for i := range ldsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.listener.v3.Listener", ldsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, listenerAnys[i])) + } + for i := range rdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", rdsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, routeAnys[i])) + } + for i := range cdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.cluster.v3.Cluster", cdsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, clusterAnys[i])) + } + for i := range edsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", edsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, endpointAnys[i])) + } + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for resources in \"ACKed\" state: %v", err) + } + err := checkClientStatusResponse(stream, want) + if err == nil { + break + } + time.Sleep(time.Millisecond * 100) + } + + // Update the first resource of each type in the management server to a + // value which is expected to be NACK'ed by the xDS client. 
+ const nackResourceIdx = 0 + listeners[nackResourceIdx].ApiListener = &v3listenerpb.ApiListener{} + routes[nackResourceIdx].VirtualHosts = []*v3routepb.VirtualHost{{Routes: []*v3routepb.Route{{}}}} + clusters[nackResourceIdx].ClusterDiscoveryType = &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC} + endpoints[nackResourceIdx].Endpoints = []*v3endpointpb.LocalityLbEndpoints{{}} + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + SkipValidation: true, + }); err != nil { + t.Fatal(err) + } + + // Verify that the xDS client reports the first resource of each type as + // being in "NACKed" state, and the second resource of each type to be in + // "ACKed" state. The version for the ACKed resource would be "2", while + // that for the NACKed resource would be "1". In the NACKed resource, the + // version which is NACKed is stored in the ErrorState field. + want = nil + for i := range ldsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.listener.v3.Listener", ldsTargets[i], "2", v3adminpb.ClientResourceStatus_ACKED, listenerAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for i := range rdsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", rdsTargets[i], "2", v3adminpb.ClientResourceStatus_ACKED, routeAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for i := range cdsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.cluster.v3.Cluster", cdsTargets[i], "2", 
v3adminpb.ClientResourceStatus_ACKED, clusterAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for i := range edsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", edsTargets[i], "2", v3adminpb.ClientResourceStatus_ACKED, endpointAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for resources in \"NACKed\" state: %v", err) + } + err := checkClientStatusResponse(stream, want) + if err == nil { + break + } + time.Sleep(time.Millisecond * 100) + } +} + +func makeGenericXdsConfig(typeURL, name, version string, status v3adminpb.ClientResourceStatus, config *anypb.Any) *v3statuspb.ClientConfig_GenericXdsConfig { + return &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, + Name: name, + VersionInfo: version, + ClientStatus: status, + XdsConfig: config, + } +} + +func checkClientStatusResponse(stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, want []*v3statuspb.ClientConfig_GenericXdsConfig) error { + if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { + if err != io.EOF { + return fmt.Errorf("failed to send ClientStatusRequest: %v", err) + } + // If the stream has closed, we call Recv() until it returns a non-nil + // error to get the actual error on the stream. 
+ for { + if _, err := stream.Recv(); err != nil { + return fmt.Errorf("failed to recv ClientStatusResponse: %v", err) + } + } + } + resp, err := stream.Recv() + if err != nil { + return fmt.Errorf("failed to recv ClientStatusResponse: %v", err) + } + + if n := len(resp.Config); n != 1 { + return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(resp)) + } + + if diff := cmp.Diff(resp.Config[0].GenericXdsConfigs, want, cmpOpts); diff != "" { + return fmt.Errorf(diff) + } + return nil +} + +func (s) TestCSDSNoXDSClient(t *testing.T) { + // Create a bootstrap file in a temporary directory. Since we pass empty + // options, it would end up creating a bootstrap file with an empty + // serverURI which will fail xDS client creation. + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{}) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { bootstrapCleanup() }) + + // Initialize an gRPC server and register CSDS on it. + server := grpc.NewServer() + csdss, err := csds.NewClientStatusDiscoveryServer() + if err != nil { + t.Fatal(err) + } + defer csdss.Close() + v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) + + // Create a local listener and pass it to Serve(). + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + defer server.Stop() + + // Create a client to the CSDS server. 
+ conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Failed to dial CSDS server %q: %v", lis.Addr().String(), err) + } + defer conn.Close() + c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("Failed to create a stream for CSDS: %v", err) + } + + if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { + t.Fatalf("Failed to send ClientStatusRequest: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("Failed to recv ClientStatusResponse: %v", err) + } + if n := len(r.Config); n != 0 { + t.Fatalf("got %d configs, want 0: %v", n, proto.MarshalTextString(r)) + } +} diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go index 6772389c6f7f..48002b8501e0 100644 --- a/xds/csds/csds_test.go +++ b/xds/csds/csds_test.go @@ -19,98 +19,15 @@ package csds import ( - "context" - "fmt" - "sort" - "strings" "testing" - "time" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" - "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/testutils/xds/bootstrap" - "google.golang.org/grpc/internal/testutils/xds/e2e" - _ "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/testing/protocmp" - "google.golang.org/protobuf/types/known/anypb" - v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" v2corepb 
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" - v3statuspbgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" -) - -const defaultTestTimeout = 10 * time.Second - -var cmpOpts = cmp.Options{ - cmp.Transformer("sort", func(in []*v3statuspb.ClientConfig_GenericXdsConfig) []*v3statuspb.ClientConfig_GenericXdsConfig { - out := append([]*v3statuspb.ClientConfig_GenericXdsConfig(nil), in...) - sort.Slice(out, func(i, j int) bool { - a, b := out[i], out[j] - if a == nil { - return true - } - if b == nil { - return false - } - if strings.Compare(a.TypeUrl, b.TypeUrl) == 0 { - return strings.Compare(a.Name, b.Name) < 0 - } - return strings.Compare(a.TypeUrl, b.TypeUrl) < 0 - }) - return out - }), - protocmp.Transform(), -} - -// filterFields clears unimportant fields in the proto messages. -// -// protocmp.IgnoreFields() doesn't work on nil messages (it panics). -func filterFields(ms []*v3statuspb.ClientConfig_GenericXdsConfig) []*v3statuspb.ClientConfig_GenericXdsConfig { - out := append([]*v3statuspb.ClientConfig_GenericXdsConfig{}, ms...) 
- for _, m := range out { - if m == nil { - continue - } - m.LastUpdated = nil - if m.ErrorState != nil { - m.ErrorState.Details = "blahblah" - m.ErrorState.LastUpdateAttempt = nil - } - } - return out -} - -var ( - ldsTargets = []string{"lds.target.good:0000", "lds.target.good:1111"} - listeners = make([]*v3listenerpb.Listener, len(ldsTargets)) - listenerAnys = make([]*anypb.Any, len(ldsTargets)) - - rdsTargets = []string{"route-config-0", "route-config-1"} - routes = make([]*v3routepb.RouteConfiguration, len(rdsTargets)) - routeAnys = make([]*anypb.Any, len(rdsTargets)) - - cdsTargets = []string{"cluster-0", "cluster-1"} - clusters = make([]*v3clusterpb.Cluster, len(cdsTargets)) - clusterAnys = make([]*anypb.Any, len(cdsTargets)) - - edsTargets = []string{"endpoints-0", "endpoints-1"} - endpoints = make([]*v3endpointpb.ClusterLoadAssignment, len(edsTargets)) - endpointAnys = make([]*anypb.Any, len(edsTargets)) - ips = []string{"0.0.0.0", "1.1.1.1"} - ports = []uint32{123, 456} ) type s struct { @@ -121,438 +38,7 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -func init() { - for i := range ldsTargets { - listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) - listenerAnys[i] = testutils.MarshalAny(listeners[i]) - } - for i := range rdsTargets { - routes[i] = e2e.DefaultRouteConfig(rdsTargets[i], ldsTargets[i], cdsTargets[i]) - routeAnys[i] = testutils.MarshalAny(routes[i]) - } - for i := range cdsTargets { - clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i], e2e.SecurityLevelNone) - clusterAnys[i] = testutils.MarshalAny(clusters[i]) - } - for i := range edsTargets { - endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i:i+1]) - endpointAnys[i] = testutils.MarshalAny(endpoints[i]) - } -} - -func (s) TestCSDS(t *testing.T) { - const retryCount = 10 - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - xdsC, mgmServer, nodeID, stream, cleanup := commonSetup(ctx, t) - 
defer cleanup() - - for _, target := range ldsTargets { - xdsC.WatchListener(target, func(xdsresource.ListenerUpdate, error) {}) - } - for _, target := range rdsTargets { - xdsC.WatchRouteConfig(target, func(xdsresource.RouteConfigUpdate, error) {}) - } - for _, target := range cdsTargets { - xdsC.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) - } - for _, target := range edsTargets { - xdsC.WatchEndpoints(target, func(xdsresource.EndpointsUpdate, error) {}) - } - - for i := 0; i < retryCount; i++ { - err := checkForRequested(stream) - if err == nil { - break - } - if i == retryCount-1 { - t.Fatalf("%v", err) - } - time.Sleep(time.Millisecond * 100) - } - - if err := mgmServer.Update(ctx, e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: listeners, - Routes: routes, - Clusters: clusters, - Endpoints: endpoints, - }); err != nil { - t.Fatal(err) - } - for i := 0; i < retryCount; i++ { - err := checkForACKed(stream) - if err == nil { - break - } - if i == retryCount-1 { - t.Fatalf("%v", err) - } - time.Sleep(time.Millisecond * 100) - } - - const nackResourceIdx = 0 - var ( - nackListeners = append([]*v3listenerpb.Listener{}, listeners...) - nackRoutes = append([]*v3routepb.RouteConfiguration{}, routes...) - nackClusters = append([]*v3clusterpb.Cluster{}, clusters...) - nackEndpoints = append([]*v3endpointpb.ClusterLoadAssignment{}, endpoints...) - ) - nackListeners[0] = &v3listenerpb.Listener{Name: ldsTargets[nackResourceIdx], ApiListener: &v3listenerpb.ApiListener{}} // 0 will be nacked. 1 will stay the same. 
- nackRoutes[0] = &v3routepb.RouteConfiguration{ - Name: rdsTargets[nackResourceIdx], VirtualHosts: []*v3routepb.VirtualHost{{Routes: []*v3routepb.Route{{}}}}, - } - nackClusters[0] = &v3clusterpb.Cluster{ - Name: cdsTargets[nackResourceIdx], ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - } - nackEndpoints[0] = &v3endpointpb.ClusterLoadAssignment{ - ClusterName: edsTargets[nackResourceIdx], Endpoints: []*v3endpointpb.LocalityLbEndpoints{{}}, - } - if err := mgmServer.Update(ctx, e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: nackListeners, - Routes: nackRoutes, - Clusters: nackClusters, - Endpoints: nackEndpoints, - SkipValidation: true, - }); err != nil { - t.Fatal(err) - } - for i := 0; i < retryCount; i++ { - err := checkForNACKed(nackResourceIdx, stream) - if err == nil { - break - } - if i == retryCount-1 { - t.Fatalf("%v", err) - } - time.Sleep(time.Millisecond * 100) - } -} - -func commonSetup(ctx context.Context, t *testing.T) (xdsclient.XDSClient, *e2e.ManagementServer, string, v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { - t.Helper() - - // Spin up a xDS management server on a local port. - nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) - if err != nil { - t.Fatal(err) - } - - // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ - Version: bootstrap.TransportV3, - NodeID: nodeID, - ServerURI: fs.Address, - }) - if err != nil { - t.Fatal(err) - } - - // Create xds_client. - xdsC, err := xdsclient.New() - if err != nil { - t.Fatalf("failed to create xds client: %v", err) - } - origNewXDSClient := newXDSClient - newXDSClient = func() xdsclient.XDSClient { return xdsC } - - // Initialize an gRPC server and register CSDS on it. 
- server := grpc.NewServer() - csdss, err := NewClientStatusDiscoveryServer() - if err != nil { - t.Fatal(err) - } - v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) - - // Create a local listener and pass it to Serve(). - lis, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - go func() { - if err := server.Serve(lis); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - - // Create CSDS client. - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("cannot connect to server: %v", err) - } - c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) - stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("cannot get ServerReflectionInfo: %v", err) - } - - return xdsC, fs, nodeID, stream, func() { - fs.Stop() - conn.Close() - server.Stop() - csdss.Close() - newXDSClient = origNewXDSClient - xdsC.Close() - bootstrapCleanup() - } -} - -func checkForRequested(stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient) error { - if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { - return fmt.Errorf("failed to send request: %v", err) - } - r, err := stream.Recv() - if err != nil { - // io.EOF is not ok. - return fmt.Errorf("failed to recv response: %v", err) - } - - if n := len(r.Config); n != 1 { - return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) - } - - var want []*v3statuspb.ClientConfig_GenericXdsConfig - // Status is Requested, but version and xds config are all unset. 
- for i := range ldsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: listenerTypeURL, Name: ldsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - for i := range rdsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: routeConfigTypeURL, Name: rdsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - for i := range cdsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: clusterTypeURL, Name: cdsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - for i := range edsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: endpointsTypeURL, Name: edsTargets[i], ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - if diff := cmp.Diff(filterFields(r.Config[0].GenericXdsConfigs), want, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - return nil -} - -func checkForACKed(stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient) error { - const wantVersion = "1" - - if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { - return fmt.Errorf("failed to send: %v", err) - } - r, err := stream.Recv() - if err != nil { - // io.EOF is not ok. - return fmt.Errorf("failed to recv response: %v", err) - } - - if n := len(r.Config); n != 1 { - return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) - } - - var want []*v3statuspb.ClientConfig_GenericXdsConfig - // Status is Acked, config is filled with the prebuilt Anys. 
- for i := range ldsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: listenerTypeURL, - Name: ldsTargets[i], - VersionInfo: wantVersion, - XdsConfig: listenerAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - for i := range rdsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: routeConfigTypeURL, - Name: rdsTargets[i], - VersionInfo: wantVersion, - XdsConfig: routeAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - for i := range cdsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: clusterTypeURL, - Name: cdsTargets[i], - VersionInfo: wantVersion, - XdsConfig: clusterAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - for i := range edsTargets { - want = append(want, &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: endpointsTypeURL, - Name: edsTargets[i], - VersionInfo: wantVersion, - XdsConfig: endpointAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - if diff := cmp.Diff(filterFields(r.Config[0].GenericXdsConfigs), want, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - return nil -} - -func checkForNACKed(nackResourceIdx int, stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient) error { - const ( - ackVersion = "1" - nackVersion = "2" - ) - if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { - return fmt.Errorf("failed to send: %v", err) - } - r, err := stream.Recv() - if err != nil { - // io.EOF is not ok. - return fmt.Errorf("failed to recv response: %v", err) - } - - if n := len(r.Config); n != 1 { - return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) - } - - var want []*v3statuspb.ClientConfig_GenericXdsConfig - // Resources with the nackIdx are NACKed. 
- for i := range ldsTargets { - config := &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: listenerTypeURL, - Name: ldsTargets[i], - VersionInfo: nackVersion, - XdsConfig: listenerAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - config.VersionInfo = ackVersion - config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - config.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - want = append(want, config) - } - for i := range rdsTargets { - config := &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: routeConfigTypeURL, - Name: rdsTargets[i], - VersionInfo: nackVersion, - XdsConfig: routeAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - config.VersionInfo = ackVersion - config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - config.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - want = append(want, config) - } - for i := range cdsTargets { - config := &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: clusterTypeURL, - Name: cdsTargets[i], - VersionInfo: nackVersion, - XdsConfig: clusterAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - config.VersionInfo = ackVersion - config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - config.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - want = append(want, config) - } - for i := range edsTargets { - config := &v3statuspb.ClientConfig_GenericXdsConfig{ - TypeUrl: endpointsTypeURL, - Name: edsTargets[i], - VersionInfo: nackVersion, - XdsConfig: endpointAnys[i], - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - config.VersionInfo = ackVersion - config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - config.ErrorState = &v3adminpb.UpdateFailureState{ - 
Details: "blahblah", - VersionInfo: nackVersion, - } - } - want = append(want, config) - } - if diff := cmp.Diff(filterFields(r.Config[0].GenericXdsConfigs), want, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - return nil -} - -func (s) TestCSDSNoXDSClient(t *testing.T) { - oldNewXDSClient := newXDSClient - newXDSClient = func() xdsclient.XDSClient { return nil } - defer func() { newXDSClient = oldNewXDSClient }() - - // Initialize an gRPC server and register CSDS on it. - server := grpc.NewServer() - csdss, err := NewClientStatusDiscoveryServer() - if err != nil { - t.Fatal(err) - } - defer csdss.Close() - v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) - // Create a local listener and pass it to Serve(). - lis, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - go func() { - if err := server.Serve(lis); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - defer server.Stop() - - // Create CSDS client. - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("cannot connect to server: %v", err) - } - defer conn.Close() - c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("cannot get ServerReflectionInfo: %v", err) - } - - if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { - t.Fatalf("failed to send: %v", err) - } - r, err := stream.Recv() - if err != nil { - // io.EOF is not ok. 
- t.Fatalf("failed to recv response: %v", err) - } - if n := len(r.Config); n != 0 { - t.Fatalf("got %d configs, want 0: %v", n, proto.MarshalTextString(r)) - } -} - -func Test_nodeProtoToV3(t *testing.T) { +func (s) Test_nodeProtoToV3(t *testing.T) { const ( testID = "test-id" testCluster = "test-cluster" @@ -597,7 +83,7 @@ func Test_nodeProtoToV3(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := nodeProtoToV3(tt.n) + got := nodeProtoToV3(tt.n, nil) if diff := cmp.Diff(got, tt.want, protocmp.Transform()); diff != "" { t.Errorf("nodeProtoToV3() got unexpected result, diff (-got, +want): %v", diff) } diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index c2a5729e3bcb..7a3ced47b3a1 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -41,8 +41,6 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // V2 client registration. ) const ( diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index ee6b029d8c47..75d37e71ea61 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -48,8 +48,7 @@ import ( testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" - _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the "cluster_resolver_experimental" LB policy. - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. 
+ _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the "cluster_resolver_experimental" LB policy. ) const ( diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 6ae345552f21..6bf00771f413 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -52,9 +52,8 @@ import ( tpb "github.com/envoyproxy/go-control-plane/envoy/type/v3" testpb "google.golang.org/grpc/test/grpc_testing" - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. ) const defaultTestTimeout = 10 * time.Second diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 817cb7338f5a..7a533e662207 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -18,102 +18,416 @@ package xdsclient import ( + "context" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" + "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" ) -// authority is a combination of pubsub and the controller for this authority. 
+type watchState int + +const ( + watchStateStarted watchState = iota + watchStateRespReceived + watchStateTimeout + watchStateCanceled +) + +type resourceState struct { + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource + cache xdsresource.ResourceData // Most recent ACKed update for this resource + md xdsresource.UpdateMetadata // Metadata for the most recent update + + // Common watch state for all watchers of this resource. + wTimer *time.Timer // Expiry timer + wState watchState // State of the watch +} + +// authority wraps all state associated with a single management server. It +// contains the transport used to communicate with the management server and a +// cache of resource state for resources requested from the management server. // -// Note that it might make sense to use one pubsub for all the resources (for -// all the controllers). One downside is the handling of StoW APIs (LDS/CDS). -// These responses contain all the resources from that control plane, so pubsub -// will need to keep lists of resources from each control plane, to know what -// are removed. +// Bootstrap configuration could contain multiple entries in the authorities map +// that share the same server config (server address and credentials to use). We +// share the same authority instance amongst these entries, and the reference +// counting is taken care of by the `clientImpl` type. 
type authority struct { - config *bootstrap.ServerConfig - pubsub *pubsub.Pubsub - controller controllerInterface - refCount int + serverCfg *bootstrap.ServerConfig // Server config for this authority + bootstrapCfg *bootstrap.Config // Full bootstrap configuration + refCount int // Reference count of watches referring to this authority + serializer *callbackSerializer // Callback serializer for invoking watch callbacks + resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup + transport *transport.Transport // Underlying xDS transport to the management server + watchExpiryTimeout time.Duration // Resource watch expiry timeout + logger *grpclog.PrefixLogger + + // A two level map containing the state of all the resources being watched. + // + // The first level map key is the ResourceType (Listener, Route etc). This + // allows us to have a single map for all resources instead of having per + // resource-type maps. + // + // The second level map key is the resource name, with the value being the + // actual state of the resource. + resourcesMu sync.Mutex + resources map[xdsresource.Type]map[string]*resourceState } -// caller must hold parent's authorityMu. -func (a *authority) ref() { - a.refCount++ +// authorityArgs is a convenience struct to wrap arguments required to create a +// new authority. All fields here correspond directly to appropriate fields +// stored in the authority struct. +type authorityArgs struct { + // The reason for passing server config and bootstrap config separately + // (although the former is part of the latter) is because authorities in the + // bootstrap config might contain an empty server config, and in this case, + // the top-level server config is to be used. + // + // There are two code paths from where a new authority struct might be + // created. One is when a watch is registered for a resource, and one is + // when load reporting needs to be started. 
We have the authority name in
+	// the first case, but do not in the second. We only have the server config in
+	// the second case.
+	serverCfg          *bootstrap.ServerConfig
+	bootstrapCfg       *bootstrap.Config
+	serializer         *callbackSerializer
+	resourceTypeGetter func(string) xdsresource.Type
+	watchExpiryTimeout time.Duration
+	logger             *grpclog.PrefixLogger
 }
 
-// caller must hold parent's authorityMu.
-func (a *authority) unref() int {
-	a.refCount--
-	return a.refCount
+func newAuthority(args authorityArgs) (*authority, error) {
+	ret := &authority{
+		serverCfg:          args.serverCfg,
+		bootstrapCfg:       args.bootstrapCfg,
+		serializer:         args.serializer,
+		resourceTypeGetter: args.resourceTypeGetter,
+		watchExpiryTimeout: args.watchExpiryTimeout,
+		logger:             args.logger,
+		resources:          make(map[xdsresource.Type]map[string]*resourceState),
+	}
+
+	tr, err := transport.New(transport.Options{
+		ServerCfg:          *args.serverCfg,
+		UpdateHandler:      ret.handleResourceUpdate,
+		StreamErrorHandler: ret.newConnectionError,
+		Logger:             args.logger,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err)
+	}
+	ret.transport = tr
+	return ret, nil
+
 }
 
-func (a *authority) close() {
-	if a.pubsub != nil {
-		a.pubsub.Close()
+func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error {
+	rType := a.resourceTypeGetter(resourceUpdate.URL)
+	if rType == nil {
+		return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL)
 	}
-	if a.controller != nil {
-		a.controller.Close()
+
+	opts := &xdsresource.DecodeOptions{
+		BootstrapConfig: a.bootstrapCfg,
+		Logger:          a.logger,
 	}
+	updates, md, err := decodeAllResources(opts, rType, resourceUpdate)
+	a.updateResourceStateAndScheduleCallbacks(rType, updates, md)
+	return err
 }
 
-func (a *authority) watchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) {
-	first, cancelF := 
a.pubsub.WatchListener(serviceName, cb) - if first { - a.controller.AddWatch(xdsresource.ListenerResource, serviceName) +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + resourceStates := a.resources[rType] + for name, uErr := range updates { + if state, ok := resourceStates[name]; ok { + // Cancel the expiry timer associated with the resource once a + // response is received, irrespective of whether the update is a + // good one or not. + state.wTimer.Stop() + state.wState = watchStateRespReceived + + if uErr.err != nil { + // On error, keep previous version of the resource. But update + // status and error. + state.md.ErrState = md.ErrState + state.md.Status = md.Status + for watcher := range state.watchers { + watcher := watcher + err := uErr.err + a.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + } + continue + } + // If we get here, it means that the update is a valid one. Notify + // watchers only if this is a first time update or it is different + // from the one currently cached. + if state.cache == nil || !state.cache.Equal(uErr.resource) { + for watcher := range state.watchers { + watcher := watcher + resource := uErr.resource + a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) + } + } + // Sync cache. + a.logger.Debugf("Resource type %q with name %q, value %s added to cache", rType.TypeEnum().String(), name, uErr.resource.ToJSON()) + state.cache = uErr.resource + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. 
+ state.md = md + state.md.ErrState = nil + state.md.Status = xdsresource.ServiceStatusACKed + if md.ErrState != nil { + state.md.Version = md.ErrState.Version + } + } } - return func() { - if cancelF() { - a.controller.RemoveWatch(xdsresource.ListenerResource, serviceName) + + // If this resource type requires that all resources be present in every + // SotW response from the server, a response that does not include a + // previously seen resource will be interpreted as a deletion of that + // resource. + if !rType.AllResourcesRequiredInSotW() { + return + } + for name, state := range resourceStates { + if _, ok := updates[name]; !ok { + // The metadata status is set to "ServiceStatusNotExist" if a + // previous update deleted this resource, in which case we do not + // want to repeatedly call the watch callbacks with a + // "resource-not-found" error. + if state.md.Status == xdsresource.ServiceStatusNotExist { + continue + } + + // If resource exists in cache, but not in the new update, delete + // the resource from cache, and also send a resource not found error + // to indicate resource removed. Metadata for the resource is still + // maintained, as this is required by CSDS. 
+ state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + } } } } -func (a *authority) watchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { - first, cancelF := a.pubsub.WatchRouteConfig(routeName, cb) - if first { - a.controller.AddWatch(xdsresource.RouteConfigResource, routeName) +type resourceDataErrTuple struct { + resource xdsresource.ResourceData + err error +} + +func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, update transport.ResourceUpdate) (map[string]resourceDataErrTuple, xdsresource.UpdateMetadata, error) { + timestamp := time.Now() + md := xdsresource.UpdateMetadata{ + Version: update.Version, + Timestamp: timestamp, } - return func() { - if cancelF() { - a.controller.RemoveWatch(xdsresource.RouteConfigResource, routeName) + + topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. + perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. + ret := make(map[string]resourceDataErrTuple) // Return result, a map from resource name to either resource data or error. + for _, r := range update.Resources { + result, err := rType.Decode(opts, r) + + // Name field of the result is left unpopulated only when resource + // deserialization fails. + name := "" + if result != nil { + name = xdsresource.ParseName(result.Name).String() + } + if err == nil { + ret[name] = resourceDataErrTuple{resource: result.Resource} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. 
+ ret[name] = resourceDataErrTuple{err: err} } -} -func (a *authority) watchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { - first, cancelF := a.pubsub.WatchCluster(clusterName, cb) - if first { - a.controller.AddWatch(xdsresource.ClusterResource, clusterName) + if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { + md.Status = xdsresource.ServiceStatusACKed + return ret, md, nil } - return func() { - if cancelF() { - a.controller.RemoveWatch(xdsresource.ClusterResource, clusterName) + + typeStr := rType.TypeEnum().String() + md.Status = xdsresource.ServiceStatusNACKed + errRet := xdsresource.CombineErrors(typeStr, topLevelErrors, perResourceErrors) + md.ErrState = &xdsresource.UpdateErrorMetadata{ + Version: update.Version, + Err: errRet, + Timestamp: timestamp, + } + return ret, md, errRet +} + +// newConnectionError is called by the underlying transport when it receives a +// connection error. The error will be forwarded to all the resource watchers. +func (a *authority) newConnectionError(err error) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // For all resource types, for all resources within each resource type, and + // for all the watchers for every resource, propagate the connection error + // from the transport layer. + for _, rType := range a.resources { + for _, state := range rType { + for watcher := range state.watchers { + watcher := watcher + a.serializer.Schedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + }) + } } } } -func (a *authority) watchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { - first, cancelF := a.pubsub.WatchEndpoints(clusterName, cb) - if first { - a.controller.AddWatch(xdsresource.EndpointsResource, clusterName) +// Increments the reference count. Caller must hold parent's authorityMu. 
+func (a *authority) refLocked() { + a.refCount++ +} + +// Decrements the reference count. Caller must hold parent's authorityMu. +func (a *authority) unrefLocked() int { + a.refCount-- + return a.refCount +} + +func (a *authority) close() { + a.transport.Close() +} + +func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { + a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeEnum(), resourceName) + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // Lookup the ResourceType specific resources from the top-level map. If + // there is no entry for this ResourceType, create one. + resources := a.resources[rType] + if resources == nil { + resources = make(map[string]*resourceState) + a.resources[rType] = resources } + + // Lookup the resourceState for the particular resource that the watch is + // being registered for. If this is the first watch for this resource, + // instruct the transport layer to send a DiscoveryRequest for the same. + state := resources[resourceName] + if state == nil { + a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeEnum(), resourceName) + state = &resourceState{ + watchers: make(map[xdsresource.ResourceWatcher]bool), + md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, + wState: watchStateStarted, + } + state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { + a.handleWatchTimerExpiry(state, fmt.Errorf("watch for resource %q of type %s timed out", resourceName, rType.TypeEnum().String())) + }) + resources[resourceName] = state + a.sendDiscoveryRequestLocked(rType, resources) + } + // Always add the new watcher to the set of watchers. + state.watchers[watcher] = true + + // If we have a cached copy of the resource, notify the new watcher. 
+	if state.cache != nil {
+		a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeEnum(), resourceName, state.cache.ToJSON())
+		resource := state.cache
+		a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) })
+	}
+
 	return func() {
-		if cancelF() {
-			a.controller.RemoveWatch(xdsresource.EndpointsResource, clusterName)
+		a.resourcesMu.Lock()
+		defer a.resourcesMu.Unlock()
+
+		// We already have a reference to the resourceState for this particular
+		// resource. Avoid indexing into the two-level map to figure this out.
+
+		// Delete this particular watcher from the list of watchers, so that its
+		// callback will not be invoked in the future.
+		state.wState = watchStateCanceled
+		delete(state.watchers, watcher)
+		if len(state.watchers) > 0 {
+			return
 		}
+
+		// There are no more watchers for this resource, delete the state
+		// associated with it, and instruct the transport to send a request
+		// which does not include this resource name.
+		delete(resources, resourceName)
+		a.sendDiscoveryRequestLocked(rType, resources)
 	}
 }
 
+func (a *authority) handleWatchTimerExpiry(state *resourceState, err error) {
+	a.resourcesMu.Lock()
+	defer a.resourcesMu.Unlock()
+
+	if state.wState == watchStateCanceled {
+		return
+	}
+
+	state.wState = watchStateTimeout
+	for watcher := range state.watchers {
+		watcher := watcher
+		a.serializer.Schedule(func(context.Context) { watcher.OnError(err) })
+	}
+}
+
+// sendDiscoveryRequestLocked sends a discovery request for the specified
+// resource type and resource names. Even though this method does not directly
+// access the resource cache, it is important that `resourcesMu` be held when
+// calling this method to ensure that a consistent snapshot of resource names is
+// being requested.
+func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { + resourcesToRequest := make([]string, len(resources)) + i := 0 + for name := range resources { + resourcesToRequest[i] = name + i++ + } + a.transport.SendRequest(rType.TypeURL(), resourcesToRequest) +} + func (a *authority) reportLoad() (*load.Store, func()) { - // An empty string means to report load to the same same used for ADS. There - // should never be a need to specify a string other than an empty string. If - // a different server is to be used, a different authority (controller) will - // be created. - return a.controller.ReportLoad("") + return a.transport.ReportLoad() } -func (a *authority) dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD { - return a.pubsub.Dump(t) +func (a *authority) dumpResources() map[string]map[string]xdsresource.UpdateWithMD { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + dump := make(map[string]map[string]xdsresource.UpdateWithMD) + for rType, resourceStates := range a.resources { + states := make(map[string]xdsresource.UpdateWithMD) + for name, state := range resourceStates { + var raw *anypb.Any + if state.cache != nil { + raw = state.cache.Raw() + } + states[name] = xdsresource.UpdateWithMD{ + MD: state.md, + Raw: raw, + } + } + dump[rType.TypeURL()] = states + } + return dump } diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 976b86fe34d7..332b31409134 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -34,7 +34,6 @@ type XDSClient interface { WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() - ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) // WatchResource uses xDS to discover the resource associated with the // provided resource name. 
The resource type implementation determines how @@ -50,11 +49,13 @@ type XDSClient interface { // delete the resource type specific watch APIs on this interface. WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) - DumpLDS() map[string]xdsresource.UpdateWithMD - DumpRDS() map[string]xdsresource.UpdateWithMD - DumpCDS() map[string]xdsresource.UpdateWithMD - DumpEDS() map[string]xdsresource.UpdateWithMD + // DumpResources returns the status of the xDS resources. Returns a map of + // resource type URLs to a map of resource names to resource state. + DumpResources() map[string]map[string]xdsresource.UpdateWithMD + + ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) BootstrapConfig() *bootstrap.Config + Close() } diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 4b2b8cfd39eb..b3aecb7fa5ac 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -69,8 +69,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i } c.logger = prefixLogger(c) - c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) - c.logger.Infof("Created") + c.logger.Infof("Created client to xDS management server: %s", config.XDSServer) return c, nil } diff --git a/xds/internal/xdsclient/clientimpl_authority.go b/xds/internal/xdsclient/clientimpl_authority.go index 623420ccc78f..2531b39472f5 100644 --- a/xds/internal/xdsclient/clientimpl_authority.go +++ b/xds/internal/xdsclient/clientimpl_authority.go @@ -20,30 +20,11 @@ package xdsclient import ( "errors" "fmt" - "time" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/controller" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) -type controllerInterface interface { - AddWatch(resourceType xdsresource.ResourceType, resourceName string) - RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) - ReportLoad(server string) (*load.Store, func()) - Close() -} - -var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (controllerInterface, error) { - return controller.New(config, pubsub, validator, logger, boff) -} - // findAuthority returns the authority for this name. If it doesn't already // exist, one will be created. // @@ -88,12 +69,13 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref fun // authority. // // unref() will be done when the watch is canceled. - a.ref() + a.refLocked() return a, func() { c.unrefAuthority(a) }, nil } -// newAuthorityLocked creates a new authority for the config. But before that, it -// checks the cache to see if an authority for this config already exists. +// newAuthorityLocked creates a new authority for the given config. If an +// authority for the given config exists in the cache, it is returned instead of +// creating a new one. // // The caller must take a reference of the returned authority before using, and // unref afterwards. @@ -121,23 +103,17 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth } // Make a new authority since there's no existing authority for this config. 
- nodeID := "" - if v3, ok := c.config.XDSServer.NodeProto.(*v3corepb.Node); ok { - nodeID = v3.GetId() - } else if v2, ok := c.config.XDSServer.NodeProto.(*v2corepb.Node); ok { - nodeID = v2.GetId() - } - ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, nodeID, c.logger)} - defer func() { - if retErr != nil { - ret.close() - } - }() - ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger, nil) + ret, err := newAuthority(authorityArgs{ + serverCfg: config, + bootstrapCfg: c.config, + serializer: c.serializer, + resourceTypeGetter: c.resourceTypes.get, + watchExpiryTimeout: c.watchExpiryTimeout, + logger: c.logger, + }) if err != nil { - return nil, err + return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) } - ret.controller = ctr // Add it to the cache, so it will be reused. c.authorities[configStr] = ret return ret, nil @@ -153,10 +129,10 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth func (c *clientImpl) unrefAuthority(a *authority) { c.authorityMu.Lock() defer c.authorityMu.Unlock() - if a.unref() > 0 { + if a.unrefLocked() > 0 { return } - configStr := a.config.String() + configStr := a.serverCfg.String() delete(c.authorities, configStr) c.idleAuthorities.Add(configStr, a, func() { a.close() diff --git a/xds/internal/xdsclient/clientimpl_dump.go b/xds/internal/xdsclient/clientimpl_dump.go index 69407d20cafd..b9d0499301a2 100644 --- a/xds/internal/xdsclient/clientimpl_dump.go +++ b/xds/internal/xdsclient/clientimpl_dump.go @@ -22,42 +22,32 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -func mergeMaps(maps []map[string]xdsresource.UpdateWithMD) map[string]xdsresource.UpdateWithMD { - ret := make(map[string]xdsresource.UpdateWithMD) - for _, m := range maps { - for k, v := range m { - ret[k] = v +func appendMaps(dst, src map[string]map[string]xdsresource.UpdateWithMD) { + // Iterate through the resource types. 
+ for rType, srcResources := range src { + // Lookup/create the resource type specific map in the destination. + dstResources := dst[rType] + if dstResources == nil { + dstResources = make(map[string]xdsresource.UpdateWithMD) + dst[rType] = dstResources + } + + // Iterate through the resources within the resource type in the source, + // and copy them over to the destination. + for name, update := range srcResources { + dstResources[name] = update } } - return ret } -func (c *clientImpl) dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD { +// DumpResources returns the status and contents of all xDS resources. +func (c *clientImpl) DumpResources() map[string]map[string]xdsresource.UpdateWithMD { c.authorityMu.Lock() defer c.authorityMu.Unlock() - maps := make([]map[string]xdsresource.UpdateWithMD, 0, len(c.authorities)) + dumps := make(map[string]map[string]xdsresource.UpdateWithMD) for _, a := range c.authorities { - maps = append(maps, a.dump(t)) + dump := a.dumpResources() + appendMaps(dumps, dump) } - return mergeMaps(maps) -} - -// DumpLDS returns the status and contents of LDS. -func (c *clientImpl) DumpLDS() map[string]xdsresource.UpdateWithMD { - return c.dump(xdsresource.ListenerResource) -} - -// DumpRDS returns the status and contents of RDS. -func (c *clientImpl) DumpRDS() map[string]xdsresource.UpdateWithMD { - return c.dump(xdsresource.RouteConfigResource) -} - -// DumpCDS returns the status and contents of CDS. -func (c *clientImpl) DumpCDS() map[string]xdsresource.UpdateWithMD { - return c.dump(xdsresource.ClusterResource) -} - -// DumpEDS returns the status and contents of EDS. 
-func (c *clientImpl) DumpEDS() map[string]xdsresource.UpdateWithMD { - return c.dump(xdsresource.EndpointsResource) + return dumps } diff --git a/xds/internal/xdsclient/clientimpl_loadreport.go b/xds/internal/xdsclient/clientimpl_loadreport.go index cba5afd454a7..dd0ae225e8d0 100644 --- a/xds/internal/xdsclient/clientimpl_loadreport.go +++ b/xds/internal/xdsclient/clientimpl_loadreport.go @@ -36,7 +36,7 @@ func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, fu return nil, func() {} } // Hold the ref before starting load reporting. - a.ref() + a.refLocked() store, cancelF := a.reportLoad() return store, func() { cancelF() diff --git a/xds/internal/xdsclient/clientimpl_validator.go b/xds/internal/xdsclient/clientimpl_validator.go deleted file mode 100644 index 50bdbe4e23f4..000000000000 --- a/xds/internal/xdsclient/clientimpl_validator.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package xdsclient - -import ( - "fmt" - - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error { - if fc == nil { - return nil - } - return c.securityConfigUpdateValidator(fc.SecurityCfg) -} - -func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error { - if sc == nil { - return nil - } - if sc.IdentityInstanceName != "" { - if _, ok := c.config.CertProviderConfigs[sc.IdentityInstanceName]; !ok { - return fmt.Errorf("identitiy certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) - } - } - if sc.RootInstanceName != "" { - if _, ok := c.config.CertProviderConfigs[sc.RootInstanceName]; !ok { - return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) - } - } - return nil -} - -func (c *clientImpl) updateValidator(u interface{}) error { - switch update := u.(type) { - case xdsresource.ListenerUpdate: - if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil { - return nil - } - return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator) - case xdsresource.ClusterUpdate: - return c.securityConfigUpdateValidator(update.SecurityCfg) - default: - // We currently invoke this update validation function only for LDS and - // CDS updates. In the future, if we wish to invoke it for other xDS - // updates, corresponding plumbing needs to be added to those unmarshal - // functions. 
- } - return nil -} diff --git a/xds/internal/xdsclient/clientimpl_watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go index 7095a8394487..05eea38f4376 100644 --- a/xds/internal/xdsclient/clientimpl_watchers.go +++ b/xds/internal/xdsclient/clientimpl_watchers.go @@ -25,46 +25,93 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// WatchListener uses LDS to discover information about the provided listener. +// This is only required temporarily, while we modify the +// clientImpl.WatchListener API to be implemented via the wrapper +// WatchListener() API which calls the WatchResource() API. +type listenerWatcher struct { + resourceName string + cb func(xdsresource.ListenerUpdate, error) +} + +func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { + l.cb(update.Resource, nil) +} + +func (l *listenerWatcher) OnError(err error) { + l.cb(xdsresource.ListenerUpdate{}, err) +} + +func (l *listenerWatcher) OnResourceDoesNotExist() { + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type Listener not found in received response", l.resourceName) + l.cb(xdsresource.ListenerUpdate{}, err) +} + +// WatchListener uses LDS to discover information about the Listener resource +// identified by resourceName. // // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
-func (c *clientImpl) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { - n := xdsresource.ParseName(serviceName) - a, unref, err := c.findAuthority(n) - if err != nil { - cb(xdsresource.ListenerUpdate{}, err) - return func() {} - } - cancelF := a.watchListener(n.String(), cb) - return func() { - cancelF() - unref() - } +func (c *clientImpl) WatchListener(resourceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { + watcher := &listenerWatcher{resourceName: resourceName, cb: cb} + return xdsresource.WatchListener(c, resourceName, watcher) +} + +// This is only required temporarily, while we modify the +// clientImpl.WatchRouteConfig API to be implemented via the wrapper +// WatchRouteConfig() API which calls the WatchResource() API. +type routeConfigWatcher struct { + resourceName string + cb func(xdsresource.RouteConfigUpdate, error) +} + +func (r *routeConfigWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { + r.cb(update.Resource, nil) } -// WatchRouteConfig starts a listener watcher for the service. +func (r *routeConfigWatcher) OnError(err error) { + r.cb(xdsresource.RouteConfigUpdate{}, err) +} + +func (r *routeConfigWatcher) OnResourceDoesNotExist() { + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type RouteConfiguration not found in received response", r.resourceName) + r.cb(xdsresource.RouteConfigUpdate{}, err) +} + +// WatchRouteConfig uses RDS to discover information about the +// RouteConfiguration resource identified by resourceName. // // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. 
-func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { - n := xdsresource.ParseName(routeName) - a, unref, err := c.findAuthority(n) - if err != nil { - cb(xdsresource.RouteConfigUpdate{}, err) - return func() {} - } - cancelF := a.watchRouteConfig(n.String(), cb) - return func() { - cancelF() - unref() - } +func (c *clientImpl) WatchRouteConfig(resourceName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { + watcher := &routeConfigWatcher{resourceName: resourceName, cb: cb} + return xdsresource.WatchRouteConfig(c, resourceName, watcher) +} + +// This is only required temporarily, while we modify the +// clientImpl.WatchCluster API to be implemented via the wrapper WatchCluster() +// API which calls the WatchResource() API. +type clusterWatcher struct { + resourceName string + cb func(xdsresource.ClusterUpdate, error) +} + +func (c *clusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData) { + c.cb(update.Resource, nil) +} + +func (c *clusterWatcher) OnError(err error) { + c.cb(xdsresource.ClusterUpdate{}, err) +} + +func (c *clusterWatcher) OnResourceDoesNotExist() { + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type Cluster not found in received response", c.resourceName) + c.cb(xdsresource.ClusterUpdate{}, err) } -// WatchCluster uses CDS to discover information about the provided -// clusterName. +// WatchCluster uses CDS to discover information about the Cluster resource +// identified by resourceName. // // WatchCluster can be called multiple times, with same or different // clusterNames. Each call will start an independent watcher for the resource. @@ -72,21 +119,34 @@ func (c *clientImpl) WatchRouteConfig(routeName string, cb func(xdsresource.Rout // Note that during race (e.g. 
an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { - n := xdsresource.ParseName(clusterName) - a, unref, err := c.findAuthority(n) - if err != nil { - cb(xdsresource.ClusterUpdate{}, err) - return func() {} - } - cancelF := a.watchCluster(n.String(), cb) - return func() { - cancelF() - unref() - } +func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { + watcher := &clusterWatcher{resourceName: resourceName, cb: cb} + return xdsresource.WatchCluster(c, resourceName, watcher) +} + +// This is only required temporarily, while we modify the +// clientImpl.WatchEndpoints API to be implemented via the wrapper +// WatchEndpoints() API which calls the WatchResource() API. +type endpointsWatcher struct { + resourceName string + cb func(xdsresource.EndpointsUpdate, error) +} + +func (c *endpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) { + c.cb(update.Resource, nil) +} + +func (c *endpointsWatcher) OnError(err error) { + c.cb(xdsresource.EndpointsUpdate{}, err) } -// WatchEndpoints uses EDS to discover endpoints in the provided clusterName. +func (c *endpointsWatcher) OnResourceDoesNotExist() { + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type Endpoints not found in received response", c.resourceName) + c.cb(xdsresource.EndpointsUpdate{}, err) +} + +// WatchEndpoints uses EDS to discover information about the +// ClusterLoadAssignment resource identified by resourceName. // // WatchEndpoints can be called multiple times, with same or different // clusterNames. Each call will start an independent watcher for the resource. 
@@ -94,18 +154,9 @@ func (c *clientImpl) WatchCluster(clusterName string, cb func(xdsresource.Cluste // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { - n := xdsresource.ParseName(clusterName) - a, unref, err := c.findAuthority(n) - if err != nil { - cb(xdsresource.EndpointsUpdate{}, err) - return func() {} - } - cancelF := a.watchEndpoints(n.String(), cb) - return func() { - cancelF() - unref() - } +func (c *clientImpl) WatchEndpoints(resourceName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { + watcher := &endpointsWatcher{resourceName: resourceName, cb: cb} + return xdsresource.WatchEndpoints(c, resourceName, watcher) } // WatchResource uses xDS to discover the resource associated with the provided @@ -126,6 +177,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, } if err := c.resourceTypes.maybeRegister(rType); err != nil { + logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeEnum().String(), resourceName) c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) return func() {} } @@ -138,7 +190,21 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, // - Call the watchResource() method on the authority. // - Return a cancel function to cancel the watch on the authority and to // release the reference. - return func() {} + + // TODO: Make ParseName return an error if parsing fails, and + // schedule the OnError callback in that case. 
+ n := xdsresource.ParseName(resourceName) + a, unref, err := c.findAuthority(n) + if err != nil { + logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeEnum().String(), resourceName, n.Authority) + c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + return func() {} + } + cancelF := a.watchResource(rType, n.String(), watcher) + return func() { + cancelF() + unref() + } } // A registry of xdsresource.Type implementations indexed by their corresponding @@ -153,21 +219,21 @@ func newResourceTypeRegistry() *resourceTypeRegistry { return &resourceTypeRegistry{types: make(map[string]xdsresource.Type)} } +func (r *resourceTypeRegistry) get(url string) xdsresource.Type { + r.mu.Lock() + defer r.mu.Unlock() + return r.types[url] +} + func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { r.mu.Lock() defer r.mu.Unlock() - urls := []string{rType.V2TypeURL(), rType.V3TypeURL()} - for _, u := range urls { - if u == "" { - // Silently ignore unsupported versions of the resource. - continue - } - typ, ok := r.types[u] - if ok && typ != rType { - return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeEnum()) - } - r.types[u] = rType + url := rType.TypeURL() + typ, ok := r.types[url] + if ok && typ != rType { + return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeEnum()) } + r.types[url] = rType return nil } diff --git a/xds/internal/xdsclient/controller/controller.go b/xds/internal/xdsclient/controller/controller.go deleted file mode 100644 index 520da06a103d..000000000000 --- a/xds/internal/xdsclient/controller/controller.go +++ /dev/null @@ -1,201 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package controller contains implementation to connect to the control plane. -// Including starting the ClientConn, starting the xDS stream, and -// sending/receiving messages. -// -// All the messages are parsed by the resource package (e.g. -// UnmarshalListener()) and sent to the Pubsub watchers. -package controller - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/controller/version" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// Controller manages the connection and stream to the control plane. -// -// It keeps track of what resources are being watched, and send new requests -// when new watches are added. -// -// It takes a pubsub (as an interface) as input. When a response is received, -// it's parsed, and the updates are sent to the pubsub. -type Controller struct { - config *bootstrap.ServerConfig - updateHandler pubsub.UpdateHandler - updateValidator xdsresource.UpdateValidatorFunc - logger *grpclog.PrefixLogger - - cc *grpc.ClientConn // Connection to the management server. 
- vClient version.VersionedClient - stopRunGoroutine context.CancelFunc - // The run goroutine closes this channel when it exits, and we block on this - // channel in Close(). This ensures that when Close() returns, the - // underlying transport is closed, and we can guarantee that we will not - // process any subsequent responses from the management server. - runDoneCh chan struct{} - - backoff func(int) time.Duration - streamCh chan grpc.ClientStream - sendCh *buffer.Unbounded - - mu sync.Mutex - // Message specific watch infos, protected by the above mutex. These are - // written to, after successfully reading from the update channel, and are - // read from when recovering from a broken stream to resend the xDS - // messages. When the user of this client object cancels a watch call, - // these are set to nil. All accesses to the map protected and any value - // inside the map should be protected with the above mutex. - watchMap map[xdsresource.ResourceType]map[string]bool - // versionMap contains the version that was acked (the version in the ack - // request that was sent on wire). The key is rType, the value is the - // version string, because the versions for different resource types should - // be independent. - versionMap map[xdsresource.ResourceType]string - // nonceMap contains the nonce from the most recent received response. - nonceMap map[xdsresource.ResourceType]string - closed bool - - // Changes to map lrsClients and the lrsClient inside the map need to be - // protected by lrsMu. - // - // TODO: after LRS refactoring, each controller should only manage the LRS - // stream to its server. LRS streams to other servers should be managed by - // other controllers. - lrsMu sync.Mutex - lrsClients map[string]*lrsClient -} - -var grpcDial = grpc.Dial - -// SetGRPCDial sets the dialer for the controller. The dial can be used to -// manipulate the dial options or change the target if needed. 
-// The SetGRPCDial must be called before gRPC initialization to make sure it -// affects all the controllers created. -// To reset any dialer set, pass in grpc.Dial as the parameter. -func SetGRPCDial(dialer func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error)) { - grpcDial = dialer -} - -// New creates a new controller. -func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (_ *Controller, retErr error) { - switch { - case config == nil: - return nil, errors.New("xds: no xds_server provided") - case config.ServerURI == "": - return nil, errors.New("xds: no xds_server name provided in options") - case config.Creds == nil: - return nil, errors.New("xds: no credentials provided in options") - case config.NodeProto == nil: - return nil, errors.New("xds: no node_proto provided in options") - } - - dopts := []grpc.DialOption{ - config.Creds, - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 5 * time.Minute, - Timeout: 20 * time.Second, - }), - } - - if boff == nil { - boff = backoff.DefaultExponential.Backoff - } - ret := &Controller{ - config: config, - updateValidator: validator, - updateHandler: updateHandler, - runDoneCh: make(chan struct{}), - - backoff: boff, - streamCh: make(chan grpc.ClientStream, 1), - sendCh: buffer.NewUnbounded(), - watchMap: make(map[xdsresource.ResourceType]map[string]bool), - versionMap: make(map[xdsresource.ResourceType]string), - nonceMap: make(map[xdsresource.ResourceType]string), - - lrsClients: make(map[string]*lrsClient), - } - - defer func() { - if retErr != nil { - ret.Close() - } - }() - - cc, err := grpcDial(config.ServerURI, dopts...) - if err != nil { - // An error from a non-blocking dial indicates something serious. 
- return nil, fmt.Errorf("xds: failed to dial control plane {%s}: %v", config.ServerURI, err) - } - ret.cc = cc - - builder := version.GetAPIClientBuilder(config.TransportAPI) - if builder == nil { - return nil, fmt.Errorf("no client builder for xDS API version: %v", config.TransportAPI) - } - apiClient, err := builder(version.BuildOptions{NodeProto: config.NodeProto, Logger: logger}) - if err != nil { - return nil, err - } - ret.vClient = apiClient - - ctx, cancel := context.WithCancel(context.Background()) - ret.stopRunGoroutine = cancel - go ret.run(ctx) - - return ret, nil -} - -// Close closes the controller. -func (t *Controller) Close() { - t.mu.Lock() - if t.closed { - t.mu.Unlock() - return - } - t.closed = true - t.mu.Unlock() - - // Note that Close needs to check for nils even if some of them are always - // set in the constructor. This is because the constructor defers Close() in - // error cases, and the fields might not be set when the error happens. - if t.stopRunGoroutine != nil { - t.stopRunGoroutine() - } - if t.cc != nil { - t.cc.Close() - } - // Wait on the run goroutine to be done only if it was started. - if t.stopRunGoroutine != nil { - <-t.runDoneCh - } -} diff --git a/xds/internal/xdsclient/controller/controller_test.go b/xds/internal/xdsclient/controller/controller_test.go deleted file mode 100644 index 599afb3a3e94..000000000000 --- a/xds/internal/xdsclient/controller/controller_test.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package controller - -import ( - "testing" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" -) - -const testXDSServer = "xds-server" - -// noopUpdateHandler ignores all updates. It's to be used in tests where the -// updates don't matter. To avoid potential nil panic. -var noopUpdateHandler = &testUpdateReceiver{ - f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) {}, -} - -// TestNew covers that New() returns an error if the input *ServerConfig -// contains invalid content. -func (s) TestNew(t *testing.T) { - tests := []struct { - name string - config *bootstrap.ServerConfig - wantErr bool - }{ - { - name: "empty-opts", - config: &bootstrap.ServerConfig{}, - wantErr: true, - }, - { - name: "empty-balancer-name", - config: &bootstrap.ServerConfig{ - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV2, - }, - wantErr: true, - }, - { - name: "empty-dial-creds", - config: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - NodeProto: testutils.EmptyNodeProtoV2, - }, - wantErr: true, - }, - { - name: "empty-node-proto", - config: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, - wantErr: true, - }, - { - name: "node-proto-version-mismatch", - config: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV2, - NodeProto: testutils.EmptyNodeProtoV3, - }, - wantErr: true, - }, - { - name: "happy-case", - config: 
&bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV2, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c, err := New(test.config, noopUpdateHandler, nil, nil, nil) // Only testing the config, other inputs are left as nil. - defer func() { - if c != nil { - c.Close() - } - }() - if (err != nil) != test.wantErr { - t.Fatalf("New(%+v) = %v, wantErr: %v", test.config, err, test.wantErr) - } - }) - } -} - -func (s) TestNewWithGRPCDial(t *testing.T) { - config := &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV2, - } - - customDialerCalled := false - customDialer := func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - customDialerCalled = true - return grpc.Dial(target, opts...) - } - - // Set the dialer and make sure it is called. - SetGRPCDial(customDialer) - c, err := New(config, noopUpdateHandler, nil, nil, nil) - if err != nil { - t.Fatalf("New(%+v) = %v, want no error", config, err) - } - if c != nil { - c.Close() - } - - if !customDialerCalled { - t.Errorf("New(%+v) custom dialer called = false, want true", config) - } - customDialerCalled = false - - // Reset the dialer and make sure it is not called. 
- SetGRPCDial(grpc.Dial) - c, err = New(config, noopUpdateHandler, nil, nil, nil) - defer func() { - if c != nil { - c.Close() - } - }() - if err != nil { - t.Fatalf("New(%+v) = %v, want no error", config, err) - } - - if customDialerCalled { - t.Errorf("New(%+v) interceptor called = true, want false", config) - } -} diff --git a/xds/internal/xdsclient/controller/loadreport.go b/xds/internal/xdsclient/controller/loadreport.go deleted file mode 100644 index a28cc95dc6f6..000000000000 --- a/xds/internal/xdsclient/controller/loadreport.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package controller - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal/xdsclient/controller/version" - "google.golang.org/grpc/xds/internal/xdsclient/load" -) - -// ReportLoad starts an load reporting stream to the given server. If the server -// is not an empty string, and is different from the management server, a new -// ClientConn will be created. -// -// The same options used for creating the Client will be used (including -// NodeProto, and dial options if necessary). -// -// It returns a Store for the user to report loads, a function to cancel the -// load reporting stream. -// -// TODO(xdsfed): LRS refactor, delete the parameter of this function, and -// cleanup the multiple LRS ClientConn code. 
Each controller should have one -// ClientConn to the authority it's created for, all LRS streams (and ADS -// streams) in this controller should all share that ClientConn. -func (c *Controller) ReportLoad(server string) (*load.Store, func()) { - c.lrsMu.Lock() - defer c.lrsMu.Unlock() - - // If there's already a client to this server, use it. Otherwise, create - // one. - lrsC, ok := c.lrsClients[server] - if !ok { - lrsC = newLRSClient(c, server) - c.lrsClients[server] = lrsC - } - - store := lrsC.ref() - return store, func() { - // This is a callback, need to hold lrsMu. - c.lrsMu.Lock() - defer c.lrsMu.Unlock() - if lrsC.unRef() { - // Delete the lrsClient from map if this is the last reference. - delete(c.lrsClients, server) - } - } -} - -// lrsClient maps to one lrsServer. It contains: -// - a ClientConn to this server (only if it's different from the management -// server) -// - a load.Store that contains loads only for this server -type lrsClient struct { - parent *Controller - server string - - cc *grpc.ClientConn // nil if the server is same as the management server - refCount int - cancelStream func() - loadStore *load.Store -} - -// newLRSClient creates a new LRS stream to the server. -func newLRSClient(parent *Controller, server string) *lrsClient { - return &lrsClient{ - parent: parent, - server: server, - refCount: 0, - } -} - -// ref increments the refCount. If this is the first ref, it starts the LRS stream. -// -// Not thread-safe, caller needs to synchronize. -func (lrsC *lrsClient) ref() *load.Store { - lrsC.refCount++ - if lrsC.refCount == 1 { - lrsC.startStream() - } - return lrsC.loadStore -} - -// unRef decrements the refCount, and closes the stream if refCount reaches 0 -// (and close the cc if cc is not xDS cc). It returns whether refCount reached 0 -// after this call. -// -// Not thread-safe, caller needs to synchronize. 
-func (lrsC *lrsClient) unRef() (closed bool) { - lrsC.refCount-- - if lrsC.refCount != 0 { - return false - } - lrsC.parent.logger.Infof("Stopping load report to server: %s", lrsC.server) - lrsC.cancelStream() - if lrsC.cc != nil { - lrsC.cc.Close() - } - return true -} - -// startStream starts the LRS stream to the server. If server is not the same -// management server from the parent, it also creates a ClientConn. -func (lrsC *lrsClient) startStream() { - var cc *grpc.ClientConn - - lrsC.parent.logger.Infof("Starting load report to server: %s", lrsC.server) - if lrsC.server == "" || lrsC.server == lrsC.parent.config.ServerURI { - // Reuse the xDS client if server is the same. - cc = lrsC.parent.cc - } else { - lrsC.parent.logger.Infof("LRS server is different from management server, starting a new ClientConn") - ccNew, err := grpc.Dial(lrsC.server, lrsC.parent.config.Creds) - if err != nil { - // An error from a non-blocking dial indicates something serious. - lrsC.parent.logger.Infof("xds: failed to dial load report server {%s}: %v", lrsC.server, err) - return - } - cc = ccNew - lrsC.cc = ccNew - } - - var ctx context.Context - ctx, lrsC.cancelStream = context.WithCancel(context.Background()) - - // Create the store and stream. - lrsC.loadStore = load.NewStore() - go lrsC.parent.reportLoad(ctx, cc, version.LoadReportingOptions{LoadStore: lrsC.loadStore}) -} diff --git a/xds/internal/xdsclient/controller/transport.go b/xds/internal/xdsclient/controller/transport.go deleted file mode 100644 index 526aefae29b0..000000000000 --- a/xds/internal/xdsclient/controller/transport.go +++ /dev/null @@ -1,443 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package controller - -import ( - "context" - "fmt" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc" - controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" - xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// AddWatch adds a watch for an xDS resource given its type and name. -func (t *Controller) AddWatch(rType xdsresource.ResourceType, resourceName string) { - t.sendCh.Put(&watchAction{ - rType: rType, - remove: false, - resource: resourceName, - }) -} - -// RemoveWatch cancels an already registered watch for an xDS resource -// given its type and name. -func (t *Controller) RemoveWatch(rType xdsresource.ResourceType, resourceName string) { - t.sendCh.Put(&watchAction{ - rType: rType, - remove: true, - resource: resourceName, - }) -} - -// run starts an ADS stream (and backs off exponentially, if the previous -// stream failed without receiving a single reply) and runs the sender and -// receiver routines to send and receive data from the stream respectively. -func (t *Controller) run(ctx context.Context) { - sendDoneCh := make(chan struct{}) - defer func() { - <-sendDoneCh - close(t.runDoneCh) - }() - go t.send(ctx, sendDoneCh) - - // TODO: start a goroutine monitoring ClientConn's connectivity state, and - // report error (and log) when stats is transient failure. 
- - retries := 0 - lastStreamStartTime := time.Time{} - for ctx.Err() == nil { - dur := time.Until(lastStreamStartTime.Add(t.backoff(retries))) - if dur > 0 { - timer := time.NewTimer(dur) - select { - case <-timer.C: - case <-ctx.Done(): - timer.Stop() - return - } - } - - retries++ - lastStreamStartTime = time.Now() - stream, err := t.vClient.NewStream(ctx, t.cc) - if err != nil { - t.updateHandler.NewConnectionError(err) - t.logger.Warningf("xds: ADS stream creation failed: %v", err) - continue - } - t.logger.Infof("ADS stream created") - - select { - case <-t.streamCh: - default: - } - t.streamCh <- stream - if t.recv(stream) { - retries = 0 - } - } -} - -// send is a separate goroutine for sending watch requests on the xds stream. -// -// It watches the stream channel for new streams, and the request channel for -// new requests to send on the stream. -// -// For each new request (watchAction), it's -// - processed and added to the watch map -// so, resend will pick them up when there are new streams -// - sent on the current stream if there's one -// the current stream is cleared when any send on it fails -// -// For each new stream, all the existing requests will be resent. -// -// Note that this goroutine doesn't do anything to the old stream when there's a -// new one. In fact, there should be only one stream in progress, and new one -// should only be created when the old one fails (recv returns an error). -func (t *Controller) send(ctx context.Context, doneCh chan struct{}) { - defer func() { close(doneCh) }() - - var stream grpc.ClientStream - for { - select { - case <-ctx.Done(): - return - case stream = <-t.streamCh: - if !t.sendExisting(stream) { - // send failed, clear the current stream. 
- stream = nil - } - case u := <-t.sendCh.Get(): - t.sendCh.Load() - - var ( - target []string - rType xdsresource.ResourceType - version, nonce, errMsg string - send bool - ) - switch update := u.(type) { - case *watchAction: - target, rType, version, nonce = t.processWatchInfo(update) - case *ackAction: - target, rType, version, nonce, send = t.processAckInfo(update, stream) - if !send { - continue - } - errMsg = update.errMsg - } - if stream == nil { - // There's no stream yet. Skip the request. This request - // will be resent to the new streams. If no stream is - // created, the watcher will timeout (same as server not - // sending response back). - continue - } - if err := t.vClient.SendRequest(stream, target, rType, version, nonce, errMsg); err != nil { - t.logger.Warningf("ADS request for {target: %q, type: %v, version: %q, nonce: %q} failed: %v", target, rType, version, nonce, err) - // send failed, clear the current stream. - stream = nil - } - } - } -} - -// sendExisting sends out xDS requests for registered watchers when recovering -// from a broken stream. -// -// We call stream.Send() here with the lock being held. It should be OK to do -// that here because the stream has just started and Send() usually returns -// quickly (once it pushes the message onto the transport layer) and is only -// ever blocked if we don't have enough flow control quota. -func (t *Controller) sendExisting(stream grpc.ClientStream) bool { - t.mu.Lock() - defer t.mu.Unlock() - - // Reset only the nonce when the stream restarts. - // - // xDS spec says the following. See section: - // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version - // - // Note that the version for a resource type is not a property of an - // individual xDS stream but rather a property of the resources themselves. 
If - // the stream becomes broken and the client creates a new stream, the client’s - // initial request on the new stream should indicate the most recent version - // seen by the client on the previous stream - t.nonceMap = make(map[xdsresource.ResourceType]string) - - for rType, s := range t.watchMap { - if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, t.versionMap[rType], "", ""); err != nil { - t.logger.Warningf("ADS request failed: %v", err) - return false - } - } - - return true -} - -// recv receives xDS responses on the provided ADS stream and branches out to -// message specific handlers. -func (t *Controller) recv(stream grpc.ClientStream) bool { - msgReceived := false - for { - resp, err := t.vClient.RecvResponse(stream) - if err != nil { - t.updateHandler.NewConnectionError(err) - t.logger.Warningf("ADS stream is closed with error: %v", err) - return msgReceived - } - msgReceived = true - - rType, version, nonce, err := t.handleResponse(resp) - if e, ok := err.(xdsresourceversion.ErrResourceTypeUnsupported); ok { - t.logger.Warningf("%s", e.ErrStr) - continue - } - if err != nil { - t.sendCh.Put(&ackAction{ - rType: rType, - version: "", - nonce: nonce, - errMsg: err.Error(), - stream: stream, - }) - t.logger.Warningf("Sending NACK for response type: %v, version: %v, nonce: %v, reason: %v", rType, version, nonce, err) - continue - } - t.sendCh.Put(&ackAction{ - rType: rType, - version: version, - nonce: nonce, - stream: stream, - }) - t.logger.Infof("Sending ACK for response type: %v, version: %v, nonce: %v", rType, version, nonce) - } -} - -func (t *Controller) handleResponse(resp proto.Message) (xdsresource.ResourceType, string, string, error) { - rType, resource, version, nonce, err := t.vClient.ParseResponse(resp) - if err != nil { - return rType, version, nonce, err - } - opts := &xdsresource.UnmarshalOptions{ - Version: version, - Resources: resource, - Logger: t.logger, - UpdateValidator: t.updateValidator, - } - var md 
xdsresource.UpdateMetadata - switch rType { - case xdsresource.ListenerResource: - var update map[string]xdsresource.ListenerUpdateErrTuple - update, md, err = xdsresource.UnmarshalListener(opts) - t.updateHandler.NewListeners(update, md) - case xdsresource.RouteConfigResource: - var update map[string]xdsresource.RouteConfigUpdateErrTuple - update, md, err = xdsresource.UnmarshalRouteConfig(opts) - t.updateHandler.NewRouteConfigs(update, md) - case xdsresource.ClusterResource: - var update map[string]xdsresource.ClusterUpdateErrTuple - update, md, err = xdsresource.UnmarshalCluster(opts) - t.updateHandler.NewClusters(update, md) - case xdsresource.EndpointsResource: - var update map[string]xdsresource.EndpointsUpdateErrTuple - update, md, err = xdsresource.UnmarshalEndpoints(opts) - t.updateHandler.NewEndpoints(update, md) - default: - return rType, "", "", xdsresourceversion.ErrResourceTypeUnsupported{ - ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", rType), - } - } - return rType, version, nonce, err -} - -func mapToSlice(m map[string]bool) []string { - ret := make([]string, 0, len(m)) - for i := range m { - ret = append(ret, i) - } - return ret -} - -type watchAction struct { - rType xdsresource.ResourceType - remove bool // Whether this is to remove watch for the resource. - resource string -} - -// processWatchInfo pulls the fields needed by the request from a watchAction. -// -// It also updates the watch map. 
-func (t *Controller) processWatchInfo(w *watchAction) (target []string, rType xdsresource.ResourceType, ver, nonce string) { - t.mu.Lock() - defer t.mu.Unlock() - - var current map[string]bool - current, ok := t.watchMap[w.rType] - if !ok { - current = make(map[string]bool) - t.watchMap[w.rType] = current - } - - if w.remove { - delete(current, w.resource) - if len(current) == 0 { - delete(t.watchMap, w.rType) - } - } else { - current[w.resource] = true - } - - rType = w.rType - target = mapToSlice(current) - // We don't reset version or nonce when a new watch is started. The version - // and nonce from previous response are carried by the request. Only the nonce - // is reset when the stream is recreated. - ver = t.versionMap[rType] - nonce = t.nonceMap[rType] - return target, rType, ver, nonce -} - -type ackAction struct { - rType xdsresource.ResourceType - version string // NACK if version is an empty string. - nonce string - errMsg string // Empty unless it's a NACK. - // ACK/NACK are tagged with the stream it's for. When the stream is down, - // all the ACK/NACK for this stream will be dropped, and the version/nonce - // won't be updated. - stream grpc.ClientStream -} - -// processAckInfo pulls the fields needed by the ack request from a ackAction. -// -// If no active watch is found for this ack, it returns false for send. -func (t *Controller) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType xdsresource.ResourceType, version, nonce string, send bool) { - if ack.stream != stream { - // If ACK's stream isn't the current sending stream, this means the ACK - // was pushed to queue before the old stream broke, and a new stream has - // been started since. Return immediately here so we don't update the - // nonce for the new stream. 
- return nil, xdsresource.UnknownResource, "", "", false - } - rType = ack.rType - - t.mu.Lock() - defer t.mu.Unlock() - - // Update the nonce no matter if we are going to send the ACK request on - // wire. We may not send the request if the watch is canceled. But the nonce - // needs to be updated so the next request will have the right nonce. - nonce = ack.nonce - t.nonceMap[rType] = nonce - - s, ok := t.watchMap[rType] - if !ok || len(s) == 0 { - // We don't send the request ack if there's no active watch (this can be - // either the server sends responses before any request, or the watch is - // canceled while the ackAction is in queue), because there's no resource - // name. And if we send a request with empty resource name list, the - // server may treat it as a wild card and send us everything. - return nil, xdsresource.UnknownResource, "", "", false - } - send = true - target = mapToSlice(s) - - version = ack.version - if version == "" { - // This is a nack, get the previous acked version. - version = t.versionMap[rType] - // version will still be an empty string if rType isn't - // found in versionMap, this can happen if there wasn't any ack - // before. - } else { - t.versionMap[rType] = version - } - return target, rType, version, nonce, send -} - -// reportLoad starts an LRS stream to report load data to the management server. -// It blocks until the context is cancelled. 
-func (t *Controller) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts controllerversion.LoadReportingOptions) { - retries := 0 - lastStreamStartTime := time.Time{} - for ctx.Err() == nil { - dur := time.Until(lastStreamStartTime.Add(t.backoff(retries))) - if dur > 0 { - timer := time.NewTimer(dur) - select { - case <-timer.C: - case <-ctx.Done(): - timer.Stop() - return - } - } - - retries++ - lastStreamStartTime = time.Now() - func() { - // streamCtx is created and canceled in case we terminate the stream - // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring - // goroutine. - streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - stream, err := t.vClient.NewLoadStatsStream(streamCtx, cc) - if err != nil { - t.logger.Warningf("lrs: failed to create stream: %v", err) - return - } - t.logger.Infof("lrs: created LRS stream") - - if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil { - t.logger.Warningf("lrs: failed to send first request: %v", err) - return - } - - clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream) - if err != nil { - t.logger.Warningf("lrs: error from stream: %v", err) - return - } - - retries = 0 - t.sendLoads(streamCtx, stream, opts.LoadStore, clusters, interval) - }() - } -} - -func (t *Controller) sendLoads(ctx context.Context, stream grpc.ClientStream, store *load.Store, clusterNames []string, interval time.Duration) { - tick := time.NewTicker(interval) - defer tick.Stop() - for { - select { - case <-tick.C: - case <-ctx.Done(): - return - } - if err := t.vClient.SendLoadStatsRequest(stream, store.Stats(clusterNames)); err != nil { - t.logger.Warningf("lrs: error from stream: %v", err) - return - } - } -} diff --git a/xds/internal/xdsclient/controller/v2_ack_test.go b/xds/internal/xdsclient/controller/v2_ack_test.go deleted file mode 100644 index 6680de7911b2..000000000000 --- a/xds/internal/xdsclient/controller/v2_ack_test.go +++ /dev/null @@ -1,483 +0,0 @@ -/* - * - * 
Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package controller - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" -) - -const ( - defaultTestTimeout = 5 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond -) - -func startXDSV2Client(t *testing.T, controlPlaneAddr string) (v2c *Controller, cbLDS, cbRDS, cbCDS, cbEDS *testutils.Channel, cleanup func()) { - cbLDS = testutils.NewChannel() - cbRDS = testutils.NewChannel() - cbCDS = testutils.NewChannel() - cbEDS = testutils.NewChannel() - v2c, err := newTestController(&testUpdateReceiver{ - f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - t.Logf("Received %v callback with {%+v}", rType, d) - switch rType { - case xdsresource.ListenerResource: - if _, ok := d[goodLDSTarget1]; ok { - cbLDS.Send(struct{}{}) - } - case xdsresource.RouteConfigResource: - if _, ok := d[goodRouteName1]; ok { - 
cbRDS.Send(struct{}{}) - } - case xdsresource.ClusterResource: - if _, ok := d[goodClusterName1]; ok { - cbCDS.Send(struct{}{}) - } - case xdsresource.EndpointsResource: - if _, ok := d[goodEDSName]; ok { - cbEDS.Send(struct{}{}) - } - } - }, - }, controlPlaneAddr, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - t.Log("Started xds client...") - return v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2c.Close -} - -// compareXDSRequest reads requests from channel, compare it with want. -func compareXDSRequest(ctx context.Context, ch *testutils.Channel, want *xdspb.DiscoveryRequest, ver, nonce string, wantErr bool) error { - val, err := ch.Receive(ctx) - if err != nil { - return err - } - req := val.(*fakeserver.Request) - if req.Err != nil { - return fmt.Errorf("unexpected error from request: %v", req.Err) - } - - xdsReq := req.Req.(*xdspb.DiscoveryRequest) - if (xdsReq.ErrorDetail != nil) != wantErr { - return fmt.Errorf("received request with error details: %v, wantErr: %v", xdsReq.ErrorDetail, wantErr) - } - // All NACK request.ErrorDetails have hardcoded status code InvalidArguments. - if xdsReq.ErrorDetail != nil && xdsReq.ErrorDetail.Code != int32(codes.InvalidArgument) { - return fmt.Errorf("received request with error details: %v, want status with code: %v", xdsReq.ErrorDetail, codes.InvalidArgument) - } - - xdsReq.ErrorDetail = nil // Clear the error details field before comparing. 
- wantClone := proto.Clone(want).(*xdspb.DiscoveryRequest) - wantClone.VersionInfo = ver - wantClone.ResponseNonce = nonce - if !cmp.Equal(xdsReq, wantClone, cmp.Comparer(proto.Equal)) { - return fmt.Errorf("received request different from want, diff: %s", cmp.Diff(req.Req, wantClone, cmp.Comparer(proto.Equal))) - } - return nil -} - -func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion *xdspb.DiscoveryResponse, ver int) (nonce string) { - respToSend := proto.Clone(respWithoutVersion).(*xdspb.DiscoveryResponse) - respToSend.VersionInfo = strconv.Itoa(ver) - nonce = strconv.Itoa(int(time.Now().UnixNano())) - respToSend.Nonce = nonce - ch <- &fakeserver.Response{Resp: respToSend} - return -} - -// startXDS calls watch to send the first request. It then sends a good response -// and checks for ack. -func startXDS(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, v2c *Controller, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) { - nameToWatch := "" - switch rType { - case xdsresource.ListenerResource: - nameToWatch = goodLDSTarget1 - case xdsresource.RouteConfigResource: - nameToWatch = goodRouteName1 - case xdsresource.ClusterResource: - nameToWatch = goodClusterName1 - case xdsresource.EndpointsResource: - nameToWatch = goodEDSName - } - v2c.AddWatch(rType, nameToWatch) - - if err := compareXDSRequest(ctx, reqChan, req, preVersion, preNonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", rType, err) - } - t.Logf("FakeServer received %v request...", rType) -} - -// sendGoodResp sends the good response, with the given version, and a random -// nonce. -// -// It also waits and checks that the ack request contains the given version, and -// the generated nonce. 
-func sendGoodResp(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, fakeServer *fakeserver.Server, ver int, goodResp *xdspb.DiscoveryResponse, wantReq *xdspb.DiscoveryRequest, callbackCh *testutils.Channel) (string, error) { - nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodResp, ver) - t.Logf("Good %v response pushed to fakeServer...", rType) - - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, wantReq, strconv.Itoa(ver), nonce, false); err != nil { - return "", fmt.Errorf("failed to receive %v request: %v", rType, err) - } - t.Logf("Good %v response acked", rType) - - if _, err := callbackCh.Receive(ctx); err != nil { - return "", fmt.Errorf("timeout when expecting %v update", rType) - } - t.Logf("Good %v response callback executed", rType) - return nonce, nil -} - -// sendBadResp sends a bad response with the given version. This response will -// be nacked, so we expect a request with the previous version (version-1). -// -// But the nonce in request should be the new nonce. 
-func sendBadResp(ctx context.Context, t *testing.T, rType xdsresource.ResourceType, fakeServer *fakeserver.Server, ver int, wantReq *xdspb.DiscoveryRequest) error { - var typeURL string - switch rType { - case xdsresource.ListenerResource: - typeURL = version.V2ListenerURL - case xdsresource.RouteConfigResource: - typeURL = version.V2RouteConfigURL - case xdsresource.ClusterResource: - typeURL = version.V2ClusterURL - case xdsresource.EndpointsResource: - typeURL = version.V2EndpointsURL - } - nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{{}}, - TypeUrl: typeURL, - }, ver) - t.Logf("Bad %v response pushed to fakeServer...", rType) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, wantReq, strconv.Itoa(ver-1), nonce, true); err != nil { - return fmt.Errorf("failed to receive %v request: %v", rType, err) - } - t.Logf("Bad %v response nacked", rType) - return nil -} - -// TestV2ClientAck verifies that valid responses are acked, and invalid ones -// are nacked. -// -// This test also verifies the version for different types are independent. -func (s) TestV2ClientAck(t *testing.T) { - var ( - versionLDS = 1000 - versionRDS = 2000 - versionCDS = 3000 - versionEDS = 4000 - ) - - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2cCleanup := startXDSV2Client(t, fakeServer.Address) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start the watch, send a good response, and check for ack. 
- startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { - t.Fatal(err) - } - versionLDS++ - startXDS(ctx, t, xdsresource.RouteConfigResource, v2c, fakeServer.XDSRequestChan, goodRDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsresource.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { - t.Fatal(err) - } - versionRDS++ - startXDS(ctx, t, xdsresource.ClusterResource, v2c, fakeServer.XDSRequestChan, goodCDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ - startXDS(ctx, t, xdsresource.EndpointsResource, v2c, fakeServer.XDSRequestChan, goodEDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsresource.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { - t.Fatal(err) - } - versionEDS++ - - // Send a bad response, and check for nack. - if err := sendBadResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSRequest); err != nil { - t.Fatal(err) - } - versionLDS++ - if err := sendBadResp(ctx, t, xdsresource.RouteConfigResource, fakeServer, versionRDS, goodRDSRequest); err != nil { - t.Fatal(err) - } - versionRDS++ - if err := sendBadResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { - t.Fatal(err) - } - versionCDS++ - if err := sendBadResp(ctx, t, xdsresource.EndpointsResource, fakeServer, versionEDS, goodEDSRequest); err != nil { - t.Fatal(err) - } - versionEDS++ - - // send another good response, and check for ack, with the new version. 
- if _, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { - t.Fatal(err) - } - versionLDS++ - if _, err := sendGoodResp(ctx, t, xdsresource.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { - t.Fatal(err) - } - versionRDS++ - if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ - if _, err := sendGoodResp(ctx, t, xdsresource.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { - t.Fatal(err) - } - versionEDS++ -} - -// Test when the first response is invalid, and is nacked, the nack requests -// should have an empty version string. -func (s) TestV2ClientAckFirstIsNack(t *testing.T) { - var versionLDS = 1000 - - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start the watch, send a good response, and check for ack. - startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - - nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{{}}, - TypeUrl: version.V2ListenerURL, - }, versionLDS) - t.Logf("Bad response pushed to fakeServer...") - - // The expected version string is an empty string, because this is the first - // response, and it's nacked (so there's no previous ack version). 
- if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodLDSRequest, "", nonce, true); err != nil { - t.Errorf("Failed to receive request: %v", err) - } - t.Logf("Bad response nacked") - versionLDS++ - - sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) - versionLDS++ -} - -// Test when a nack is sent after a new watch, we nack with the previous acked -// version (instead of resetting to empty string). -func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) { - var versionLDS = 1000 - - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start the watch, send a good response, and check for ack. - startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - nonce, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) - if err != nil { - t.Fatal(err) - } - // Start a new watch. The version in the new request should be the version - // from the previous response, thus versionLDS before ++. - startXDS(ctx, t, xdsresource.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS), nonce) - versionLDS++ - - // This is an invalid response after the new watch. - nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{{}}, - TypeUrl: version.V2ListenerURL, - }, versionLDS) - t.Logf("Bad response pushed to fakeServer...") - - // The expected version string is the previous acked version. 
- if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS-1), nonce, true); err != nil { - t.Errorf("Failed to receive request: %v", err) - } - t.Logf("Bad response nacked") - versionLDS++ - - if _, err := sendGoodResp(ctx, t, xdsresource.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { - t.Fatal(err) - } - versionLDS++ -} - -// TestV2ClientAckNewWatchAfterCancel verifies the new request for a new watch -// after the previous watch is canceled, has the right version. -func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) { - var versionCDS = 3000 - - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start a CDS watch. - v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, "", "", false); err != nil { - t.Fatal(err) - } - t.Logf("FakeServer received %v request...", xdsresource.ClusterResource) - - // Send a good CDS response, this function waits for the ACK with the right - // version. - nonce, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) - if err != nil { - t.Fatal(err) - } - // Cancel the CDS watch, and start a new one. The new watch should have the - // version from the response above. - v2c.RemoveWatch(xdsresource.ClusterResource, goodClusterName1) - // Wait for a request with no resource names, because the only watch was - // removed. 
- emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL} - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) - } - v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) - // Wait for a request with correct resource names and version. - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) - } - versionCDS++ - - // Send a bad response with the next version. - if err := sendBadResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { - t.Fatal(err) - } - versionCDS++ - - // send another good response, and check for ack, with the new version. - if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ -} - -// TestV2ClientAckCancelResponseRace verifies if the response and ACK request -// race with cancel (which means the ACK request will not be sent on wire, -// because there's no active watch), the nonce will still be updated, and the -// new request with the new watch will have the correct nonce. -func (s) TestV2ClientAckCancelResponseRace(t *testing.T) { - var versionCDS = 3000 - - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, fakeServer.Address) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start a CDS watch. 
- v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, "", "", false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) - } - t.Logf("FakeServer received %v request...", xdsresource.ClusterResource) - - // send a good response, and check for ack, with the new version. - nonce, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) - if err != nil { - t.Fatal(err) - } - // Cancel the watch before the next response is sent. This mimics the case - // watch is canceled while response is on wire. - v2c.RemoveWatch(xdsresource.ClusterResource, goodClusterName1) - // Wait for a request with no resource names, because the only watch was - // removed. - emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL} - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) - } - versionCDS++ - - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if req, err := fakeServer.XDSRequestChan.Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("Got unexpected xds request after watch is canceled: %v", req) - } - - // Send a good response. - nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodCDSResponse1, versionCDS) - t.Logf("Good %v response pushed to fakeServer...", xdsresource.ClusterResource) - - // Expect no ACK because watch was canceled. 
- sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if req, err := fakeServer.XDSRequestChan.Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("Got unexpected xds request after watch is canceled: %v", req) - } - - // Still expected an callback update, because response was good. - if _, err := cbCDS.Receive(ctx); err != nil { - t.Fatalf("Timeout when expecting %v update", xdsresource.ClusterResource) - } - - // Start a new watch. The new watch should have the nonce from the response - // above, and version from the first good response. - v2c.AddWatch(xdsresource.ClusterResource, goodClusterName1) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS-1), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsresource.ClusterResource, err) - } - - // Send a bad response with the next version. - if err := sendBadResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { - t.Fatal(err) - } - versionCDS++ - - // send another good response, and check for ack, with the new version. - if _, err := sendGoodResp(ctx, t, xdsresource.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ -} diff --git a/xds/internal/xdsclient/controller/v2_cds_test.go b/xds/internal/xdsclient/controller/v2_cds_test.go deleted file mode 100644 index d262b53a46bf..000000000000 --- a/xds/internal/xdsclient/controller/v2_cds_test.go +++ /dev/null @@ -1,186 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package controller - -import ( - "testing" - "time" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - anypb "github.com/golang/protobuf/ptypes/any" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" -) - -const ( - serviceName1 = "foo-service" - serviceName2 = "bar-service" -) - -var ( - badlyMarshaledCDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2ClusterURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2ClusterURL, - } - goodCluster1 = &xdspb.Cluster{ - Name: goodClusterName1, - ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, - EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ - EdsConfig: &corepb.ConfigSource{ - ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ - Ads: &corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName1, - }, - LbPolicy: xdspb.Cluster_ROUND_ROBIN, - LrsServer: &corepb.ConfigSource{ - ConfigSourceSpecifier: &corepb.ConfigSource_Self{ - Self: &corepb.SelfConfigSource{}, - }, - }, - } - marshaledCluster1 = testutils.MarshalAny(goodCluster1) - goodCluster2 = &xdspb.Cluster{ - Name: goodClusterName2, - ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, - EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ - 
EdsConfig: &corepb.ConfigSource{ - ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ - Ads: &corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName2, - }, - LbPolicy: xdspb.Cluster_ROUND_ROBIN, - } - marshaledCluster2 = testutils.MarshalAny(goodCluster2) - goodCDSResponse1 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledCluster1, - }, - TypeUrl: version.V2ClusterURL, - } - goodCDSResponse2 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledCluster2, - }, - TypeUrl: version.V2ClusterURL, - } -) - -// TestCDSHandleResponse starts a fake xDS server, makes a ClientConn to it, -// and creates a v2Client using it. Then, it registers a CDS watcher and tests -// different CDS responses. -func (s) TestCDSHandleResponse(t *testing.T) { - tests := []struct { - name string - cdsResponse *xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsresource.ClusterUpdateErrTuple - wantUpdateMD xdsresource.UpdateMetadata - wantUpdateErr bool - }{ - // Badly marshaled CDS response. - { - name: "badly-marshaled-response", - cdsResponse: badlyMarshaledCDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response contains one good cluster we are not interested in. - { - name: "one-uninteresting-cluster", - cdsResponse: goodCDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsresource.ClusterUpdateErrTuple{ - goodClusterName2: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName2, EDSServiceName: serviceName2, Raw: marshaledCluster2}}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one cluster and it is good. 
- { - name: "one-good-cluster", - cdsResponse: goodCDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsresource.ClusterUpdateErrTuple{ - goodClusterName1: {Update: xdsresource.ClusterUpdate{ClusterName: goodClusterName1, EDSServiceName: serviceName1, LRSServerConfig: xdsresource.ClusterLRSServerSelf, Raw: marshaledCluster1}}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsresource.ClusterResource, - resourceName: goodClusterName1, - - responseToHandle: test.cdsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestCDSHandleResponseWithoutWatch tests the case where the v2Client receives -// a CDS response without a registered watcher. -func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) { - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, err := newTestController(&testUpdateReceiver{ - f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - if _, _, _, err := v2c.handleResponse(badResourceTypeInLDSResponse); err == nil { - t.Fatal("v2c.handleCDSResponse() succeeded, should have failed") - } - - if _, _, _, err := v2c.handleResponse(goodCDSResponse1); err != nil { - t.Fatal("v2c.handleCDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/xdsclient/controller/v2_client_test.go b/xds/internal/xdsclient/controller/v2_client_test.go deleted file mode 100644 index 942f18649034..000000000000 --- a/xds/internal/xdsclient/controller/v2_client_test.go +++ /dev/null @@ -1,212 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package controller - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client. -) - -// TestV2ClientBackoffAfterRecvError verifies if the v2Client backs off when it -// encounters a Recv error while receiving an LDS response. -func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { - fakeServer, cleanup := startServer(t) - defer cleanup() - - // Override the v2Client backoff function with this, so that we can verify - // that a backoff actually was triggered. 
- boCh := make(chan int, 1) - clientBackoff := func(v int) time.Duration { - boCh <- v - return 0 - } - - callbackCh := make(chan struct{}) - v2c, err := newTestController(&testUpdateReceiver{ - f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) { close(callbackCh) }, - }, fakeServer.Address, goodNodeProto, clientBackoff, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - t.Log("Started xds v2Client...") - - v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} - t.Log("Bad LDS response pushed to fakeServer...") - - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Timeout when expecting LDS update") - case <-boCh: - timer.Stop() - t.Log("v2Client backed off before retrying...") - case <-callbackCh: - t.Fatal("Received unexpected LDS callback") - } - - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request after backoff...") -} - -// TestV2ClientRetriesAfterBrokenStream verifies the case where a stream -// encountered a Recv() error, and is expected to send out xDS requests for -// registered watchers once it comes back up again. 
-func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { - fakeServer, cleanup := startServer(t) - defer cleanup() - - callbackCh := testutils.NewChannel() - v2c, err := newTestController(&testUpdateReceiver{ - f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - if rType == xdsresource.ListenerResource { - if u, ok := d[goodLDSTarget1]; ok { - t.Logf("Received LDS callback with ldsUpdate {%+v}", u) - callbackCh.Send(struct{}{}) - } - } - }, - }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - t.Log("Started xds v2Client...") - - v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} - t.Log("Good LDS response pushed to fakeServer...") - - if _, err := callbackCh.Receive(ctx); err != nil { - t.Fatal("Timeout when expecting LDS update") - } - - // Read the ack, so the next request is sent after stream re-creation. 
- if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS ACK") - } - - fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} - t.Log("Bad LDS response pushed to fakeServer...") - - val, err := fakeServer.XDSRequestChan.Receive(ctx) - if err != nil { - t.Fatalf("Timeout expired when expecting LDS update") - } - gotRequest := val.(*fakeserver.Request) - if !proto.Equal(gotRequest.Req, goodLDSRequest) { - t.Fatalf("gotRequest: %+v, wantRequest: %+v", gotRequest.Req, goodLDSRequest) - } -} - -// TestV2ClientWatchWithoutStream verifies the case where a watch is started -// when the xds stream is not created. The watcher should not receive any update -// (because there won't be any xds response, and timeout is done at a upper -// level). And when the stream is re-created, the watcher should get future -// updates. -func (s) TestV2ClientWatchWithoutStream(t *testing.T) { - fakeServer, sCleanup, err := fakeserver.StartServer() - if err != nil { - t.Fatalf("Failed to start fake xDS server: %v", err) - } - defer sCleanup() - - const scheme = "xds-client-test-whatever" - rb := manual.NewBuilderWithScheme(scheme) - rb.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "no.such.server"}}}) - resolver.Register(rb) - defer resolver.UnregisterForTesting(scheme) - - callbackCh := testutils.NewChannel() - v2c, err := newTestController(&testUpdateReceiver{ - f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - if rType == xdsresource.ListenerResource { - if u, ok := d[goodLDSTarget1]; ok { - t.Logf("Received LDS callback with ldsUpdate {%+v}", u) - callbackCh.Send(u) - } - } - }, - }, scheme+":///whatever", goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - t.Log("Started xds v2Client...") - - // This watch is started when the xds-ClientConn is in Transient Failure, - // 
and no xds stream is created. - v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) - - // The watcher should receive an update, with a timeout error in it. - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if v, err := callbackCh.Receive(sCtx); err == nil { - t.Fatalf("Expect an timeout error from watcher, got %v", v) - } - - // Send the real server address to the ClientConn, the stream should be - // created, and the previous watch should be sent. - rb.UpdateState(resolver.State{ - Addresses: []resolver.Address{{Addr: fakeServer.Address}}, - }) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} - t.Log("Good LDS response pushed to fakeServer...") - - if v, err := callbackCh.Receive(ctx); err != nil { - t.Fatal("Timeout when expecting LDS update") - } else if _, ok := v.(xdsresource.ListenerUpdateErrTuple); !ok { - t.Fatalf("Expect an LDS update from watcher, got %v", v) - } -} diff --git a/xds/internal/xdsclient/controller/v2_eds_test.go b/xds/internal/xdsclient/controller/v2_eds_test.go deleted file mode 100644 index 665fee92a1ea..000000000000 --- a/xds/internal/xdsclient/controller/v2_eds_test.go +++ /dev/null @@ -1,200 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package controller - -import ( - "testing" - "time" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - anypb "github.com/golang/protobuf/ptypes/any" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal" - xtestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" -) - -var ( - badlyMarshaledEDSResponse = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2EndpointsURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2EndpointsURL, - } - badResourceTypeInEDSResponse = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{marshaledConnMgr1}, - TypeUrl: version.V2EndpointsURL, - } - marshaledGoodCLA1 = func() *anypb.Any { - clab0 := xtestutils.NewClusterLoadAssignmentBuilder(goodEDSName, nil) - clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, nil) - clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, nil) - return testutils.MarshalAny(clab0.Build()) - }() - goodEDSResponse1 = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledGoodCLA1, - }, - TypeUrl: version.V2EndpointsURL, - } - marshaledGoodCLA2 = func() *anypb.Any { - clab0 := xtestutils.NewClusterLoadAssignmentBuilder("not-goodEDSName", nil) - clab0.AddLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - return testutils.MarshalAny(clab0.Build()) 
- }() - goodEDSResponse2 = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledGoodCLA2, - }, - TypeUrl: version.V2EndpointsURL, - } -) - -func (s) TestEDSHandleResponse(t *testing.T) { - tests := []struct { - name string - edsResponse *v2xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsresource.EndpointsUpdateErrTuple - wantUpdateMD xdsresource.UpdateMetadata - wantUpdateErr bool - }{ - // Any in resource is badly marshaled. - { - name: "badly-marshaled_response", - edsResponse: badlyMarshaledEDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response doesn't contain resource with the right type. - { - name: "no-config-in-response", - edsResponse: badResourceTypeInEDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response contains one uninteresting ClusterLoadAssignment. - { - name: "one-uninterestring-assignment", - edsResponse: goodEDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsresource.EndpointsUpdateErrTuple{ - "not-goodEDSName": {Update: xdsresource.EndpointsUpdate{ - Localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 0, - Weight: 1, - }, - }, - Raw: marshaledGoodCLA2, - }}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one good ClusterLoadAssignment. 
- { - name: "one-good-assignment", - edsResponse: goodEDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsresource.EndpointsUpdateErrTuple{ - goodEDSName: {Update: xdsresource.EndpointsUpdate{ - Localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []xdsresource.Endpoint{{Address: "addr2:159", Weight: 1}}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - }, - Raw: marshaledGoodCLA1, - }}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsresource.EndpointsResource, - resourceName: goodEDSName, - responseToHandle: test.edsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestEDSHandleResponseWithoutWatch tests the case where the v2Client -// receives an EDS response without a registered EDS watcher. 
-func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) { - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, err := newTestController(&testUpdateReceiver{ - f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - if _, _, _, err := v2c.handleResponse(badResourceTypeInEDSResponse); err == nil { - t.Fatal("v2c.handleEDSResponse() succeeded, should have failed") - } - - if _, _, _, err := v2c.handleResponse(goodEDSResponse1); err != nil { - t.Fatal("v2c.handleEDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/xdsclient/controller/v2_lds_test.go b/xds/internal/xdsclient/controller/v2_lds_test.go deleted file mode 100644 index 56b292988b03..000000000000 --- a/xds/internal/xdsclient/controller/v2_lds_test.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package controller - -import ( - "testing" - "time" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it, -// and creates a client using it. 
Then, it registers a watchLDS and tests -// different LDS responses. -func (s) TestLDSHandleResponse(t *testing.T) { - tests := []struct { - name string - ldsResponse *v2xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsresource.ListenerUpdateErrTuple - wantUpdateMD xdsresource.UpdateMetadata - wantUpdateErr bool - }{ - // Badly marshaled LDS response. - { - name: "badly-marshaled-response", - ldsResponse: badlyMarshaledLDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response does not contain Listener proto. - { - name: "no-listener-proto-in-response", - ldsResponse: badResourceTypeInLDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // No APIListener in the response. Just one test case here for a bad - // ApiListener, since the others are covered in - // TestGetRouteConfigNameFromListener. - { - name: "no-apiListener-in-response", - ldsResponse: noAPIListenerLDSResponse, - wantErr: true, - wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ - goodLDSTarget1: {Err: cmpopts.AnyError}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response contains one listener and it is good. 
- { - name: "one-good-listener", - ldsResponse: goodLDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains multiple good listeners, including the one we are - // interested in. - { - name: "multiple-good-listener", - ldsResponse: ldsResponseWithMultipleResources, - wantErr: false, - wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, - goodLDSTarget2: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains two good listeners (one interesting and one - // uninteresting), and one badly marshaled listener. This will cause a - // nack because the uninteresting listener will still be parsed. - { - name: "good-bad-ugly-listeners", - ldsResponse: goodBadUglyLDSResponse, - wantErr: true, - wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ - goodLDSTarget1: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener1}}, - goodLDSTarget2: {Err: cmpopts.AnyError}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response contains one listener, but we are not interested in it. 
- { - name: "one-uninteresting-listener", - ldsResponse: goodLDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsresource.ListenerUpdateErrTuple{ - goodLDSTarget2: {Update: xdsresource.ListenerUpdate{RouteConfigName: goodRouteName1, Raw: marshaledListener2}}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response constains no resources. This is the case where the server - // does not know about the target we are interested in. - { - name: "empty-response", - ldsResponse: emptyLDSResponse, - wantErr: false, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsresource.ListenerResource, - resourceName: goodLDSTarget1, - responseToHandle: test.ldsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestLDSHandleResponseWithoutWatch tests the case where the client receives -// an LDS response without a registered watcher. 
-func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) { - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, err := newTestController(&testUpdateReceiver{ - f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - if _, _, _, err := v2c.handleResponse(badResourceTypeInLDSResponse); err == nil { - t.Fatal("v2c.handleLDSResponse() succeeded, should have failed") - } - - if _, _, _, err := v2c.handleResponse(goodLDSResponse1); err != nil { - t.Fatal("v2c.handleLDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/xdsclient/controller/v2_rds_test.go b/xds/internal/xdsclient/controller/v2_rds_test.go deleted file mode 100644 index 476df71094bf..000000000000 --- a/xds/internal/xdsclient/controller/v2_rds_test.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package controller - -import ( - "context" - "testing" - "time" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// doLDS makes a LDS watch, and waits for the response and ack to finish. 
-// -// This is called by RDS tests to start LDS first, because LDS is a -// pre-requirement for RDS, and RDS handle would fail without an existing LDS -// watch. -func doLDS(ctx context.Context, t *testing.T, v2c *Controller, fakeServer *fakeserver.Server) { - v2c.AddWatch(xdsresource.ListenerResource, goodLDSTarget1) - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout waiting for LDS request: %v", err) - } -} - -// TestRDSHandleResponseWithRouting starts a fake xDS server, makes a ClientConn -// to it, and creates a v2Client using it. Then, it registers an LDS and RDS -// watcher and tests different RDS responses. -func (s) TestRDSHandleResponseWithRouting(t *testing.T) { - tests := []struct { - name string - rdsResponse *xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsresource.RouteConfigUpdateErrTuple - wantUpdateMD xdsresource.UpdateMetadata - wantUpdateErr bool - }{ - // Badly marshaled RDS response. - { - name: "badly-marshaled-response", - rdsResponse: badlyMarshaledRDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // Response does not contain RouteConfiguration proto. - { - name: "no-route-config-in-response", - rdsResponse: badResourceTypeInRDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Err: cmpopts.AnyError, - }, - }, - wantUpdateErr: false, - }, - // No virtualHosts in the response. Just one test case here for a bad - // RouteConfiguration, since the others are covered in - // TestGetClusterFromRouteConfiguration. 
- { - name: "no-virtual-hosts-in-response", - rdsResponse: noVirtualHostsInRDSResponse, - wantErr: false, - wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ - goodRouteName1: {Update: xdsresource.RouteConfigUpdate{ - VirtualHosts: nil, - Raw: marshaledNoVirtualHostsRouteConfig, - }}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one good RouteConfiguration, uninteresting though. - { - name: "one-uninteresting-route-config", - rdsResponse: goodRDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ - goodRouteName2: {Update: xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), - WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: xdsresource.RouteActionRoute}}, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*xdsresource.Route{{ - Prefix: newStringP(""), - WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName2: {Weight: 1}}, - ActionType: xdsresource.RouteActionRoute}}, - }, - }, - Raw: marshaledGoodRouteConfig2, - }}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one good interesting RouteConfiguration. 
- { - name: "one-good-route-config", - rdsResponse: goodRDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsresource.RouteConfigUpdateErrTuple{ - goodRouteName1: {Update: xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*xdsresource.Route{{ - Prefix: newStringP(""), - WeightedClusters: map[string]xdsresource.WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: xdsresource.RouteActionRoute}}, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), - WeightedClusters: map[string]xdsresource.WeightedCluster{goodClusterName1: {Weight: 1}}, - ActionType: xdsresource.RouteActionRoute}}, - }, - }, - Raw: marshaledGoodRouteConfig1, - }}, - }, - wantUpdateMD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsresource.RouteConfigResource, - resourceName: goodRouteName1, - responseToHandle: test.rdsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestRDSHandleResponseWithoutRDSWatch tests the case where the v2Client -// receives an RDS response without a registered RDS watcher. 
-func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { - fakeServer, cleanup := startServer(t) - defer cleanup() - - v2c, err := newTestController(&testUpdateReceiver{ - f: func(xdsresource.ResourceType, map[string]interface{}, xdsresource.UpdateMetadata) {}, - }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - doLDS(ctx, t, v2c, fakeServer) - - if _, _, _, err := v2c.handleResponse(badResourceTypeInRDSResponse); err == nil { - t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") - } - - if _, _, _, err := v2c.handleResponse(goodRDSResponse1); err != nil { - t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/xdsclient/controller/v2_testutils_test.go b/xds/internal/xdsclient/controller/v2_testutils_test.go deleted file mode 100644 index de147d480e54..000000000000 --- a/xds/internal/xdsclient/controller/v2_testutils_test.go +++ /dev/null @@ -1,470 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package controller - -import ( - "context" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/testing/protocmp" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - basepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" - httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" - listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" - anypb "github.com/golang/protobuf/ptypes/any" - structpb "github.com/golang/protobuf/ptypes/struct" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -const ( - goodLDSTarget1 = "lds.target.good:1111" - goodLDSTarget2 = "lds.target.good:2222" - goodRouteName1 = "GoodRouteConfig1" - goodRouteName2 = "GoodRouteConfig2" - goodEDSName = "GoodClusterAssignment1" - uninterestingDomain = "uninteresting.domain" - goodClusterName1 = "GoodClusterName1" - goodClusterName2 = "GoodClusterName2" - uninterestingClusterName = "UninterestingClusterName" - httpConnManagerURL = 
"type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" -) - -var ( - goodNodeProto = &basepb.Node{ - Id: "ENVOY_NODE_ID", - Metadata: &structpb.Struct{ - Fields: map[string]*structpb.Value{ - "TRAFFICDIRECTOR_GRPC_HOSTNAME": { - Kind: &structpb.Value_StringValue{StringValue: "trafficdirector"}, - }, - }, - }, - } - goodLDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2ListenerURL, - ResourceNames: []string{goodLDSTarget1}, - } - goodRDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2RouteConfigURL, - ResourceNames: []string{goodRouteName1}, - } - goodCDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2ClusterURL, - ResourceNames: []string{goodClusterName1}, - } - goodEDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2EndpointsURL, - ResourceNames: []string{goodEDSName}, - } - goodHTTPConnManager1 = &httppb.HttpConnectionManager{ - RouteSpecifier: &httppb.HttpConnectionManager_Rds{ - Rds: &httppb.Rds{ - ConfigSource: &basepb.ConfigSource{ - ConfigSourceSpecifier: &basepb.ConfigSource_Ads{Ads: &basepb.AggregatedConfigSource{}}, - }, - RouteConfigName: goodRouteName1, - }, - }, - } - marshaledConnMgr1 = testutils.MarshalAny(goodHTTPConnManager1) - goodListener1 = &xdspb.Listener{ - Name: goodLDSTarget1, - ApiListener: &listenerpb.ApiListener{ - ApiListener: marshaledConnMgr1, - }, - } - marshaledListener1 = testutils.MarshalAny(goodListener1) - goodListener2 = &xdspb.Listener{ - Name: goodLDSTarget2, - ApiListener: &listenerpb.ApiListener{ - ApiListener: marshaledConnMgr1, - }, - } - marshaledListener2 = testutils.MarshalAny(goodListener2) - noAPIListener = &xdspb.Listener{Name: goodLDSTarget1} - marshaledNoAPIListener = testutils.MarshalAny(noAPIListener) - badAPIListener2 = &xdspb.Listener{ - Name: goodLDSTarget2, - ApiListener: &listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: 
httpConnManagerURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - } - badlyMarshaledAPIListener2, _ = proto.Marshal(badAPIListener2) - goodLDSResponse1 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener1, - }, - TypeUrl: version.V2ListenerURL, - } - goodLDSResponse2 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener2, - }, - TypeUrl: version.V2ListenerURL, - } - emptyLDSResponse = &xdspb.DiscoveryResponse{TypeUrl: version.V2ListenerURL} - badlyMarshaledLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2ListenerURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2ListenerURL, - } - badResourceTypeInLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{marshaledConnMgr1}, - TypeUrl: version.V2ListenerURL, - } - ldsResponseWithMultipleResources = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener2, - marshaledListener1, - }, - TypeUrl: version.V2ListenerURL, - } - noAPIListenerLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{marshaledNoAPIListener}, - TypeUrl: version.V2ListenerURL, - } - goodBadUglyLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener2, - marshaledListener1, - { - TypeUrl: version.V2ListenerURL, - Value: badlyMarshaledAPIListener2, - }, - }, - TypeUrl: version.V2ListenerURL, - } - badlyMarshaledRDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2RouteConfigURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2RouteConfigURL, - } - badResourceTypeInRDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{marshaledConnMgr1}, - TypeUrl: version.V2RouteConfigURL, - } - noVirtualHostsRouteConfig = &xdspb.RouteConfiguration{ - Name: goodRouteName1, - } - marshaledNoVirtualHostsRouteConfig = testutils.MarshalAny(noVirtualHostsRouteConfig) - noVirtualHostsInRDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ 
- marshaledNoVirtualHostsRouteConfig, - }, - TypeUrl: version.V2RouteConfigURL, - } - goodRouteConfig1 = &xdspb.RouteConfiguration{ - Name: goodRouteName1, - VirtualHosts: []*routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - }, - }, - }, - }, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName1}, - }, - }, - }, - }, - }, - }, - } - marshaledGoodRouteConfig1 = testutils.MarshalAny(goodRouteConfig1) - goodRouteConfig2 = &xdspb.RouteConfiguration{ - Name: goodRouteName2, - VirtualHosts: []*routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - }, - }, - }, - }, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName2}, - }, - }, - }, - }, - }, - }, - } - marshaledGoodRouteConfig2 = testutils.MarshalAny(goodRouteConfig2) - goodRDSResponse1 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledGoodRouteConfig1, - }, - TypeUrl: version.V2RouteConfigURL, - } - goodRDSResponse2 = &xdspb.DiscoveryResponse{ - Resources: 
[]*anypb.Any{ - marshaledGoodRouteConfig2, - }, - TypeUrl: version.V2RouteConfigURL, - } -) - -type watchHandleTestcase struct { - rType xdsresource.ResourceType - resourceName string - - responseToHandle *xdspb.DiscoveryResponse - wantHandleErr bool - wantUpdate interface{} - wantUpdateMD xdsresource.UpdateMetadata - wantUpdateErr bool -} - -type testUpdateReceiver struct { - f func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) -} - -func (t *testUpdateReceiver) NewListeners(d map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - t.newUpdate(xdsresource.ListenerResource, dd, metadata) -} - -func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - t.newUpdate(xdsresource.RouteConfigResource, dd, metadata) -} - -func (t *testUpdateReceiver) NewClusters(d map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - t.newUpdate(xdsresource.ClusterResource, dd, metadata) -} - -func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - t.newUpdate(xdsresource.EndpointsResource, dd, metadata) -} - -func (t *testUpdateReceiver) NewConnectionError(error) {} - -func (t *testUpdateReceiver) newUpdate(rType xdsresource.ResourceType, d map[string]interface{}, metadata xdsresource.UpdateMetadata) { - t.f(rType, d, metadata) -} - -// testWatchHandle is called to test response handling for each xDS. 
-// -// It starts the xDS watch as configured in test, waits for the fake xds server -// to receive the request (so watch callback is installed), and calls -// handleXDSResp with responseToHandle (if it's set). It then compares the -// update received by watch callback with the expected results. -func testWatchHandle(t *testing.T, test *watchHandleTestcase) { - t.Helper() - - fakeServer, cleanup := startServer(t) - defer cleanup() - - type updateErr struct { - u interface{} - md xdsresource.UpdateMetadata - err error - } - gotUpdateCh := testutils.NewChannel() - - v2c, err := newTestController(&testUpdateReceiver{ - f: func(rType xdsresource.ResourceType, d map[string]interface{}, md xdsresource.UpdateMetadata) { - if rType == test.rType { - switch test.rType { - case xdsresource.ListenerResource: - dd := make(map[string]xdsresource.ListenerUpdateErrTuple) - for n, u := range d { - dd[n] = u.(xdsresource.ListenerUpdateErrTuple) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsresource.RouteConfigResource: - dd := make(map[string]xdsresource.RouteConfigUpdateErrTuple) - for n, u := range d { - dd[n] = u.(xdsresource.RouteConfigUpdateErrTuple) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsresource.ClusterResource: - dd := make(map[string]xdsresource.ClusterUpdateErrTuple) - for n, u := range d { - dd[n] = u.(xdsresource.ClusterUpdateErrTuple) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsresource.EndpointsResource: - dd := make(map[string]xdsresource.EndpointsUpdateErrTuple) - for n, u := range d { - dd[n] = u.(xdsresource.EndpointsUpdateErrTuple) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - } - } - }, - }, fakeServer.Address, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - // Register the watcher, this will also trigger the v2Client to send the xDS - // request. - v2c.AddWatch(test.rType, test.resourceName) - - // Wait till the request makes it to the fakeServer. 
This ensures that - // the watch request has been processed by the v2Client. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout waiting for an xDS request: %v", err) - } - - // Directly push the response through a call to handleXDSResp. This bypasses - // the fakeServer, so it's only testing the handle logic. Client response - // processing is covered elsewhere. - // - // Also note that this won't trigger ACK, so there's no need to clear the - // request channel afterwards. - if _, _, _, err := v2c.handleResponse(test.responseToHandle); (err != nil) != test.wantHandleErr { - t.Fatalf("v2c.handleRDSResponse() returned err: %v, wantErr: %v", err, test.wantHandleErr) - } - - wantUpdate := test.wantUpdate - cmpOpts := cmp.Options{ - cmpopts.EquateEmpty(), protocmp.Transform(), - cmpopts.IgnoreFields(xdsresource.UpdateMetadata{}, "Timestamp"), - cmpopts.IgnoreFields(xdsresource.UpdateErrorMetadata{}, "Timestamp"), - cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), - } - uErr, err := gotUpdateCh.Receive(ctx) - if err == context.DeadlineExceeded { - t.Fatal("Timeout expecting xDS update") - } - gotUpdate := uErr.(updateErr).u - if diff := cmp.Diff(gotUpdate, wantUpdate, cmpOpts); diff != "" { - t.Fatalf("got update : %+v, want %+v, diff: %s", gotUpdate, wantUpdate, diff) - } - gotUpdateMD := uErr.(updateErr).md - if diff := cmp.Diff(gotUpdateMD, test.wantUpdateMD, cmpOpts); diff != "" { - t.Fatalf("got update : %+v, want %+v, diff: %s", gotUpdateMD, test.wantUpdateMD, diff) - } - gotUpdateErr := uErr.(updateErr).err - if (gotUpdateErr != nil) != test.wantUpdateErr { - t.Fatalf("got xDS update error {%v}, wantErr: %v", gotUpdateErr, test.wantUpdateErr) - } -} - -// startServer starts a fake XDS server and also returns a ClientConn -// connected to it. 
-func startServer(t *testing.T) (*fakeserver.Server, func()) { - t.Helper() - fs, sCleanup, err := fakeserver.StartServer() - if err != nil { - t.Fatalf("Failed to start fake xDS server: %v", err) - } - return fs, sCleanup -} - -func newTestController(p pubsub.UpdateHandler, controlPlanAddr string, n *basepb.Node, b func(int) time.Duration, l *grpclog.PrefixLogger) (*Controller, error) { - c, err := New(&bootstrap.ServerConfig{ - ServerURI: controlPlanAddr, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV2, - NodeProto: n, - }, p, nil, l, b) - if err != nil { - return nil, err - } - return c, nil -} - -func newStringP(s string) *string { - return &s -} diff --git a/xds/internal/xdsclient/controller/version/v2/client.go b/xds/internal/xdsclient/controller/version/v2/client.go deleted file mode 100644 index 968947b0669e..000000000000 --- a/xds/internal/xdsclient/controller/version/v2/client.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package v2 provides xDS v2 transport protocol specific functionality. 
-package v2 - -import ( - "context" - "fmt" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/types/known/anypb" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v2adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -func init() { - controllerversion.RegisterAPIClientBuilder(xdsresourceversion.TransportV2, newClient) -} - -var ( - resourceTypeToURL = map[xdsresource.ResourceType]string{ - xdsresource.ListenerResource: xdsresourceversion.V2ListenerURL, - xdsresource.RouteConfigResource: xdsresourceversion.V2RouteConfigURL, - xdsresource.ClusterResource: xdsresourceversion.V2ClusterURL, - xdsresource.EndpointsResource: xdsresourceversion.V2EndpointsURL, - } -) - -func newClient(opts controllerversion.BuildOptions) (controllerversion.VersionedClient, error) { - nodeProto, ok := opts.NodeProto.(*v2corepb.Node) - if !ok { - return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, (*v2corepb.Node)(nil)) - } - v2c := &client{nodeProto: nodeProto, logger: opts.Logger} - return v2c, nil -} - -type adsStream v2adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - -// client performs the actual xDS RPCs using the xDS v2 API. It creates a -// single ADS stream on which the different types of xDS requests and responses -// are multiplexed. 
-type client struct { - nodeProto *v2corepb.Node - logger *grpclog.PrefixLogger -} - -func (v2c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { - return v2adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true)) -} - -// SendRequest sends out a DiscoveryRequest for the given resourceNames, of type -// rType, on the provided stream. -// -// version is the ack version to be sent with the request -// - If this is the new request (not an ack/nack), version will be empty. -// - If this is an ack, version will be the version from the response. -// - If this is a nack, version will be the previous acked version (from -// versionMap). If there was no ack before, it will be empty. -func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error { - stream, ok := s.(adsStream) - if !ok { - return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s) - } - req := &v2xdspb.DiscoveryRequest{ - Node: v2c.nodeProto, - TypeUrl: resourceTypeToURL[rType], - ResourceNames: resourceNames, - VersionInfo: version, - ResponseNonce: nonce, - } - if errMsg != "" { - req.ErrorDetail = &statuspb.Status{ - Code: int32(codes.InvalidArgument), Message: errMsg, - } - } - if err := stream.Send(req); err != nil { - return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) - } - v2c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) - return nil -} - -// RecvResponse blocks on the receipt of one response message on the provided -// stream. 
-func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { - stream, ok := s.(adsStream) - if !ok { - return nil, fmt.Errorf("xds: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) - } - v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v2c.logger.Debugf("ADS response received: %v", pretty.ToJSON(resp)) - return resp, nil -} - -func (v2c *client) ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) { - rType := xdsresource.UnknownResource - resp, ok := r.(*v2xdspb.DiscoveryResponse) - if !ok { - return rType, nil, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) - } - - // Note that the xDS transport protocol is versioned independently of - // the resource types, and it is supported to transfer older versions - // of resource types using new versions of the transport protocol, or - // vice-versa. Hence we need to handle v3 type_urls as well here. 
- var err error - url := resp.GetTypeUrl() - switch { - case xdsresource.IsListenerResource(url): - rType = xdsresource.ListenerResource - case xdsresource.IsRouteConfigResource(url): - rType = xdsresource.RouteConfigResource - case xdsresource.IsClusterResource(url): - rType = xdsresource.ClusterResource - case xdsresource.IsEndpointsResource(url): - rType = xdsresource.EndpointsResource - default: - return rType, nil, "", "", controllerversion.ErrResourceTypeUnsupported{ - ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), - } - } - return rType, resp.GetResources(), resp.GetVersionInfo(), resp.GetNonce(), err -} diff --git a/xds/internal/xdsclient/controller/version/v2/loadreport.go b/xds/internal/xdsclient/controller/version/v2/loadreport.go deleted file mode 100644 index da5128ac456e..000000000000 --- a/xds/internal/xdsclient/controller/version/v2/loadreport.go +++ /dev/null @@ -1,170 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package v2 - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/xdsclient/load" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v2endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" - lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal" -) - -const clientFeatureLRSSendAllClusters = "envoy.lrs.supports_send_all_clusters" - -type lrsStream lrsgrpc.LoadReportingService_StreamLoadStatsClient - -func (v2c *client) NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { - c := lrsgrpc.NewLoadReportingServiceClient(cc) - return c.StreamLoadStats(ctx) -} - -func (v2c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - node := proto.Clone(v2c.nodeProto).(*v2corepb.Node) - if node == nil { - node = &v2corepb.Node{} - } - node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) - - req := &lrspb.LoadStatsRequest{Node: node} - v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func (v2c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time.Duration, error) { - stream, ok := s.(lrsStream) - if !ok { - return nil, 0, fmt.Errorf("lrs: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - return nil, 0, 
fmt.Errorf("lrs: failed to receive first response: %v", err) - } - v2c.logger.Infof("lrs: received first LoadStatsResponse: %+v", pretty.ToJSON(resp)) - - interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) - if err != nil { - return nil, 0, fmt.Errorf("lrs: failed to convert report interval: %v", err) - } - - if resp.ReportEndpointGranularity { - // TODO: fixme to support per endpoint loads. - return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") - } - - clusters := resp.Clusters - if resp.SendAllClusters { - // Return nil to send stats for all clusters. - clusters = nil - } - - return clusters, interval, nil -} - -func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - - clusterStats := make([]*v2endpointpb.ClusterStats, 0, len(loads)) - for _, sd := range loads { - droppedReqs := make([]*v2endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) - for category, count := range sd.Drops { - droppedReqs = append(droppedReqs, &v2endpointpb.ClusterStats_DroppedRequests{ - Category: category, - DroppedCount: count, - }) - } - localityStats := make([]*v2endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) - for l, localityData := range sd.LocalityStats { - lid, err := internal.LocalityIDFromString(l) - if err != nil { - return err - } - loadMetricStats := make([]*v2endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) - for name, loadData := range localityData.LoadStats { - loadMetricStats = append(loadMetricStats, &v2endpointpb.EndpointLoadMetricStats{ - MetricName: name, - NumRequestsFinishedWithMetric: loadData.Count, - TotalMetricValue: loadData.Sum, - }) - } - localityStats = append(localityStats, &v2endpointpb.UpstreamLocalityStats{ - Locality: &v2corepb.Locality{ - Region: lid.Region, - Zone: lid.Zone, - SubZone: 
lid.SubZone, - }, - TotalSuccessfulRequests: localityData.RequestStats.Succeeded, - TotalRequestsInProgress: localityData.RequestStats.InProgress, - TotalErrorRequests: localityData.RequestStats.Errored, - LoadMetricStats: loadMetricStats, - UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. - }) - } - - clusterStats = append(clusterStats, &v2endpointpb.ClusterStats{ - ClusterName: sd.Cluster, - ClusterServiceName: sd.Service, - UpstreamLocalityStats: localityStats, - TotalDroppedRequests: sd.TotalDrops, - DroppedRequests: droppedReqs, - LoadReportInterval: ptypes.DurationProto(sd.ReportInterval), - }) - - } - - req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v2c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func getStreamError(stream lrsStream) error { - for { - if _, err := stream.Recv(); err != nil { - return err - } - } -} diff --git a/xds/internal/xdsclient/controller/version/v3/client.go b/xds/internal/xdsclient/controller/version/v3/client.go deleted file mode 100644 index 4cacd94dd19b..000000000000 --- a/xds/internal/xdsclient/controller/version/v3/client.go +++ /dev/null @@ -1,157 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package v3 provides xDS v3 transport protocol specific functionality. 
-package v3 - -import ( - "context" - "fmt" - - "github.com/golang/protobuf/proto" - statuspb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/types/known/anypb" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" -) - -func init() { - controllerversion.RegisterAPIClientBuilder(xdsresourceversion.TransportV3, newClient) -} - -var ( - resourceTypeToURL = map[xdsresource.ResourceType]string{ - xdsresource.ListenerResource: xdsresourceversion.V3ListenerURL, - xdsresource.RouteConfigResource: xdsresourceversion.V3RouteConfigURL, - xdsresource.ClusterResource: xdsresourceversion.V3ClusterURL, - xdsresource.EndpointsResource: xdsresourceversion.V3EndpointsURL, - } -) - -func newClient(opts controllerversion.BuildOptions) (controllerversion.VersionedClient, error) { - nodeProto, ok := opts.NodeProto.(*v3corepb.Node) - if !ok { - return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, v3corepb.Node{}) - } - v3c := &client{ - nodeProto: nodeProto, logger: opts.Logger, - } - return v3c, nil -} - -type adsStream v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - -// client performs the actual xDS RPCs using the xDS v3 API. It creates a -// single ADS stream on which the different types of xDS requests and responses -// are multiplexed. 
-type client struct { - nodeProto *v3corepb.Node - logger *grpclog.PrefixLogger -} - -func (v3c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true)) -} - -// SendRequest sends out a DiscoveryRequest for the given resourceNames, of type -// rType, on the provided stream. -// -// version is the ack version to be sent with the request -// - If this is the new request (not an ack/nack), version will be empty. -// - If this is an ack, version will be the version from the response. -// - If this is a nack, version will be the previous acked version (from -// versionMap). If there was no ack before, it will be empty. -func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error { - stream, ok := s.(adsStream) - if !ok { - return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s) - } - req := &v3discoverypb.DiscoveryRequest{ - Node: v3c.nodeProto, - TypeUrl: resourceTypeToURL[rType], - ResourceNames: resourceNames, - VersionInfo: version, - ResponseNonce: nonce, - } - if errMsg != "" { - req.ErrorDetail = &statuspb.Status{ - Code: int32(codes.InvalidArgument), Message: errMsg, - } - } - if err := stream.Send(req); err != nil { - return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) - } - v3c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) - return nil -} - -// RecvResponse blocks on the receipt of one response message on the provided -// stream. 
-func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { - stream, ok := s.(adsStream) - if !ok { - return nil, fmt.Errorf("xds: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) - } - v3c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v3c.logger.Debugf("ADS response received: %+v", pretty.ToJSON(resp)) - return resp, nil -} - -func (v3c *client) ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) { - rType := xdsresource.UnknownResource - resp, ok := r.(*v3discoverypb.DiscoveryResponse) - if !ok { - return rType, nil, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) - } - - // Note that the xDS transport protocol is versioned independently of - // the resource types, and it is supported to transfer older versions - // of resource types using new versions of the transport protocol, or - // vice-versa. Hence we need to handle v3 type_urls as well here. 
- var err error - url := resp.GetTypeUrl() - switch { - case xdsresource.IsListenerResource(url): - rType = xdsresource.ListenerResource - case xdsresource.IsRouteConfigResource(url): - rType = xdsresource.RouteConfigResource - case xdsresource.IsClusterResource(url): - rType = xdsresource.ClusterResource - case xdsresource.IsEndpointsResource(url): - rType = xdsresource.EndpointsResource - default: - return rType, nil, "", "", controllerversion.ErrResourceTypeUnsupported{ - ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), - } - } - return rType, resp.GetResources(), resp.GetVersionInfo(), resp.GetNonce(), err -} diff --git a/xds/internal/xdsclient/controller/version/v3/loadreport.go b/xds/internal/xdsclient/controller/version/v3/loadreport.go deleted file mode 100644 index f8d866bb1a59..000000000000 --- a/xds/internal/xdsclient/controller/version/v3/loadreport.go +++ /dev/null @@ -1,169 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package v3 - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/xdsclient/load" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal" -) - -const clientFeatureLRSSendAllClusters = "envoy.lrs.supports_send_all_clusters" - -type lrsStream lrsgrpc.LoadReportingService_StreamLoadStatsClient - -func (v3c *client) NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { - c := lrsgrpc.NewLoadReportingServiceClient(cc) - return c.StreamLoadStats(ctx) -} - -func (v3c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - node := proto.Clone(v3c.nodeProto).(*v3corepb.Node) - if node == nil { - node = &v3corepb.Node{} - } - node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) - - req := &lrspb.LoadStatsRequest{Node: node} - v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func (v3c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time.Duration, error) { - stream, ok := s.(lrsStream) - if !ok { - return nil, 0, fmt.Errorf("lrs: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - return 
nil, 0, fmt.Errorf("lrs: failed to receive first response: %v", err) - } - v3c.logger.Infof("lrs: received first LoadStatsResponse: %+v", pretty.ToJSON(resp)) - - interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) - if err != nil { - return nil, 0, fmt.Errorf("lrs: failed to convert report interval: %v", err) - } - - if resp.ReportEndpointGranularity { - // TODO: fixme to support per endpoint loads. - return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") - } - - clusters := resp.Clusters - if resp.SendAllClusters { - // Return nil to send stats for all clusters. - clusters = nil - } - - return clusters, interval, nil -} - -func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - - clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) - for _, sd := range loads { - droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) - for category, count := range sd.Drops { - droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ - Category: category, - DroppedCount: count, - }) - } - localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) - for l, localityData := range sd.LocalityStats { - lid, err := internal.LocalityIDFromString(l) - if err != nil { - return err - } - loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) - for name, loadData := range localityData.LoadStats { - loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ - MetricName: name, - NumRequestsFinishedWithMetric: loadData.Count, - TotalMetricValue: loadData.Sum, - }) - } - localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ - Locality: &v3corepb.Locality{ - Region: lid.Region, - Zone: lid.Zone, - 
SubZone: lid.SubZone, - }, - TotalSuccessfulRequests: localityData.RequestStats.Succeeded, - TotalRequestsInProgress: localityData.RequestStats.InProgress, - TotalErrorRequests: localityData.RequestStats.Errored, - LoadMetricStats: loadMetricStats, - UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. - }) - } - - clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ - ClusterName: sd.Cluster, - ClusterServiceName: sd.Service, - UpstreamLocalityStats: localityStats, - TotalDroppedRequests: sd.TotalDrops, - DroppedRequests: droppedReqs, - LoadReportInterval: ptypes.DurationProto(sd.ReportInterval), - }) - } - - req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v3c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func getStreamError(stream lrsStream) error { - for { - if _, err := stream.Recv(); err != nil { - return err - } - } -} diff --git a/xds/internal/xdsclient/controller/version/version.go b/xds/internal/xdsclient/controller/version/version.go deleted file mode 100644 index f79a21e294fa..000000000000 --- a/xds/internal/xdsclient/controller/version/version.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package version defines APIs to deal with different versions of xDS. 
-package version - -import ( - "context" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/types/known/anypb" -) - -var ( - m = make(map[version.TransportAPI]func(opts BuildOptions) (VersionedClient, error)) -) - -// RegisterAPIClientBuilder registers a client builder for xDS transport protocol -// version specified by b.Version(). -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple builders are -// registered for the same version, the one registered last will take effect. -func RegisterAPIClientBuilder(v version.TransportAPI, f func(opts BuildOptions) (VersionedClient, error)) { - m[v] = f -} - -// GetAPIClientBuilder returns the client builder registered for the provided -// xDS transport API version. -func GetAPIClientBuilder(version version.TransportAPI) func(opts BuildOptions) (VersionedClient, error) { - if f, ok := m[version]; ok { - return f - } - return nil -} - -// BuildOptions contains options to be passed to client builders. -type BuildOptions struct { - // NodeProto contains the Node proto to be used in xDS requests. The actual - // type depends on the transport protocol version used. - NodeProto proto.Message - // // Backoff returns the amount of time to backoff before retrying broken - // // streams. - // Backoff func(int) time.Duration - // Logger provides enhanced logging capabilities. - Logger *grpclog.PrefixLogger -} - -// LoadReportingOptions contains configuration knobs for reporting load data. -type LoadReportingOptions struct { - LoadStore *load.Store -} - -// ErrResourceTypeUnsupported is an error used to indicate an unsupported xDS -// resource type. 
The wrapped ErrStr contains the details. -type ErrResourceTypeUnsupported struct { - ErrStr string -} - -// Error helps implements the error interface. -func (e ErrResourceTypeUnsupported) Error() string { - return e.ErrStr -} - -// VersionedClient is the interface to version specific operations of the -// client. -// -// It mainly deals with the type assertion from proto.Message to the real v2/v3 -// types, and grpc.Stream to the versioned stream types. -type VersionedClient interface { - // NewStream returns a new xDS client stream specific to the underlying - // transport protocol version. - NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) - // SendRequest constructs and sends out a DiscoveryRequest message specific - // to the underlying transport protocol version. - SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error - // RecvResponse uses the provided stream to receive a response specific to - // the underlying transport protocol version. - RecvResponse(s grpc.ClientStream) (proto.Message, error) - // ParseResponse type asserts message to the versioned response, and - // retrieves the fields. - ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) - - // The following are LRS methods. - - // NewLoadStatsStream returns a new LRS client stream specific to the - // underlying transport protocol version. - NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) - // SendFirstLoadStatsRequest constructs and sends the first request on the - // LRS stream. - SendFirstLoadStatsRequest(s grpc.ClientStream) error - // HandleLoadStatsResponse receives the first response from the server which - // contains the load reporting interval and the clusters for which the - // server asks the client to report load for. 
- // - // If the response sets SendAllClusters to true, the returned clusters is - // nil. - HandleLoadStatsResponse(s grpc.ClientStream) (clusters []string, _ time.Duration, _ error) - // SendLoadStatsRequest will be invoked at regular intervals to send load - // report with load data reported since the last time this method was - // invoked. - SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error -} diff --git a/xds/internal/xdsclient/dump_test.go b/xds/internal/xdsclient/dump_test.go deleted file mode 100644 index 165d608d3312..000000000000 --- a/xds/internal/xdsclient/dump_test.go +++ /dev/null @@ -1,511 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package xdsclient - -import ( - "fmt" - "testing" - "time" - - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/testing/protocmp" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/durationpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/testutils" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" -) - -func (s) TestLDSConfigDump(t *testing.T) { - const testVersion = "test-version-lds" - var ( - ldsTargets = []string{"lds.target.good:0000", "lds.target.good:1111"} - routeConfigNames = []string{"route-config-0", "route-config-1"} - listenerRaws = make(map[string]*anypb.Any, len(ldsTargets)) - ) - - for i := range ldsTargets { - listenersT := &v3listenerpb.Listener{ - Name: ldsTargets[i], - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: routeConfigNames[i], - }, - }, - CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ - MaxStreamDuration: 
durationpb.New(time.Second), - }, - }), - }, - } - listenerRaws[ldsTargets[i]] = testutils.MarshalAny(listenersT) - } - - client, err := NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - }, defaultTestWatchExpiryTimeout, time.Duration(0)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - // Expected unknown. - if err := compareDump(client.DumpLDS, map[string]xdsresource.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsresource.UpdateWithMD) - for _, n := range ldsTargets { - cancel := client.WatchListener(n, func(update xdsresource.ListenerUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpLDS, wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsresource.ListenerUpdateErrTuple) - want0 := make(map[string]xdsresource.UpdateWithMD) - for n, r := range listenerRaws { - update0[n] = xdsresource.ListenerUpdateErrTuple{Update: xdsresource.ListenerUpdate{Raw: r}} - want0[n] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, - Raw: r, - } - } - updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") - updateHandler.NewListeners(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) - - // Expect ACK. 
- if err := compareDump(client.DumpLDS, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "lds-version-nack" - var nackErr = fmt.Errorf("lds nack error") - updateHandler.NewListeners( - map[string]xdsresource.ListenerUpdateErrTuple{ - ldsTargets[0]: {Err: nackErr}, - ldsTargets[1]: {Update: xdsresource.ListenerUpdate{Raw: listenerRaws[ldsTargets[1]]}}, - }, - xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsresource.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. - wantDump[ldsTargets[0]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - Version: testVersion, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: listenerRaws[ldsTargets[0]], - } - - wantDump[ldsTargets[1]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, - Raw: listenerRaws[ldsTargets[1]], - } - if err := compareDump(client.DumpLDS, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func (s) TestRDSConfigDump(t *testing.T) { - const testVersion = "test-version-rds" - var ( - listenerNames = []string{"lds.target.good:0000", "lds.target.good:1111"} - rdsTargets = []string{"route-config-0", "route-config-1"} - clusterNames = []string{"cluster-0", "cluster-1"} - routeRaws = make(map[string]*anypb.Any, len(rdsTargets)) - ) - - for i := range rdsTargets { - routeConfigT := &v3routepb.RouteConfiguration{ - Name: rdsTargets[i], - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{listenerNames[i]}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: 
&v3routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterNames[i]}, - }, - }, - }}, - }, - }, - } - - routeRaws[rdsTargets[i]] = testutils.MarshalAny(routeConfigT) - } - - client, err := NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - }, defaultTestWatchExpiryTimeout, time.Duration(0)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - // Expected unknown. - if err := compareDump(client.DumpRDS, map[string]xdsresource.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsresource.UpdateWithMD) - for _, n := range rdsTargets { - cancel := client.WatchRouteConfig(n, func(update xdsresource.RouteConfigUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpRDS, wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsresource.RouteConfigUpdateErrTuple) - want0 := make(map[string]xdsresource.UpdateWithMD) - for n, r := range routeRaws { - update0[n] = xdsresource.RouteConfigUpdateErrTuple{Update: xdsresource.RouteConfigUpdate{Raw: r}} - want0[n] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, - Raw: r, - } - } - updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") - updateHandler.NewRouteConfigs(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) - - // Expect ACK. 
- if err := compareDump(client.DumpRDS, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "rds-version-nack" - var nackErr = fmt.Errorf("rds nack error") - updateHandler.NewRouteConfigs( - map[string]xdsresource.RouteConfigUpdateErrTuple{ - rdsTargets[0]: {Err: nackErr}, - rdsTargets[1]: {Update: xdsresource.RouteConfigUpdate{Raw: routeRaws[rdsTargets[1]]}}, - }, - xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsresource.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. - wantDump[rdsTargets[0]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - Version: testVersion, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: routeRaws[rdsTargets[0]], - } - wantDump[rdsTargets[1]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, - Raw: routeRaws[rdsTargets[1]], - } - if err := compareDump(client.DumpRDS, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func (s) TestCDSConfigDump(t *testing.T) { - const testVersion = "test-version-cds" - var ( - cdsTargets = []string{"cluster-0", "cluster-1"} - serviceNames = []string{"service-0", "service-1"} - clusterRaws = make(map[string]*anypb.Any, len(cdsTargets)) - ) - - for i := range cdsTargets { - clusterT := &v3clusterpb.Cluster{ - Name: cdsTargets[i], - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - 
ServiceName: serviceNames[i], - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - } - - clusterRaws[cdsTargets[i]] = testutils.MarshalAny(clusterT) - } - - client, err := NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - }, defaultTestWatchExpiryTimeout, time.Duration(0)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - // Expected unknown. - if err := compareDump(client.DumpCDS, map[string]xdsresource.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsresource.UpdateWithMD) - for _, n := range cdsTargets { - cancel := client.WatchCluster(n, func(update xdsresource.ClusterUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpCDS, wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsresource.ClusterUpdateErrTuple) - want0 := make(map[string]xdsresource.UpdateWithMD) - for n, r := range clusterRaws { - update0[n] = xdsresource.ClusterUpdateErrTuple{Update: xdsresource.ClusterUpdate{Raw: r}} - want0[n] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, - Raw: r, - } - } - updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") - updateHandler.NewClusters(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) - - // Expect ACK. 
- if err := compareDump(client.DumpCDS, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "cds-version-nack" - var nackErr = fmt.Errorf("cds nack error") - updateHandler.NewClusters( - map[string]xdsresource.ClusterUpdateErrTuple{ - cdsTargets[0]: {Err: nackErr}, - cdsTargets[1]: {Update: xdsresource.ClusterUpdate{Raw: clusterRaws[cdsTargets[1]]}}, - }, - xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsresource.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. - wantDump[cdsTargets[0]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - Version: testVersion, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: clusterRaws[cdsTargets[0]], - } - wantDump[cdsTargets[1]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, - Raw: clusterRaws[cdsTargets[1]], - } - if err := compareDump(client.DumpCDS, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func (s) TestEDSConfigDump(t *testing.T) { - const testVersion = "test-version-cds" - var ( - edsTargets = []string{"cluster-0", "cluster-1"} - localityNames = []string{"locality-0", "locality-1"} - addrs = []string{"addr0:123", "addr1:456"} - endpointRaws = make(map[string]*anypb.Any, len(edsTargets)) - ) - - for i := range edsTargets { - clab0 := xdstestutils.NewClusterLoadAssignmentBuilder(edsTargets[i], nil) - clab0.AddLocality(localityNames[i], 1, 1, []string{addrs[i]}, nil) - claT := clab0.Build() - - endpointRaws[edsTargets[i]] = testutils.MarshalAny(claT) - } - - client, err := NewWithConfigForTesting(&bootstrap.Config{ - 
XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - }, defaultTestWatchExpiryTimeout, time.Duration(0)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - // Expected unknown. - if err := compareDump(client.DumpEDS, map[string]xdsresource.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsresource.UpdateWithMD) - for _, n := range edsTargets { - cancel := client.WatchEndpoints(n, func(update xdsresource.EndpointsUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsresource.UpdateWithMD{MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpEDS, wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsresource.EndpointsUpdateErrTuple) - want0 := make(map[string]xdsresource.UpdateWithMD) - for n, r := range endpointRaws { - update0[n] = xdsresource.EndpointsUpdateErrTuple{Update: xdsresource.EndpointsUpdate{Raw: r}} - want0[n] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}, - Raw: r, - } - } - updateHandler := findPubsubForTest(t, client.(*clientRefCounted).clientImpl, "") - updateHandler.NewEndpoints(update0, xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: testVersion}) - - // Expect ACK. 
- if err := compareDump(client.DumpEDS, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "eds-version-nack" - var nackErr = fmt.Errorf("eds nack error") - updateHandler.NewEndpoints( - map[string]xdsresource.EndpointsUpdateErrTuple{ - edsTargets[0]: {Err: nackErr}, - edsTargets[1]: {Update: xdsresource.EndpointsUpdate{Raw: endpointRaws[edsTargets[1]]}}, - }, - xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsresource.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. - wantDump[edsTargets[0]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{ - Status: xdsresource.ServiceStatusNACKed, - Version: testVersion, - ErrState: &xdsresource.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: endpointRaws[edsTargets[0]], - } - wantDump[edsTargets[1]] = xdsresource.UpdateWithMD{ - MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: nackVersion}, - Raw: endpointRaws[edsTargets[1]], - } - if err := compareDump(client.DumpEDS, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func compareDump(dumpFunc func() map[string]xdsresource.UpdateWithMD, wantDump interface{}) error { - dump := dumpFunc() - cmpOpts := cmp.Options{ - cmpopts.EquateEmpty(), - cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - if x == nil || y == nil { - return x == nil && y == nil - } - return x.Error() == y.Error() - }), - protocmp.Transform(), - } - if diff := cmp.Diff(dump, wantDump, cmpOpts); diff != "" { - return fmt.Errorf("Dump() returned unexpected dump, diff (-got +want): %s", diff) - } - return nil -} diff --git a/xds/internal/xdsclient/e2e_test/dump_test.go 
b/xds/internal/xdsclient/e2e_test/dump_test.go new file mode 100644 index 000000000000..1a2765cc2ec9 --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/dump_test.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" +) + +func compareDump(ctx context.Context, client xdsclient.XDSClient, want map[string]map[string]xdsresource.UpdateWithMD) error { + var lastErr error + for { + if err := ctx.Err(); err != nil { + return fmt.Errorf("Timeout when waiting for expected dump: %v", lastErr) + } + cmpOpts := cmp.Options{ + cmpopts.EquateEmpty(), + 
cmp.Comparer(func(a, b time.Time) bool { return true }), + cmpopts.EquateErrors(), + protocmp.Transform(), + } + diff := cmp.Diff(want, client.DumpResources(), cmpOpts) + if diff == "" { + return nil + } + lastErr = fmt.Errorf("DumpResources() returned unexpected dump, diff (-want +got):\n%s", diff) + time.Sleep(100 * time.Millisecond) + } +} + +func (s) TestDumpResources(t *testing.T) { + // Initialize the xDS resources to be used in this test. + ldsTargets := []string{"lds.target.good:0000", "lds.target.good:1111"} + rdsTargets := []string{"route-config-0", "route-config-1"} + cdsTargets := []string{"cluster-0", "cluster-1"} + edsTargets := []string{"endpoints-0", "endpoints-1"} + listeners := make([]*v3listenerpb.Listener, len(ldsTargets)) + listenerAnys := make([]*anypb.Any, len(ldsTargets)) + for i := range ldsTargets { + listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) + listenerAnys[i] = testutils.MarshalAny(listeners[i]) + } + routes := make([]*v3routepb.RouteConfiguration, len(rdsTargets)) + routeAnys := make([]*anypb.Any, len(rdsTargets)) + for i := range rdsTargets { + routes[i] = e2e.DefaultRouteConfig(rdsTargets[i], ldsTargets[i], cdsTargets[i]) + routeAnys[i] = testutils.MarshalAny(routes[i]) + } + clusters := make([]*v3clusterpb.Cluster, len(cdsTargets)) + clusterAnys := make([]*anypb.Any, len(cdsTargets)) + for i := range cdsTargets { + clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i], e2e.SecurityLevelNone) + clusterAnys[i] = testutils.MarshalAny(clusters[i]) + } + endpoints := make([]*v3endpointpb.ClusterLoadAssignment, len(edsTargets)) + endpointAnys := make([]*anypb.Any, len(edsTargets)) + ips := []string{"0.0.0.0", "1.1.1.1"} + ports := []uint32{123, 456} + for i := range edsTargets { + endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i:i+1]) + endpointAnys[i] = testutils.MarshalAny(endpoints[i]) + } + + // Spin up an xDS management server on a local port. 
+ mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Dump resources and expect empty configs. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := compareDump(ctx, client, nil); err != nil { + t.Fatal(err) + } + + // Register watches, dump resources and expect configs in requested state. + for _, target := range ldsTargets { + client.WatchListener(target, func(xdsresource.ListenerUpdate, error) {}) + } + for _, target := range rdsTargets { + client.WatchRouteConfig(target, func(xdsresource.RouteConfigUpdate, error) {}) + } + for _, target := range cdsTargets { + client.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) + } + for _, target := range edsTargets { + client.WatchEndpoints(target, func(xdsresource.EndpointsUpdate, error) {}) + } + want := map[string]map[string]xdsresource.UpdateWithMD{ + "type.googleapis.com/envoy.config.listener.v3.Listener": { + ldsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + ldsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { + rdsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + rdsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + "type.googleapis.com/envoy.config.cluster.v3.Cluster": { + cdsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + cdsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + 
"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment": { + edsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + edsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + } + if err := compareDump(ctx, client, want); err != nil { + t.Fatal(err) + } + + // Configure the resources on the management server. + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + }); err != nil { + t.Fatal(err) + } + + // Dump resources and expect ACK configs. + want = map[string]map[string]xdsresource.UpdateWithMD{ + "type.googleapis.com/envoy.config.listener.v3.Listener": { + ldsTargets[0]: {Raw: listenerAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + ldsTargets[1]: {Raw: listenerAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { + rdsTargets[0]: {Raw: routeAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + rdsTargets[1]: {Raw: routeAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + "type.googleapis.com/envoy.config.cluster.v3.Cluster": { + cdsTargets[0]: {Raw: clusterAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + cdsTargets[1]: {Raw: clusterAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment": { + edsTargets[0]: {Raw: endpointAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + edsTargets[1]: {Raw: endpointAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + } 
+ if err := compareDump(ctx, client, want); err != nil { + t.Fatal(err) + } + + // Update the first resource of each type in the management server to a + // value which is expected to be NACK'ed by the xDS client. + const nackResourceIdx = 0 + listeners[nackResourceIdx].ApiListener = &v3listenerpb.ApiListener{} + routes[nackResourceIdx].VirtualHosts = []*v3routepb.VirtualHost{{Routes: []*v3routepb.Route{{}}}} + clusters[nackResourceIdx].ClusterDiscoveryType = &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC} + endpoints[nackResourceIdx].Endpoints = []*v3endpointpb.LocalityLbEndpoints{{}} + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + SkipValidation: true, + }); err != nil { + t.Fatal(err) + } + + // Verify that the xDS client reports the first resource of each type as + // being in "NACKed" state, and the second resource of each type to be in + // "ACKed" state. The version for the ACKed resource would be "2", while + // that for the NACKed resource would be "1". In the NACKed resource, the + // version which is NACKed is stored in the ErrorState field. 
+ want = map[string]map[string]xdsresource.UpdateWithMD{ + "type.googleapis.com/envoy.config.listener.v3.Listener": { + ldsTargets[0]: { + Raw: listenerAnys[0], + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + ldsTargets[1]: {Raw: listenerAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { + rdsTargets[0]: { + Raw: routeAnys[0], + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + rdsTargets[1]: {Raw: routeAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + "type.googleapis.com/envoy.config.cluster.v3.Cluster": { + cdsTargets[0]: { + Raw: clusterAnys[0], + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + cdsTargets[1]: {Raw: clusterAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment": { + edsTargets[0]: { + Raw: endpointAnys[0], + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + edsTargets[1]: {Raw: endpointAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + } + if err := compareDump(ctx, client, want); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go index 05e7083cd150..42544aa53b39 
100644 --- a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go @@ -70,18 +70,17 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { updateCh1 := testutils.NewChannel() updateCh2 := testutils.NewChannel() updateCh3 := testutils.NewChannel() - var rdsCancel2, rdsCancel3 func() rdsCancel1 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + // Watch for the same resource name. - rdsCancel2 = client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + rdsCancel2 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) }) t.Cleanup(rdsCancel2) // Watch for a different resource name. - rdsCancel3 = client.WatchRouteConfig(rdsNameNewStyle, func(u xdsresource.RouteConfigUpdate, err error) { + rdsCancel3 := client.WatchRouteConfig(rdsNameNewStyle, func(u xdsresource.RouteConfigUpdate, err error) { updateCh3.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) - rdsCancel3() }) t.Cleanup(rdsCancel3) }) diff --git a/xds/internal/xdsclient/e2e_test/resource_update_test.go b/xds/internal/xdsclient/e2e_test/resource_update_test.go new file mode 100644 index 000000000000..7294b40f93cd --- /dev/null +++ b/xds/internal/xdsclient/e2e_test/resource_update_test.go @@ -0,0 +1,1161 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/testutils/fakeserver" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb 
"github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. +) + +// startFakeManagementServer starts a fake xDS management server and returns a +// cleanup function to close the fake server. +func startFakeManagementServer(t *testing.T) (*fakeserver.Server, func()) { + t.Helper() + fs, sCleanup, err := fakeserver.StartServer() + if err != nil { + t.Fatalf("Failed to start fake xDS server: %v", err) + } + return fs, sCleanup +} + +func compareUpdateMetadata(ctx context.Context, dumpFunc func() map[string]xdsresource.UpdateWithMD, want map[string]xdsresource.UpdateWithMD) error { + var lastErr error + for ; ctx.Err() == nil; <-time.After(100 * time.Millisecond) { + cmpOpts := cmp.Options{ + cmpopts.EquateEmpty(), + cmp.Comparer(func(a, b time.Time) bool { return true }), + cmpopts.EquateErrors(), + protocmp.Transform(), + } + gotUpdateMetadata := dumpFunc() + diff := cmp.Diff(want, gotUpdateMetadata, cmpOpts) + if diff == "" { + return nil + } + lastErr = fmt.Errorf("unexpected diff in metadata, diff (-want +got):\n%s\n want: %+v\n got: %+v", diff, want, gotUpdateMetadata) + } + return fmt.Errorf("timeout when waiting for expected update metadata: %v", lastErr) +} + +// TestHandleListenerResponseFromManagementServer covers different scenarios +// involving receipt of an LDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleListenerResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + var ( + emptyRouterFilter = e2e.RouterHTTPFilter + apiListener = &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }) + }(), + } + resource1 = &v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: apiListener, + } + resource2 = &v3listenerpb.Listener{ + Name: resourceName2, + ApiListener: apiListener, + } + ) + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.ListenerUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: "Listener not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + }, + wantErr: "Listener not found in received response", + wantUpdateMetadata: 
map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3routepb.RouteConfiguration{})}, + }, + wantErr: "Listener not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }}), + }, + }, + wantErr: "no RouteSpecifier", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.ListenerUpdate{ + RouteConfigName: "route-configuration-name", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, 
Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: xdsresource.ListenerUpdate{ + RouteConfigName: "route-configuration-name", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // A wrapper struct to wrap the update and the associated error, as + // received by the resource watch callback. 
+ type updateAndErr struct { + update xdsresource.ListenerUpdate + err error + } + updateAndErrCh := testutils.NewChannel() + + // Register a watch, and push the results on to a channel. + client.WatchListener(test.resourceName, func(update xdsresource.ListenerUpdate, err error) { + updateAndErrCh.Send(updateAndErr{update: update, err: err}) + }) + t.Logf("Registered a watch for Listener %q", test.resourceName) + + // Wait for the discovery request to be sent out. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. 
+ val, err = updateAndErrCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(updateAndErr).update + gotErr := val.(updateAndErr).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.HTTPFilter{}, "Filter", "Config"), + cmpopts.IgnoreFields(xdsresource.ListenerUpdate{}, "Raw"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.listener.v3.Listener"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestHandleRouteConfigResponseFromManagementServer covers different scenarios +// involving receipt of an RDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + var ( + virtualHosts = []*v3routepb.VirtualHost{ + { + Domains: []string{"lds-target-name"}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: "cluster-name"}, + }, + }, + }, + }, + }, + } + resource1 = &v3routepb.RouteConfiguration{ + Name: resourceName1, + VirtualHosts: virtualHosts, + } + resource2 = &v3routepb.RouteConfiguration{ + Name: resourceName2, + VirtualHosts: virtualHosts, + } + ) + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.RouteConfigUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + // The first three tests involve scenarios where the response fails + // protobuf deserialization (because it contains an invalid data or type + // in the anypb.Any) or the requested resource is not present in the + // response. In either case, no resource update makes its way to the + // top-level xDS client. An RDS response without a requested resource + // does not mean that the resource does not exist in the server. It + // could be part of a future update. Therefore, the only failure mode + // for this resource is for the watch to timeout. 
+ { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: fmt.Sprintf("watch for resource %q of type RouteConfigResource timed out", resourceName1), + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + }, + wantErr: fmt.Sprintf("watch for resource %q of type RouteConfigResource timed out", resourceName1), + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3clusterpb.Cluster{})}, + }, + wantErr: fmt.Sprintf("watch for resource %q of type RouteConfigResource timed out", resourceName1), + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: 
[]*anypb.Any{testutils.MarshalAny(&v3routepb.RouteConfiguration{ + Name: resourceName1, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds-resource-name"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: "cluster-resource-name"}, + }}}}, + RetryPolicy: &v3routepb.RetryPolicy{ + NumRetries: &wrapperspb.UInt32Value{Value: 0}, + }, + }}, + })}, + }, + wantErr: "received route is invalid: retry_policy.num_retries = 0; must be >= 1", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{"lds-target-name"}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-name": {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}}, + }, + }, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", 
+ Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{"lds-target-name"}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-name": {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}}, + }, + }, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // A wrapper struct to wrap the update and the associated error, as + // received by the resource watch callback. + type updateAndErr struct { + update xdsresource.RouteConfigUpdate + err error + } + updateAndErrCh := testutils.NewChannel() + + // Register a watch, and push the results on to a channel. 
+ client.WatchRouteConfig(test.resourceName, func(update xdsresource.RouteConfigUpdate, err error) { + updateAndErrCh.Send(updateAndErr{update: update, err: err}) + }) + t.Logf("Registered a watch for Route Configuration %q", test.resourceName) + + // Wait for the discovery request to be sent out. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. 
+ val, err = updateAndErrCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(updateAndErr).update + gotErr := val.(updateAndErr).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.route.v3.RouteConfiguration"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestHandleClusterResponseFromManagementServer covers different scenarios +// involving receipt of a CDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + resource1 := &v3clusterpb.Cluster{ + Name: resourceName1, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: "eds-service-name", + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + } + resource2 := proto.Clone(resource1).(*v3clusterpb.Cluster) + resource2.Name = resourceName2 + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.ClusterUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: "Cluster not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + }, + wantErr: "Cluster not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: 
xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3endpointpb.ClusterLoadAssignment{})}, + }, + wantErr: "Cluster not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: resourceName1, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: "eds-service-name", + }, + LbPolicy: v3clusterpb.Cluster_MAGLEV, + })}, + }, + wantErr: "unexpected lbPolicy MAGLEV", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: "resource-name-1", + EDSServiceName: "eds-service-name", + 
LRSServerConfig: xdsresource.ClusterLRSServerSelf, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: "resource-name-1", + EDSServiceName: "eds-service-name", + LRSServerConfig: xdsresource.ClusterLRSServerSelf, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. 
+ nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // A wrapper struct to wrap the update and the associated error, as + // received by the resource watch callback. + type updateAndErr struct { + update xdsresource.ClusterUpdate + err error + } + updateAndErrCh := testutils.NewChannel() + + // Register a watch, and push the results on to a channel. + client.WatchCluster(test.resourceName, func(update xdsresource.ClusterUpdate, err error) { + updateAndErrCh.Send(updateAndErr{update: update, err: err}) + }) + t.Logf("Registered a watch for Cluster %q", test.resourceName) + + // Wait for the discovery request to be sent out. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. 
+	mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. + val, err = updateAndErrCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(updateAndErr).update + gotErr := val.(updateAndErr).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.cluster.v3.Cluster"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestHandleEndpointsResponseFromManagementServer covers different scenarios +// involving receipt of an EDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + resource1 := &v3endpointpb.ClusterLoadAssignment{ + ClusterName: resourceName1, + Endpoints: []*v3endpointpb.LocalityLbEndpoints{ + { + Locality: &v3corepb.Locality{SubZone: "locality-1"}, + LbEndpoints: []*v3endpointpb.LbEndpoint{ + { + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: "addr1", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: uint32(314), + }, + }, + }, + }, + }, + }, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + Priority: 1, + }, + { + Locality: &v3corepb.Locality{SubZone: "locality-2"}, + LbEndpoints: []*v3endpointpb.LbEndpoint{ + { + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: "addr2", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: uint32(159), + }, + }, + }, + }, + }, + }, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + Priority: 0, + }, + }, + } + resource2 := proto.Clone(resource1).(*v3endpointpb.ClusterLoadAssignment) + resource2.ClusterName = resourceName2 + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.EndpointsUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + // The first three tests involve scenarios where the response fails + // protobuf deserialization (because it contains an invalid data or type + // in the anypb.Any) or the requested resource is not present 
in the + // response. In either case, no resource update makes its way to the + // top-level xDS client. An EDS response without a requested resource + // does not mean that the resource does not exist in the server. It + // could be part of a future update. Therefore, the only failure mode + // for this resource is for the watch to timeout. + { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: fmt.Sprintf("watch for resource %q of type EndpointsResource timed out", resourceName1), + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + }, + wantErr: fmt.Sprintf("watch for resource %q of type EndpointsResource timed out", resourceName1), + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{})}, + }, + wantErr: fmt.Sprintf("watch for resource %q of type EndpointsResource timed out", resourceName1), + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: 
xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3endpointpb.ClusterLoadAssignment{ + ClusterName: resourceName1, + Endpoints: []*v3endpointpb.LocalityLbEndpoints{ + { + Locality: &v3corepb.Locality{SubZone: "locality-1"}, + LbEndpoints: []*v3endpointpb.LbEndpoint{ + { + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: "addr1", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: uint32(314), + }, + }, + }, + }, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 0}, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + Priority: 1, + }, + }, + }), + }, + }, + wantErr: "EDS response contains an endpoint with zero weight", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: 
[]xdsresource.Endpoint{{Address: "addr2:159", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. 
+ nodeID := uuid.New().String() + client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer client.Close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // A wrapper struct to wrap the update and the associated error, as + // received by the resource watch callback. + type updateAndErr struct { + update xdsresource.EndpointsUpdate + err error + } + updateAndErrCh := testutils.NewChannel() + + // Register a watch, and push the results on to a channel. + client.WatchEndpoints(test.resourceName, func(update xdsresource.EndpointsUpdate, err error) { + updateAndErrCh.Send(updateAndErr{update: update, err: err}) + }) + t.Logf("Registered a watch for Endpoint %q", test.resourceName) + + // Wait for the discovery request to be sent out. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. 
+ mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. + val, err = updateAndErrCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(updateAndErr).update + gotErr := val.(updateAndErr).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 8a0703190b21..c199ae767a1c 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -23,10 +23,6 @@ import ( "testing" "time" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - durationpb "github.com/golang/protobuf/ptypes/duration" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -37,7 +33,10 @@ 
import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client. + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + durationpb "github.com/golang/protobuf/ptypes/duration" ) const ( @@ -55,8 +54,8 @@ func (s) TestLRSClient(t *testing.T) { XDSServer: &bootstrap.ServerConfig{ ServerURI: fs.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV2, - NodeProto: &v2corepb.Node{}, + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{}, }, }, defaultClientWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -72,8 +71,8 @@ func (s) TestLRSClient(t *testing.T) { ServerURI: fs.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), CredsType: "insecure", - TransportAPI: version.TransportV2, - NodeProto: &v2corepb.Node{}, + TransportAPI: version.TransportV3, + NodeProto: &v3corepb.Node{}, }, ) defer lrsCancel1() @@ -101,7 +100,7 @@ func (s) TestLRSClient(t *testing.T) { Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), CredsType: "insecure", TransportAPI: version.TransportV2, - NodeProto: &v2corepb.Node{}, + NodeProto: &v3corepb.Node{}, }, ) defer lrsCancel2() @@ -120,7 +119,7 @@ func (s) TestLRSClient(t *testing.T) { // Send one resp to the client. 
fs2.LRSResponseChan <- &fakeserver.Response{ - Resp: &lrspb.LoadStatsResponse{ + Resp: &v3lrspb.LoadStatsResponse{ SendAllClusters: true, LoadReportingInterval: &durationpb.Duration{Nanos: 50000000}, }, @@ -131,16 +130,16 @@ func (s) TestLRSClient(t *testing.T) { if err != nil { t.Fatalf("unexpected LRS request: %v, %v, want error canceled", u, err) } - receivedLoad := u.(*fakeserver.Request).Req.(*lrspb.LoadStatsRequest).ClusterStats + receivedLoad := u.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest).ClusterStats if len(receivedLoad) <= 0 { t.Fatalf("unexpected load received, want load for cluster, eds, dropped for test") } receivedLoad[0].LoadReportInterval = nil - want := &endpointpb.ClusterStats{ + want := &v3endpointpb.ClusterStats{ ClusterName: "cluster", ClusterServiceName: "eds", TotalDroppedRequests: 1, - DroppedRequests: []*endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}}, + DroppedRequests: []*v3endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}}, } if d := cmp.Diff(want, receivedLoad[0], protocmp.Transform()); d != "" { t.Fatalf("unexpected load received, want load for cluster, eds, dropped for test, diff (-want +got):\n%s", d) diff --git a/xds/internal/xdsclient/pubsub/dump.go b/xds/internal/xdsclient/pubsub/dump.go deleted file mode 100644 index 2ff19a901616..000000000000 --- a/xds/internal/xdsclient/pubsub/dump.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pubsub - -import ( - anypb "github.com/golang/protobuf/ptypes/any" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -func rawFromCache(s string, cache interface{}) *anypb.Any { - switch c := cache.(type) { - case map[string]xdsresource.ListenerUpdate: - if v, ok := c[s]; ok { - return v.Raw - } - return nil - case map[string]xdsresource.RouteConfigUpdate: - if v, ok := c[s]; ok { - return v.Raw - } - return nil - case map[string]xdsresource.ClusterUpdate: - if v, ok := c[s]; ok { - return v.Raw - } - return nil - case map[string]xdsresource.EndpointsUpdate: - if v, ok := c[s]; ok { - return v.Raw - } - return nil - default: - return nil - } -} - -// Dump dumps the resource for the given type. -func (pb *Pubsub) Dump(t xdsresource.ResourceType) map[string]xdsresource.UpdateWithMD { - pb.mu.Lock() - defer pb.mu.Unlock() - - var ( - md map[string]xdsresource.UpdateMetadata - cache interface{} - ) - switch t { - case xdsresource.ListenerResource: - md = pb.ldsMD - cache = pb.ldsCache - case xdsresource.RouteConfigResource: - md = pb.rdsMD - cache = pb.rdsCache - case xdsresource.ClusterResource: - md = pb.cdsMD - cache = pb.cdsCache - case xdsresource.EndpointsResource: - md = pb.edsMD - cache = pb.edsCache - default: - pb.logger.Errorf("dumping resource of unknown type: %v", t) - return nil - } - - ret := make(map[string]xdsresource.UpdateWithMD, len(md)) - for s, md := range md { - ret[s] = xdsresource.UpdateWithMD{ - MD: md, - Raw: rawFromCache(s, cache), - } - } - return ret -} diff --git a/xds/internal/xdsclient/pubsub/interface.go b/xds/internal/xdsclient/pubsub/interface.go deleted file mode 100644 index 334ec101e29d..000000000000 --- a/xds/internal/xdsclient/pubsub/interface.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pubsub - -import "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - -// UpdateHandler receives and processes (by taking appropriate actions) xDS -// resource updates from an APIClient for a specific version. -// -// It's a subset of the APIs of a *Pubsub. -type UpdateHandler interface { - // NewListeners handles updates to xDS listener resources. - NewListeners(map[string]xdsresource.ListenerUpdateErrTuple, xdsresource.UpdateMetadata) - // NewRouteConfigs handles updates to xDS RouteConfiguration resources. - NewRouteConfigs(map[string]xdsresource.RouteConfigUpdateErrTuple, xdsresource.UpdateMetadata) - // NewClusters handles updates to xDS Cluster resources. - NewClusters(map[string]xdsresource.ClusterUpdateErrTuple, xdsresource.UpdateMetadata) - // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely - // referred to as Endpoints) resources. - NewEndpoints(map[string]xdsresource.EndpointsUpdateErrTuple, xdsresource.UpdateMetadata) - // NewConnectionError handles connection errors from the xDS stream. The - // error will be reported to all the resource watchers. - NewConnectionError(err error) -} diff --git a/xds/internal/xdsclient/pubsub/pubsub.go b/xds/internal/xdsclient/pubsub/pubsub.go deleted file mode 100644 index 95e8ac77300e..000000000000 --- a/xds/internal/xdsclient/pubsub/pubsub.go +++ /dev/null @@ -1,186 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package pubsub implements a utility type to maintain resource watchers and -// the updates. -// -// This package is designed to work with the xds resources. It could be made a -// general system that works with all types. -package pubsub - -import ( - "sync" - "time" - - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// Pubsub maintains resource watchers and resource updates. -// -// There can be multiple watchers for the same resource. An update to a resource -// triggers updates to all the existing watchers. Watchers can be canceled at -// any time. -type Pubsub struct { - done *grpcsync.Event - logger *grpclog.PrefixLogger - watchExpiryTimeout time.Duration - nodeID string - - updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate - // All the following maps are to keep the updates/metadata in a cache. 
- mu sync.Mutex - ldsWatchers map[string]map[*watchInfo]bool - ldsCache map[string]xdsresource.ListenerUpdate - ldsMD map[string]xdsresource.UpdateMetadata - rdsWatchers map[string]map[*watchInfo]bool - rdsCache map[string]xdsresource.RouteConfigUpdate - rdsMD map[string]xdsresource.UpdateMetadata - cdsWatchers map[string]map[*watchInfo]bool - cdsCache map[string]xdsresource.ClusterUpdate - cdsMD map[string]xdsresource.UpdateMetadata - edsWatchers map[string]map[*watchInfo]bool - edsCache map[string]xdsresource.EndpointsUpdate - edsMD map[string]xdsresource.UpdateMetadata -} - -// New creates a new Pubsub. -// -// The passed in nodeID will be attached to all errors sent to the watchers. -func New(watchExpiryTimeout time.Duration, nodeID string, logger *grpclog.PrefixLogger) *Pubsub { - pb := &Pubsub{ - done: grpcsync.NewEvent(), - logger: logger, - watchExpiryTimeout: watchExpiryTimeout, - nodeID: nodeID, - - updateCh: buffer.NewUnbounded(), - ldsWatchers: make(map[string]map[*watchInfo]bool), - ldsCache: make(map[string]xdsresource.ListenerUpdate), - ldsMD: make(map[string]xdsresource.UpdateMetadata), - rdsWatchers: make(map[string]map[*watchInfo]bool), - rdsCache: make(map[string]xdsresource.RouteConfigUpdate), - rdsMD: make(map[string]xdsresource.UpdateMetadata), - cdsWatchers: make(map[string]map[*watchInfo]bool), - cdsCache: make(map[string]xdsresource.ClusterUpdate), - cdsMD: make(map[string]xdsresource.UpdateMetadata), - edsWatchers: make(map[string]map[*watchInfo]bool), - edsCache: make(map[string]xdsresource.EndpointsUpdate), - edsMD: make(map[string]xdsresource.UpdateMetadata), - } - go pb.run() - return pb -} - -// WatchListener registers a watcher for the LDS resource. -// -// It also returns whether this is the first watch for this resource. 
-func (pb *Pubsub) WatchListener(serviceName string, cb func(xdsresource.ListenerUpdate, error)) (first bool, cancel func() bool) { - wi := &watchInfo{ - c: pb, - rType: xdsresource.ListenerResource, - target: serviceName, - ldsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { - wi.timeout() - }) - return pb.watch(wi) -} - -// WatchRouteConfig register a watcher for the RDS resource. -// -// It also returns whether this is the first watch for this resource. -func (pb *Pubsub) WatchRouteConfig(routeName string, cb func(xdsresource.RouteConfigUpdate, error)) (first bool, cancel func() bool) { - wi := &watchInfo{ - c: pb, - rType: xdsresource.RouteConfigResource, - target: routeName, - rdsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { - wi.timeout() - }) - return pb.watch(wi) -} - -// WatchCluster register a watcher for the CDS resource. -// -// It also returns whether this is the first watch for this resource. -func (pb *Pubsub) WatchCluster(clusterName string, cb func(xdsresource.ClusterUpdate, error)) (first bool, cancel func() bool) { - wi := &watchInfo{ - c: pb, - rType: xdsresource.ClusterResource, - target: clusterName, - cdsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { - wi.timeout() - }) - return pb.watch(wi) -} - -// WatchEndpoints registers a watcher for the EDS resource. -// -// It also returns whether this is the first watch for this resource. -func (pb *Pubsub) WatchEndpoints(clusterName string, cb func(xdsresource.EndpointsUpdate, error)) (first bool, cancel func() bool) { - wi := &watchInfo{ - c: pb, - rType: xdsresource.EndpointsResource, - target: clusterName, - edsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(pb.watchExpiryTimeout, func() { - wi.timeout() - }) - return pb.watch(wi) -} - -// Close closes the pubsub. 
-func (pb *Pubsub) Close() { - if pb.done.HasFired() { - return - } - pb.done.Fire() -} - -// run is a goroutine for all the callbacks. -// -// Callback can be called in watch(), if an item is found in cache. Without this -// goroutine, the callback will be called inline, which might cause a deadlock -// in user's code. Callbacks also cannot be simple `go callback()` because the -// order matters. -func (pb *Pubsub) run() { - for { - select { - case t := <-pb.updateCh.Get(): - pb.updateCh.Load() - if pb.done.HasFired() { - return - } - pb.callCallback(t.(*watcherInfoWithUpdate)) - case <-pb.done.Done(): - return - } - } -} diff --git a/xds/internal/xdsclient/pubsub/update.go b/xds/internal/xdsclient/pubsub/update.go deleted file mode 100644 index 9ae6ae976712..000000000000 --- a/xds/internal/xdsclient/pubsub/update.go +++ /dev/null @@ -1,318 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pubsub - -import ( - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/proto" -) - -type watcherInfoWithUpdate struct { - wi *watchInfo - update interface{} - err error -} - -// scheduleCallback should only be called by methods of watchInfo, which checks -// for watcher states and maintain consistency. 
-func (pb *Pubsub) scheduleCallback(wi *watchInfo, update interface{}, err error) { - pb.updateCh.Put(&watcherInfoWithUpdate{ - wi: wi, - update: update, - err: err, - }) -} - -func (pb *Pubsub) callCallback(wiu *watcherInfoWithUpdate) { - pb.mu.Lock() - // Use a closure to capture the callback and type assertion, to save one - // more switch case. - // - // The callback must be called without pb.mu. Otherwise if the callback calls - // another watch() inline, it will cause a deadlock. This leaves a small - // window that a watcher's callback could be called after the watcher is - // canceled, and the user needs to take care of it. - var ccb func() - switch wiu.wi.rType { - case xdsresource.ListenerResource: - if s, ok := pb.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.ldsCallback(wiu.update.(xdsresource.ListenerUpdate), wiu.err) } - } - case xdsresource.RouteConfigResource: - if s, ok := pb.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.rdsCallback(wiu.update.(xdsresource.RouteConfigUpdate), wiu.err) } - } - case xdsresource.ClusterResource: - if s, ok := pb.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.cdsCallback(wiu.update.(xdsresource.ClusterUpdate), wiu.err) } - } - case xdsresource.EndpointsResource: - if s, ok := pb.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.edsCallback(wiu.update.(xdsresource.EndpointsUpdate), wiu.err) } - } - } - pb.mu.Unlock() - - if ccb != nil { - ccb() - } -} - -// NewListeners is called when there's a new LDS update. -func (pb *Pubsub) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - pb.mu.Lock() - defer pb.mu.Unlock() - - for name, uErr := range updates { - if s, ok := pb.ldsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. 
- mdCopy := pb.ldsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - pb.ldsMD[name] = mdCopy - for wi := range s { - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := pb.ldsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. - pb.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - pb.ldsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - pb.ldsMD[name] = mdCopy - } - } - // Resources not in the new update were removed by the server, so delete - // them. - for name := range pb.ldsCache { - if _, ok := updates[name]; !ok { - // If resource exists in cache, but not in the new update, delete - // the resource from cache, and also send an resource not found - // error to indicate resource removed. - delete(pb.ldsCache, name) - pb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} - for wi := range pb.ldsWatchers[name] { - wi.resourceNotFound() - } - } - } - // When LDS resource is removed, we don't delete corresponding RDS cached - // data. The RDS watch will be canceled, and cache entry is removed when the - // last watch is canceled. -} - -// NewRouteConfigs is called when there's a new RDS update. 
-func (pb *Pubsub) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - pb.mu.Lock() - defer pb.mu.Unlock() - - // If no error received, the status is ACK. - for name, uErr := range updates { - if s, ok := pb.rdsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := pb.rdsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - pb.rdsMD[name] = mdCopy - for wi := range s { - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := pb.rdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. - pb.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - pb.rdsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - pb.rdsMD[name] = mdCopy - } - } -} - -// NewClusters is called when there's a new CDS update. -func (pb *Pubsub) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - pb.mu.Lock() - defer pb.mu.Unlock() - - for name, uErr := range updates { - if s, ok := pb.cdsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. 
- mdCopy := pb.cdsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - pb.cdsMD[name] = mdCopy - for wi := range s { - // Send the watcher the individual error, instead of the - // overall combined error from the metadata.ErrState. - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := pb.cdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. - pb.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - pb.cdsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - pb.cdsMD[name] = mdCopy - } - } - // Resources not in the new update were removed by the server, so delete - // them. - for name := range pb.cdsCache { - if _, ok := updates[name]; !ok { - // If resource exists in cache, but not in the new update, delete it - // from cache, and also send an resource not found error to indicate - // resource removed. - delete(pb.cdsCache, name) - pb.cdsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} - for wi := range pb.cdsWatchers[name] { - wi.resourceNotFound() - } - } - } - // When CDS resource is removed, we don't delete corresponding EDS cached - // data. The EDS watch will be canceled, and cache entry is removed when the - // last watch is canceled. -} - -// NewEndpoints is called when there's anew EDS update. 
-func (pb *Pubsub) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) { - pb.mu.Lock() - defer pb.mu.Unlock() - - for name, uErr := range updates { - if s, ok := pb.edsWatchers[name]; ok { - if uErr.Err != nil { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := pb.edsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - pb.edsMD[name] = mdCopy - for wi := range s { - // Send the watcher the individual error, instead of the - // overall combined error from the metadata.ErrState. - wi.newError(uErr.Err) - } - continue - } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different - // from the one currently cached. - if cur, ok := pb.edsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) { - for wi := range s { - wi.newUpdate(uErr.Update) - } - } - // Sync cache. - pb.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) - pb.edsCache[name] = uErr.Update - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - mdCopy := metadata - mdCopy.Status = xdsresource.ServiceStatusACKed - mdCopy.ErrState = nil - if metadata.ErrState != nil { - mdCopy.Version = metadata.ErrState.Version - } - pb.edsMD[name] = mdCopy - } - } -} - -// NewConnectionError is called by the underlying xdsAPIClient when it receives -// a connection error. The error will be forwarded to all the resource watchers. 
-func (pb *Pubsub) NewConnectionError(err error) { - pb.mu.Lock() - defer pb.mu.Unlock() - - for _, s := range pb.ldsWatchers { - for wi := range s { - wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) - } - } - for _, s := range pb.rdsWatchers { - for wi := range s { - wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) - } - } - for _, s := range pb.cdsWatchers { - for wi := range s { - wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) - } - } - for _, s := range pb.edsWatchers { - for wi := range s { - wi.newError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) - } - } -} diff --git a/xds/internal/xdsclient/pubsub/watch.go b/xds/internal/xdsclient/pubsub/watch.go deleted file mode 100644 index bef179936a89..000000000000 --- a/xds/internal/xdsclient/pubsub/watch.go +++ /dev/null @@ -1,239 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package pubsub - -import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -type watchInfoState int - -const ( - watchInfoStateStarted watchInfoState = iota - watchInfoStateRespReceived - watchInfoStateTimeout - watchInfoStateCanceled -) - -// watchInfo holds all the information from a watch() call. -type watchInfo struct { - c *Pubsub - rType xdsresource.ResourceType - target string - - ldsCallback func(xdsresource.ListenerUpdate, error) - rdsCallback func(xdsresource.RouteConfigUpdate, error) - cdsCallback func(xdsresource.ClusterUpdate, error) - edsCallback func(xdsresource.EndpointsUpdate, error) - - expiryTimer *time.Timer - - // mu protects state, and c.scheduleCallback(). - // - No callback should be scheduled after watchInfo is canceled. - // - No timeout error should be scheduled after watchInfo is resp received. - mu sync.Mutex - state watchInfoState -} - -func (wi *watchInfo) newUpdate(update interface{}) { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.c.scheduleCallback(wi, update, nil) -} - -func (wi *watchInfo) newError(err error) { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.sendErrorLocked(err) -} - -func (wi *watchInfo) resourceNotFound() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.sendErrorLocked(xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "xds: %v target %s not found in received response", wi.rType, wi.target)) -} - -func (wi *watchInfo) timeout() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled || wi.state == watchInfoStateRespReceived { - return - } - 
wi.state = watchInfoStateTimeout - wi.sendErrorLocked(fmt.Errorf("xds: %v target %s not found, watcher timeout", wi.rType, wi.target)) -} - -// Caller must hold wi.mu. -func (wi *watchInfo) sendErrorLocked(err error) { - var u interface{} - switch wi.rType { - case xdsresource.ListenerResource: - u = xdsresource.ListenerUpdate{} - case xdsresource.RouteConfigResource: - u = xdsresource.RouteConfigUpdate{} - case xdsresource.ClusterResource: - u = xdsresource.ClusterUpdate{} - case xdsresource.EndpointsResource: - u = xdsresource.EndpointsUpdate{} - } - - errMsg := err.Error() - errTyp := xdsresource.ErrType(err) - if errTyp == xdsresource.ErrorTypeUnknown { - err = fmt.Errorf("%v, xDS client nodeID: %s", errMsg, wi.c.nodeID) - } else { - err = xdsresource.NewErrorf(errTyp, "%v, xDS client nodeID: %s", errMsg, wi.c.nodeID) - } - - wi.c.scheduleCallback(wi, u, err) -} - -func (wi *watchInfo) cancel() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.expiryTimer.Stop() - wi.state = watchInfoStateCanceled -} - -func (pb *Pubsub) watch(wi *watchInfo) (first bool, cancel func() bool) { - pb.mu.Lock() - defer pb.mu.Unlock() - pb.logger.Debugf("new watch for type %v, resource name %v", wi.rType, wi.target) - var ( - watchers map[string]map[*watchInfo]bool - mds map[string]xdsresource.UpdateMetadata - ) - switch wi.rType { - case xdsresource.ListenerResource: - watchers = pb.ldsWatchers - mds = pb.ldsMD - case xdsresource.RouteConfigResource: - watchers = pb.rdsWatchers - mds = pb.rdsMD - case xdsresource.ClusterResource: - watchers = pb.cdsWatchers - mds = pb.cdsMD - case xdsresource.EndpointsResource: - watchers = pb.edsWatchers - mds = pb.edsMD - default: - pb.logger.Errorf("unknown watch type: %v", wi.rType) - return false, nil - } - - var firstWatcher bool - resourceName := wi.target - s, ok := watchers[wi.target] - if !ok { - // If this is a new watcher, will ask lower level to send a new request - // with the resource 
name. - // - // If this (type+name) is already being watched, will not notify the - // underlying versioned apiClient. - pb.logger.Debugf("first watch for type %v, resource name %v, will send a new xDS request", wi.rType, wi.target) - s = make(map[*watchInfo]bool) - watchers[resourceName] = s - mds[resourceName] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested} - firstWatcher = true - } - // No matter what, add the new watcher to the set, so it's callback will be - // call for new responses. - s[wi] = true - - // If the resource is in cache, call the callback with the value. - switch wi.rType { - case xdsresource.ListenerResource: - if v, ok := pb.ldsCache[resourceName]; ok { - pb.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - case xdsresource.RouteConfigResource: - if v, ok := pb.rdsCache[resourceName]; ok { - pb.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - case xdsresource.ClusterResource: - if v, ok := pb.cdsCache[resourceName]; ok { - pb.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - case xdsresource.EndpointsResource: - if v, ok := pb.edsCache[resourceName]; ok { - pb.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) - wi.newUpdate(v) - } - } - - return firstWatcher, func() bool { - pb.logger.Debugf("watch for type %v, resource name %v canceled", wi.rType, wi.target) - wi.cancel() - pb.mu.Lock() - defer pb.mu.Unlock() - var lastWatcher bool - if s := watchers[resourceName]; s != nil { - // Remove this watcher, so it's callback will not be called in the - // future. 
- delete(s, wi) - if len(s) == 0 { - pb.logger.Debugf("last watch for type %v, resource name %v canceled, will send a new xDS request", wi.rType, wi.target) - // If this was the last watcher, also tell xdsv2Client to stop - // watching this resource. - delete(watchers, resourceName) - delete(mds, resourceName) - lastWatcher = true - // Remove the resource from cache. When a watch for this - // resource is added later, it will trigger a xDS request with - // resource names, and client will receive new xDS responses. - switch wi.rType { - case xdsresource.ListenerResource: - delete(pb.ldsCache, resourceName) - case xdsresource.RouteConfigResource: - delete(pb.rdsCache, resourceName) - case xdsresource.ClusterResource: - delete(pb.cdsCache, resourceName) - case xdsresource.EndpointsResource: - delete(pb.edsCache, resourceName) - } - } - } - return lastWatcher - } -} diff --git a/xds/internal/xdsclient/watchers_test.go b/xds/internal/xdsclient/watchers_test.go deleted file mode 100644 index 36409821ab0d..000000000000 --- a/xds/internal/xdsclient/watchers_test.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package xdsclient - -import ( - "testing" - - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" -) - -// findPubsubForTest returns the pubsub for the given authority, to send updates -// to. 
If authority is "", the default is returned. If the authority is not -// found, the test will fail. -func findPubsubForTest(t *testing.T, c *clientImpl, authority string) pubsub.UpdateHandler { - t.Helper() - var config *bootstrap.ServerConfig - if authority == "" { - config = c.config.XDSServer - } else { - authConfig, ok := c.config.Authorities[authority] - if !ok { - t.Fatalf("failed to find authority %q", authority) - } - config = authConfig.XDSServer - } - a := c.authorities[config.String()] - if a == nil { - t.Fatalf("authority for %q is not created", authority) - } - return a.pubsub -} diff --git a/xds/internal/xdsclient/xdsclient_test.go b/xds/internal/xdsclient/xdsclient_test.go index 74da4de7c8b4..d7bb926659f3 100644 --- a/xds/internal/xdsclient/xdsclient_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -22,7 +22,6 @@ import ( "testing" "google.golang.org/grpc/internal/grpctest" - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 API client. ) type s struct { diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index a2529793392b..87e6dbd1194a 100644 --- a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -31,8 +31,7 @@ var ( // Singleton instantiation of the resource type implementation. clusterType = clusterResourceType{ resourceTypeState: resourceTypeState{ - v2TypeURL: "type.googleapis.com/envoy.api.v2.Cluster", - v3TypeURL: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + typeURL: "type.googleapis.com/envoy.config.cluster.v3.Cluster", typeEnum: ClusterResource, allResourcesRequiredInSotW: true, }, @@ -50,7 +49,7 @@ type clusterResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. 
func (clusterResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, cluster, err := unmarshalClusterResource(resource, nil, opts.Logger) + name, cluster, err := unmarshalClusterResource(resource, opts.Logger) switch { case name == "": // Name is unset only when protobuf deserialization fails. @@ -90,7 +89,6 @@ func (c *ClusterResourceData) Equal(other ResourceData) bool { return false } return proto.Equal(c.Resource.Raw, other.Raw()) - } // ToJSON returns a JSON string representation of the resource data. diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index 2ba7e494aeca..dc1c09da08f3 100644 --- a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -31,8 +31,7 @@ var ( // Singleton instantiation of the resource type implementation. endpointsType = endpointsResourceType{ resourceTypeState: resourceTypeState{ - v2TypeURL: "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment", - v3TypeURL: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + typeURL: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", typeEnum: EndpointsResource, allResourcesRequiredInSotW: false, }, diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 442389f1cc18..6b2fff9f6f0c 100644 --- a/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -34,8 +34,7 @@ var ( // Singleton instantiation of the resource type implementation. 
listenerType = listenerResourceType{ resourceTypeState: resourceTypeState{ - v2TypeURL: "type.googleapis.com/envoy.api.v2.Listener", - v3TypeURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + typeURL: "type.googleapis.com/envoy.config.listener.v3.Listener", typeEnum: ListenerResource, allResourcesRequiredInSotW: true, }, @@ -82,7 +81,7 @@ func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (listenerResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, listener, err := unmarshalListenerResource(resource, nil, opts.Logger) + name, listener, err := unmarshalListenerResource(resource, opts.Logger) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/xds/internal/xdsclient/xdsresource/resource_type.go b/xds/internal/xdsclient/xdsresource/resource_type.go index 6946c5647fc0..6fced7784d02 100644 --- a/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/xds/internal/xdsclient/xdsresource/resource_type.go @@ -65,11 +65,8 @@ type ResourceWatcher interface { // Type wraps all resource-type specific functionality. Each supported resource // type will provide an implementation of this interface. type Type interface { - // V2TypeURL is the xDS type URL of this resource type for v2 transport. - V2TypeURL() string - - // V3TypeURL is the xDS type URL of this resource type for v3 transport. - V3TypeURL() string + // TypeURL is the xDS type URL of this resource type for v3 transport. + TypeURL() string // TypeEnum is an enumerated value for this resource type. This can be used // for logging/debugging purposes, as well in cases where the resource type @@ -135,18 +132,13 @@ type DecodeResult struct { // type implementations, which can then embed this struct and get the methods // implemented here for free. 
type resourceTypeState struct { - v2TypeURL string - v3TypeURL string + typeURL string typeEnum ResourceType allResourcesRequiredInSotW bool } -func (r resourceTypeState) V2TypeURL() string { - return r.v2TypeURL -} - -func (r resourceTypeState) V3TypeURL() string { - return r.v3TypeURL +func (r resourceTypeState) TypeURL() string { + return r.typeURL } func (r resourceTypeState) TypeEnum() ResourceType { diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 9c7465fd7de0..31be4d6aebd0 100644 --- a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -31,8 +31,7 @@ var ( // Singleton instantiation of the resource type implementation. routeConfigType = routeConfigResourceType{ resourceTypeState: resourceTypeState{ - v2TypeURL: "type.googleapis.com/envoy.api.v2.RouteConfiguration", - v3TypeURL: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + typeURL: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", typeEnum: RouteConfigResource, allResourcesRequiredInSotW: false, }, diff --git a/xds/internal/xdsclient/xdsresource/test_utils_test.go b/xds/internal/xdsclient/xdsresource/test_utils_test.go index b352caa23b75..04a15f96c248 100644 --- a/xds/internal/xdsclient/xdsresource/test_utils_test.go +++ b/xds/internal/xdsclient/xdsresource/test_utils_test.go @@ -42,11 +42,4 @@ var ( cmp.Comparer(func(a, b time.Time) bool { return true }), protocmp.Transform(), } - - cmpOptsIgnoreDetails = cmp.Options{ - cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - return (x == nil) == (y == nil) - }), - } ) diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go index faf34f98e3c7..d9c78997cffb 100644 --- a/xds/internal/xdsclient/xdsresource/type.go +++ 
b/xds/internal/xdsclient/xdsresource/type.go @@ -165,3 +165,57 @@ func (r ResourceType) String() string { return "UnknownResource" } } + +var v2ResourceTypeToURL = map[ResourceType]string{ + ListenerResource: version.V2ListenerURL, + HTTPConnManagerResource: version.V2HTTPConnManagerURL, + RouteConfigResource: version.V2RouteConfigURL, + ClusterResource: version.V2ClusterURL, + EndpointsResource: version.V2EndpointsURL, +} +var v3ResourceTypeToURL = map[ResourceType]string{ + ListenerResource: version.V3ListenerURL, + HTTPConnManagerResource: version.V3HTTPConnManagerURL, + RouteConfigResource: version.V3RouteConfigURL, + ClusterResource: version.V3ClusterURL, + EndpointsResource: version.V3EndpointsURL, +} + +// URL returns the transport protocol specific resource type URL. +func (r ResourceType) URL(v version.TransportAPI) string { + var mapping map[ResourceType]string + switch v { + case version.TransportV2: + mapping = v2ResourceTypeToURL + case version.TransportV3: + mapping = v3ResourceTypeToURL + default: + return "UnknownResource" + } + if url, ok := mapping[r]; ok { + return url + } + return "UnknownResource" +} + +var urlToResourceType = map[string]ResourceType{ + version.V2ListenerURL: ListenerResource, + version.V2RouteConfigURL: RouteConfigResource, + version.V2ClusterURL: ClusterResource, + version.V2EndpointsURL: EndpointsResource, + version.V2HTTPConnManagerURL: HTTPConnManagerResource, + version.V3ListenerURL: ListenerResource, + version.V3RouteConfigURL: RouteConfigResource, + version.V3ClusterURL: ClusterResource, + version.V3EndpointsURL: EndpointsResource, + version.V3HTTPConnManagerURL: HTTPConnManagerResource, +} + +// ResourceTypeFromURL returns the xDS resource type associated with the given +// resource type URL. 
+func ResourceTypeFromURL(url string) ResourceType { + if typ, ok := urlToResourceType[url]; ok { + return typ + } + return UnknownResource +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal.go b/xds/internal/xdsclient/xdsresource/unmarshal.go index eda11088765b..28ae41e43a91 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "strings" - "time" "google.golang.org/grpc/internal/grpclog" "google.golang.org/protobuf/types/known/anypb" @@ -42,117 +41,8 @@ type UnmarshalOptions struct { UpdateValidator UpdateValidatorFunc } -// processAllResources unmarshals and validates the resources, populates the -// provided ret (a map), and returns metadata and error. -// -// After this function, the ret map will be populated with both valid and -// invalid updates. Invalid resources will have an entry with the key as the -// resource name, value as an empty update. -// -// The type of the resource is determined by the type of ret. E.g. -// map[string]ListenerUpdate means this is for LDS. -func processAllResources(opts *UnmarshalOptions, ret interface{}) (UpdateMetadata, error) { - timestamp := time.Now() - md := UpdateMetadata{ - Version: opts.Version, - Timestamp: timestamp, - } - var topLevelErrors []error - perResourceErrors := make(map[string]error) - - for _, r := range opts.Resources { - switch ret2 := ret.(type) { - case map[string]ListenerUpdateErrTuple: - name, update, err := unmarshalListenerResource(r, opts.UpdateValidator, opts.Logger) - name = ParseName(name).String() - if err == nil { - ret2[name] = ListenerUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. 
- ret2[name] = ListenerUpdateErrTuple{Err: err} - case map[string]RouteConfigUpdateErrTuple: - name, update, err := unmarshalRouteConfigResource(r, opts.Logger) - name = ParseName(name).String() - if err == nil { - ret2[name] = RouteConfigUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = RouteConfigUpdateErrTuple{Err: err} - case map[string]ClusterUpdateErrTuple: - name, update, err := unmarshalClusterResource(r, opts.UpdateValidator, opts.Logger) - name = ParseName(name).String() - if err == nil { - ret2[name] = ClusterUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = ClusterUpdateErrTuple{Err: err} - case map[string]EndpointsUpdateErrTuple: - name, update, err := unmarshalEndpointsResource(r, opts.Logger) - name = ParseName(name).String() - if err == nil { - ret2[name] = EndpointsUpdateErrTuple{Update: update} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. 
- ret2[name] = EndpointsUpdateErrTuple{Err: err} - } - } - - if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { - md.Status = ServiceStatusACKed - return md, nil - } - - var typeStr string - switch ret.(type) { - case map[string]ListenerUpdate: - typeStr = "LDS" - case map[string]RouteConfigUpdate: - typeStr = "RDS" - case map[string]ClusterUpdate: - typeStr = "CDS" - case map[string]EndpointsUpdate: - typeStr = "EDS" - } - - md.Status = ServiceStatusNACKed - errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) - md.ErrState = &UpdateErrorMetadata{ - Version: opts.Version, - Err: errRet, - Timestamp: timestamp, - } - return md, errRet -} - -func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { +// CombineErrors TBD. +func CombineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { var errStrB strings.Builder errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) if len(topLevelErrors) > 0 { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 3621d61209a0..fce59f65c33e 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -41,16 +41,7 @@ import ( // to this value by the management server. const transportSocketName = "envoy.transport_sockets.tls" -// UnmarshalCluster processes resources received in an CDS response, validates -// them, and transforms them into a native struct which contains only fields we -// are interested in. 
-func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]ClusterUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - -func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { +func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { r, err := unwrapResource(r) if err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) @@ -70,11 +61,6 @@ func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpcl return cluster.GetName(), ClusterUpdate{}, err } cu.Raw = r - if f != nil { - if err := f(cu); err != nil { - return "", ClusterUpdate{}, err - } - } return cluster.GetName(), cu, nil } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index fa8f4d4bbcc8..3705c02bedf4 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -27,6 +27,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -1442,248 +1443,121 @@ func (s) TestUnmarshalCluster(t *testing.T) { }, }) ) - const testVersion = "test-version-cds" tests := []struct { name string - resources []*anypb.Any - wantUpdate map[string]ClusterUpdateErrTuple - wantMD UpdateMetadata + resource *anypb.Any + wantName string + wantUpdate ClusterUpdate wantErr bool }{ { - name: "non-cluster resource type", - resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: 
UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, - }, - wantErr: true, + name: "non-cluster resource type", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, }, { name: "badly marshaled cluster resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ClusterURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, + resource: &anypb.Any{ + TypeUrl: version.V3ClusterURL, + Value: []byte{1, 2, 3, 4}, }, wantErr: true, }, { name: "bad cluster resource", - resources: []*anypb.Any{ - testutils.MarshalAny(&v3clusterpb.Cluster{ - Name: "test", - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - }), - }, - wantUpdate: map[string]ClusterUpdateErrTuple{ - "test": {Err: cmpopts.AnyError}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, - }, - wantErr: true, + resource: testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "test", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, + }), + wantName: "test", + wantErr: true, }, { name: "cluster resource with non-self lrs_server field", - resources: []*anypb.Any{ - testutils.MarshalAny(&v3clusterpb.Cluster{ - Name: "test", - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: v3Service, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LrsServer: &v3corepb.ConfigSource{ + resource: 
testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "test", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ Ads: &v3corepb.AggregatedConfigSource{}, }, }, - }), - }, - wantUpdate: map[string]ClusterUpdateErrTuple{ - "test": {Err: cmpopts.AnyError}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, + ServiceName: v3Service, }, - }, - wantErr: true, - }, - { - name: "v2 cluster", - resources: []*anypb.Any{v2ClusterAny}, - wantUpdate: map[string]ClusterUpdateErrTuple{ - v2ClusterName: {Update: ClusterUpdate{ - ClusterName: v2ClusterName, - EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v2ClusterAny, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v2 cluster wrapped", - resources: []*anypb.Any{testutils.MarshalAny(&v2xdspb.Resource{Resource: v2ClusterAny})}, - wantUpdate: map[string]ClusterUpdateErrTuple{ - v2ClusterName: {Update: ClusterUpdate{ - ClusterName: v2ClusterName, - EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v2ClusterAny, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }), + wantName: "test", + wantErr: true, }, { - name: "v3 cluster", - resources: []*anypb.Any{v3ClusterAny}, - wantUpdate: map[string]ClusterUpdateErrTuple{ - v3ClusterName: {Update: ClusterUpdate{ - ClusterName: v3ClusterName, - EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v3ClusterAny, - }}, - }, - wantMD: 
UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v2 cluster", + resource: v2ClusterAny, + wantName: v2ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v2ClusterName, + EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v2ClusterAny, }, }, { - name: "v3 cluster wrapped", - resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3ClusterAny})}, - wantUpdate: map[string]ClusterUpdateErrTuple{ - v3ClusterName: {Update: ClusterUpdate{ - ClusterName: v3ClusterName, - EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v3ClusterAny, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v2 cluster wrapped", + resource: testutils.MarshalAny(&v2xdspb.Resource{Resource: v2ClusterAny}), + wantName: v2ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v2ClusterName, + EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v2ClusterAny, }, }, { - name: "v3 cluster with EDS config source self", - resources: []*anypb.Any{v3ClusterAnyWithEDSConfigSourceSelf}, - wantUpdate: map[string]ClusterUpdateErrTuple{ - v3ClusterName: {Update: ClusterUpdate{ - ClusterName: v3ClusterName, - EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v3ClusterAnyWithEDSConfigSourceSelf, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 cluster", + resource: v3ClusterAny, + wantName: v3ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v3ClusterAny, }, }, { - name: "multiple clusters", - resources: []*anypb.Any{v2ClusterAny, v3ClusterAny}, - wantUpdate: map[string]ClusterUpdateErrTuple{ - v2ClusterName: {Update: ClusterUpdate{ - ClusterName: v2ClusterName, - EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v2ClusterAny, - }}, - v3ClusterName: 
{Update: ClusterUpdate{ - ClusterName: v3ClusterName, - EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v3ClusterAny, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 cluster wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3ClusterAny}), + wantName: v3ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v3ClusterAny, }, }, { - // To test that unmarshal keeps processing on errors. - name: "good and bad clusters", - resources: []*anypb.Any{ - v2ClusterAny, - // bad cluster resource - testutils.MarshalAny(&v3clusterpb.Cluster{ - Name: "bad", - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - }), - v3ClusterAny, - }, - wantUpdate: map[string]ClusterUpdateErrTuple{ - v2ClusterName: {Update: ClusterUpdate{ - ClusterName: v2ClusterName, - EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v2ClusterAny, - }}, - v3ClusterName: {Update: ClusterUpdate{ - ClusterName: v3ClusterName, - EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v3ClusterAny, - }}, - "bad": {Err: cmpopts.AnyError}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, + name: "v3 cluster with EDS config source self", + resource: v3ClusterAnyWithEDSConfigSourceSelf, + wantName: v3ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v3ClusterAnyWithEDSConfigSourceSelf, }, - wantErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - opts := &UnmarshalOptions{ - Version: testVersion, - Resources: test.resources, - } - update, md, err := UnmarshalCluster(opts) + name, update, err := 
unmarshalClusterResource(test.resource, nil) if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalCluster(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) + t.Fatalf("unmarshalClusterResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) + if name != test.wantName { + t.Errorf("unmarshalClusterResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalClusterResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) } }) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index e091d0ddea0f..a1809a62fc9a 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -33,15 +33,6 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -// UnmarshalEndpoints processes resources received in an EDS response, -// validates them, and transforms them into a native struct which contains only -// fields we are interested in. 
-func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]EndpointsUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { r, err := unwrapResource(r) if err != nil { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index f89333c76e76..3fd3f417e943 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -30,7 +30,7 @@ import ( anypb "github.com/golang/protobuf/ptypes/any" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -233,204 +233,112 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { }) return clab0.Build() }()) - const testVersion = "test-version-eds" tests := []struct { name string - resources []*anypb.Any - wantUpdate map[string]EndpointsUpdateErrTuple - wantMD UpdateMetadata + resource *anypb.Any + wantName string + wantUpdate EndpointsUpdate wantErr bool }{ { - name: "non-clusterLoadAssignment resource type", - resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, - }, - wantErr: true, + name: "non-clusterLoadAssignment resource type", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, }, { name: "badly marshaled clusterLoadAssignment 
resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3EndpointsURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, + resource: &anypb.Any{ + TypeUrl: version.V3EndpointsURL, + Value: []byte{1, 2, 3, 4}, }, wantErr: true, }, { name: "bad endpoints resource", - resources: []*anypb.Any{testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { + resource: testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { clab0 := newClaBuilder("test", nil) clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) return clab0.Build() - }())}, - wantUpdate: map[string]EndpointsUpdateErrTuple{"test": {Err: cmpopts.AnyError}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, - }, - wantErr: true, + }()), + wantName: "test", + wantErr: true, }, { - name: "v3 endpoints", - resources: []*anypb.Any{v3EndpointsAny}, - wantUpdate: map[string]EndpointsUpdateErrTuple{ - "test": {Update: EndpointsUpdate{ - Drops: nil, - Localities: []Locality{ - { - Endpoints: []Endpoint{{ - Address: "addr1:314", - HealthStatus: EndpointHealthStatusUnhealthy, - Weight: 271, - }}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []Endpoint{{ - Address: "addr2:159", - HealthStatus: EndpointHealthStatusDraining, - Weight: 828, - }}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, + name: "v3 endpoints", + resource: v3EndpointsAny, + wantName: "test", + wantUpdate: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: 
internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, }, - Raw: v3EndpointsAny, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 endpoints wrapped", - resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3EndpointsAny})}, - wantUpdate: map[string]EndpointsUpdateErrTuple{ - "test": {Update: EndpointsUpdate{ - Drops: nil, - Localities: []Locality{ - { - Endpoints: []Endpoint{{ - Address: "addr1:314", - HealthStatus: EndpointHealthStatusUnhealthy, - Weight: 271, - }}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []Endpoint{{ - Address: "addr2:159", - HealthStatus: EndpointHealthStatusDraining, - Weight: 828, - }}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, }, - Raw: v3EndpointsAny, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: v3EndpointsAny, }, }, { - // To test that unmarshal keeps processing on errors. 
- name: "good and bad endpoints", - resources: []*anypb.Any{ - v3EndpointsAny, - testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { - clab0 := newClaBuilder("bad", nil) - clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) - return clab0.Build() - }()), - }, - wantUpdate: map[string]EndpointsUpdateErrTuple{ - "test": {Update: EndpointsUpdate{ - Drops: nil, - Localities: []Locality{ - { - Endpoints: []Endpoint{{ - Address: "addr1:314", - HealthStatus: EndpointHealthStatusUnhealthy, - Weight: 271, - }}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []Endpoint{{ - Address: "addr2:159", - HealthStatus: EndpointHealthStatusDraining, - Weight: 828, - }}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, + name: "v3 endpoints wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3EndpointsAny}), + wantName: "test", + wantUpdate: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, }, - Raw: v3EndpointsAny, - }}, - "bad": {Err: cmpopts.AnyError}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, }, + Raw: v3EndpointsAny, }, - wantErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - opts := &UnmarshalOptions{ - Version: testVersion, - Resources: test.resources, - } - update, md, err := UnmarshalEndpoints(opts) + name, 
update, err := unmarshalEndpointsResource(test.resource, nil) if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalEndpoints(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) + t.Fatalf("unmarshalEndpointsResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) + if name != test.wantName { + t.Errorf("unmarshalEndpointsResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalEndpointsResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) } }) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index 2e59c0605c9b..6b273e82f956 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -36,16 +36,7 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -// UnmarshalListener processes resources received in an LDS response, validates -// them, and transforms them into a native struct which contains only fields we -// are interested in. 
-func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]ListenerUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - -func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { +func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { r, err := unwrapResource(r) if err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) @@ -66,11 +57,6 @@ func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpc if err != nil { return lis.GetName(), ListenerUpdate{}, err } - if f != nil { - if err := f(*lu); err != nil { - return lis.GetName(), ListenerUpdate{}, err - } - } lu.Raw = r return lis.GetName(), *lu, nil } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index f46cf3801aca..62089bc31b8f 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -23,11 +23,10 @@ import ( "testing" "time" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal/httpfilter" @@ -47,6 +46,7 @@ import ( v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" anypb "github.com/golang/protobuf/ptypes/any" spb "github.com/golang/protobuf/ptypes/struct" @@ -224,69 +224,55 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }), }, }) - - errMD = UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, - } ) tests := []struct { name string - resources []*anypb.Any - wantUpdate map[string]ListenerUpdateErrTuple - wantMD UpdateMetadata + resource *anypb.Any + wantName string + wantUpdate ListenerUpdate wantErr bool }{ { - name: "non-listener resource", - resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: errMD, - wantErr: true, + name: "non-listener resource", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, }, { name: "badly marshaled listener resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V3HTTPConnManagerURL, - Value: []byte{1, 2, 3, 4}, - }, + resource: &anypb.Any{ + TypeUrl: version.V3ListenerURL, + Value: func() []byte { + lis := &v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: &anypb.Any{ + TypeUrl: version.V3HTTPConnManagerURL, + Value: []byte{1, 2, 3, 4}, }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, + }, + } + mLis, _ := proto.Marshal(lis) + return mLis + }(), }, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + 
wantName: v3LDSTarget, + wantErr: true, }, { name: "wrong type in apiListener", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ ApiListener: testutils.MarshalAny(&v2xdspb.Listener{}), }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + }), + wantName: v3LDSTarget, + wantErr: true, }, { name: "empty httpConnMgr in apiListener", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ @@ -295,43 +281,36 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, }), }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + }), + wantName: v3LDSTarget, + wantErr: true, }, { name: "scopedRoutes routeConfig in apiListener", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, }), }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + }), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "rds.ConfigSource in apiListener is Self", - resources: []*anypb.Any{v3ListenerWithCDSConfigSourceSelf}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, - HTTPFilters: []HTTPFilter{routerFilter}, - Raw: v3ListenerWithCDSConfigSourceSelf, - }}, - }, - wantMD: UpdateMetadata{ - 
Status: ServiceStatusACKed, - Version: testVersion, + name: "rds.ConfigSource in apiListener is Self", + resource: v3ListenerWithCDSConfigSourceSelf, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + HTTPFilters: []HTTPFilter{routerFilter}, + Raw: v3ListenerWithCDSConfigSourceSelf, }, }, { name: "rds.ConfigSource in apiListener is not ADS or Self", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ @@ -347,32 +326,24 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { }, }), }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + }), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "empty resource list", - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with no filters", - resources: []*anypb.Any{v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 with no filters", + resource: v3LisWithFilters(), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(), }, }, { name: "v3 no terminal filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ApiListener: &v3listenerpb.ApiListener{ ApiListener: testutils.MarshalAny( @@ -390,254 +361,204 @@ func (s) TestUnmarshalListener_ClientSide(t 
*testing.T) { }, }), }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + }), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "v3 with custom filter", - resources: []*anypb.Any{v3LisWithFilters(customFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{ - { - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }, - routerFilter, + name: "v3 with custom filter", + resource: v3LisWithFilters(customFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, }, - Raw: v3LisWithFilters(customFilter), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + routerFilter, + }, + Raw: v3LisWithFilters(customFilter), }, }, { - name: "v3 with custom filter in old typed struct", - resources: []*anypb.Any{v3LisWithFilters(oldTypedStructFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{ - { - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterOldTypedStructConfig}, - }, - routerFilter, + name: "v3 with custom filter in old typed struct", + resource: v3LisWithFilters(oldTypedStructFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterOldTypedStructConfig}, }, - Raw: 
v3LisWithFilters(oldTypedStructFilter), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + routerFilter, + }, + Raw: v3LisWithFilters(oldTypedStructFilter), }, }, { - name: "v3 with custom filter in new typed struct", - resources: []*anypb.Any{v3LisWithFilters(newTypedStructFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{ - { - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterNewTypedStructConfig}, - }, - routerFilter, + name: "v3 with custom filter in new typed struct", + resource: v3LisWithFilters(newTypedStructFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterNewTypedStructConfig}, }, - Raw: v3LisWithFilters(newTypedStructFilter), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + routerFilter, + }, + Raw: v3LisWithFilters(newTypedStructFilter), }, }, { - name: "v3 with optional custom filter", - resources: []*anypb.Any{v3LisWithFilters(customOptionalFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{ - { - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }, - routerFilter, + name: "v3 with optional custom filter", + resource: v3LisWithFilters(customOptionalFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: 
customFilterConfig}, }, - Raw: v3LisWithFilters(customOptionalFilter), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + routerFilter, + }, + Raw: v3LisWithFilters(customOptionalFilter), }, }, { - name: "v3 with two filters with same name", - resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + name: "v3 with two filters with same name", + resource: v3LisWithFilters(customFilter, customFilter), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "v3 with two filters - same type different name", - resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter2)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }, { - Name: "customFilter2", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }, - routerFilter, - }, - Raw: v3LisWithFilters(customFilter, customFilter2), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 with two filters - same type different name", + resource: v3LisWithFilters(customFilter, customFilter2), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{{ + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, { + Name: "customFilter2", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, + routerFilter, + }, + Raw: v3LisWithFilters(customFilter, customFilter2), }, }, { - name: "v3 with server-only filter", - resources: []*anypb.Any{v3LisWithFilters(serverOnlyCustomFilter)}, - 
wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + name: "v3 with server-only filter", + resource: v3LisWithFilters(serverOnlyCustomFilter), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "v3 with optional server-only filter", - resources: []*anypb.Any{v3LisWithFilters(serverOnlyOptionalCustomFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, - MaxStreamDuration: time.Second, - Raw: v3LisWithFilters(serverOnlyOptionalCustomFilter), - HTTPFilters: routerFilterList, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 with optional server-only filter", + resource: v3LisWithFilters(serverOnlyOptionalCustomFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + Raw: v3LisWithFilters(serverOnlyOptionalCustomFilter), + HTTPFilters: routerFilterList, }, }, { - name: "v3 with client-only filter", - resources: []*anypb.Any{v3LisWithFilters(clientOnlyCustomFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{ - { - Name: "clientOnlyCustomFilter", - Filter: clientOnlyHTTPFilter{}, - Config: filterConfig{Cfg: clientOnlyCustomFilterConfig}, - }, - routerFilter}, - Raw: v3LisWithFilters(clientOnlyCustomFilter), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 with client-only filter", + resource: v3LisWithFilters(clientOnlyCustomFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "clientOnlyCustomFilter", + Filter: clientOnlyHTTPFilter{}, + Config: filterConfig{Cfg: 
clientOnlyCustomFilterConfig}, + }, + routerFilter}, + Raw: v3LisWithFilters(clientOnlyCustomFilter), }, }, { - name: "v3 with err filter", - resources: []*anypb.Any{v3LisWithFilters(errFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + name: "v3 with err filter", + resource: v3LisWithFilters(errFilter), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "v3 with optional err filter", - resources: []*anypb.Any{v3LisWithFilters(errOptionalFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + name: "v3 with optional err filter", + resource: v3LisWithFilters(errOptionalFilter), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "v3 with unknown filter", - resources: []*anypb.Any{v3LisWithFilters(unknownFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + name: "v3 with unknown filter", + resource: v3LisWithFilters(unknownFilter), + wantName: v3LDSTarget, + wantErr: true, }, { - name: "v3 with unknown filter (optional)", - resources: []*anypb.Any{v3LisWithFilters(unknownOptionalFilter)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, - MaxStreamDuration: time.Second, - HTTPFilters: routerFilterList, - Raw: v3LisWithFilters(unknownOptionalFilter), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 with unknown filter (optional)", + resource: v3LisWithFilters(unknownOptionalFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(unknownOptionalFilter), }, }, { - name: "v2 listener resource", - resources: []*anypb.Any{v2Lis}, - wantUpdate: 
map[string]ListenerUpdateErrTuple{ - v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v2 listener resource", + resource: v2Lis, + wantName: v2LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v2RouteConfigName, + Raw: v2Lis, }, }, { - name: "v2 listener resource wrapped", - resources: []*anypb.Any{testutils.MarshalAny(&v2xdspb.Resource{Resource: v2Lis})}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v2 listener resource wrapped", + resource: testutils.MarshalAny(&v2xdspb.Resource{Resource: v2Lis}), + wantName: v2LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v2RouteConfigName, + Raw: v2Lis, }, }, { - name: "v3 listener resource", - resources: []*anypb.Any{v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "v3 listener resource", + resource: v3LisWithFilters(), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(), }, }, { - name: "v3 listener resource wrapped", - resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3LisWithFilters()})}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList, Raw: v3LisWithFilters()}}, - }, - wantMD: UpdateMetadata{ - Status: 
ServiceStatusACKed, - Version: testVersion, + name: "v3 listener resource wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3LisWithFilters()}), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(), }, }, // "To allow equating RBAC's direct_remote_ip and @@ -645,19 +566,14 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { // or zero and HttpConnectionManager.original_ip_detection_extensions // must be empty." - A41 { - name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", - resources: []*anypb.Any{v3LisToTestRBAC(0, nil)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - RouteConfigName: v3RouteConfigName, - MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{routerFilter}, - Raw: v3LisToTestRBAC(0, nil), - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", + resource: v3LisToTestRBAC(0, nil), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{routerFilter}, + Raw: v3LisToTestRBAC(0, nil), }, }, // In order to support xDS Configured RBAC HTTPFilter equating direct @@ -666,94 +582,49 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { // determining an origin clients ip address, direct remote ip != remote // ip. 
{ - name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", - resources: []*anypb.Any{v3LisToTestRBAC(1, nil)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", + resource: v3LisToTestRBAC(1, nil), + wantName: v3LDSTarget, + wantErr: true, }, // In order to support xDS Configured RBAC HTTPFilter equating direct // remote ip and remote ip, originalIpDetectionExtensions must be empty. // This is because if you have to ask ip-detection-extension for the // original ip, direct remote ip might not equal remote ip. { - name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", - resources: []*anypb.Any{v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}})}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: true, - }, - { - name: "v3 listener with inline route configuration", - resources: []*anypb.Any{v3LisWithInlineRoute}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - InlineRouteConfig: &RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{{ - Domains: []string{v3LDSTarget}, - Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, ActionType: RouteActionRoute}}, - }}}, - MaxStreamDuration: time.Second, - Raw: v3LisWithInlineRoute, - HTTPFilters: routerFilterList, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "multiple listener resources", - resources: []*anypb.Any{v2Lis, v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, - v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: 
v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", + resource: v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}}), + wantName: v3LDSTarget, + wantErr: true, }, { - // To test that unmarshal keeps processing on errors. - name: "good and bad listener resources", - resources: []*anypb.Any{ - v2Lis, - testutils.MarshalAny(&v3listenerpb.Listener{ - Name: "bad", - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, - }), - }}), - v3LisWithFilters(), - }, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v2LDSTarget: {Update: ListenerUpdate{RouteConfigName: v2RouteConfigName, Raw: v2Lis}}, - v3LDSTarget: {Update: ListenerUpdate{RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(), HTTPFilters: routerFilterList}}, - "bad": {Err: cmpopts.AnyError}, + name: "v3 listener with inline route configuration", + resource: v3LisWithInlineRoute, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InlineRouteConfig: &RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{v3LDSTarget}, + Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, ActionType: RouteActionRoute}}, + }}}, + MaxStreamDuration: time.Second, + Raw: v3LisWithInlineRoute, + HTTPFilters: routerFilterList, }, - wantMD: errMD, - wantErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - opts := &UnmarshalOptions{ - Version: testVersion, - Resources: test.resources, - } - update, md, err := UnmarshalListener(opts) + name, update, err := unmarshalListenerResource(test.resource, nil) 
if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalListener(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) + t.Errorf("unmarshalListenerResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) + if name != test.wantName { + t.Errorf("unmarshalListenerResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalListenerResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) } }) } @@ -1017,14 +888,6 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }) - errMD = UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, - } ) v3LisToTestRBAC := func(xffNumTrustedHops uint32, originalIpDetectionExtensions []*v3corepb.TypedExtensionConfig) *anypb.Any { return testutils.MarshalAny(&v3listenerpb.Listener{ @@ -1109,53 +972,49 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { tests := []struct { name string - resources []*anypb.Any - wantUpdate map[string]ListenerUpdateErrTuple - wantMD UpdateMetadata + resource *anypb.Any + wantName string + wantUpdate ListenerUpdate wantErr string }{ { name: "non-empty listener filters", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, ListenerFilters: []*v3listenerpb.ListenerFilter{ {Name: "listener-filter-1"}, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: 
"unsupported field 'listener_filters'", + }), + wantName: v3LDSTarget, + wantErr: "unsupported field 'listener_filters'", }, { name: "use_original_dst is set", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, UseOriginalDst: &wrapperspb.BoolValue{Value: true}, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "unsupported field 'use_original_dst'", + }), + wantName: v3LDSTarget, + wantErr: "unsupported field 'use_original_dst'", }, { - name: "no address field", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{Name: v3LDSTarget})}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "no address field in LDS response", + name: "no address field", + resource: testutils.MarshalAny(&v3listenerpb.Listener{Name: v3LDSTarget}), + wantName: v3LDSTarget, + wantErr: "no address field in LDS response", }, { name: "no socket address field", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: &v3corepb.Address{}, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "no socket_address field in LDS response", + }), + wantName: v3LDSTarget, + wantErr: "no socket_address field in LDS response", }, { name: "no filter chains and no default filter chain", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1164,14 +1023,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Filters: emptyValidNetworkFilters, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: 
cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "no supported filter chains and no default filter chain", + }), + wantName: v3LDSTarget, + wantErr: "no supported filter chains and no default filter chain", }, { name: "missing http connection manager network filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1179,14 +1037,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Name: "filter-chain-1", }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "missing HttpConnectionManager filter", + }), + wantName: v3LDSTarget, + wantErr: "missing HttpConnectionManager filter", }, { name: "missing filter name in http filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1201,14 +1058,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "missing name field in filter", + }), + wantName: v3LDSTarget, + wantErr: "missing name field in filter", }, { name: "duplicate filter names in http filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1240,14 +1096,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "duplicate filter name", + }), + wantName: v3LDSTarget, + wantErr: "duplicate filter 
name", }, { name: "no terminal filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1267,14 +1122,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "http filters list is empty", + }), + wantName: v3LDSTarget, + wantErr: "http filters list is empty", }, { name: "terminal filter not last", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1295,14 +1149,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "is a terminal filter but it is not last in the filter chain", + }), + wantName: v3LDSTarget, + wantErr: "is a terminal filter but it is not last in the filter chain", }, { name: "last not terminal filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1323,14 +1176,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "is not a terminal filter", + }), + wantName: v3LDSTarget, + wantErr: "is not a terminal filter", }, { name: "unsupported oneof in typed config of http filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, 
Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1344,14 +1196,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "unsupported config_type", + }), + wantName: v3LDSTarget, + wantErr: "unsupported config_type", }, { name: "overlapping filter chain match criteria", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1368,14 +1219,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { Filters: emptyValidNetworkFilters, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "multiple filter chains with overlapping matching rules are defined", + }), + wantName: v3LDSTarget, + wantErr: "multiple filter chains with overlapping matching rules are defined", }, { name: "unsupported network filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1391,14 +1241,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "unsupported network filter", + }), + wantName: v3LDSTarget, + wantErr: "unsupported network filter", }, { name: "badly marshaled network filter", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1417,14 +1266,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) 
{ }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "failed unmarshaling of network filter", + }), + wantName: v3LDSTarget, + wantErr: "failed unmarshaling of network filter", }, { name: "unexpected transport socket name", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1436,14 +1284,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "transport_socket field has unexpected name", + }), + wantName: v3LDSTarget, + wantErr: "transport_socket field has unexpected name", }, { name: "unexpected transport socket typedConfig URL", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1458,14 +1305,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "transport_socket field has unexpected typeURL", + }), + wantName: v3LDSTarget, + wantErr: "transport_socket field has unexpected typeURL", }, { name: "badly marshaled transport socket", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1483,14 +1329,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "failed to unmarshal 
DownstreamTlsContext in LDS response", + }), + wantName: v3LDSTarget, + wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", }, { name: "missing CommonTlsContext", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1505,31 +1350,29 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", + }), + wantName: v3LDSTarget, + wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", }, { - name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", - resources: []*anypb.Any{v3LisToTestRBAC(0, nil)}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: &FilterChainManager{ - dstPrefixMap: map[string]*destPrefixEntry{ - unspecifiedPrefixMapKey: { - srcTypeArr: [3]*sourcePrefixes{ - { - srcPrefixMap: map[string]*sourcePrefixEntry{ - unspecifiedPrefixMapKey: { - srcPortMap: map[int]*FilterChain{ - 0: { - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, - }, + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", + resource: v3LisToTestRBAC(0, nil), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: 
inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1539,45 +1382,37 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - Raw: listenerEmptyTransportSocket, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: listenerEmptyTransportSocket, }, }, { - name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", - resources: []*anypb.Any{v3LisToTestRBAC(1, nil)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "xff_num_trusted_hops must be unset or zero", + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", + resource: v3LisToTestRBAC(1, nil), + wantName: v3LDSTarget, + wantErr: "xff_num_trusted_hops must be unset or zero", }, { - name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", - resources: []*anypb.Any{v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}})}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "original_ip_detection_extensions must be empty", + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", + resource: v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}}), + wantName: v3LDSTarget, + wantErr: "original_ip_detection_extensions must be empty", }, { - name: "rbac-with-invalid-regex", - resources: []*anypb.Any{v3LisWithBadRBACConfiguration(badRBACCfgRegex)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "error parsing config for filter", + name: "rbac-with-invalid-regex", + resource: v3LisWithBadRBACConfiguration(badRBACCfgRegex), + wantName: v3LDSTarget, + wantErr: "error parsing config for filter", }, { - name: "rbac-with-invalid-destination-ip-matcher", - resources: 
[]*anypb.Any{v3LisWithBadRBACConfiguration(badRBACCfgDestIP)}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "error parsing config for filter", + name: "rbac-with-invalid-destination-ip-matcher", + resource: v3LisWithBadRBACConfiguration(badRBACCfgDestIP), + wantName: v3LDSTarget, + wantErr: "error parsing config for filter", }, { name: "unsupported validation context in transport socket", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1600,31 +1435,29 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "validation context contains unexpected type", + }), + wantName: v3LDSTarget, + wantErr: "validation context contains unexpected type", }, { - name: "empty transport socket", - resources: []*anypb.Any{listenerEmptyTransportSocket}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: &FilterChainManager{ - dstPrefixMap: map[string]*destPrefixEntry{ - unspecifiedPrefixMapKey: { - srcTypeArr: [3]*sourcePrefixes{ - { - srcPrefixMap: map[string]*sourcePrefixEntry{ - unspecifiedPrefixMapKey: { - srcPortMap: map[int]*FilterChain{ - 0: { - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, - }, + name: "empty transport socket", + resource: listenerEmptyTransportSocket, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: 
[3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1634,17 +1467,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - Raw: listenerEmptyTransportSocket, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: listenerEmptyTransportSocket, }, }, { name: "no identity and root certificate providers using deprecated fields", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1667,14 +1496,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + }), + wantName: v3LDSTarget, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", }, { name: "no identity and root certificate providers using new fields", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1697,14 +1525,13 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + }), + wantName: 
v3LDSTarget, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", }, { name: "no identity certificate provider with require_client_cert", - resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + resource: testutils.MarshalAny(&v3listenerpb.Listener{ Name: v3LDSTarget, Address: localSocketAddress, FilterChains: []*v3listenerpb.FilterChain{ @@ -1721,35 +1548,33 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - })}, - wantUpdate: map[string]ListenerUpdateErrTuple{v3LDSTarget: {Err: cmpopts.AnyError}}, - wantMD: errMD, - wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", + }), + wantName: v3LDSTarget, + wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", }, { - name: "happy case with no validation context using deprecated fields", - resources: []*anypb.Any{listenerNoValidationContextDeprecatedFields}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: &FilterChainManager{ - dstPrefixMap: map[string]*destPrefixEntry{ - unspecifiedPrefixMapKey: { - srcTypeArr: [3]*sourcePrefixes{ - { - srcPrefixMap: map[string]*sourcePrefixEntry{ - unspecifiedPrefixMapKey: { - srcPortMap: map[int]*FilterChain{ - 0: { - SecurityCfg: &SecurityConfig{ - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, + name: "happy case with no validation context using deprecated fields", + resource: listenerNoValidationContextDeprecatedFields, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: 
&FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1757,48 +1582,43 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - def: &FilterChain{ - SecurityCfg: &SecurityConfig{ - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, - Raw: listenerNoValidationContextDeprecatedFields, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: listenerNoValidationContextDeprecatedFields, }, }, { - name: "happy case with no validation context using new fields", - resources: []*anypb.Any{listenerNoValidationContextNewFields}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: &FilterChainManager{ - dstPrefixMap: map[string]*destPrefixEntry{ - unspecifiedPrefixMapKey: { - srcTypeArr: [3]*sourcePrefixes{ - { - srcPrefixMap: map[string]*sourcePrefixEntry{ - unspecifiedPrefixMapKey: { - srcPortMap: map[int]*FilterChain{ - 0: { - SecurityCfg: &SecurityConfig{ - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: 
routerFilterList, + name: "happy case with no validation context using new fields", + resource: listenerNoValidationContextNewFields, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1806,51 +1626,46 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - def: &FilterChain{ - SecurityCfg: &SecurityConfig{ - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, - Raw: listenerNoValidationContextNewFields, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: listenerNoValidationContextNewFields, }, }, { - name: "happy case with validation context provider instance with deprecated fields", - resources: []*anypb.Any{listenerWithValidationContextDeprecatedFields}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: &FilterChainManager{ - dstPrefixMap: map[string]*destPrefixEntry{ - unspecifiedPrefixMapKey: { - srcTypeArr: [3]*sourcePrefixes{ - { - 
srcPrefixMap: map[string]*sourcePrefixEntry{ - unspecifiedPrefixMapKey: { - srcPortMap: map[int]*FilterChain{ - 0: { - SecurityCfg: &SecurityConfig{ - RootInstanceName: "rootPluginInstance", - RootCertName: "rootCertName", - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - RequireClientCert: true, - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, + name: "happy case with validation context provider instance with deprecated fields", + resource: listenerWithValidationContextDeprecatedFields, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, }, @@ -1858,54 +1673,49 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - def: &FilterChain{ - SecurityCfg: &SecurityConfig{ - RootInstanceName: "defaultRootPluginInstance", - RootCertName: "defaultRootCertName", - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", - RequireClientCert: true, - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + RequireClientCert: true, }, 
+ InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, - Raw: listenerWithValidationContextDeprecatedFields, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: listenerWithValidationContextDeprecatedFields, }, }, { - name: "happy case with validation context provider instance with new fields", - resources: []*anypb.Any{listenerWithValidationContextNewFields}, - wantUpdate: map[string]ListenerUpdateErrTuple{ - v3LDSTarget: {Update: ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: &FilterChainManager{ - dstPrefixMap: map[string]*destPrefixEntry{ - unspecifiedPrefixMapKey: { - srcTypeArr: [3]*sourcePrefixes{ - { - srcPrefixMap: map[string]*sourcePrefixEntry{ - unspecifiedPrefixMapKey: { - srcPortMap: map[int]*FilterChain{ - 0: { - SecurityCfg: &SecurityConfig{ - RootInstanceName: "rootPluginInstance", - RootCertName: "rootCertName", - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - RequireClientCert: true, - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, + name: "happy case with validation context provider instance with new fields", + resource: listenerWithValidationContextNewFields, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: 
routerFilterList, }, }, }, @@ -1913,47 +1723,36 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { }, }, }, - def: &FilterChain{ - SecurityCfg: &SecurityConfig{ - RootInstanceName: "defaultRootPluginInstance", - RootCertName: "defaultRootCertName", - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", - RequireClientCert: true, - }, - InlineRouteConfig: inlineRouteConfig, - HTTPFilters: routerFilterList, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + RequireClientCert: true, }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, }, }, - Raw: listenerWithValidationContextNewFields, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: listenerWithValidationContextNewFields, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - opts := &UnmarshalOptions{ - Version: testVersion, - Resources: test.resources, - } - gotUpdate, md, err := UnmarshalListener(opts) - if (err != nil) != (test.wantErr != "") { - t.Fatalf("UnmarshalListener(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) - } + name, update, err := unmarshalListenerResource(test.resource, nil) if err != nil && !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("UnmarshalListener(%+v) = %v wantErr: %q", opts, err, test.wantErr) + t.Errorf("unmarshalListenerResource(%s) = %v wantErr: %q", pretty.ToJSON(test.resource), err, test.wantErr) } - if diff := cmp.Diff(gotUpdate, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) + if name != test.wantName { + t.Errorf("unmarshalListenerResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) } - if diff := cmp.Diff(md, 
test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalListenerResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) } }) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 32c48d46b691..8f434d11d957 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -35,16 +35,6 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) -// UnmarshalRouteConfig processes resources received in an RDS response, -// validates them, and transforms them into a native struct which contains only -// fields we are interested in. The provided hostname determines the route -// configuration resources of interest. -func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) { - update := make(map[string]RouteConfigUpdateErrTuple) - md, err := processAllResources(opts, update) - return update, md, err -} - func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) { r, err := unwrapResource(r) if err != nil { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index b6d02f30bef1..ea308825c094 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -30,6 +30,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/httpfilter" @@ 
-907,282 +908,127 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { VirtualHosts: v3VirtualHost, }) ) - const testVersion = "test-version-rds" tests := []struct { name string - resources []*anypb.Any - wantUpdate map[string]RouteConfigUpdateErrTuple - wantMD UpdateMetadata + resource *anypb.Any + wantName string + wantUpdate RouteConfigUpdate wantErr bool }{ { - name: "non-routeConfig resource type", - resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, - }, - wantErr: true, + name: "non-routeConfig resource type", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, }, { name: "badly marshaled routeconfig resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3RouteConfigURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, - }, + resource: &anypb.Any{ + TypeUrl: version.V3RouteConfigURL, + Value: []byte{1, 2, 3, 4}, }, wantErr: true, }, { - name: "empty resource list", - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v2 routeConfig resource", - resources: []*anypb.Any{v2RouteConfig}, - wantUpdate: map[string]RouteConfigUpdateErrTuple{ - v2RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + name: "v2 routeConfig 
resource", + resource: v2RouteConfig, + wantName: v2RouteConfigName, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v2RouteConfig, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v2 routeConfig resource wrapped", - resources: []*anypb.Any{testutils.MarshalAny(&v2xdspb.Resource{Resource: v2RouteConfig})}, - wantUpdate: map[string]RouteConfigUpdateErrTuple{ - v2RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v2RouteConfig, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: v2RouteConfig, }, }, { - name: "v3 routeConfig resource", - resources: []*anypb.Any{v3RouteConfig}, - wantUpdate: map[string]RouteConfigUpdateErrTuple{ - v3RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: 
newStringP(""), - WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + name: "v2 routeConfig resource wrapped", + resource: testutils.MarshalAny(&v2xdspb.Resource{Resource: v2RouteConfig}), + wantName: v2RouteConfigName, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v3RouteConfig, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 routeConfig resource wrapped", - resources: []*anypb.Any{testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3RouteConfig})}, - wantUpdate: map[string]RouteConfigUpdateErrTuple{ - v3RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v3RouteConfig, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: v2RouteConfig, }, }, { - name: "multiple routeConfig resources", - resources: []*anypb.Any{v2RouteConfig, v3RouteConfig}, - wantUpdate: map[string]RouteConfigUpdateErrTuple{ - v3RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - 
Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + name: "v3 routeConfig resource", + resource: v3RouteConfig, + wantName: v3RouteConfigName, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v3RouteConfig, - }}, - v2RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v2RouteConfig, - }}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, + }, + Raw: v3RouteConfig, }, }, { - // To test that unmarshal keeps processing on errors. 
- name: "good and bad routeConfig resources", - resources: []*anypb.Any{ - v2RouteConfig, - testutils.MarshalAny(&v3routepb.RouteConfiguration{ - Name: "bad", - VirtualHosts: []*v3routepb.VirtualHost{ - {Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_ConnectMatcher_{}}, - }}}}}), - v3RouteConfig, - }, - wantUpdate: map[string]RouteConfigUpdateErrTuple{ - v3RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + name: "v3 routeConfig resource wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3RouteConfig}), + wantName: v3RouteConfigName, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v3RouteConfig, - }}, - v2RouteConfigName: {Update: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: 
map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, }, - Raw: v2RouteConfig, - }}, - "bad": {Err: cmpopts.AnyError}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: cmpopts.AnyError, }, + Raw: v3RouteConfig, }, - wantErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - opts := &UnmarshalOptions{ - Version: testVersion, - Resources: test.resources, - } - update, md, err := UnmarshalRouteConfig(opts) + name, update, err := unmarshalRouteConfigResource(test.resource, nil) if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalRouteConfig(%+v), got err: %v, wantErr: %v", opts, err, test.wantErr) + t.Errorf("unmarshalRouteConfigResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) + if name != test.wantName { + t.Errorf("unmarshalRouteConfigResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalRouteConfigResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) } }) } diff --git a/xds/xds.go b/xds/xds.go index 7c479f5f8a86..706e11c49487 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -30,21 +30,21 @@ package xds import ( "fmt" - v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "google.golang.org/grpc" - _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. 
"google.golang.org/grpc/internal" internaladmin "google.golang.org/grpc/internal/admin" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. - _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. - _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. - _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client. - _ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client. + + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. 
+ _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) func init() { From 94a65dca4098e6fcc5cf4fd814809a8d2ac7d8d2 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 21 Dec 2022 15:11:59 -0800 Subject: [PATCH 723/998] rls: deflake tests (#5877) Fixes https://github.com/grpc/grpc-go/issues/5845 --- balancer/rls/balancer.go | 71 +++++++++++------ balancer/rls/balancer_test.go | 37 +++++++++ balancer/rls/helpers_test.go | 20 ++--- balancer/rls/picker_test.go | 100 +++++++++++++++--------- internal/balancergroup/balancergroup.go | 17 ++-- 5 files changed, 166 insertions(+), 79 deletions(-) diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index 036ad4454c71..f18f4531d839 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -91,14 +91,16 @@ func (rlsBB) Name() string { func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { lb := &rlsBalancer{ - done: grpcsync.NewEvent(), - cc: cc, - bopts: opts, - purgeTicker: dataCachePurgeTicker(), - lbCfg: &lbConfig{}, - pendingMap: make(map[cacheKey]*backoffState), - childPolicies: make(map[string]*childPolicyWrapper), - updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + cc: cc, + bopts: opts, + purgeTicker: dataCachePurgeTicker(), + dataCachePurgeHook: dataCachePurgeHook, + lbCfg: &lbConfig{}, + pendingMap: make(map[cacheKey]*backoffState), + childPolicies: make(map[string]*childPolicyWrapper), + updateCh: buffer.NewUnbounded(), } lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) lb.dataCache = newDataCache(maxCacheSize, lb.logger) @@ -110,11 +112,13 @@ func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. // rlsBalancer implements the RLS LB policy. 
type rlsBalancer struct { - done *grpcsync.Event - cc balancer.ClientConn - bopts balancer.BuildOptions - purgeTicker *time.Ticker - logger *internalgrpclog.PrefixLogger + closed *grpcsync.Event // Fires when Close() is invoked. Guarded by stateMu. + done *grpcsync.Event // Fires when Close() is done. + cc balancer.ClientConn + bopts balancer.BuildOptions + purgeTicker *time.Ticker + dataCachePurgeHook func() + logger *internalgrpclog.PrefixLogger // If both cacheMu and stateMu need to be acquired, the former must be // acquired first to prevent a deadlock. This order restriction is due to the @@ -167,7 +171,18 @@ type controlChannelReady struct{} // on to a channel that this goroutine will select on, thereby the handling of // the update will happen asynchronously. func (b *rlsBalancer) run() { - go b.purgeDataCache() + // We exit out of the for loop below only after `Close()` has been invoked. + // Firing the done event here will ensure that Close() returns only after + // all goroutines are done. + defer func() { b.done.Fire() }() + + // Wait for purgeDataCache() goroutine to exit before returning from here. + doneCh := make(chan struct{}) + defer func() { + <-doneCh + }() + go b.purgeDataCache(doneCh) + for { select { case u := <-b.updateCh.Get(): @@ -194,7 +209,7 @@ func (b *rlsBalancer) run() { default: b.logger.Errorf("Unsupported update type %T", update) } - case <-b.done.Done(): + case <-b.closed.Done(): return } } @@ -203,10 +218,12 @@ func (b *rlsBalancer) run() { // purgeDataCache is a long-running goroutine which periodically deletes expired // entries. An expired entry is one for which both the expiryTime and // backoffExpiryTime are in the past. 
-func (b *rlsBalancer) purgeDataCache() { +func (b *rlsBalancer) purgeDataCache(doneCh chan struct{}) { + defer close(doneCh) + for { select { - case <-b.done.Done(): + case <-b.closed.Done(): return case <-b.purgeTicker.C: b.cacheMu.Lock() @@ -215,19 +232,21 @@ func (b *rlsBalancer) purgeDataCache() { if updatePicker { b.sendNewPicker() } - dataCachePurgeHook() + b.dataCachePurgeHook() } } } func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { defer clientConnUpdateHook() - if b.done.HasFired() { + + b.stateMu.Lock() + if b.closed.HasFired() { + b.stateMu.Unlock() b.logger.Warningf("Received service config after balancer close: %s", pretty.ToJSON(ccs.BalancerConfig)) return errBalancerClosed } - b.stateMu.Lock() newCfg := ccs.BalancerConfig.(*lbConfig) if b.lbCfg.Equal(newCfg) { b.stateMu.Unlock() @@ -405,10 +424,9 @@ func (b *rlsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub } func (b *rlsBalancer) Close() { - b.done.Fire() - - b.purgeTicker.Stop() b.stateMu.Lock() + b.closed.Fire() + b.purgeTicker.Stop() if b.ctrlCh != nil { b.ctrlCh.close() } @@ -418,6 +436,8 @@ func (b *rlsBalancer) Close() { b.cacheMu.Lock() b.dataCache.stop() b.cacheMu.Unlock() + + <-b.done.Done() } func (b *rlsBalancer) ExitIdle() { @@ -479,8 +499,11 @@ func (b *rlsBalancer) sendNewPickerLocked() { func (b *rlsBalancer) sendNewPicker() { b.stateMu.Lock() + defer b.stateMu.Unlock() + if b.closed.HasFired() { + return + } b.sendNewPickerLocked() - b.stateMu.Unlock() } // The aggregated connectivity state reported is determined as follows: diff --git a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go index 6ac57188bff4..20da394ab2b2 100644 --- a/balancer/rls/balancer_test.go +++ b/balancer/rls/balancer_test.go @@ -1048,6 +1048,43 @@ func (s) TestUpdateStatePauses(t *testing.T) { // Make sure an RLS request is sent out. 
verifyRLSRequest(t, rlsReqCh, true) + // Wait for the control channel to become READY, before reading the states + // out of the wrapping top-level balancer. + // + // makeTestRPCAndExpectItToReachBackend repeatedly sends RPCs with short + // deadlines until one succeeds. See its docstring for details. + // + // The following sequence of events is possible: + // 1. When the first RPC is attempted above, a pending cache entry is + // created, an RLS request is sent out, and the pick is queued. The + // channel is in CONNECTING state. + // 2. When the RLS response arrives, the pending cache entry is moved to the + // data cache, a child policy is created for the target specified in the + // response and a new picker is returned. The channel is still in + // CONNECTING, and retried pick is again queued. + // 3. The child policy moves through the standard set of states, IDLE --> + // CONNECTING --> READY. And for each of these state changes, a new + // picker is sent on the channel. But the overall connectivity state of + // the channel is still CONNECTING. + // 4. Right around the time when the child policy becomes READY, the + // deadline associated with the first RPC made by + // makeTestRPCAndExpectItToReachBackend() could expire, and it could send + // a new one. And because the internal state of the LB policy now + // contains a child policy which is READY, this RPC will succeed. But the + // RLS LB policy has yet to push a new picker on the channel. + // 5. If we read the states seen by the top-level wrapping LB policy without + // waiting for the channel to become READY, there is a possibility that we + // might not see the READY state in there. And if that happens, we will + // see two extra states in the last check made in the test, and thereby + // the test would fail. Waiting for the channel to become READY here + // ensures that the test does not flake because of this rare sequence of + // events. 
+ for s := cc.GetState(); s != connectivity.Ready; s = cc.GetState() { + if !cc.WaitForStateChange(ctx, s) { + t.Fatal("Timeout when waiting for connectivity state to reach READY") + } + } + // Cache the state changes seen up to this point. states0 := wb.getStates() diff --git a/balancer/rls/helpers_test.go b/balancer/rls/helpers_test.go index 67ca18755ee9..fa2029b97ecf 100644 --- a/balancer/rls/helpers_test.go +++ b/balancer/rls/helpers_test.go @@ -21,7 +21,6 @@ package rls import ( "context" "strings" - "sync" "testing" "time" @@ -30,6 +29,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" @@ -104,18 +104,14 @@ func neverThrottlingThrottler() *fakeThrottler { } } -// oneTimeAllowingThrottler returns a fake throttler which does not throttle the -// first request, but throttles everything that comes after. This is useful for -// tests which need to set up a valid cache entry before testing other cases. -func oneTimeAllowingThrottler() *fakeThrottler { - var once sync.Once +// oneTimeAllowingThrottler returns a fake throttler which does not throttle +// requests until the client RPC succeeds, but throttles everything that comes +// after. This is useful for tests which need to set up a valid cache entry +// before testing other cases. 
+func oneTimeAllowingThrottler(firstRPCDone *grpcsync.Event) *fakeThrottler { return &fakeThrottler{ - throttleFunc: func() bool { - throttle := true - once.Do(func() { throttle = false }) - return throttle - }, - throttleCh: make(chan struct{}, 1), + throttleFunc: firstRPCDone.HasFired, + throttleCh: make(chan struct{}, 1), } } diff --git a/balancer/rls/picker_test.go b/balancer/rls/picker_test.go index 87c4af471a0a..887b855f87b1 100644 --- a/balancer/rls/picker_test.go +++ b/balancer/rls/picker_test.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/stubserver" rlstest "google.golang.org/grpc/internal/testutils/rls" "google.golang.org/grpc/metadata" @@ -185,10 +186,16 @@ func (s) TestPick_DataCacheMiss_PendingEntryExists(t *testing.T) { } defer cc.Close() - // Make an RPC and expect it to fail with deadline exceeded error. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + // Make an RPC that results in the RLS request being sent out. And + // since the RLS server is configured to block on the first request, + // this RPC will block until its context expires. This ensures that + // we have a pending cache entry for the duration of the test. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + go func() { + client := testgrpc.NewTestServiceClient(cc) + client.EmptyCall(ctx, &testpb.Empty{}) + }() // Make sure an RLS request is sent out. verifyRLSRequest(t, rlsReqCh, true) @@ -329,8 +336,9 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_StaleEntry(t *testing.T) { // Start an RLS server and setup the throttler appropriately. 
rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) var throttler *fakeThrottler + firstRPCDone := grpcsync.NewEvent() if test.throttled { - throttler = oneTimeAllowingThrottler() + throttler = oneTimeAllowingThrottler(firstRPCDone) overrideAdaptiveThrottler(t, throttler) } else { throttler = neverThrottlingThrottler() @@ -368,6 +376,7 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_StaleEntry(t *testing.T) { // Make sure an RLS request is sent out. verifyRLSRequest(t, rlsReqCh, true) + firstRPCDone.Fire() // The cache entry has a large maxAge, but a small stateAge. We keep // retrying until the cache entry becomes stale, in which case we expect a @@ -429,8 +438,9 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntry(t *testing.T) { // Start an RLS server and setup the throttler appropriately. rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) var throttler *fakeThrottler + firstRPCDone := grpcsync.NewEvent() if test.throttled { - throttler = oneTimeAllowingThrottler() + throttler = oneTimeAllowingThrottler(firstRPCDone) overrideAdaptiveThrottler(t, throttler) } else { throttler = neverThrottlingThrottler() @@ -475,6 +485,7 @@ func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntry(t *testing.T) { // Make sure an RLS request is sent out. verifyRLSRequest(t, rlsReqCh, true) + firstRPCDone.Fire() // Keep retrying the RPC until the cache entry expires. Expected behavior // is dependent on the scenario being tested. @@ -612,21 +623,26 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_StaleEntry(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // A unary interceptor which does nothing on the first RPC, but - // blocks on subsequent RPCs on the fake RLS server until the test - // is done. Since we configure the LB policy with a really low value - // for stale age, this allows us to simulate the condition where the - // LB policy has a stale entry and a pending entry in the cache. 
+ // A unary interceptor which simply calls the underlying handler + // until the first client RPC is done. We want one client RPC to + // succeed to ensure that a data cache entry is created. For + // subsequent client RPCs which result in RLS requests, this + // interceptor blocks until the test's context expires. And since we + // configure the RLS LB policy with a really low value for max age, + // this allows us to simulate the condition where the it has an + // expired entry and a pending entry in the cache. rlsReqCh := make(chan struct{}, 1) - i := 0 + firstRPCDone := grpcsync.NewEvent() interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - rlsReqCh <- struct{}{} - if i == 0 { - i++ - return handler(ctx, req) + select { + case rlsReqCh <- struct{}{}: + default: } - <-ctx.Done() - return nil, ctx.Err() + if firstRPCDone.HasFired() { + <-ctx.Done() + return nil, ctx.Err() + } + return handler(ctx, req) } // Start an RLS server and set the throttler to never throttle. @@ -669,6 +685,7 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_StaleEntry(t *testing.T) { // Make sure an RLS request is sent out. verifyRLSRequest(t, rlsReqCh, true) + firstRPCDone.Fire() // The cache entry has a large maxAge, but a small stateAge. We keep // retrying until the cache entry becomes stale, in which case we expect a @@ -706,22 +723,26 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_ExpiredEntry(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // A unary interceptor which does nothing on the first RPC, but - // blocks on subsequent RPCs on the fake RLS server until the test - // is done. And since we configure the LB policy with a really low - // value for max age, this allows us to simulate the condition where - // the LB policy has an expired entry and a pending entry in the - // cache. 
+ // A unary interceptor which simply calls the underlying handler + // until the first client RPC is done. We want one client RPC to + // succeed to ensure that a data cache entry is created. For + // subsequent client RPCs which result in RLS requests, this + // interceptor blocks until the test's context expires. And since we + // configure the RLS LB policy with a really low value for max age, + // this allows us to simulate the condition where the it has an + // expired entry and a pending entry in the cache. rlsReqCh := make(chan struct{}, 1) - i := 0 + firstRPCDone := grpcsync.NewEvent() interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - rlsReqCh <- struct{}{} - if i == 0 { - i++ - return handler(ctx, req) + select { + case rlsReqCh <- struct{}{}: + default: } - <-ctx.Done() - return nil, ctx.Err() + if firstRPCDone.HasFired() { + <-ctx.Done() + return nil, ctx.Err() + } + return handler(ctx, req) } // Start an RLS server and set the throttler to never throttle. @@ -762,13 +783,18 @@ func (s) TestPick_DataCacheHit_PendingEntryExists_ExpiredEntry(t *testing.T) { // Make sure an RLS request is sent out. verifyRLSRequest(t, rlsReqCh, true) - - // At this point, we have a cache entry with a small maxAge, and the RLS - // server is configured to block on further RLS requests. As we retry the - // RPC, at some point the cache entry would expire and force us to send an - // RLS request. But this request would exceed the deadline since the - // server blocks. - makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + firstRPCDone.Fire() + + // At this point, we have a cache entry with a small maxAge, and the + // RLS server is configured to block on further RLS requests. 
As we + // retry the RPC, at some point the cache entry would expire and + // force us to send an RLS request which would block on the server, + // giving us a pending cache entry for the duration of the test. + go func() { + for client := testgrpc.NewTestServiceClient(cc); ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + client.EmptyCall(ctx, &testpb.Empty{}) + } + }() verifyRLSRequest(t, rlsReqCh, true) // Another RPC at this point should find the pending entry and be queued. diff --git a/internal/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go index ae17801fe2f0..c1f7e75c3ec8 100644 --- a/internal/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -175,6 +175,9 @@ func (sbc *subBalancerWrapper) gracefulSwitch(builder balancer.Builder) { } func (sbc *subBalancerWrapper) stopBalancer() { + if sbc.balancer == nil { + return + } sbc.balancer.Close() sbc.balancer = nil } @@ -393,13 +396,15 @@ func (bg *BalancerGroup) Remove(id string) { if sbToRemove, ok := bg.idToBalancerConfig[id]; ok { if bg.outgoingStarted { bg.balancerCache.Add(id, sbToRemove, func() { - // After timeout, when sub-balancer is removed from cache, need - // to close the underlying sub-balancer, and remove all its - // subconns. + // A sub-balancer evicted from the timeout cache needs to closed + // and its subConns need to removed, unconditionally. There is a + // possibility that a sub-balancer might be removed (thereby + // moving it to the cache) around the same time that the + // balancergroup is closed, and by the time we get here the + // balancergroup might be closed. Check for `outgoingStarted == + // true` at that point can lead to a leaked sub-balancer. 
bg.outgoingMu.Lock() - if bg.outgoingStarted { - sbToRemove.stopBalancer() - } + sbToRemove.stopBalancer() bg.outgoingMu.Unlock() bg.cleanupSubConns(sbToRemove) }) From 4565dd70ae16baf38a102bf5f44ded22bc57b357 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 22 Dec 2022 08:31:38 -0800 Subject: [PATCH 724/998] ringhash: allow overriding max ringhash size via environment variable (#5884) --- internal/envconfig/envconfig.go | 20 +++++ internal/envconfig/envconfig_test.go | 74 +++++++++++++++++++ xds/internal/balancer/ringhash/config.go | 15 ++-- xds/internal/balancer/ringhash/config_test.go | 43 ++++++++++- 4 files changed, 140 insertions(+), 12 deletions(-) create mode 100644 internal/envconfig/envconfig_test.go diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index 7edd196bd3da..6e6b00ffc1a8 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -21,6 +21,7 @@ package envconfig import ( "os" + "strconv" "strings" ) @@ -36,4 +37,23 @@ var ( // AdvertiseCompressors is set if registered compressor should be advertised // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). 
+ RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) ) + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/internal/envconfig/envconfig_test.go b/internal/envconfig/envconfig_test.go new file mode 100644 index 000000000000..13923e5bbe6a --- /dev/null +++ b/internal/envconfig/envconfig_test.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package envconfig + +import ( + "os" + "testing" + + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestUint64FromEnv(t *testing.T) { + + var testCases = []struct { + name string + val string + def, min, max uint64 + want uint64 + }{ + { + name: "error parsing", + val: "asdf", def: 5, want: 5, + }, { + name: "unset", + val: "", def: 5, want: 5, + }, { + name: "too low", + val: "5", min: 10, want: 10, + }, { + name: "too high", + val: "5", max: 2, want: 2, + }, { + name: "in range", + val: "17391", def: 13000, min: 12000, max: 18000, want: 17391, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + const testVar = "testvar" + if tc.val == "" { + os.Unsetenv(testVar) + } else { + os.Setenv(testVar, tc.val) + } + if got := uint64FromEnv(testVar, tc.def, tc.min, tc.max); got != tc.want { + t.Errorf("uint64FromEnv(%q(=%q), %v, %v, %v) = %v; want %v", testVar, tc.val, tc.def, tc.min, tc.max, got, tc.want) + } + }) + } +} diff --git a/xds/internal/balancer/ringhash/config.go b/xds/internal/balancer/ringhash/config.go index 4278b0636c7d..4763120fa649 100644 --- a/xds/internal/balancer/ringhash/config.go +++ b/xds/internal/balancer/ringhash/config.go @@ -22,6 +22,7 @@ import ( "encoding/json" "fmt" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/serviceconfig" ) @@ -36,8 +37,6 @@ type LBConfig struct { const ( defaultMinSize = 1024 defaultMaxSize = 4096 - // TODO(apolcyn): make makeRingSizeCap configurable, with either a dial option or global setting - maxRingSizeCap = 4096 ) func parseConfig(c json.RawMessage) (*LBConfig, error) { @@ -51,14 +50,14 @@ func parseConfig(c json.RawMessage) (*LBConfig, error) { if cfg.MaxRingSize == 0 { cfg.MaxRingSize = defaultMaxSize } - if cfg.MinRingSize > maxRingSizeCap { - cfg.MinRingSize = maxRingSizeCap - } - if cfg.MaxRingSize > maxRingSizeCap { - 
cfg.MaxRingSize = maxRingSizeCap - } if cfg.MinRingSize > cfg.MaxRingSize { return nil, fmt.Errorf("min %v is greater than max %v", cfg.MinRingSize, cfg.MaxRingSize) } + if cfg.MinRingSize > envconfig.RingHashCap { + cfg.MinRingSize = envconfig.RingHashCap + } + if cfg.MaxRingSize > envconfig.RingHashCap { + cfg.MaxRingSize = envconfig.RingHashCap + } return &cfg, nil } diff --git a/xds/internal/balancer/ringhash/config_test.go b/xds/internal/balancer/ringhash/config_test.go index 175301981ef4..d8f9ed30bb68 100644 --- a/xds/internal/balancer/ringhash/config_test.go +++ b/xds/internal/balancer/ringhash/config_test.go @@ -22,14 +22,16 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/envconfig" ) func (s) TestParseConfig(t *testing.T) { tests := []struct { - name string - js string - want *LBConfig - wantErr bool + name string + js string + envConfigCap uint64 + want *LBConfig + wantErr bool }{ { name: "OK", @@ -52,9 +54,42 @@ func (s) TestParseConfig(t *testing.T) { want: nil, wantErr: true, }, + { + name: "min greater than max greater than global limit", + js: `{"minRingSize": 6000, "maxRingSize": 5000}`, + want: nil, + wantErr: true, + }, + { + name: "max greater than global limit", + js: `{"minRingSize": 1, "maxRingSize": 6000}`, + want: &LBConfig{MinRingSize: 1, MaxRingSize: 4096}, + }, + { + name: "min and max greater than global limit", + js: `{"minRingSize": 5000, "maxRingSize": 6000}`, + want: &LBConfig{MinRingSize: 4096, MaxRingSize: 4096}, + }, + { + name: "min and max less than raised global limit", + js: `{"minRingSize": 5000, "maxRingSize": 6000}`, + envConfigCap: 8000, + want: &LBConfig{MinRingSize: 5000, MaxRingSize: 6000}, + }, + { + name: "min and max greater than raised global limit", + js: `{"minRingSize": 10000, "maxRingSize": 10000}`, + envConfigCap: 8000, + want: &LBConfig{MinRingSize: 8000, MaxRingSize: 8000}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + if 
tt.envConfigCap != 0 { + old := envconfig.RingHashCap + defer func() { envconfig.RingHashCap = old }() + envconfig.RingHashCap = tt.envConfigCap + } got, err := parseConfig([]byte(tt.js)) if (err != nil) != tt.wantErr { t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) From 0e5421c1e5331f07dff52801ff24abe9ca188b2e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 22 Dec 2022 15:02:43 -0800 Subject: [PATCH 725/998] internal/envconfig: add convenience boolFromEnv to improve readability (#5887) --- internal/envconfig/envconfig.go | 19 ++++++++++------- internal/envconfig/envconfig_test.go | 31 +++++++++++++++++++++++++++- internal/envconfig/xds.go | 31 ++++++++++------------------ 3 files changed, 52 insertions(+), 29 deletions(-) diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index 6e6b00ffc1a8..5ba9d94d49c2 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -25,18 +25,12 @@ import ( "strings" ) -const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" - advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" -) - var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) // AdvertiseCompressors is set if registered compressor should be advertised // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". 
This does not override the default bounds @@ -44,6 +38,15 @@ var ( RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) ) +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + func uint64FromEnv(envVar string, def, min, max uint64) uint64 { v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) if err != nil { diff --git a/internal/envconfig/envconfig_test.go b/internal/envconfig/envconfig_test.go index 13923e5bbe6a..68fdf6c73a7f 100644 --- a/internal/envconfig/envconfig_test.go +++ b/internal/envconfig/envconfig_test.go @@ -34,7 +34,6 @@ func Test(t *testing.T) { } func (s) TestUint64FromEnv(t *testing.T) { - var testCases = []struct { name string val string @@ -72,3 +71,33 @@ func (s) TestUint64FromEnv(t *testing.T) { }) } } + +func (s) TestBoolFromEnv(t *testing.T) { + var testCases = []struct { + val string + def bool + want bool + }{ + {val: "", def: true, want: true}, + {val: "", def: false, want: false}, + {val: "true", def: true, want: true}, + {val: "true", def: false, want: true}, + {val: "false", def: true, want: false}, + {val: "false", def: false, want: false}, + {val: "asdf", def: true, want: true}, + {val: "asdf", def: false, want: false}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + const testVar = "testvar" + if tc.val == "" { + os.Unsetenv(testVar) + } else { + os.Setenv(testVar, tc.val) + } + if got := boolFromEnv(testVar, tc.def); got != tc.want { + t.Errorf("boolFromEnv(%q(=%q), %v) = %v; want %v", testVar, tc.val, tc.def, got, tc.want) + } + }) + } +} diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index af09711a3e88..04136882c7bc 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -20,7 +20,6 @@ 
package envconfig import ( "os" - "strings" ) const ( @@ -36,16 +35,6 @@ const ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" - outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" - federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" - rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" - - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) var ( @@ -64,38 +53,40 @@ var ( // XDSRingHash indicates whether ring hash support is enabled, which can be // disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) // XDSClientSideSecurity is used to control processing of security // configuration on the client-side. // // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. - XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) // XDSAggregateAndDNS indicates whether processing of aggregated cluster // and DNS cluster is enabled, which can be enabled by setting the // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". 
- XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) // XDSOutlierDetection indicates whether outlier detection support is // enabled, which can be disabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") - // XDSFederation indicates whether federation support is enabled. - XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) // XDSRLS indicates whether processing of Cluster Specifier plugins and // support for the RLS CLuster Specifier is enabled, which can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to // "true". - XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
- C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") ) From c90744f16afe371e5a76d8022ebcacf69421b2d7 Mon Sep 17 00:00:00 2001 From: Theodore Salvo Date: Tue, 27 Dec 2022 22:06:47 -0500 Subject: [PATCH 726/998] oauth: mark `NewOauthAccess` as deprecated and update examples to use `TokenSource` (#5882) * Mark NewOauthAccess as deprecated & change examples * Fix composite literal uses unkeyed fields for v1.19 --- Documentation/grpc-auth-support.md | 2 +- credentials/oauth/oauth.go | 2 ++ examples/features/authentication/README.md | 6 +++--- examples/features/authentication/client/main.go | 4 ++-- examples/features/interceptor/client/main.go | 12 ++++++------ interop/client/client.go | 3 ++- 6 files changed, 16 insertions(+), 13 deletions(-) diff --git a/Documentation/grpc-auth-support.md b/Documentation/grpc-auth-support.md index 0a6b9f52c1cd..1362eeaa4ae2 100644 --- a/Documentation/grpc-auth-support.md +++ b/Documentation/grpc-auth-support.md @@ -53,7 +53,7 @@ Alternatively, a client may also use the `grpc.CallOption` on each invocation of an RPC. To create a `credentials.PerRPCCredentials`, use -[oauth.NewOauthAccess](https://godoc.org/google.golang.org/grpc/credentials/oauth#NewOauthAccess). +[oauth.TokenSource](https://godoc.org/google.golang.org/grpc/credentials/oauth#TokenSource). Note, the OAuth2 implementation of `grpc.PerRPCCredentials` requires a client to use [grpc.WithTransportCredentials](https://godoc.org/google.golang.org/grpc#WithTransportCredentials) to prevent any insecure transmission of tokens. diff --git a/credentials/oauth/oauth.go b/credentials/oauth/oauth.go index c748fd21ce2b..8eedfea2c226 100644 --- a/credentials/oauth/oauth.go +++ b/credentials/oauth/oauth.go @@ -121,6 +121,8 @@ type oauthAccess struct { } // NewOauthAccess constructs the PerRPCCredentials using a given token. 
+// +// Deprecated: use oauth.TokenSource instead. func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } diff --git a/examples/features/authentication/README.md b/examples/features/authentication/README.md index 0ba3f9469fc5..57028b8795d3 100644 --- a/examples/features/authentication/README.md +++ b/examples/features/authentication/README.md @@ -29,9 +29,9 @@ https://godoc.org/google.golang.org/grpc/credentials/oauth for details. #### Client -On client side, users should first get a valid oauth token, and then call -[`credentials.NewOauthAccess`](https://godoc.org/google.golang.org/grpc/credentials/oauth#NewOauthAccess) -to initialize a `credentials.PerRPCCredentials` with it. Next, if user wants to +On client side, users should first get a valid oauth token, and then initialize a +[`oauth.TokenSource`](https://godoc.org/google.golang.org/grpc/credentials/oauth#TokenSource) +which implements `credentials.PerRPCCredentials`. Next, if user wants to apply a single OAuth token for all RPC calls on the same connection, then configure grpc `Dial` with `DialOption` [`WithPerRPCCredentials`](https://godoc.org/google.golang.org/grpc#WithPerRPCCredentials). diff --git a/examples/features/authentication/client/main.go b/examples/features/authentication/client/main.go index ec46f2c52da4..a189b4be8cfa 100644 --- a/examples/features/authentication/client/main.go +++ b/examples/features/authentication/client/main.go @@ -50,7 +50,7 @@ func main() { flag.Parse() // Set up the credentials for the connection. - perRPC := oauth.NewOauthAccess(fetchToken()) + perRPC := oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(fetchToken())} creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "x.test.example.com") if err != nil { log.Fatalf("failed to load credentials: %v", err) @@ -61,7 +61,7 @@ func main() { // itself. 
// See: https://godoc.org/google.golang.org/grpc#PerRPCCredentials grpc.WithPerRPCCredentials(perRPC), - // oauth.NewOauthAccess requires the configuration of transport + // oauth.TokenSource requires the configuration of transport // credentials. grpc.WithTransportCredentials(creds), } diff --git a/examples/features/interceptor/client/main.go b/examples/features/interceptor/client/main.go index eba69b3c9887..0832e4861cdd 100644 --- a/examples/features/interceptor/client/main.go +++ b/examples/features/interceptor/client/main.go @@ -55,9 +55,9 @@ func unaryInterceptor(ctx context.Context, method string, req, reply interface{} } } if !credsConfigured { - opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ - AccessToken: fallbackToken, - }))) + opts = append(opts, grpc.PerRPCCredentials(oauth.TokenSource{ + TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: fallbackToken}), + })) } start := time.Now() err := invoker(ctx, method, req, reply, cc, opts...) @@ -97,9 +97,9 @@ func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.Clie } } if !credsConfigured { - opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ - AccessToken: fallbackToken, - }))) + opts = append(opts, grpc.PerRPCCredentials(oauth.TokenSource{ + TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: fallbackToken}), + })) } s, err := streamer(ctx, desc, cc, method, opts...) 
if err != nil { diff --git a/interop/client/client.go b/interop/client/client.go index 1e3a46a75745..194c37664788 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -28,6 +28,7 @@ import ( "strconv" "time" + "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" @@ -201,7 +202,7 @@ func main() { } opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) } else if *testCase == "oauth2_auth_token" { - opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(interop.GetToken(*serviceAccountKeyFile, *oauthScope)))) + opts = append(opts, grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(interop.GetToken(*serviceAccountKeyFile, *oauthScope))})) } } if len(*serviceConfigJSON) > 0 { From f1a9ef9c1b9dffea52f3f8d3b4be70953f08d785 Mon Sep 17 00:00:00 2001 From: Fu Wei Date: Thu, 29 Dec 2022 01:59:01 +0800 Subject: [PATCH 727/998] stream: update ServerStream.SendMsg doc (#5894) --- stream.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stream.go b/stream.go index 175aee9583ea..93231af2ac56 100644 --- a/stream.go +++ b/stream.go @@ -1483,6 +1483,9 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. 
On From 12b8fb52a18c8a1667dde7a4f8087ecdd2abbeaf Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 28 Dec 2022 17:23:09 -0600 Subject: [PATCH 728/998] test: move e2e HTTP header tests to http_header_end2end_test.go (#5901) --- test/end2end_test.go | 180 --------------------- test/http_header_end2end_test.go | 260 +++++++++++++++++++++++++++++++ 2 files changed, 260 insertions(+), 180 deletions(-) create mode 100644 test/http_header_end2end_test.go diff --git a/test/end2end_test.go b/test/end2end_test.go index ae536520fc09..0f5cbc345774 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -6754,158 +6754,6 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { } } -func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) { - // Non-gRPC content-type fallback path. - for httpCode := range transport.HTTPStatusConvTab { - if err := doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ - ":status", fmt.Sprintf("%d", httpCode), - "content-type", "text/html", // non-gRPC content type to switch to HTTP mode. - "grpc-status", "1", // Make up a gRPC status error - "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }); err != nil { - t.Error(err) - } - } - - // Missing content-type fallback path. - for httpCode := range transport.HTTPStatusConvTab { - if err := doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ - ":status", fmt.Sprintf("%d", httpCode), - // Omitting content type to switch to HTTP mode. - "grpc-status", "1", // Make up a gRPC status error - "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }); err != nil { - t.Error(err) - } - } - - // Malformed HTTP status when fallback. - if err := doHTTPHeaderTest(t, codes.Internal, []string{ - ":status", "abc", - // Omitting content type to switch to HTTP mode. 
- "grpc-status", "1", // Make up a gRPC status error - "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }); err != nil { - t.Error(err) - } -} - -// Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame). -func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { - for _, test := range []struct { - header []string - errCode codes.Code - }{ - { - // missing gRPC status. - header: []string{ - ":status", "403", - "content-type", "application/grpc", - }, - errCode: codes.PermissionDenied, - }, - { - // malformed grpc-status. - header: []string{ - ":status", "502", - "content-type", "application/grpc", - "grpc-status", "abc", - }, - errCode: codes.Internal, - }, - { - // Malformed grpc-tags-bin field. - header: []string{ - ":status", "502", - "content-type", "application/grpc", - "grpc-status", "0", - "grpc-tags-bin", "???", - }, - errCode: codes.Unavailable, - }, - { - // gRPC status error. - header: []string{ - ":status", "502", - "content-type", "application/grpc", - "grpc-status", "3", - }, - errCode: codes.Unavailable, - }, - } { - if err := doHTTPHeaderTest(t, test.errCode, test.header); err != nil { - t.Error(err) - } - } -} - -// Testing non-Trailers-only Trailers (delivered in second HEADERS frame) -func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { - tests := []struct { - name string - responseHeader []string - trailer []string - errCode codes.Code - }{ - { - name: "trailer missing grpc-status", - responseHeader: []string{ - ":status", "200", - "content-type", "application/grpc", - }, - trailer: []string{ - // trailer missing grpc-status - ":status", "502", - }, - errCode: codes.Unavailable, - }, - { - name: "malformed grpc-status-details-bin field with status 404", - responseHeader: []string{ - ":status", "404", - "content-type", "application/grpc", - }, - trailer: []string{ - // malformed grpc-status-details-bin field - "grpc-status", "0", - "grpc-status-details-bin", 
"????", - }, - errCode: codes.Unimplemented, - }, - { - name: "malformed grpc-status-details-bin field with status 200", - responseHeader: []string{ - ":status", "200", - "content-type", "application/grpc", - }, - trailer: []string{ - // malformed grpc-status-details-bin field - "grpc-status", "0", - "grpc-status-details-bin", "????", - }, - errCode: codes.Internal, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if err := doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer); err != nil { - t.Error(err) - } - }) - - } -} - -func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { - header := []string{ - ":status", "200", - "content-type", "application/grpc", - } - if err := doHTTPHeaderTest(t, codes.Internal, header, header, header); err != nil { - t.Fatal(err) - } -} - type httpServerResponse struct { headers [][]string payload []byte @@ -7032,34 +6880,6 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { }() } -func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) error { - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - return fmt.Errorf("listening on %q: %v", "localhost:0", err) - } - defer lis.Close() - server := &httpServer{ - responses: []httpServerResponse{{trailers: headerFields}}, - } - server.start(t, lis) - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return fmt.Errorf("dial(%q): %v", lis.Addr().String(), err) - } - defer cc.Close() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - client := testpb.NewTestServiceClient(cc) - stream, err := client.FullDuplexCall(ctx) - if err != nil { - return fmt.Errorf("creating FullDuplex stream: %v", err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != errCode { - return fmt.Errorf("stream.Recv() = %v, want error code: %v", err, errCode) - } - return nil -} 
- func (s) TestClientCancellationPropagatesUnary(t *testing.T) { wg := &sync.WaitGroup{} called, done := make(chan struct{}), make(chan struct{}) diff --git a/test/http_header_end2end_test.go b/test/http_header_end2end_test.go new file mode 100644 index 000000000000..efdbd530afbc --- /dev/null +++ b/test/http_header_end2end_test.go @@ -0,0 +1,260 @@ +/* +* +* Copyright 2022 gRPC authors. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* + */ +package test + +import ( + "context" + "fmt" + "net" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) { + type test struct { + name string + header []string + errCode codes.Code + } + + var tests []test + + // Non-gRPC content-type fallback path. + for httpCode := range transport.HTTPStatusConvTab { + tests = append(tests, test{ + name: fmt.Sprintf("Non-gRPC content-type fallback path with httpCode: %v", httpCode), + header: []string{ + ":status", fmt.Sprintf("%d", httpCode), + "content-type", "text/html", // non-gRPC content type to switch to HTTP mode. 
+ "grpc-status", "1", // Make up a gRPC status error + "grpc-status-details-bin", "???", // Make up a gRPC field parsing error + }, + errCode: transport.HTTPStatusConvTab[int(httpCode)], + }) + } + + // Missing content-type fallback path. + for httpCode := range transport.HTTPStatusConvTab { + tests = append(tests, test{ + name: fmt.Sprintf("Missing content-type fallback path with httpCode: %v", httpCode), + header: []string{ + ":status", fmt.Sprintf("%d", httpCode), + // Omitting content type to switch to HTTP mode. + "grpc-status", "1", // Make up a gRPC status error + "grpc-status-details-bin", "???", // Make up a gRPC field parsing error + }, + errCode: transport.HTTPStatusConvTab[int(httpCode)], + }) + } + + // Malformed HTTP status when fallback. + tests = append(tests, test{ + name: "Malformed HTTP status when fallback", + header: []string{ + ":status", "abc", + // Omitting content type to switch to HTTP mode. + "grpc-status", "1", // Make up a gRPC status error + "grpc-status-details-bin", "???", // Make up a gRPC field parsing error + }, + errCode: codes.Internal, + }) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + serverAddr, cleanup, err := startServer(t, test.header) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if err := doHTTPHeaderTest(serverAddr, test.errCode); err != nil { + t.Error(err) + } + }) + } +} + +// Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame). 
+func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { + for _, test := range []struct { + name string + header []string + errCode codes.Code + }{ + { + name: "missing gRPC status", + header: []string{ + ":status", "403", + "content-type", "application/grpc", + }, + errCode: codes.PermissionDenied, + }, + { + name: "malformed grpc-status", + header: []string{ + ":status", "502", + "content-type", "application/grpc", + "grpc-status", "abc", + }, + errCode: codes.Internal, + }, + { + name: "Malformed grpc-tags-bin field", + header: []string{ + ":status", "502", + "content-type", "application/grpc", + "grpc-status", "0", + "grpc-tags-bin", "???", + }, + errCode: codes.Unavailable, + }, + { + name: "gRPC status error", + header: []string{ + ":status", "502", + "content-type", "application/grpc", + "grpc-status", "3", + }, + errCode: codes.Unavailable, + }, + } { + t.Run(test.name, func(t *testing.T) { + serverAddr, cleanup, err := startServer(t, test.header) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if err := doHTTPHeaderTest(serverAddr, test.errCode); err != nil { + t.Error(err) + } + }) + } +} + +// Testing non-Trailers-only Trailers (delivered in second HEADERS frame) +func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { + tests := []struct { + name string + responseHeader []string + trailer []string + errCode codes.Code + }{ + { + name: "trailer missing grpc-status", + responseHeader: []string{ + ":status", "200", + "content-type", "application/grpc", + }, + trailer: []string{ + // trailer missing grpc-status + ":status", "502", + }, + errCode: codes.Unavailable, + }, + { + name: "malformed grpc-status-details-bin field with status 404", + responseHeader: []string{ + ":status", "404", + "content-type", "application/grpc", + }, + trailer: []string{ + // malformed grpc-status-details-bin field + "grpc-status", "0", + "grpc-status-details-bin", "????", + }, + errCode: codes.Unimplemented, + }, + { + name: "malformed 
grpc-status-details-bin field with status 200", + responseHeader: []string{ + ":status", "200", + "content-type", "application/grpc", + }, + trailer: []string{ + // malformed grpc-status-details-bin field + "grpc-status", "0", + "grpc-status-details-bin", "????", + }, + errCode: codes.Internal, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + serverAddr, cleanup, err := startServer(t, test.responseHeader, test.trailer) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if err := doHTTPHeaderTest(serverAddr, test.errCode); err != nil { + t.Error(err) + } + }) + + } +} + +func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { + header := []string{ + ":status", "200", + "content-type", "application/grpc", + } + serverAddr, cleanup, err := startServer(t, header, header, header) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if err := doHTTPHeaderTest(serverAddr, codes.Internal); err != nil { + t.Fatal(err) + } +} + +func startServer(t *testing.T, headerFields ...[]string) (serverAddr string, cleanup func(), err error) { + t.Helper() + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + return "", nil, fmt.Errorf("listening on %q: %v", "localhost:0", err) + } + server := &httpServer{responses: []httpServerResponse{{trailers: headerFields}}} + server.start(t, lis) + return lis.Addr().String(), func() { lis.Close() }, nil +} + +func doHTTPHeaderTest(lisAddr string, errCode codes.Code) error { + cc, err := grpc.Dial(lisAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return fmt.Errorf("dial(%q): %v", lisAddr, err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testpb.NewTestServiceClient(cc) + stream, err := client.FullDuplexCall(ctx) + if err != nil { + return fmt.Errorf("creating FullDuplex stream: %v", err) + } + if _, err := stream.Recv(); err == nil || 
status.Code(err) != errCode { + return fmt.Errorf("stream.Recv() = %v, want error code: %v", err, errCode) + } + return nil +} From 8ec85e4246eac45b3976cb440ae17367c3b39422 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 3 Jan 2023 11:12:51 -0800 Subject: [PATCH 729/998] priority: improve and reduce verbosity of logs (#5902) --- xds/internal/balancer/priority/balancer.go | 2 +- xds/internal/balancer/priority/balancer_child.go | 2 +- .../balancer/priority/balancer_priority.go | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 17f57a576d33..28062c51ee93 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -105,7 +105,7 @@ type priorityBalancer struct { } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + b.logger.Debugf("Received an update with balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) diff --git a/xds/internal/balancer/priority/balancer_child.go b/xds/internal/balancer/priority/balancer_child.go index ea7778bb56f8..7e8ccbd335e9 100644 --- a/xds/internal/balancer/priority/balancer_child.go +++ b/xds/internal/balancer/priority/balancer_child.go @@ -115,7 +115,7 @@ func (cb *childBalancer) sendUpdate() { BalancerConfig: cb.config, }) if err != nil { - cb.parent.logger.Warningf("failed to update ClientConn state for child %v: %v", cb.name, err) + cb.parent.logger.Warningf("Failed to update state for child policy %q: %v", cb.name, err) } } diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index 916f94ec0c02..4655bf418474 100644 
--- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -83,13 +83,13 @@ var ( // Caller must hold b.mu. func (b *priorityBalancer) syncPriority(childUpdating string) { if b.inhibitPickerUpdates { - b.logger.Infof("Skipping update from child with name %q", childUpdating) + b.logger.Debugf("Skipping update from child policy %q", childUpdating) return } for p, name := range b.priorities { child, ok := b.children[name] if !ok { - b.logger.Warningf("child with name %q is not found in children", name) + b.logger.Warningf("Priority name %q is not found in list of child policies", name) continue } @@ -99,12 +99,12 @@ func (b *priorityBalancer) syncPriority(childUpdating string) { (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { if b.childInUse != child.name || child.name == childUpdating { - b.logger.Warningf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + b.logger.Debugf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) // If we switch children or the child in use just updated its // picker, push the child's picker to the parent. b.cc.UpdateState(child.state) } - b.logger.Infof("switching to (%q, %v) in syncPriority", child.name, p) + b.logger.Debugf("Switching to (%q, %v) in syncPriority", child.name, p) b.switchToChild(child, p) break } @@ -119,7 +119,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { name := b.priorities[i] child, ok := b.children[name] if !ok { - b.logger.Warningf("child with name %q is not found in children", name) + b.logger.Warningf("Priority name %q is not found in list of child policies", name) continue } child.stop() @@ -173,11 +173,11 @@ func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.S // necessary. 
child, ok := b.children[childName] if !ok { - b.logger.Warningf("priority: child balancer not found for child %v", childName) + b.logger.Warningf("Child policy not found for %q", childName) return } if !child.started { - b.logger.Warningf("priority: ignoring update from child %q which is not in started state: %+v", childName, s) + b.logger.Warningf("Ignoring update from child policy %q which is not in started state: %+v", childName, s) return } child.state = s From f2fbb0e07ebf3dd46aa641ee89c9f17e8083eaf6 Mon Sep 17 00:00:00 2001 From: Theodore Salvo Date: Tue, 3 Jan 2023 14:20:20 -0500 Subject: [PATCH 730/998] Deprecate use of `ioutil` package (#5906) Resolves https://github.com/grpc/grpc-go/issues/5897 --- authz/grpc_authz_end2end_test.go | 25 +++++++++---------- authz/grpc_authz_server_interceptors.go | 4 +-- authz/grpc_authz_server_interceptors_test.go | 9 +++---- balancer/rls/control_channel_test.go | 6 ++--- benchmark/benchmain/main.go | 3 +-- benchmark/stats/curve.go | 3 +-- binarylog/sink.go | 4 +-- credentials/oauth/oauth.go | 6 ++--- credentials/sts/sts.go | 9 ++++--- credentials/sts/sts_test.go | 14 +++++------ credentials/tls.go | 4 +-- .../tls/certprovider/pemfile/watcher.go | 8 +++--- .../tls/certprovider/pemfile/watcher_test.go | 17 ++++++------- credentials/tls/certprovider/store_test.go | 4 +-- credentials/xds/xds_client_test.go | 6 ++--- credentials/xds/xds_server_test.go | 4 +-- encoding/gzip/gzip.go | 5 ++-- .../features/encryption/mTLS/client/main.go | 4 +-- .../features/encryption/mTLS/server/main.go | 4 +-- examples/route_guide/server/server.go | 4 +-- gcp/observability/config.go | 3 +-- gcp/observability/observability_test.go | 3 +-- grpclog/loggerv2.go | 7 +++--- internal/credentials/spiffe_test.go | 6 ++--- internal/googlecloud/manufacturer_linux.go | 4 +-- internal/testutils/xds/bootstrap/bootstrap.go | 5 ++-- internal/testutils/xds/e2e/setup_certs.go | 17 ++++++------- interop/client/client.go | 4 +-- interop/test_utils.go 
| 3 +-- rpc_util.go | 7 +++--- .../advancedtls_integration_test.go | 17 ++++++------- security/advancedtls/crl.go | 4 +-- security/advancedtls/crl_test.go | 15 ++++++----- .../internal/testutils/testutils.go | 4 +-- xds/googledirectpath/utils.go | 4 +-- xds/internal/xdsclient/bootstrap/bootstrap.go | 4 +-- 36 files changed, 118 insertions(+), 132 deletions(-) diff --git a/authz/grpc_authz_end2end_test.go b/authz/grpc_authz_end2end_test.go index f27ca2c06e20..c7cf26e122a4 100644 --- a/authz/grpc_authz_end2end_test.go +++ b/authz/grpc_authz_end2end_test.go @@ -23,7 +23,6 @@ import ( "crypto/tls" "crypto/x509" "io" - "io/ioutil" "net" "os" "testing" @@ -434,9 +433,9 @@ func (s) TestAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t if err != nil { t.Fatalf("tls.LoadX509KeyPair(x509/server1_cert.pem, x509/server1_key.pem) failed: %v", err) } - ca, err := ioutil.ReadFile(testdata.Path("x509/client_ca_cert.pem")) + ca, err := os.ReadFile(testdata.Path("x509/client_ca_cert.pem")) if err != nil { - t.Fatalf("ioutil.ReadFile(x509/client_ca_cert.pem) failed: %v", err) + t.Fatalf("os.ReadFile(x509/client_ca_cert.pem) failed: %v", err) } certPool := x509.NewCertPool() if !certPool.AppendCertsFromPEM(ca) { @@ -464,9 +463,9 @@ func (s) TestAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t if err != nil { t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) } - ca, err = ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + ca, err = os.ReadFile(testdata.Path("x509/server_ca_cert.pem")) if err != nil { - t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) + t.Fatalf("os.ReadFile(x509/server_ca_cert.pem) failed: %v", err) } roots := x509.NewCertPool() if !roots.AppendCertsFromPEM(ca) { @@ -602,8 +601,8 @@ func (s) TestFileWatcher_ValidPolicyRefresh(t *testing.T) { // Rewrite the file with a different valid authorization policy. 
valid2 := authzTests["AllowsRPCEmptyDenyMatchInAllow"] - if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + if err := os.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) } // Verifying authorization decision. @@ -649,8 +648,8 @@ func (s) TestFileWatcher_InvalidPolicySkipReload(t *testing.T) { } // Skips the invalid policy update, and continues to use the valid policy. - if err := ioutil.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + if err := os.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) } // Wait 40 ms for background go routine to read updated files. @@ -700,8 +699,8 @@ func (s) TestFileWatcher_RecoversFromReloadFailure(t *testing.T) { } // Skips the invalid policy update, and continues to use the valid policy. - if err := ioutil.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + if err := os.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) } // Wait 120 ms for background go routine to read updated files. @@ -715,8 +714,8 @@ func (s) TestFileWatcher_RecoversFromReloadFailure(t *testing.T) { // Rewrite the file with a different valid authorization policy. valid2 := authzTests["AllowsRPCEmptyDenyMatchInAllow"] - if err := ioutil.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", file, err) + if err := os.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) } // Verifying authorization decision. 
diff --git a/authz/grpc_authz_server_interceptors.go b/authz/grpc_authz_server_interceptors.go index 1ac5e967030d..ab93af13f37e 100644 --- a/authz/grpc_authz_server_interceptors.go +++ b/authz/grpc_authz_server_interceptors.go @@ -20,7 +20,7 @@ import ( "bytes" "context" "fmt" - "io/ioutil" + "os" "sync/atomic" "time" "unsafe" @@ -140,7 +140,7 @@ func (i *FileWatcherInterceptor) run(ctx context.Context) { // constructor, if there is an error in reading the file or parsing the policy, the // previous internalInterceptors will not be replaced. func (i *FileWatcherInterceptor) updateInternalInterceptor() error { - policyContents, err := ioutil.ReadFile(i.policyFile) + policyContents, err := os.ReadFile(i.policyFile) if err != nil { return fmt.Errorf("policyFile(%s) read failed: %v", i.policyFile, err) } diff --git a/authz/grpc_authz_server_interceptors_test.go b/authz/grpc_authz_server_interceptors_test.go index f43f9807612e..ae74c896d960 100644 --- a/authz/grpc_authz_server_interceptors_test.go +++ b/authz/grpc_authz_server_interceptors_test.go @@ -20,7 +20,6 @@ package authz_test import ( "fmt" - "io/ioutil" "os" "path" "testing" @@ -34,15 +33,15 @@ func createTmpPolicyFile(t *testing.T, dirSuffix string, policy []byte) string { // Create a temp directory. Passing an empty string for the first argument // uses the system temp directory. - dir, err := ioutil.TempDir("", dirSuffix) + dir, err := os.MkdirTemp("", dirSuffix) if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) + t.Fatalf("os.MkdirTemp() failed: %v", err) } t.Logf("Using tmpdir: %s", dir) // Write policy into file. 
filename := path.Join(dir, "policy.json") - if err := ioutil.WriteFile(filename, policy, os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", filename, err) + if err := os.WriteFile(filename, policy, os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", filename, err) } t.Logf("Wrote policy %s to file at %s", string(policy), filename) return filename diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go index 86342f7d5b44..d401d4cb6de8 100644 --- a/balancer/rls/control_channel_test.go +++ b/balancer/rls/control_channel_test.go @@ -24,7 +24,7 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" + "os" "regexp" "testing" "time" @@ -215,9 +215,9 @@ func makeTLSCreds(t *testing.T, certPath, keyPath, rootsPath string) credentials if err != nil { t.Fatalf("tls.LoadX509KeyPair(%q, %q) failed: %v", certPath, keyPath, err) } - b, err := ioutil.ReadFile(testdata.Path(rootsPath)) + b, err := os.ReadFile(testdata.Path(rootsPath)) if err != nil { - t.Fatalf("ioutil.ReadFile(%q) failed: %v", rootsPath, err) + t.Fatalf("os.ReadFile(%q) failed: %v", rootsPath, err) } roots := x509.NewCertPool() if !roots.AppendCertsFromPEM(b) { diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index d9d1e6d6a8cf..cbdefac17e9a 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -46,7 +46,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" "net" "os" @@ -881,5 +880,5 @@ func (nopCompressor) Type() string { return compModeNop } // nopDecompressor is a decompressor that just copies data. 
type nopDecompressor struct{} -func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) } +func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return io.ReadAll(r) } func (nopDecompressor) Type() string { return compModeNop } diff --git a/benchmark/stats/curve.go b/benchmark/stats/curve.go index 68821bcc2690..124183dac2ea 100644 --- a/benchmark/stats/curve.go +++ b/benchmark/stats/curve.go @@ -23,7 +23,6 @@ import ( "encoding/csv" "encoding/hex" "fmt" - "io/ioutil" "math" "math/rand" "os" @@ -81,7 +80,7 @@ func (pcr *payloadCurveRange) chooseRandom() int { // sha256file is a helper function that returns a hex string matching the // SHA-256 sum of the input file. func sha256file(file string) (string, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return "", err } diff --git a/binarylog/sink.go b/binarylog/sink.go index db79346a2917..4b6aa653f93c 100644 --- a/binarylog/sink.go +++ b/binarylog/sink.go @@ -24,7 +24,7 @@ package binarylog import ( "fmt" - "io/ioutil" + "os" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" iblog "google.golang.org/grpc/internal/binarylog" @@ -60,7 +60,7 @@ func NewTempFileSink() (Sink, error) { // Two other options to replace this function: // 1. take filename as input. // 2. export NewBufferedSink(). - tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") + tempFile, err := os.CreateTemp("/tmp", "grpcgo_binarylog_*.txt") if err != nil { return nil, fmt.Errorf("failed to create temp file: %v", err) } diff --git a/credentials/oauth/oauth.go b/credentials/oauth/oauth.go index 8eedfea2c226..d475cbc0894c 100644 --- a/credentials/oauth/oauth.go +++ b/credentials/oauth/oauth.go @@ -22,8 +22,8 @@ package oauth import ( "context" "fmt" - "io/ioutil" "net/url" + "os" "sync" "golang.org/x/oauth2" @@ -73,7 +73,7 @@ type jwtAccess struct { // NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. 
func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } @@ -192,7 +192,7 @@ func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerR // NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file // of a Google Developers service account. func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } diff --git a/credentials/sts/sts.go b/credentials/sts/sts.go index 19ca7d0b3305..0110201a98f3 100644 --- a/credentials/sts/sts.go +++ b/credentials/sts/sts.go @@ -33,9 +33,10 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "sync" "time" @@ -58,8 +59,8 @@ const ( var ( loadSystemCertPool = x509.SystemCertPool makeHTTPDoer = makeHTTPClient - readSubjectTokenFrom = ioutil.ReadFile - readActorTokenFrom = ioutil.ReadFile + readSubjectTokenFrom = os.ReadFile + readActorTokenFrom = os.ReadFile logger = grpclog.Component("credentials") ) @@ -306,7 +307,7 @@ func sendRequest(client httpDoer, req *http.Request) ([]byte, error) { // When the http.Client returns a non-nil error, it is the // responsibility of the caller to read the response body till an EOF is // encountered and to close it. 
- body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) resp.Body.Close() if err != nil { return nil, err diff --git a/credentials/sts/sts_test.go b/credentials/sts/sts_test.go index 3a2f239dacae..70bfa8b046f3 100644 --- a/credentials/sts/sts_test.go +++ b/credentials/sts/sts_test.go @@ -25,7 +25,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httputil" "strings" @@ -123,7 +123,7 @@ func makeGoodResponse() *http.Response { TokenType: "Bearer", ExpiresIn: 3600, }) - respBody := ioutil.NopCloser(bytes.NewReader(respJSON)) + respBody := io.NopCloser(bytes.NewReader(respJSON)) return &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, @@ -330,7 +330,7 @@ func (s) TestGetRequestMetadataCacheExpiry(t *testing.T) { TokenType: "Bearer", ExpiresIn: expiresInSecs, }) - respBody := ioutil.NopCloser(bytes.NewReader(respJSON)) + respBody := io.NopCloser(bytes.NewReader(respJSON)) resp := &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, @@ -366,7 +366,7 @@ func (s) TestGetRequestMetadataBadResponses(t *testing.T) { response: &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, - Body: ioutil.NopCloser(strings.NewReader("not JSON")), + Body: io.NopCloser(strings.NewReader("not JSON")), }, }, { @@ -374,7 +374,7 @@ func (s) TestGetRequestMetadataBadResponses(t *testing.T) { response: &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, - Body: ioutil.NopCloser(strings.NewReader("{}")), + Body: io.NopCloser(strings.NewReader("{}")), }, }, } @@ -669,7 +669,7 @@ func (s) TestSendRequest(t *testing.T) { resp: &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, - Body: ioutil.NopCloser(errReader{}), + Body: io.NopCloser(errReader{}), }, wantErr: true, }, @@ -678,7 +678,7 @@ func (s) TestSendRequest(t *testing.T) { resp: &http.Response{ Status: "400 BadRequest", StatusCode: http.StatusBadRequest, - Body: ioutil.NopCloser(strings.NewReader("")), + Body: 
io.NopCloser(strings.NewReader("")), }, wantErr: true, }, diff --git a/credentials/tls.go b/credentials/tls.go index ce2bbc10a142..877b7cd21af7 100644 --- a/credentials/tls.go +++ b/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } diff --git a/credentials/tls/certprovider/pemfile/watcher.go b/credentials/tls/certprovider/pemfile/watcher.go index 3c62491f7be8..7ed5c53ba404 100644 --- a/credentials/tls/certprovider/pemfile/watcher.go +++ b/credentials/tls/certprovider/pemfile/watcher.go @@ -32,7 +32,7 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" + "os" "path/filepath" "time" @@ -154,12 +154,12 @@ func (w *watcher) updateIdentityDistributor() { return } - certFileContents, err := ioutil.ReadFile(w.opts.CertFile) + certFileContents, err := os.ReadFile(w.opts.CertFile) if err != nil { logger.Warningf("certFile (%s) read failed: %v", w.opts.CertFile, err) return } - keyFileContents, err := ioutil.ReadFile(w.opts.KeyFile) + keyFileContents, err := os.ReadFile(w.opts.KeyFile) if err != nil { logger.Warningf("keyFile (%s) read failed: %v", w.opts.KeyFile, err) return @@ -191,7 +191,7 @@ func (w *watcher) updateRootDistributor() { return } - rootFileContents, err := ioutil.ReadFile(w.opts.RootFile) + rootFileContents, err := os.ReadFile(w.opts.RootFile) if err != nil { logger.Warningf("rootFile (%s) read failed: %v", w.opts.RootFile, err) return diff --git a/credentials/tls/certprovider/pemfile/watcher_test.go 
b/credentials/tls/certprovider/pemfile/watcher_test.go index 6cc65bd50001..521f762d3a41 100644 --- a/credentials/tls/certprovider/pemfile/watcher_test.go +++ b/credentials/tls/certprovider/pemfile/watcher_test.go @@ -21,7 +21,6 @@ package pemfile import ( "context" "fmt" - "io/ioutil" "os" "path" "testing" @@ -161,12 +160,12 @@ func (wd *wrappedDistributor) Set(km *certprovider.KeyMaterial, err error) { func createTmpFile(t *testing.T, src, dst string) { t.Helper() - data, err := ioutil.ReadFile(src) + data, err := os.ReadFile(src) if err != nil { - t.Fatalf("ioutil.ReadFile(%q) failed: %v", src, err) + t.Fatalf("os.ReadFile(%q) failed: %v", src, err) } - if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", dst, err) + if err := os.WriteFile(dst, data, os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", dst, err) } t.Logf("Wrote file at: %s", dst) t.Logf("%s", string(data)) @@ -181,9 +180,9 @@ func createTmpDirWithFiles(t *testing.T, dirSuffix, certSrc, keySrc, rootSrc str // Create a temp directory. Passing an empty string for the first argument // uses the system temp directory. - dir, err := ioutil.TempDir("", dirSuffix) + dir, err := os.MkdirTemp("", dirSuffix) if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) + t.Fatalf("os.MkdirTemp() failed: %v", err) } t.Logf("Using tmpdir: %s", dir) @@ -323,9 +322,9 @@ func (s) TestProvider_UpdateSuccessWithSymlink(t *testing.T) { dir2 := createTmpDirWithFiles(t, "update_with_symlink2_*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/server_ca_cert.pem") // Create a symlink under a new tempdir, and make it point to dir1. 
- tmpdir, err := ioutil.TempDir("", "test_symlink_*") + tmpdir, err := os.MkdirTemp("", "test_symlink_*") if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) + t.Fatalf("os.MkdirTemp() failed: %v", err) } symLinkName := path.Join(tmpdir, "test_symlink") if err := os.Symlink(dir1, symLinkName); err != nil { diff --git a/credentials/tls/certprovider/store_test.go b/credentials/tls/certprovider/store_test.go index faeba8db4fe9..54384e8225ef 100644 --- a/credentials/tls/certprovider/store_test.go +++ b/credentials/tls/certprovider/store_test.go @@ -24,7 +24,7 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" + "os" "testing" "time" @@ -126,7 +126,7 @@ func loadKeyMaterials(t *testing.T, cert, key, ca string) *KeyMaterial { t.Fatalf("Failed to load keyPair: %v", err) } - pemData, err := ioutil.ReadFile(testdata.Path(ca)) + pemData, err := os.ReadFile(testdata.Path(ca)) if err != nil { t.Fatal(err) } diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index f4b86df060b2..456af3454842 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -24,8 +24,8 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" "net" + "os" "strings" "testing" "time" @@ -159,7 +159,7 @@ func testServerMutualTLSHandshake(rawConn net.Conn) handshakeResult { if err != nil { return handshakeResult{err: err} } - pemData, err := ioutil.ReadFile(testdata.Path("x509/client_ca_cert.pem")) + pemData, err := os.ReadFile(testdata.Path("x509/client_ca_cert.pem")) if err != nil { return handshakeResult{err: err} } @@ -204,7 +204,7 @@ func makeIdentityProvider(t *testing.T, certPath, keyPath string) certprovider.P // makeRootProvider creates a new instance of the fakeProvider returning the // root key material specified in the provider file paths. 
func makeRootProvider(t *testing.T, caPath string) *fakeProvider { - pemData, err := ioutil.ReadFile(testdata.Path(caPath)) + pemData, err := os.ReadFile(testdata.Path(caPath)) if err != nil { t.Fatal(err) } diff --git a/credentials/xds/xds_server_test.go b/credentials/xds/xds_server_test.go index 5c29ba38c286..bc32a04e69a1 100644 --- a/credentials/xds/xds_server_test.go +++ b/credentials/xds/xds_server_test.go @@ -24,8 +24,8 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" "net" + "os" "strings" "testing" "time" @@ -39,7 +39,7 @@ import ( func makeClientTLSConfig(t *testing.T, mTLS bool) *tls.Config { t.Helper() - pemData, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + pemData, err := os.ReadFile(testdata.Path("x509/server_ca_cert.pem")) if err != nil { t.Fatal(err) } diff --git a/encoding/gzip/gzip.go b/encoding/gzip/gzip.go index ca820bd47d44..a3bb173c24ac 100644 --- a/encoding/gzip/gzip.go +++ b/encoding/gzip/gzip.go @@ -30,7 +30,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "sync" "google.golang.org/grpc/encoding" @@ -42,7 +41,7 @@ const Name = "gzip" func init() { c := &compressor{} c.poolCompressor.New = func() interface{} { - return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor} + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} } encoding.RegisterCompressor(c) } @@ -63,7 +62,7 @@ func SetLevel(level int) error { } c := encoding.GetCompressor(Name).(*compressor) c.poolCompressor.New = func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } diff --git a/examples/features/encryption/mTLS/client/main.go b/examples/features/encryption/mTLS/client/main.go index 3b5b4f31c108..4bc1ef06defa 100644 --- a/examples/features/encryption/mTLS/client/main.go +++ b/examples/features/encryption/mTLS/client/main.go @@ -25,8 +25,8 @@ import ( "crypto/x509" "flag" "fmt" - "io/ioutil" "log" 
+ "os" "time" "google.golang.org/grpc" @@ -57,7 +57,7 @@ func main() { ca := x509.NewCertPool() caFilePath := data.Path("x509/ca_cert.pem") - caBytes, err := ioutil.ReadFile(caFilePath) + caBytes, err := os.ReadFile(caFilePath) if err != nil { log.Fatalf("failed to read ca cert %q: %v", caFilePath, err) } diff --git a/examples/features/encryption/mTLS/server/main.go b/examples/features/encryption/mTLS/server/main.go index cdcc676b507b..edd6829dcb91 100644 --- a/examples/features/encryption/mTLS/server/main.go +++ b/examples/features/encryption/mTLS/server/main.go @@ -25,9 +25,9 @@ import ( "crypto/x509" "flag" "fmt" - "io/ioutil" "log" "net" + "os" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -56,7 +56,7 @@ func main() { ca := x509.NewCertPool() caFilePath := data.Path("x509/client_ca_cert.pem") - caBytes, err := ioutil.ReadFile(caFilePath) + caBytes, err := os.ReadFile(caFilePath) if err != nil { log.Fatalf("failed to read ca cert %q: %v", caFilePath, err) } diff --git a/examples/route_guide/server/server.go b/examples/route_guide/server/server.go index 7c09e533ee95..2a35a6f78aee 100644 --- a/examples/route_guide/server/server.go +++ b/examples/route_guide/server/server.go @@ -28,10 +28,10 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" "math" "net" + "os" "sync" "time" @@ -155,7 +155,7 @@ func (s *routeGuideServer) loadFeatures(filePath string) { var data []byte if filePath != "" { var err error - data, err = ioutil.ReadFile(filePath) + data, err = os.ReadFile(filePath) if err != nil { log.Fatalf("Failed to load default features: %v", err) } diff --git a/gcp/observability/config.go b/gcp/observability/config.go index 0b6067d8e770..b361bc367c01 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -23,7 +23,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "regexp" @@ -116,7 +115,7 @@ func parseObservabilityConfig() (*config, error) { if envconfig.ObservabilityConfig != "" { logger.Warning("Ignoring 
GRPC_GCP_OBSERVABILITY_CONFIG and using GRPC_GCP_OBSERVABILITY_CONFIG_FILE contents.") } - content, err := ioutil.ReadFile(f) // TODO: Switch to os.ReadFile once dropped support for go 1.15 + content, err := os.ReadFile(f) if err != nil { return nil, fmt.Errorf("error reading observability configuration file %q: %v", f, err) } diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index b4ef9774282a..e58a46452757 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -23,7 +23,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "sync" "testing" @@ -188,7 +187,7 @@ func (s) TestRefuseStartWithExcludeAndWildCardAll(t *testing.T) { // also sets the environment variable GRPC_CONFIG_OBSERVABILITY_JSON to point to // this created config. func createTmpConfigInFileSystem(rawJSON string) (func(), error) { - configJSONFile, err := ioutil.TempFile(os.TempDir(), "configJSON-") + configJSONFile, err := os.CreateTemp(os.TempDir(), "configJSON-") if err != nil { return nil, fmt.Errorf("cannot create file %v: %v", configJSONFile.Name(), err) } diff --git a/grpclog/loggerv2.go b/grpclog/loggerv2.go index b5560b47ec4b..5de66e40d365 100644 --- a/grpclog/loggerv2.go +++ b/grpclog/loggerv2.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "os" "strconv" @@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. 
func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { diff --git a/internal/credentials/spiffe_test.go b/internal/credentials/spiffe_test.go index 599481ad0bf9..0011ed012bbd 100644 --- a/internal/credentials/spiffe_test.go +++ b/internal/credentials/spiffe_test.go @@ -22,8 +22,8 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "io/ioutil" "net/url" + "os" "testing" "google.golang.org/grpc/internal/grpctest" @@ -209,9 +209,9 @@ func (s) TestSPIFFEIDFromCert(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - data, err := ioutil.ReadFile(testdata.Path(tt.dataPath)) + data, err := os.ReadFile(testdata.Path(tt.dataPath)) if err != nil { - t.Fatalf("ioutil.ReadFile(%s) failed: %v", testdata.Path(tt.dataPath), err) + t.Fatalf("os.ReadFile(%s) failed: %v", testdata.Path(tt.dataPath), err) } block, _ := pem.Decode(data) if block == nil { diff --git a/internal/googlecloud/manufacturer_linux.go b/internal/googlecloud/manufacturer_linux.go index e53b8ffc837f..6e455fb0a822 100644 --- a/internal/googlecloud/manufacturer_linux.go +++ b/internal/googlecloud/manufacturer_linux.go @@ -18,10 +18,10 @@ package googlecloud -import "io/ioutil" +import "os" const linuxProductNameFile = "/sys/class/dmi/id/product_name" func manufacturer() ([]byte, error) { - return ioutil.ReadFile(linuxProductNameFile) + return os.ReadFile(linuxProductNameFile) } diff --git a/internal/testutils/xds/bootstrap/bootstrap.go b/internal/testutils/xds/bootstrap/bootstrap.go index ba7953bd05e2..a8c49612bd59 100644 --- a/internal/testutils/xds/bootstrap/bootstrap.go +++ b/internal/testutils/xds/bootstrap/bootstrap.go @@ -22,7 +22,6 @@ package bootstrap import ( "encoding/json" "fmt" - "io/ioutil" "os" "google.golang.org/grpc/grpclog" @@ -81,12 +80,12 @@ func CreateFile(opts 
Options) (func(), error) { if err != nil { return nil, err } - f, err := ioutil.TempFile("", "test_xds_bootstrap_*") + f, err := os.CreateTemp("", "test_xds_bootstrap_*") if err != nil { return nil, fmt.Errorf("failed to created bootstrap file: %v", err) } - if err := ioutil.WriteFile(f.Name(), bootstrapContents, 0644); err != nil { + if err := os.WriteFile(f.Name(), bootstrapContents, 0644); err != nil { return nil, fmt.Errorf("failed to created bootstrap file: %v", err) } logger.Infof("Created bootstrap file at %q with contents: %s\n", f.Name(), bootstrapContents) diff --git a/internal/testutils/xds/e2e/setup_certs.go b/internal/testutils/xds/e2e/setup_certs.go index 62ea51d04d7f..799e18564879 100644 --- a/internal/testutils/xds/e2e/setup_certs.go +++ b/internal/testutils/xds/e2e/setup_certs.go @@ -22,7 +22,6 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "os" "path" "testing" @@ -39,12 +38,12 @@ const ( ) func createTmpFile(src, dst string) error { - data, err := ioutil.ReadFile(src) + data, err := os.ReadFile(src) if err != nil { - return fmt.Errorf("ioutil.ReadFile(%q) failed: %v", src, err) + return fmt.Errorf("os.ReadFile(%q) failed: %v", src, err) } - if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { - return fmt.Errorf("ioutil.WriteFile(%q) failed: %v", dst, err) + if err := os.WriteFile(dst, data, os.ModePerm); err != nil { + return fmt.Errorf("os.WriteFile(%q) failed: %v", dst, err) } return nil } @@ -56,9 +55,9 @@ func createTmpFile(src, dst string) error { func createTmpDirWithFiles(dirSuffix, certSrc, keySrc, rootSrc string) (string, error) { // Create a temp directory. Passing an empty string for the first argument // uses the system temp directory. 
- dir, err := ioutil.TempDir("", dirSuffix) + dir, err := os.MkdirTemp("", dirSuffix) if err != nil { - return "", fmt.Errorf("ioutil.TempDir() failed: %v", err) + return "", fmt.Errorf("os.MkdirTemp() failed: %v", err) } if err := createTmpFile(testdata.Path(certSrc), path.Join(dir, certFile)); err != nil { @@ -82,9 +81,9 @@ func CreateClientTLSCredentials(t *testing.T) credentials.TransportCredentials { if err != nil { t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) } - b, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + b, err := os.ReadFile(testdata.Path("x509/server_ca_cert.pem")) if err != nil { - t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) + t.Fatalf("os.ReadFile(x509/server_ca_cert.pem) failed: %v", err) } roots := x509.NewCertPool() if !roots.AppendCertsFromPEM(b) { diff --git a/interop/client/client.go b/interop/client/client.go index 194c37664788..980ed9942589 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -23,8 +23,8 @@ import ( "crypto/tls" "crypto/x509" "flag" - "io/ioutil" "net" + "os" "strconv" "time" @@ -154,7 +154,7 @@ func main() { if *caFile == "" { *caFile = testdata.Path("ca.pem") } - b, err := ioutil.ReadFile(*caFile) + b, err := os.ReadFile(*caFile) if err != nil { logger.Fatalf("Failed to read root certificate file %q: %v", *caFile, err) } diff --git a/interop/test_utils.go b/interop/test_utils.go index 50bd2010ffa4..6f6cde7d846c 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -24,7 +24,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "strings" "time" @@ -279,7 +278,7 @@ func DoComputeEngineCreds(tc testgrpc.TestServiceClient, serviceAccount, oauthSc } func getServiceAccountJSONKey(keyFile string) []byte { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { logger.Fatalf("Failed to read the service account key file: %v", err) } diff --git a/rpc_util.go 
b/rpc_util.go index 934fc1aa015e..d5c6f70e7be7 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -746,7 +745,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index d547985dd74a..d5a620d14f96 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ b/security/advancedtls/advancedtls_integration_test.go @@ -23,7 +23,6 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "os" "sync" @@ -436,27 +435,27 @@ type tmpCredsFiles struct { func createTmpFiles() (*tmpCredsFiles, error) { tmpFiles := &tmpCredsFiles{} var err error - tmpFiles.clientCertTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.clientCertTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.clientKeyTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.clientKeyTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.clientTrustTmp, err = 
ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.clientTrustTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.serverCertTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.serverCertTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.serverKeyTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.serverKeyTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.serverTrustTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.serverTrustTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } @@ -496,11 +495,11 @@ func (tmpFiles *tmpCredsFiles) removeFiles() { } func copyFileContents(sourceFile, destinationFile string) error { - input, err := ioutil.ReadFile(sourceFile) + input, err := os.ReadFile(sourceFile) if err != nil { return err } - err = ioutil.WriteFile(destinationFile, input, 0644) + err = os.WriteFile(destinationFile, input, 0644) if err != nil { return err } diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go index 4812435881ac..b1b2e30530fb 100644 --- a/security/advancedtls/crl.go +++ b/security/advancedtls/crl.go @@ -30,7 +30,7 @@ import ( "encoding/pem" "errors" "fmt" - "io/ioutil" + "os" "path/filepath" "strings" "time" @@ -434,7 +434,7 @@ func fetchCRL(rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, erro return nil, fmt.Errorf("asn1.Unmarshal(Issuer) len(rest) = %v, err = %v", len(rest), err) } crlPath := fmt.Sprintf("%s.r%d", filepath.Join(cfg.RootDir, x509NameHash(r)), i) - crlBytes, err := ioutil.ReadFile(crlPath) + crlBytes, err := os.ReadFile(crlPath) if err != nil { // Break when we can't read a CRL file. 
grpclogLogger.Infof("readFile: %v", err) diff --git a/security/advancedtls/crl_test.go b/security/advancedtls/crl_test.go index 153074a28c5a..65ef3ca1b738 100644 --- a/security/advancedtls/crl_test.go +++ b/security/advancedtls/crl_test.go @@ -29,7 +29,6 @@ import ( "encoding/hex" "encoding/pem" "fmt" - "io/ioutil" "math/big" "net" "os" @@ -320,9 +319,9 @@ func makeChain(t *testing.T, name string) []*x509.Certificate { certChain := make([]*x509.Certificate, 0) - rest, err := ioutil.ReadFile(name) + rest, err := os.ReadFile(name) if err != nil { - t.Fatalf("ioutil.ReadFile(%v) failed %v", name, err) + t.Fatalf("os.ReadFile(%v) failed %v", name, err) } for len(rest) > 0 { var block *pem.Block @@ -338,7 +337,7 @@ func makeChain(t *testing.T, name string) []*x509.Certificate { } func loadCRL(t *testing.T, path string) *certificateListExt { - b, err := ioutil.ReadFile(path) + b, err := os.ReadFile(path) if err != nil { t.Fatalf("readFile(%v) failed err = %v", path, err) } @@ -682,9 +681,9 @@ func TestVerifyConnection(t *testing.T) { } }() - dir, err := ioutil.TempDir("", "crl_dir") + dir, err := os.MkdirTemp("", "crl_dir") if err != nil { - t.Fatalf("ioutil.TempDir failed err = %v", err) + t.Fatalf("os.MkdirTemp failed err = %v", err) } defer os.RemoveAll(dir) @@ -693,9 +692,9 @@ func TestVerifyConnection(t *testing.T) { t.Fatalf("templ.CreateCRL failed err = %v", err) } - err = ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.r0", x509NameHash(cert.Subject.ToRDNSequence()))), crl, 0777) + err = os.WriteFile(path.Join(dir, fmt.Sprintf("%s.r0", x509NameHash(cert.Subject.ToRDNSequence()))), crl, 0777) if err != nil { - t.Fatalf("ioutil.WriteFile failed err = %v", err) + t.Fatalf("os.WriteFile failed err = %v", err) } cp := x509.NewCertPool() diff --git a/security/advancedtls/internal/testutils/testutils.go b/security/advancedtls/internal/testutils/testutils.go index a2c048882b7a..1bc0dc3bf4e2 100644 --- a/security/advancedtls/internal/testutils/testutils.go +++ 
b/security/advancedtls/internal/testutils/testutils.go @@ -22,7 +22,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" "google.golang.org/grpc/security/advancedtls/testdata" ) @@ -58,7 +58,7 @@ type CertStore struct { } func readTrustCert(fileName string) (*x509.CertPool, error) { - trustData, err := ioutil.ReadFile(fileName) + trustData, err := os.ReadFile(fileName) if err != nil { return nil, err } diff --git a/xds/googledirectpath/utils.go b/xds/googledirectpath/utils.go index 600441979785..de33cf48d0e5 100644 --- a/xds/googledirectpath/utils.go +++ b/xds/googledirectpath/utils.go @@ -21,7 +21,7 @@ package googledirectpath import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "sync" @@ -47,7 +47,7 @@ func getFromMetadata(timeout time.Duration, urlStr string) ([]byte, error) { if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("metadata server returned resp with non-OK: %v", resp) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("failed reading from metadata server: %v", err) } diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 97fe4a8b0792..d0caa0f77603 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -24,8 +24,8 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" "net/url" + "os" "strings" "github.com/golang/protobuf/jsonpb" @@ -64,7 +64,7 @@ func init() { var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) // For overriding in unit tests. -var bootstrapFileReadFunc = ioutil.ReadFile +var bootstrapFileReadFunc = os.ReadFile // insecureCredsBuilder implements the `Credentials` interface defined in // package `xds/bootstrap` and encapsulates an insecure credential. 
From 3b2da532bc28340ce282c34caf8be0581dceeaa4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 10 Jan 2023 15:46:57 -0800 Subject: [PATCH 731/998] xdsclient: handle resource not found errors correctly (#5912) --- xds/internal/xdsclient/authority.go | 29 ++++- xds/internal/xdsclient/clientimpl_watchers.go | 8 +- .../xdsclient/e2e_test/cds_watchers_test.go | 97 ++++++++++++++++- .../xdsclient/e2e_test/eds_watchers_test.go | 2 +- .../xdsclient/e2e_test/lds_watchers_test.go | 100 +++++++++++++++++- .../xdsclient/e2e_test/rds_watchers_test.go | 2 +- .../e2e_test/resource_update_test.go | 24 ++--- 7 files changed, 239 insertions(+), 23 deletions(-) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 7a533e662207..3d4f99e88d5c 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -200,6 +200,24 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty return } for name, state := range resourceStates { + if state.cache == nil { + // If the resource state does not contain a cached update, which can + // happen when: + // - resource was newly requested but has not yet been received, or, + // - resource was removed as part of a previous update, + // we don't want to generate an error for the watchers. + // + // For the first of the above two conditions, this ADS response may + // be in reaction to an earlier request that did not yet request the + // new resource, so its absence from the response does not + // necessarily indicate that the resource does not exist. For that + // case, we rely on the request timeout instead. + // + // For the second of the above two conditions, we already generated + // an error when we received the first response which removed this + // resource. So, there is no need to generate another one. 
+ continue + } if _, ok := updates[name]; !ok { // The metadata status is set to "ServiceStatusNotExist" if a // previous update deleted this resource, in which case we do not @@ -338,7 +356,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w wState: watchStateStarted, } state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { - a.handleWatchTimerExpiry(state, fmt.Errorf("watch for resource %q of type %s timed out", resourceName, rType.TypeEnum().String())) + a.handleWatchTimerExpiry(rType, resourceName, state) }) resources[resourceName] = state a.sendDiscoveryRequestLocked(rType, resources) @@ -376,7 +394,8 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w } } -func (a *authority) handleWatchTimerExpiry(state *resourceState, err error) { +func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName string, state *resourceState) { + a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeEnum().String()) a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -385,9 +404,13 @@ func (a *authority) handleWatchTimerExpiry(state *resourceState, err error) { } state.wState = watchStateTimeout + // With the watch timer firing, it is safe to assume that the resource does + // not exist on the management server. 
+ state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) } } diff --git a/xds/internal/xdsclient/clientimpl_watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go index 05eea38f4376..77c4a614a228 100644 --- a/xds/internal/xdsclient/clientimpl_watchers.go +++ b/xds/internal/xdsclient/clientimpl_watchers.go @@ -42,7 +42,7 @@ func (l *listenerWatcher) OnError(err error) { } func (l *listenerWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type Listener not found in received response", l.resourceName) + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Listener not found in received response", l.resourceName) l.cb(xdsresource.ListenerUpdate{}, err) } @@ -74,7 +74,7 @@ func (r *routeConfigWatcher) OnError(err error) { } func (r *routeConfigWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type RouteConfiguration not found in received response", r.resourceName) + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type RouteConfiguration not found in received response", r.resourceName) r.cb(xdsresource.RouteConfigUpdate{}, err) } @@ -106,7 +106,7 @@ func (c *clusterWatcher) OnError(err error) { } func (c *clusterWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type Cluster not found in received response", c.resourceName) + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", c.resourceName) c.cb(xdsresource.ClusterUpdate{}, 
err) } @@ -141,7 +141,7 @@ func (c *endpointsWatcher) OnError(err error) { } func (c *endpointsWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Resource name %q of type Endpoints not found in received response", c.resourceName) + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Endpoints not found in received response", c.resourceName) c.cb(xdsresource.EndpointsUpdate{}, err) } diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go index 8a95c087dd29..0a7655048bf6 100644 --- a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go @@ -559,7 +559,7 @@ func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Verify that an empty update with the expected error is received. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - wantErr := fmt.Errorf("watch for resource %q of type Cluster timed out", cdsName) + wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") if err := verifyClusterUpdate(ctx, updateCh, xdsresource.ClusterUpdateErrTuple{Err: wantErr}); err != nil { t.Fatal(err) } @@ -866,3 +866,98 @@ func (s) TestCDSWatch_PartialValid(t *testing.T) { t.Fatal(err) } } + +// TestCDSWatch_PartialResponse covers the case where a response from the +// management server does not contain all requested resources. CDS responses are +// supposed to contain all requested resources, and the absence of one usually +// indicates that the management server does not know about it. 
In cases where +// the server has never responded with this resource before, the xDS client is +// expected to wait for the watch timeout to expire before concluding that the +// resource does not exist on the server +func (s) TestCDSWatch_PartialResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for two cluster resources and have the + // callbacks push the received updates on to a channel. + resourceName1 := cdsName + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(resourceName1, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + resourceName2 := cdsNameNewStyle + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(resourceName2, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server to return only one of the two cluster + // resources, corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for first watcher. 
+ wantUpdate1 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName1, + EDSServiceName: edsName, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate1); err != nil { + t.Fatal(err) + } + + // Verify that the second watcher does not get an update with an error. + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Configure the management server to return two cluster resources, + // corresponding to the registered watches. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the second watcher. + wantUpdate2 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName2, + EDSServiceName: edsNameNewStyle, + }, + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Verify that the first watcher gets no update, as the first resource did + // not change. + if err := verifyNoClusterUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go index d8a515233438..7daed4bc964a 100644 --- a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go @@ -616,7 +616,7 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Verify that an empty update with the expected error is received. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - wantErr := fmt.Errorf("watch for resource %q of type ClusterLoadAssignment timed out", rdsName) + wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") if err := verifyEndpointsUpdate(ctx, updateCh, xdsresource.EndpointsUpdateErrTuple{Err: wantErr}); err != nil { t.Fatal(err) } diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index 9510c33ad68e..b4f0310eb538 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -608,7 +608,7 @@ func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Verify that an empty update with the expected error is received. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - wantErr := fmt.Errorf("watch for resource %q of type Listener timed out", ldsName) + wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") if err := verifyListenerUpdate(ctx, updateCh, xdsresource.ListenerUpdateErrTuple{Err: wantErr}); err != nil { t.Fatal(err) } @@ -916,3 +916,101 @@ func (s) TestLDSWatch_PartialValid(t *testing.T) { t.Fatal(err) } } + +// TestLDSWatch_PartialResponse covers the case where a response from the +// management server does not contain all requested resources. LDS responses are +// supposed to contain all requested resources, and the absence of one usually +// indicates that the management server does not know about it. 
In cases where +// the server has never responded with this resource before, the xDS client is +// expected to wait for the watch timeout to expire before concluding that the +// resource does not exist on the server +func (s) TestLDSWatch_PartialResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer client.Close() + + // Register two watches for two listener resources and have the + // callbacks push the received updates on to a channel. + resourceName1 := ldsName + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(resourceName1, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + + resourceName2 := ldsNameNewStyle + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management server to return only one of the two listener + // resources, corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(resourceName1, rdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for first watcher. 
+ wantUpdate1 := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate1); err != nil { + t.Fatal(err) + } + + // Verify that the second watcher does not get an update with an error. + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Configure the management server to return two listener resources, + // corresponding to the registered watches. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(resourceName1, rdsName), + e2e.DefaultClientListener(resourceName2, rdsName), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the second watcher. + wantUpdate2 := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Verify that the first watcher gets no update, as the first resource did + // not change. + if err := verifyNoListenerUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go index e8f38d41922c..f2c21a8c77d8 100644 --- a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go @@ -649,7 +649,7 @@ func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Verify that an empty update with the expected error is received. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - wantErr := fmt.Errorf("watch for resource %q of type RouteConfiguration timed out", rdsName) + wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") if err := verifyRouteConfigUpdate(ctx, updateCh, xdsresource.RouteConfigUpdateErrTuple{Err: wantErr}); err != nil { t.Fatal(err) } diff --git a/xds/internal/xdsclient/e2e_test/resource_update_test.go b/xds/internal/xdsclient/e2e_test/resource_update_test.go index 7294b40f93cd..9c1039315971 100644 --- a/xds/internal/xdsclient/e2e_test/resource_update_test.go +++ b/xds/internal/xdsclient/e2e_test/resource_update_test.go @@ -387,9 +387,9 @@ func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { Value: []byte{1, 2, 3, 4}, }}, }, - wantErr: fmt.Sprintf("watch for resource %q of type RouteConfigResource timed out", resourceName1), + wantErr: "RouteConfiguration not found in received response", wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ - "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, }, }, { @@ -399,9 +399,9 @@ func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", VersionInfo: "1", }, - wantErr: fmt.Sprintf("watch for resource %q of type RouteConfigResource timed out", resourceName1), + wantErr: "RouteConfiguration not found in received response", wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ - "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, }, }, { @@ -412,9 +412,9 @@ func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { VersionInfo: "1", Resources: 
[]*anypb.Any{testutils.MarshalAny(&v3clusterpb.Cluster{})}, }, - wantErr: fmt.Sprintf("watch for resource %q of type RouteConfigResource timed out", resourceName1), + wantErr: "RouteConfiguration not found in received response", wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ - "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, }, }, { @@ -926,9 +926,9 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { Value: []byte{1, 2, 3, 4}, }}, }, - wantErr: fmt.Sprintf("watch for resource %q of type EndpointsResource timed out", resourceName1), + wantErr: "Endpoints not found in received response", wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ - "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, }, }, { @@ -938,9 +938,9 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", VersionInfo: "1", }, - wantErr: fmt.Sprintf("watch for resource %q of type EndpointsResource timed out", resourceName1), + wantErr: "Endpoints not found in received response", wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ - "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, }, }, { @@ -951,9 +951,9 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { VersionInfo: "1", Resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{})}, }, - wantErr: fmt.Sprintf("watch for resource %q of type EndpointsResource timed out", resourceName1), + wantErr: "Endpoints not found in received response", 
wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ - "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, }, }, { From 9b73c42daa31adf946146fcb85eb7b96a1e5ee03 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 10 Jan 2023 16:31:19 -0800 Subject: [PATCH 732/998] test/xds: add tests for scenarios where authority in resource name is not specified in bootstrap config (#5890) Fixes https://github.com/grpc/grpc-go/issues/5429 --- test/xds/xds_client_federation_test.go | 134 ++++++++++++++++++++++++ test/xds/xds_client_integration_test.go | 1 + 2 files changed, 135 insertions(+) diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index 13566a5db7dc..b6d99de34ea8 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -21,16 +21,19 @@ package xds_test import ( "context" "fmt" + "strings" "testing" "github.com/google/uuid" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" @@ -140,3 +143,134 @@ func (s) TestClientSideFederation(t *testing.T) { t.Fatalf("rpc EmptyCall() failed: %v", err) } } + +// TestFederation_UnknownAuthorityInDialTarget tests the case where a ClientConn +// is created with a dial target containing an authority which is not specified +// in the bootstrap configuration. 
The test verifies that RPCs on the ClientConn +// fail with an appropriate error. +func (s) TestFederation_UnknownAuthorityInDialTarget(t *testing.T) { + oldXDSFederation := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldXDSFederation }() + + // Setting up the management server is not *really* required for this test + // case. All we need is a bootstrap configuration which does not contain the + // authority mentioned in the dial target. But setting up the management + // server and actually making an RPC ensures that the xDS client is + // configured properly, and when we dial with an unknown authority in the + // next step, we can be sure that the error we receive is legitimate. + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + port, cleanup2 := startTestService(t, nil) + defer cleanup2() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. 
+ target := fmt.Sprintf("xds:///%s", serviceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("Dialing target %q: %v", target, err) + } + defer cc.Close() + t.Log("Created ClientConn to test service") + + client := testpb.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() RPC: %v", err) + } + t.Log("Successfully performed an EmptyCall RPC") + + target = fmt.Sprintf("xds://unknown-authority/%s", serviceName) + t.Logf("Dialing target %q with unknown authority which is expected to fail", target) + const wantErr = `authority "unknown-authority" is not found in the bootstrap file` + _, err = grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("grpc.Dial(%q) returned %v, want: %s", target, err, wantErr) + } +} + +// TestFederation_UnknownAuthorityInReceivedResponse tests the case where the +// LDS resource associated with the dial target contains an RDS resource name +// with an authority which is not specified in the bootstrap configuration. The +// test verifies that RPCs fail with an appropriate error. 
+func (s) TestFederation_UnknownAuthorityInReceivedResponse(t *testing.T) { + oldXDSFederation := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldXDSFederation }() + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + nodeID := uuid.New().String() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + Version: bootstrap.TransportV3, + NodeID: nodeID, + ServerURI: mgmtServer.Address, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + }) + if err != nil { + t.Fatal(err) + } + + resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error)) + resolver, err := resolverBuilder(bootstrapContents) + if err != nil { + t.Fatalf("Creating xDS resolver for testing: %v", err) + } + + // LDS is old style name. + // RDS is new style, with an unknown authority. + const serviceName = "my-service-client-side-xds" + const unknownAuthority = "unknown-authority" + ldsName := serviceName + rdsName := fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/%s", unknownAuthority, "route-"+serviceName) + + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "cluster-"+serviceName)}, + SkipValidation: true, // This update has only LDS and RDS resources. 
+ } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + target := fmt.Sprintf("xds:///%s", serviceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("Dialing target %q: %v", target, err) + } + defer cc.Close() + t.Log("Created ClientConn to test service") + + client := testpb.NewTestServiceClient(cc) + _, err = client.EmptyCall(ctx, &testpb.Empty{}) + if err == nil { + t.Fatal("EmptyCall RPC succeeded for target with unknown authority when expected to fail") + } + if got, want := status.Code(err), codes.Unavailable; got != want { + t.Fatalf("EmptyCall RPC returned status code: %v, want %v", got, want) + } + if wantErr := `failed to find authority "unknown-authority"`; !strings.Contains(err.Error(), wantErr) { + t.Fatalf("EmptyCall RPC returned error: %v, want %v", err, wantErr) + } +} diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go index 492972b7cc51..4ffdac86ece1 100644 --- a/test/xds/xds_client_integration_test.go +++ b/test/xds/xds_client_integration_test.go @@ -70,6 +70,7 @@ func startTestService(t *testing.T, server *stubserver.StubServer) (uint32, func if err != nil { t.Fatalf("invalid serving port for stub server: %v", err) } + t.Logf("Started test service backend at %q", server.Address) return uint32(port), server.Stop } From 974a5ef80498e9bcd5f3043d610823a072bfd018 Mon Sep 17 00:00:00 2001 From: Simon Kotwicz Date: Wed, 11 Jan 2023 11:07:56 -0800 Subject: [PATCH 733/998] grpc: document defaults in MaxCallMsgSize functions (#5916) --- rpc_util.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/rpc_util.go b/rpc_util.go index d5c6f70e7be7..c9438c064f3f 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -296,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) 
error { func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -319,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } From 42b7b6331c38f2ce533a589cb1ef789cad97abd9 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 11 Jan 2023 14:21:24 -0500 Subject: [PATCH 734/998] stats/opencensus: OpenCensus instrumentation api (#5919) --- stats/opencensus/go.mod | 20 + stats/opencensus/go.sum | 967 +++++++++++++++++++++++++++++++++ stats/opencensus/opencensus.go | 129 +++++ 3 files changed, 1116 insertions(+) create mode 100644 stats/opencensus/go.mod create mode 100644 stats/opencensus/go.sum create mode 100644 stats/opencensus/opencensus.go diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod new file mode 100644 index 000000000000..f7886ec6f5b5 --- /dev/null +++ b/stats/opencensus/go.mod @@ -0,0 +1,20 @@ +module google.golang.org/grpc/stats/opencensus + +go 1.17 + +require ( + go.opencensus.io v0.24.0 + google.golang.org/grpc v1.51.0 +) + +require ( + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.5.2 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + 
golang.org/x/text v0.5.0 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) + +replace google.golang.org/grpc => ../../ diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum new file mode 100644 index 000000000000..4f1cc2c21f54 --- /dev/null +++ b/stats/opencensus/go.sum @@ -0,0 +1,967 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= 
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= 
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod 
h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= 
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod 
h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform 
v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod 
h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/functions v1.6.0/go.mod 
h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= 
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod 
h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= 
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod 
h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= 
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security 
v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= 
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod 
h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod 
h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod 
h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod 
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= 
+golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod 
h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text 
v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= 
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= 
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto 
v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= 
+google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go new file 
mode 100644 index 000000000000..ea9368ebbfee --- /dev/null +++ b/stats/opencensus/opencensus.go @@ -0,0 +1,129 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package opencensus implements opencensus instrumentation code for gRPC-Go +// clients and servers. +package opencensus + +import ( + "context" + + "go.opencensus.io/trace" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" +) + +var ( + joinDialOptions = internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption) +) + +// TraceOptions are the tracing options for opencensus instrumentation. +type TraceOptions struct { + // TS is the Sampler used for tracing. + TS trace.Sampler + // DisableTrace determines whether traces are disabled for an OpenCensus + // Dial or Server option. will overwrite any global option setting. + DisableTrace bool +} + +// DialOption returns a dial option which enables OpenCensus instrumentation +// code for a grpc.ClientConn. +// +// Client applications interested in instrumenting their grpc.ClientConn should +// pass the dial option returned from this function as the first dial option to +// grpc.Dial(). +// +// Using this option will always lead to instrumentation, however in order to +// use the data an exporter must be registered with the OpenCensus trace package +// for traces and the OpenCensus view package for metrics. 
Client side has +// retries, so a Unary and Streaming Interceptor are registered to handle per +// RPC traces/metrics, and a Stats Handler is registered to handle per RPC +// attempt trace/metrics. These three components registered work together in +// conjunction, and do not work standalone. It is not supported to use this +// alongside another stats handler dial option. +func DialOption(to TraceOptions) grpc.DialOption { + return joinDialOptions(grpc.WithChainUnaryInterceptor(unaryInterceptor), grpc.WithChainStreamInterceptor(streamInterceptor), grpc.WithStatsHandler(&clientStatsHandler{to: to})) +} + +// ServerOption returns a server option which enables OpenCensus instrumentation +// code for a grpc.Server. +// +// Server applications interested in instrumenting their grpc.Server should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). +// +// Using this option will always lead to instrumentation, however in order to +// use the data an exporter must be registered with the OpenCensus trace package +// for traces and the OpenCensus view package for metrics. Server side does not +// have retries, so a registered Stats Handler is the only option that is +// returned. It is not supported to use this alongside another stats handler +// server option. +func ServerOption(to TraceOptions) grpc.ServerOption { + return grpc.StatsHandler(&serverStatsHandler{to: to}) +} + +// unaryInterceptor handles per RPC context management. It also handles per RPC +// tracing and stats. +func unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return invoker(ctx, method, req, reply, cc, opts...) +} + +// streamInterceptor handles per RPC context management. It also handles per RPC +// tracing and stats. 
+func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, opts...) +} + +type clientStatsHandler struct { + to TraceOptions +} + +// TagConn exists to satisfy stats.Handler. +func (csh *clientStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (csh *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC attempt context management. +func (csh *clientStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + return ctx +} + +func (csh *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {} + +type serverStatsHandler struct { + to TraceOptions +} + +// TagConn exists to satisfy stats.Handler. +func (ssh *serverStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (ssh *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC context management. +func (ssh *serverStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + return ctx +} + +// HandleRPC implements per RPC tracing and stats implementation. 
+func (ssh *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {} From 6de8f50f91068ed755f6a38a52053e7a05a6366e Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 11 Jan 2023 12:58:00 -0800 Subject: [PATCH 735/998] transport: drain client transport when streamID approaches maxStreamID (#5889) Fixes https://github.com/grpc/grpc-go/issues/5600 --- internal/transport/controlbuf.go | 16 +-- internal/transport/defaults.go | 5 + internal/transport/http2_client.go | 31 ++++-- internal/transport/transport_test.go | 46 ++++++++ test/transport_test.go | 153 +++++++++++++++++++++++++++ 5 files changed, 236 insertions(+), 15 deletions(-) create mode 100644 test/transport_test.go diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index aaa9c859a349..a5b7513f412d 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -650,16 +650,18 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. 
+ hdr.onOrphaned(errStreamDrain) + return nil + } if err := hdr.initStream(str.id); err != nil { - if err == errStreamDrain { // errStreamDrain need not close transport - return nil - } return err } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { diff --git a/internal/transport/defaults.go b/internal/transport/defaults.go index 9fa306b2e07a..73c939603559 100644 --- a/internal/transport/defaults.go +++ b/internal/transport/defaults.go @@ -47,3 +47,8 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of math.MaxUint32. It's exported so that tests can override it. +var MaxStreamID = uint32(3_221_225_472) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 8cf2a7a11039..667989f603d3 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -742,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. 
- err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) @@ -768,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } firstTry := true var ch chan struct{} + transportDrainRequired := false checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { @@ -783,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() @@ -862,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, sh.HandleRPC(s.ctx, outHeader) } } + if transportDrainRequired { + if logger.V(logLevel) { + logger.Infof("transport: t.nextID > MaxStreamID. Draining") + } + t.GracefulClose() + } return s, nil } @@ -1783,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index b41378b0024d..3c4b0876497e 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -536,6 +536,52 @@ func (s) TestInflightStreamClosing(t *testing.T) { } } +// Tests that when streamID > MaxStreamId, the current client transport drains. 
+func (s) TestClientTransportDrainsAfterStreamIDExhausted(t *testing.T) { + server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) + defer cancel() + defer server.stop() + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + + originalMaxStreamID := MaxStreamID + MaxStreamID = 3 + defer func() { + MaxStreamID = originalMaxStreamID + }() + + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + + s, err := ct.NewStream(ctx, callHdr) + if err != nil { + t.Fatalf("ct.NewStream() = %v", err) + } + if s.id != 1 { + t.Fatalf("Stream id: %d, want: 1", s.id) + } + + if got, want := ct.stateForTesting(), reachable; got != want { + t.Fatalf("Client transport state %v, want %v", got, want) + } + + // The expected stream ID here is 3 since stream IDs are incremented by 2. + s, err = ct.NewStream(ctx, callHdr) + if err != nil { + t.Fatalf("ct.NewStream() = %v", err) + } + if s.id != 3 { + t.Fatalf("Stream id: %d, want: 3", s.id) + } + + // Verifying that ct.state is draining when next stream ID > MaxStreamId. + if got, want := ct.stateForTesting(), draining; got != want { + t.Fatalf("Client transport state %v, want %v", got, want) + } +} + func (s) TestClientSendAndReceive(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() diff --git a/test/transport_test.go b/test/transport_test.go new file mode 100644 index 000000000000..c78abdc5693e --- /dev/null +++ b/test/transport_test.go @@ -0,0 +1,153 @@ +/* +* +* Copyright 2023 gRPC authors. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* + */ +package test + +import ( + "context" + "io" + "net" + "sync" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +// connWrapperWithCloseCh wraps a net.Conn and fires an event when closed. +type connWrapperWithCloseCh struct { + net.Conn + close *grpcsync.Event +} + +// Close closes the connection and sends a value on the close channel. +func (cw *connWrapperWithCloseCh) Close() error { + cw.close.Fire() + return cw.Conn.Close() +} + +// These custom creds are used for storing the connections made by the client. +// The closeCh in conn can be used to detect when conn is closed. 
+type transportRestartCheckCreds struct { + mu sync.Mutex + connections []*connWrapperWithCloseCh +} + +func (c *transportRestartCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (c *transportRestartCheckCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + conn := &connWrapperWithCloseCh{Conn: rawConn, close: grpcsync.NewEvent()} + c.connections = append(c.connections, conn) + return conn, nil, nil +} +func (c *transportRestartCheckCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{} +} +func (c *transportRestartCheckCreds) Clone() credentials.TransportCredentials { + return c +} +func (c *transportRestartCheckCreds) OverrideServerName(s string) error { + return nil +} + +// Tests that the client transport drains and restarts when next stream ID exceeds +// MaxStreamID. This test also verifies that subsequent RPCs use a new client +// transport and the old transport is closed. +func (s) TestClientTransportRestartsAfterStreamIDExhausted(t *testing.T) { + // Set the transport's MaxStreamID to 4 to cause connection to drain after 2 RPCs. 
+ originalMaxStreamID := transport.MaxStreamID + transport.MaxStreamID = 4 + defer func() { + transport.MaxStreamID = originalMaxStreamID + }() + + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return status.Errorf(codes.Internal, "unexpected error receiving: %v", err) + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { + return status.Errorf(codes.Internal, "unexpected error sending: %v", err) + } + if recv, err := stream.Recv(); err != io.EOF { + return status.Errorf(codes.Internal, "Recv = %v, %v; want _, io.EOF", recv, err) + } + return nil + }, + } + + creds := &transportRestartCheckCreds{} + if err := ss.Start(nil, grpc.WithTransportCredentials(creds)); err != nil { + t.Fatalf("Starting stubServer: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var streams []testpb.TestService_FullDuplexCallClient + + const numStreams = 3 + // expected number of conns when each stream is created i.e., 3rd stream is created + // on a new connection. + expectedNumConns := [numStreams]int{1, 1, 2} + + // Set up 3 streams. + for i := 0; i < numStreams; i++ { + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Creating FullDuplex stream: %v", err) + } + streams = append(streams, s) + // Verify expected num of conns after each stream is created. + if len(creds.connections) != expectedNumConns[i] { + t.Fatalf("Got number of connections created: %v, want: %v", len(creds.connections), expectedNumConns[i]) + } + } + + // Verify all streams still work. 
+ for i, stream := range streams { + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Sending on stream %d: %v", i, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("Receiving on stream %d: %v", i, err) + } + } + + for i, stream := range streams { + if err := stream.CloseSend(); err != nil { + t.Fatalf("CloseSend() on stream %d: %v", i, err) + } + } + + // Verifying first connection was closed. + select { + case <-creds.connections[0].close.Done(): + case <-ctx.Done(): + t.Fatal("Timeout expired when waiting for first client transport to close") + } +} From bf3ad352405d29974b516f8ff29dc8fd64fe46bc Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 11 Jan 2023 13:49:41 -0800 Subject: [PATCH 736/998] *: update all dependencies (#5924) --- examples/go.mod | 28 +- examples/go.sum | 123 +++- gcp/observability/go.mod | 16 +- gcp/observability/go.sum | 81 ++- go.mod | 28 +- go.sum | 666 ++---------------- security/advancedtls/examples/go.mod | 16 +- security/advancedtls/examples/go.sum | 963 +-------------------------- security/advancedtls/go.mod | 12 +- security/advancedtls/go.sum | 962 +------------------------- security/authorization/go.mod | 21 +- security/authorization/go.sum | 351 +++++++++- stats/opencensus/go.mod | 12 +- stats/opencensus/go.sum | 99 ++- test/tools/go.mod | 4 +- test/tools/go.sum | 18 +- vet.sh | 2 +- 17 files changed, 725 insertions(+), 2677 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index cb3f5ad8a70e..19b85d4f25aa 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -4,24 +4,24 @@ go 1.17 require ( github.com/golang/protobuf v1.5.2 - golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 - google.golang.org/grpc v1.50.1 + golang.org/x/oauth2 v0.4.0 + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f + google.golang.org/grpc v1.52.0 google.golang.org/protobuf v1.28.1 ) require ( - 
cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect - github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect - github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect - github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect + github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect + github.com/envoyproxy/go-control-plane v0.10.3 // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/examples/go.sum b/examples/go.sum index b894c7be2049..ec75a892562e 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -2,6 +2,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod 
h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -14,6 +15,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -25,18 +27,20 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= 
cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= @@ -79,6 +83,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= @@ -107,11 +112,16 @@ cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLq cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= 
cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= @@ -140,6 +150,7 @@ cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4c cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= 
cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= @@ -161,12 +172,14 @@ cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1 cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= @@ -186,16 +199,19 @@ cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZ cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= 
cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= @@ -204,10 +220,12 @@ cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiP cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/longrunning v0.1.1/go.mod 
h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= @@ -256,6 +274,9 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -310,6 +331,7 @@ cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5 cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod 
h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= @@ -319,6 +341,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= @@ -349,6 +372,7 @@ cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= @@ -365,29 +389,40 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= 
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -449,6 +484,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof 
v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -470,26 +506,47 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -503,13 +560,18 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -544,7 +606,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -574,11 +638,13 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -591,8 +657,9 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -616,8 +683,9 @@ golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7Lm golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -663,11 +731,13 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -675,6 +745,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -693,11 +764,13 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod 
h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -709,11 +782,13 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -758,6 +833,7 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -765,6 +841,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -823,6 +900,7 @@ google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod 
h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -864,7 +942,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -897,6 +977,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -932,8 +1013,13 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -957,6 +1043,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 0428a7bf5869..acbfadb8e1f7 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -8,32 +8,32 @@ require ( github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 - golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 + golang.org/x/oauth2 v0.4.0 google.golang.org/grpc v1.51.0 ) require ( cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.14.0 // indirect - cloud.google.com/go/compute/metadata v0.2.2 // indirect + cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/longrunning v0.3.0 // indirect cloud.google.com/go/monitoring v1.8.0 // indirect cloud.google.com/go/trace v1.4.0 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect - github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect + github.com/census-instrumentation/opencensus-proto 
v0.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect - golang.org/x/net v0.4.0 // indirect + golang.org/x/net v0.5.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index d10bc57e88a5..b60536b2d605 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -2,6 +2,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -14,6 +15,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod 
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -113,12 +115,14 @@ cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOt cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= -cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= 
cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= @@ -201,6 +205,8 @@ cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= @@ -223,6 +229,7 @@ cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+ cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= @@ -340,6 +347,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod 
h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= @@ -372,6 +380,7 @@ cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= @@ -394,26 +403,35 @@ github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zK github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -477,6 +495,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -502,9 +521,13 @@ github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMd github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -514,17 +537,27 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -550,12 +583,16 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -590,7 +627,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -620,11 +659,13 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -638,8 +679,9 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net 
v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -664,8 +706,9 @@ golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7Lm golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -712,11 +755,13 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -724,6 +769,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -745,12 +791,14 @@ golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -762,8 +810,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -813,6 +861,7 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -820,6 +869,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -923,7 +973,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -960,6 +1012,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto 
v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -995,11 +1048,13 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd h1:OjndDrsik+Gt+e6fs45z9AxiewiKyLKYpA45W5Kpkks= google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto 
v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/go.mod b/go.mod index 355d1937167e..8948f1390e37 100644 --- a/go.mod +++ b/go.mod @@ -3,26 +3,26 @@ module google.golang.org/grpc go 1.17 require ( - github.com/cespare/xxhash/v2 v2.1.1 - github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 - github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 - github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/cespare/xxhash/v2 v2.2.0 + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe + github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b + github.com/envoyproxy/go-control-plane v0.10.3 + github.com/golang/glog v1.0.0 github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 - golang.org/x/net v0.4.0 - golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - golang.org/x/sys v0.3.0 - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 + golang.org/x/net v0.5.0 + golang.org/x/oauth2 v0.4.0 + golang.org/x/sys v0.4.0 + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f google.golang.org/protobuf v1.28.1 ) require ( - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect - github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect - 
github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect - golang.org/x/text v0.5.0 // indirect + cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect + golang.org/x/text v0.6.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/go.sum b/go.sum index 07842338cc91..4fdfdcb98e46 100644 --- a/go.sum +++ b/go.sum @@ -13,401 +13,77 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod 
h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= 
-cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= 
-cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= 
-cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.1.0/go.mod 
h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datalabeling 
v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod 
h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod 
h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iap 
v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod 
h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= 
-cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= 
-cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/securitycenter 
v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= 
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/video v1.8.0/go.mod 
h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/workflows v1.6.0/go.mod 
h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 
h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane 
v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -418,8 +94,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -435,10 +109,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -448,19 +120,13 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -468,73 +134,61 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= 
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -557,7 +211,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -567,10 +220,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -597,55 +248,18 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= 
-golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -654,13 +268,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -686,61 +294,24 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -784,26 +355,11 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -820,40 +376,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= 
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= 
-google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -892,81 +414,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto 
v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
-google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod 
h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto 
v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -979,30 +430,11 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= 
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index b5f9fe48c78e..77379f44ae47 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -3,18 +3,18 @@ module google.golang.org/grpc/security/advancedtls/examples go 1.17 require ( - google.golang.org/grpc v1.51.0 - google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b - google.golang.org/grpc/security/advancedtls v0.0.0-20201112215255-90f1b3ee835b + google.golang.org/grpc v1.52.0 + google.golang.org/grpc/examples v0.0.0-20230111003119-9b73c42daa31 + google.golang.org/grpc/security/advancedtls v0.0.0-20230111003119-9b73c42daa31 ) require ( github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/crypto v0.3.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + golang.org/x/crypto v0.5.0 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 0f3640bf7e4a..3e95b40aa1a9 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,964 +1,21 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod 
h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= 
-cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry 
v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod 
h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/channel v1.8.0/go.mod 
h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod 
h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod 
h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= 
-cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup 
v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/language v1.4.0/go.mod 
h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod 
h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= 
-cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod 
h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod 
h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= 
-cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer 
v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod 
h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod 
h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
-github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru 
v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= 
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.6.0 
h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= 
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= 
-google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto 
v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= 
-google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= 
-google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 4df7ab38c2ae..96dfca921d0f 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -4,17 +4,17 @@ go 1.17 require ( github.com/hashicorp/golang-lru v0.5.4 - golang.org/x/crypto v0.3.0 - google.golang.org/grpc v1.51.0 + golang.org/x/crypto v0.5.0 + google.golang.org/grpc v1.52.0 google.golang.org/grpc/examples 
v0.0.0-20201112215255-90f1b3ee835b ) require ( github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 0f3640bf7e4a..2a30c3dac779 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,964 +1,22 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= 
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod 
h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod 
h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod 
h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/compute 
v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= 
-cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod 
h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod 
h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= 
-cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= 
-cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod 
h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod 
h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod 
h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod 
h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= 
-cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/trace v1.3.0/go.mod 
h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod 
h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod 
h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod 
h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod 
h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools 
v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api 
v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod 
h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= 
-google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod 
h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools 
v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/security/authorization/go.mod b/security/authorization/go.mod index 7596d57e207b..cf40541a13f6 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -3,20 +3,19 @@ module google.golang.org/grpc/security/authorization go 1.17 require ( - github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 - github.com/google/cel-go v0.10.1 - github.com/google/go-cmp v0.5.5 - google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 - google.golang.org/grpc v1.40.0 - google.golang.org/protobuf v1.27.1 + github.com/envoyproxy/go-control-plane v0.10.3 + github.com/google/cel-go v0.12.5 + github.com/google/go-cmp v0.5.9 + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f + google.golang.org/grpc v1.52.0 + google.golang.org/protobuf v1.28.1 ) require ( - github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e // indirect - github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed // indirect - github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + 
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/text v0.6.0 // indirect ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index fc168047bc1f..5e0656ec0083 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -1,34 +1,96 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery 
v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= 
-github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= 
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -40,124 +102,328 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/cel-go v0.10.1 h1:MQBGSZGnDwh7T/un+mzGKOMz3x+4E/GDPprWjDL+1Jg= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cel-go v0.12.5 h1:DmzaiSgoaqGCjtpPQWl26/gND+yRpim56H1jCVev6d8= +github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp 
v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod 
h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -170,11 +436,24 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= 
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index f7886ec6f5b5..851683220f61 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -4,16 +4,16 @@ go 1.17 require ( go.opencensus.io v0.24.0 - google.golang.org/grpc v1.51.0 + google.golang.org/grpc v1.52.0 ) require ( - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 4f1cc2c21f54..7361a2d8ac99 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -2,6 +2,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -14,6 +15,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go 
v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -25,17 +27,20 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= 
cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= @@ -78,6 +83,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= @@ -107,8 +113,13 @@ cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQH cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata 
v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= @@ -137,6 +148,7 @@ cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4c cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= @@ -158,12 +170,14 @@ cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1 cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= 
cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= @@ -183,16 +197,19 @@ cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZ cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= 
cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= @@ -201,10 +218,12 @@ cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiP cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= @@ -253,6 +272,9 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -307,6 +329,7 @@ cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5 cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= @@ -316,6 +339,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod 
h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= @@ -346,6 +370,7 @@ cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= @@ -363,27 +388,38 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod 
h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -442,6 +478,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf 
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -462,22 +499,38 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= 
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= 
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -503,12 +556,16 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -543,7 +600,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -573,11 +632,13 @@ 
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -590,8 +651,9 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net 
v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -616,6 +678,7 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -661,11 +724,13 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -673,6 +738,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -691,11 +757,13 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -707,11 +775,13 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text 
v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -756,6 +826,7 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -763,6 +834,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -821,6 +893,7 @@ google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -861,7 +934,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -894,6 +969,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -929,8 +1005,13 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto 
v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/test/tools/go.mod b/test/tools/go.mod index 2e4cc207cd31..b1dcc440964f 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -6,9 +6,9 @@ require ( github.com/BurntSushi/toml v1.2.1 // indirect github.com/client9/misspell v0.3.4 github.com/golang/protobuf v1.5.2 - golang.org/x/exp/typeparams v0.0.0-20221215174704-0915cd710c24 // indirect + golang.org/x/exp/typeparams v0.0.0-20230108222341-4b8118a2686a // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/tools v0.4.0 + golang.org/x/tools v0.5.0 google.golang.org/protobuf v1.28.1 // indirect honnef.co/go/tools v0.3.3 ) diff --git a/test/tools/go.sum b/test/tools/go.sum index 23431a9dc1bd..8728e287ec57 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -14,8 +14,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20221215174704-0915cd710c24 h1:+iZuikSm1jIhtO1dsw9jQcYCoGFEDDVXp236qRsnqK4= -golang.org/x/exp/typeparams v0.0.0-20221215174704-0915cd710c24/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230108222341-4b8118a2686a h1:uDSAx3XXnfrX4V3OAQGdxSR6CWIHAls7RJdpfCYaakI= +golang.org/x/exp/typeparams v0.0.0-20230108222341-4b8118a2686a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -27,7 +27,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -41,23 +41,23 @@ golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= 
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= diff --git a/vet.sh b/vet.sh index 2f0850f845da..3728aed04fc7 100755 --- a/vet.sh +++ b/vet.sh @@ -115,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy + go mod tidy -compat=1.17 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd From be06d526c0895bbe73c343a8355cd2d292adcb2f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 12 Jan 2023 16:00:34 -0800 Subject: [PATCH 737/998] binarylog: consistently rename imports for binarylog proto (#5931) --- binarylog/binarylog_end2end_test.go | 140 ++++++++-------- binarylog/sink.go | 4 +- internal/binarylog/method_logger.go | 126 +++++++------- internal/binarylog/method_logger_test.go | 204 +++++++++++------------ internal/binarylog/sink.go | 12 +- 5 files changed, 243 insertions(+), 243 deletions(-) diff --git a/binarylog/binarylog_end2end_test.go b/binarylog/binarylog_end2end_test.go index 1ac0a8e7c02b..66bb7bda3af4 100644 --- a/binarylog/binarylog_end2end_test.go +++ b/binarylog/binarylog_end2end_test.go @@ -38,7 +38,7 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - pb 
"google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) @@ -64,10 +64,10 @@ var testSink = &testBinLogSink{} type testBinLogSink struct { mu sync.Mutex - buf []*pb.GrpcLogEntry + buf []*binlogpb.GrpcLogEntry } -func (s *testBinLogSink) Write(e *pb.GrpcLogEntry) error { +func (s *testBinLogSink) Write(e *binlogpb.GrpcLogEntry) error { s.mu.Lock() s.buf = append(s.buf, e) s.mu.Unlock() @@ -78,12 +78,12 @@ func (s *testBinLogSink) Close() error { return nil } // Returns all client entris if client is true, otherwise return all server // entries. -func (s *testBinLogSink) logEntries(client bool) []*pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_SERVER +func (s *testBinLogSink) logEntries(client bool) []*binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_SERVER if client { - logger = pb.GrpcLogEntry_LOGGER_CLIENT + logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } - var ret []*pb.GrpcLogEntry + var ret []*binlogpb.GrpcLogEntry s.mu.Lock() for _, e := range s.buf { if e.Logger == logger { @@ -481,31 +481,31 @@ type expectedData struct { err error } -func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT - var peer *pb.Address +func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT + var peer *binlogpb.Address if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER ed.te.clientAddrMu.Lock() - peer = &pb.Address{ + peer = &binlogpb.Address{ Address: ed.te.clientIP.String(), IpPort: uint32(ed.te.clientPort), } if ed.te.clientIP.To4() != nil { - peer.Type = pb.Address_TYPE_IPV4 + peer.Type = binlogpb.Address_TYPE_IPV4 } else { - peer.Type = pb.Address_TYPE_IPV6 + 
peer.Type = binlogpb.Address_TYPE_IPV6 } ed.te.clientAddrMu.Unlock() } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, Logger: logger, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: &pb.ClientHeader{ + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: &binlogpb.ClientHeader{ Metadata: iblog.MdToMetadataProto(testMetadata), MethodName: ed.method, Authority: ed.te.srvAddr, @@ -515,29 +515,29 @@ func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) } } -func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_SERVER - var peer *pb.Address +func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_SERVER + var peer *binlogpb.Address if client { - logger = pb.GrpcLogEntry_LOGGER_CLIENT - peer = &pb.Address{ + logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + peer = &binlogpb.Address{ Address: ed.te.srvIP.String(), IpPort: uint32(ed.te.srvPort), } if ed.te.srvIP.To4() != nil { - peer.Type = pb.Address_TYPE_IPV4 + peer.Type = binlogpb.Address_TYPE_IPV4 } else { - peer.Type = pb.Address_TYPE_IPV6 + peer.Type = binlogpb.Address_TYPE_IPV6 } } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, Logger: logger, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: iblog.MdToMetadataProto(testMetadata), }, }, @@ -545,23 +545,23 @@ func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, 
inRPCID uint64) } } -func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT +func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } data, err := proto.Marshal(msg) if err != nil { grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err) } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, Logger: logger, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, @@ -569,23 +569,23 @@ func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64 } } -func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT +func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } data, err := proto.Marshal(msg) if err != nil { grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err) } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, Logger: logger, - Payload: &pb.GrpcLogEntry_Message{ - Message: 
&pb.Message{ + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, @@ -593,34 +593,34 @@ func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64 } } -func (ed *expectedData) newHalfCloseEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT +func (ed *expectedData) newHalfCloseEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. Logger: logger, } } -func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64, stErr error) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_SERVER - var peer *pb.Address +func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64, stErr error) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_SERVER + var peer *binlogpb.Address if client { - logger = pb.GrpcLogEntry_LOGGER_CLIENT - peer = &pb.Address{ + logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + peer = &binlogpb.Address{ Address: ed.te.srvIP.String(), IpPort: uint32(ed.te.srvPort), } if ed.te.srvIP.To4() != nil { - peer.Type = pb.Address_TYPE_IPV4 + peer.Type = binlogpb.Address_TYPE_IPV4 } else { - peer.Type = pb.Address_TYPE_IPV6 + peer.Type = binlogpb.Address_TYPE_IPV6 } } st, ok := status.FromError(stErr) @@ -638,14 +638,14 @@ func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64 grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: 
nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, Logger: logger, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: iblog.MdToMetadataProto(testTrailerMetadata), // st will be nil if err was not a status error, but nil is ok. StatusCode: uint32(st.Code()), @@ -657,20 +657,20 @@ func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64 } } -func (ed *expectedData) newCancelEntry(rpcID, inRPCID uint64) *pb.GrpcLogEntry { - return &pb.GrpcLogEntry{ +func (ed *expectedData) newCancelEntry(rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, Payload: nil, } } -func (ed *expectedData) toClientLogEntries() []*pb.GrpcLogEntry { +func (ed *expectedData) toClientLogEntries() []*binlogpb.GrpcLogEntry { var ( - ret []*pb.GrpcLogEntry + ret []*binlogpb.GrpcLogEntry idInRPC uint64 = 1 ) ret = append(ret, ed.newClientHeaderEntry(true, globalRPCID, idInRPC)) @@ -726,9 +726,9 @@ func (ed *expectedData) toClientLogEntries() []*pb.GrpcLogEntry { return ret } -func (ed *expectedData) toServerLogEntries() []*pb.GrpcLogEntry { +func (ed *expectedData) toServerLogEntries() []*binlogpb.GrpcLogEntry { var ( - ret []*pb.GrpcLogEntry + ret []*binlogpb.GrpcLogEntry idInRPC uint64 = 1 ) ret = append(ret, ed.newClientHeaderEntry(false, globalRPCID, idInRPC)) @@ -838,7 +838,7 @@ func runRPCs(t *testing.T, cc *rpcConfig) *expectedData { // // This function is typically called with only two entries. It's written in this // way so the code can be put in a for loop instead of copied twice. 
-func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) { +func equalLogEntry(entries ...*binlogpb.GrpcLogEntry) (equal bool) { for i, e := range entries { // Clear out some fields we don't compare. e.Timestamp = nil @@ -869,7 +869,7 @@ func testClientBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() expect := runRPCs(t, c) want := expect.toClientLogEntries() - var got []*pb.GrpcLogEntry + var got []*binlogpb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. // context.Cancel). // @@ -969,7 +969,7 @@ func testServerBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() expect := runRPCs(t, c) want := expect.toServerLogEntries() - var got []*pb.GrpcLogEntry + var got []*binlogpb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. // context.Cancel). This is unlikely to happen on server side, but it does // no harm to retry. diff --git a/binarylog/sink.go b/binarylog/sink.go index 4b6aa653f93c..d924e4c91867 100644 --- a/binarylog/sink.go +++ b/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "fmt" "os" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" iblog "google.golang.org/grpc/internal/binarylog" ) @@ -48,7 +48,7 @@ type Sink interface { // entry. Some options are: proto bytes, or proto json. // // Note this function needs to be thread-safe. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close closes this sink and cleans up resources (e.g. the flushing // goroutine). 
Close() error diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index 85e3ff2816ae..d71e441778f4 100644 --- a/internal/binarylog/method_logger.go +++ b/internal/binarylog/method_logger.go @@ -26,7 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic // in TruncatingMethodLogger as possible. -func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } return m @@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) 
{ if ml.headerMaxLen == maxUInt { return false } @@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated return truncated } -func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated // LogEntryConfig represents the configuration for binary log entry. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -159,10 +159,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. 
- clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -195,19 +195,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -223,7 +223,7 @@ type ClientMessage struct { Message interface{} } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() 
*pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -263,7 +263,7 @@ type ServerMessage struct { Message interface{} } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -300,15 +300,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: 
binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -324,7 +324,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -367,15 +367,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { 
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. 
break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/internal/binarylog/method_logger_test.go b/internal/binarylog/method_logger_test.go index ff87b6a2ec4e..5d1e09a39658 100644 --- a/internal/binarylog/method_logger_test.go +++ b/internal/binarylog/method_logger_test.go @@ -26,10 +26,10 @@ import ( "time" "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/ptypes/duration" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" ) func (s) TestLog(t *testing.T) { @@ -46,7 +46,7 @@ func (s) TestLog(t *testing.T) { port6 := 796 tcpAddr6, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("[%v]:%d", addr6, port6)) - testProtoMsg := &pb.Message{ + testProtoMsg := &binlogpb.Message{ Length: 1, Data: []byte{'a'}, } @@ -54,7 +54,7 @@ func (s) TestLog(t *testing.T) { testCases := []struct { config LogEntryConfig - want *pb.GrpcLogEntry + want *binlogpb.GrpcLogEntry }{ { config: &ClientHeader{ @@ -67,31 +67,31 @@ func (s) TestLog(t *testing.T) { Timeout: 2*time.Second + 3*time.Nanosecond, PeerAddr: tcpAddr, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: &pb.ClientHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: 
&binlogpb.ClientHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, }, MethodName: "testservice/testmethod", Authority: "test.service.io", - Timeout: &dpb.Duration{ + Timeout: &durationpb.Duration{ Seconds: 2, Nanos: 3, }, }, }, PayloadTruncated: false, - Peer: &pb.Address{ - Type: pb.Address_TYPE_IPV4, + Peer: &binlogpb.Address{ + Type: binlogpb.Address_TYPE_IPV4, Address: addr, IpPort: uint32(port), }, @@ -103,15 +103,15 @@ func (s) TestLog(t *testing.T) { MethodName: "testservice/testmethod", Authority: "test.service.io", }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: &pb.ClientHeader{ - Metadata: &pb.Metadata{}, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: &binlogpb.ClientHeader{ + Metadata: &binlogpb.Metadata{}, MethodName: "testservice/testmethod", Authority: "test.service.io", }, @@ -127,16 +127,16 @@ func (s) TestLog(t *testing.T) { }, PeerAddr: tcpAddr6, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, @@ -144,8 +144,8 @@ 
func (s) TestLog(t *testing.T) { }, }, PayloadTruncated: false, - Peer: &pb.Address{ - Type: pb.Address_TYPE_IPV6, + Peer: &binlogpb.Address{ + Type: binlogpb.Address_TYPE_IPV6, Address: addr6, IpPort: uint32(port6), }, @@ -156,14 +156,14 @@ func (s) TestLog(t *testing.T) { OnClientSide: true, Message: testProtoMsg, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(testProtoBytes)), Data: testProtoBytes, }, @@ -177,14 +177,14 @@ func (s) TestLog(t *testing.T) { OnClientSide: false, Message: testProtoMsg, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(testProtoBytes)), Data: testProtoBytes, }, @@ -197,12 +197,12 @@ func (s) TestLog(t *testing.T) { config: &ClientHalfClose{ OnClientSide: false, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, Payload: nil, PayloadTruncated: false, Peer: nil, @@ -214,23 +214,23 @@ func (s) TestLog(t *testing.T) { Err: status.Errorf(codes.Unavailable, "test"), 
PeerAddr: tcpAddr, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ - Metadata: &pb.Metadata{}, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ + Metadata: &binlogpb.Metadata{}, StatusCode: uint32(codes.Unavailable), StatusMessage: "test", StatusDetails: nil, }, }, PayloadTruncated: false, - Peer: &pb.Address{ - Type: pb.Address_TYPE_IPV4, + Peer: &binlogpb.Address{ + Type: binlogpb.Address_TYPE_IPV4, Address: addr, IpPort: uint32(port), }, @@ -240,15 +240,15 @@ func (s) TestLog(t *testing.T) { config: &ServerTrailer{ OnClientSide: true, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ - Metadata: &pb.Metadata{}, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ + Metadata: &binlogpb.Metadata{}, StatusCode: uint32(codes.OK), StatusMessage: "", StatusDetails: nil, @@ -262,12 +262,12 @@ func (s) TestLog(t *testing.T) { config: &Cancel{ OnClientSide: true, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, Payload: nil, PayloadTruncated: false, Peer: nil, @@ -284,16 +284,16 @@ func (s) TestLog(t *testing.T) { "a": {"b", "bb"}, }, }, - want: 
&pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: &pb.ClientHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: &binlogpb.ClientHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, @@ -312,16 +312,16 @@ func (s) TestLog(t *testing.T) { "a": {"b", "bb"}, }, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, @@ -336,7 +336,7 @@ func (s) TestLog(t *testing.T) { buf.Reset() tc.want.SequenceIdWithinCall = uint64(i + 1) ml.Log(tc.config) - inSink := new(pb.GrpcLogEntry) + inSink := new(binlogpb.GrpcLogEntry) if err := proto.Unmarshal(buf.Bytes()[4:], inSink); err != nil { t.Errorf("failed to unmarshal bytes in sink to proto: %v", err) continue @@ -351,44 +351,44 @@ func (s) TestLog(t *testing.T) { func (s) TestTruncateMetadataNotTruncated(t *testing.T) { testCases := []struct { ml *TruncatingMethodLogger - mpPb *pb.Metadata + mpPb *binlogpb.Metadata }{ { ml: NewTruncatingMethodLogger(maxUInt, 
maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, }, }, }, { ml: NewTruncatingMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, }, }, }, { ml: NewTruncatingMethodLogger(1, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: nil}, }, }, }, { ml: NewTruncatingMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, }, }, }, { ml: NewTruncatingMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, }, @@ -398,8 +398,8 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { // limit. 
{ ml: NewTruncatingMethodLogger(1, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "grpc-trace-bin", Value: []byte("some.trace.key")}, }, @@ -418,14 +418,14 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { func (s) TestTruncateMetadataTruncated(t *testing.T) { testCases := []struct { ml *TruncatingMethodLogger - mpPb *pb.Metadata + mpPb *binlogpb.Metadata entryLen int }{ { ml: NewTruncatingMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1, 1, 1}}, }, }, @@ -433,8 +433,8 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { }, { ml: NewTruncatingMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, @@ -444,8 +444,8 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { }, { ml: NewTruncatingMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, {Key: "", Value: []byte{1}}, }, @@ -454,8 +454,8 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { }, { ml: NewTruncatingMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1, 1}}, }, @@ -479,23 +479,23 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { func (s) TestTruncateMessageNotTruncated(t *testing.T) { testCases := []struct { ml *TruncatingMethodLogger - msgPb *pb.Message + msgPb *binlogpb.Message }{ { ml: NewTruncatingMethodLogger(maxUInt, maxUInt), - msgPb: &pb.Message{ + msgPb: &binlogpb.Message{ Data: []byte{1}, }, }, { ml: 
NewTruncatingMethodLogger(maxUInt, 3), - msgPb: &pb.Message{ + msgPb: &binlogpb.Message{ Data: []byte{1, 1}, }, }, { ml: NewTruncatingMethodLogger(maxUInt, 2), - msgPb: &pb.Message{ + msgPb: &binlogpb.Message{ Data: []byte{1, 1}, }, }, @@ -512,13 +512,13 @@ func (s) TestTruncateMessageNotTruncated(t *testing.T) { func (s) TestTruncateMessageTruncated(t *testing.T) { testCases := []struct { ml *TruncatingMethodLogger - msgPb *pb.Message + msgPb *binlogpb.Message oldLength uint32 }{ { ml: NewTruncatingMethodLogger(maxUInt, 2), - msgPb: &pb.Message{ + msgPb: &binlogpb.Message{ Length: 3, Data: []byte{1, 1, 1}, }, diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index c2fdd58b3198..264de387c2a5 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. 
// @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { From 9228cffc1a0702e602baa365e984397ab63fa295 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 12 Jan 2023 16:02:10 -0800 Subject: [PATCH 738/998] rls: fix a data race involving the LRU cache (#5925) --- balancer/rls/balancer.go | 29 ++++++--- balancer/rls/cache.go | 66 ++++--------------- balancer/rls/cache_test.go | 33 ---------- balancer/rls/picker.go | 129 +++++++------------------------------ 4 files changed, 59 insertions(+), 198 deletions(-) diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index f18f4531d839..f0cff9ac4455 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -125,8 +125,11 @@ type rlsBalancer struct { // fact that in places where we need to acquire both the locks, we always // start off reading the cache. - // cacheMu guards access to the data cache and pending requests map. - cacheMu sync.RWMutex + // cacheMu guards access to the data cache and pending requests map. We + // cannot use an RWMutex here since even an operation like + // dataCache.getEntry() modifies the underlying LRU, which is implemented as + // a doubly linked list. + cacheMu sync.Mutex dataCache *dataCache // Cache of RLS data. pendingMap map[cacheKey]*backoffState // Map of pending RLS requests. @@ -263,17 +266,14 @@ func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error // channels, we also swap out the throttling state. 
b.handleControlChannelUpdate(newCfg) - // If the new config changes the size of the data cache, we might have to - // evict entries to get the cache size down to the newly specified size. - if newCfg.cacheSizeBytes != b.lbCfg.cacheSizeBytes { - b.dataCache.resize(newCfg.cacheSizeBytes) - } - // Any changes to child policy name or configuration needs to be handled by // either creating new child policies or pushing updates to existing ones. b.resolverState = ccs.ResolverState b.handleChildPolicyConfigUpdate(newCfg, &ccs) + // Resize the cache if the size in the config has changed. + resizeCache := newCfg.cacheSizeBytes != b.lbCfg.cacheSizeBytes + // Update the copy of the config in the LB policy before releasing the lock. b.lbCfg = newCfg @@ -284,6 +284,19 @@ func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error b.updateCh.Put(resumePickerUpdates{done: done}) b.stateMu.Unlock() <-done + + if resizeCache { + // If the new config changes reduces the size of the data cache, we + // might have to evict entries to get the cache size down to the newly + // specified size. + // + // And we cannot do this operation above (where we compute the + // `resizeCache` boolean) because `cacheMu` needs to be grabbed before + // `stateMu` if we are to hold both locks at the same time. + b.cacheMu.Lock() + b.dataCache.resize(newCfg.cacheSizeBytes) + b.cacheMu.Unlock() + } return nil } diff --git a/balancer/rls/cache.go b/balancer/rls/cache.go index 9a38072c7745..d7a6a1a436c6 100644 --- a/balancer/rls/cache.go +++ b/balancer/rls/cache.go @@ -91,8 +91,6 @@ type cacheEntry struct { // size stores the size of this cache entry. Used to enforce the cache size // specified in the LB policy configuration. size int64 - // onEvict is the callback to be invoked when this cache entry is evicted. - onEvict func() } // backoffState wraps all backoff related state associated with a cache entry. 
@@ -156,20 +154,6 @@ func (l *lru) getLeastRecentlyUsed() cacheKey { return e.Value.(cacheKey) } -// iterateAndRun traverses the lru in least-recently-used order and calls the -// provided function for every element. -// -// Callers may delete the cache entry associated with the cacheKey passed into -// f, but they may not perform any other operation which reorders the elements -// in the lru. -func (l *lru) iterateAndRun(f func(cacheKey)) { - var next *list.Element - for e := l.ll.Front(); e != nil; e = next { - next = e.Next() - f(e.Value.(cacheKey)) - } -} - // dataCache contains a cache of RLS data used by the LB policy to make routing // decisions. // @@ -252,29 +236,22 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) { // The return value indicates if any expired entries were evicted. // // The LB policy invokes this method periodically to purge expired entries. -func (dc *dataCache) evictExpiredEntries() (evicted bool) { +func (dc *dataCache) evictExpiredEntries() bool { if dc.shutdown.HasFired() { return false } - evicted = false - dc.keys.iterateAndRun(func(key cacheKey) { - entry, ok := dc.entries[key] - if !ok { - // This should never happen. - dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to perform periodic cleanup of expired entries", key) - return - } - + evicted := false + for key, entry := range dc.entries { // Only evict entries for which both the data expiration time and // backoff expiration time fields are in the past. now := time.Now() if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { - return + continue } - evicted = true dc.deleteAndcleanup(key, entry) - }) + evicted = true + } return evicted } @@ -285,22 +262,15 @@ func (dc *dataCache) evictExpiredEntries() (evicted bool) { // The LB policy invokes this method when the control channel moves from READY // to TRANSIENT_FAILURE back to READY. See `monitorConnectivityState` method on // the `controlChannel` type for more details. 
-func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) (backoffReset bool) { +func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) bool { if dc.shutdown.HasFired() { return false } - backoffReset = false - dc.keys.iterateAndRun(func(key cacheKey) { - entry, ok := dc.entries[key] - if !ok { - // This should never happen. - dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to perform periodic cleanup of expired entries", key) - return - } - + backoffReset := false + for _, entry := range dc.entries { if entry.backoffState == nil { - return + continue } if entry.backoffState.timer != nil { entry.backoffState.timer.Stop() @@ -310,7 +280,7 @@ func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) (backoffRe entry.backoffTime = time.Time{} entry.backoffExpiryTime = time.Time{} backoffReset = true - }) + } return backoffReset } @@ -377,25 +347,15 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { // - the entry is removed from the map of entries // - current size of the data cache is update // - the key is removed from the LRU -// - onEvict is invoked in a separate goroutine func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { delete(dc.entries, key) dc.currentSize -= entry.size dc.keys.removeEntry(key) - if entry.onEvict != nil { - go entry.onEvict() - } } func (dc *dataCache) stop() { - dc.keys.iterateAndRun(func(key cacheKey) { - entry, ok := dc.entries[key] - if !ok { - // This should never happen. 
- dc.logger.Errorf("cacheKey %+v not found in the cache while shutting down", key) - return - } + for key, entry := range dc.entries { dc.deleteAndcleanup(key, entry) - }) + } dc.shutdown.Fire() } diff --git a/balancer/rls/cache_test.go b/balancer/rls/cache_test.go index cb9b060b59ae..80185f39c929 100644 --- a/balancer/rls/cache_test.go +++ b/balancer/rls/cache_test.go @@ -117,39 +117,6 @@ func (s) TestLRU_BasicOperations(t *testing.T) { } } -func (s) TestLRU_IterateAndRun(t *testing.T) { - initCacheEntries() - // Create an LRU and add some entries to it. - lru := newLRU() - for _, k := range cacheKeys { - lru.addEntry(k) - } - - // Iterate through the lru to make sure that entries are returned in the - // least recently used order. - var gotKeys []cacheKey - lru.iterateAndRun(func(key cacheKey) { - gotKeys = append(gotKeys, key) - }) - if !cmp.Equal(gotKeys, cacheKeys, cmp.AllowUnexported(cacheKey{})) { - t.Fatalf("lru.iterateAndRun returned %v, want %v", gotKeys, cacheKeys) - } - - // Make sure that removing entries from the lru while iterating through it - // is a safe operation. - lru.iterateAndRun(func(key cacheKey) { - lru.removeEntry(key) - }) - - // Check the lru internals to make sure we freed up all the memory. 
- if len := lru.ll.Len(); len != 0 { - t.Fatalf("Number of entries in the lru's underlying list is %d, want 0", len) - } - if len := len(lru.m); len != 0 { - t.Fatalf("Number of entries in the lru's underlying map is %d, want 0", len) - } -} - func (s) TestDataCache_BasicOperations(t *testing.T) { initCacheEntries() dc := newDataCache(5, nil) diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go index bd5985ad9e7c..3305f4529fd9 100644 --- a/balancer/rls/picker.go +++ b/balancer/rls/picker.go @@ -84,10 +84,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { md, _ := metadata.FromOutgoingContext(info.Ctx) reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) - // Grab a read-lock to perform a cache lookup. If it so happens that we need - // to write to the cache (if we have to send out an RLS request), we will - // release the read-lock and acquire a write-lock. - p.lb.cacheMu.RLock() + p.lb.cacheMu.Lock() + defer p.lb.cacheMu.Unlock() // Lookup data cache and pending request map using request path and keys. cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str} @@ -98,75 +96,62 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { switch { // No data cache entry. No pending request. case dcEntry == nil && pendingEntry == nil: - p.lb.cacheMu.RUnlock() - bs := &backoffState{bs: defaultBackoffStrategy} - return p.sendRequestAndReturnPick(cacheKey, bs, reqKeys.Map, info) + throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + return p.useDefaultPickIfPossible(info, errRLSThrottled) + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable // No data cache entry. Pending request exits. case dcEntry == nil && pendingEntry != nil: - p.lb.cacheMu.RUnlock() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable // Data cache hit. No pending request. 
case dcEntry != nil && pendingEntry == nil: if dcEntry.expiryTime.After(now) { if !dcEntry.staleTime.IsZero() && dcEntry.staleTime.Before(now) && dcEntry.backoffTime.Before(now) { - // Executing the proactive cache refresh in a goroutine simplifies - // acquiring and releasing of locks. - go func(bs *backoffState) { - p.lb.cacheMu.Lock() - // It is OK to ignore the return value which indicates if this request - // was throttled. This is an attempt to proactively refresh the cache, - // and it is OK for it to fail. - p.sendRouteLookupRequest(cacheKey, bs, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) - p.lb.cacheMu.Unlock() - }(dcEntry.backoffState) + p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) } // Delegate to child policies. - res, err := p.delegateToChildPolicies(dcEntry, info) - p.lb.cacheMu.RUnlock() + res, err := p.delegateToChildPoliciesLocked(dcEntry, info) return res, err } // We get here only if the data cache entry has expired. If entry is in // backoff, delegate to default target or fail the pick. if dcEntry.backoffState != nil && dcEntry.backoffTime.After(now) { - st := dcEntry.status - p.lb.cacheMu.RUnlock() - // Avoid propagating the status code received on control plane RPCs to the // data plane which can lead to unexpected outcomes as we do not control // the status code sent by the control plane. Propagating the status // message received from the control plane is still fine, as it could be // useful for debugging purposes. + st := dcEntry.status return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) } // We get here only if the entry has expired and is not in backoff. 
- bs := *dcEntry.backoffState - p.lb.cacheMu.RUnlock() - return p.sendRequestAndReturnPick(cacheKey, &bs, reqKeys.Map, info) + throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + return p.useDefaultPickIfPossible(info, errRLSThrottled) + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable // Data cache hit. Pending request exists. default: if dcEntry.expiryTime.After(now) { - res, err := p.delegateToChildPolicies(dcEntry, info) - p.lb.cacheMu.RUnlock() + res, err := p.delegateToChildPoliciesLocked(dcEntry, info) return res, err } // Data cache entry has expired and pending request exists. Queue pick. - p.lb.cacheMu.RUnlock() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } } -// delegateToChildPolicies is a helper function which iterates through the list -// of child policy wrappers in a cache entry and attempts to find a child policy -// to which this RPC can be routed to. If all child policies are in +// delegateToChildPoliciesLocked is a helper function which iterates through the +// list of child policy wrappers in a cache entry and attempts to find a child +// policy to which this RPC can be routed to. If all child policies are in // TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. -// -// Caller must hold at least a read-lock on p.lb.cacheMu. 
-func (p *rlsPicker) delegateToChildPolicies(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { +func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { const rlsDataHeaderName = "x-google-rls-data" for i, cpw := range dcEntry.childPolicyWrappers { state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) @@ -194,69 +179,6 @@ func (p *rlsPicker) delegateToChildPolicies(dcEntry *cacheEntry, info balancer.P return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } -// sendRequestAndReturnPick is called to send out an RLS request on the control -// channel. Since sending out an RLS request entails creating an entry in the -// pending request map, this method needs to acquire the write-lock on the -// cache. This also means that the caller must release the read-lock that they -// could have been holding. This means that things could have happened in -// between and therefore a fresh lookup on the cache needs to be performed here -// with the write-lock and all cases need to be handled. -// -// Acquires the write-lock on the cache. Caller must not hold p.lb.cacheMu. -func (p *rlsPicker) sendRequestAndReturnPick(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, info balancer.PickInfo) (balancer.PickResult, error) { - p.lb.cacheMu.Lock() - defer p.lb.cacheMu.Unlock() - - // We need to perform another cache lookup to ensure that things haven't - // changed since the last lookup. - dcEntry := p.lb.dataCache.getEntry(cacheKey) - pendingEntry := p.lb.pendingMap[cacheKey] - - // Existence of a pending map entry indicates that someone sent out a request - // before us and the response is pending. Skip sending a new request. - // Piggyback on the existing one by queueing the pick. - if pendingEntry != nil { - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - - // If no data cache entry exists, it means that no one jumped in front of us. 
- // We need to send out an RLS request and queue the pick. - if dcEntry == nil { - throttled := p.sendRouteLookupRequest(cacheKey, bs, reqKeys, rlspb.RouteLookupRequest_REASON_MISS, "") - if throttled { - return p.useDefaultPickIfPossible(info, errRLSThrottled) - } - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } - - // Existence of a data cache entry indicates either that someone sent out a - // request before us and received a response, or we got here in the first - // place because we found an expired entry in the data cache. - now := time.Now() - switch { - // Valid data cache entry. Delegate to its child policies. - case dcEntry.expiryTime.After(now): - return p.delegateToChildPolicies(dcEntry, info) - - // Entry is in backoff. Delegate to default target or fail the pick. - case dcEntry.backoffState != nil && dcEntry.backoffTime.After(now): - // Avoid propagating the status code received on control plane RPCs to the - // data plane which can lead to unexpected outcomes as we do not control - // the status code sent by the control plane. Propagating the status - // message received from the control plane is still fine, as it could be - // useful for debugging purposes. - return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", dcEntry.status.Error()))) - - // Entry has expired, but is not in backoff. Send request and queue pick. - default: - throttled := p.sendRouteLookupRequest(cacheKey, bs, reqKeys, rlspb.RouteLookupRequest_REASON_MISS, "") - if throttled { - return p.useDefaultPickIfPossible(info, errRLSThrottled) - } - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable - } -} - // useDefaultPickIfPossible is a helper method which delegates to the default // target if one is configured, or fails the pick with the given error. 
func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) { @@ -267,12 +189,11 @@ func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefa return balancer.PickResult{}, errOnNoDefault } -// sendRouteLookupRequest adds an entry to the pending request map and sends out -// an RLS request using the passed in arguments. Returns a value indicating if -// the request was throttled by the client-side adaptive throttler. -// -// Caller must hold a write-lock on p.lb.cacheMu. -func (p *rlsPicker) sendRouteLookupRequest(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string) bool { +// sendRouteLookupRequestLocked adds an entry to the pending request map and +// sends out an RLS request using the passed in arguments. Returns a value +// indicating if the request was throttled by the client-side adaptive +// throttler. +func (p *rlsPicker) sendRouteLookupRequestLocked(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string) bool { if p.lb.pendingMap[cacheKey] != nil { return false } From 2a9e970f94f51d90e5df2097301f75c1250ed572 Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Fri, 13 Jan 2023 13:02:53 -0500 Subject: [PATCH 739/998] xds interop: Fix buildscripts not continuing on a failed test suite (#5932) --- test/kokoro/psm-security.sh | 2 +- test/kokoro/xds_k8s_lb.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/kokoro/psm-security.sh b/test/kokoro/psm-security.sh index 97aca28d79b7..b5c3bec60e00 100755 --- a/test/kokoro/psm-security.sh +++ b/test/kokoro/psm-security.sh @@ -158,7 +158,7 @@ main() { local failed_tests=0 test_suites=("baseline_test" "security_test" "authz_test") for test in "${test_suites[@]}"; do - run_test $test || (( failed_tests++ )) + run_test $test || (( failed_tests++ )) && true done echo "Failed 
test suites: ${failed_tests}" if (( failed_tests > 0 )); then diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index 31aca363aafc..81dae14f9f39 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -160,7 +160,7 @@ main() { local failed_tests=0 test_suites=("api_listener_test" "change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test" "affinity_test" "outlier_detection_test") for test in "${test_suites[@]}"; do - run_test $test || (( failed_tests++ )) + run_test $test || (( failed_tests++ )) && true done echo "Failed test suites: ${failed_tests}" if (( failed_tests > 0 )); then From 78ddc05d9b338930bfa4a0d2b4b5039724b7d013 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 13 Jan 2023 10:25:48 -0800 Subject: [PATCH 740/998] xdsclient: fix race in load report implementation (#5927) --- xds/internal/xdsclient/clientimpl_loadreport.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/clientimpl_loadreport.go b/xds/internal/xdsclient/clientimpl_loadreport.go index dd0ae225e8d0..e53df3f1edd9 100644 --- a/xds/internal/xdsclient/clientimpl_loadreport.go +++ b/xds/internal/xdsclient/clientimpl_loadreport.go @@ -30,13 +30,15 @@ import ( func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) { c.authorityMu.Lock() a, err := c.newAuthorityLocked(server) - c.authorityMu.Unlock() if err != nil { + c.authorityMu.Unlock() c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err) return nil, func() {} } // Hold the ref before starting load reporting. 
a.refLocked() + c.authorityMu.Unlock() + store, cancelF := a.reportLoad() return store, func() { cancelF() From cde2edce6bd8555b810086d615479d97b72e992c Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Tue, 17 Jan 2023 18:18:44 -0500 Subject: [PATCH 741/998] Revert "xds interop: Fix buildscripts not continuing on a failed test suite (#5932)" (#5936) --- test/kokoro/psm-security.sh | 2 +- test/kokoro/xds_k8s_lb.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/kokoro/psm-security.sh b/test/kokoro/psm-security.sh index b5c3bec60e00..97aca28d79b7 100755 --- a/test/kokoro/psm-security.sh +++ b/test/kokoro/psm-security.sh @@ -158,7 +158,7 @@ main() { local failed_tests=0 test_suites=("baseline_test" "security_test" "authz_test") for test in "${test_suites[@]}"; do - run_test $test || (( failed_tests++ )) && true + run_test $test || (( failed_tests++ )) done echo "Failed test suites: ${failed_tests}" if (( failed_tests > 0 )); then diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index 81dae14f9f39..31aca363aafc 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -160,7 +160,7 @@ main() { local failed_tests=0 test_suites=("api_listener_test" "change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test" "affinity_test" "outlier_detection_test") for test in "${test_suites[@]}"; do - run_test $test || (( failed_tests++ )) && true + run_test $test || (( failed_tests++ )) done echo "Failed test suites: ${failed_tests}" if (( failed_tests > 0 )); then From 379a2f676c51305fe4b143d9698806edb7d7d0fa Mon Sep 17 00:00:00 2001 From: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Date: Wed, 18 Jan 2023 11:11:47 +1100 Subject: [PATCH 742/998] *: add missing colon to errorf messages to improve readability (#5911) --- balancer/grpclb/grpclb_remote_balancer.go | 2 +- benchmark/benchmain/main.go | 2 +- benchmark/benchmark.go | 2 +- benchmark/client/main.go | 2 +- 
benchmark/server/main.go | 2 +- benchmark/worker/benchmark_client.go | 2 +- benchmark/worker/benchmark_server.go | 2 +- clientconn.go | 2 +- examples/features/health/client/main.go | 2 +- examples/features/interceptor/server/main.go | 4 ++-- .../metadata_interceptor/client/main.go | 4 ++-- examples/route_guide/client/client.go | 2 +- examples/route_guide/server/server.go | 2 +- internal/transport/http2_client.go | 4 ++-- interop/fake_grpclb/fake_grpclb.go | 2 +- interop/http2/negative_http2_client.go | 6 +++--- interop/server/server.go | 4 ++-- interop/xds/server/server.go | 15 +++++++++------ pickfirst.go | 2 +- rpc_util.go | 4 ++-- security/advancedtls/crl.go | 18 +++++++++--------- server.go | 2 +- service_config.go | 10 +++++----- stress/client/main.go | 2 +- stress/metrics_client/main.go | 2 +- .../balancer/cdsbalancer/cdsbalancer.go | 2 +- xds/internal/clusterspecifier/rls/rls.go | 2 +- .../xdsclient/xdsresource/unmarshal_cds.go | 6 +++--- .../xdsclient/xdsresource/unmarshal_rds.go | 2 +- 29 files changed, 58 insertions(+), 55 deletions(-) diff --git a/balancer/grpclb/grpclb_remote_balancer.go b/balancer/grpclb/grpclb_remote_balancer.go index dab1959418e1..e56006d7131a 100644 --- a/balancer/grpclb/grpclb_remote_balancer.go +++ b/balancer/grpclb/grpclb_remote_balancer.go @@ -332,7 +332,7 @@ func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (bac lbClient := &loadBalancerClient{cc: ccw.cc} stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) if err != nil { - return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer: %v", err) } ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = true diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index cbdefac17e9a..3d054f358037 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -870,7 +870,7 @@ func (nopCompressor) Do(w io.Writer, p 
[]byte) error { return err } if n != len(p) { - return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p)) + return fmt.Errorf("nopCompressor.Write: wrote %d bytes; want %d", n, len(p)) } return nil } diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index e75f8f833188..b2c3356abea9 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -278,7 +278,7 @@ func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { func NewClientConnWithContext(ctx context.Context, addr string, opts ...grpc.DialOption) *grpc.ClientConn { conn, err := grpc.DialContext(ctx, addr, opts...) if err != nil { - logger.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) + logger.Fatalf("NewClientConn(%q) failed to create a ClientConn: %v", addr, err) } return conn } diff --git a/benchmark/client/main.go b/benchmark/client/main.go index 5c615ced1409..395caf39ea76 100644 --- a/benchmark/client/main.go +++ b/benchmark/client/main.go @@ -86,7 +86,7 @@ var ( func main() { flag.Parse() if *testName == "" { - logger.Fatalf("test_name not set") + logger.Fatal("-test_name not set") } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, diff --git a/benchmark/server/main.go b/benchmark/server/main.go index da23a0270bd6..144d090b9c4a 100644 --- a/benchmark/server/main.go +++ b/benchmark/server/main.go @@ -54,7 +54,7 @@ var ( func main() { flag.Parse() if *testName == "" { - logger.Fatalf("test name not set") + logger.Fatal("-test_name not set") } lis, err := net.Listen("tcp", ":"+*port) if err != nil { diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 312fcfd7dc1f..7f02728709ca 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -132,7 +132,7 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error } creds, err := credentials.NewClientTLSFromFile(*caFile, 
config.SecurityParams.ServerHostOverride) if err != nil { - return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) + return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials: %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index da6288c11de4..f3ae4d08d836 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -101,7 +101,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - logger.Fatalf("failed to generate credentials %v", err) + logger.Fatalf("failed to generate credentials: %v", err) } opts = append(opts, grpc.Creds(creds)) } diff --git a/clientconn.go b/clientconn.go index 26166b89becd..6ead8a6f1e9f 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1376,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() diff --git a/examples/features/health/client/main.go b/examples/features/health/client/main.go index 1e44aeb3d30e..63b4717b5257 100644 --- a/examples/features/health/client/main.go +++ b/examples/features/health/client/main.go @@ -74,7 +74,7 @@ func main() { conn, err := grpc.Dial(address, options...) 
if err != nil { - log.Fatalf("did not connect %v", err) + log.Fatalf("grpc.Dial(%q): %v", address, err) } defer conn.Close() diff --git a/examples/features/interceptor/server/main.go b/examples/features/interceptor/server/main.go index 1b07cdecd6ca..78b87aae3472 100644 --- a/examples/features/interceptor/server/main.go +++ b/examples/features/interceptor/server/main.go @@ -98,7 +98,7 @@ func unaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServ } m, err := handler(ctx, req) if err != nil { - logger("RPC failed with error %v", err) + logger("RPC failed with error: %v", err) } return m, err } @@ -135,7 +135,7 @@ func streamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamS err := handler(srv, newWrappedStream(ss)) if err != nil { - logger("RPC failed with error %v", err) + logger("RPC failed with error: %v", err) } return err } diff --git a/examples/features/metadata_interceptor/client/main.go b/examples/features/metadata_interceptor/client/main.go index a6ad804d726a..5e1bebec12ae 100644 --- a/examples/features/metadata_interceptor/client/main.go +++ b/examples/features/metadata_interceptor/client/main.go @@ -38,7 +38,7 @@ var addr = flag.String("addr", "localhost:50051", "the address to connect to") func callUnaryEcho(ctx context.Context, client pb.EchoClient) { resp, err := client.UnaryEcho(ctx, &pb.EchoRequest{Message: "hello world"}) if err != nil { - log.Fatalf("UnaryEcho %v", err) + log.Fatalf("UnaryEcho: %v", err) } fmt.Println("UnaryEcho: ", resp.Message) } @@ -46,7 +46,7 @@ func callUnaryEcho(ctx context.Context, client pb.EchoClient) { func callBidiStreamingEcho(ctx context.Context, client pb.EchoClient) { c, err := client.BidirectionalStreamingEcho(ctx) if err != nil { - log.Fatalf("BidiStreamingEcho %v", err) + log.Fatalf("BidiStreamingEcho: %v", err) } if err := c.Send(&pb.EchoRequest{Message: "hello world"}); err != nil { diff --git a/examples/route_guide/client/client.go b/examples/route_guide/client/client.go 
index 7d24b88b1ced..d027d2d6d42b 100644 --- a/examples/route_guide/client/client.go +++ b/examples/route_guide/client/client.go @@ -161,7 +161,7 @@ func main() { } creds, err := credentials.NewClientTLSFromFile(*caFile, *serverHostOverride) if err != nil { - log.Fatalf("Failed to create TLS credentials %v", err) + log.Fatalf("Failed to create TLS credentials: %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { diff --git a/examples/route_guide/server/server.go b/examples/route_guide/server/server.go index 2a35a6f78aee..44b2f963516b 100644 --- a/examples/route_guide/server/server.go +++ b/examples/route_guide/server/server.go @@ -233,7 +233,7 @@ func main() { } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - log.Fatalf("Failed to generate credentials %v", err) + log.Fatalf("Failed to generate credentials: %v", err) } opts = []grpc.ServerOption{grpc.Creds(creds)} } diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 667989f603d3..79ee8aea0a21 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -216,7 +216,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } // Any further errors will close the underlying connection @@ -1182,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the 
received http2 error: %v", f.ErrCode) } statusCode = codes.Unknown } diff --git a/interop/fake_grpclb/fake_grpclb.go b/interop/fake_grpclb/fake_grpclb.go index e29d2f439fa9..00ae00a7f683 100644 --- a/interop/fake_grpclb/fake_grpclb.go +++ b/interop/fake_grpclb/fake_grpclb.go @@ -53,7 +53,7 @@ func main() { keyFile := testdata.Path("server1.key") creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) if err != nil { - logger.Fatalf("Failed to generate credentials %v", err) + logger.Fatalf("Failed to generate credentials: %v", err) } opts = append(opts, grpc.Creds(creds)) } else if *useALTS { diff --git a/interop/http2/negative_http2_client.go b/interop/http2/negative_http2_client.go index 9fddc5f328a9..b8c1d522009e 100644 --- a/interop/http2/negative_http2_client.go +++ b/interop/http2/negative_http2_client.go @@ -81,7 +81,7 @@ func rstAfterHeader(tc testgrpc.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { - logger.Fatalf("Client received reply despite server sending rst stream after header") + logger.Fatal("Client received reply despite server sending rst stream after header") } if status.Code(err) != codes.Internal { logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal) @@ -92,7 +92,7 @@ func rstDuringData(tc testgrpc.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { - logger.Fatalf("Client received reply despite server sending rst stream during data") + logger.Fatal("Client received reply despite server sending rst stream during data") } if status.Code(err) != codes.Unknown { logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unknown) @@ -103,7 +103,7 @@ func rstAfterData(tc testgrpc.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { - logger.Fatalf("Client received reply 
despite server sending rst stream after data") + logger.Fatal("Client received reply despite server sending rst stream after data") } if status.Code(err) != codes.Internal { logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal) diff --git a/interop/server/server.go b/interop/server/server.go index 3d27ded80b68..0778dbf961f0 100644 --- a/interop/server/server.go +++ b/interop/server/server.go @@ -48,7 +48,7 @@ var ( func main() { flag.Parse() if *useTLS && *useALTS { - logger.Fatalf("use_tls and use_alts cannot be both set to true") + logger.Fatal("-use_tls and -use_alts cannot be both set to true") } p := strconv.Itoa(*port) lis, err := net.Listen("tcp", ":"+p) @@ -66,7 +66,7 @@ func main() { } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - logger.Fatalf("Failed to generate credentials %v", err) + logger.Fatalf("Failed to generate credentials: %v", err) } opts = append(opts, grpc.Creds(creds)) } else if *useALTS { diff --git a/interop/xds/server/server.go b/interop/xds/server/server.go index 4bc69ae2fe6d..a45a893c392c 100644 --- a/interop/xds/server/server.go +++ b/interop/xds/server/server.go @@ -118,9 +118,10 @@ func main() { // If -secure_mode is not set, expose all services on -port with a regular // gRPC server. if !*secureMode { - lis, err := net.Listen("tcp4", fmt.Sprintf(":%d", *port)) + addr := fmt.Sprintf(":%d", *port) + lis, err := net.Listen("tcp4", addr) if err != nil { - logger.Fatalf("net.Listen(%s) failed: %v", fmt.Sprintf(":%d", *port), err) + logger.Fatalf("net.Listen(%s) failed: %v", addr, err) } server := grpc.NewServer() @@ -141,9 +142,10 @@ func main() { } // Create a listener on -port to expose the test service. 
- testLis, err := net.Listen("tcp4", fmt.Sprintf(":%d", *port)) + addr := fmt.Sprintf(":%d", *port) + testLis, err := net.Listen("tcp4", addr) if err != nil { - logger.Fatalf("net.Listen(%s) failed: %v", fmt.Sprintf(":%d", *port), err) + logger.Fatalf("net.Listen(%s) failed: %v", addr, err) } // Create server-side xDS credentials with a plaintext fallback. @@ -164,9 +166,10 @@ func main() { defer testServer.Stop() // Create a listener on -maintenance_port to expose other services. - maintenanceLis, err := net.Listen("tcp4", fmt.Sprintf(":%d", *maintenancePort)) + addr = fmt.Sprintf(":%d", *maintenancePort) + maintenanceLis, err := net.Listen("tcp4", addr) if err != nil { - logger.Fatalf("net.Listen(%s) failed: %v", fmt.Sprintf(":%d", *maintenancePort), err) + logger.Fatalf("net.Listen(%s) failed: %v", addr, err) } // Create a regular gRPC server and register the maintenance services on diff --git a/pickfirst.go b/pickfirst.go index b3a55481b944..fc91b4d266de 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -51,7 +51,7 @@ type pickfirstBalancer struct { func (b *pickfirstBalancer) ResolverError(err error) { if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure diff --git a/rpc_util.go b/rpc_util.go index c9438c064f3f..cb7020ebecd7 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -712,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. 
Currently keep it consistent with java @@ -760,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return err } if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { payInfo.uncompressedBytes = d diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go index b1b2e30530fb..e3cac66c07f1 100644 --- a/security/advancedtls/crl.go +++ b/security/advancedtls/crl.go @@ -238,11 +238,11 @@ func fetchIssuerCRL(rawIssuer []byte, crlVerifyCrt []*x509.Certificate, cfg Revo crl, err := fetchCRL(rawIssuer, cfg) if err != nil { - return nil, fmt.Errorf("fetchCRL() failed err = %v", err) + return nil, fmt.Errorf("fetchCRL() failed: %v", err) } if err := verifyCRL(crl, rawIssuer, crlVerifyCrt); err != nil { - return nil, fmt.Errorf("verifyCRL() failed err = %v", err) + return nil, fmt.Errorf("verifyCRL() failed: %v", err) } if cfg.Cache != nil { cfg.Cache.Add(hex.EncodeToString(rawIssuer), crl) @@ -264,7 +264,7 @@ func checkCert(c *x509.Certificate, crlVerifyCrt []*x509.Certificate, cfg Revoca } revocation, err := checkCertRevocation(c, crl) if err != nil { - grpclogLogger.Warningf("checkCertRevocation(CRL %v) failed %v", crl.CertList.TBSCertList.Issuer, err) + grpclogLogger.Warningf("checkCertRevocation(CRL %v) failed: %v", crl.CertList.TBSCertList.Issuer, err) // We couldn't check the CRL file for some reason, so we don't know if it's RevocationUnrevoked or not. 
return RevocationUndetermined } @@ -317,7 +317,7 @@ func parseCertIssuerExt(ext pkix.Extension) ([]byte, error) { // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName var generalNames []asn1.RawValue if rest, err := asn1.Unmarshal(ext.Value, &generalNames); err != nil || len(rest) != 0 { - return nil, fmt.Errorf("asn1.Unmarshal failed err = %v", err) + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) } for _, generalName := range generalNames { @@ -386,7 +386,7 @@ func parseCRLExtensions(c *pkix.CertificateList) (*certificateListExt, error) { case oidAuthorityKeyIdentifier.Equal(ext.Id): var a authKeyID if rest, err := asn1.Unmarshal(ext.Value, &a); err != nil { - return nil, fmt.Errorf("asn1.Unmarshal failed. err = %v", err) + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) } else if len(rest) != 0 { return nil, errors.New("trailing data after AKID extension") } @@ -395,7 +395,7 @@ func parseCRLExtensions(c *pkix.CertificateList) (*certificateListExt, error) { case oidIssuingDistributionPoint.Equal(ext.Id): var dp issuingDistributionPoint if rest, err := asn1.Unmarshal(ext.Value, &dp); err != nil { - return nil, fmt.Errorf("asn1.Unmarshal failed. 
err = %v", err) + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) } else if len(rest) != 0 { return nil, errors.New("trailing data after IssuingDistributionPoint extension") } @@ -431,7 +431,7 @@ func fetchCRL(rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, erro var r pkix.RDNSequence rest, err := asn1.Unmarshal(rawIssuer, &r) if len(rest) != 0 || err != nil { - return nil, fmt.Errorf("asn1.Unmarshal(Issuer) len(rest) = %v, err = %v", len(rest), err) + return nil, fmt.Errorf("asn1.Unmarshal(Issuer) len(rest) = %d failed: %v", len(rest), err) } crlPath := fmt.Sprintf("%s.r%d", filepath.Join(cfg.RootDir, x509NameHash(r)), i) crlBytes, err := os.ReadFile(crlPath) @@ -444,11 +444,11 @@ func fetchCRL(rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, erro crl, err := x509.ParseCRL(crlBytes) if err != nil { // Parsing errors for a CRL shouldn't happen so fail. - return nil, fmt.Errorf("x509.ParseCrl(%v) failed err = %v", crlPath, err) + return nil, fmt.Errorf("x509.ParseCrl(%v) failed: %v", crlPath, err) } var certList *certificateListExt if certList, err = parseCRLExtensions(crl); err != nil { - grpclogLogger.Infof("fetchCRL: unsupported crl %v, err = %v", crlPath, err) + grpclogLogger.Infof("fetchCRL: unsupported crl %v: %v", crlPath, err) // Continue to find a supported CRL continue } diff --git a/server.go b/server.go index 2808b7c83e80..d5a6e78be44d 100644 --- a/server.go +++ b/server.go @@ -1299,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } diff --git a/service_config.go b/service_config.go index 01bbb2025aed..f22acace4253 100644 --- a/service_config.go +++ b/service_config.go @@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } d, err := parseDuration(m.Timeout) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: d, } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service 
config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} diff --git a/stress/client/main.go b/stress/client/main.go index 5e260b172e8c..ef3db7c13864 100644 --- a/stress/client/main.go +++ b/stress/client/main.go @@ -287,7 +287,7 @@ func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.C } creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { - logger.Fatalf("Failed to create TLS credentials %v", err) + logger.Fatalf("Failed to create TLS credentials: %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) diff --git a/stress/metrics_client/main.go b/stress/metrics_client/main.go index f64a64c8a3f0..8948f868dbf3 100644 --- a/stress/metrics_client/main.go +++ b/stress/metrics_client/main.go @@ -72,7 +72,7 @@ func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { func main() { flag.Parse() if *metricsServerAddress == "" { - logger.Fatalf("Metrics server address is empty.") + logger.Fatal("-metrics_server_address is unset") } conn, err := grpc.Dial(*metricsServerAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 14c1c2e769aa..4a0beab131d1 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -349,7 +349,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { if b.childLB == nil { childLB, err := newChildBalancer(b.ccw, b.bOpts) if err != nil { - b.logger.Errorf("Failed to create child policy of type 
%s, %v", clusterresolver.Name, err) + b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) return } b.childLB = childLB diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go index a167cc5fa2c9..4c39e85739db 100644 --- a/xds/internal/clusterspecifier/rls/rls.go +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -102,7 +102,7 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala return nil, fmt.Errorf("RLS LB policy not registered") } if _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON); err != nil { - return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing %v", err) + return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing: %v", err) } return clusterspecifier.BalancerConfig{{internal.RLSLoadBalancingPolicyName: lbCfgJSON}}, nil diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index fce59f65c33e..f04939182b94 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -503,7 +503,7 @@ func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, interval := defaultInterval if i := od.GetInterval(); i != nil { if err := i.CheckValid(); err != nil { - return nil, fmt.Errorf("outlier_detection.interval is invalid with error %v", err) + return nil, fmt.Errorf("outlier_detection.interval is invalid with error: %v", err) } if interval = i.AsDuration(); interval < 0 { return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", interval) @@ -513,7 +513,7 @@ func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, baseEjectionTime := defaultBaseEjectionTime if bet := od.GetBaseEjectionTime(); bet != nil { if err := bet.CheckValid(); err != nil { - return nil, fmt.Errorf("outlier_detection.base_ejection_time is 
invalid with error %v", err) + return nil, fmt.Errorf("outlier_detection.base_ejection_time is invalid with error: %v", err) } if baseEjectionTime = bet.AsDuration(); baseEjectionTime < 0 { return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", baseEjectionTime) @@ -523,7 +523,7 @@ func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, maxEjectionTime := defaultMaxEjectionTime if met := od.GetMaxEjectionTime(); met != nil { if err := met.CheckValid(); err != nil { - return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid with error %v", err) + return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid: %v", err) } if maxEjectionTime = met.AsDuration(); maxEjectionTime < 0 { return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", maxEjectionTime) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 8f434d11d957..819e47d32fce 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -83,7 +83,7 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l var err error csps, err = processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins) if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("received route is invalid %v", err) + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) } } // cspNames represents all the cluster specifiers referenced by Route From 4e4d8288ff4fb99150a526165c767e6fac3bd5ec Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Tue, 17 Jan 2023 19:25:48 -0500 Subject: [PATCH 743/998] xds interop: Fix buildscripts not continuing on a failed test suite (#5937) --- test/kokoro/psm-security.sh | 2 +- test/kokoro/xds_k8s_lb.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/kokoro/psm-security.sh b/test/kokoro/psm-security.sh index 
97aca28d79b7..f99cb9a87883 100755 --- a/test/kokoro/psm-security.sh +++ b/test/kokoro/psm-security.sh @@ -158,7 +158,7 @@ main() { local failed_tests=0 test_suites=("baseline_test" "security_test" "authz_test") for test in "${test_suites[@]}"; do - run_test $test || (( failed_tests++ )) + run_test $test || (( ++failed_tests )) done echo "Failed test suites: ${failed_tests}" if (( failed_tests > 0 )); then diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index 31aca363aafc..f1f01794a056 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -160,7 +160,7 @@ main() { local failed_tests=0 test_suites=("api_listener_test" "change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test" "affinity_test" "outlier_detection_test") for test in "${test_suites[@]}"; do - run_test $test || (( failed_tests++ )) + run_test $test || (( ++failed_tests )) done echo "Failed test suites: ${failed_tests}" if (( failed_tests > 0 )); then From 9326362a37b8d5e1bb943ab93e21090d0771bc04 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 18 Jan 2023 10:05:46 -0800 Subject: [PATCH 744/998] transport: fix maxStreamID to align with http2 spec (#5948) --- internal/transport/defaults.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/transport/defaults.go b/internal/transport/defaults.go index 73c939603559..bc8ee0747496 100644 --- a/internal/transport/defaults.go +++ b/internal/transport/defaults.go @@ -50,5 +50,6 @@ const ( // MaxStreamID is the upper bound for the stream ID before the current // transport gracefully closes and new transport is created for subsequent RPCs. -// This is set to 75% of math.MaxUint32. It's exported so that tests can override it. -var MaxStreamID = uint32(3_221_225_472) +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. 
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) From ace808232fa0b225fbb8106ef6522c550adf80f2 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 18 Jan 2023 11:32:40 -0800 Subject: [PATCH 745/998] xdsclient: close func refactor (#5926) Fixes https://github.com/grpc/grpc-go/issues/5895 --- internal/testutils/xds/bootstrap/bootstrap.go | 22 +- internal/testutils/xds/e2e/server.go | 2 +- xds/csds/csds.go | 15 +- xds/csds/csds_e2e_test.go | 4 +- xds/googledirectpath/googlec2p.go | 14 +- xds/googledirectpath/googlec2p_test.go | 4 +- .../cdsbalancer/cdsbalancer_security_test.go | 1 - .../balancer/cdsbalancer/cdsbalancer_test.go | 2 - .../balancer/clusterimpl/balancer_test.go | 7 - .../clusterresolver/clusterresolver_test.go | 5 +- .../clusterresolver/e2e_test/eds_impl_test.go | 16 +- .../balancer/clusterresolver/priority_test.go | 3 - xds/internal/resolver/xds_resolver.go | 32 +-- xds/internal/resolver/xds_resolver_test.go | 254 +++++++++--------- xds/internal/testutils/fakeclient/client.go | 9 - xds/internal/xdsclient/bootstrap/bootstrap.go | 10 +- xds/internal/xdsclient/client.go | 2 - xds/internal/xdsclient/client_new.go | 83 ++++-- xds/internal/xdsclient/client_test.go | 3 - xds/internal/xdsclient/clientimpl.go | 7 +- .../xdsclient/e2e_test/authority_test.go | 22 +- .../xdsclient/e2e_test/cds_watchers_test.go | 40 +-- xds/internal/xdsclient/e2e_test/dump_test.go | 4 +- .../xdsclient/e2e_test/eds_watchers_test.go | 32 +-- .../e2e_test/federation_watchers_test.go | 7 +- .../xdsclient/e2e_test/lds_watchers_test.go | 40 +-- .../xdsclient/e2e_test/misc_watchers_test.go | 4 +- .../xdsclient/e2e_test/rds_watchers_test.go | 32 +-- .../e2e_test/resource_update_test.go | 16 +- xds/internal/xdsclient/loadreport_test.go | 8 +- xds/internal/xdsclient/singleton.go | 86 +++--- xds/internal/xdsclient/singleton_test.go | 44 ++- xds/server.go | 16 +- xds/server_test.go | 16 +- 34 files changed, 424 insertions(+), 438 deletions(-) diff --git 
a/internal/testutils/xds/bootstrap/bootstrap.go b/internal/testutils/xds/bootstrap/bootstrap.go index a8c49612bd59..195f1bea0c6e 100644 --- a/internal/testutils/xds/bootstrap/bootstrap.go +++ b/internal/testutils/xds/bootstrap/bootstrap.go @@ -49,7 +49,11 @@ type Options struct { NodeID string // ServerURI is the address of the management server. ServerURI string - // ServerListenerResourceNameTemplate is the Listener resource name to fetch. + // ClientDefaultListenerResourceNameTemplate is the default listener + // resource name template to be used on the gRPC client. + ClientDefaultListenerResourceNameTemplate string + // ServerListenerResourceNameTemplate is the listener resource name template + // to be used on the gRPC server. ServerListenerResourceNameTemplate string // CertificateProviders is the certificate providers configuration. CertificateProviders map[string]json.RawMessage @@ -111,8 +115,9 @@ func Contents(opts Options) ([]byte, error) { Node: node{ ID: opts.NodeID, }, - CertificateProviders: opts.CertificateProviders, - ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, + CertificateProviders: opts.CertificateProviders, + ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate, + ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, } switch opts.Version { case TransportV2: @@ -146,11 +151,12 @@ func Contents(opts Options) ([]byte, error) { } type bootstrapConfig struct { - XdsServers []server `json:"xds_servers,omitempty"` - Node node `json:"node,omitempty"` - CertificateProviders map[string]json.RawMessage `json:"certificate_providers,omitempty"` - ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` - Authorities map[string]authority `json:"authorities,omitempty"` + XdsServers []server `json:"xds_servers,omitempty"` + Node node `json:"node,omitempty"` + CertificateProviders map[string]json.RawMessage 
`json:"certificate_providers,omitempty"` + ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"` + ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` + Authorities map[string]authority `json:"authorities,omitempty"` } type authority struct { diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go index dac74e3cebce..001544d141d7 100644 --- a/internal/testutils/xds/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -117,7 +117,7 @@ func StartManagementServer(opts ManagementServerOptions) (*ManagementServer, err var err error lis, err = net.Listen("tcp", "localhost:0") if err != nil { - return nil, fmt.Errorf("failed to start xDS management server: %v", err) + return nil, fmt.Errorf("listening on local host and port: %v", err) } } diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 15039793216c..551757b80069 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -62,18 +62,19 @@ func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger type ClientStatusDiscoveryServer struct { logger *internalgrpclog.PrefixLogger - mu sync.Mutex - xdsClient xdsclient.XDSClient + mu sync.Mutex + xdsClient xdsclient.XDSClient + xdsClientClose func() } // NewClientStatusDiscoveryServer returns an implementation of the CSDS server // that can be registered on a gRPC server. 
func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - c, err := xdsclient.New() + c, close, err := xdsclient.New() if err != nil { logger.Warningf("Failed to create xDS client: %v", err) } - s := &ClientStatusDiscoveryServer{xdsClient: c} + s := &ClientStatusDiscoveryServer{xdsClient: c, xdsClientClose: close} s.logger = prefixLogger(s) s.logger.Infof("Created CSDS server, with xdsClient %p", c) return s, nil @@ -135,11 +136,9 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp // Close cleans up the resources. func (s *ClientStatusDiscoveryServer) Close() { - s.mu.Lock() - if s.xdsClient != nil { - s.xdsClient.Close() + if s.xdsClientClose != nil { + s.xdsClientClose() } - s.mu.Unlock() } // nodeProtoToV3 converts the given proto into a v3.Node. n is from bootstrap diff --git a/xds/csds/csds_e2e_test.go b/xds/csds/csds_e2e_test.go index 55ae772741b2..d83530a157bc 100644 --- a/xds/csds/csds_e2e_test.go +++ b/xds/csds/csds_e2e_test.go @@ -108,11 +108,11 @@ func (s) TestCSDS(t *testing.T) { // Create an xDS client. This will end up using the same singleton as used // by the CSDS service. - xdsC, err := xdsclient.New() + xdsC, close, err := xdsclient.New() if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer xdsC.Close() + defer close() // Initialize an gRPC server and register CSDS on it. 
server := grpc.NewServer() diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 669c4fc1c483..5bc17b03e5b2 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -70,7 +70,7 @@ const ( var ( onGCE = googlecloud.OnGCE - newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, func(), error) { return xdsclient.NewWithConfig(config) } @@ -135,7 +135,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts // Create singleton xds client with this config. The xds client will be // used by the xds resolver later. - xdsC, err := newClientWithConfig(config) + _, close, err := newClientWithConfig(config) if err != nil { return nil, fmt.Errorf("failed to start xDS client: %v", err) } @@ -154,12 +154,12 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts } xdsR, err := resolver.Get(xdsName).Build(t, cc, opts) if err != nil { - xdsC.Close() + close() return nil, err } return &c2pResolver{ - Resolver: xdsR, - client: xdsC, + Resolver: xdsR, + clientCloseFunc: close, }, nil } @@ -169,12 +169,12 @@ func (b c2pResolverBuilder) Scheme() string { type c2pResolver struct { resolver.Resolver - client xdsclient.XDSClient + clientCloseFunc func() } func (r *c2pResolver) Close() { r.Resolver.Close() - r.client.Close() + r.clientCloseFunc() } var ipv6EnabledMetadata = &structpb.Struct{ diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index f9357e9bf3aa..1eb1b18cf760 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -180,9 +180,9 @@ func TestBuildXDS(t *testing.T) { configCh := make(chan *bootstrap.Config, 1) oldNewClient := newClientWithConfig - newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, error) { + newClientWithConfig = func(config 
*bootstrap.Config) (xdsclient.XDSClient, func(), error) { configCh <- config - return tXDSClient, nil + return tXDSClient, func() { tXDSClient.Close() }, nil } defer func() { newClientWithConfig = oldNewClient }() diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 7d5898ada83d..8d7face5e0a3 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -175,7 +175,6 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { newChildBalancer = oldEDSBalancerBuilder - xdsC.Close() } } diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index fa94e13f442a..132f1002a8cb 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -255,7 +255,6 @@ func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *t return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { newChildBalancer = oldEDSBalancerBuilder - xdsC.Close() } } @@ -286,7 +285,6 @@ func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBal // provided xdsClient is invoked appropriately. 
func (s) TestUpdateClientConnState(t *testing.T) { xdsC := fakeclient.NewClient() - defer xdsC.Close() tests := []struct { name string diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 1d531c1a52dc..c4fcd84e55be 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -88,7 +88,6 @@ func (s) TestDropByCategory(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() - defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -248,7 +247,6 @@ func (s) TestDropByCategory(t *testing.T) { func (s) TestDropCircuitBreaking(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() - defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -361,7 +359,6 @@ func (s) TestDropCircuitBreaking(t *testing.T) { func (s) TestPickerUpdateAfterClose(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() - defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -430,7 +427,6 @@ func (s) TestClusterNameInAddressAttributes(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() - defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -506,7 +502,6 @@ func (s) TestReResolution(t *testing.T) { defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() - defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -574,7 +569,6 @@ func (s) TestLoadReporting(t *testing.T) { } xdsC := fakeclient.NewClient() - defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) @@ -689,7 
+683,6 @@ func (s) TestUpdateLRSServer(t *testing.T) { } xdsC := fakeclient.NewClient() - defer xdsC.Close() builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 7a3ced47b3a1..a368b9da7326 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -215,10 +215,7 @@ func setup(childLBCh *testutils.Channel) (*fakeclient.Client, func()) { defer func() { childLBCh.Send(childLB) }() return childLB } - return xdsC, func() { - newChildBalancer = origNewChildBalancer - xdsC.Close() - } + return xdsC, func() { newChildBalancer = origNewChildBalancer } } // TestSubConnStateChange verifies if the top-level clusterResolverBalancer passes on diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 75d37e71ea61..83289ce40df2 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -192,11 +192,11 @@ func (s) TestEDS_OneLocality(t *testing.T) { } // Create an xDS client for use by the cluster_resolver LB policy. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Create a manual resolver and push a service config specifying the use of // the cluster_resolver LB policy with a single discovery mechanism. @@ -300,11 +300,11 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { } // Create an xDS client for use by the cluster_resolver LB policy. 
- client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Create a manual resolver and push service config specifying the use of // the cluster_resolver LB policy with a single discovery mechanism. @@ -438,11 +438,11 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { } // Create an xDS client for use by the cluster_resolver LB policy. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Create a manual resolver and push service config specifying the use of // the cluster_resolver LB policy with a single discovery mechanism. @@ -504,11 +504,11 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { } // Create an xDS client for use by the cluster_resolver LB policy. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Create a manual resolver and push service config specifying the use of // the cluster_resolver LB policy with a single discovery mechanism. 
diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index ce3df8fdfad3..b2cc0c9f2097 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -78,17 +78,14 @@ func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) }, }); err != nil { edsb.Close() - xdsC.Close() t.Fatal(err) } if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { edsb.Close() - xdsC.Close() t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } return edsb, cc, xdsC, func() { edsb.Close() - xdsC.Close() } } diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index f473fcbaa733..09b3356301db 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -44,14 +44,14 @@ const xdsScheme = "xds" // ClientConns at the same time. func newBuilderForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ - newXDSClient: func() (xdsclient.XDSClient, error) { + newXDSClient: func() (xdsclient.XDSClient, func(), error) { return xdsclient.NewWithBootstrapContentsForTesting(config) }, }, nil } // For overriding in unittests. -var newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.New() } +var newXDSClient = func() (xdsclient.XDSClient, func(), error) { return xdsclient.New() } func init() { resolver.Register(&xdsResolverBuilder{}) @@ -59,7 +59,7 @@ func init() { } type xdsResolverBuilder struct { - newXDSClient func() (xdsclient.XDSClient, error) + newXDSClient func() (xdsclient.XDSClient, func(), error) } // Build helps implement the resolver.Builder interface. 
@@ -87,11 +87,12 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon newXDSClient = b.newXDSClient } - client, err := newXDSClient() + client, close, err := newXDSClient() if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) } - r.client = client + r.xdsClient = client + r.xdsClientClose = close bootstrapConfig := client.BootstrapConfig() if bootstrapConfig == nil { return nil, errors.New("bootstrap configuration is empty") @@ -138,11 +139,11 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon r.ldsResourceName = bootstrap.PopulateResourceTemplate(template, endpoint) // Register a watch on the xdsClient for the resource name determined above. - cancelWatch := watchService(r.client, r.ldsResourceName, r.handleServiceUpdate, r.logger) - r.logger.Infof("Watch started on resource name %v with xds-client %p", r.ldsResourceName, r.client) + cancelWatch := watchService(r.xdsClient, r.ldsResourceName, r.handleServiceUpdate, r.logger) + r.logger.Infof("Watch started on resource name %v with xds-client %p", r.ldsResourceName, r.xdsClient) r.cancelWatch = func() { cancelWatch() - r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.ldsResourceName, r.client) + r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.ldsResourceName, r.xdsClient) } go r.run() @@ -174,7 +175,8 @@ type xdsResolver struct { ldsResourceName string // The underlying xdsClient which performs all xDS requests and responses. - client xdsclient.XDSClient + xdsClient xdsclient.XDSClient + xdsClientClose func() // A channel for the watch API callback to write service updates on to. The // updates are read by the run goroutine and passed on to the ClientConn. 
updateCh chan suWithError @@ -218,13 +220,13 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { r.cc.ReportError(err) return false } - r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.ldsResourceName, r.client, pretty.FormatJSON(sc)) + r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.ldsResourceName, r.xdsClient, pretty.FormatJSON(sc)) // Send the update to the ClientConn. state := iresolver.SetConfigSelector(resolver.State{ ServiceConfig: r.cc.ParseServiceConfig(string(sc)), }, cs) - r.cc.UpdateState(xdsclient.SetClient(state, r.client)) + r.cc.UpdateState(xdsclient.SetClient(state, r.xdsClient)) return true } @@ -237,7 +239,7 @@ func (r *xdsResolver) run() { return case update := <-r.updateCh: if update.err != nil { - r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.ldsResourceName, r.client, update.err) + r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.ldsResourceName, r.xdsClient, update.err) if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { // If error is resource-not-found, it means the LDS // resource was removed. Ultimately send an empty service @@ -265,7 +267,7 @@ func (r *xdsResolver) run() { // Create the config selector for this update. 
cs, err := r.newConfigSelector(update.su) if err != nil { - r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.ldsResourceName, r.client, err) + r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.ldsResourceName, r.xdsClient, err) r.cc.ReportError(err) continue } @@ -313,8 +315,8 @@ func (r *xdsResolver) Close() { if r.cancelWatch != nil { r.cancelWatch() } - if r.client != nil { - r.client.Close() + if r.xdsClientClose != nil { + r.xdsClientClose() } r.closed.Fire() r.logger.Infof("Shutdown") diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index f684c799123c..330788bfe56e 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -30,11 +30,13 @@ import ( xxhash "github.com/cespare/xxhash/v2" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" @@ -43,7 +45,6 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config "google.golang.org/grpc/xds/internal/balancer/clustermanager" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" @@ -52,13 +53,15 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + _ 
"google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config ) const ( targetStr = "target" routeStr = "route" cluster = "cluster" - defaultTestTimeout = 1 * time.Second + defaultTestTimeout = 10 * time.Second defaultTestShortTimeout = 100 * time.Microsecond ) @@ -76,15 +79,14 @@ func Test(t *testing.T) { } func (s) TestRegister(t *testing.T) { - b := resolver.Get(xdsScheme) - if b == nil { + if resolver.Get(xdsScheme) == nil { t.Errorf("scheme %v is not registered", xdsScheme) } } -// testClientConn is a fake implemetation of resolver.ClientConn. All is does -// is to store the state received from the resolver locally and signal that -// event through a channel. +// testClientConn is a fake implemetation of resolver.ClientConn that pushes +// state updates and errors returned by the resolver on to channels for +// consumption by tests. type testClientConn struct { resolver.ClientConn stateCh *testutils.Channel @@ -111,34 +113,35 @@ func newTestClientConn() *testClientConn { } } -// TestResolverBuilder tests the xdsResolverBuilder's Build method with +// TestResolverBuilder tests the resolver builder's Build() method with // different parameters. 
func (s) TestResolverBuilder(t *testing.T) { tests := []struct { name string - xdsClientFunc func() (xdsclient.XDSClient, error) + xdsClientFunc func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) target resolver.Target + buildOpts resolver.BuildOptions wantErr bool }{ { - name: "simple-good", - xdsClientFunc: func() (xdsclient.XDSClient, error) { - return fakeclient.NewClient(), nil + name: "good", + xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { + return fakeclient.NewClient(), func() { close(closeCh) }, nil }, target: target, wantErr: false, }, { - name: "newXDSClient-throws-error", - xdsClientFunc: func() (xdsclient.XDSClient, error) { - return nil, errors.New("newXDSClient-throws-error") + name: "xDS client creation fails", + xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { + return nil, func() { close(closeCh) }, errors.New("failed to create xDS client") }, target: target, wantErr: true, }, { name: "authority not defined in bootstrap", - xdsClientFunc: func() (xdsclient.XDSClient, error) { + xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() c.SetBootstrapConfig(&bootstrap.Config{ ClientDefaultListenerResourceNameTemplate: "%s", @@ -148,7 +151,7 @@ func (s) TestResolverBuilder(t *testing.T) { }, }, }) - return c, nil + return c, func() { close(closeCh) }, nil }, target: resolver.Target{ URL: url.URL{ @@ -158,12 +161,32 @@ func (s) TestResolverBuilder(t *testing.T) { }, wantErr: true, }, + { + name: "xDS creds specified without certificate providers in bootstrap", + xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { + c := fakeclient.NewClient() + c.SetBootstrapConfig(&bootstrap.Config{}) + return c, func() { close(closeCh) }, nil + }, + target: target, + buildOpts: resolver.BuildOptions{ + DialCreds: func() credentials.TransportCredentials { + creds, err := 
xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + t.Fatalf("xds.NewClientCredentials() failed: %v", err) + } + return creds + }(), + }, + wantErr: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Fake out the xdsClient creation process by providing a fake. + // Use a fake xDS client that closes the below channel when closed. + closeCh := make(chan struct{}) oldClientMaker := newXDSClient - newXDSClient = test.xdsClientFunc + newXDSClient = func() (xdsclient.XDSClient, func(), error) { return test.xdsClientFunc(closeCh) } defer func() { newXDSClient = oldClientMaker }() @@ -173,7 +196,7 @@ func (s) TestResolverBuilder(t *testing.T) { t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) } - r, err := builder.Build(test.target, newTestClientConn(), resolver.BuildOptions{}) + r, err := builder.Build(test.target, newTestClientConn(), test.buildOpts) if (err != nil) != test.wantErr { t.Fatalf("builder.Build(%v) returned err: %v, wantErr: %v", target, err, test.wantErr) } @@ -182,48 +205,13 @@ func (s) TestResolverBuilder(t *testing.T) { return } r.Close() - }) - } -} - -// TestResolverBuilder_xdsCredsBootstrapMismatch tests the case where an xds -// resolver is built with xds credentials being specified by the user. The -// bootstrap file does not contain any certificate provider configuration -// though, and therefore we expect the resolver build to fail. -func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { - // Fake out the xdsClient creation process by providing a fake, which does - // not have any certificate provider configuration. 
- fc := fakeclient.NewClient() - fc.SetBootstrapConfig(&bootstrap.Config{}) - oldClientMaker := newXDSClient - newXDSClient = func() (xdsclient.XDSClient, error) { - return fc, nil - } - defer func() { newXDSClient = oldClientMaker }() - defer func() { - select { - case <-time.After(defaultTestTimeout): - t.Fatalf("timeout waiting for close") - case <-fc.Closed.Done(): - } - }() - builder := resolver.Get(xdsScheme) - if builder == nil { - t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) - } - - // Create xds credentials to be passed to resolver.Build(). - creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) - if err != nil { - t.Fatalf("xds.NewClientCredentials() failed: %v", err) - } - - // Since the fake xds client is not configured with any certificate provider - // configs, and we are specifying xds credentials in the call to - // resolver.Build(), we expect it to fail. - if _, err := builder.Build(target, newTestClientConn(), resolver.BuildOptions{DialCreds: creds}); err == nil { - t.Fatal("builder.Build() succeeded when expected to fail") + select { + case <-closeCh: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for xDS client to be closed") + } + }) } } @@ -240,8 +228,9 @@ func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *fakeclient.Client, fc.SetBootstrapConfig(opts.bootstrapC) } oldClientMaker := newXDSClient - newXDSClient = func() (xdsclient.XDSClient, error) { - return fc, nil + closeCh := make(chan struct{}) + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return fc, grpcsync.OnceFunc(func() { close(closeCh) }), nil } cancel := func() { // Make sure the xDS client is closed, in all (successful or failed) @@ -249,7 +238,7 @@ func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *fakeclient.Client, select { case <-time.After(defaultTestTimeout): t.Fatalf("timeout waiting for close") - case <-fc.Closed.Done(): + case <-closeCh: } 
newXDSClient = oldClientMaker } @@ -410,7 +399,9 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { }, }, nil) - if gotVal, gotErr := tcc.stateCh.Receive(ctx); gotErr != context.DeadlineExceeded { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if gotVal, gotErr := tcc.stateCh.Receive(sCtx); gotErr != context.DeadlineExceeded { t.Fatalf("ClientConn.UpdateState called after xdsResolver is closed: %v", gotVal) } } @@ -418,12 +409,9 @@ func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { // TestXDSResolverCloseClosesXDSClient tests that the XDS resolver's Close // method closes the XDS client. func (s) TestXDSResolverCloseClosesXDSClient(t *testing.T) { - xdsR, xdsC, _, cancel := testSetup(t, setupOpts{target: target}) - defer cancel() + xdsR, _, _, cancel := testSetup(t, setupOpts{target: target}) xdsR.Close() - if !xdsC.Closed.HasFired() { - t.Fatalf("xds client not closed by xds resolver Close method") - } + cancel() // Blocks until the xDS client is closed. } // TestXDSResolverBadServiceUpdate tests the case the xdsClient returns a bad @@ -471,13 +459,13 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { { routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}`, + "xds_cluster_manager_experimental":{ + "children":{ + "cluster:test-cluster-1":{ + "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] + } + } + }}]}`, wantClusters: map[string]bool{"cluster:test-cluster-1": true}, }, { @@ -489,19 +477,19 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { // well as this update, as the previous config selector still // references the old cluster when the new one is pushed. 
wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - }, - "cluster:cluster_1":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] - }, - "cluster:cluster_2":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] - } - } - }}]}`, + "xds_cluster_manager_experimental":{ + "children":{ + "cluster:test-cluster-1":{ + "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] + }, + "cluster:cluster_1":{ + "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] + }, + "cluster:cluster_2":{ + "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] + } + } + }}]}`, wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, { @@ -513,16 +501,16 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { // stopped, so there are no more references to the first cluster. // Only the second update's clusters should remain. 
wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:cluster_1":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] - }, - "cluster:cluster_2":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] - } - } - }}]}`, + "xds_cluster_manager_experimental":{ + "children":{ + "cluster:cluster_1":{ + "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] + }, + "cluster:cluster_2":{ + "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] + } + } + }}]}`, wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, } { @@ -723,13 +711,13 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { }, }, nil) wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}` + "xds_cluster_manager_experimental":{ + "children":{ + "cluster:test-cluster-1":{ + "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] + } + } + }}]}` wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) gotState, err := tcc.stateCh.Receive(ctx) @@ -985,13 +973,13 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { } wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}` + "xds_cluster_manager_experimental":{ + "children":{ + "cluster:test-cluster-1":{ + "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] + } + } + }}]}` wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("ClientConn.UpdateState received different service config") @@ -1044,16 +1032,16 @@ func (s) 
TestXDSResolverDelayedOnCommitted(t *testing.T) { t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) } wantJSON2 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - }, - "cluster:NEW":{ - "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] - } - } - }}]}` + "xds_cluster_manager_experimental":{ + "children":{ + "cluster:test-cluster-1":{ + "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] + }, + "cluster:NEW":{ + "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] + } + } + }}]}` wantSCParsed2 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON2) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) { t.Errorf("ClientConn.UpdateState received different service config") @@ -1082,13 +1070,13 @@ func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) } wantJSON3 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:NEW":{ - "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] - } - } - }}]}` + "xds_cluster_manager_experimental":{ + "children":{ + "cluster:NEW":{ + "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] + } + } + }}]}` wantSCParsed3 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON3) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) { t.Errorf("ClientConn.UpdateState received different service config") @@ -1166,7 +1154,9 @@ func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { suErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource removed error") xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) - if gotErrVal, 
gotErr := tcc.errorCh.Receive(ctx); gotErr != context.DeadlineExceeded { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if gotErrVal, gotErr := tcc.errorCh.Receive(sCtx); gotErr != context.DeadlineExceeded { t.Fatalf("ClientConn.ReportError() received %v, %v, want channel recv timeout", gotErrVal, gotErr) } diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 3ab57bbd489d..9794425c501f 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -22,7 +22,6 @@ package fakeclient import ( "context" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -56,8 +55,6 @@ type Client struct { rdsCbs map[string]func(xdsresource.RouteConfigUpdate, error) cdsCbs map[string]func(xdsresource.ClusterUpdate, error) edsCbs map[string]func(xdsresource.EndpointsUpdate, error) - - Closed *grpcsync.Event // fired when Close is called. } // WatchListener registers a LDS watch. @@ -275,11 +272,6 @@ func (xdsC *Client) WaitForReportLoad(ctx context.Context) (ReportLoadArgs, erro return val.(ReportLoadArgs), nil } -// Close fires xdsC.Closed, indicating it was called. -func (xdsC *Client) Close() { - xdsC.Closed.Fire() -} - // BootstrapConfig returns the bootstrap config. 
func (xdsC *Client) BootstrapConfig() *bootstrap.Config { return xdsC.bootstrapCfg @@ -321,6 +313,5 @@ func NewClientWithName(name string) *Client { rdsCbs: make(map[string]func(xdsresource.RouteConfigUpdate, error)), cdsCbs: make(map[string]func(xdsresource.ClusterUpdate, error)), edsCbs: make(map[string]func(xdsresource.EndpointsUpdate, error)), - Closed: grpcsync.NewEvent(), } } diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index d0caa0f77603..23eff2d63907 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -366,7 +366,7 @@ func newConfigFromContents(data []byte) (*Config, error) { var jsonData map[string]json.RawMessage if err := json.Unmarshal(data, &jsonData); err != nil { - return nil, fmt.Errorf("xds: Failed to parse bootstrap config: %v", err) + return nil, fmt.Errorf("xds: failed to parse bootstrap config: %v", err) } var node *v3corepb.Node @@ -414,7 +414,7 @@ func newConfigFromContents(data []byte) (*Config, error) { } bc, err := parser.ParseConfig(nameAndConfig.Config) if err != nil { - return nil, fmt.Errorf("xds: Config parsing for plugin %q failed: %v", name, err) + return nil, fmt.Errorf("xds: config parsing for plugin %q failed: %v", name, err) } configs[instance] = bc } @@ -452,13 +452,13 @@ func newConfigFromContents(data []byte) (*Config, error) { config.ClientDefaultListenerResourceNameTemplate = "%s" } if config.XDSServer == nil { - return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"]) + return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"]) } if config.XDSServer.ServerURI == "" { - return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"]) + return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", 
"xds_servers.server_uri", jsonData["xds_servers"]) } if config.XDSServer.Creds == nil { - return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) + return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) } // Post-process the authorities' client listener resource template field: // - if set, it must start with "xdstp:///" diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 332b31409134..6e380b27d543 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -56,6 +56,4 @@ type XDSClient interface { ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) BootstrapConfig() *bootstrap.Config - - Close() } diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index b3aecb7fa5ac..8335f9a88a8f 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -37,20 +37,28 @@ import ( // The returned client is a reference counted singleton instance. This function // creates a new client only when one doesn't already exist. // -// Note that the first invocation of New() or NewWithConfig() sets the client -// singleton. The following calls will return the singleton client without -// checking or using the config. -func New() (XDSClient, error) { +// The second return value represents a close function which releases the +// caller's reference on the returned client. The caller is expected to invoke +// it once they are done using the client. The underlying client will be closed +// only when all references are released, and it is safe for the caller to +// invoke this close function multiple times. +func New() (XDSClient, func(), error) { return newRefCountedWithConfig(nil) } // NewWithConfig returns a new xDS client configured by the given config. 
// -// Internal/Testing Only +// The second return value represents a close function which releases the +// caller's reference on the returned client. The caller is expected to invoke +// it once they are done using the client. The underlying client will be closed +// only when all references are released, and it is safe for the caller to +// invoke this close function multiple times. +// +// # Internal/Testing Only // // This function should ONLY be used for internal (c2p resolver) and/or testing // purposese. DO NOT use this elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { +func NewWithConfig(config *bootstrap.Config) (XDSClient, func(), error) { return newRefCountedWithConfig(config) } @@ -76,58 +84,79 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i // NewWithConfigForTesting returns an xDS client for the specified bootstrap // config, separate from the global singleton. // +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// // # Testing Only // // This function should ONLY be used for testing purposes. -func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, authorityIdleTimeout time.Duration) (XDSClient, error) { +// TODO(easwars): Document the new close func. +func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, authorityIdleTimeout time.Duration) (XDSClient, func(), error) { cl, err := newWithConfig(config, watchExpiryTimeout, authorityIdleTimeout) if err != nil { - return nil, err + return nil, nil, err } - return &clientRefCounted{clientImpl: cl, refCount: 1}, nil + return cl, grpcsync.OnceFunc(cl.close), nil } // NewWithBootstrapContentsForTesting returns an xDS client for this config, // separate from the global singleton. 
// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// // # Testing Only // // This function should ONLY be used for testing purposes. -func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) { +func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, func(), error) { // Normalize the contents buf := bytes.Buffer{} err := json.Indent(&buf, contents, "", "") if err != nil { - return nil, fmt.Errorf("xds: error normalizing JSON: %v", err) + return nil, nil, fmt.Errorf("xds: error normalizing JSON: %v", err) } contents = bytes.TrimSpace(buf.Bytes()) + c, err := getOrMakeClientForTesting(contents) + if err != nil { + return nil, nil, err + } + return c, grpcsync.OnceFunc(func() { + clientsMu.Lock() + defer clientsMu.Unlock() + if c.decrRef() == 0 { + c.close() + delete(clients, string(contents)) + } + }), nil +} + +// getOrMakeClientForTesting creates a new reference counted client (separate +// from the global singleton) for the given config, or returns an existing one. +// It takes care of incrementing the reference count for the returned client, +// and leaves the caller responsible for decrementing the reference count once +// the client is no longer needed. +func getOrMakeClientForTesting(config []byte) (*clientRefCounted, error) { clientsMu.Lock() defer clientsMu.Unlock() - if c := clients[string(contents)]; c != nil { - c.mu.Lock() - // Since we don't remove the *Client from the map when it is closed, we - // need to recreate the impl if the ref count dropped to zero. 
- if c.refCount > 0 { - c.refCount++ - c.mu.Unlock() - return c, nil - } - c.mu.Unlock() + + if c := clients[string(config)]; c != nil { + c.incrRef() + return c, nil } - bcfg, err := bootstrap.NewConfigFromContentsForTesting(contents) + bcfg, err := bootstrap.NewConfigFromContentsForTesting(config) if err != nil { - return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) + return nil, fmt.Errorf("bootstrap config %s: %v", string(config), err) } - cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) if err != nil { - return nil, err + return nil, fmt.Errorf("creating xDS client: %v", err) } - c := &clientRefCounted{clientImpl: cImpl, refCount: 1} - clients[string(contents)] = c + clients[string(config)] = c return c, nil } diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index b5073d76250a..272292d1f520 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -34,9 +34,6 @@ func Test(t *testing.T) { } const ( - testXDSServer = "xds-server" - testXDSServerAuthority = "xds-server-authority" - defaultTestWatchExpiryTimeout = 500 * time.Millisecond defaultTestTimeout = 5 * time.Second defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. diff --git a/xds/internal/xdsclient/clientimpl.go b/xds/internal/xdsclient/clientimpl.go index e9224cb1a31b..261b6bf48f9a 100644 --- a/xds/internal/xdsclient/clientimpl.go +++ b/xds/internal/xdsclient/clientimpl.go @@ -68,11 +68,8 @@ func (c *clientImpl) BootstrapConfig() *bootstrap.Config { return c.config } -// Close closes the gRPC connection to the management server. -// -// TODO: ensure that all underlying transports are closed before this function -// returns. -func (c *clientImpl) Close() { +// close closes the gRPC connection to the management server. 
+func (c *clientImpl) close() { if c.done.HasFired() { return } diff --git a/xds/internal/xdsclient/e2e_test/authority_test.go b/xds/internal/xdsclient/e2e_test/authority_test.go index 17cdf674a0e6..fdda7291aa18 100644 --- a/xds/internal/xdsclient/e2e_test/authority_test.go +++ b/xds/internal/xdsclient/e2e_test/authority_test.go @@ -66,8 +66,8 @@ var ( // default and the third one pointing to the non-default). // // Returns two listeners used by the default and non-default management servers -// respectively, and the xDS client. -func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time.Duration) (*testutils.ListenerWrapper, *testutils.ListenerWrapper, xdsclient.XDSClient) { +// respectively, and the xDS client and its close function. +func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time.Duration) (*testutils.ListenerWrapper, *testutils.ListenerWrapper, xdsclient.XDSClient, func()) { overrideFedEnvVar(t) // Create listener wrappers which notify on to a channel whenever a new @@ -94,7 +94,7 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. // have empty server configs, and therefore end up using the default server // config, which points to the above management server. nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: defaultAuthorityServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -117,7 +117,6 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. if err != nil { t.Fatalf("failed to create xds client: %v", err) } - t.Cleanup(func() { client.Close() }) resources := e2e.UpdateOptions{ NodeID: nodeID, @@ -132,7 +131,7 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. 
if err := defaultAuthorityServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } - return lisDefault, lisNonDefault, client + return lisDefault, lisNonDefault, client, close } // TestAuthorityShare tests the authority sharing logic. The test verifies the @@ -145,7 +144,8 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. func (s) TestAuthorityShare(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - lis, _, client := setupForAuthorityTests(ctx, t, time.Duration(0)) + lis, _, client, close := setupForAuthorityTests(ctx, t, time.Duration(0)) + defer close() // Verify that no connection is established to the management server at this // point. A transport is created only when a resource (which belongs to that @@ -189,7 +189,8 @@ func (s) TestAuthorityShare(t *testing.T) { func (s) TestAuthorityIdleTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - lis, _, client := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + defer close() // Request the first resource. Verify that a new transport is created. cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) @@ -234,7 +235,7 @@ func (s) TestAuthorityClientClose(t *testing.T) { // test, until explicitly closed. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - lisDefault, lisNonDefault, client := setupForAuthorityTests(ctx, t, time.Duration(2*defaultTestTimeout)) + lisDefault, lisNonDefault, client, close := setupForAuthorityTests(ctx, t, time.Duration(2*defaultTestTimeout)) // Request the first resource. Verify that a new transport is created to the // default management server. 
@@ -267,7 +268,7 @@ func (s) TestAuthorityClientClose(t *testing.T) { // Closing the xDS client should close the connection to both management // servers, even though we have an open watch to one of them. - client.Close() + close() if _, err := connDefault.CloseCh.Receive(ctx); err != nil { t.Fatal("Connection to management server not closed after client close") } @@ -281,7 +282,8 @@ func (s) TestAuthorityClientClose(t *testing.T) { func (s) TestAuthorityRevive(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - lis, _, client := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + defer close() // Request the first resource. Verify that a new transport is created. cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go index 0a7655048bf6..7119344296e6 100644 --- a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go @@ -148,11 +148,11 @@ func (s) TestCDSWatch(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a cluster resource and have the watch // callback push the received update on to a channel. @@ -278,11 +278,11 @@ func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. 
- client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same cluster resource and have the // callbacks push the received updates on to a channel. @@ -363,11 +363,11 @@ func (s) TestCDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same cluster resource and have the // callbacks push the received updates on to a channel. @@ -460,11 +460,11 @@ func (s) TestCDSWatch_ResourceCaching(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a cluster resource and have the watch // callback push the received update on to a channel. @@ -532,7 +532,7 @@ func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // receive a response for the watch being registered by the test. // Create an xDS client talking to a non-existent management server. 
- client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummy management server address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -543,7 +543,7 @@ func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for a resource which is expected to be invoked with an // error after the watch expiry timer fires. @@ -579,7 +579,7 @@ func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -590,7 +590,7 @@ func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for a cluster resource and have the watch // callback push the received update on to a channel. @@ -648,11 +648,11 @@ func (s) TesCDSWatch_ResourceRemoved(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for two cluster resources and have the // callbacks push the received updates on to a channel. 
@@ -757,11 +757,11 @@ func (s) TestCDSWatch_NACKError(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a cluster resource and have the watch // callback push the received update on to a channel. @@ -806,11 +806,11 @@ func (s) TestCDSWatch_PartialValid(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for cluster resources. The first watch is expected // to receive an error because the received resource is NACK'ed. The second @@ -880,11 +880,11 @@ func (s) TestCDSWatch_PartialResponse(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for two cluster resources and have the // callbacks push the received updates on to a channel. 
diff --git a/xds/internal/xdsclient/e2e_test/dump_test.go b/xds/internal/xdsclient/e2e_test/dump_test.go index 1a2765cc2ec9..f71b94c683e3 100644 --- a/xds/internal/xdsclient/e2e_test/dump_test.go +++ b/xds/internal/xdsclient/e2e_test/dump_test.go @@ -98,11 +98,11 @@ func (s) TestDumpResources(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Dump resources and expect empty configs. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go index 7daed4bc964a..2b3216c268cc 100644 --- a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go @@ -172,11 +172,11 @@ func (s) TestEDSWatch(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a endpoint resource and have the watch // callback push the received update on to a channel. @@ -326,11 +326,11 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. 
- client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same endpoint resource and have the // callbacks push the received updates on to a channel. @@ -412,11 +412,11 @@ func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same endpoint resource and have the // callbacks push the received updates on to a channel. @@ -510,11 +510,11 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for an endpoint resource and have the watch callback // push the received update on to a channel. @@ -589,7 +589,7 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // receive a response for the watch being registered by the test. // Create an xDS client talking to a non-existent management server. 
- client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummy management server address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -600,7 +600,7 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for a resource which is expected to fail with an error // after the watch expiry timer fires. @@ -636,7 +636,7 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -647,7 +647,7 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for an endpoint resource and have the watch callback // push the received update on to a channel. @@ -704,11 +704,11 @@ func (s) TestEDSWatch_NACKError(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a route configuration resource and have the watch // callback push the received update on to a channel. 
@@ -753,11 +753,11 @@ func (s) TestEDSWatch_PartialValid(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for two endpoint resources. The first watch is // expected to receive an error because the received resource is NACKed. diff --git a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go index 640ee6fac737..437edebe1392 100644 --- a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go @@ -73,10 +73,11 @@ func setupForFederationWatchersTest(t *testing.T) (*e2e.ManagementServer, string t.Fatalf("Failed to create bootstrap file: %v", err) } // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } + t.Cleanup(close) return serverNonDefaultAuthority, nodeID, client } @@ -88,7 +89,6 @@ func setupForFederationWatchersTest(t *testing.T) (*e2e.ManagementServer, string // that both watchers are notified. func (s) TestFederation_ListenerResourceContextParamOrder(t *testing.T) { serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) - defer client.Close() var ( // Two resource names only differ in context parameter order. @@ -145,7 +145,6 @@ func (s) TestFederation_ListenerResourceContextParamOrder(t *testing.T) { // parameters. The test verifies that both watchers are notified. 
func (s) TestFederation_RouteConfigResourceContextParamOrder(t *testing.T) { serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) - defer client.Close() var ( // Two resource names only differ in context parameter order. @@ -212,7 +211,6 @@ func (s) TestFederation_RouteConfigResourceContextParamOrder(t *testing.T) { // that both watchers are notified. func (s) TestFederation_ClusterResourceContextParamOrder(t *testing.T) { serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) - defer client.Close() var ( // Two resource names only differ in context parameter order. @@ -269,7 +267,6 @@ func (s) TestFederation_ClusterResourceContextParamOrder(t *testing.T) { // that both watchers are notified. func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) - defer client.Close() var ( // Two resource names only differ in context parameter order. diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index b4f0310eb538..ac186e3d25ff 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -201,11 +201,11 @@ func (s) TestLDSWatch(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a listener resource and have the watch // callback push the received update on to a channel. @@ -331,11 +331,11 @@ func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. 
- client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same listener resource and have the // callbacks push the received updates on to a channel. @@ -417,11 +417,11 @@ func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same listener resource and have the // callbacks push the received updates on to a channel. @@ -509,11 +509,11 @@ func (s) TestLDSWatch_ResourceCaching(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a listener resource and have the watch // callback push the received update on to a channel. @@ -581,7 +581,7 @@ func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // receive a response for the watch being registered by the test. // Create an xDS client talking to a non-existent management server. 
- client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummy management server address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -592,7 +592,7 @@ func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for a resource which is expected to fail with an error // after the watch expiry timer fires. @@ -628,7 +628,7 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -639,7 +639,7 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for a listener resource and have the watch // callback push the received update on to a channel. @@ -698,11 +698,11 @@ func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for two listener resources and have the // callbacks push the received updates on to a channel. 
@@ -806,11 +806,11 @@ func (s) TestLDSWatch_NACKError(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a listener resource and have the watch // callback push the received update on to a channel. @@ -855,11 +855,11 @@ func (s) TestLDSWatch_PartialValid(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for listener resources. The first watch is expected // to receive an error because the received resource is NACKed. The second @@ -930,11 +930,11 @@ func (s) TestLDSWatch_PartialResponse(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for two listener resources and have the // callbacks push the received updates on to a channel. 
diff --git a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go index 42544aa53b39..e761e70a6246 100644 --- a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go @@ -42,11 +42,11 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Configure the management server to respond with route config resources. resources := e2e.UpdateOptions{ diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go index f2c21a8c77d8..47fcb2fe2e39 100644 --- a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go @@ -182,11 +182,11 @@ func (s) TestRDSWatch(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a route configuration resource and have the // watch callback push the received update on to a channel. @@ -352,11 +352,11 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. 
- client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same route configuration resource // and have the callbacks push the received updates on to a channel. @@ -438,11 +438,11 @@ func (s) TestRDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for the same route configuration resource // and have the callbacks push the received updates on to a channel. @@ -540,11 +540,11 @@ func (s) TestRDSWatch_ResourceCaching(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a route configuration resource and have the watch // callback push the received update on to a channel. @@ -622,7 +622,7 @@ func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // receive a response for the watch being registered by the test. // Create an xDS client talking to a non-existent management server. 
- client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummy management server address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -633,7 +633,7 @@ func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for a resource which is expected to fail with an error // after the watch expiry timer fires. @@ -669,7 +669,7 @@ func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -680,7 +680,7 @@ func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() // Register a watch for a route configuration resource and have the watch // callback push the received update on to a channel. @@ -741,11 +741,11 @@ func (s) TestRDSWatch_NACKError(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register a watch for a route configuration resource and have the watch // callback push the received update on to a channel. 
@@ -790,11 +790,11 @@ func (s) TestRDSWatch_PartialValid(t *testing.T) { defer cleanup() // Create an xDS client with the above bootstrap contents. - client, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) if err != nil { t.Fatalf("Failed to create xDS client: %v", err) } - defer client.Close() + defer close() // Register two watches for route configuration resources. The first watch // is expected to receive an error because the received resource is NACKed. diff --git a/xds/internal/xdsclient/e2e_test/resource_update_test.go b/xds/internal/xdsclient/e2e_test/resource_update_test.go index 9c1039315971..7bef342feb3d 100644 --- a/xds/internal/xdsclient/e2e_test/resource_update_test.go +++ b/xds/internal/xdsclient/e2e_test/resource_update_test.go @@ -243,7 +243,7 @@ func (s) TestHandleListenerResponseFromManagementServer(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -255,7 +255,7 @@ func (s) TestHandleListenerResponseFromManagementServer(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() t.Logf("Created xDS client to %s", mgmtServer.Address) // A wrapper struct to wrap the update and the associated error, as @@ -510,7 +510,7 @@ func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { // Create an xDS client talking to the above management server. 
nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -522,7 +522,7 @@ func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() t.Logf("Created xDS client to %s", mgmtServer.Address) // A wrapper struct to wrap the update and the associated error, as @@ -753,7 +753,7 @@ func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -765,7 +765,7 @@ func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() t.Logf("Created xDS client to %s", mgmtServer.Address) // A wrapper struct to wrap the update and the associated error, as @@ -1079,7 +1079,7 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { // Create an xDS client talking to the above management server. 
nodeID := uuid.New().String() - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -1091,7 +1091,7 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer client.Close() + defer close() t.Logf("Created xDS client to %s", mgmtServer.Address) // A wrapper struct to wrap the update and the associated error, as diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index c199ae767a1c..631793454b85 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -50,7 +50,7 @@ func (s) TestLRSClient(t *testing.T) { } defer sCleanup() - xdsC, err := NewWithConfigForTesting(&bootstrap.Config{ + xdsC, close, err := NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ ServerURI: fs.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), @@ -61,9 +61,7 @@ func (s) TestLRSClient(t *testing.T) { if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer xdsC.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() + defer close() // Report to the same address should not create new ClientConn. 
store1, lrsCancel1 := xdsC.ReportLoad( @@ -77,6 +75,8 @@ func (s) TestLRSClient(t *testing.T) { ) defer lrsCancel1() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if u, err := fs.NewConnChan.Receive(ctx); err != nil { t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) } diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index 408a27cf6279..4c42ae424971 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -21,8 +21,11 @@ package xdsclient import ( "fmt" "sync" + "sync/atomic" "time" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -34,7 +37,8 @@ const ( var ( // This is the client returned by New(). It contains one client implementation, // and maintains the refcount. - singletonClient = &clientRefCounted{} + singletonMu sync.Mutex + singletonClient *clientRefCounted // The following functions are no-ops in the actual code, but can be // overridden in tests to give them visibility into certain events. @@ -45,57 +49,57 @@ var ( // To override in tests. var bootstrapNewConfig = bootstrap.NewConfig -// onceClosingClient is a thin wrapper around clientRefCounted. The Close() -// method is overridden such that the underlying reference counted client's -// Close() is called at most once, thereby making Close() idempotent. -// -// This is the type which is returned by New() and NewWithConfig(), making it -// safe for these callers to call Close() any number of times. 
-type onceClosingClient struct { - XDSClient +func clientRefCountedClose() { + singletonMu.Lock() + defer singletonMu.Unlock() - once sync.Once + if singletonClient.decrRef() != 0 { + return + } + singletonClient.clientImpl.close() + singletonClientImplCloseHook() + singletonClient = nil } -func (o *onceClosingClient) Close() { - o.once.Do(o.XDSClient.Close) -} +func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func(), error) { + singletonMu.Lock() + defer singletonMu.Unlock() -func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { - singletonClient.mu.Lock() - defer singletonClient.mu.Unlock() + if singletonClient != nil { + singletonClient.incrRef() + return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil - // If the client implementation was created, increment ref count and return - // the client. - if singletonClient.clientImpl != nil { - singletonClient.refCount++ - return &onceClosingClient{XDSClient: singletonClient}, nil } - // If the passed in config is nil, perform bootstrap to read config. - if config == nil { + // Use fallbackConfig only if bootstrap env vars are unspecified. + var config *bootstrap.Config + if envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" { + if fallbackConfig == nil { + return nil, nil, fmt.Errorf("xds: bootstrap env vars are unspecified and provided fallback config is nil") + } + config = fallbackConfig + } else { var err error config, err = bootstrapNewConfig() if err != nil { - return nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) + return nil, nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) } } // Create the new client implementation. 
c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) if err != nil { - return nil, err + return nil, nil, err } - - singletonClient.clientImpl = c - singletonClient.refCount++ + singletonClient = &clientRefCounted{clientImpl: c, refCount: 1} singletonClientImplCreateHook() + nodeID := "" if node, ok := config.XDSServer.NodeProto.(interface{ GetId() string }); ok { nodeID = node.GetId() } logger.Infof("xDS node ID: %s", nodeID) - return &onceClosingClient{XDSClient: singletonClient}, nil + return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil } // clientRefCounted is ref-counted, and to be shared by the xds resolver and @@ -103,23 +107,13 @@ func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { type clientRefCounted struct { *clientImpl - // This mu protects all the fields, including the embedded clientImpl above. - mu sync.Mutex - refCount int + refCount int32 // accessed atomically } -// Close closes the client. It does ref count of the xds client implementation, -// and closes the gRPC connection to the management server when ref count -// reaches 0. -func (c *clientRefCounted) Close() { - c.mu.Lock() - defer c.mu.Unlock() - c.refCount-- - if c.refCount == 0 { - c.clientImpl.Close() - // Set clientImpl back to nil. So if New() is called after this, a new - // implementation will be created. 
- c.clientImpl = nil - singletonClientImplCloseHook() - } +func (c *clientRefCounted) incrRef() int32 { + return atomic.AddInt32(&c.refCount, 1) +} + +func (c *clientRefCounted) decrRef() int32 { + return atomic.AddInt32(&c.refCount, -1) } diff --git a/xds/internal/xdsclient/singleton_test.go b/xds/internal/xdsclient/singleton_test.go index b22663f33ab5..491dc037d969 100644 --- a/xds/internal/xdsclient/singleton_test.go +++ b/xds/internal/xdsclient/singleton_test.go @@ -22,28 +22,26 @@ import ( "context" "testing" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" + "github.com/google/uuid" "google.golang.org/grpc/internal/testutils" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" ) // Test that multiple New() returns the same Client. And only when the last // client is closed, the underlying client is closed. func (s) TestClientNewSingleton(t *testing.T) { - // Override bootstrap with a fake config. - oldBootstrapNewConfig := bootstrapNewConfig - bootstrapNewConfig = func() (*bootstrap.Config, error) { - return &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, - }, nil + // Create a bootstrap configuration, place it in a file in the temp + // directory, and set the bootstrap env vars to point to it. + nodeID := uuid.New().String() + cleanup, err := bootstrap.CreateFile(bootstrap.Options{ + NodeID: nodeID, + ServerURI: "non-existent-server-address", + Version: bootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) } - defer func() { bootstrapNewConfig = oldBootstrapNewConfig }() + defer cleanup() // Override the singleton creation hook to get notified. 
origSingletonClientImplCreateHook := singletonClientImplCreateHook @@ -62,7 +60,7 @@ func (s) TestClientNewSingleton(t *testing.T) { defer func() { singletonClientImplCloseHook = origSingletonClientImplCloseHook }() // The first call to New() should create a new singleton client. - client, err := New() + _, closeFunc, err := New() if err != nil { t.Fatalf("failed to create xDS client: %v", err) } @@ -75,10 +73,10 @@ func (s) TestClientNewSingleton(t *testing.T) { // Calling New() again should not create new singleton client implementations. const count = 9 - clients := make([]XDSClient, count) + closeFuncs := make([]func(), 9) for i := 0; i < count; i++ { func() { - clients[i], err = New() + _, closeFuncs[i], err = New() if err != nil { t.Fatalf("%d-th call to New() failed with error: %v", i, err) } @@ -97,8 +95,8 @@ func (s) TestClientNewSingleton(t *testing.T) { // acquired above, via the first call to New(). for i := 0; i < count; i++ { func() { - clients[i].Close() - clients[i].Close() + closeFuncs[i]() + closeFuncs[i]() sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() @@ -109,18 +107,18 @@ func (s) TestClientNewSingleton(t *testing.T) { } // Call the last Close(). The underlying implementation should be closed. - client.Close() + closeFunc() if _, err := singletonCloseCh.Receive(ctx); err != nil { t.Fatalf("Timeout waiting for singleton client implementation to be closed: %v", err) } // Calling New() again, after the previous Client was actually closed, should // create a new one. 
- client, err = New() + _, closeFunc, err = New() if err != nil { t.Fatalf("failed to create client: %v", err) } - defer client.Close() + defer closeFunc() if _, err := singletonCreationCh.Receive(ctx); err != nil { t.Fatalf("Timeout when waiting for singleton xDS client to be created: %v", err) } diff --git a/xds/server.go b/xds/server.go index 5ab8a5a98008..f7003f6cd5c6 100644 --- a/xds/server.go +++ b/xds/server.go @@ -49,7 +49,7 @@ const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func() (xdsclient.XDSClient, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { return xdsclient.New() } newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { @@ -89,8 +89,9 @@ type GRPCServer struct { // clientMu is used only in initXDSClient(), which is called at the // beginning of Serve(), where we have to decide if we have to create a // client or use an existing one. - clientMu sync.Mutex - xdsC xdsclient.XDSClient + clientMu sync.Mutex + xdsC xdsclient.XDSClient + xdsClientClose func() } // NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. @@ -184,16 +185,17 @@ func (s *GRPCServer) initXDSClient() error { newXDSClient := newXDSClient if s.opts.bootstrapContentsForTesting != nil { // Bootstrap file contents may be specified as a server option for tests. 
- newXDSClient = func() (xdsclient.XDSClient, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { return xdsclient.NewWithBootstrapContentsForTesting(s.opts.bootstrapContentsForTesting) } } - client, err := newXDSClient() + client, close, err := newXDSClient() if err != nil { return fmt.Errorf("xds: failed to create xds-client: %v", err) } s.xdsC = client + s.xdsClientClose = close s.logger.Infof("Created an xdsClient") return nil } @@ -334,7 +336,7 @@ func (s *GRPCServer) Stop() { s.quit.Fire() s.gs.Stop() if s.xdsC != nil { - s.xdsC.Close() + s.xdsClientClose() } } @@ -345,7 +347,7 @@ func (s *GRPCServer) GracefulStop() { s.quit.Fire() s.gs.GracefulStop() if s.xdsC != nil { - s.xdsC.Close() + s.xdsClientClose() } } diff --git a/xds/server_test.go b/xds/server_test.go index 78e4725304c0..68cb3dc307a6 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -320,7 +320,7 @@ func (p *fakeProvider) Close() { func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsclient.XDSClient, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() c.SetBootstrapConfig(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ @@ -332,7 +332,7 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { CertProviderConfigs: certProviderConfigs, }) clientCh.Send(c) - return c, nil + return c, func() {}, nil } fs := newFakeGRPCServer() @@ -352,7 +352,7 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsclient.XDSClient, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() bc := &bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ @@ -367,7 +367,7 
@@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, } c.SetBootstrapConfig(bc) clientCh.Send(c) - return c, nil + return c, func() {}, nil } return clientCh, func() { newXDSClient = origNewXDSClient } @@ -631,11 +631,11 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // xdsClient with the specified bootstrap configuration. clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsclient.XDSClient, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() c.SetBootstrapConfig(test.bootstrapConfig) clientCh.Send(c) - return c, nil + return c, func() {}, nil } defer func() { newXDSClient = origNewXDSClient }() @@ -674,8 +674,8 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // verifies that Server() exits with a non-nil error. func (s) TestServeNewClientFailure(t *testing.T) { origNewXDSClient := newXDSClient - newXDSClient = func() (xdsclient.XDSClient, error) { - return nil, errors.New("xdsClient creation failed") + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return nil, nil, errors.New("xdsClient creation failed") } defer func() { newXDSClient = origNewXDSClient }() From 9b9b38127038e347420ef08c42e5d74d181b8749 Mon Sep 17 00:00:00 2001 From: Joshua Humphries <2035234+jhump@users.noreply.github.com> Date: Wed, 18 Jan 2023 15:59:58 -0500 Subject: [PATCH 746/998] server: fix a few issues where grpc server uses RST_STREAM for non-HTTP/2 errors (#5893) Fixes https://github.com/grpc/grpc-go/issues/5892 --- internal/transport/handler_server.go | 4 +- internal/transport/handler_server_test.go | 40 ++-- internal/transport/http2_server.go | 46 +++-- internal/transport/transport_test.go | 213 +++++++++++++--------- 4 files changed, 196 insertions(+), 107 deletions(-) diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index ebe8bfe330a5..e6626bf96e7c 100644 
--- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -65,7 +65,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) - http.Error(w, msg, http.StatusBadRequest) + http.Error(w, msg, http.StatusUnsupportedMediaType) return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { @@ -87,7 +87,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - msg := fmt.Sprintf("malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) http.Error(w, msg, http.StatusBadRequest) return nil, status.Error(codes.Internal, msg) } diff --git a/internal/transport/handler_server_test.go b/internal/transport/handler_server_test.go index 82b4baca58b6..fbd8058b79fb 100644 --- a/internal/transport/handler_server_test.go +++ b/internal/transport/handler_server_test.go @@ -41,11 +41,12 @@ import ( func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { type testCase struct { - name string - req *http.Request - wantErr string - modrw func(http.ResponseWriter) http.ResponseWriter - check func(*serverHandlerTransport, *testCase) error + name string + req *http.Request + wantErr string + wantErrCode int + modrw func(http.ResponseWriter) http.ResponseWriter + check func(*serverHandlerTransport, *testCase) error } tests := []testCase{ { @@ -54,7 +55,8 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { ProtoMajor: 1, ProtoMinor: 1, }, - wantErr: "gRPC requires HTTP/2", + wantErr: "gRPC requires HTTP/2", + wantErrCode: http.StatusBadRequest, }, { name: "bad method", @@ -63,7 +65,8 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { Method: "GET", Header: 
http.Header{}, }, - wantErr: `invalid gRPC request method "GET"`, + wantErr: `invalid gRPC request method "GET"`, + wantErrCode: http.StatusBadRequest, }, { name: "bad content type", @@ -74,7 +77,8 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { "Content-Type": {"application/foo"}, }, }, - wantErr: `invalid gRPC request content-type "application/foo"`, + wantErr: `invalid gRPC request content-type "application/foo"`, + wantErrCode: http.StatusUnsupportedMediaType, }, { name: "not flusher", @@ -93,7 +97,8 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { } return struct{ onlyCloseNotifier }{w.(onlyCloseNotifier)} }, - wantErr: "gRPC requires a ResponseWriter supporting http.Flusher", + wantErr: "gRPC requires a ResponseWriter supporting http.Flusher", + wantErrCode: http.StatusInternalServerError, }, { name: "valid", @@ -153,7 +158,8 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { Path: "/service/foo.bar", }, }, - wantErr: `rpc error: code = Internal desc = malformed time-out: transport: timeout unit is not recognized: "tomorrow"`, + wantErr: `rpc error: code = Internal desc = malformed grpc-timeout: transport: timeout unit is not recognized: "tomorrow"`, + wantErrCode: http.StatusBadRequest, }, { name: "with metadata", @@ -187,7 +193,12 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { } for _, tt := range tests { - rw := newTestHandlerResponseWriter() + rrec := httptest.NewRecorder() + rw := http.ResponseWriter(testHandlerResponseWriter{ + ResponseRecorder: rrec, + closeNotify: make(chan bool, 1), + }) + if tt.modrw != nil { rw = tt.modrw(rw) } @@ -196,6 +207,13 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { t.Errorf("%s: error = %q; want %q", tt.name, gotErr.Error(), tt.wantErr) continue } + if tt.wantErrCode == 0 { + tt.wantErrCode = http.StatusOK + } + if rrec.Code != tt.wantErrCode { + t.Errorf("%s: code = %d; want %d", tt.name, 
rrec.Code, tt.wantErrCode) + continue + } if gotErr != nil { continue } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 37e089bc8433..bc3da706726d 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -380,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( fc: &inFlow{limit: uint32(t.initialWindowSize)}, } var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. - isGRPC = false - mdata = make(map[string][]string) - httpMethod string - // headerError is set if an error is encountered while parsing the headers - headerError bool + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(map[string][]string) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status timeoutSet bool timeout time.Duration @@ -397,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { + contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) @@ -412,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) } // "Transports must consider requests containing the Connection header // as malformed." 
- A41 @@ -420,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if logger.V(logLevel) { logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") } - headerError = true + protocolError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } @@ -446,7 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( logger.Errorf("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 400, + httpStatus: http.StatusBadRequest, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -455,7 +457,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return nil } - if !isGRPC || headerError { + if protocolError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, @@ -464,6 +466,26 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( }) return nil } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } // "If :authority is missing, Host must be renamed to :authority." 
- A41 if len(mdata[":authority"]) == 0 { diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 3c4b0876497e..e40a2ae6175f 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -1998,105 +1998,154 @@ func (s) TestHeadersHTTPStatusGRPCStatus(t *testing.T) { grpcStatusWant: "13", grpcMessageWant: "which should be POST", }, + { + name: "Client Sending Wrong Content-Type", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/json"}}, + }, + httpStatusWant: "415", + grpcStatusWant: "3", + grpcMessageWant: `invalid gRPC request content-type "application/json"`, + }, + { + name: "Client Sending Bad Timeout", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "grpc-timeout", values: []string{"18f6n"}}, + }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: "malformed grpc-timeout", + }, + { + name: "Client Sending Bad Binary Header", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "foobar-bin", values: []string{"X()3e@#$-"}}, + }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: `header "foobar-bin": illegal base64 data`, + }, } for _, test := range tests { - server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) - defer server.stop() - // Create a client directly to not tie what you can send to API 
of - // http2_client.go (i.e. control headers being sent). - mconn, err := net.Dial("tcp", server.lis.Addr().String()) - if err != nil { - t.Fatalf("Client failed to dial: %v", err) - } - defer mconn.Close() + t.Run(test.name, func(t *testing.T) { + server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) + defer server.stop() + // Create a client directly to not tie what you can send to API of + // http2_client.go (i.e. control headers being sent). + mconn, err := net.Dial("tcp", server.lis.Addr().String()) + if err != nil { + t.Fatalf("Client failed to dial: %v", err) + } + defer mconn.Close() - if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { - t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) - } + if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { + t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) + } - framer := http2.NewFramer(mconn, mconn) - framer.ReadMetaHeaders = hpack.NewDecoder(4096, nil) - if err := framer.WriteSettings(); err != nil { - t.Fatalf("Error while writing settings: %v", err) - } + framer := http2.NewFramer(mconn, mconn) + framer.ReadMetaHeaders = hpack.NewDecoder(4096, nil) + if err := framer.WriteSettings(); err != nil { + t.Fatalf("Error while writing settings: %v", err) + } - // result chan indicates that reader received a Headers Frame with - // desired grpc status and message from server. An error will be passed - // on it if any other frame is received. - result := testutils.NewChannel() + // result chan indicates that reader received a Headers Frame with + // desired grpc status and message from server. An error will be passed + // on it if any other frame is received. + result := testutils.NewChannel() - // Launch a reader goroutine. 
- go func() { - for { - frame, err := framer.ReadFrame() - if err != nil { - return - } - switch frame := frame.(type) { - case *http2.SettingsFrame: - // Do nothing. A settings frame is expected from server preface. - case *http2.MetaHeadersFrame: - var httpStatus, grpcStatus, grpcMessage string - for _, header := range frame.Fields { - if header.Name == ":status" { - httpStatus = header.Value + // Launch a reader goroutine. + go func() { + for { + frame, err := framer.ReadFrame() + if err != nil { + return + } + switch frame := frame.(type) { + case *http2.SettingsFrame: + // Do nothing. A settings frame is expected from server preface. + case *http2.MetaHeadersFrame: + var httpStatus, grpcStatus, grpcMessage string + for _, header := range frame.Fields { + if header.Name == ":status" { + httpStatus = header.Value + } + if header.Name == "grpc-status" { + grpcStatus = header.Value + } + if header.Name == "grpc-message" { + grpcMessage = header.Value + } } - if header.Name == "grpc-status" { - grpcStatus = header.Value + if httpStatus != test.httpStatusWant { + result.Send(fmt.Errorf("incorrect HTTP Status got %v, want %v", httpStatus, test.httpStatusWant)) + return } - if header.Name == "grpc-message" { - grpcMessage = header.Value + if grpcStatus != test.grpcStatusWant { // grpc status code internal + result.Send(fmt.Errorf("incorrect gRPC Status got %v, want %v", grpcStatus, test.grpcStatusWant)) + return } - } - if httpStatus != test.httpStatusWant { - result.Send(fmt.Errorf("incorrect HTTP Status got %v, want %v", httpStatus, test.httpStatusWant)) - return - } - if grpcStatus != test.grpcStatusWant { // grpc status code internal - result.Send(fmt.Errorf("incorrect gRPC Status got %v, want %v", grpcStatus, test.grpcStatusWant)) - return - } - if !strings.Contains(grpcMessage, test.grpcMessageWant) { - result.Send(fmt.Errorf("incorrect gRPC message")) + if !strings.Contains(grpcMessage, test.grpcMessageWant) { + result.Send(fmt.Errorf("incorrect gRPC message, 
want %q got %q", test.grpcMessageWant, grpcMessage)) + return + } + + // Records that client successfully received a HeadersFrame + // with expected Trailers-Only response. + result.Send(nil) return + default: + // The server should send nothing but a single Settings and Headers frame. + result.Send(errors.New("the client received a frame other than Settings or Headers")) } - - // Records that client successfully received a HeadersFrame - // with expected Trailers-Only response. - result.Send(nil) - return - default: - // The server should send nothing but a single Settings and Headers frame. - result.Send(errors.New("the client received a frame other than Settings or Headers")) } - } - }() + }() - var buf bytes.Buffer - henc := hpack.NewEncoder(&buf) + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) - // Needs to build headers deterministically to conform to gRPC over - // HTTP/2 spec. - for _, header := range test.headers { - for _, value := range header.values { - if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil { - t.Fatalf("Error while encoding header: %v", err) + // Needs to build headers deterministically to conform to gRPC over + // HTTP/2 spec. 
+ for _, header := range test.headers { + for _, value := range header.values { + if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil { + t.Fatalf("Error while encoding header: %v", err) + } } } - } - if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { - t.Fatalf("Error while writing headers: %v", err) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - r, err := result.Receive(ctx) - if err != nil { - t.Fatalf("Error receiving from channel: %v", err) - } - if r != nil { - t.Fatalf("want nil, got %v", r) - } + if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { + t.Fatalf("Error while writing headers: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r, err := result.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from channel: %v", err) + } + if r != nil { + t.Fatalf("want nil, got %v", r) + } + }) } } From 6e749384f7913be540988f62836b2418368ca570 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 18 Jan 2023 14:57:16 -0800 Subject: [PATCH 747/998] xds/resolver: cleanup tests to use real xDS client (#5950) --- xds/internal/resolver/xds_resolver_test.go | 302 +++++++++++++-------- 1 file changed, 183 insertions(+), 119 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 330788bfe56e..a0e58a5b8716 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -29,6 +29,7 @@ import ( xxhash "github.com/cespare/xxhash/v2" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -40,6 +41,8 @@ 
import ( "google.golang.org/grpc/internal/grpctest" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" + xdsbootstrap "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -53,6 +56,9 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config ) @@ -113,45 +119,52 @@ func newTestClientConn() *testClientConn { } } -// TestResolverBuilder tests the resolver builder's Build() method with -// different parameters. -func (s) TestResolverBuilder(t *testing.T) { +// TestResolverBuilder_ClientCreationFails tests the case where xDS client +// creation fails, and verifies that xDS resolver build fails as well. +func (s) TestResolverBuilder_ClientCreationFails(t *testing.T) { + // Override xDS client creation function and return an error. + origNewClient := newXDSClient + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return nil, nil, errors.New("failed to create xDS client") + } + defer func() { + newXDSClient = origNewClient + }() + + // Build an xDS resolver and expect it to fail. 
+ builder := resolver.Get(xdsScheme) + if builder == nil { + t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) + } + if _, err := builder.Build(target, newTestClientConn(), resolver.BuildOptions{}); err == nil { + t.Fatalf("builder.Build(%v) succeeded when expected to fail", target) + } +} + +// TestResolverBuilder_DifferentBootstrapConfigs tests the resolver builder's +// Build() method with different xDS bootstrap configurations. +func (s) TestResolverBuilder_DifferentBootstrapConfigs(t *testing.T) { tests := []struct { - name string - xdsClientFunc func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) - target resolver.Target - buildOpts resolver.BuildOptions - wantErr bool + name string + bootstrapCfg *bootstrap.Config // Empty top-level xDS server config, will be set by test logic. + target resolver.Target + buildOpts resolver.BuildOptions + wantErr string }{ { - name: "good", - xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { - return fakeclient.NewClient(), func() { close(closeCh) }, nil - }, - target: target, - wantErr: false, - }, - { - name: "xDS client creation fails", - xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { - return nil, func() { close(closeCh) }, errors.New("failed to create xDS client") - }, - target: target, - wantErr: true, + name: "good", + bootstrapCfg: &bootstrap.Config{}, + target: target, }, { name: "authority not defined in bootstrap", - xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { - c := fakeclient.NewClient() - c.SetBootstrapConfig(&bootstrap.Config{ - ClientDefaultListenerResourceNameTemplate: "%s", - Authorities: map[string]*bootstrap.Authority{ - "test-authority": { - ClientListenerResourceNameTemplate: "xdstp://test-authority/%s", - }, + bootstrapCfg: &bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "%s", + Authorities: map[string]*bootstrap.Authority{ + "test-authority": { + 
ClientListenerResourceNameTemplate: "xdstp://test-authority/%s", }, - }) - return c, func() { close(closeCh) }, nil + }, }, target: resolver.Target{ URL: url.URL{ @@ -159,16 +172,12 @@ func (s) TestResolverBuilder(t *testing.T) { Path: "/" + targetStr, }, }, - wantErr: true, + wantErr: `authority "non-existing-authority" is not found in the bootstrap file`, }, { - name: "xDS creds specified without certificate providers in bootstrap", - xdsClientFunc: func(closeCh chan struct{}) (xdsclient.XDSClient, func(), error) { - c := fakeclient.NewClient() - c.SetBootstrapConfig(&bootstrap.Config{}) - return c, func() { close(closeCh) }, nil - }, - target: target, + name: "xDS creds specified without certificate providers in bootstrap", + bootstrapCfg: &bootstrap.Config{}, + target: target, buildOpts: resolver.BuildOptions{ DialCreds: func() credentials.TransportCredentials { creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) @@ -178,17 +187,36 @@ func (s) TestResolverBuilder(t *testing.T) { return creds }(), }, - wantErr: true, + wantErr: `xdsCreds specified but certificate_providers config missing in bootstrap file`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Use a fake xDS client that closes the below channel when closed. - closeCh := make(chan struct{}) - oldClientMaker := newXDSClient - newXDSClient = func() (xdsclient.XDSClient, func(), error) { return test.xdsClientFunc(closeCh) } + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Starting xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Add top-level xDS server config corresponding to the above + // management server. 
+ test.bootstrapCfg.XDSServer = &bootstrap.ServerConfig{ + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + } + + // Override xDS client creation to use bootstrap configuration + // specified by the test. + origNewClient := newXDSClient + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + // The watch timeout and idle authority timeout values passed to + // NewWithConfigForTesing() are immaterial for this test, as we + // are only testing the resolver build functionality. + return xdsclient.NewWithConfigForTesting(test.bootstrapCfg, defaultTestTimeout, defaultTestTimeout) + } defer func() { - newXDSClient = oldClientMaker + newXDSClient = origNewClient }() builder := resolver.Get(xdsScheme) @@ -197,7 +225,10 @@ func (s) TestResolverBuilder(t *testing.T) { } r, err := builder.Build(test.target, newTestClientConn(), test.buildOpts) - if (err != nil) != test.wantErr { + if gotErr, wantErr := err != nil, test.wantErr != ""; gotErr != wantErr { + t.Fatalf("builder.Build(%v) returned err: %v, wantErr: %v", target, err, test.wantErr) + } + if test.wantErr != "" && !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("builder.Build(%v) returned err: %v, wantErr: %v", target, err, test.wantErr) } if err != nil { @@ -205,12 +236,6 @@ func (s) TestResolverBuilder(t *testing.T) { return } r.Close() - - select { - case <-closeCh: - case <-time.After(defaultTestTimeout): - t.Fatal("Timeout when waiting for xDS client to be closed") - } }) } } @@ -288,89 +313,128 @@ func waitForWatchRouteConfig(ctx context.Context, t *testing.T, xdsC *fakeclient } } -// TestXDSResolverResourceNameToWatch tests that the correct resource name is -// used to watch for the service. This covers cases with different bootstrap -// config, and different authority. 
-func (s) TestXDSResolverResourceNameToWatch(t *testing.T) { +// TestResolverResourceName builds an xDS resolver and verifies that the +// resource name specified in the discovery request matches expectations. +func (s) TestResolverResourceName(t *testing.T) { + // Federation support is required when new style names are used. + oldXDSFederation := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldXDSFederation }() + tests := []struct { - name string - bc *bootstrap.Config - target resolver.Target - want string + name string + listenerResourceNameTemplate string + extraAuthority string + dialTarget string + wantResourceName string }{ { - name: "default %s old style", - bc: &bootstrap.Config{ - ClientDefaultListenerResourceNameTemplate: "%s", - }, - target: resolver.Target{ - URL: url.URL{Path: "/" + targetStr}, - }, - want: targetStr, + name: "default %s old style", + listenerResourceNameTemplate: "%s", + dialTarget: "xds:///target", + wantResourceName: "target", }, { - name: "old style no percent encoding", - bc: &bootstrap.Config{ - ClientDefaultListenerResourceNameTemplate: "/path/to/%s", - }, - target: resolver.Target{ - URL: url.URL{Path: "/" + targetStr}, - }, - want: "/path/to/" + targetStr, + name: "old style no percent encoding", + listenerResourceNameTemplate: "/path/to/%s", + dialTarget: "xds:///target", + wantResourceName: "/path/to/target", }, { - name: "new style with %s", - bc: &bootstrap.Config{ - ClientDefaultListenerResourceNameTemplate: "xdstp://authority.com/%s", - Authorities: nil, - }, - target: resolver.Target{ - URL: url.URL{Path: "/0.0.0.0:8080"}, - }, - want: "xdstp://authority.com/0.0.0.0:8080", + name: "new style with %s", + listenerResourceNameTemplate: "xdstp://authority.com/%s", + dialTarget: "xds:///0.0.0.0:8080", + wantResourceName: "xdstp://authority.com/0.0.0.0:8080", }, { - name: "new style percent encoding", - bc: &bootstrap.Config{ - ClientDefaultListenerResourceNameTemplate: 
"xdstp://authority.com/%s", - Authorities: nil, - }, - target: resolver.Target{ - URL: url.URL{Path: "/[::1]:8080"}, - }, - want: "xdstp://authority.com/%5B::1%5D:8080", + name: "new style percent encoding", + listenerResourceNameTemplate: "xdstp://authority.com/%s", + dialTarget: "xds:///[::1]:8080", + wantResourceName: "xdstp://authority.com/%5B::1%5D:8080", }, { - name: "new style different authority", - bc: &bootstrap.Config{ - ClientDefaultListenerResourceNameTemplate: "xdstp://authority.com/%s", - Authorities: map[string]*bootstrap.Authority{ - "test-authority": { - ClientListenerResourceNameTemplate: "xdstp://test-authority/%s", - }, - }, - }, - target: resolver.Target{ - URL: url.URL{ - Host: "test-authority", - Path: "/" + targetStr, - }, - }, - want: "xdstp://test-authority/" + targetStr, + name: "new style different authority", + listenerResourceNameTemplate: "xdstp://authority.com/%s", + extraAuthority: "test-authority", + dialTarget: "xds://test-authority/target", + wantResourceName: "xdstp://test-authority/envoy.config.listener.v3.Listener/target", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - xdsR, xdsC, _, cancel := testSetup(t, setupOpts{ - bootstrapC: tt.bc, - target: tt.target, + // Setup the management server to push the requested resource name + // on to a channel. No resources are configured on the management + // server as part of this test, as we are only interested in the + // resource name being requested. + resourceNameCh := make(chan string, 1) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + // When the resolver is being closed, the watch associated + // with the listener resource will be cancelled, and it + // might result in a discovery request with no resource + // names. Hence, we only consider requests which contain a + // resource name. 
+ var name string + if len(req.GetResourceNames()) == 1 { + name = req.GetResourceNames()[0] + } + select { + case resourceNameCh <- name: + default: + } + return nil + }, }) - defer cancel() - defer xdsR.Close() + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, tt.want) + // Create a bootstrap configuration with test options. + opts := xdsbootstrap.Options{ + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + ClientDefaultListenerResourceNameTemplate: tt.listenerResourceNameTemplate, + } + if tt.extraAuthority != "" { + // In this test, we really don't care about having multiple + // management servers. All we need to verify is whether the + // resource name matches expectation. + opts.Authorities = map[string]string{ + tt.extraAuthority: mgmtServer.Address, + } + } + bootstrapContents, err := xdsbootstrap.Contents(opts) + if err != nil { + t.Fatal(err) + } + + // Build an xDS resolver that uses the above bootstrap configuration + // and pass it to grpc.Dial(). Creating the xDS resolver should + // result in creation of the xDS client. + newResolver := internal.NewXDSResolverWithConfigForTesting + if newResolver == nil { + t.Fatal("internal.NewXDSResolverWithConfigForTesting is nil") + } + resolver, err := newResolver.(func([]byte) (resolver.Builder, error))(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS resolver for testing: %v", err) + } + cc, err := grpc.Dial(tt.dialTarget, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Verify the resource name in the discovery request being sent out. 
+ select { + case gotResourceName := <-resourceNameCh: + if gotResourceName != tt.wantResourceName { + t.Fatalf("Received discovery request with resource name: %v, want %v", gotResourceName, tt.wantResourceName) + } + case <-time.After(defaultTestTimeout): + t.Fatalf("Timeout when waiting for discovery request") + } }) } } From bc9728f98bdcb889282d6925f27ea96c0cdbd485 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 19 Jan 2023 16:16:47 -0800 Subject: [PATCH 748/998] xds/resolver: cleanup tests to use real xDS client 4/n (#5954) --- internal/testutils/parse_url.go | 34 ++ xds/internal/resolver/xds_resolver_test.go | 441 +++++++++++++++------ 2 files changed, 350 insertions(+), 125 deletions(-) create mode 100644 internal/testutils/parse_url.go diff --git a/internal/testutils/parse_url.go b/internal/testutils/parse_url.go new file mode 100644 index 000000000000..ff276e4d0c38 --- /dev/null +++ b/internal/testutils/parse_url.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package testutils + +import ( + "fmt" + "net/url" +) + +// MustParseURL attempts to parse the provided target using url.Parse() +// and panics if parsing fails. 
+func MustParseURL(target string) *url.URL { + u, err := url.Parse(target) + if err != nil { + panic(fmt.Sprintf("Error parsing target(%s): %v", target, err)) + } + return u +} diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index a0e58a5b8716..e35b4b6ecec6 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -29,6 +29,7 @@ import ( xxhash "github.com/cespare/xxhash/v2" "github.com/google/go-cmp/cmp" + "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -57,7 +58,10 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config @@ -313,6 +317,23 @@ func waitForWatchRouteConfig(ctx context.Context, t *testing.T, xdsC *fakeclient } } +// buildResolverForTarget builds an xDS resolver for the given target. It +// returns a testClientConn which allows inspection of resolver updates, and a +// function to close the resolver once the test is complete. 
+func buildResolverForTarget(t *testing.T, target resolver.Target) (*testClientConn, func()) { + builder := resolver.Get(xdsScheme) + if builder == nil { + t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) + } + + tcc := newTestClientConn() + r, err := builder.Build(target, tcc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("builder.Build(%v) returned err: %v", target, err) + } + return tcc, r.Close +} + // TestResolverResourceName builds an xDS resolver and verifies that the // resource name specified in the discovery request matches expectations. func (s) TestResolverResourceName(t *testing.T) { @@ -404,27 +425,14 @@ func (s) TestResolverResourceName(t *testing.T) { tt.extraAuthority: mgmtServer.Address, } } - bootstrapContents, err := xdsbootstrap.Contents(opts) + cleanup, err := xdsbootstrap.CreateFile(opts) if err != nil { t.Fatal(err) } + defer cleanup() - // Build an xDS resolver that uses the above bootstrap configuration - // and pass it to grpc.Dial(). Creating the xDS resolver should - // result in creation of the xDS client. - newResolver := internal.NewXDSResolverWithConfigForTesting - if newResolver == nil { - t.Fatal("internal.NewXDSResolverWithConfigForTesting is nil") - } - resolver, err := newResolver.(func([]byte) (resolver.Builder, error))(bootstrapContents) - if err != nil { - t.Fatalf("Failed to create xDS resolver for testing: %v", err) - } - cc, err := grpc.Dial(tt.dialTarget, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() + _, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL(tt.dialTarget)}) + defer rClose() // Verify the resource name in the discovery request being sent out. 
select { @@ -632,123 +640,265 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { } } -// TestXDSResolverRequestHash tests a case where a resolver receives a RouteConfig update +// TestResolverRequestHash tests a case where a resolver receives a RouteConfig update // with a HashPolicy specifying to generate a hash. The configSelector generated should // successfully generate a Hash. -func (s) TestXDSResolverRequestHash(t *testing.T) { +func (s) TestResolverRequestHash(t *testing.T) { oldRH := envconfig.XDSRingHash envconfig.XDSRingHash = true defer func() { envconfig.XDSRingHash = oldRH }() - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - // Invoke watchAPI callback with a good service update (with hash policies - // specified) and wait for UpdateState method to be called on ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{ - Prefix: newStringP(""), - WeightedClusters: map[string]xdsresource.WeightedCluster{ - "cluster_1": {Weight: 75}, - "cluster_2": {Weight: 25}, - }, - HashPolicies: []*xdsresource.HashPolicy{{ - HashPolicyType: xdsresource.HashPolicyTypeHeader, - HeaderName: ":path", + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName + // Configure the management server with a good listener resource and a + // route configuration resource that specifies a hash policy. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "test-cluster-1", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }, + }}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{{ + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: ":path", + }, + }, + Terminal: true, + }}, }}, }}, - }, - }, - }, nil) - - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + }}, + }}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - gotState, err := tcc.stateCh.Receive(ctx) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Read the update pushed by the resolver to the ClientConn. 
+ val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState := val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - rState := gotState.(resolver.State) cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Error("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } + // Selecting a config when there was a hash policy specified in the route // that will be selected should put a request hash in the config's context. - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs(":path", "/products"))}) + res, err := cs.SelectConfig(iresolver.RPCInfo{ + Context: metadata.NewOutgoingContext(ctx, metadata.Pairs(":path", "/products")), + Method: "/service/method", + }) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } - requestHashGot := ringhash.GetRequestHashForTesting(res.Context) - requestHashWant := xxhash.Sum64String("/products") - if requestHashGot != requestHashWant { - t.Fatalf("requestHashGot = %v, requestHashWant = %v", requestHashGot, requestHashWant) + gotHash := ringhash.GetRequestHashForTesting(res.Context) + wantHash := xxhash.Sum64String("/products") + if gotHash != wantHash { + t.Fatalf("Got request hash: %v, want: %v", gotHash, wantHash) } } -// TestXDSResolverRemovedWithRPCs tests the case where a config selector sends -// an empty update to the resolver after the resource is removed. 
-func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer cancel() - defer xdsR.Close() +// TestResolverRemovedWithRPCs tests the case where resources are removed from +// the management server, causing it to send an empty update to the xDS client, +// which returns a resource-not-found error to the xDS resolver. The test +// verifies that an ongoing RPC is handled properly when this happens. +func (s) TestResolverRemovedWithRPCs(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName + // Configure the management server with a good listener and route + // configuration resource. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "test-cluster-1", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }, + }}, + }}, + }}, + }}, + }}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - }, - }, - }, nil) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - gotState, err := tcc.stateCh.Receive(ctx) + // Read the update pushed by the resolver to the ClientConn. 
+ val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "test-cluster-1" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Errorf("Received unexpected service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) } - // "Make an RPC" by invoking the config selector. cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatalf("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } - // Delete the resource - suErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) + // Delete the resources on the management server. This should result in a + // resource-not-found error from the xDS client. 
+ if err := mgmtServer.Update(ctx, e2e.UpdateOptions{NodeID: nodeID}); err != nil { + t.Fatal(err) + } - if _, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + // The RPC started earlier is still in progress. So, the xDS resolver will + // not produce an empty service config at this point. Instead it will retain + // the cluster to which the RPC is ongoing in the service config, but will + // return an erroring config selector which will fail new RPCs. + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState = val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Errorf("Received unexpected service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + } + cs = iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + _, err = cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) + if err == nil || status.Code(err) != codes.Unavailable { + t.Fatalf("cs.SelectConfig() returned: %v, want: %v", err, codes.Unavailable) } // "Finish the RPC"; this could cause a panic if the resolver doesn't // handle it correctly. res.OnCommitted() + + // Now that the RPC is committed, the xDS resolver is expected to send an + // update with an empty service config. 
+ val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState = val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(`{}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Errorf("Received unexpected service config") + t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + } } // TestXDSResolverRemovedResource tests for proper behavior after a resource is @@ -858,60 +1008,101 @@ func (s) TestXDSResolverRemovedResource(t *testing.T) { } } -func (s) TestXDSResolverWRR(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - +// TestResolverWRR tests the case where the route configuration returned by the +// management server contains a set of weighted clusters. The test performs a +// bunch of RPCs using the cluster specifier returned by the resolver, and +// verifies the cluster distribution. +func (s) TestResolverWRR(t *testing.T) { defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) newWRR = testutils.NewTestWRR - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ - "A": {Weight: 5}, - "B": {Weight: 10}, - }}}, - }, - }, - }, nil) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName + // Configure the management server with a good listener resource and a + // route configuration resource. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "A", + Weight: &wrapperspb.UInt32Value{Value: 75}, + }, + { + Name: "B", + Weight: &wrapperspb.UInt32Value{Value: 25}, + }, + }, + }}, + }}, + }}, + }}, + }}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Read the update pushed by the resolver to the ClientConn. gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } + // Make RPCs are verify WRR behavior in the cluster specifier. 
picks := map[string]int{} - for i := 0; i < 30; i++ { - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + for i := 0; i < 100; i++ { + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } picks[clustermanager.GetPickedClusterForTesting(res.Context)]++ res.OnCommitted() } - want := map[string]int{"cluster:A": 10, "cluster:B": 20} - if !reflect.DeepEqual(picks, want) { - t.Errorf("picked clusters = %v; want %v", picks, want) + want := map[string]int{"cluster:A": 75, "cluster:B": 25} + if !cmp.Equal(picks, want) { + t.Errorf("Picked clusters: %v; want: %v", picks, want) } } From 7bf6a58a171d03d3fab927bf977b88f630ff48a9 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 23 Jan 2023 17:44:50 -0500 Subject: [PATCH 749/998] gcp/observability: Cleanup resources allocated if start errors (#5960) --- gcp/observability/observability.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go index d8dde6404de0..6b7d4b1f762a 100644 --- a/gcp/observability/observability.go +++ b/gcp/observability/observability.go @@ -55,17 +55,29 @@ func Start(ctx context.Context) error { } // Set the project ID if it isn't configured manually. - if err := ensureProjectIDInObservabilityConfig(ctx, config); err != nil { + if err = ensureProjectIDInObservabilityConfig(ctx, config); err != nil { return err } + // Cleanup any created resources this function created in case this function + // errors. 
+ defer func() { + if err != nil { + End() + } + }() + // Enabling tracing and metrics via OpenCensus - if err := startOpenCensus(config); err != nil { + if err = startOpenCensus(config); err != nil { return fmt.Errorf("failed to instrument OpenCensus: %v", err) } + if err = startLogging(ctx, config); err != nil { + return fmt.Errorf("failed to start logging: %v", err) + } + // Logging is controlled by the config at methods level. - return startLogging(ctx, config) + return nil } // End is the clean-up API for gRPC Observability plugin. It is expected to be From 4075ef07c5d5947b313ef74659678f1eca645bad Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 23 Jan 2023 14:50:46 -0800 Subject: [PATCH 750/998] xds: fix panic involving double close of channel in xDS transport (#5959) --- xds/internal/xdsclient/transport/loadreport.go | 8 ++++++++ .../xdsclient/transport/loadreport_test.go | 14 +++++++++++++- xds/internal/xdsclient/transport/transport.go | 1 - 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/xds/internal/xdsclient/transport/loadreport.go b/xds/internal/xdsclient/transport/loadreport.go index a683afd57938..58a2e5dedb6a 100644 --- a/xds/internal/xdsclient/transport/loadreport.go +++ b/xds/internal/xdsclient/transport/loadreport.go @@ -62,6 +62,11 @@ func (t *Transport) lrsStartStream() { ctx, cancel := context.WithCancel(context.Background()) t.lrsCancelStream = cancel + + // Create a new done channel everytime a new stream is created. This ensures + // that we don't close the same channel multiple times (from lrsRunner() + // goroutine) when multiple streams are created and closed. + t.lrsRunnerDoneCh = make(chan struct{}) go t.lrsRunner(ctx) } @@ -78,6 +83,9 @@ func (t *Transport) lrsStopStream() { t.lrsCancelStream() t.logger.Infof("Stopping LRS stream") + + // Wait for the runner goroutine to exit. The done channel will be + // recreated when a new stream is created. 
<-t.lrsRunnerDoneCh } diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go index f6203c9b4425..815ca25b27b7 100644 --- a/xds/internal/xdsclient/transport/loadreport_test.go +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -54,7 +54,7 @@ func (s) TestReportLoad(t *testing.T) { NodeProto: nodeProto, } - // Create a transport to the fake server. + // Create a transport to the fake management server. tr, err := transport.New(transport.Options{ ServerCfg: serverCfg, UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No ADS validation. @@ -190,4 +190,16 @@ func (s) TestReportLoad(t *testing.T) { if _, err := mgmtServer.LRSStreamCloseChan.Receive(ctx); err != nil { t.Fatal("Timeout waiting for LRS stream to close") } + + // Calling the load reporting API again should result in the creation of a + // new LRS stream. This ensures that creating and closing multiple streams + // works smoothly. + _, cancelLRS3 := tr.ReportLoad() + if err != nil { + t.Fatalf("Failed to start LRS load reporting: %v", err) + } + if _, err := mgmtServer.LRSStreamOpenChan.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for LRS stream to be created: %v", err) + } + cancelLRS3() } diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index e0b9807c1648..814ca5f87263 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -202,7 +202,6 @@ func New(opts Options) (*Transport, error) { versions: make(map[string]string), nonces: make(map[string]string), adsRunnerDoneCh: make(chan struct{}), - lrsRunnerDoneCh: make(chan struct{}), } // This context is used for sending and receiving RPC requests and From 52a8392f374b8cd60e176b67925a7f8c1605d014 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 23 Jan 2023 18:31:16 -0500 Subject: 
[PATCH 751/998] gcp/observability: update method name validation (#5951) --- gcp/observability/config.go | 33 +++++--- gcp/observability/logging.go | 23 ++++-- gcp/observability/logging_test.go | 126 ++++++++++++++++++++++++++++-- 3 files changed, 159 insertions(+), 23 deletions(-) diff --git a/gcp/observability/config.go b/gcp/observability/config.go index b361bc367c01..ae7ea8b6983c 100644 --- a/gcp/observability/config.go +++ b/gcp/observability/config.go @@ -24,19 +24,14 @@ import ( "errors" "fmt" "os" - "regexp" + "strings" gcplogging "cloud.google.com/go/logging" "golang.org/x/oauth2/google" "google.golang.org/grpc/internal/envconfig" ) -const ( - envProjectID = "GOOGLE_CLOUD_PROJECT" - methodStringRegexpStr = `^([\w./]+)/((?:\w+)|[*])$` -) - -var methodStringRegexp = regexp.MustCompile(methodStringRegexpStr) +const envProjectID = "GOOGLE_CLOUD_PROJECT" // fetchDefaultProjectID fetches the default GCP project id from environment. func fetchDefaultProjectID(ctx context.Context) string { @@ -59,6 +54,25 @@ func fetchDefaultProjectID(ctx context.Context) string { return credentials.ProjectID } +// validateMethodString validates whether the string passed in is a valid +// pattern. +func validateMethodString(method string) error { + if strings.HasPrefix(method, "/") { + return errors.New("cannot have a leading slash") + } + serviceMethod := strings.Split(method, "/") + if len(serviceMethod) != 2 { + return errors.New("/ must come in between service and method, only one /") + } + if serviceMethod[1] == "" { + return errors.New("method name must be non empty") + } + if serviceMethod[0] == "*" { + return errors.New("cannot have service wildcard * i.e. 
(*/m)") + } + return nil +} + func validateLogEventMethod(methods []string, exclude bool) error { for _, method := range methods { if method == "*" { @@ -67,9 +81,8 @@ func validateLogEventMethod(methods []string, exclude bool) error { } continue } - match := methodStringRegexp.FindStringSubmatch(method) - if match == nil { - return fmt.Errorf("invalid method string: %v", method) + if err := validateMethodString(method); err != nil { + return fmt.Errorf("invalid method string: %v, err: %v", method, err) } } return nil diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index dcd7bf848fd7..bf46ffa6fe3d 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -22,6 +22,7 @@ import ( "bytes" "context" "encoding/base64" + "errors" "fmt" "strings" "time" @@ -322,6 +323,7 @@ func (bml *binaryMethodLogger) Log(c iblog.LogEntryConfig) { } type eventConfig struct { + // ServiceMethod has /s/m syntax for fast matching. ServiceMethod map[string]bool Services map[string]bool MatchAll bool @@ -364,6 +366,17 @@ func (bl *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { return nil } +// parseMethod splits service and method from the input. It expects format +// "service/method". +func parseMethod(method string) (string, string, error) { + pos := strings.Index(method, "/") + if pos < 0 { + // Shouldn't happen, config already validated. 
+ return "", "", errors.New("invalid method name: no / found") + } + return method[:pos], method[pos+1:], nil +} + func registerClientRPCEvents(clientRPCEvents []clientRPCEvents, exporter loggingExporter) { if len(clientRPCEvents) == 0 { return @@ -382,7 +395,7 @@ func registerClientRPCEvents(clientRPCEvents []clientRPCEvents, exporter logging eventConfig.MatchAll = true continue } - s, m, err := grpcutil.ParseMethod(method) + s, m, err := parseMethod(method) if err != nil { continue } @@ -390,7 +403,7 @@ func registerClientRPCEvents(clientRPCEvents []clientRPCEvents, exporter logging eventConfig.Services[s] = true continue } - eventConfig.ServiceMethod[method] = true + eventConfig.ServiceMethod["/"+method] = true } eventConfigs = append(eventConfigs, eventConfig) } @@ -419,15 +432,15 @@ func registerServerRPCEvents(serverRPCEvents []serverRPCEvents, exporter logging eventConfig.MatchAll = true continue } - s, m, err := grpcutil.ParseMethod(method) - if err != nil { // Shouldn't happen, already validated at this point. 
+ s, m, err := parseMethod(method) + if err != nil { continue } if m == "*" { eventConfig.Services[s] = true continue } - eventConfig.ServiceMethod[method] = true + eventConfig.ServiceMethod["/"+method] = true } eventConfigs = append(eventConfigs, eventConfig) } diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index 1489a60ea22e..0265c45ddc04 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -24,6 +24,7 @@ import ( "encoding/json" "fmt" "io" + "strings" "sync" "testing" @@ -99,13 +100,14 @@ func setupObservabilitySystemWithConfig(cfg *config) (func(), error) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() err = Start(ctx) - if err != nil { - return nil, fmt.Errorf("error in Start: %v", err) - } - return func() { + cleanup := func() { End() envconfig.ObservabilityConfig = oldObservabilityConfig - }, nil + } + if err != nil { + return cleanup, fmt.Errorf("error in Start: %v", err) + } + return cleanup, nil } // TestClientRPCEventsLogAll tests the observability system configured with a @@ -777,18 +779,18 @@ func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { CloudLogging: &cloudLogging{ ClientRPCEvents: []clientRPCEvents{ { - Methods: []string{"/grpc.testing.TestService/UnaryCall"}, + Methods: []string{"grpc.testing.TestService/UnaryCall"}, MaxMetadataBytes: 30, MaxMessageBytes: 30, }, { - Methods: []string{"/grpc.testing.TestService/EmptyCall"}, + Methods: []string{"grpc.testing.TestService/EmptyCall"}, Exclude: true, MaxMetadataBytes: 30, MaxMessageBytes: 30, }, { - Methods: []string{"/grpc.testing.TestService/*"}, + Methods: []string{"grpc.testing.TestService/*"}, MaxMetadataBytes: 30, MaxMessageBytes: 30, }, @@ -1273,3 +1275,111 @@ func (s) TestMetadataTruncationAccountsKey(t *testing.T) { } fle.mu.Unlock() } + +// TestMethodInConfiguration tests different method names with an expectation on +// whether they should error or not. 
+func (s) TestMethodInConfiguration(t *testing.T) { + // To skip creating a stackdriver exporter. + fle := &fakeLoggingExporter{ + t: t, + } + + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + tests := []struct { + name string + config *config + wantErr string + }{ + { + name: "leading-slash", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"/service/method"}, + }, + }, + }, + }, + wantErr: "cannot have a leading slash", + }, + { + name: "wildcard service/method", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*/method"}, + }, + }, + }, + }, + wantErr: "cannot have service wildcard *", + }, + { + name: "/ in service name", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"ser/vice/method"}, + }, + }, + }, + }, + wantErr: "only one /", + }, + { + name: "empty method name", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"service/"}, + }, + }, + }, + }, + wantErr: "method name must be non empty", + }, + { + name: "normal", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"service/method"}, + }, + }, + }, + }, + wantErr: "", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cleanup, gotErr := setupObservabilitySystemWithConfig(test.config) + if cleanup != nil { + defer cleanup() + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Start(%v) = %v, wantErr %v", test.config, gotErr, 
test.wantErr) + } + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Start(%v) = %v, wantErr %v", test.config, gotErr, test.wantErr) + } + }) + } +} From 4adb2a7a00d8b62df5ea34d520fe3ca13bffd31a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 23 Jan 2023 16:56:37 -0800 Subject: [PATCH 752/998] xds/resolver: cleanup tests to use real xDS client 2/n (#5952) --- xds/internal/resolver/xds_resolver_test.go | 126 +++++++++++++++++---- 1 file changed, 105 insertions(+), 21 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index e35b4b6ecec6..af7729390660 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -447,34 +447,118 @@ func (s) TestResolverResourceName(t *testing.T) { } } -// TestXDSResolverWatchCallbackAfterClose tests the case where a service update -// from the underlying xdsClient is received after the resolver is closed. -func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer cancel() +// TestResolverWatchCallbackAfterClose tests the case where a service update +// from the underlying xDS client is received after the resolver is closed, and +// verifies that the update is not propagated to the ClientConn. +func (s) TestResolverWatchCallbackAfterClose(t *testing.T) { + // Setup the management server that synchronizes with the test goroutine + // using two channels. The management server signals the test goroutine when + // it receives a discovery request for a route configuration resource. And + // the test goroutine signals the management server when the resolver is + // closed. 
+ waitForRouteConfigDiscoveryReqCh := make(chan struct{}) + waitForResolverCloseCh := make(chan struct{}) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() == version.V3RouteConfigURL { + close(waitForRouteConfigDiscoveryReqCh) + <-waitForResolverCloseCh + } + return nil + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + // Configure listener and route configuration resources on the management + // server. + const serviceName = "my-service-client-side-xds" + rdsName := "route-" + serviceName + cdsName := "cluster-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, serviceName, cdsName)}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - // Call the watchAPI callback after closing the resolver, and make sure no - // update is triggerred on the ClientConn. 
- xdsR.Close() - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, - }, - }, - }, nil) + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Wait for a discovery request for a route configuration resource. + select { + case <-waitForRouteConfigDiscoveryReqCh: + case <-ctx.Done(): + t.Fatal("Timeout when waiting for a discovery request for a route configuration resource") + } + + // Close the resolver and unblock the management server. + rClose() + close(waitForResolverCloseCh) + // Verify that the update from the management server is not propagated to + // the ClientConn. The xDS resolver, once closed, is expected to drop + // updates from the xDS client. sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() - if gotVal, gotErr := tcc.stateCh.Receive(sCtx); gotErr != context.DeadlineExceeded { - t.Fatalf("ClientConn.UpdateState called after xdsResolver is closed: %v", gotVal) + if _, err := tcc.stateCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("ClientConn received an update from the resolver that was closed: %v", err) + } +} + +// TestResolverCloseClosesXDSClient tests that the xDS resolver's Close method +// closes the xDS client. +func (s) TestResolverCloseClosesXDSClient(t *testing.T) { + bootstrapCfg := &bootstrap.Config{ + XDSServer: &bootstrap.ServerConfig{ + ServerURI: "dummy-management-server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + TransportAPI: version.TransportV3, + }, + } + + // Override xDS client creation to use bootstrap configuration pointing to a + // dummy management server. Also close a channel when the returned xDS + // client is closed. 
+ closeCh := make(chan struct{}) + origNewClient := newXDSClient + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + c, cancel, err := xdsclient.NewWithConfigForTesting(bootstrapCfg, defaultTestTimeout, defaultTestTimeout) + return c, func() { + close(closeCh) + cancel() + }, err + } + defer func() { + newXDSClient = origNewClient + }() + + _, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///my-service-client-side-xds")}) + rClose() + + select { + case <-closeCh: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for xDS client to be closed") } } From e5a0237a46a5f95fa571624929be10c7afebb180 Mon Sep 17 00:00:00 2001 From: Ronak Jain Date: Tue, 24 Jan 2023 23:11:05 +0530 Subject: [PATCH 753/998] encoding: fix duplicate compressor names (#5958) --- encoding/encoding.go | 4 ++- encoding/encoding_test.go | 55 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 encoding/encoding_test.go diff --git a/encoding/encoding.go b/encoding/encoding.go index 711763d54fb7..07a5861352a6 100644 --- a/encoding/encoding.go +++ b/encoding/encoding.go @@ -75,7 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c - grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. diff --git a/encoding/encoding_test.go b/encoding/encoding_test.go new file mode 100644 index 000000000000..38c31dcdddcc --- /dev/null +++ b/encoding/encoding_test.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/grpcutil" +) + +type mockNamedCompressor struct { + Compressor +} + +func (mockNamedCompressor) Name() string { + return "mock-compressor" +} + +func TestDuplicateCompressorRegister(t *testing.T) { + defer func(m map[string]Compressor) { registeredCompressor = m }(registeredCompressor) + defer func(c []string) { grpcutil.RegisteredCompressorNames = c }(grpcutil.RegisteredCompressorNames) + registeredCompressor = map[string]Compressor{} + grpcutil.RegisteredCompressorNames = []string{} + + RegisterCompressor(&mockNamedCompressor{}) + + // Register another instance of the same compressor. 
+ mc := &mockNamedCompressor{} + RegisterCompressor(mc) + if got := registeredCompressor["mock-compressor"]; got != mc { + t.Fatalf("Unexpected compressor, got: %+v, want:%+v", got, mc) + } + + wantNames := []string{"mock-compressor"} + if !cmp.Equal(wantNames, grpcutil.RegisteredCompressorNames) { + t.Fatalf("Unexpected compressor names, got: %+v, want:%+v", grpcutil.RegisteredCompressorNames, wantNames) + } +} From 894816c487f8dd48fc971c45a7c5baa4b86ef7de Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 24 Jan 2023 10:19:54 -0800 Subject: [PATCH 754/998] grpclb: rename `grpclbstate` package back to `state` (#5962) Fixes https://github.com/grpc/grpc-go/issues/5928 --- balancer/grpclb/grpclb.go | 2 +- balancer/grpclb/grpclb_test.go | 2 +- balancer/grpclb/{grpclbstate => state}/state.go | 8 ++++---- internal/resolver/dns/dns_resolver.go | 2 +- internal/resolver/dns/dns_resolver_test.go | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) rename balancer/grpclb/{grpclbstate => state}/state.go (87%) diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index b0dd72fce141..dd15810d0aeb 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -32,7 +32,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/grpclb/grpclbstate" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index f8fbfe03d5d1..da2df41f2af8 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -36,7 +36,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/grpclb/grpclbstate" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" 
"google.golang.org/grpc/internal" diff --git a/balancer/grpclb/grpclbstate/state.go b/balancer/grpclb/state/state.go similarity index 87% rename from balancer/grpclb/grpclbstate/state.go rename to balancer/grpclb/state/state.go index cece046be343..4ecfa1c21511 100644 --- a/balancer/grpclb/grpclbstate/state.go +++ b/balancer/grpclb/state/state.go @@ -16,9 +16,9 @@ * */ -// Package grpclbstate declares grpclb types to be set by resolvers wishing to -// pass information to grpclb via resolver.State Attributes. -package grpclbstate +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state import ( "google.golang.org/grpc/resolver" @@ -27,7 +27,7 @@ import ( // keyType is the key to use for storing State in Attributes. type keyType string -const key = keyType("grpc.grpclb.grpclbstate") +const key = keyType("grpc.grpclb.state") // State contains gRPCLB-relevant data passed from the name resolver. type State struct { diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index d51302e65c62..b08ac30adfef 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,7 @@ import ( "sync" "time" - grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index bfed6a74ff38..6bfcf299b33c 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -34,7 +34,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" - grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" + 
grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/testutils" From 3930549b38c0fc4cd94a95efccf7cef5f90515fd Mon Sep 17 00:00:00 2001 From: "Kyle J. Burda" Date: Tue, 24 Jan 2023 15:03:56 -0500 Subject: [PATCH 755/998] resolver: replace resolver.Target.Endpoint field with Endpoint() method (#5852) Fixes https://github.com/grpc/grpc-go/issues/5796 --- balancer/grpclb/grpclb.go | 4 +- balancer/rls/balancer.go | 2 +- clientconn.go | 19 +--- clientconn_parsed_target_test.go | 90 ++++++++++--------- .../features/load_balancing/client/main.go | 2 +- .../features/name_resolving/client/main.go | 2 +- internal/resolver/dns/dns_resolver.go | 2 +- internal/resolver/dns/dns_resolver_test.go | 31 ++++--- internal/resolver/passthrough/passthrough.go | 4 +- resolver/resolver.go | 22 ++++- .../balancer/clusterresolver/priority_test.go | 5 +- .../clusterresolver/resource_resolver_dns.go | 3 +- .../clusterresolver/resource_resolver_test.go | 17 ++-- xds/internal/resolver/xds_resolver_test.go | 18 ++-- 14 files changed, 114 insertions(+), 107 deletions(-) diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index dd15810d0aeb..6d698229a342 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -136,8 +136,8 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal lb := &lbBalancer{ cc: newLBCacheClientConn(cc), - dialTarget: opt.Target.Endpoint, - target: opt.Target.Endpoint, + dialTarget: opt.Target.Endpoint(), + target: opt.Target.Endpoint(), opt: opt, fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index f0cff9ac4455..b2f97a9509ed 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -481,7 +481,7 @@ func (b *rlsBalancer) sendNewPickerLocked() { } picker := &rlsPicker{ kbm: 
b.lbCfg.kbMap, - origEndpoint: b.bopts.Target.Endpoint, + origEndpoint: b.bopts.Target.Endpoint(), lb: b, defaultPolicy: b.defaultPolicy, ctrlCh: b.ctrlCh, diff --git a/clientconn.go b/clientconn.go index 6ead8a6f1e9f..d607d4e9e243 100644 --- a/clientconn.go +++ b/clientconn.go @@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) if err != nil { return nil, err } @@ -1587,30 +1587,17 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and endpoint. Query +// resolver.Target struct containing scheme, authority and url. Query // params are stripped from the endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - // For targets of the form "[scheme]://[authority]/endpoint, the endpoint - // value returned from url.Parse() contains a leading "/". Although this is - // in accordance with RFC 3986, we do not want to break existing resolver - // implementations which expect the endpoint without the leading "/". So, we - // end up stripping the leading "/" here. But this will result in an - // incorrect parsing for something like "unix:///path/to/socket". Since we - // own the "unix" resolver, we can workaround in the unix resolver by using - // the `URL` field instead of the `Endpoint` field. 
- endpoint := u.Path - if endpoint == "" { - endpoint = u.Opaque - } - endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ Scheme: u.Scheme, Authority: u.Host, - Endpoint: endpoint, URL: *u, }, nil } diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go index 8f832a2c7cb4..e957bca78c1e 100644 --- a/clientconn_parsed_target_test.go +++ b/clientconn_parsed_target_test.go @@ -21,6 +21,7 @@ package grpc import ( "context" "errors" + "fmt" "net" "net/url" "testing" @@ -28,6 +29,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" ) @@ -40,46 +42,46 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { wantParsed resolver.Target }{ // No scheme is specified. - {target: "://", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://"}}, - {target: ":///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///"}}, - {target: "://a/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/"}}, - {target: ":///a", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///a"}}, - {target: "://a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://a/b"}}, - {target: "/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/"}}, - {target: "a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a/b"}}, - {target: "a//b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a//b"}}, - {target: "google.com", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com"}}, - {target: "google.com/?a=b", badScheme: true, wantParsed: 
resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "google.com/"}}, - {target: "/unix/socket/address", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address"}}, + {target: "://", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://"))}}, + {target: ":///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///"))}}, + {target: "://a/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://a/"))}}, + {target: ":///a", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///a"))}}, + {target: "://a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://a/b"))}}, + {target: "/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/"))}}, + {target: "a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a/b"))}}, + {target: "a//b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a//b"))}}, + {target: "google.com", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "google.com"))}}, + {target: "google.com/?a=b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "google.com/?a=b"))}}, 
+ {target: "/unix/socket/address", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/unix/socket/address"))}}, // An unregistered scheme is specified. - {target: "a:///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///"}}, - {target: "a://b/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/"}}, - {target: "a:///b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:///b"}}, - {target: "a://b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b/c"}}, - {target: "a:b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:b"}}, - {target: "a:/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a:/b"}}, - {target: "a://b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "a://b"}}, + {target: "a:///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:///"))}}, + {target: "a://b/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a://b/"))}}, + {target: "a:///b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:///b"))}}, + {target: "a://b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a://b/c"))}}, + {target: "a:b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:b"))}}, + {target: "a:/b", 
badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:/b"))}}, + {target: "a://b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a://b"))}}, // A registered scheme is specified. - {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com"}}, - {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}}, - {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/"}}, - {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}}, - {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, - {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b"}}, - {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b"}}, - {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a-b"}}, - {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: " a///://::!@"}}, - {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "passthrough:abc"}}, - {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc"}}, - {target: "unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, - {target: "unix-abstract:///", wantParsed: resolver.Target{Scheme: "unix-abstract", 
Authority: "", Endpoint: ""}}, - {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "", Endpoint: "unix:///a/b/c"}}, + {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", URL: *testutils.MustParseURL("dns:///google.com")}}, + {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", URL: *testutils.MustParseURL("dns://a.server.com/google.com")}}, + {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", URL: *testutils.MustParseURL("dns://a.server.com/google.com/?a=b")}}, + {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:///a/b/c")}}, + {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a/b/c")}}, + {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a b")}}, + {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a:b")}}, + {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a-b")}}, + {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:/ a///://::!@#$%25^&*()b")}}, + {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:passthrough:abc")}}, + {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:unix:///abc")}}, + {target: 
"unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:///a/b/c")}}, + {target: "unix-abstract:///", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:///")}}, + {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "", URL: *testutils.MustParseURL("passthrough:///unix:///a/b/c")}}, // Cases for `scheme:absolute-path`. - {target: "dns:/a/b/c", wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "a/b/c"}}, - {target: "unregistered:/a/b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "unregistered:/a/b/c"}}, + {target: "dns:/a/b/c", wantParsed: resolver.Target{Scheme: "dns", Authority: "", URL: *testutils.MustParseURL("dns:/a/b/c")}}, + {target: "unregistered:/a/b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL("unregistered:/a/b/c")}}, } for _, test := range tests { @@ -138,56 +140,56 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { // different behaviors with a custom dialer. 
{ target: "unix:a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:a/b/c")}, wantDialerAddress: "unix:a/b/c", }, { target: "unix:/a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:/a/b/c")}, wantDialerAddress: "unix:///a/b/c", }, { target: "unix:///a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, + wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:///a/b/c")}, wantDialerAddress: "unix:///a/b/c", }, { target: "dns:///127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: "dns", Authority: "", URL: *testutils.MustParseURL("dns:///127.0.0.1:50051")}, wantDialerAddress: "127.0.0.1:50051", }, { target: ":///127.0.0.1:50051", badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ":///127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///127.0.0.1:50051"))}, wantDialerAddress: ":///127.0.0.1:50051", }, { target: "dns://authority/127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "authority", Endpoint: "127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: "dns", Authority: "authority", URL: *testutils.MustParseURL("dns://authority/127.0.0.1:50051")}, wantDialerAddress: "127.0.0.1:50051", }, { target: "://authority/127.0.0.1:50051", badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "://authority/127.0.0.1:50051"}, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, 
"://authority/127.0.0.1:50051"))}, wantDialerAddress: "://authority/127.0.0.1:50051", }, { target: "/unix/socket/address", badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: "/unix/socket/address"}, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/unix/socket/address"))}, wantDialerAddress: "/unix/socket/address", }, { target: "", badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", Endpoint: ""}, + wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ""))}, wantDialerAddress: "", }, { target: "passthrough://a.server.com/google.com", - wantParsed: resolver.Target{Scheme: "passthrough", Authority: "a.server.com", Endpoint: "google.com"}, + wantParsed: resolver.Target{Scheme: "passthrough", Authority: "a.server.com", URL: *testutils.MustParseURL("passthrough://a.server.com/google.com")}, wantDialerAddress: "google.com", }, } diff --git a/examples/features/load_balancing/client/main.go b/examples/features/load_balancing/client/main.go index 2caecb7b3d32..6e3d1fc86fe3 100644 --- a/examples/features/load_balancing/client/main.go +++ b/examples/features/load_balancing/client/main.go @@ -111,7 +111,7 @@ type exampleResolver struct { } func (r *exampleResolver) start() { - addrStrs := r.addrsStore[r.target.Endpoint] + addrStrs := r.addrsStore[r.target.Endpoint()] addrs := make([]resolver.Address, len(addrStrs)) for i, s := range addrStrs { addrs[i] = resolver.Address{Addr: s} diff --git a/examples/features/name_resolving/client/main.go b/examples/features/name_resolving/client/main.go index ad6b310b6de7..2766611ba795 100644 --- a/examples/features/name_resolving/client/main.go +++ b/examples/features/name_resolving/client/main.go @@ -119,7 +119,7 @@ type exampleResolver struct { } func (r *exampleResolver) start() { - addrStrs := 
r.addrsStore[r.target.Endpoint] + addrStrs := r.addrsStore[r.target.Endpoint()] addrs := make([]resolver.Address, len(addrStrs)) for i, s := range addrStrs { addrs[i] = resolver.Address{Addr: s} diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index b08ac30adfef..09a667f33cb0 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -116,7 +116,7 @@ type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 6bfcf299b33c..d67ee7d080bf 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "net" - "net/url" "os" "reflect" "strings" @@ -735,7 +734,7 @@ func testDNSResolver(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -807,7 +806,7 @@ func TestDNSResolverExponentialBackoff(t *testing.T) { cc := &testClientConn{target: test.target} // Cause ClientConn to return an error. 
cc.updateStateErr = balancer.ErrBadResolverState - r, err := b.Build(resolver.Target{Endpoint: test.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", test.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("Error building resolver for target %v: %v", test.target, err) } @@ -962,7 +961,7 @@ func testDNSResolverWithSRV(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -1046,7 +1045,7 @@ func testDNSResolveNow(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -1124,7 +1123,7 @@ func testIPResolver(t *testing.T) { for _, v := range tests { b := NewBuilder() cc := &testClientConn{target: v.target} - r, err := b.Build(resolver.Target{Endpoint: v.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", v.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -1175,7 +1174,7 @@ func TestResolveFunc(t *testing.T) { {"[2001:db8:a0b:12f0::1]:21", nil}, {":80", nil}, {"127.0.0...1:12345", nil}, - {"[fe80::1%lo0]:80", nil}, + {"[fe80::1%25lo0]:80", nil}, {"golang.org:http", nil}, {"[2001:db8::1]:http", nil}, {"[2001:db8::1]:", errEndsWithColon}, @@ -1187,7 +1186,7 @@ func TestResolveFunc(t *testing.T) { b := NewBuilder() for _, v := range tests { cc := 
&testClientConn{target: v.addr, errChan: make(chan error, 1)} - r, err := b.Build(resolver.Target{Endpoint: v.addr}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", v.addr))}, cc, resolver.BuildOptions{}) if err == nil { r.Close() } @@ -1226,7 +1225,7 @@ func TestDisableServiceConfig(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{DisableServiceConfig: a.disableServiceConfig}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{DisableServiceConfig: a.disableServiceConfig}) if err != nil { t.Fatalf("%v\n", err) } @@ -1264,7 +1263,7 @@ func TestTXTError(t *testing.T) { envconfig.TXTErrIgnore = ignore b := NewBuilder() cc := &testClientConn{target: "ipv4.single.fake"} // has A records but not TXT records. 
- r, err := b.Build(resolver.Target{Endpoint: "ipv4.single.fake"}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", "ipv4.single.fake"))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -1300,7 +1299,7 @@ func TestDNSResolverRetry(t *testing.T) { b := NewBuilder() target := "ipv4.single.fake" cc := &testClientConn{target: target} - r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -1438,12 +1437,12 @@ func TestCustomAuthority(t *testing.T) { } } + mockEndpointTarget := "foo.bar.com" b := NewBuilder() - cc := &testClientConn{target: "foo.bar.com", errChan: make(chan error, 1)} + cc := &testClientConn{target: mockEndpointTarget, errChan: make(chan error, 1)} target := resolver.Target{ - Endpoint: "foo.bar.com", Authority: a.authority, - URL: url.URL{Host: a.authority}, + URL: *testutils.MustParseURL(fmt.Sprintf("scheme://%s/%s", a.authority, mockEndpointTarget)), } r, err := b.Build(target, cc, resolver.BuildOptions{}) @@ -1501,7 +1500,7 @@ func TestRateLimitedResolve(t *testing.T) { b := NewBuilder() cc := &testClientConn{target: target} - r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("resolver.Build() returned error: %v\n", err) } @@ -1610,7 +1609,7 @@ func TestReportError(t *testing.T) { cc := &testClientConn{target: target, errChan: make(chan error)} totalTimesCalledError := 0 b := NewBuilder() - r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", 
target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("Error building resolver for target %v: %v", target, err) } diff --git a/internal/resolver/passthrough/passthrough.go b/internal/resolver/passthrough/passthrough.go index c6e08221ff64..afac56572ad5 100644 --- a/internal/resolver/passthrough/passthrough.go +++ b/internal/resolver/passthrough/passthrough.go @@ -31,7 +31,7 @@ const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - if target.Endpoint == "" && opts.Dialer == nil { + if target.Endpoint() == "" && opts.Dialer == nil { return nil, errors.New("passthrough: received empty target in Build()") } r := &passthroughResolver{ @@ -52,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/resolver/resolver.go b/resolver/resolver.go index 967cbc7373ab..654e9ce69f4a 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -24,6 +24,7 @@ import ( "context" "net" "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -247,9 +248,6 @@ type Target struct { Scheme string // Deprecated: use URL.Host instead. Authority string - // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when - // the former is empty. - Endpoint string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. 
Any query params specified in the original dial @@ -257,6 +255,24 @@ type Target struct { URL url.URL } +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. 
diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index b2cc0c9f2097..984215a86e6a 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -62,7 +62,8 @@ func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) cc := testutils.NewTestClientConn(t) builder := balancer.Get(Name) - edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSServcie}}) + // TODO: @kylejb will fix typo for 'testEDSServcie' in another PR + edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{URL: *testutils.MustParseURL("dns:///" + testEDSServcie)}}) if edsb == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } @@ -853,7 +854,7 @@ func (s) TestFallbackToDNS(t *testing.T) { defer ctxCancel() select { case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) } case <-ctx.Done(): diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 7a639f51a5d9..703b00811dfa 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -21,6 +21,7 @@ package clusterresolver import ( "fmt" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -50,7 +51,7 @@ func newDNSResolver(target string, topLevelResolver *resourceResolver) *dnsDisco target: target, topLevelResolver: topLevelResolver, } 
- r, err := newDNS(resolver.Target{Scheme: "dns", Endpoint: target}, ret, resolver.BuildOptions{}) + r, err := newDNS(resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + target)}, ret, resolver.BuildOptions{}) if err != nil { select { case <-topLevelResolver.updateChannel: diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index 8c90ed0e1cd4..f20482e30288 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -24,9 +24,10 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/xds/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -40,10 +41,10 @@ var ( ) func init() { - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) testEDSUpdates = append(testEDSUpdates, parseEDSRespProtoForTesting(clab1.Build())) - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) + clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) testEDSUpdates = append(testEDSUpdates, parseEDSRespProtoForTesting(clab2.Build())) } @@ -156,7 +157,7 @@ func (s) TestResourceResolverOneDNSResource(t *testing.T) { { name: "watch DNS", target: testDNSTarget, - wantTarget: resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}, + wantTarget: resolver.Target{Scheme: "dns", URL: 
*testutils.MustParseURL("dns:///" + testDNSTarget)}, addrs: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}, want: []priorityConfig{{ mechanism: DiscoveryMechanism{ @@ -614,7 +615,7 @@ func (s) TestResourceResolverEDSAndDNS(t *testing.T) { } select { case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -719,7 +720,7 @@ func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { }}) select { case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -786,7 +787,7 @@ func (s) TestResourceResolverError(t *testing.T) { } select { case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) } case <-ctx.Done(): @@ -847,7 +848,7 @@ func (s) TestResourceResolverDNSResolveNow(t *testing.T) { defer ctxCancel() select { case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", Endpoint: testDNSTarget}); diff != "" { + if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) } case <-ctx.Done(): diff 
--git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index af7729390660..deb7018ca617 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -75,7 +75,7 @@ const ( defaultTestShortTimeout = 100 * time.Microsecond ) -var target = resolver.Target{Endpoint: targetStr, URL: url.URL{Scheme: "xds", Path: "/" + targetStr}} +var target = resolver.Target{URL: *testutils.MustParseURL("xds:///" + targetStr)} var routerFilter = xdsresource.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} var routerFilterList = []xdsresource.HTTPFilter{routerFilter} @@ -907,14 +907,14 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { { "cds_experimental": { "cluster": "test-cluster-1" - } - } - ] - } - } - } - } - ] + } + } + ] + } + } + } + } + ] }`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { t.Errorf("Received unexpected service config") From bf8fc46fa6eb913e4ed0f6dee6c6a7b75e85fbf0 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 Jan 2023 15:09:06 -0800 Subject: [PATCH 756/998] xds/resolver: cleanup tests to use real xDS client 5/n (#5955) --- internal/testutils/xds/e2e/clientresources.go | 9 +- xds/internal/resolver/xds_resolver_test.go | 656 ++++++++++-------- .../e2e_test/federation_watchers_test.go | 2 +- .../xdsclient/e2e_test/misc_watchers_test.go | 4 +- .../xdsclient/e2e_test/rds_watchers_test.go | 20 +- 5 files changed, 389 insertions(+), 302 deletions(-) diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index 2dacebb14653..9d9012e23838 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -296,7 +296,14 @@ func DefaultRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.Rou Routes: []*v3routepb.Route{{ Match: &v3routepb.RouteMatch{PathSpecifier: 
&v3routepb.RouteMatch_Prefix{Prefix: "/"}}, Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: clusterName, + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }, + }}, }}, }}, }}, diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index deb7018ca617..a4b884e88680 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -28,6 +28,7 @@ import ( "time" xxhash "github.com/cespare/xxhash/v2" + "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/google/go-cmp/cmp" "github.com/google/uuid" "google.golang.org/grpc" @@ -58,10 +59,13 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/wrapperspb" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config @@ -858,27 +862,9 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { // Configure the management server with a good listener and route // configuration 
resource. resources := e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, - Routes: []*v3routepb.RouteConfiguration{{ - Name: rdsName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsName}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - { - Name: "test-cluster-1", - Weight: &wrapperspb.UInt32Value{Value: 100}, - }, - }, - }}, - }}, - }}, - }}, - }}, + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "test-cluster-1")}, SkipValidation: true, } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -917,9 +903,7 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { ] }`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("Received unexpected service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } cs := iresolver.GetConfigSelector(rState) @@ -950,9 +934,7 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("Received unexpected service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + 
t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } cs = iresolver.GetConfigSelector(rState) if cs == nil { @@ -979,9 +961,7 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { } wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(`{}`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("Received unexpected service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } } @@ -1190,54 +1170,139 @@ func (s) TestResolverWRR(t *testing.T) { } } -func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() +// TestResolverMaxStreamDuration tests the case where the resolver receives max +// stream duration as part of the listener and route configuration resources. +// The test verifies that the RPC timeout returned by the config selector +// matches expectations. A non-nil max stream duration (this includes an +// explicit zero value) in a matching route overrides the value specified in the +// listener resource. 
+func (s) TestResolverMaxStreamDuration(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() - defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) - newWRR = testutils.NewTestWRR + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{ - Prefix: newStringP("/foo"), - WeightedClusters: map[string]xdsresource.WeightedCluster{"A": {Weight: 1}}, - MaxStreamDuration: newDurationP(5 * time.Second), - }, { - Prefix: newStringP("/bar"), - WeightedClusters: map[string]xdsresource.WeightedCluster{"B": {Weight: 1}}, - MaxStreamDuration: newDurationP(0), - }, { - Prefix: newStringP(""), - WeightedClusters: map[string]xdsresource.WeightedCluster{"C": {Weight: 1}}, - }}, + // Configure the management server with a listener resource that specifies a + // max stream duration as part of its HTTP connection manager. Also + // configure a route configuration resource, which has multiple routes with + // different values of max stream duration. + ldsName := serviceName + rdsName := "route-" + serviceName + hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, }, + RouteConfigName: rdsName, + }}, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(1 * time.Second), }, - }, nil) + }) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{{ + Name: ldsName, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + }}, + Routes: []*v3routepb.RouteConfiguration{{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: 
[]*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/foo"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "A", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }}, + }, + MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{ + MaxStreamDuration: durationpb.New(5 * time.Second), + }, + }}, + }, + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/bar"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "B", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }}, + }, + MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{ + MaxStreamDuration: durationpb.New(0 * time.Second), + }, + }}, + }, + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "C", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }}, + }, + }}, + }, + }, + }}, + }}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + // Read the update pushed by the resolver to the ClientConn. 
gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } testCases := []struct { @@ -1262,295 +1327,310 @@ func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { t.Run(tc.name, func(t *testing.T) { req := iresolver.RPCInfo{ Method: tc.method, - Context: context.Background(), + Context: ctx, } res, err := cs.SelectConfig(req) if err != nil { - t.Errorf("Unexpected error from cs.SelectConfig(%v): %v", req, err) + t.Errorf("cs.SelectConfig(%v): %v", req, err) return } res.OnCommitted() got := res.MethodConfig.Timeout - if !reflect.DeepEqual(got, tc.want) { + if !cmp.Equal(got, tc.want) { t.Errorf("For method %q: res.MethodConfig.Timeout = %v; want %v", tc.method, got, tc.want) } }) } } -// TestXDSResolverDelayedOnCommitted tests that clusters remain in service +// TestResolverDelayedOnCommitted tests that clusters remain in service // config if RPCs are in flight. -func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() +func (s) TestResolverDelayedOnCommitted(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Configure the management server with a good listener and route + // configuration resource. + ldsName := serviceName + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "old-cluster")}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - }, - }, - }, nil) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - gotState, err := tcc.stateCh.Receive(ctx) + // Read the update pushed by the resolver to the ClientConn. 
+ val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - - wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}` - wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:old-cluster": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "old-cluster" + } + } + ] + } + } + } + } + ] +}`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } + // Make an RPC, but do not commit it yet. 
cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + resOld, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } - cluster := clustermanager.GetPickedClusterForTesting(res.Context) - if cluster != "cluster:test-cluster-1" { - t.Fatalf("") + if cluster := clustermanager.GetPickedClusterForTesting(resOld.Context); cluster != "cluster:old-cluster" { + t.Fatalf("Picked cluster is %q, want %q", cluster, "cluster:old-cluster") } - // delay res.OnCommitted() - // Perform TWO updates to ensure the old config selector does not hold a - // reference to test-cluster-1. - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, - }, - }, - }, nil) - tcc.stateCh.Receive(ctx) // Ignore the first update. + // Delay resOld.OnCommitted(). As long as there are pending RPCs to removed + // clusters, they still appear in the service config. - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, - }, - }, - }, nil) + // Update the route configuration resource on the management server to + // return a new cluster. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "new-cluster")}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - gotState, err = tcc.stateCh.Receive(ctx) + // Read the update pushed by the resolver to the ClientConn and ensure the + // old cluster is present in the service config. Also ensure that the newly + // returned config selector does not hold a reference to the old cluster. + val, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - wantJSON2 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - }, - "cluster:NEW":{ - "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] - } - } - }}]}` - wantSCParsed2 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON2) - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed2.Config)) + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:old-cluster": { + 
"childPolicy": [ + { + "cds_experimental": { + "cluster": "old-cluster" + } + } + ] + }, + "cluster:new-cluster": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "new-cluster" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s\nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } - // Invoke OnCommitted; should lead to a service config update that deletes - // test-cluster-1. - res.OnCommitted() - - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"NEW": {Weight: 1}}}}, - }, - }, - }, nil) - gotState, err = tcc.stateCh.Receive(ctx) + cs = iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + resNew, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } - rState = gotState.(resolver.State) - if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + if cluster := clustermanager.GetPickedClusterForTesting(resNew.Context); cluster != "cluster:new-cluster" { + t.Fatalf("Picked cluster is %q, want %q", cluster, "cluster:new-cluster") } - wantJSON3 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:NEW":{ - "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] - } - } - }}]}` - wantSCParsed3 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON3) - if 
!internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed3.Config)) - } -} -// TestXDSResolverUpdates tests the cases where the resolver gets a good update -// after an error, and an error after the good update. -func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() + // Invoke OnCommitted on the old RPC; should lead to a service config update + // that deletes the old cluster, as the old cluster no longer has any + // pending RPCs. + resOld.OnCommitted() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) - - if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { - t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) - } - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, - }, - }, - }, nil) - gotState, err := tcc.stateCh.Receive(ctx) + val, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr2 := errors.New("bad serviceupdate 2") - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr2) - if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr2 { - t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr2) + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:new-cluster": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "new-cluster" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } } -// TestXDSResolverResourceNotFoundError tests the cases where the resolver gets -// a ResourceNotFoundError. 
It should generate a service config picking -// weighted_target, but no child balancers. -func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) - - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if gotErrVal, gotErr := tcc.errorCh.Receive(sCtx); gotErr != context.DeadlineExceeded { - t.Fatalf("ClientConn.ReportError() received %v, %v, want channel recv timeout", gotErrVal, gotErr) +// TestResolverMultipleLDSUpdates tests the case where two LDS updates with the +// same RDS name to watch are received without an RDS in between. Those LDS +// updates shouldn't trigger a service config update. +func (s) TestResolverMultipleLDSUpdates(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) } + defer mgmtServer.Stop() - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - gotState, err := tcc.stateCh.Receive(ctx) + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) - } - rState := gotState.(resolver.State) - wantParsedConfig := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)("{}") - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantParsedConfig.Config) { - t.Error("ClientConn.UpdateState got wrong service config") - t.Errorf("gotParsed: %s", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Errorf("wantParsed: %s", cmp.Diff(nil, wantParsedConfig.Config)) - } - if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatal(err) } -} + defer cleanup() -// TestXDSResolverMultipleLDSUpdates tests the case where two LDS updates with -// the same RDS name to watch are received without an RDS in between. Those LDS -// updates shouldn't trigger service config update. -// -// This test case also makes sure the resolver doesn't panic. -func (s) TestXDSResolverMultipleLDSUpdates(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() + // Build an xDS resolver that uses the above bootstrap configuration + // Creating the xDS resolver should result in creation of the xDS client. + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + // Configure the management server with a listener resource, but no route + // configuration resource. 
+ ldsName := serviceName + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - defer replaceRandNumGenerator(0)() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - // Send a new LDS update, with the same fields. - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer cancel() - // Should NOT trigger a state update. - gotState, err := tcc.stateCh.Receive(ctx) + // Ensure there is no update from the resolver. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + gotState, err := tcc.stateCh.Receive(sCtx) if err == nil { - t.Fatalf("ClientConn.UpdateState received %v, want timeout error", gotState) + t.Fatalf("Received update from resolver %v when none expected", gotState) } - // Send a new LDS update, with the same RDS name, but different fields. - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) - ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer cancel() - gotState, err = tcc.stateCh.Receive(ctx) + // Configure the management server with a listener resource that points to + // the same route configuration resource but has different values for some + // other fields. There is still no route configuration resource on the + // management server. 
+ hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: rdsName, + }}, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(1 * time.Second), + }, + }) + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{{ + Name: ldsName, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + }}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that there is no update from the resolver. 
+ sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + gotState, err = tcc.stateCh.Receive(sCtx) if err == nil { - t.Fatalf("ClientConn.UpdateState received %v, want timeout error", gotState) + t.Fatalf("Received update from resolver %v when none expected", gotState) } } diff --git a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go index 437edebe1392..e2e6d5dabc31 100644 --- a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go @@ -187,7 +187,7 @@ func (s) TestFederation_RouteConfigResourceContextParamOrder(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-resource": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-resource": {Weight: 100}}, }, }, }, diff --git a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go index e761e70a6246..fd08e33bbaab 100644 --- a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/misc_watchers_test.go @@ -96,7 +96,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, @@ -112,7 +112,7 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go 
b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go index 47fcb2fe2e39..79d2bd7edbe6 100644 --- a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go @@ -142,7 +142,7 @@ func (s) TestRDSWatch(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, @@ -165,7 +165,7 @@ func (s) TestRDSWatch(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 100}}, }, }, }, @@ -281,7 +281,7 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, @@ -297,7 +297,7 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 100}}, }, }, }, @@ -319,7 +319,7 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 100}}, }, }, }, @@ -335,7 +335,7 @@ func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { { Prefix: newStringP("/"), ActionType: 
xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 100}}, }, }, }, @@ -492,7 +492,7 @@ func (s) TestRDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, @@ -577,7 +577,7 @@ func (s) TestRDSWatch_ResourceCaching(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, @@ -713,7 +713,7 @@ func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, @@ -850,7 +850,7 @@ func (s) TestRDSWatch_PartialValid(t *testing.T) { { Prefix: newStringP("/"), ActionType: xdsresource.RouteActionRoute, - WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, }, }, }, From a6376c9893f56fc3819bee9ef5d71f55cc2d38dd Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 24 Jan 2023 19:16:33 -0800 Subject: [PATCH 757/998] xds/resolver: cleanup tests to use real xDS client 3/n (#5953) --- xds/internal/resolver/xds_resolver_test.go | 569 ++++++++++++++------- 1 file changed, 390 insertions(+), 179 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 
a4b884e88680..952a7f67f9cc 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -38,7 +38,6 @@ import ( xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" iresolver "google.golang.org/grpc/internal/resolver" @@ -65,6 +64,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" @@ -108,12 +108,12 @@ type testClientConn struct { } func (t *testClientConn) UpdateState(s resolver.State) error { - t.stateCh.Send(s) + t.stateCh.Replace(s) return nil } func (t *testClientConn) ReportError(err error) { - t.errorCh.Send(err) + t.errorCh.Replace(err) } func (t *testClientConn) ParseServiceConfig(jsonSC string) *serviceconfig.ParseResult { @@ -566,147 +566,341 @@ func (s) TestResolverCloseClosesXDSClient(t *testing.T) { } } -// TestXDSResolverCloseClosesXDSClient tests that the XDS resolver's Close -// method closes the XDS client. -func (s) TestXDSResolverCloseClosesXDSClient(t *testing.T) { - xdsR, _, _, cancel := testSetup(t, setupOpts{target: target}) - xdsR.Close() - cancel() // Blocks until the xDS client is closed. 
-} +// TestResolverBadServiceUpdate tests the case where a resource returned by the +// management server is NACKed by the xDS client, which then returns an update +// containing an error to the resolver. Verifies that the update is propagated +// to the ClientConn by the resolver. It also tests the cases where the resolver +// gets a good update subsequently, and another error after the good update. +func (s) TestResolverBadServiceUpdate(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() -// TestXDSResolverBadServiceUpdate tests the case the xdsClient returns a bad -// service update. -func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + // Configure a listener resource that is expected to be NACKed because it + // does not contain the `RouteSpecifier` field in the HTTPConnectionManager. 
+ hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + }) + lis := &v3listenerpb.Listener{ + Name: serviceName, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + } + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{lis}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + wantErr := "no RouteSpecifier" + val, err := tcc.errorCh.Receive(ctx) + if err != nil { + t.Fatal("Timeout when waiting for error to be propagated to the ClientConn") + } + gotErr := val.(error) + if gotErr == nil || !strings.Contains(gotErr.Error(), wantErr) { + t.Fatalf("Received error from resolver %q, want %q", gotErr, wantErr) + } + + // Configure good listener and route configuration resources on the + // management server. + rdsName := "route-" + serviceName + cdsName := "cluster-" + serviceName + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, serviceName, cdsName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Expect a good update from the resolver. 
+ val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState := val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) + // Configure another bad resource on the management server. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{lis}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { - t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) + // Expect an error update from the resolver. + val, err = tcc.errorCh.Receive(ctx) + if err != nil { + t.Fatal("Timeout when waiting for error to be propagated to the ClientConn") + } + gotErr = val.(error) + if gotErr == nil || !strings.Contains(gotErr.Error(), wantErr) { + t.Fatalf("Received error from resolver %q, want %q", gotErr, wantErr) } } -// TestXDSResolverGoodServiceUpdate tests the happy case where the resolver -// gets a good service update from the xdsClient. -func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() +// TestResolverGoodServiceUpdate tests the case where the resource returned by +// the management server is ACKed by the xDS client, which then returns a good +// service update to the resolver. 
The test verifies that the service config +// returned by the resolver matches expectations, and that the config selector +// returned by the resolver picks clusters based on the route configuration +// received from the management server. +func (s) TestResolverGoodServiceUpdate(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - defer replaceRandNumGenerator(0)() + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName for _, tt := range []struct { - routes []*xdsresource.Route - wantJSON string - wantClusters map[string]bool + routeConfig *v3routepb.RouteConfiguration + wantServiceConfig string + wantClusters map[string]bool }{ { - routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}`, + // A route configuration with a single cluster. 
+ routeConfig: &v3routepb.RouteConfiguration{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "test-cluster-1", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }, + }}, + }}, + }}, + }}, + }, + wantServiceConfig: ` +{ + "loadBalancingConfig": [{ + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "test-cluster-1" + } + }] + } + } + } + }] +}`, wantClusters: map[string]bool{"cluster:test-cluster-1": true}, }, { - routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ - "cluster_1": {Weight: 75}, - "cluster_2": {Weight: 25}, - }}}, - // This update contains the cluster from the previous update as - // well as this update, as the previous config selector still - // references the old cluster when the new one is pushed. - wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - }, - "cluster:cluster_1":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] - }, - "cluster:cluster_2":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] - } - } - }}]}`, + // A route configuration with a two new clusters. 
+ routeConfig: &v3routepb.RouteConfiguration{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "cluster_1", + Weight: &wrapperspb.UInt32Value{Value: 75}, + }, + { + Name: "cluster_2", + Weight: &wrapperspb.UInt32Value{Value: 25}, + }, + }, + }}, + }}, + }}, + }}, + }, + // This update contains the cluster from the previous update as well + // as this update, as the previous config selector still references + // the old cluster when the new one is pushed. + wantServiceConfig: ` +{ + "loadBalancingConfig": [{ + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "test-cluster-1" + } + }] + }, + "cluster:cluster_1": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "cluster_1" + } + }] + }, + "cluster:cluster_2": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "cluster_2" + } + }] + } + } + } + }] +}`, wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, { - routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{ - "cluster_1": {Weight: 75}, - "cluster_2": {Weight: 25}, - }}}, + // A redundant route configuration update. + // TODO(easwars): Do we need this, or can we do something else? Because the xds client might swallow this update. 
+ routeConfig: &v3routepb.RouteConfiguration{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "cluster_1", + Weight: &wrapperspb.UInt32Value{Value: 75}, + }, + { + Name: "cluster_2", + Weight: &wrapperspb.UInt32Value{Value: 25}, + }, + }, + }}, + }}, + }}, + }}, + }, // With this redundant update, the old config selector has been // stopped, so there are no more references to the first cluster. // Only the second update's clusters should remain. - wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:cluster_1":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] - }, - "cluster:cluster_2":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] - } - } - }}]}`, + wantServiceConfig: ` +{ + "loadBalancingConfig": [{ + "xds_cluster_manager_experimental": { + "children": { + "cluster:cluster_1": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "cluster_1" + } + }] + }, + "cluster:cluster_2": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "cluster_2" + } + }] + } + } + } + }] +}`, wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, } { - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: tt.routes, - }, - }, - }, nil) + // Configure the management server with a good listener resource and a + // route configuration resource, as specified by the test case. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{tt.routeConfig}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - gotState, err := tcc.stateCh.Receive(ctx) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Read the update pushed by the resolver to the ClientConn. + val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(tt.wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(tt.wantServiceConfig) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") + t.Errorf("Received unexpected service config") t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) } cs := iresolver.GetConfigSelector(rState) if cs == nil { - 
t.Error("received nil config selector") - continue + t.Fatal("Received nil config selector in update from resolver") } pickedClusters := make(map[string]bool) @@ -714,15 +908,15 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { // with the random number generator stubbed out, we can rely on this // to be 100% reproducible. for i := 0; i < 100; i++ { - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } cluster := clustermanager.GetPickedClusterForTesting(res.Context) pickedClusters[cluster] = true res.OnCommitted() } - if !reflect.DeepEqual(pickedClusters, tt.wantClusters) { + if !cmp.Equal(pickedClusters, tt.wantClusters) { t.Errorf("Picked clusters: %v; want: %v", pickedClusters, tt.wantClusters) } } @@ -965,110 +1159,139 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { } } -// TestXDSResolverRemovedResource tests for proper behavior after a resource is -// removed. -func (s) TestXDSResolverRemovedResource(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer cancel() - defer xdsR.Close() +// TestResolverRemovedResource tests the case where resources returned by the +// management server are removed. The test verifies that the resolver pushes the +// expected config selector and service config in this case. +func (s) TestResolverRemovedResource(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + Version: xdsbootstrap.TransportV3, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Configure the management server with a good listener and route + // configuration resource. + ldsName := serviceName + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "test-cluster-1")}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - }, - }, - }, nil) - wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster:test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}` - wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - gotState, err := tcc.stateCh.Receive(ctx) + // Read the update pushed by the resolver to the ClientConn. + val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "test-cluster-1" + } + } + ] + } + } + } + } + ] +}`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, 
rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } // "Make an RPC" by invoking the config selector. cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatalf("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } // "Finish the RPC"; this could cause a panic if the resolver doesn't // handle it correctly. res.OnCommitted() - // Delete the resource. The channel should receive a service config with the - // original cluster but with an erroring config selector. - suErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{}, suErr) + // Delete the resources on the management server, resulting in a + // resource-not-found error from the xDS client. + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{NodeID: nodeID}); err != nil { + t.Fatal(err) + } - if gotState, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + // The channel should receive the existing service config with the original + // cluster but with an erroring config selector. 
+ val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } // "Make another RPC" by invoking the config selector. cs = iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatalf("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - res, err = cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err = cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err == nil || status.Code(err) != codes.Unavailable { - t.Fatalf("Expected UNAVAILABLE error from cs.SelectConfig(_); got %v, %v", res, err) + t.Fatalf("cs.SelectConfig() got %v, %v, expected UNAVAILABLE error", res, err) } // In the meantime, an empty ServiceConfig update should have been sent. 
- if gotState, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)("{}") if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } } @@ -1892,18 +2115,6 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { } } -func replaceRandNumGenerator(start int64) func() { - nextInt := start - xdsresource.RandInt63n = func(int64) (ret int64) { - ret = nextInt - nextInt++ - return - } - return func() { - xdsresource.RandInt63n = grpcrand.Int63n - } -} - func newDurationP(d time.Duration) *time.Duration { return &d } From e2d69aa076dd070e3668784c4dc8bcf7131b3f67 Mon Sep 17 00:00:00 2001 From: "Kyle J. 
Burda" Date: Wed, 25 Jan 2023 14:27:02 -0500 Subject: [PATCH 758/998] tests: fix spelling of variable (#5966) --- .../clusterresolver/clusterresolver_test.go | 16 ++++++++-------- .../balancer/clusterresolver/config_test.go | 8 ++++---- .../clusterresolver/configbuilder_test.go | 4 ++-- .../balancer/clusterresolver/priority_test.go | 3 +-- .../clusterresolver/resource_resolver_test.go | 18 +++++++++--------- 5 files changed, 24 insertions(+), 25 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index a368b9da7326..940de0aacbe2 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -46,7 +46,7 @@ import ( const ( defaultTestTimeout = 5 * time.Second defaultTestShortTimeout = 10 * time.Millisecond - testEDSServcie = "test-eds-service-name" + testEDSService = "test-eds-service-name" testClusterName = "test-cluster-name" testClusterName2 = "google_cfe_some-name" ) @@ -104,7 +104,7 @@ func (t *noopTestClientConn) NewSubConn([]resolver.Address, balancer.NewSubConnO return nil, nil } -func (noopTestClientConn) Target() string { return testEDSServcie } +func (noopTestClientConn) Target() string { return testEDSService } type scStateChange struct { sc balancer.SubConn @@ -234,7 +234,7 @@ func (s) TestSubConnStateChange(t *testing.T) { if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), + BalancerConfig: newLBConfigWithOneEDS(testEDSService), }); err != nil { t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) } @@ -282,7 +282,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { defer cancel() if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: 
newLBConfigWithOneEDS(testEDSServcie), + BalancerConfig: newLBConfigWithOneEDS(testEDSService), }); err != nil { t.Fatal(err) } @@ -336,7 +336,7 @@ func (s) TestErrorFromXDSClientUpdate(t *testing.T) { // An update with the same service name should not trigger a new watch. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), + BalancerConfig: newLBConfigWithOneEDS(testEDSService), }); err != nil { t.Fatal(err) } @@ -370,7 +370,7 @@ func (s) TestErrorFromResolver(t *testing.T) { defer cancel() if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), + BalancerConfig: newLBConfigWithOneEDS(testEDSService), }); err != nil { t.Fatal(err) } @@ -421,7 +421,7 @@ func (s) TestErrorFromResolver(t *testing.T) { // the previous watch was canceled. if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSServcie), + BalancerConfig: newLBConfigWithOneEDS(testEDSService), }); err != nil { t.Fatal(err) } @@ -549,7 +549,7 @@ func (s) TestOutlierDetection(t *testing.T) { // level. 
if err := edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSServcie, noopODCfg), + BalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSService, noopODCfg), }); err != nil { t.Fatal(err) } diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index cca1bd8eadc1..76f2d744e19d 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -187,7 +187,7 @@ func TestParseConfig(t *testing.T) { LoadReportingServer: testLRSServerConfig, MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }, }, XDSLBPolicy: nil, @@ -204,7 +204,7 @@ func TestParseConfig(t *testing.T) { LoadReportingServer: testLRSServerConfig, MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }, { Type: DiscoveryMechanismTypeLogicalDNS, @@ -224,7 +224,7 @@ func TestParseConfig(t *testing.T) { LoadReportingServer: testLRSServerConfig, MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }, }, XDSLBPolicy: &internalserviceconfig.BalancerConfig{ @@ -244,7 +244,7 @@ func TestParseConfig(t *testing.T) { LoadReportingServer: testLRSServerConfig, MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }, }, XDSLBPolicy: &internalserviceconfig.BalancerConfig{ diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index f3a830291605..5fbb0b95e339 100644 --- 
a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -588,12 +588,12 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { mechanism: DiscoveryMechanism{ Cluster: testClusterName, Type: DiscoveryMechanismTypeEDS, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }, // lrsServer is nil, so LRS policy will not be used. wantConfig: &clusterimpl.LBConfig{ Cluster: testClusterName, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: weightedtarget.Name, Config: &weightedtarget.LBConfig{ diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 984215a86e6a..fdcef37f2d8e 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -62,8 +62,7 @@ func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) cc := testutils.NewTestClientConn(t) builder := balancer.Get(Name) - // TODO: @kylejb will fix typo for 'testEDSServcie' in another PR - edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{URL: *testutils.MustParseURL("dns:///" + testEDSServcie)}}) + edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{URL: *testutils.MustParseURL("dns:///" + testEDSService)}}) if edsb == nil { t.Fatalf("builder.Build(%s) failed and returned nil", Name) } diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index f20482e30288..2252373e56e6 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -60,14 +60,14 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { }{ 
{name: "watch EDS", clusterName: testClusterName, - edsName: testEDSServcie, - wantName: testEDSServcie, + edsName: testEDSService, + wantName: testEDSService, edsUpdate: testEDSUpdates[0], want: []priorityConfig{{ mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }, edsResp: testEDSUpdates[0], childNameGen: newNameGenerator(0), @@ -123,7 +123,7 @@ func (s) TestResourceResolverOneEDSResource(t *testing.T) { t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) } if edsNameCanceled != test.wantName { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, testEDSServcie) + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, testEDSService) } }) } @@ -225,7 +225,7 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { rr.updateMechanisms([]DiscoveryMechanism{{ Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }}) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -233,8 +233,8 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } - if gotEDSName1 != testEDSServcie { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testEDSServcie) + if gotEDSName1 != testEDSService { + t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testEDSService) } // Invoke callback, should get an update. 
@@ -245,7 +245,7 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { mechanism: DiscoveryMechanism{ Type: DiscoveryMechanismTypeEDS, Cluster: testClusterName, - EDSServiceName: testEDSServcie, + EDSServiceName: testEDSService, }, edsResp: testEDSUpdates[0], childNameGen: newNameGenerator(0), @@ -266,7 +266,7 @@ func (s) TestResourceResolverChangeEDSName(t *testing.T) { t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) } if edsNameCanceled1 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, testEDSServcie) + t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, testEDSService) } gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) if err != nil { From 2a1e9348ff7b5d9f4b5039e84e6c9873b5b3e26e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 25 Jan 2023 16:28:29 -0800 Subject: [PATCH 759/998] server: after GracefulStop, ensure connections are closed when final RPC completes (#5968) Fixes https://github.com/grpc/grpc-go/issues/5930 --- internal/transport/controlbuf.go | 6 ++-- test/gracefulstop_test.go | 51 ++++++++++++++++++++++++++++++++ test/servertester.go | 35 +++++++++++++++++++--- test/stream_cleanup_test.go | 6 ++-- 4 files changed, 89 insertions(+), 9 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index a5b7513f412d..9097385e1a6a 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -527,6 +527,9 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { + // Always flush the writer before exiting in case there are pending frames + // to be sent. 
+ defer l.framer.writer.Flush() for { it, err := l.cbuf.get(true) if err != nil { @@ -759,7 +762,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + if l.draining && len(l.estdStreams) == 0 { return errors.New("finished processing active streams while in draining mode") } return nil @@ -814,7 +817,6 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { } func (l *loopyWriter) closeConnectionHandler() error { - l.framer.writer.Flush() // Exit loopyWriter entirely by returning an error here. This will lead to // the transport closing the connection, and, ultimately, transport // closure. diff --git a/test/gracefulstop_test.go b/test/gracefulstop_test.go index a5a8448ad2fd..15e0611a219b 100644 --- a/test/gracefulstop_test.go +++ b/test/gracefulstop_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "golang.org/x/net/http2" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" @@ -164,3 +165,53 @@ func (s) TestGracefulStop(t *testing.T) { cancel() wg.Wait() } + +func (s) TestGracefulStopClosesConnAfterLastStream(t *testing.T) { + // This test ensures that a server closes the connections to its clients + // when the final stream has completed after a GOAWAY. + + handlerCalled := make(chan struct{}) + gracefulStopCalled := make(chan struct{}) + + ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + close(handlerCalled) // Initiate call to GracefulStop. + <-gracefulStopCalled // Wait for GOAWAYs to be received by the client. + return nil + }} + + te := newTest(t, tcpClearEnv) + te.startServer(ts) + defer te.tearDown() + + te.withServerTester(func(st *serverTester) { + st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false) + + <-handlerCalled // Wait for the server to invoke its handler. + + // Gracefully stop the server. 
+ gracefulStopDone := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(gracefulStopDone) + }() + st.wantGoAway(http2.ErrCodeNo) // Server sends a GOAWAY due to GracefulStop. + pf := st.wantPing() // Server sends a ping to verify client receipt. + st.writePing(true, pf.Data) // Send ping ack to confirm. + st.wantGoAway(http2.ErrCodeNo) // Wait for subsequent GOAWAY to indicate no new stream processing. + + close(gracefulStopCalled) // Unblock server handler. + + fr := st.wantAnyFrame() // Wait for trailer. + hdr, ok := fr.(*http2.MetaHeadersFrame) + if !ok { + t.Fatalf("Received unexpected frame of type (%T) from server: %v; want HEADERS", fr, fr) + } + if !hdr.StreamEnded() { + t.Fatalf("Received unexpected HEADERS frame from server: %v; want END_STREAM set", fr) + } + + st.wantRSTStream(http2.ErrCodeNo) // Server should send RST_STREAM because client did not half-close. + + <-gracefulStopDone // Wait for GracefulStop to return. + }) +} diff --git a/test/servertester.go b/test/servertester.go index bf7bd8b214e6..3701a0e094d9 100644 --- a/test/servertester.go +++ b/test/servertester.go @@ -138,19 +138,46 @@ func (st *serverTester) writeSettingsAck() { } } +func (st *serverTester) wantGoAway(errCode http2.ErrCode) *http2.GoAwayFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RST frame: %v", err) + } + gaf, ok := f.(*http2.GoAwayFrame) + if !ok { + st.t.Fatalf("got a %T; want *http2.GoAwayFrame", f) + } + if gaf.ErrCode != errCode { + st.t.Fatalf("expected GOAWAY error code '%v', got '%v'", errCode.String(), gaf.ErrCode.String()) + } + return gaf +} + +func (st *serverTester) wantPing() *http2.PingFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RST frame: %v", err) + } + pf, ok := f.(*http2.PingFrame) + if !ok { + st.t.Fatalf("got a %T; want *http2.GoAwayFrame", f) + } + return pf +} + func (st *serverTester) wantRSTStream(errCode http2.ErrCode) 
*http2.RSTStreamFrame { f, err := st.readFrame() if err != nil { st.t.Fatalf("Error while expecting an RST frame: %v", err) } - sf, ok := f.(*http2.RSTStreamFrame) + rf, ok := f.(*http2.RSTStreamFrame) if !ok { st.t.Fatalf("got a %T; want *http2.RSTStreamFrame", f) } - if sf.ErrCode != errCode { - st.t.Fatalf("expected RST error code '%v', got '%v'", errCode.String(), sf.ErrCode.String()) + if rf.ErrCode != errCode { + st.t.Fatalf("expected RST error code '%v', got '%v'", errCode.String(), rf.ErrCode.String()) } - return sf + return rf } func (st *serverTester) wantSettings() *http2.SettingsFrame { diff --git a/test/stream_cleanup_test.go b/test/stream_cleanup_test.go index 83dd68549e99..53298ea372fc 100644 --- a/test/stream_cleanup_test.go +++ b/test/stream_cleanup_test.go @@ -46,7 +46,7 @@ func (s) TestStreamCleanup(t *testing.T) { return &testpb.Empty{}, nil }, } - if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(callRecvMsgSize))), grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { + if err := ss.Start(nil, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(callRecvMsgSize))), grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() @@ -79,7 +79,7 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) { }) }, } - if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { + if err := ss.Start(nil, grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() @@ -132,6 +132,6 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) { case <-gracefulStopDone: timer.Stop() case <-timer.C: - t.Fatalf("s.GracefulStop() didn't finish without 1 second after the last RPC") + t.Fatalf("s.GracefulStop() didn't finish within 1 second 
after the last RPC") } } From c813c17a33d0832602245ddef55084907194552a Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Thu, 26 Jan 2023 14:50:21 -0800 Subject: [PATCH 760/998] Change version to 1.54.0-dev (#5985) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 39c349fcbcd4..1a3ff5289096 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.53.0-dev" +const Version = "1.54.0-dev" From 6a707eb1bb94d73e8a692961da19bb5c95b7b723 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 27 Jan 2023 17:06:29 -0500 Subject: [PATCH 761/998] client: add an option to disable global dial options (#5990) --- clientconn.go | 10 ++++-- default_dial_option_server_option_test.go | 39 +++++++++++++++++------ dialoptions.go | 7 ++-- internal/internal.go | 3 ++ server.go | 8 ++--- 5 files changed, 48 insertions(+), 19 deletions(-) diff --git a/clientconn.go b/clientconn.go index d607d4e9e243..e3919895e951 100644 --- a/clientconn.go +++ b/clientconn.go @@ -133,6 +133,10 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // https://github.com/grpc/grpc/blob/master/doc/naming.md. // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + return dialWithGlobalOptions(ctx, target, false, opts...) 
+} + +func dialWithGlobalOptions(ctx context.Context, target string, disableGlobalOptions bool, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, csMgr: &connectivityStateManager{}, @@ -146,8 +150,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - for _, opt := range extraDialOptions { - opt.apply(&cc.dopts) + if !disableGlobalOptions { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } } for _, opt := range opts { diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go index c6cdd7c84838..b1501d2fb55d 100644 --- a/default_dial_option_server_option_test.go +++ b/default_dial_option_server_option_test.go @@ -19,6 +19,7 @@ package grpc import ( + "context" "strings" "testing" @@ -26,7 +27,7 @@ import ( "google.golang.org/grpc/internal" ) -func (s) TestAddExtraDialOptions(t *testing.T) { +func (s) TestAddGlobalDialOptions(t *testing.T) { // Ensure the Dial fails without credentials if _, err := Dial("fake"); err == nil { t.Fatalf("Dialing without a credential did not fail") @@ -40,8 +41,8 @@ func (s) TestAddExtraDialOptions(t *testing.T) { opts := []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials())} internal.AddGlobalDialOptions.(func(opt ...DialOption))(opts...) 
for i, opt := range opts { - if extraDialOptions[i] != opt { - t.Fatalf("Unexpected extra dial option at index %d: %v != %v", i, extraDialOptions[i], opt) + if globalDialOptions[i] != opt { + t.Fatalf("Unexpected global dial option at index %d: %v != %v", i, globalDialOptions[i], opt) } } @@ -53,19 +54,37 @@ func (s) TestAddExtraDialOptions(t *testing.T) { } internal.ClearGlobalDialOptions() - if len(extraDialOptions) != 0 { - t.Fatalf("Unexpected len of extraDialOptions: %d != 0", len(extraDialOptions)) + if len(globalDialOptions) != 0 { + t.Fatalf("Unexpected len of globalDialOptions: %d != 0", len(globalDialOptions)) } } -func (s) TestAddExtraServerOptions(t *testing.T) { +// TestDisableGlobalOptions tests dialing with a bit that disables global +// options. Dialing with this bit set should not pick up global options. +func (s) TestDisableGlobalOptions(t *testing.T) { + // Set transport credentials as a global option. + internal.AddGlobalDialOptions.(func(opt ...DialOption))(WithTransportCredentials(insecure.NewCredentials())) + // Dial with disable global options set to true. This Dial should fail due + // to the global dial options with credentials not being picked up due to it + // being disabled. + if _, err := internal.DialWithGlobalOptions.(func(context.Context, string, bool, ...DialOption) (*ClientConn, error))(context.Background(), "fake", true); err == nil { + t.Fatalf("Dialing without a credential did not fail") + } else { + if !strings.Contains(err.Error(), "no transport security set") { + t.Fatalf("Dialing failed with unexpected error: %v", err) + } + } + internal.ClearGlobalDialOptions() +} + +func (s) TestAddGlobalServerOptions(t *testing.T) { const maxRecvSize = 998765 // Set and check the ServerOptions opts := []ServerOption{Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize)} internal.AddGlobalServerOptions.(func(opt ...ServerOption))(opts...) 
for i, opt := range opts { - if extraServerOptions[i] != opt { - t.Fatalf("Unexpected extra server option at index %d: %v != %v", i, extraServerOptions[i], opt) + if globalServerOptions[i] != opt { + t.Fatalf("Unexpected global server option at index %d: %v != %v", i, globalServerOptions[i], opt) } } @@ -76,8 +95,8 @@ func (s) TestAddExtraServerOptions(t *testing.T) { } internal.ClearGlobalServerOptions() - if len(extraServerOptions) != 0 { - t.Fatalf("Unexpected len of extraServerOptions: %d != 0", len(extraServerOptions)) + if len(globalServerOptions) != 0 { + t.Fatalf("Unexpected len of globalServerOptions: %d != 0", len(globalServerOptions)) } } diff --git a/dialoptions.go b/dialoptions.go index 4866da101c60..67f2404961fd 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -38,11 +38,12 @@ import ( func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { - extraDialOptions = append(extraDialOptions, opt...) + globalDialOptions = append(globalDialOptions, opt...) } internal.ClearGlobalDialOptions = func() { - extraDialOptions = nil + globalDialOptions = nil } + internal.DialWithGlobalOptions = dialWithGlobalOptions internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption } @@ -83,7 +84,7 @@ type DialOption interface { apply(*dialOptions) } -var extraDialOptions []DialOption +var globalDialOptions []DialOption // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. diff --git a/internal/internal.go b/internal/internal.go index 0a76d9de6e02..cb5139a1980c 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -29,6 +29,9 @@ import ( ) var ( + // DialWithGlobalOptions dials with a knob on whether to disable global dial + // options (set via AddGlobalDialOptions). 
+ DialWithGlobalOptions interface{} // func (context.Context, string, bool, ...DialOption) (*ClientConn, error) // WithHealthCheckFunc is set by dialoptions.go WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking diff --git a/server.go b/server.go index d5a6e78be44d..0ebaaf5da143 100644 --- a/server.go +++ b/server.go @@ -74,10 +74,10 @@ func init() { srv.drainServerTransports(addr) } internal.AddGlobalServerOptions = func(opt ...ServerOption) { - extraServerOptions = append(extraServerOptions, opt...) + globalServerOptions = append(globalServerOptions, opt...) } internal.ClearGlobalServerOptions = func() { - extraServerOptions = nil + globalServerOptions = nil } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption @@ -183,7 +183,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } -var extraServerOptions []ServerOption +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -600,7 +600,7 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. 
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions - for _, o := range extraServerOptions { + for _, o := range globalServerOptions { o.apply(&opts) } for _, o := range opt { From d103fc70660a9efc804d9e1dc9971dc55557bace Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 31 Jan 2023 10:28:48 -0800 Subject: [PATCH 762/998] xdsclient/xdsresource: reduce chattiness of logs (#5993) --- xds/internal/server/listener_wrapper_test.go | 6 +- xds/internal/xdsclient/authority.go | 36 ++++++++-- .../xdsresource/cluster_resource_type.go | 2 +- .../xdsresource/endpoints_resource_type.go | 2 +- .../xdsclient/xdsresource/filter_chain.go | 17 ++--- .../xdsresource/filter_chain_test.go | 24 +++---- .../xdsresource/listener_resource_type.go | 2 +- xds/internal/xdsclient/xdsresource/logging.go | 28 ++++++++ .../xdsclient/xdsresource/resource_type.go | 10 ++- .../xdsresource/route_config_resource_type.go | 2 +- .../xdsclient/xdsresource/unmarshal.go | 68 ------------------- .../xdsclient/xdsresource/unmarshal_cds.go | 5 +- .../xdsresource/unmarshal_cds_test.go | 2 +- .../xdsclient/xdsresource/unmarshal_eds.go | 8 +-- .../xdsresource/unmarshal_eds_test.go | 4 +- .../xdsclient/xdsresource/unmarshal_lds.go | 21 +++--- .../xdsresource/unmarshal_lds_test.go | 4 +- .../xdsclient/xdsresource/unmarshal_rds.go | 29 ++++---- .../xdsresource/unmarshal_rds_test.go | 8 +-- xds/server_test.go | 4 +- 20 files changed, 128 insertions(+), 154 deletions(-) create mode 100644 xds/internal/xdsclient/xdsresource/logging.go delete mode 100644 xds/internal/xdsclient/xdsresource/unmarshal.go diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go index 2c1e8b75b8fe..7d246f6373eb 100644 --- a/xds/internal/server/listener_wrapper_test.go +++ b/xds/internal/server/listener_wrapper_test.go @@ -283,7 +283,7 @@ func (s) TestNewListenerWrapper(t *testing.T) { t.Fatalf("ready channel written to after receipt of a bad Listener update") 
} - fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } @@ -347,7 +347,7 @@ func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { if name != testListenerResourceName { t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", name, testListenerResourceName) } - fcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration, nil) + fcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } @@ -410,7 +410,7 @@ func (s) TestListenerWrapper_Accept(t *testing.T) { // Push a good update with a filter chain which accepts local connections on // 192.168.0.0/16 subnet and port 80. - fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 3d4f99e88d5c..8e53bd824d68 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -19,7 +19,9 @@ package xdsclient import ( "context" + "errors" "fmt" + "strings" "sync" "time" @@ -133,10 +135,7 @@ func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) } - opts := &xdsresource.DecodeOptions{ - BootstrapConfig: a.bootstrapCfg, - Logger: a.logger, - } + opts := &xdsresource.DecodeOptions{BootstrapConfig: a.bootstrapCfg} updates, md, err := decodeAllResources(opts, rType, resourceUpdate) a.updateResourceStateAndScheduleCallbacks(rType, 
updates, md) return err @@ -178,7 +177,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty } } // Sync cache. - a.logger.Debugf("Resource type %q with name %q, value %s added to cache", rType.TypeEnum().String(), name, uErr.resource.ToJSON()) + a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeEnum().String(), name) state.cache = uErr.resource // Set status to ACK, and clear error state. The metadata might be a // NACK metadata because some other resources in the same response @@ -286,7 +285,7 @@ func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, typeStr := rType.TypeEnum().String() md.Status = xdsresource.ServiceStatusNACKed - errRet := xdsresource.CombineErrors(typeStr, topLevelErrors, perResourceErrors) + errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) md.ErrState = &xdsresource.UpdateErrorMetadata{ Version: update.Version, Err: errRet, @@ -454,3 +453,28 @@ func (a *authority) dumpResources() map[string]map[string]xdsresource.UpdateWith } return dump } + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 87e6dbd1194a..433cfcacb69e 100644 --- a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ 
b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -49,7 +49,7 @@ type clusterResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (clusterResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, cluster, err := unmarshalClusterResource(resource, opts.Logger) + name, cluster, err := unmarshalClusterResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index dc1c09da08f3..b778a4a5340c 100644 --- a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -49,7 +49,7 @@ type endpointsResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (endpointsResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, rc, err := unmarshalEndpointsResource(resource, opts.Logger) + name, rc, err := unmarshalEndpointsResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. 
diff --git a/xds/internal/xdsclient/xdsresource/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go index 20cd40879555..f748cbc8ce29 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -28,7 +28,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -177,7 +176,6 @@ const ( // 7. Source IP address. // 8. Source port. type FilterChainManager struct { - logger *grpclog.PrefixLogger // Destination prefix is the first match criteria that we support. // Therefore, this multi-stage map is indexed on destination prefixes // specified in the match criteria. @@ -248,10 +246,9 @@ type sourcePrefixEntry struct { // // This function is only exported so that tests outside of this package can // create a FilterChainManager. -func NewFilterChainManager(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*FilterChainManager, error) { +func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { // Parse all the filter chains and build the internal data structures. fci := &FilterChainManager{ - logger: logger, dstPrefixMap: make(map[string]*destPrefixEntry), RouteConfigNames: make(map[string]bool), } @@ -305,7 +302,7 @@ func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) if fcm.GetDestinationPort().GetValue() != 0 { // Destination port is the first match criteria and we do not // support filter chains which contains this match criteria. 
- fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) continue } @@ -354,7 +351,7 @@ func (fci *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefi // Filter chains specifying server names in their match criteria always fail // a match at connection time. So, these filter chains can be dropped now. if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) return nil } @@ -367,13 +364,13 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de case tp != "" && tp != "raw_buffer": // Only allow filter chains with transport protocol set to empty string // or "raw_buffer". - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp == "" && dstEntry.rawBufferSeen: // If we have already seen filter chains with transport protocol set to // "raw_buffer", we can drop filter chains with transport protocol set // to empty string, since the former takes precedence. - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) return nil case tp != "" && !dstEntry.rawBufferSeen: // This is the first "raw_buffer" that we are seeing. 
Set the bit and @@ -387,7 +384,7 @@ func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *de func (fci *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { - fci.logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) return nil } return fci.addFilterChainsForSourceType(dstEntry, fc) @@ -652,7 +649,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) // server-side." - A36 // Can specify v3 here, as will never get to this function // if v2. - routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig(), nil, false) + routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig(), false) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go index c141619c5a5a..4edf9ce006f1 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain_test.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go @@ -187,7 +187,7 @@ func (s) TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if fci, err := NewFilterChainManager(test.lis, nil); err == nil { + if fci, err := NewFilterChainManager(test.lis); err == nil { t.Fatalf("NewFilterChainManager() returned %v when expected to fail", fci) } }) @@ -286,7 +286,7 @@ func (s) TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { const wantErr = "multiple filter chains with overlapping matching rules are defined" for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if _, err 
:= NewFilterChainManager(test.lis, nil); err == nil || !strings.Contains(err.Error(), wantErr) { + if _, err := NewFilterChainManager(test.lis); err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, wantErr) } }) @@ -508,7 +508,7 @@ func (s) TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis, nil) + _, err := NewFilterChainManager(test.lis) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -745,7 +745,7 @@ func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis, nil) + gotFC, err := NewFilterChainManager(test.lis) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -886,7 +886,7 @@ func (s) TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis, nil) + _, err := NewFilterChainManager(test.lis) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -960,7 +960,7 @@ func (s) TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := NewFilterChainManager(test.lis, nil) + _, err := NewFilterChainManager(test.lis) if err == nil || !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) } @@ -1280,7 +1280,7 @@ func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) 
{ - gotFC, err := NewFilterChainManager(test.lis, nil) + gotFC, err := NewFilterChainManager(test.lis) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1509,7 +1509,7 @@ func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis, nil) + gotFC, err := NewFilterChainManager(test.lis) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -1682,7 +1682,7 @@ func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis, nil) + gotFC, err := NewFilterChainManager(test.lis) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -2183,7 +2183,7 @@ func (s) TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - gotFC, err := NewFilterChainManager(test.lis, nil) + gotFC, err := NewFilterChainManager(test.lis) if err != nil { t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) } @@ -2334,7 +2334,7 @@ func (s) TestLookup_Failures(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - fci, err := NewFilterChainManager(test.lis, nil) + fci, err := NewFilterChainManager(test.lis) if err != nil { t.Fatalf("NewFilterChainManager() failed: %v", err) } @@ -2568,7 +2568,7 @@ func (s) TestLookup_Successes(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - fci, err := NewFilterChainManager(test.lis, nil) + fci, err := NewFilterChainManager(test.lis) if err != nil { t.Fatalf("NewFilterChainManager() failed: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go 
index 6b2fff9f6f0c..81ba72fd0a23 100644 --- a/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -81,7 +81,7 @@ func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (listenerResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, listener, err := unmarshalListenerResource(resource, opts.Logger) + name, listener, err := unmarshalListenerResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/xds/internal/xdsclient/xdsresource/logging.go b/xds/internal/xdsclient/xdsresource/logging.go new file mode 100644 index 000000000000..62bcb016ba25 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/logging.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsresource + +import ( + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-resource] " + +var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("xds"), prefix) diff --git a/xds/internal/xdsclient/xdsresource/resource_type.go b/xds/internal/xdsclient/xdsresource/resource_type.go index 6fced7784d02..5a1714660bf7 100644 --- a/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/xds/internal/xdsclient/xdsresource/resource_type.go @@ -15,10 +15,16 @@ * limitations under the License. */ +// Package xdsresource implements the xDS data model layer. +// +// Provides resource-type specific functionality to unmarshal xDS protos into +// internal data structures that contain only fields gRPC is interested in. +// These internal data structures are passed to components in the xDS stack +// (resolver/balancers/server) that have expressed interest in receiving +// updates to specific resources. package xdsresource import ( - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/types/known/anypb" ) @@ -115,8 +121,6 @@ type DecodeOptions struct { // BootstrapConfig contains the bootstrap configuration passed to the // top-level xdsClient. This contains useful data for resource validation. BootstrapConfig *bootstrap.Config - // Logger is to be used for emitting logs during the Decode operation. - Logger *grpclog.PrefixLogger } // DecodeResult is the result of a decode operation. 
diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 31be4d6aebd0..70bbbf387593 100644 --- a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -49,7 +49,7 @@ type routeConfigResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. func (routeConfigResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { - name, rc, err := unmarshalRouteConfigResource(resource, opts.Logger) + name, rc, err := unmarshalRouteConfigResource(resource) switch { case name == "": // Name is unset only when protobuf deserialization fails. diff --git a/xds/internal/xdsclient/xdsresource/unmarshal.go b/xds/internal/xdsclient/xdsresource/unmarshal.go deleted file mode 100644 index 28ae41e43a91..000000000000 --- a/xds/internal/xdsclient/xdsresource/unmarshal.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package xdsresource contains functions to proto xds updates (unmarshal from -// proto), and types for the resource updates. 
-package xdsresource - -import ( - "errors" - "fmt" - "strings" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/protobuf/types/known/anypb" -) - -// UnmarshalOptions wraps the input parameters for `UnmarshalXxx` functions. -type UnmarshalOptions struct { - // Version is the version of the received response. - Version string - // Resources are the xDS resources resources in the received response. - Resources []*anypb.Any - // Logger is the prefix logger to be used during unmarshaling. - Logger *grpclog.PrefixLogger - // UpdateValidator is a post unmarshal validation check provided by the - // upper layer. - UpdateValidator UpdateValidatorFunc -} - -// CombineErrors TBD. -func CombineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { - var errStrB strings.Builder - errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) - if len(topLevelErrors) > 0 { - errStrB.WriteString("top level errors: ") - for i, err := range topLevelErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - errStrB.WriteString(err.Error()) - } - } - if len(perResourceErrors) > 0 { - var i int - for name, err := range perResourceErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - i++ - errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) - } - } - return errors.New(errStrB.String()) -} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index f04939182b94..49adefc4561c 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -30,8 +30,6 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" 
"google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" @@ -41,7 +39,7 @@ import ( // to this value by the management server. const transportSocketName = "envoy.transport_sockets.tls" -func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { +func unmarshalClusterResource(r *anypb.Any) (string, ClusterUpdate, error) { r, err := unwrapResource(r) if err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) @@ -55,7 +53,6 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster)) cu, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { return cluster.GetName(), ClusterUpdate{}, err diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 3705c02bedf4..74af95e627fb 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -1549,7 +1549,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - name, update, err := unmarshalClusterResource(test.resource, nil) + name, update, err := unmarshalClusterResource(test.resource) if (err != nil) != test.wantErr { t.Fatalf("unmarshalClusterResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index a1809a62fc9a..ecb40f38fa88 100644 --- 
a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -27,13 +27,12 @@ import ( v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" - "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal" "google.golang.org/protobuf/types/known/anypb" ) -func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { +func unmarshalEndpointsResource(r *anypb.Any) (string, EndpointsUpdate, error) { r, err := unwrapResource(r) if err != nil { return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) @@ -47,9 +46,8 @@ func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (str if err := proto.Unmarshal(r.GetValue(), cla); err != nil { return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) - u, err := parseEDSRespProto(cla, logger) + u, err := parseEDSRespProto(cla) if err != nil { return cla.GetClusterName(), EndpointsUpdate{}, err } @@ -109,7 +107,7 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs return endpoints, nil } -func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.PrefixLogger) (EndpointsUpdate, error) { +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { ret := EndpointsUpdate{} for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go index 3fd3f417e943..cfb452b26fad 
100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -208,7 +208,7 @@ func (s) TestEDSParseRespProto(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := parseEDSRespProto(tt.m, nil) + got, err := parseEDSRespProto(tt.m) if (err != nil) != tt.wantErr { t.Errorf("parseEDSRespProto() error = %v, wantErr %v", err, tt.wantErr) return @@ -330,7 +330,7 @@ func (s) TestUnmarshalEndpoints(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - name, update, err := unmarshalEndpointsResource(test.resource, nil) + name, update, err := unmarshalEndpointsResource(test.resource) if (err != nil) != test.wantErr { t.Fatalf("unmarshalEndpointsResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index 6b273e82f956..e1b49c217873 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -29,14 +29,12 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) -func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { +func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { r, err := unwrapResource(r) if err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) @@ -51,9 +49,8 @@ func 
unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri if err := proto.Unmarshal(r.GetValue(), lis); err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis)) - lu, err := processListener(lis, logger, v2) + lu, err := processListener(lis, v2) if err != nil { return lis.GetName(), ListenerUpdate{}, err } @@ -61,16 +58,16 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri return lis.GetName(), *lu, nil } -func processListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { +func processListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { if lis.GetApiListener() != nil { - return processClientSideListener(lis, logger, v2) + return processClientSideListener(lis, v2) } - return processServerSideListener(lis, logger) + return processServerSideListener(lis) } // processClientSideListener checks if the provided Listener proto meets // the expected criteria. If so, it returns a non-empty routeConfigName. 
-func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger, v2 bool) (*ListenerUpdate, error) { +func processClientSideListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { update := &ListenerUpdate{} apiLisAny := lis.GetApiListener().GetApiListener() @@ -102,7 +99,7 @@ func processClientSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi } update.RouteConfigName = name case *v3httppb.HttpConnectionManager_RouteConfig: - routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), logger, v2) + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), v2) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } @@ -257,7 +254,7 @@ func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilt return ret, nil } -func processServerSideListener(lis *v3listenerpb.Listener, logger *grpclog.PrefixLogger) (*ListenerUpdate, error) { +func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { if n := len(lis.ListenerFilters); n != 0 { return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) } @@ -279,7 +276,7 @@ func processServerSideListener(lis *v3listenerpb.Listener, logger *grpclog.Prefi }, } - fcMgr, err := NewFilterChainManager(lis, logger) + fcMgr, err := NewFilterChainManager(lis) if err != nil { return nil, err } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index 62089bc31b8f..f753bd00a8cc 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -616,7 +616,7 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - name, update, err := unmarshalListenerResource(test.resource, nil) + name, update, err := 
unmarshalListenerResource(test.resource) if (err != nil) != test.wantErr { t.Errorf("unmarshalListenerResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } @@ -1744,7 +1744,7 @@ func (s) TestUnmarshalListener_ServerSide(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - name, update, err := unmarshalListenerResource(test.resource, nil) + name, update, err := unmarshalListenerResource(test.resource) if err != nil && !strings.Contains(err.Error(), test.wantErr) { t.Errorf("unmarshalListenerResource(%s) = %v wantErr: %q", pretty.ToJSON(test.resource), err, test.wantErr) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 819e47d32fce..edcbeaa2454b 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -28,14 +28,12 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) -func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) { +func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, error) { r, err := unwrapResource(r) if err != nil { return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) @@ -48,11 +46,10 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s if err := proto.Unmarshal(r.GetValue(), rc); err != nil { return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, 
pretty.ToJSON(rc)) // TODO: Pass version.TransportAPI instead of relying upon the type URL v2 := r.GetTypeUrl() == version.V2RouteConfigURL - u, err := generateRDSUpdateFromRouteConfiguration(rc, logger, v2) + u, err := generateRDSUpdateFromRouteConfiguration(rc, v2) if err != nil { return rc.GetName(), RouteConfigUpdate{}, err } @@ -76,7 +73,7 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s // field must be empty and whose route field must be set. Inside that route // message, the cluster field will contain the clusterName or weighted clusters // we are looking for. -func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { +func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, v2 bool) (RouteConfigUpdate, error) { vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) csps := make(map[string]clusterspecifier.BalancerConfig) if envconfig.XDSRLS { @@ -91,7 +88,7 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l // ignored and not emitted by the xdsclient. 
var cspNames = make(map[string]bool) for _, vh := range rc.GetVirtualHosts() { - routes, cspNs, err := routesProtoToSlice(vh.Routes, csps, logger, v2) + routes, cspNs, err := routesProtoToSlice(vh.Routes, csps, v2) if err != nil { return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) } @@ -216,7 +213,7 @@ func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { return cfg, nil } -func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, map[string]bool, error) { +func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig, v2 bool) ([]*Route, map[string]bool, error) { var routesRet []*Route var cspNames = make(map[string]bool) for _, r := range routes { @@ -227,7 +224,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif if len(match.GetQueryParameters()) != 0 { // Ignore route with query parameters. - logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r) + logger.Warningf("Ignoring route %+v with query parameter matchers", r) continue } @@ -309,7 +306,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif // Hash Policies are only applicable for a Ring Hash LB. if envconfig.XDSRingHash { - hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) + hp, err := hashPoliciesProtoToSlice(action.HashPolicy) if err != nil { return nil, nil, err } @@ -365,7 +362,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif // it as if it we didn't know about the cluster_specifier_plugin // at all. 
if !envconfig.XDSRLS { - logger.Infof("route %+v contains route_action with unsupported field: cluster_specifier_plugin, the route will be ignored", r) + logger.Warningf("Ignoring route %+v with unsupported route_action field: cluster_specifier_plugin", r) continue } if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { @@ -376,13 +373,13 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) } if csps[a.ClusterSpecifierPlugin] == nil { - logger.Infof("route %+v references optional and unsupported cluster specifier plugin %v, the route will be ignored", r, a.ClusterSpecifierPlugin) + logger.Warningf("Ignoring route %+v with optional and unsupported cluster specifier plugin %+v", r, a.ClusterSpecifierPlugin) continue } cspNames[a.ClusterSpecifierPlugin] = true route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin default: - logger.Infof("route %+v references unknown ClusterSpecifier %+v, the route will be ignored", r, a) + logger.Warningf("Ignoring route %+v with unknown ClusterSpecifier %+v", r, a) continue } @@ -424,7 +421,7 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif return routesRet, cspNames, nil } -func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy) ([]*HashPolicy, error) { var hashPoliciesRet []*HashPolicy for _, p := range policies { policy := HashPolicy{Terminal: p.Terminal} @@ -443,12 +440,12 @@ func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logg } case *v3routepb.RouteAction_HashPolicy_FilterState_: if p.GetFilterState().GetKey() != "io.grpc.channel_id" { - logger.Infof("hash policy %+v contains an invalid key for filter state policy %q", p, 
p.GetFilterState().GetKey()) + logger.Warningf("Ignoring hash policy %+v with invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) continue } policy.HashPolicyType = HashPolicyTypeChannelID default: - logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier()) + logger.Warningf("Ignoring unsupported hash policy %T", p.GetPolicySpecifier()) continue } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index ea308825c094..a74ce555a681 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -778,7 +778,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { envconfig.XDSRLS = test.rlsEnabled - gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, nil, false) + gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, false) if (gotError != nil) != test.wantError || !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty(), cmp.Transformer("FilterConfig", func(fc httpfilter.FilterConfig) string { @@ -1020,7 +1020,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - name, update, err := unmarshalRouteConfigResource(test.resource, nil) + name, update, err := unmarshalRouteConfigResource(test.resource) if (err != nil) != test.wantErr { t.Errorf("unmarshalRouteConfigResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) } @@ -1608,7 +1608,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { defer func() { envconfig.XDSRingHash = oldRingHashSupport }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, _, err := routesProtoToSlice(tt.routes, nil, nil, false) + got, _, err := routesProtoToSlice(tt.routes, nil, false) if (err != 
nil) != tt.wantErr { t.Fatalf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) } @@ -1721,7 +1721,7 @@ func (s) TestHashPoliciesProtoToSlice(t *testing.T) { defer func() { envconfig.XDSRingHash = oldRingHashSupport }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := hashPoliciesProtoToSlice(tt.hashPolicies, nil) + got, err := hashPoliciesProtoToSlice(tt.hashPolicies) if (err != nil) != tt.wantErr { t.Fatalf("hashPoliciesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/xds/server_test.go b/xds/server_test.go index 68cb3dc307a6..a0983a6508ea 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -448,7 +448,7 @@ func (s) TestServeSuccess(t *testing.T) { // Push a good LDS response, and wait for Serve() to be invoked on the // underlying grpc.Server. - fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains, nil) + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } @@ -801,7 +801,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { }, }, }, - }, nil) + }) if err != nil { t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) } From 37111547ca4ed0b23e2e06f6e35006e361a819c6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 31 Jan 2023 10:29:11 -0800 Subject: [PATCH 763/998] xdsclient/bootstrap: reduce chattiness of logs (#5991) --- xds/internal/xdsclient/bootstrap/bootstrap.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 23eff2d63907..989ec4999748 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -317,7 +317,7 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // // Note that even if the content is invalid, we don't failover to the // 
file content env variable. - logger.Debugf("xds: using bootstrap file with name %q", fName) + logger.Debugf("Using bootstrap file with name %q", fName) return bootstrapFileReadFunc(fName) } @@ -349,7 +349,6 @@ func NewConfig() (*Config, error) { if err != nil { return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) } - logger.Debugf("Bootstrap content: %s", data) return newConfigFromContents(data) } @@ -425,7 +424,7 @@ func newConfigFromContents(data []byte) (*Config, error) { } case "client_default_listener_resource_name_template": if !envconfig.XDSFederation { - logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) + logger.Warningf("Bootstrap field %v is not support when Federation is disabled", k) continue } if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil { @@ -433,7 +432,7 @@ func newConfigFromContents(data []byte) (*Config, error) { } case "authorities": if !envconfig.XDSFederation { - logger.Warningf("xds: bootstrap field %v is not support when Federation is disabled", k) + logger.Warningf("Bootstrap field %v is not support when Federation is disabled", k) continue } if err := json.Unmarshal(v, &config.Authorities); err != nil { @@ -477,7 +476,7 @@ func newConfigFromContents(data []byte) (*Config, error) { if err := config.updateNodeProto(node); err != nil { return nil, err } - logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) + logger.Debugf("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) return config, nil } From a7058f7b72f9c1452b6e7cf8bb9afd03997017f5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 31 Jan 2023 10:36:41 -0800 Subject: [PATCH 764/998] xds/csds: switch tests to use the new generic xdsclient API (#6000) --- xds/csds/csds_e2e_test.go | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/xds/csds/csds_e2e_test.go 
b/xds/csds/csds_e2e_test.go index d83530a157bc..ba3e58399f60 100644 --- a/xds/csds/csds_e2e_test.go +++ b/xds/csds/csds_e2e_test.go @@ -86,6 +86,34 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } +// The following watcher implementations are no-ops since we don't really care +// about the callback received by these watchers in the test. We only care +// whether CSDS reports the expected state. + +type unimplementedListenerWatcher struct{} + +func (unimplementedListenerWatcher) OnUpdate(*xdsresource.ListenerResourceData) {} +func (unimplementedListenerWatcher) OnError(error) {} +func (unimplementedListenerWatcher) OnResourceDoesNotExist() {} + +type unimplementedRouteConfigWatcher struct{} + +func (unimplementedRouteConfigWatcher) OnUpdate(*xdsresource.RouteConfigResourceData) {} +func (unimplementedRouteConfigWatcher) OnError(error) {} +func (unimplementedRouteConfigWatcher) OnResourceDoesNotExist() {} + +type unimplementedClusterWatcher struct{} + +func (unimplementedClusterWatcher) OnUpdate(*xdsresource.ClusterResourceData) {} +func (unimplementedClusterWatcher) OnError(error) {} +func (unimplementedClusterWatcher) OnResourceDoesNotExist() {} + +type unimplementedEndpointsWatcher struct{} + +func (unimplementedEndpointsWatcher) OnUpdate(*xdsresource.EndpointsResourceData) {} +func (unimplementedEndpointsWatcher) OnError(error) {} +func (unimplementedEndpointsWatcher) OnResourceDoesNotExist() {} + func (s) TestCSDS(t *testing.T) { // Spin up a xDS management server on a local port. nodeID := uuid.New().String() @@ -190,16 +218,16 @@ func (s) TestCSDS(t *testing.T) { // Register watches on the xDS client for two resources of each type. 
for _, target := range ldsTargets { - xdsC.WatchListener(target, func(xdsresource.ListenerUpdate, error) {}) + xdsresource.WatchListener(xdsC, target, unimplementedListenerWatcher{}) } for _, target := range rdsTargets { - xdsC.WatchRouteConfig(target, func(xdsresource.RouteConfigUpdate, error) {}) + xdsresource.WatchRouteConfig(xdsC, target, unimplementedRouteConfigWatcher{}) } for _, target := range cdsTargets { - xdsC.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) + xdsresource.WatchCluster(xdsC, target, unimplementedClusterWatcher{}) } for _, target := range edsTargets { - xdsC.WatchEndpoints(target, func(xdsresource.EndpointsUpdate, error) {}) + xdsresource.WatchEndpoints(xdsC, target, unimplementedEndpointsWatcher{}) } // Verify that the xDS client reports the resources as being in "Requested" From 095409727694aeb107ce0825bca3d3a6ed98acbf Mon Sep 17 00:00:00 2001 From: Ronak Jain Date: Wed, 1 Feb 2023 02:57:34 +0530 Subject: [PATCH 765/998] server: expose API to set send compressor (#5744) Fixes https://github.com/grpc/grpc-go/issues/5792 --- internal/transport/handler_server_test.go | 28 +- internal/transport/http2_server.go | 11 + internal/transport/transport.go | 23 +- server.go | 100 ++++++- stream.go | 9 + test/end2end_test.go | 335 ++++++++++++++++++++++ 6 files changed, 489 insertions(+), 17 deletions(-) diff --git a/internal/transport/handler_server_test.go b/internal/transport/handler_server_test.go index fbd8058b79fb..a6eb20285787 100644 --- a/internal/transport/handler_server_test.go +++ b/internal/transport/handler_server_test.go @@ -280,31 +280,36 @@ func (s) TestHandlerTransport_HandleStreams(t *testing.T) { t.Errorf("stream method = %q; want %q", s.method, want) } - err := s.SetHeader(metadata.Pairs("custom-header", "Custom header value")) - if err != nil { + if err := s.SetHeader(metadata.Pairs("custom-header", "Custom header value")); err != nil { t.Error(err) } - err = 
s.SetTrailer(metadata.Pairs("custom-trailer", "Custom trailer value")) - if err != nil { + + if err := s.SetTrailer(metadata.Pairs("custom-trailer", "Custom trailer value")); err != nil { + t.Error(err) + } + + if err := s.SetSendCompress("gzip"); err != nil { t.Error(err) } md := metadata.Pairs("custom-header", "Another custom header value") - err = s.SendHeader(md) - delete(md, "custom-header") - if err != nil { + if err := s.SendHeader(md); err != nil { t.Error(err) } + delete(md, "custom-header") - err = s.SetHeader(metadata.Pairs("too-late", "Header value that should be ignored")) - if err == nil { + if err := s.SetHeader(metadata.Pairs("too-late", "Header value that should be ignored")); err == nil { t.Error("expected SetHeader call after SendHeader to fail") } - err = s.SendHeader(metadata.Pairs("too-late", "This header value should be ignored as well")) - if err == nil { + + if err := s.SendHeader(metadata.Pairs("too-late", "This header value should be ignored as well")); err == nil { t.Error("expected second SendHeader call to fail") } + if err := s.SetSendCompress("snappy"); err == nil { + t.Error("expected second SetSendCompress call to fail") + } + st.bodyw.Close() // no body st.ht.WriteStatus(s, status.New(codes.OK, "")) } @@ -317,6 +322,7 @@ func (s) TestHandlerTransport_HandleStreams(t *testing.T) { "Content-Type": {"application/grpc"}, "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, "Custom-Header": {"Custom header value", "Another custom header value"}, + "Grpc-Encoding": {"gzip"}, } wantTrailer := http.Header{ "Grpc-Status": {"0"}, diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index bc3da706726d..7dee882bf663 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -404,6 +404,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC 
= true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 0ac77ea4f8c7..1b7d7fabc512 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -257,6 +257,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -345,8 +348,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. 
+func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status diff --git a/server.go b/server.go index 0ebaaf5da143..fa7f8b580be6 100644 --- a/server.go +++ b/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -1263,6 +1264,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1283,12 +1285,18 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1375,6 +1383,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} + // Server handler could have set new compressor by calling SetSendCompressor. 
+ // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1597,12 +1610,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1935,6 +1954,60 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. 
+// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. // @@ -1969,3 +2042,22 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. 
+func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} diff --git a/stream.go b/stream.go index 93231af2ac56..89936a4f1665 100644 --- a/stream.go +++ b/stream.go @@ -1511,6 +1511,8 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1603,6 +1605,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { diff --git a/test/end2end_test.go b/test/end2end_test.go index 0f5cbc345774..d3c339ccb8d9 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -59,6 +59,7 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" @@ -5080,6 +5081,340 @@ func (s) TestClientForwardsGrpcAcceptEncodingHeader(t *testing.T) { } } +// wrapCompressor is a wrapper of encoding.Compressor which maintains count of +// Compressor method invokes. 
+type wrapCompressor struct { + encoding.Compressor + compressInvokes int32 +} + +func (wc *wrapCompressor) Compress(w io.Writer) (io.WriteCloser, error) { + atomic.AddInt32(&wc.compressInvokes, 1) + return wc.Compressor.Compress(w) +} + +func setupGzipWrapCompressor(t *testing.T) *wrapCompressor { + oldC := encoding.GetCompressor("gzip") + c := &wrapCompressor{Compressor: oldC} + encoding.RegisterCompressor(c) + t.Cleanup(func() { + encoding.RegisterCompressor(oldC) + }) + return c +} + +func (s) TestSetSendCompressorSuccess(t *testing.T) { + for _, tt := range []struct { + name string + desc string + dialOpts []grpc.DialOption + resCompressor string + wantCompressInvokes int32 + }{ + { + name: "identity_request_and_gzip_response", + desc: "request is uncompressed and response is gzip compressed", + resCompressor: "gzip", + wantCompressInvokes: 1, + }, + { + name: "gzip_request_and_identity_response", + desc: "request is gzip compressed and response is uncompressed with identity", + resCompressor: "identity", + dialOpts: []grpc.DialOption{ + // Use WithCompressor instead of UseCompressor to avoid counting + // the client's compressor usage. 
+ grpc.WithCompressor(grpc.NewGZIPCompressor()), + }, + wantCompressInvokes: 0, + }, + } { + t.Run(tt.name, func(t *testing.T) { + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) + }) + }) + } +} + +func testUnarySetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { + wc := setupGzipWrapCompressor(t) + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { + return nil, err + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil, dialOpts...); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) + } + + compressInvokes := atomic.LoadInt32(&wc.compressInvokes) + if compressInvokes != wantCompressInvokes { + t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) + } +} + +func testStreamSetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { + wc := setupGzipWrapCompressor(t) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + + if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { + return err + } + + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil, dialOpts...); err != nil { + 
t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) + } + + if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); err != nil { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: nil", err) + } + + compressInvokes := atomic.LoadInt32(&wc.compressInvokes) + if compressInvokes != wantCompressInvokes { + t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) + } +} + +func (s) TestUnregisteredSetSendCompressorFailure(t *testing.T) { + resCompressor := "snappy2" + wantErr := status.Error(codes.Unknown, "unable to set send compressor: compressor not registered \"snappy2\"") + + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorFailure(t, resCompressor, wantErr) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorFailure(t, resCompressor, wantErr) + }) +} + +func (s) TestUnadvertisedSetSendCompressorFailure(t *testing.T) { + // Disable client compressor advertisement. 
+ defer func(b bool) { envconfig.AdvertiseCompressors = b }(envconfig.AdvertiseCompressors) + envconfig.AdvertiseCompressors = false + + resCompressor := "gzip" + wantErr := status.Error(codes.Unknown, "unable to set send compressor: client does not support compressor \"gzip\"") + + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorFailure(t, resCompressor, wantErr) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorFailure(t, resCompressor, wantErr) + }) +} + +func testUnarySetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { + return nil, err + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { + t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) + } +} + +func testStreamSetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + + if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { + return err + } + + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v, want: nil", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, 
got: %v, want: nil", err) + } + + if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); !equalError(err, wantErr) { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: nil", err) + } +} + +func (s) TestUnarySetSendCompressorAfterHeaderSendFailure(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + // Send headers early and then set send compressor. + grpc.SendHeader(ctx, metadata.MD{}) + err := grpc.SetSendCompressor(ctx, "gzip") + if err == nil { + t.Error("Wanted set send compressor error") + return &testpb.Empty{}, nil + } + return nil, err + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { + t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) + } +} + +func (s) TestStreamSetSendCompressorAfterHeaderSendFailure(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + // Send headers early and then set send compressor. 
+ grpc.SendHeader(stream.Context(), metadata.MD{}) + err := grpc.SetSendCompressor(stream.Context(), "gzip") + if err == nil { + t.Error("Wanted set send compressor error") + } + return err + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); !equalError(err, wantErr) { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: %v", err, wantErr) + } +} + +func (s) TestClientSupportedCompressors(t *testing.T) { + for _, tt := range []struct { + desc string + ctx context.Context + want []string + }{ + { + desc: "No additional grpc-accept-encoding header", + ctx: context.Background(), + want: []string{"gzip"}, + }, + { + desc: "With additional grpc-accept-encoding header", + ctx: metadata.AppendToOutgoingContext(context.Background(), + "grpc-accept-encoding", "test-compressor-1", + "grpc-accept-encoding", "test-compressor-2", + ), + want: []string{"gzip", "test-compressor-1", "test-compressor-2"}, + }, + { + desc: "With additional empty grpc-accept-encoding header", + ctx: metadata.AppendToOutgoingContext(context.Background(), + "grpc-accept-encoding", "", + ), + want: []string{"gzip"}, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + got, err := grpc.ClientSupportedCompressors(ctx) + if err != nil { + return nil, err + } + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("unexpected client compressors got: %v, want: %v", got, tt.want) + } + + return &testpb.Empty{}, nil + }, + } + if err := 
ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v, want: nil", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(tt.ctx, defaultTestTimeout) + defer cancel() + + _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) + if err != nil { + t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) + } + }) + } +} + func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { const mdkey = "somedata" From d6dabba01f9365acd64808b28374614d6cd6143d Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 31 Jan 2023 14:57:29 -0800 Subject: [PATCH 766/998] xds/server: reduce chattiness of logs (#5995) --- xds/internal/server/listener_wrapper.go | 19 ++++--------------- xds/server.go | 7 +------ xds/xds.go | 4 ++-- 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index c6ab885fcf90..9f5b2ecafe5f 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -66,10 +66,6 @@ type ServingModeCallback func(addr net.Addr, mode connectivity.ServingMode, err // connections. type DrainCallback func(addr net.Addr) -func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", p)) -} - // XDSClient wraps the methods on the XDSClient which are required by // the listenerWrapper. type XDSClient interface { @@ -117,7 +113,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru ldsUpdateCh: make(chan ldsUpdateWithError, 1), rdsUpdateCh: make(chan rdsHandlerUpdate, 1), } - lw.logger = prefixLogger(lw) + lw.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", lw)) // Serve() verifies that Addr() returns a valid TCPAddr. So, it is safe to // ignore the error from SplitHostPort(). 
@@ -125,13 +121,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) lw.rdsHandler = newRDSHandler(lw.xdsC, lw.rdsUpdateCh) - - cancelWatch := lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) - lw.logger.Infof("Watch started on resource name %v", lw.name) - lw.cancelWatch = func() { - cancelWatch() - lw.logger.Infof("Watch cancelled on resource name %v", lw.name) - } + lw.cancelWatch = lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) go lw.run() return lw, lw.goodUpdate.Done() } @@ -270,7 +260,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { // error, `grpc.Serve()` method sleeps for a small duration and // therefore ends up blocking all connection attempts during that // time frame, which is also not ideal for an error like this. - l.logger.Warningf("connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr().String(), conn.LocalAddr().String()) + l.logger.Warningf("Connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr().String(), conn.LocalAddr().String()) conn.Close() continue } @@ -302,7 +292,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { // tradeoff for simplicity. vhswi, err := fc.ConstructUsableRouteConfiguration(rc) if err != nil { - l.logger.Warningf("route configuration construction: %v", err) + l.logger.Warningf("Failed to construct usable route configuration: %v", err) conn.Close() continue } @@ -388,7 +378,6 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { // continue to use the old configuration. return } - l.logger.Infof("Received update for resource %q: %+v", l.name, update.update) // Make sure that the socket address on the received Listener resource // matches the address of the net.Listener passed to us by the user. 
This diff --git a/xds/server.go b/xds/server.go index f7003f6cd5c6..24abb65f253f 100644 --- a/xds/server.go +++ b/xds/server.go @@ -61,10 +61,6 @@ var ( logger = grpclog.Component("xds") ) -func prefixLogger(p *GRPCServer) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, p)) -} - // grpcServer contains methods from grpc.Server which are used by the // GRPCServer type here. This is useful for overriding in unit tests. type grpcServer interface { @@ -107,7 +103,7 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { gs: newGRPCServer(newOpts...), quit: grpcsync.NewEvent(), } - s.logger = prefixLogger(s) + s.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, s)) s.logger.Infof("Created xds.GRPCServer") s.handleServerOptions(opts) @@ -196,7 +192,6 @@ func (s *GRPCServer) initXDSClient() error { } s.xdsC = client s.xdsClientClose = close - s.logger.Infof("Created an xdsClient") return nil } diff --git a/xds/xds.go b/xds/xds.go index 706e11c49487..8e6def6014a7 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -56,14 +56,14 @@ func init() { case *GRPCServer: sss, ok := ss.gs.(*grpc.Server) if !ok { - logger.Warningf("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") + logger.Warning("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") return nil, nil } grpcServer = sss default: // Returning an error would cause the top level admin.Register() to // fail. Log a warning instead. 
- logger.Warningf("server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") + logger.Error("Server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") return nil, nil } From 3151e834fa2536844f801bb132c684fbb10364fd Mon Sep 17 00:00:00 2001 From: Borja Lazaro Toralles Date: Wed, 1 Feb 2023 21:20:35 +0000 Subject: [PATCH 767/998] cmd/protoc-gen-go-grpc: export consts for full method names (#5886) --- .../grpc_lb_v1/load_balancer_grpc.pb.go | 6 +- channelz/grpc_channelz_v1/channelz_grpc.pb.go | 38 +++++--- cmd/protoc-gen-go-grpc/grpc.go | 33 +++++-- .../proto/grpc_gcp/handshaker_grpc.pb.go | 6 +- examples/features/proto/echo/echo_grpc.pb.go | 17 ++-- .../helloworld/helloworld_grpc.pb.go | 8 +- .../routeguide/route_guide_grpc.pb.go | 17 ++-- health/grpc_health_v1/health_grpc.pb.go | 11 ++- internal/proto/grpc_lookup_v1/rls_grpc.pb.go | 8 +- .../grpc_testing/benchmark_service_grpc.pb.go | 20 +++-- .../report_qps_scenario_service_grpc.pb.go | 8 +- interop/grpc_testing/test_grpc.pb.go | 90 +++++++++++++------ .../grpc_testing/worker_service_grpc.pb.go | 19 ++-- profiling/proto/service_grpc.pb.go | 13 ++- .../grpc_reflection_v1/reflection_grpc.pb.go | 6 +- .../reflection_grpc.pb.go | 6 +- reflection/grpc_testing/test_grpc.pb.go | 11 ++- stress/grpc_testing/metrics_grpc.pb.go | 11 ++- test/grpc_testing/test_grpc.pb.go | 25 ++++-- 19 files changed, 249 insertions(+), 104 deletions(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index cf1034830d58..93e5e133b561 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -37,6 +37,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" +) + // LoadBalancerClient is the client API for LoadBalancer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -54,7 +58,7 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { } func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/channelz/grpc_channelz_v1/channelz_grpc.pb.go b/channelz/grpc_channelz_v1/channelz_grpc.pb.go index 958e51870cd7..04c9b1e52b5f 100644 --- a/channelz/grpc_channelz_v1/channelz_grpc.pb.go +++ b/channelz/grpc_channelz_v1/channelz_grpc.pb.go @@ -39,6 +39,16 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Channelz_GetTopChannels_FullMethodName = "/grpc.channelz.v1.Channelz/GetTopChannels" + Channelz_GetServers_FullMethodName = "/grpc.channelz.v1.Channelz/GetServers" + Channelz_GetServer_FullMethodName = "/grpc.channelz.v1.Channelz/GetServer" + Channelz_GetServerSockets_FullMethodName = "/grpc.channelz.v1.Channelz/GetServerSockets" + Channelz_GetChannel_FullMethodName = "/grpc.channelz.v1.Channelz/GetChannel" + Channelz_GetSubchannel_FullMethodName = "/grpc.channelz.v1.Channelz/GetSubchannel" + Channelz_GetSocket_FullMethodName = "/grpc.channelz.v1.Channelz/GetSocket" +) + // ChannelzClient is the client API for Channelz service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -70,7 +80,7 @@ func NewChannelzClient(cc grpc.ClientConnInterface) ChannelzClient { func (c *channelzClient) GetTopChannels(ctx context.Context, in *GetTopChannelsRequest, opts ...grpc.CallOption) (*GetTopChannelsResponse, error) { out := new(GetTopChannelsResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetTopChannels", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetTopChannels_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -79,7 +89,7 @@ func (c *channelzClient) GetTopChannels(ctx context.Context, in *GetTopChannelsR func (c *channelzClient) GetServers(ctx context.Context, in *GetServersRequest, opts ...grpc.CallOption) (*GetServersResponse, error) { out := new(GetServersResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServers", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetServers_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -88,7 +98,7 @@ func (c *channelzClient) GetServers(ctx context.Context, in *GetServersRequest, func (c *channelzClient) GetServer(ctx context.Context, in *GetServerRequest, opts ...grpc.CallOption) (*GetServerResponse, error) { out := new(GetServerResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServer", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetServer_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -97,7 +107,7 @@ func (c *channelzClient) GetServer(ctx context.Context, in *GetServerRequest, op func (c *channelzClient) GetServerSockets(ctx context.Context, in *GetServerSocketsRequest, opts ...grpc.CallOption) (*GetServerSocketsResponse, error) { out := new(GetServerSocketsResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServerSockets", in, out, opts...) 
+ err := c.cc.Invoke(ctx, Channelz_GetServerSockets_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -106,7 +116,7 @@ func (c *channelzClient) GetServerSockets(ctx context.Context, in *GetServerSock func (c *channelzClient) GetChannel(ctx context.Context, in *GetChannelRequest, opts ...grpc.CallOption) (*GetChannelResponse, error) { out := new(GetChannelResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetChannel", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetChannel_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -115,7 +125,7 @@ func (c *channelzClient) GetChannel(ctx context.Context, in *GetChannelRequest, func (c *channelzClient) GetSubchannel(ctx context.Context, in *GetSubchannelRequest, opts ...grpc.CallOption) (*GetSubchannelResponse, error) { out := new(GetSubchannelResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetSubchannel", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetSubchannel_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -124,7 +134,7 @@ func (c *channelzClient) GetSubchannel(ctx context.Context, in *GetSubchannelReq func (c *channelzClient) GetSocket(ctx context.Context, in *GetSocketRequest, opts ...grpc.CallOption) (*GetSocketResponse, error) { out := new(GetSocketResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetSocket", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetSocket_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -199,7 +209,7 @@ func _Channelz_GetTopChannels_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetTopChannels", + FullMethod: Channelz_GetTopChannels_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetTopChannels(ctx, req.(*GetTopChannelsRequest)) @@ -217,7 +227,7 @@ func _Channelz_GetServers_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetServers", + FullMethod: Channelz_GetServers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServers(ctx, req.(*GetServersRequest)) @@ -235,7 +245,7 @@ func _Channelz_GetServer_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetServer", + FullMethod: Channelz_GetServer_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServer(ctx, req.(*GetServerRequest)) @@ -253,7 +263,7 @@ func _Channelz_GetServerSockets_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetServerSockets", + FullMethod: Channelz_GetServerSockets_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServerSockets(ctx, req.(*GetServerSocketsRequest)) @@ -271,7 +281,7 @@ func _Channelz_GetChannel_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetChannel", + FullMethod: Channelz_GetChannel_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { return srv.(ChannelzServer).GetChannel(ctx, req.(*GetChannelRequest)) @@ -289,7 +299,7 @@ func _Channelz_GetSubchannel_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetSubchannel", + FullMethod: Channelz_GetSubchannel_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetSubchannel(ctx, req.(*GetSubchannelRequest)) @@ -307,7 +317,7 @@ func _Channelz_GetSocket_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetSocket", + FullMethod: Channelz_GetSocket_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetSocket(ctx, req.(*GetSocketRequest)) diff --git a/cmd/protoc-gen-go-grpc/grpc.go b/cmd/protoc-gen-go-grpc/grpc.go index a997e29f9f1a..9e15d2d8daf3 100644 --- a/cmd/protoc-gen-go-grpc/grpc.go +++ b/cmd/protoc-gen-go-grpc/grpc.go @@ -36,7 +36,8 @@ const ( ) type serviceGenerateHelperInterface interface { - formatFullMethodName(service *protogen.Service, method *protogen.Method) string + formatFullMethodSymbol(service *protogen.Service, method *protogen.Method) string + genFullMethods(g *protogen.GeneratedFile, service *protogen.Service) generateClientStruct(g *protogen.GeneratedFile, clientName string) generateNewClientDefinitions(g *protogen.GeneratedFile, service *protogen.Service, clientName string) generateUnimplementedServerType(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) @@ -46,8 +47,19 @@ type serviceGenerateHelperInterface interface { type serviceGenerateHelper struct{} -func (serviceGenerateHelper) formatFullMethodName(service *protogen.Service, method *protogen.Method) string { - return fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) +func 
(serviceGenerateHelper) formatFullMethodSymbol(service *protogen.Service, method *protogen.Method) string { + return fmt.Sprintf("%s_%s_FullMethodName", service.GoName, method.GoName) +} + +func (serviceGenerateHelper) genFullMethods(g *protogen.GeneratedFile, service *protogen.Service) { + g.P("const (") + for _, method := range service.Methods { + fmSymbol := helper.formatFullMethodSymbol(service, method) + fmName := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + g.P(fmSymbol, ` = "`, fmName, `"`) + } + g.P(")") + g.P() } func (serviceGenerateHelper) generateClientStruct(g *protogen.GeneratedFile, clientName string) { @@ -167,13 +179,16 @@ func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen. } func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + // Full methods constants. + helper.genFullMethods(g, service) + + // Client interface. clientName := service.GoName + "Client" g.P("// ", clientName, " is the client API for ", service.GoName, " service.") g.P("//") g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.") - // Client interface. 
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { g.P("//") g.P(deprecationComment) @@ -288,7 +303,7 @@ func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { service := method.Parent - sname := helper.formatFullMethodName(service, method) + fmSymbol := helper.formatFullMethodSymbol(service, method) if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { g.P(deprecationComment) @@ -296,7 +311,7 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{") if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { g.P("out := new(", method.Output.GoIdent, ")") - g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P(`err := c.cc.Invoke(ctx, `, fmSymbol, `, in, out, opts...)`) g.P("if err != nil { return nil, err }") g.P("return out, nil") g.P("}") @@ -305,7 +320,7 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene } streamType := unexport(service.GoName) + method.GoName + "Client" serviceDescVar := service.GoName + "_ServiceDesc" - g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`) + g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], `, fmSymbol, `, opts...)`) g.P("if err != nil { return nil, err }") g.P("x := &", streamType, "{stream}") if !method.Desc.IsStreamingClient() { @@ -433,8 +448,8 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{") g.P("Server: srv,") - fullMethodName := helper.formatFullMethodName(service, 
method) - g.P("FullMethod: \"", fullMethodName, "\",") + fmSymbol := helper.formatFullMethodSymbol(service, method) + g.P("FullMethod: ", fmSymbol, ",") g.P("}") g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {") g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))") diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index d3562c6d5e62..46d320c9760a 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -35,6 +35,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake" +) + // HandshakerServiceClient is the client API for HandshakerService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -57,7 +61,7 @@ func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceCl } func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/examples/features/proto/echo/echo_grpc.pb.go b/examples/features/proto/echo/echo_grpc.pb.go index 7d0db54f8886..e39c6fd16168 100644 --- a/examples/features/proto/echo/echo_grpc.pb.go +++ b/examples/features/proto/echo/echo_grpc.pb.go @@ -35,6 +35,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + Echo_UnaryEcho_FullMethodName = "/grpc.examples.echo.Echo/UnaryEcho" + Echo_ServerStreamingEcho_FullMethodName = "/grpc.examples.echo.Echo/ServerStreamingEcho" + Echo_ClientStreamingEcho_FullMethodName = "/grpc.examples.echo.Echo/ClientStreamingEcho" + Echo_BidirectionalStreamingEcho_FullMethodName = "/grpc.examples.echo.Echo/BidirectionalStreamingEcho" +) + // EchoClient is the client API for Echo service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -59,7 +66,7 @@ func NewEchoClient(cc grpc.ClientConnInterface) EchoClient { func (c *echoClient) UnaryEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoResponse, error) { out := new(EchoResponse) - err := c.cc.Invoke(ctx, "/grpc.examples.echo.Echo/UnaryEcho", in, out, opts...) + err := c.cc.Invoke(ctx, Echo_UnaryEcho_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -67,7 +74,7 @@ func (c *echoClient) UnaryEcho(ctx context.Context, in *EchoRequest, opts ...grp } func (c *echoClient) ServerStreamingEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (Echo_ServerStreamingEchoClient, error) { - stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[0], "/grpc.examples.echo.Echo/ServerStreamingEcho", opts...) + stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[0], Echo_ServerStreamingEcho_FullMethodName, opts...) if err != nil { return nil, err } @@ -99,7 +106,7 @@ func (x *echoServerStreamingEchoClient) Recv() (*EchoResponse, error) { } func (c *echoClient) ClientStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_ClientStreamingEchoClient, error) { - stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[1], "/grpc.examples.echo.Echo/ClientStreamingEcho", opts...) 
+ stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[1], Echo_ClientStreamingEcho_FullMethodName, opts...) if err != nil { return nil, err } @@ -133,7 +140,7 @@ func (x *echoClientStreamingEchoClient) CloseAndRecv() (*EchoResponse, error) { } func (c *echoClient) BidirectionalStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_BidirectionalStreamingEchoClient, error) { - stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[2], "/grpc.examples.echo.Echo/BidirectionalStreamingEcho", opts...) + stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[2], Echo_BidirectionalStreamingEcho_FullMethodName, opts...) if err != nil { return nil, err } @@ -217,7 +224,7 @@ func _Echo_UnaryEcho_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.examples.echo.Echo/UnaryEcho", + FullMethod: Echo_UnaryEcho_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(EchoServer).UnaryEcho(ctx, req.(*EchoRequest)) diff --git a/examples/helloworld/helloworld/helloworld_grpc.pb.go b/examples/helloworld/helloworld/helloworld_grpc.pb.go index 0a878d460582..002a8a283880 100644 --- a/examples/helloworld/helloworld/helloworld_grpc.pb.go +++ b/examples/helloworld/helloworld/helloworld_grpc.pb.go @@ -32,6 +32,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Greeter_SayHello_FullMethodName = "/helloworld.Greeter/SayHello" +) + // GreeterClient is the client API for Greeter service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -50,7 +54,7 @@ func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient { func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { out := new(HelloReply) - err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...) + err := c.cc.Invoke(ctx, Greeter_SayHello_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -96,7 +100,7 @@ func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/helloworld.Greeter/SayHello", + FullMethod: Greeter_SayHello_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) diff --git a/examples/route_guide/routeguide/route_guide_grpc.pb.go b/examples/route_guide/routeguide/route_guide_grpc.pb.go index 056a44f7c898..f78ac40de314 100644 --- a/examples/route_guide/routeguide/route_guide_grpc.pb.go +++ b/examples/route_guide/routeguide/route_guide_grpc.pb.go @@ -32,6 +32,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + RouteGuide_GetFeature_FullMethodName = "/routeguide.RouteGuide/GetFeature" + RouteGuide_ListFeatures_FullMethodName = "/routeguide.RouteGuide/ListFeatures" + RouteGuide_RecordRoute_FullMethodName = "/routeguide.RouteGuide/RecordRoute" + RouteGuide_RouteChat_FullMethodName = "/routeguide.RouteGuide/RouteChat" +) + // RouteGuideClient is the client API for RouteGuide service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -72,7 +79,7 @@ func NewRouteGuideClient(cc grpc.ClientConnInterface) RouteGuideClient { func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { out := new(Feature) - err := c.cc.Invoke(ctx, "/routeguide.RouteGuide/GetFeature", in, out, opts...) + err := c.cc.Invoke(ctx, RouteGuide_GetFeature_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -80,7 +87,7 @@ func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...gr } func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { - stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[0], "/routeguide.RouteGuide/ListFeatures", opts...) + stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[0], RouteGuide_ListFeatures_FullMethodName, opts...) if err != nil { return nil, err } @@ -112,7 +119,7 @@ func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { } func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { - stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[1], "/routeguide.RouteGuide/RecordRoute", opts...) + stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[1], RouteGuide_RecordRoute_FullMethodName, opts...) if err != nil { return nil, err } @@ -146,7 +153,7 @@ func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { } func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { - stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[2], "/routeguide.RouteGuide/RouteChat", opts...) + stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[2], RouteGuide_RouteChat_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -246,7 +253,7 @@ func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/routeguide.RouteGuide/GetFeature", + FullMethod: RouteGuide_GetFeature_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RouteGuideServer).GetFeature(ctx, req.(*Point)) diff --git a/health/grpc_health_v1/health_grpc.pb.go b/health/grpc_health_v1/health_grpc.pb.go index a332dfd7b54e..99308c4a167d 100644 --- a/health/grpc_health_v1/health_grpc.pb.go +++ b/health/grpc_health_v1/health_grpc.pb.go @@ -35,6 +35,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" +) + // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -70,7 +75,7 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -78,7 +83,7 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -166,7 +171,7 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.health.v1.Health/Check", + FullMethod: Health_Check_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) diff --git a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 076b966f3446..4c1cf2849916 100644 --- a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -32,6 +32,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + RouteLookupService_RouteLookup_FullMethodName = "/grpc.lookup.v1.RouteLookupService/RouteLookup" +) + // RouteLookupServiceClient is the client API for RouteLookupService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -50,7 +54,7 @@ func NewRouteLookupServiceClient(cc grpc.ClientConnInterface) RouteLookupService func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) { out := new(RouteLookupResponse) - err := c.cc.Invoke(ctx, "/grpc.lookup.v1.RouteLookupService/RouteLookup", in, out, opts...) + err := c.cc.Invoke(ctx, RouteLookupService_RouteLookup_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -96,7 +100,7 @@ func _RouteLookupService_RouteLookup_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.lookup.v1.RouteLookupService/RouteLookup", + FullMethod: RouteLookupService_RouteLookup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RouteLookupServiceServer).RouteLookup(ctx, req.(*RouteLookupRequest)) diff --git a/interop/grpc_testing/benchmark_service_grpc.pb.go b/interop/grpc_testing/benchmark_service_grpc.pb.go index 7aaa44fd3faf..b740b6829304 100644 --- a/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -35,6 +35,14 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + BenchmarkService_UnaryCall_FullMethodName = "/grpc.testing.BenchmarkService/UnaryCall" + BenchmarkService_StreamingCall_FullMethodName = "/grpc.testing.BenchmarkService/StreamingCall" + BenchmarkService_StreamingFromClient_FullMethodName = "/grpc.testing.BenchmarkService/StreamingFromClient" + BenchmarkService_StreamingFromServer_FullMethodName = "/grpc.testing.BenchmarkService/StreamingFromServer" + BenchmarkService_StreamingBothWays_FullMethodName = "/grpc.testing.BenchmarkService/StreamingBothWays" +) + // BenchmarkServiceClient is the client API for BenchmarkService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -67,7 +75,7 @@ func NewBenchmarkServiceClient(cc grpc.ClientConnInterface) BenchmarkServiceClie func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, opts...) 
+ err := c.cc.Invoke(ctx, BenchmarkService_UnaryCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -75,7 +83,7 @@ func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleReques } func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[0], "/grpc.testing.BenchmarkService/StreamingCall", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[0], BenchmarkService_StreamingCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -106,7 +114,7 @@ func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { } func (c *benchmarkServiceClient) StreamingFromClient(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingFromClientClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[1], "/grpc.testing.BenchmarkService/StreamingFromClient", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[1], BenchmarkService_StreamingFromClient_FullMethodName, opts...) if err != nil { return nil, err } @@ -140,7 +148,7 @@ func (x *benchmarkServiceStreamingFromClientClient) CloseAndRecv() (*SimpleRespo } func (c *benchmarkServiceClient) StreamingFromServer(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (BenchmarkService_StreamingFromServerClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[2], "/grpc.testing.BenchmarkService/StreamingFromServer", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[2], BenchmarkService_StreamingFromServer_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -172,7 +180,7 @@ func (x *benchmarkServiceStreamingFromServerClient) Recv() (*SimpleResponse, err } func (c *benchmarkServiceClient) StreamingBothWays(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingBothWaysClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[3], "/grpc.testing.BenchmarkService/StreamingBothWays", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[3], BenchmarkService_StreamingBothWays_FullMethodName, opts...) if err != nil { return nil, err } @@ -267,7 +275,7 @@ func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.BenchmarkService/UnaryCall", + FullMethod: BenchmarkService_UnaryCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) diff --git a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index 1c40d481b9ed..e09b5b240511 100644 --- a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -35,6 +35,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ReportQpsScenarioService_ReportScenario_FullMethodName = "/grpc.testing.ReportQpsScenarioService/ReportScenario" +) + // ReportQpsScenarioServiceClient is the client API for ReportQpsScenarioService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -53,7 +57,7 @@ func NewReportQpsScenarioServiceClient(cc grpc.ClientConnInterface) ReportQpsSce func (c *reportQpsScenarioServiceClient) ReportScenario(ctx context.Context, in *ScenarioResult, opts ...grpc.CallOption) (*Void, error) { out := new(Void) - err := c.cc.Invoke(ctx, "/grpc.testing.ReportQpsScenarioService/ReportScenario", in, out, opts...) + err := c.cc.Invoke(ctx, ReportQpsScenarioService_ReportScenario_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -100,7 +104,7 @@ func _ReportQpsScenarioService_ReportScenario_Handler(srv interface{}, ctx conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.ReportQpsScenarioService/ReportScenario", + FullMethod: ReportQpsScenarioService_ReportScenario_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReportQpsScenarioServiceServer).ReportScenario(ctx, req.(*ScenarioResult)) diff --git a/interop/grpc_testing/test_grpc.pb.go b/interop/grpc_testing/test_grpc.pb.go index 64e802bb9c6f..23189f0dc8f5 100644 --- a/interop/grpc_testing/test_grpc.pb.go +++ b/interop/grpc_testing/test_grpc.pb.go @@ -35,6 +35,17 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + TestService_EmptyCall_FullMethodName = "/grpc.testing.TestService/EmptyCall" + TestService_UnaryCall_FullMethodName = "/grpc.testing.TestService/UnaryCall" + TestService_CacheableUnaryCall_FullMethodName = "/grpc.testing.TestService/CacheableUnaryCall" + TestService_StreamingOutputCall_FullMethodName = "/grpc.testing.TestService/StreamingOutputCall" + TestService_StreamingInputCall_FullMethodName = "/grpc.testing.TestService/StreamingInputCall" + TestService_FullDuplexCall_FullMethodName = "/grpc.testing.TestService/FullDuplexCall" + TestService_HalfDuplexCall_FullMethodName = "/grpc.testing.TestService/HalfDuplexCall" + TestService_UnimplementedCall_FullMethodName = "/grpc.testing.TestService/UnimplementedCall" +) + // TestServiceClient is the client API for TestService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -77,7 +88,7 @@ func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_EmptyCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -86,7 +97,7 @@ func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...gr func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_UnaryCall_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -95,7 +106,7 @@ func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, op func (c *testServiceClient) CacheableUnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/CacheableUnaryCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_CacheableUnaryCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -103,7 +114,7 @@ func (c *testServiceClient) CacheableUnaryCall(ctx context.Context, in *SimpleRe } func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], "/grpc.testing.TestService/StreamingOutputCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], TestService_StreamingOutputCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -135,7 +146,7 @@ func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallRespo } func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], "/grpc.testing.TestService/StreamingInputCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], TestService_StreamingInputCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -169,7 +180,7 @@ func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCal } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], "/grpc.testing.TestService/FullDuplexCall", opts...) 
+ stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], TestService_FullDuplexCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -200,7 +211,7 @@ func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, } func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], "/grpc.testing.TestService/HalfDuplexCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], TestService_HalfDuplexCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -232,7 +243,7 @@ func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, func (c *testServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnimplementedCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_UnimplementedCall_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -323,7 +334,7 @@ func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/EmptyCall", + FullMethod: TestService_EmptyCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) @@ -341,7 +352,7 @@ func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", + FullMethod: TestService_UnaryCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) @@ -359,7 +370,7 @@ func _TestService_CacheableUnaryCall_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/CacheableUnaryCall", + FullMethod: TestService_CacheableUnaryCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).CacheableUnaryCall(ctx, req.(*SimpleRequest)) @@ -476,7 +487,7 @@ func _TestService_UnimplementedCall_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/UnimplementedCall", + FullMethod: TestService_UnimplementedCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnimplementedCall(ctx, req.(*Empty)) @@ -535,6 +546,10 @@ var TestService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + UnimplementedService_UnimplementedCall_FullMethodName = "/grpc.testing.UnimplementedService/UnimplementedCall" +) + // UnimplementedServiceClient is the client API for UnimplementedService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -553,7 +568,7 @@ func NewUnimplementedServiceClient(cc grpc.ClientConnInterface) UnimplementedSer func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.UnimplementedService/UnimplementedCall", in, out, opts...) + err := c.cc.Invoke(ctx, UnimplementedService_UnimplementedCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -599,7 +614,7 @@ func _UnimplementedService_UnimplementedCall_Handler(srv interface{}, ctx contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.UnimplementedService/UnimplementedCall", + FullMethod: UnimplementedService_UnimplementedCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, req.(*Empty)) @@ -623,6 +638,11 @@ var UnimplementedService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + ReconnectService_Start_FullMethodName = "/grpc.testing.ReconnectService/Start" + ReconnectService_Stop_FullMethodName = "/grpc.testing.ReconnectService/Stop" +) + // ReconnectServiceClient is the client API for ReconnectService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -641,7 +661,7 @@ func NewReconnectServiceClient(cc grpc.ClientConnInterface) ReconnectServiceClie func (c *reconnectServiceClient) Start(ctx context.Context, in *ReconnectParams, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.ReconnectService/Start", in, out, opts...) 
+ err := c.cc.Invoke(ctx, ReconnectService_Start_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -650,7 +670,7 @@ func (c *reconnectServiceClient) Start(ctx context.Context, in *ReconnectParams, func (c *reconnectServiceClient) Stop(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReconnectInfo, error) { out := new(ReconnectInfo) - err := c.cc.Invoke(ctx, "/grpc.testing.ReconnectService/Stop", in, out, opts...) + err := c.cc.Invoke(ctx, ReconnectService_Stop_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -699,7 +719,7 @@ func _ReconnectService_Start_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.ReconnectService/Start", + FullMethod: ReconnectService_Start_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReconnectServiceServer).Start(ctx, req.(*ReconnectParams)) @@ -717,7 +737,7 @@ func _ReconnectService_Stop_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.ReconnectService/Stop", + FullMethod: ReconnectService_Stop_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReconnectServiceServer).Stop(ctx, req.(*Empty)) @@ -745,6 +765,11 @@ var ReconnectService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + LoadBalancerStatsService_GetClientStats_FullMethodName = "/grpc.testing.LoadBalancerStatsService/GetClientStats" + LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName = "/grpc.testing.LoadBalancerStatsService/GetClientAccumulatedStats" +) + // LoadBalancerStatsServiceClient is the client API for LoadBalancerStatsService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -765,7 +790,7 @@ func NewLoadBalancerStatsServiceClient(cc grpc.ClientConnInterface) LoadBalancer func (c *loadBalancerStatsServiceClient) GetClientStats(ctx context.Context, in *LoadBalancerStatsRequest, opts ...grpc.CallOption) (*LoadBalancerStatsResponse, error) { out := new(LoadBalancerStatsResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.LoadBalancerStatsService/GetClientStats", in, out, opts...) + err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientStats_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -774,7 +799,7 @@ func (c *loadBalancerStatsServiceClient) GetClientStats(ctx context.Context, in func (c *loadBalancerStatsServiceClient) GetClientAccumulatedStats(ctx context.Context, in *LoadBalancerAccumulatedStatsRequest, opts ...grpc.CallOption) (*LoadBalancerAccumulatedStatsResponse, error) { out := new(LoadBalancerAccumulatedStatsResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.LoadBalancerStatsService/GetClientAccumulatedStats", in, out, opts...) + err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -826,7 +851,7 @@ func _LoadBalancerStatsService_GetClientStats_Handler(srv interface{}, ctx conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.LoadBalancerStatsService/GetClientStats", + FullMethod: LoadBalancerStatsService_GetClientStats_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadBalancerStatsServiceServer).GetClientStats(ctx, req.(*LoadBalancerStatsRequest)) @@ -844,7 +869,7 @@ func _LoadBalancerStatsService_GetClientAccumulatedStats_Handler(srv interface{} } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.LoadBalancerStatsService/GetClientAccumulatedStats", + FullMethod: LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadBalancerStatsServiceServer).GetClientAccumulatedStats(ctx, req.(*LoadBalancerAccumulatedStatsRequest)) @@ -872,6 +897,11 @@ var LoadBalancerStatsService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + XdsUpdateHealthService_SetServing_FullMethodName = "/grpc.testing.XdsUpdateHealthService/SetServing" + XdsUpdateHealthService_SetNotServing_FullMethodName = "/grpc.testing.XdsUpdateHealthService/SetNotServing" +) + // XdsUpdateHealthServiceClient is the client API for XdsUpdateHealthService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -890,7 +920,7 @@ func NewXdsUpdateHealthServiceClient(cc grpc.ClientConnInterface) XdsUpdateHealt func (c *xdsUpdateHealthServiceClient) SetServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.XdsUpdateHealthService/SetServing", in, out, opts...) 
+ err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetServing_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -899,7 +929,7 @@ func (c *xdsUpdateHealthServiceClient) SetServing(ctx context.Context, in *Empty func (c *xdsUpdateHealthServiceClient) SetNotServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.XdsUpdateHealthService/SetNotServing", in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetNotServing_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -949,7 +979,7 @@ func _XdsUpdateHealthService_SetServing_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.XdsUpdateHealthService/SetServing", + FullMethod: XdsUpdateHealthService_SetServing_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(XdsUpdateHealthServiceServer).SetServing(ctx, req.(*Empty)) @@ -967,7 +997,7 @@ func _XdsUpdateHealthService_SetNotServing_Handler(srv interface{}, ctx context. } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.XdsUpdateHealthService/SetNotServing", + FullMethod: XdsUpdateHealthService_SetNotServing_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(XdsUpdateHealthServiceServer).SetNotServing(ctx, req.(*Empty)) @@ -995,6 +1025,10 @@ var XdsUpdateHealthService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + XdsUpdateClientConfigureService_Configure_FullMethodName = "/grpc.testing.XdsUpdateClientConfigureService/Configure" +) + // XdsUpdateClientConfigureServiceClient is the client API for XdsUpdateClientConfigureService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -1013,7 +1047,7 @@ func NewXdsUpdateClientConfigureServiceClient(cc grpc.ClientConnInterface) XdsUp func (c *xdsUpdateClientConfigureServiceClient) Configure(ctx context.Context, in *ClientConfigureRequest, opts ...grpc.CallOption) (*ClientConfigureResponse, error) { out := new(ClientConfigureResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.XdsUpdateClientConfigureService/Configure", in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateClientConfigureService_Configure_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -1060,7 +1094,7 @@ func _XdsUpdateClientConfigureService_Configure_Handler(srv interface{}, ctx con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.XdsUpdateClientConfigureService/Configure", + FullMethod: XdsUpdateClientConfigureService_Configure_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(XdsUpdateClientConfigureServiceServer).Configure(ctx, req.(*ClientConfigureRequest)) diff --git a/interop/grpc_testing/worker_service_grpc.pb.go b/interop/grpc_testing/worker_service_grpc.pb.go index 1a33f7310f9d..10e153efa6f7 100644 --- a/interop/grpc_testing/worker_service_grpc.pb.go +++ b/interop/grpc_testing/worker_service_grpc.pb.go @@ -35,6 +35,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + WorkerService_RunServer_FullMethodName = "/grpc.testing.WorkerService/RunServer" + WorkerService_RunClient_FullMethodName = "/grpc.testing.WorkerService/RunClient" + WorkerService_CoreCount_FullMethodName = "/grpc.testing.WorkerService/CoreCount" + WorkerService_QuitWorker_FullMethodName = "/grpc.testing.WorkerService/QuitWorker" +) + // WorkerServiceClient is the client API for WorkerService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -68,7 +75,7 @@ func NewWorkerServiceClient(cc grpc.ClientConnInterface) WorkerServiceClient { } func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { - stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[0], "/grpc.testing.WorkerService/RunServer", opts...) + stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[0], WorkerService_RunServer_FullMethodName, opts...) if err != nil { return nil, err } @@ -99,7 +106,7 @@ func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { } func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { - stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[1], "/grpc.testing.WorkerService/RunClient", opts...) + stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[1], WorkerService_RunClient_FullMethodName, opts...) if err != nil { return nil, err } @@ -131,7 +138,7 @@ func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { out := new(CoreResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, opts...) + err := c.cc.Invoke(ctx, WorkerService_CoreCount_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -140,7 +147,7 @@ func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, op func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { out := new(Void) - err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, opts...) + err := c.cc.Invoke(ctx, WorkerService_QuitWorker_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -263,7 +270,7 @@ func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.WorkerService/CoreCount", + FullMethod: WorkerService_CoreCount_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest)) @@ -281,7 +288,7 @@ func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.WorkerService/QuitWorker", + FullMethod: WorkerService_QuitWorker_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void)) diff --git a/profiling/proto/service_grpc.pb.go b/profiling/proto/service_grpc.pb.go index 83ce1336faf5..0fc93f0c1fe0 100644 --- a/profiling/proto/service_grpc.pb.go +++ b/profiling/proto/service_grpc.pb.go @@ -32,6 +32,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Profiling_Enable_FullMethodName = "/grpc.go.profiling.v1alpha.Profiling/Enable" + Profiling_GetStreamStats_FullMethodName = "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats" +) + // ProfilingClient is the client API for Profiling service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -53,7 +58,7 @@ func NewProfilingClient(cc grpc.ClientConnInterface) ProfilingClient { func (c *profilingClient) Enable(ctx context.Context, in *EnableRequest, opts ...grpc.CallOption) (*EnableResponse, error) { out := new(EnableResponse) - err := c.cc.Invoke(ctx, "/grpc.go.profiling.v1alpha.Profiling/Enable", in, out, opts...) + err := c.cc.Invoke(ctx, Profiling_Enable_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -62,7 +67,7 @@ func (c *profilingClient) Enable(ctx context.Context, in *EnableRequest, opts .. func (c *profilingClient) GetStreamStats(ctx context.Context, in *GetStreamStatsRequest, opts ...grpc.CallOption) (*GetStreamStatsResponse, error) { out := new(GetStreamStatsResponse) - err := c.cc.Invoke(ctx, "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats", in, out, opts...) + err := c.cc.Invoke(ctx, Profiling_GetStreamStats_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -112,7 +117,7 @@ func _Profiling_Enable_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.go.profiling.v1alpha.Profiling/Enable", + FullMethod: Profiling_Enable_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilingServer).Enable(ctx, req.(*EnableRequest)) @@ -130,7 +135,7 @@ func _Profiling_GetStreamStats_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats", + FullMethod: Profiling_GetStreamStats_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilingServer).GetStreamStats(ctx, req.(*GetStreamStatsRequest)) diff --git a/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/reflection/grpc_reflection_v1/reflection_grpc.pb.go index 2382d205cf5e..f1228329edef 100644 --- a/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -39,6 +39,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + // ServerReflectionClient is the client API for ServerReflection service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -57,7 +61,7 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo", opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index ed54ab1378eb..46032f00d25a 100644 --- a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -36,6 +36,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" +) + // ServerReflectionClient is the client API for ServerReflection service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -54,7 +58,7 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) 
+ stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/reflection/grpc_testing/test_grpc.pb.go b/reflection/grpc_testing/test_grpc.pb.go index 7df4c7057d96..ebd5e5089e9e 100644 --- a/reflection/grpc_testing/test_grpc.pb.go +++ b/reflection/grpc_testing/test_grpc.pb.go @@ -32,6 +32,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + SearchService_Search_FullMethodName = "/grpc.testing.SearchService/Search" + SearchService_StreamingSearch_FullMethodName = "/grpc.testing.SearchService/StreamingSearch" +) + // SearchServiceClient is the client API for SearchService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -50,7 +55,7 @@ func NewSearchServiceClient(cc grpc.ClientConnInterface) SearchServiceClient { func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.SearchService/Search", in, out, opts...) + err := c.cc.Invoke(ctx, SearchService_Search_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -58,7 +63,7 @@ func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opt } func (c *searchServiceClient) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) { - stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[0], "/grpc.testing.SearchService/StreamingSearch", opts...) + stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[0], SearchService_StreamingSearch_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -130,7 +135,7 @@ func _SearchService_Search_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.SearchService/Search", + FullMethod: SearchService_Search_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SearchServiceServer).Search(ctx, req.(*SearchRequest)) diff --git a/stress/grpc_testing/metrics_grpc.pb.go b/stress/grpc_testing/metrics_grpc.pb.go index 014c6d512611..dc4f9feb4df6 100644 --- a/stress/grpc_testing/metrics_grpc.pb.go +++ b/stress/grpc_testing/metrics_grpc.pb.go @@ -39,6 +39,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + MetricsService_GetAllGauges_FullMethodName = "/grpc.testing.MetricsService/GetAllGauges" + MetricsService_GetGauge_FullMethodName = "/grpc.testing.MetricsService/GetGauge" +) + // MetricsServiceClient is the client API for MetricsService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -59,7 +64,7 @@ func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { } func (c *metricsServiceClient) GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) { - stream, err := c.cc.NewStream(ctx, &MetricsService_ServiceDesc.Streams[0], "/grpc.testing.MetricsService/GetAllGauges", opts...) + stream, err := c.cc.NewStream(ctx, &MetricsService_ServiceDesc.Streams[0], MetricsService_GetAllGauges_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -92,7 +97,7 @@ func (x *metricsServiceGetAllGaugesClient) Recv() (*GaugeResponse, error) { func (c *metricsServiceClient) GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) { out := new(GaugeResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.MetricsService/GetGauge", in, out, opts...) + err := c.cc.Invoke(ctx, MetricsService_GetGauge_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -165,7 +170,7 @@ func _MetricsService_GetGauge_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.MetricsService/GetGauge", + FullMethod: MetricsService_GetGauge_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetricsServiceServer).GetGauge(ctx, req.(*GaugeRequest)) diff --git a/test/grpc_testing/test_grpc.pb.go b/test/grpc_testing/test_grpc.pb.go index 51e0938f62fd..222987b583aa 100644 --- a/test/grpc_testing/test_grpc.pb.go +++ b/test/grpc_testing/test_grpc.pb.go @@ -35,6 +35,15 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + TestService_EmptyCall_FullMethodName = "/grpc.testing.TestService/EmptyCall" + TestService_UnaryCall_FullMethodName = "/grpc.testing.TestService/UnaryCall" + TestService_StreamingOutputCall_FullMethodName = "/grpc.testing.TestService/StreamingOutputCall" + TestService_StreamingInputCall_FullMethodName = "/grpc.testing.TestService/StreamingInputCall" + TestService_FullDuplexCall_FullMethodName = "/grpc.testing.TestService/FullDuplexCall" + TestService_HalfDuplexCall_FullMethodName = "/grpc.testing.TestService/HalfDuplexCall" +) + // TestServiceClient is the client API for TestService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -71,7 +80,7 @@ func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_EmptyCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -80,7 +89,7 @@ func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...gr func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_UnaryCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -88,7 +97,7 @@ func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, op } func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], "/grpc.testing.TestService/StreamingOutputCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], TestService_StreamingOutputCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -120,7 +129,7 @@ func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallRespo } func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], "/grpc.testing.TestService/StreamingInputCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], TestService_StreamingInputCall_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -154,7 +163,7 @@ func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCal } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], "/grpc.testing.TestService/FullDuplexCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], TestService_FullDuplexCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -185,7 +194,7 @@ func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, } func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], "/grpc.testing.TestService/HalfDuplexCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], TestService_HalfDuplexCall_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -287,7 +296,7 @@ func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/EmptyCall", + FullMethod: TestService_EmptyCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) @@ -305,7 +314,7 @@ func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", + FullMethod: TestService_UnaryCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) From f69e9ad8d487ea92fad1f5c854a45541bd62dcc5 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 6 Feb 2023 20:00:14 -0500 Subject: [PATCH 768/998] stats/opencensus: Add OpenCensus metrics support (#5923) --- codes/code_string.go | 51 +- internal/internal.go | 3 + stats/opencensus/client_metrics.go | 116 ++++ stats/opencensus/e2e_test.go | 940 +++++++++++++++++++++++++++++ stats/opencensus/go.mod | 1 + stats/opencensus/opencensus.go | 10 +- stats/opencensus/server_metrics.go | 115 ++++ stats/opencensus/stats.go | 215 +++++++ 8 files changed, 1448 insertions(+), 3 deletions(-) create mode 100644 stats/opencensus/client_metrics.go create mode 100644 stats/opencensus/e2e_test.go create mode 100644 stats/opencensus/server_metrics.go create mode 100644 stats/opencensus/stats.go diff --git a/codes/code_string.go b/codes/code_string.go index 0b206a57822a..934fac2b090a 100644 --- a/codes/code_string.go +++ b/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} 
func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/internal/internal.go b/internal/internal.go index cb5139a1980c..dc9fcd1101be 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -61,6 +61,9 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. 
An // xDS-enabled server invokes this method on a grpc.Server when a particular diff --git a/stats/opencensus/client_metrics.go b/stats/opencensus/client_metrics.go new file mode 100644 index 000000000000..3ea5e1c2c424 --- /dev/null +++ b/stats/opencensus/client_metrics.go @@ -0,0 +1,116 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + keyClientMethod = tag.MustNewKey("grpc_client_method") + keyClientStatus = tag.MustNewKey("grpc_client_status") +) + +// Measures, which are recorded by client stats handler: Note that due to the +// nature of how stats handlers are called on gRPC's client side, the per rpc +// unit is actually per attempt throughout this definition file. 
+var ( + clientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + clientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) + clientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + clientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) + clientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + clientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "The total number of client RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) + clientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) +) + +var ( + // ClientSentMessagesPerRPCView is the distribution of sent messages per + // RPC, keyed on method. + ClientSentMessagesPerRPCView = &view.View{ + Measure: clientSentMessagesPerRPC, + Name: "grpc.io/client/sent_messages_per_rpc", + Description: "Distribution of sent messages per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: countDistribution, + } + // ClientReceivedMessagesPerRPCView is the distribution of received messages + // per RPC, keyed on method. 
+ ClientReceivedMessagesPerRPCView = &view.View{ + Measure: clientReceivedMessagesPerRPC, + Name: "grpc.io/client/received_messages_per_rpc", + Description: "Distribution of received messages per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: countDistribution, + } + // ClientSentBytesPerRPCView is the distribution of sent bytes per RPC, + // keyed on method. + ClientSentBytesPerRPCView = &view.View{ + Measure: clientSentBytesPerRPC, + Name: "grpc.io/client/sent_bytes_per_rpc", + Description: "Distribution of sent bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } + // ClientReceivedBytesPerRPCView is the distribution of received bytes per + // RPC, keyed on method. + ClientReceivedBytesPerRPCView = &view.View{ + Measure: clientReceivedBytesPerRPC, + Name: "grpc.io/client/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } + // ClientStartedRPCsView is the count of opened RPCs, keyed on method. + ClientStartedRPCsView = &view.View{ + Measure: clientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of opened client RPCs, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: view.Count(), + } + // ClientCompletedRPCsView is the count of completed RPCs, keyed on method + // and status. + ClientCompletedRPCsView = &view.View{ + Measure: clientRoundtripLatency, + Name: "grpc.io/client/completed_rpcs", + Description: "Number of completed RPCs by method and status.", + TagKeys: []tag.Key{keyClientMethod, keyClientStatus}, + Aggregation: view.Count(), + } + // ClientRoundtripLatencyView is the distribution of round-trip latency in + // milliseconds per RPC, keyed on method. 
+ ClientRoundtripLatencyView = &view.View{ + Measure: clientRoundtripLatency, + Name: "grpc.io/client/roundtrip_latency", + Description: "Distribution of round-trip latency, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: millisecondsDistribution, + } +) + +// DefaultClientViews is the set of client views which are considered the +// minimum required to monitor client side performance. +var DefaultClientViews = []*view.View{ + ClientSentBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ClientRoundtripLatencyView, + ClientCompletedRPCsView, + ClientStartedRPCsView, +} diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go new file mode 100644 index 000000000000..d302ec94d3d4 --- /dev/null +++ b/stats/opencensus/e2e_test.go @@ -0,0 +1,940 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package opencensus + +import ( + "context" + "errors" + "fmt" + "io" + "reflect" + "sort" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/leakcheck" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/test/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func init() { + // OpenCensus, once included in binary, will spawn a global goroutine + // recorder that is not controllable by application. + // https://github.com/census-instrumentation/opencensus-go/issues/1191 + leakcheck.RegisterIgnoreGoroutine("go.opencensus.io/stats/view.(*worker).start") +} + +var defaultTestTimeout = 5 * time.Second + +type fakeExporter struct { + t *testing.T + + mu sync.RWMutex + seenViews map[string]*viewInformation +} + +// viewInformation is information Exported from the view package through +// ExportView relevant to testing, i.e. a reasonably non flaky expectation of +// desired emissions to Exporter. +type viewInformation struct { + aggType view.AggType + aggBuckets []float64 + desc string + tagKeys []tag.Key + rows []*view.Row +} + +func (fe *fakeExporter) ExportView(vd *view.Data) { + fe.mu.Lock() + defer fe.mu.Unlock() + fe.seenViews[vd.View.Name] = &viewInformation{ + aggType: vd.View.Aggregation.Type, + aggBuckets: vd.View.Aggregation.Buckets, + desc: vd.View.Description, + tagKeys: vd.View.TagKeys, + rows: vd.Rows, + } +} + +// compareRows compares rows with respect to the information desired to test. +// Both the tags representing the rows and also the data of the row are tested +// for equality. Rows are in nondeterministic order when ExportView is called, +// but handled inside this function by sorting. 
+func compareRows(rows []*view.Row, rows2 []*view.Row) bool { + if len(rows) != len(rows2) { + return false + } + // Sort both rows according to the same rule. This is to take away non + // determinism in the row ordering passed to the Exporter, while keeping the + // row data. + sort.Slice(rows, func(i, j int) bool { + return rows[i].String() > rows[j].String() + }) + + sort.Slice(rows2, func(i, j int) bool { + return rows2[i].String() > rows2[j].String() + }) + + for i, row := range rows { + if !cmp.Equal(row.Tags, rows2[i].Tags, cmp.Comparer(func(a tag.Key, b tag.Key) bool { + return a.Name() == b.Name() + })) { + return false + } + if !compareData(row.Data, rows2[i].Data) { + return false + } + } + return true +} + +// compareData returns whether the two aggregation data's are equal to each +// other with respect to parts of the data desired for correct emission. The +// function first makes sure the two types of aggregation data are the same, and +// then checks the equality for the respective aggregation data type. +func compareData(ad view.AggregationData, ad2 view.AggregationData) bool { + if ad == nil && ad2 == nil { + return true + } + if ad == nil || ad2 == nil { + return false + } + if reflect.TypeOf(ad) != reflect.TypeOf(ad2) { + return false + } + switch ad1 := ad.(type) { + case *view.DistributionData: + dd2 := ad2.(*view.DistributionData) + // Count and Count Per Buckets are reasonable for correctness, + // especially since we verify equality of bucket endpoints elsewhere. + if ad1.Count != dd2.Count { + return false + } + for i, count := range ad1.CountPerBucket { + if count != dd2.CountPerBucket[i] { + return false + } + } + case *view.CountData: + cd2 := ad2.(*view.CountData) + return ad1.Value == cd2.Value + + // gRPC open census plugin does not have these next two types of aggregation + // data types present, for now just check for type equality between the two + // aggregation data points (done above). 
+ // case *view.SumData + // case *view.LastValueData: + } + return true +} + +func (vi *viewInformation) Equal(vi2 *viewInformation) bool { + if vi == nil && vi2 == nil { + return true + } + if vi == nil || vi2 == nil { + return false + } + if vi.aggType != vi2.aggType { + return false + } + if !cmp.Equal(vi.aggBuckets, vi2.aggBuckets) { + return false + } + if vi.desc != vi2.desc { + return false + } + if !cmp.Equal(vi.tagKeys, vi2.tagKeys, cmp.Comparer(func(a tag.Key, b tag.Key) bool { + return a.Name() == b.Name() + })) { + return false + } + if !compareRows(vi.rows, vi2.rows) { + return false + } + return true +} + +// distributionDataLatencyCount checks if the view information contains the +// desired distrubtion latency total count that falls in buckets of 5 seconds or +// less. This must be called with non nil view information that is aggregated +// with distribution data. Returns a nil error if correct count information +// found, non nil error if correct information not found. +func distributionDataLatencyCount(vi *viewInformation, countWant int64) error { + var totalCount int64 + var largestIndexWithFive int + for i, bucket := range vi.aggBuckets { + // Distribution for latency is measured in milliseconds, so 5 * 1000 = + // 5000. + if bucket > 5000 { + largestIndexWithFive = i + break + } + } + // Iterating through rows sums up data points for all methods. In this case, + // a data point for the unary and for the streaming RPC. + for _, row := range vi.rows { + // This could potentially have an extra measurement in buckets above 5s, + // but that's fine. Count of buckets that could contain up to 5s is a + // good enough assertion. 
+ for i, count := range row.Data.(*view.DistributionData).CountPerBucket { + if i >= largestIndexWithFive { + break + } + totalCount = totalCount + count + } + } + if totalCount != countWant { + return fmt.Errorf("wrong total count for counts under 5: %v, wantCount: %v", totalCount, countWant) + } + return nil +} + +// TestAllMetricsOneFunction tests emitted metrics from gRPC. It registers all +// the metrics provided by this package. It then configures a system with a gRPC +// Client and gRPC server with the OpenCensus Dial and Server Option configured, +// and makes a Unary RPC and a Streaming RPC. These two RPCs should cause +// certain emissions for each registered metric through the OpenCensus View +// package. +func (s) TestAllMetricsOneFunction(t *testing.T) { + allViews := []*view.View{ + ClientStartedRPCsView, + ServerStartedRPCsView, + ClientCompletedRPCsView, + ServerCompletedRPCsView, + ClientSentBytesPerRPCView, + ServerSentBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ServerReceivedBytesPerRPCView, + ClientSentMessagesPerRPCView, + ServerSentMessagesPerRPCView, + ClientReceivedMessagesPerRPCView, + ServerReceivedMessagesPerRPCView, + ClientRoundtripLatencyView, + ServerLatencyView, + } + view.Register(allViews...) + // Unregister unconditionally in this defer to correctly cleanup globals in + // error conditions. + defer view.Unregister(allViews...) 
+ fe := &fakeExporter{ + t: t, + seenViews: make(map[string]*viewInformation), + } + view.RegisterExporter(fe) + defer view.UnregisterExporter(fe) + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + } + }, + } + if err := ss.Start([]grpc.ServerOption{ServerOption(TraceOptions{})}, DialOption(TraceOptions{})); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Make two RPC's, a unary RPC and a streaming RPC. These should cause + // certain metrics to be emitted. + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + cmtk := tag.MustNewKey("grpc_client_method") + smtk := tag.MustNewKey("grpc_server_method") + cstk := tag.MustNewKey("grpc_client_status") + sstk := tag.MustNewKey("grpc_server_status") + wantMetrics := []struct { + metric *view.View + wantVI *viewInformation + }{ + { + metric: ClientStartedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of opened client RPCs, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + 
Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ServerStartedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of opened server RPCs, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ClientCompletedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of completed RPCs by method and status.", + tagKeys: []tag.Key{ + cmtk, + cstk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ServerCompletedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of completed RPCs by method and status.", + tagKeys: []tag.Key{ + smtk, + sstk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + { + Key: sstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + { + Key: sstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ClientSentBytesPerRPCView, + 
wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of sent bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerSentBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of sent bytes per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + + { + metric: ClientReceivedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: 
&view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerReceivedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received bytes per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientSentMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of sent messages per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerSentMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of sent messages per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ 
+ Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientReceivedMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of received messages per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerReceivedMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of received messages per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientRoundtripLatencyView, + }, + { + metric: ServerLatencyView, + }, + } + // Unregister all the views. 
Unregistering a view causes a synchronous + // upload of any collected data for the view to any registered exporters. + // Thus, after this unregister call, the exporter has the data to make + // assertions on immediately. + view.Unregister(allViews...) + // Assert the expected emissions for each metric match the expected + // emissions. + for _, wantMetric := range wantMetrics { + metricName := wantMetric.metric.Name + var vi *viewInformation + if vi = fe.seenViews[metricName]; vi == nil { + t.Fatalf("couldn't find %v in the views exported, never collected", metricName) + } + + // For latency metrics, there is a lot of non determinism about + // the exact milliseconds of RPCs that finish. Thus, rather than + // declare the exact data you want, make sure the latency + // measurement points for the two RPCs above fall within buckets + // that fall into less than 5 seconds, which is the rpc timeout. + if metricName == "grpc.io/client/roundtrip_latency" || metricName == "grpc.io/server/server_latency" { + // RPCs have a context timeout of 5s, so all the recorded + // measurements (one per RPC - two total) should fall within 5 + // second buckets. + if err := distributionDataLatencyCount(vi, 2); err != nil { + t.Fatalf("Invalid OpenCensus export view data for metric %v: %v", metricName, err) + } + continue + } + if diff := cmp.Diff(vi, wantMetric.wantVI); diff != "" { + t.Fatalf("got unexpected viewInformation for metric %v, diff (-got, +want): %v", metricName, diff) + } + // Note that this test only fatals with one error if a metric fails. + // This is fine, as all are expected to pass so if a single one fails + // you can figure it out and iterate as needed. + } +} + +// TestOpenCensusTags tests this instrumentation code's ability to propagate +// OpenCensus tags across the wire. It also tests the server stats handler's +// functionality of adding the server method tag for the application to see. 
The +// test makes an Unary RPC without a tag map and with a tag map, and expects to +// see a tag map at the application layer with server method tag in the first +// case, and a tag map at the application layer with the populated tag map plus +// server method tag in second case. +func (s) TestOpenCensusTags(t *testing.T) { + // This stub servers functions represent the application layer server side. + // This is the intended feature being tested: that open census tags + // populated at the client side application layer end up at the server side + // application layer with the server method tag key in addition to the map + // populated at the client side application layer if populated. + tmCh := testutils.NewChannel() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + // Do the sends of the tag maps for assertions in this main testing + // goroutine. Do the receives and assertions in a forked goroutine. + if tm := tag.FromContext(ctx); tm != nil { + tmCh.Send(tm) + } else { + tmCh.Send(errors.New("no tag map received server side")) + } + return &grpc_testing.SimpleResponse{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{ServerOption(TraceOptions{})}, DialOption(TraceOptions{})); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + key1 := tag.MustNewKey("key 1") + wg := sync.WaitGroup{} + wg.Add(1) + readerErrCh := testutils.NewChannel() + // Spawn a goroutine to receive and validation two tag maps received by the + // server application code. + go func() { + defer wg.Done() + unaryCallMethodName := "grpc.testing.TestService/UnaryCall" + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Attempt to receive the tag map from the first RPC. 
+ if tm, err := tmCh.Receive(ctx); err == nil { + tagMap, ok := tm.(*tag.Map) + // Shouldn't happen, this test sends only *tag.Map type on channel. + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", tm)) + } + // keyServerMethod should be present in this tag map received server + // side. + val, ok := tagMap.Value(keyServerMethod) + if !ok { + readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", keyServerMethod.Name())) + } + if val != unaryCallMethodName { + readerErrCh.Send(fmt.Errorf("serverMethod receieved: %v, want server method: %v", val, unaryCallMethodName)) + } + } else { + readerErrCh.Send(fmt.Errorf("error while waiting for a tag map: %v", err)) + } + readerErrCh.Send(nil) + + // Attempt to receive the tag map from the second RPC. + if tm, err := tmCh.Receive(ctx); err == nil { + tagMap, ok := tm.(*tag.Map) + // Shouldn't happen, this test sends only *tag.Map type on channel. + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", tm)) + } + // key1: "value1" populated in the tag map client side should make + // it's way to server. + val, ok := tagMap.Value(key1) + if !ok { + readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", key1.Name())) + } + if val != "value1" { + readerErrCh.Send(fmt.Errorf("key %v received: %v, want server method: %v", key1.Name(), val, unaryCallMethodName)) + } + // keyServerMethod should be appended to tag map as well. + val, ok = tagMap.Value(keyServerMethod) + if !ok { + readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", keyServerMethod.Name())) + } + if val != unaryCallMethodName { + readerErrCh.Send(fmt.Errorf("key: %v received: %v, want server method: %v", keyServerMethod.Name(), val, unaryCallMethodName)) + } + } else { + readerErrCh.Send(fmt.Errorf("error while waiting for second tag map: %v", err)) + } + readerErrCh.Send(nil) + }() + + // Make a unary RPC without populating an OpenCensus tag map. 
The server + // side should receive an OpenCensus tag map containing only the + // keyServerMethod. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Should receive a nil error from the readerErrCh, meaning the reader + // goroutine successfully received a tag map with the keyServerMethod + // populated. + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } + + tm := &tag.Map{} + ctx = tag.NewContext(ctx, tm) + ctx, err := tag.New(ctx, tag.Upsert(key1, "value1")) + // Setup steps like this can fatal, so easier to do the RPC's and subsequent + // sends of the tag maps of the RPC's in main goroutine and have the + // corresponding receives and assertions in a forked goroutine. + if err != nil { + t.Fatalf("Error creating tag map: %v", err) + } + // Make a unary RPC with a populated OpenCensus tag map. The server side + // should receive an OpenCensus tag map containing this populated tag map + // with the keyServerMethod tag appended to it. 
+ if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } + + wg.Wait() +} diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index 851683220f61..9f28368d940b 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -3,6 +3,7 @@ module google.golang.org/grpc/stats/opencensus go 1.17 require ( + github.com/google/go-cmp v0.5.9 go.opencensus.io v0.24.0 google.golang.org/grpc v1.52.0 ) diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index ea9368ebbfee..fe5cdf08cc3f 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -103,10 +103,13 @@ func (csh *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {} // TagRPC implements per RPC attempt context management. func (csh *clientStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = csh.statsTagRPC(ctx, rti) return ctx } -func (csh *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {} +func (csh *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + recordRPCData(ctx, rs) +} type serverStatsHandler struct { to TraceOptions @@ -122,8 +125,11 @@ func (ssh *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {} // TagRPC implements per RPC context management. func (ssh *serverStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = ssh.statsTagRPC(ctx, rti) return ctx } // HandleRPC implements per RPC tracing and stats implementation. 
-func (ssh *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {} +func (ssh *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + recordRPCData(ctx, rs) +} diff --git a/stats/opencensus/server_metrics.go b/stats/opencensus/server_metrics.go new file mode 100644 index 000000000000..9268271c3694 --- /dev/null +++ b/stats/opencensus/server_metrics.go @@ -0,0 +1,115 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + keyServerMethod = tag.MustNewKey("grpc_server_method") + keyServerStatus = tag.MustNewKey("grpc_server_status") +) + +// Measures, which are recorded by server stats handler: Note that on gRPC's +// server side, the per rpc unit is truly per rpc, as there is no concept of a +// rpc attempt server side. +var ( + serverReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) // the collection/measurement point of this measure handles the /rpc aspect of it + serverReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) + serverSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + serverSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + serverStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "The total number of server RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) + serverLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) +) + +var ( + // ServerSentMessagesPerRPCView is the distribution of sent messages per + // RPC, keyed on method. + ServerSentMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_messages_per_rpc", + Description: "Distribution of sent messages per RPC, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Measure: serverSentMessagesPerRPC, + Aggregation: countDistribution, + } + // ServerReceivedMessagesPerRPCView is the distribution of received messages + // per RPC, keyed on method. + ServerReceivedMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/received_messages_per_rpc", + Description: "Distribution of received messages per RPC, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Measure: serverReceivedMessagesPerRPC, + Aggregation: countDistribution, + } + // ServerSentBytesPerRPCView is the distribution of sent bytes per RPC, + // keyed on method. 
+ ServerSentBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_bytes_per_rpc", + Description: "Distribution of sent bytes per RPC, by method.", + Measure: serverSentBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } + // ServerReceivedBytesPerRPCView is the distribution of received bytes per RPC, + // keyed on method. + ServerReceivedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + Measure: serverReceivedBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } + // ServerStartedRPCsView is the count of opened RPCs, keyed on method. + ServerStartedRPCsView = &view.View{ + Measure: serverStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of opened server RPCs, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: view.Count(), + } + // ServerCompletedRPCsView is the count of completed RPCs, keyed on + // method and status. + ServerCompletedRPCsView = &view.View{ + Name: "grpc.io/server/completed_rpcs", + Description: "Number of completed RPCs by method and status.", + TagKeys: []tag.Key{keyServerMethod, keyServerStatus}, + Measure: serverLatency, + Aggregation: view.Count(), + } + // ServerLatencyView is the distribution of server latency in milliseconds + // per RPC, keyed on method. + ServerLatencyView = &view.View{ + Name: "grpc.io/server/server_latency", + Description: "Distribution of server latency in milliseconds, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Measure: serverLatency, + Aggregation: millisecondsDistribution, + } +) + +// DefaultServerViews is the set of server views which are considered the +// minimum required to monitor server side performance. 
+var DefaultServerViews = []*view.View{ + ServerReceivedBytesPerRPCView, + ServerSentBytesPerRPCView, + ServerLatencyView, + ServerCompletedRPCsView, + ServerStartedRPCsView, +} diff --git a/stats/opencensus/stats.go b/stats/opencensus/stats.go new file mode 100644 index 000000000000..35a929393d4c --- /dev/null +++ b/stats/opencensus/stats.go @@ -0,0 +1,215 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "context" + "strings" + "sync/atomic" + "time" + + ocstats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +var logger = grpclog.Component("opencensus-instrumentation") + +var canonicalString = internal.CanonicalString.(func(codes.Code) string) + +type rpcDataKey struct{} + +func setRPCData(ctx context.Context, d *rpcData) context.Context { + return context.WithValue(ctx, rpcDataKey{}, d) +} + +var ( + // bounds separate variable for testing purposes. + bytesDistributionBounds = []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} + bytesDistribution = view.Distribution(bytesDistributionBounds...) 
+ millisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + countDistributionBounds = []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536} + countDistribution = view.Distribution(countDistributionBounds...) +) + +func removeLeadingSlash(mn string) string { + return strings.TrimLeft(mn, "/") +} + +// rpcData is data about the rpc attempt client side, and the overall rpc server +// side. +type rpcData struct { + // access these counts atomically for hedging in the future + // number of messages sent from side (client || server) + sentMsgs int64 + // number of bytes sent (within each message) from side (client || server) + sentBytes int64 + // number of messages received on side (client || server) + recvMsgs int64 + // number of bytes received (within each message) received on side (client + // || server) + recvBytes int64 + + startTime time.Time + method string +} + +// statsTagRPC creates a recording object to derive measurements from in the +// context, scoping the recordings to per RPC Attempt client side (scope of the +// context). It also populates the gRPC Metadata within the context with any +// opencensus specific tags set by the application in the context, binary +// encoded to send across the wire. +func (csh *clientStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + d := &rpcData{ + startTime: time.Now(), + method: info.FullMethodName, + } + // Populate gRPC Metadata with OpenCensus tag map if set by application. + if tm := tag.FromContext(ctx); tm != nil { + ctx = stats.SetTags(ctx, tag.Encode(tm)) + } + return setRPCData(ctx, d) +} + +// statsTagRPC creates a recording object to derive measurements from in the +// context, scoping the recordings to per RPC server side (scope of the +// context). 
It also deserializes the opencensus tags set in the context's gRPC +// Metadata, and adds a server method tag to the opencensus tags. +func (ssh *serverStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + d := &rpcData{ + startTime: time.Now(), + method: info.FullMethodName, + } + if tagsBin := stats.Tags(ctx); tagsBin != nil { + if tags, err := tag.Decode(tagsBin); err == nil { + ctx = tag.NewContext(ctx, tags) + } + } + // We can ignore the error here because in the error case, the context + // passed in is returned. If the call errors, the server side application + // layer won't get this key server method information in the tag map, but + // this instrumentation code will function as normal. + ctx, _ = tag.New(ctx, tag.Upsert(keyServerMethod, removeLeadingSlash(info.FullMethodName))) + return setRPCData(ctx, d) +} + +func recordRPCData(ctx context.Context, s stats.RPCStats) { + d, ok := ctx.Value(rpcDataKey{}).(*rpcData) + if !ok { + // Shouldn't happen, as gRPC calls TagRPC which populates the rpcData in + // context. + return + } + switch st := s.(type) { + case *stats.InHeader, *stats.OutHeader, *stats.InTrailer, *stats.OutTrailer: + // Headers and Trailers are not relevant to the measures, as the + // measures concern number of messages and bytes for messages. This + // aligns with flow control. + case *stats.Begin: + recordDataBegin(ctx, d, st) + case *stats.OutPayload: + recordDataOutPayload(d, st) + case *stats.InPayload: + recordDataInPayload(d, st) + case *stats.End: + recordDataEnd(ctx, d, st) + default: + // Shouldn't happen. gRPC calls into stats handler, and will never not + // be one of the types above. + logger.Errorf("Received unexpected stats type (%T) with data: %v", s, s) + } +} + +// recordDataBegin takes a measurement related to the RPC beginning, +// client/server started RPCs dependent on the caller. 
+func recordDataBegin(ctx context.Context, d *rpcData, b *stats.Begin) { + if b.Client { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(keyClientMethod, removeLeadingSlash(d.method))), + ocstats.WithMeasurements(clientStartedRPCs.M(1))) + return + } + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(keyServerMethod, removeLeadingSlash(d.method))), + ocstats.WithMeasurements(serverStartedRPCs.M(1))) +} + +// recordDataOutPayload records the length in bytes of outgoing messages and +// increases total count of sent messages both stored in the RPCs (attempt on +// client side) context for use in taking measurements at RPC end. +func recordDataOutPayload(d *rpcData, op *stats.OutPayload) { + atomic.AddInt64(&d.sentMsgs, 1) + atomic.AddInt64(&d.sentBytes, int64(op.Length)) +} + +// recordDataInPayload records the length in bytes of incoming messages and +// increases total count of sent messages both stored in the RPCs (attempt on +// client side) context for use in taking measurements at RPC end. +func recordDataInPayload(d *rpcData, ip *stats.InPayload) { + atomic.AddInt64(&d.recvMsgs, 1) + atomic.AddInt64(&d.recvBytes, int64(ip.Length)) +} + +// recordDataEnd takes per RPC measurements derived from information derived +// from the lifetime of the RPC (RPC attempt client side). +func recordDataEnd(ctx context.Context, d *rpcData, e *stats.End) { + // latency bounds for distribution data (speced millisecond bounds) have + // fractions, thus need a float. + latency := float64(time.Since(d.startTime)) / float64(time.Millisecond) + var st string + if e.Error != nil { + s, _ := status.FromError(e.Error) + st = canonicalString(s.Code()) + } else { + st = "OK" + } + + // TODO: Attach trace data through attachments?!?! 
+ + if e.Client { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(keyClientMethod, removeLeadingSlash(d.method)), + tag.Upsert(keyClientStatus, st)), + ocstats.WithMeasurements( + clientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + clientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentMsgs)), + clientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvMsgs)), + clientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + clientRoundtripLatency.M(latency), + clientServerLatency.M(latency), + )) + return + } + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(keyServerMethod, removeLeadingSlash(d.method)), + tag.Upsert(keyServerStatus, st), + ), + ocstats.WithMeasurements( + serverSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + serverSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentMsgs)), + serverReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvMsgs)), + serverReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + serverLatency.M(latency))) +} From f855226105fb6ed3098d8d442d0b7a4d7f733f07 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 7 Feb 2023 14:24:07 -0800 Subject: [PATCH 769/998] github: update codeQL action to v2 (#6009) --- .github/workflows/codeql-analysis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2a73b94079c5..9ed65e45b91d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,9 +27,9 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v2 with: languages: go - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@v2 From e9d9bd0436ee42f962566bb3beaed2a3e9ed6361 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 8 Feb 2023 13:03:14 -0800 Subject: [PATCH 770/998] tests: reduce the degree of stress testing in long running tests (#6003) --- clientconn_test.go | 18 +++++++--- internal/transport/transport_test.go | 54 ++++++++++++---------------- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/clientconn_test.go b/clientconn_test.go index ee8372ad891b..700d98211d55 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -335,7 +335,7 @@ func (s) TestCloseConnectionWhenServerPrefaceNotReceived(t *testing.T) { func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { - t.Fatalf("Error while listening. Err: %v", err) + t.Fatalf("Unexpected error from net.Listen(%q, %q): %v", "tcp", "localhost:0", err) } defer lis.Close() done := make(chan struct{}) @@ -367,9 +367,19 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { prevAt = meow } }() - cc, err := Dial(lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials())) + bc := backoff.Config{ + BaseDelay: 200 * time.Millisecond, + Multiplier: 1.1, + Jitter: 0, + MaxDelay: 120 * time.Second, + } + cp := ConnectParams{ + Backoff: bc, + MinConnectTimeout: 1 * time.Second, + } + cc, err := Dial(lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials()), WithConnectParams(cp)) if err != nil { - t.Fatalf("Error while dialing. 
Err: %v", err) + t.Fatalf("Unexpected error from Dial(%v) = %v", lis.Addr(), err) } defer cc.Close() go stayConnected(cc) @@ -1192,7 +1202,7 @@ func keepReading(conn net.Conn) { // stayConnected makes cc stay connected by repeatedly calling cc.Connect() // until the state becomes Shutdown or until 10 seconds elapses. func stayConnected(cc *ClientConn) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() for { diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index e40a2ae6175f..0c5e00a75cce 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -659,16 +659,13 @@ func performOneRPC(ct ClientTransport) { func (s) TestClientMix(t *testing.T) { s, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() - go func(s *server) { - time.Sleep(5 * time.Second) - s.stop() - }(s) + time.AfterFunc(time.Second, s.stop) go func(ct ClientTransport) { <-ct.Error() ct.Close(fmt.Errorf("closed manually by test")) }(ct) - for i := 0; i < 1000; i++ { - time.Sleep(10 * time.Millisecond) + for i := 0; i < 750; i++ { + time.Sleep(2 * time.Millisecond) go performOneRPC(ct) } } @@ -2191,33 +2188,26 @@ func runPingPongTest(t *testing.T, msgSize int) { binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(msgSize)) opts := &Options{} incomingHeader := make([]byte, 5) - done := make(chan struct{}) - go func() { - timer := time.NewTimer(time.Second * 5) - <-timer.C - close(done) - }() - for { - select { - case <-done: - client.Write(stream, nil, nil, &Options{Last: true}) - if _, err := stream.Read(incomingHeader); err != io.EOF { - t.Fatalf("Client expected EOF from the server. Got: %v", err) - } - return - default: - if err := client.Write(stream, outgoingHeader, msg, opts); err != nil { - t.Fatalf("Error on client while writing message. 
Err: %v", err) - } - if _, err := stream.Read(incomingHeader); err != nil { - t.Fatalf("Error on client while reading data header. Err: %v", err) - } - sz := binary.BigEndian.Uint32(incomingHeader[1:]) - recvMsg := make([]byte, int(sz)) - if _, err := stream.Read(recvMsg); err != nil { - t.Fatalf("Error on client while reading data. Err: %v", err) - } + + ctx, cancel = context.WithTimeout(ctx, time.Second) + defer cancel() + for ctx.Err() == nil { + if err := client.Write(stream, outgoingHeader, msg, opts); err != nil { + t.Fatalf("Error on client while writing message. Err: %v", err) } + if _, err := stream.Read(incomingHeader); err != nil { + t.Fatalf("Error on client while reading data header. Err: %v", err) + } + sz := binary.BigEndian.Uint32(incomingHeader[1:]) + recvMsg := make([]byte, int(sz)) + if _, err := stream.Read(recvMsg); err != nil { + t.Fatalf("Error on client while reading data. Err: %v", err) + } + } + + client.Write(stream, nil, nil, &Options{Last: true}) + if _, err := stream.Read(incomingHeader); err != io.EOF { + t.Fatalf("Client expected EOF from the server. 
Got: %v", err) } } From b81e8b62c95ec79a5dbf588ba7193cc1ed5fef8a Mon Sep 17 00:00:00 2001 From: horpto <__Singleton__@hackerdom.ru> Date: Thu, 9 Feb 2023 00:27:02 +0300 Subject: [PATCH 771/998] metadata: slightly improve operateHeaders (#6008) --- internal/transport/http2_server.go | 4 ++-- metadata/metadata.go | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 7dee882bf663..8629b7ecc6d0 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -383,7 +383,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // if false, content-type was missing or invalid isGRPC = false contentType = "" - mdata = make(map[string][]string) + mdata = make(metadata.MD, len(frame.Fields)) httpMethod string // these are set if an error is encountered while parsing the headers protocolError bool @@ -606,7 +606,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(mdata).Copy(), + Header: mdata.Copy(), } sh.HandleRPC(s.ctx, inHeader) } diff --git a/metadata/metadata.go b/metadata/metadata.go index fb4a88f59bd3..77fc055935a9 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -91,7 +91,11 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. 
From d655f404da25f4fe983b1f17538b52b1472d1f02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=A7=E5=8F=AF?= Date: Thu, 9 Feb 2023 05:36:09 +0800 Subject: [PATCH 772/998] internal/transport: fix severity of log when receiving a GOAWAY with error code ENHANCE_YOUR_CALM (#5935) --- clientconn_test.go | 3 +++ internal/transport/http2_client.go | 10 ++++++---- internal/transport/keepalive_test.go | 7 +++++++ test/goaway_test.go | 2 ++ 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/clientconn_test.go b/clientconn_test.go index 700d98211d55..9004f3177fdd 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/credentials/insecure" internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -691,6 +692,8 @@ func (s) TestResolverEmptyUpdateNotPanic(t *testing.T) { } func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen. 
Err: %v", err) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 79ee8aea0a21..b0964d7ddfb1 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1264,10 +1264,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - if logger.V(logLevel) { - logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configure KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID if id > 0 && id%2 == 0 { diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index 41395216fe43..6ef0764cc6ad 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" ) @@ -397,6 +398,8 @@ func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { // explicitly makes sure the fix works and the client sends a ping every [Time] // period. 
func (s) TestKeepaliveClientFrequency(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ MinTime: 1200 * time.Millisecond, // 1.2 seconds @@ -443,6 +446,8 @@ func (s) TestKeepaliveClientFrequency(t *testing.T) { // (when there are no active streams), based on the configured // EnforcementPolicy. func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ MinTime: 2 * time.Second, @@ -488,6 +493,8 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { // (even when there is an active stream), based on the configured // EnforcementPolicy. func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ MinTime: 2 * time.Second, diff --git a/test/goaway_test.go b/test/goaway_test.go index 48b7f0f7c7ac..9971487051f4 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/keepalive" @@ -120,6 +121,7 @@ func (s) TestDetailedGoAwayErrorOnGracefulClosePropagatesToRPCError(t *testing.T } func (s) TestDetailedGoAwayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) { + 
grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") // set the min keepalive time very low so that this test can take // a reasonable amount of time prev := internal.KeepaliveMinPingTime From ceb3f07190ea6f353ef8e889e4d65ceb8616b37d Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 8 Feb 2023 17:02:17 -0500 Subject: [PATCH 773/998] client: Revert dialWithGlobalOption (#6012) --- clientconn.go | 10 ++-------- default_dial_option_server_option_test.go | 19 ------------------- dialoptions.go | 1 - internal/internal.go | 3 --- 4 files changed, 2 insertions(+), 31 deletions(-) diff --git a/clientconn.go b/clientconn.go index e3919895e951..e6bc3e4ede68 100644 --- a/clientconn.go +++ b/clientconn.go @@ -133,10 +133,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // https://github.com/grpc/grpc/blob/master/doc/naming.md. // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - return dialWithGlobalOptions(ctx, target, false, opts...) 
-} - -func dialWithGlobalOptions(ctx context.Context, target string, disableGlobalOptions bool, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, csMgr: &connectivityStateManager{}, @@ -150,10 +146,8 @@ func dialWithGlobalOptions(ctx context.Context, target string, disableGlobalOpti cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - if !disableGlobalOptions { - for _, opt := range globalDialOptions { - opt.apply(&cc.dopts) - } + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) } for _, opt := range opts { diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go index b1501d2fb55d..094fea31f7f2 100644 --- a/default_dial_option_server_option_test.go +++ b/default_dial_option_server_option_test.go @@ -19,7 +19,6 @@ package grpc import ( - "context" "strings" "testing" @@ -59,24 +58,6 @@ func (s) TestAddGlobalDialOptions(t *testing.T) { } } -// TestDisableGlobalOptions tests dialing with a bit that disables global -// options. Dialing with this bit set should not pick up global options. -func (s) TestDisableGlobalOptions(t *testing.T) { - // Set transport credentials as a global option. - internal.AddGlobalDialOptions.(func(opt ...DialOption))(WithTransportCredentials(insecure.NewCredentials())) - // Dial with disable global options set to true. This Dial should fail due - // to the global dial options with credentials not being picked up due to it - // being disabled. 
- if _, err := internal.DialWithGlobalOptions.(func(context.Context, string, bool, ...DialOption) (*ClientConn, error))(context.Background(), "fake", true); err == nil { - t.Fatalf("Dialing without a credential did not fail") - } else { - if !strings.Contains(err.Error(), "no transport security set") { - t.Fatalf("Dialing failed with unexpected error: %v", err) - } - } - internal.ClearGlobalDialOptions() -} - func (s) TestAddGlobalServerOptions(t *testing.T) { const maxRecvSize = 998765 // Set and check the ServerOptions diff --git a/dialoptions.go b/dialoptions.go index 67f2404961fd..1fe8412e186a 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -43,7 +43,6 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } - internal.DialWithGlobalOptions = dialWithGlobalOptions internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption } diff --git a/internal/internal.go b/internal/internal.go index dc9fcd1101be..c3cbaa93df25 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -29,9 +29,6 @@ import ( ) var ( - // DialWithGlobalOptions dials with a knob on whether to disable global dial - // options (set via AddGlobalDialOptions). - DialWithGlobalOptions interface{} // func (context.Context, string, bool, ...DialOption) (*ClientConn, error) // WithHealthCheckFunc is set by dialoptions.go WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking From 55dfae6e5becc928314500087c01690708deef72 Mon Sep 17 00:00:00 2001 From: Fabian Holler Date: Wed, 8 Feb 2023 23:04:05 +0100 Subject: [PATCH 774/998] resolver: document handling UpdateState errors by resolvers (#6002) Extend the Godoc for resolver.ClientConn.UpdateState with a description of how resolvers should handle returned errors. 
The description is based on the explanation of dfawley in https://github.com/grpc/grpc-go/issues/5048 --- resolver/resolver.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/resolver/resolver.go b/resolver/resolver.go index 654e9ce69f4a..eb6a4690930c 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -203,6 +203,15 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. 
The ClientConn will notify the load balancer and begin calling From 81534105ca42a17b7aaa1f082709216c19bdaa76 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 13 Feb 2023 21:13:32 -0500 Subject: [PATCH 775/998] client: Add dial option to disable global dial options (#6016) --- clientconn.go | 14 ++++++++++++-- default_dial_option_server_option_test.go | 16 ++++++++++++++++ dialoptions.go | 11 +++++++++++ internal/internal.go | 4 ++++ 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/clientconn.go b/clientconn.go index e6bc3e4ede68..a2afb4668e8d 100644 --- a/clientconn.go +++ b/clientconn.go @@ -146,8 +146,18 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - for _, opt := range globalDialOptions { - opt.apply(&cc.dopts) + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } } for _, opt := range opts { diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go index 094fea31f7f2..4af5c0b8772c 100644 --- a/default_dial_option_server_option_test.go +++ b/default_dial_option_server_option_test.go @@ -19,6 +19,7 @@ package grpc import ( + "fmt" "strings" "testing" @@ -58,6 +59,21 @@ func (s) TestAddGlobalDialOptions(t *testing.T) { } } +// TestDisableGlobalOptions tests dialing with the disableGlobalDialOptions dial +// option. Dialing with this set should not pick up global options. +func (s) TestDisableGlobalOptions(t *testing.T) { + // Set transport credentials as a global option. 
+ internal.AddGlobalDialOptions.(func(opt ...DialOption))(WithTransportCredentials(insecure.NewCredentials())) + // Dial with the disable global options dial option. This dial should fail + // due to the global dial options with credentials not being picked up due + // to global options being disabled. + noTSecStr := "no transport security set" + if _, err := Dial("fake", internal.DisableGlobalDialOptions.(func() DialOption)()); !strings.Contains(fmt.Sprint(err), noTSecStr) { + t.Fatalf("Dialing received unexpected error: %v, want error containing \"%v\"", err, noTSecStr) + } + internal.ClearGlobalDialOptions() +} + func (s) TestAddGlobalServerOptions(t *testing.T) { const maxRecvSize = 998765 // Set and check the ServerOptions diff --git a/dialoptions.go b/dialoptions.go index 1fe8412e186a..e9d6852fd2c1 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -45,6 +45,7 @@ func init() { } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -96,6 +97,16 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { diff --git a/internal/internal.go b/internal/internal.go index c3cbaa93df25..cd68fb3bb929 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -77,6 +77,10 @@ var ( // globally for newly created client channels. The priority will be: 1. // user-provided; 2. 
this method; 3. default values. AddGlobalDialOptions interface{} // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + DisableGlobalDialOptions interface{} // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. ClearGlobalDialOptions func() From f4feddb3752317e4707064a5fd4fad14e00208f2 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 14 Feb 2023 10:13:53 -0800 Subject: [PATCH 776/998] github: update tests to use go version 1.20 (#6020) --- .github/workflows/testing.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index cf831894ec63..f7f0fbec6e45 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -26,7 +26,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: 1.19 + go-version: '1.20' - name: Checkout repo uses: actions/checkout@v2 @@ -46,31 +46,31 @@ jobs: matrix: include: - type: vet - goversion: 1.19 + goversion: '1.20' - type: tests - goversion: 1.19 + goversion: '1.20' - type: tests - goversion: 1.19 + goversion: '1.20' testflags: -race - type: tests - goversion: 1.19 + goversion: '1.20' goarch: 386 - type: tests - goversion: 1.19 + goversion: '1.20' goarch: arm64 - type: tests - goversion: 1.18 + goversion: '1.19' - type: tests - goversion: 1.17 + goversion: '1.18' - type: extras - goversion: 1.19 + goversion: '1.20' steps: # Setup the environment. 
From dd12def8213d02d4ead479ff1635363dc1c6b137 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 14 Feb 2023 16:27:28 -0500 Subject: [PATCH 777/998] stats/opencensus: Add OpenCensus traces support (#5978) * Add opencensus traces support --- stats/opencensus/e2e_test.go | 372 ++++++++++++++++++++++++++++++++- stats/opencensus/opencensus.go | 62 +++++- stats/opencensus/stats.go | 84 ++++---- stats/opencensus/trace.go | 119 +++++++++++ 4 files changed, 585 insertions(+), 52 deletions(-) create mode 100644 stats/opencensus/trace.go diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index d302ec94d3d4..501f13b586ef 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -30,6 +30,7 @@ import ( "github.com/google/go-cmp/cmp" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "go.opencensus.io/trace" "google.golang.org/grpc" "google.golang.org/grpc/internal/grpctest" @@ -61,6 +62,7 @@ type fakeExporter struct { mu sync.RWMutex seenViews map[string]*viewInformation + seenSpans []spanInformation } // viewInformation is information Exported from the view package through @@ -867,7 +869,7 @@ func (s) TestOpenCensusTags(t *testing.T) { readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", tm)) } // key1: "value1" populated in the tag map client side should make - // it's way to server. + // its way to server. val, ok := tagMap.Value(key1) if !ok { readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", key1.Name())) @@ -935,6 +937,372 @@ func (s) TestOpenCensusTags(t *testing.T) { t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) } } - wg.Wait() } + +// compareSpanContext only checks the equality of the trace options, which +// represent whether the span should be sampled. The other fields are checked +// for presence in later assertions. 
+func compareSpanContext(sc trace.SpanContext, sc2 trace.SpanContext) bool { + return sc.TraceOptions.IsSampled() == sc2.TraceOptions.IsSampled() +} + +func compareMessageEvents(me []trace.MessageEvent, me2 []trace.MessageEvent) bool { + if len(me) != len(me2) { + return false + } + // Order matters here, message events are deterministic so no flakiness to + // test. + for i, e := range me { + e2 := me2[i] + if e.EventType != e2.EventType { + return false + } + if e.MessageID != e2.MessageID { + return false + } + if e.UncompressedByteSize != e2.UncompressedByteSize { + return false + } + if e.CompressedByteSize != e2.CompressedByteSize { + return false + } + } + return true +} + +// compareLinks compares the type of link received compared to the wanted link. +func compareLinks(ls []trace.Link, ls2 []trace.Link) bool { + if len(ls) != len(ls2) { + return false + } + for i, l := range ls { + l2 := ls2[i] + if l.Type != l2.Type { + return false + } + } + return true +} + +// spanInformation is the information received about the span. This is a subset +// of information that is important to verify that gRPC has knobs over, which +// goes through a stable OpenCensus API with well defined behavior. This keeps +// the robustness of assertions over time. +type spanInformation struct { + // SpanContext either gets pulled off the wire in certain cases server side + // or created. + sc trace.SpanContext + parentSpanID trace.SpanID + spanKind int + name string + message string + messageEvents []trace.MessageEvent + status trace.Status + links []trace.Link + hasRemoteParent bool + childSpanCount int +} + +// validateTraceAndSpanIDs checks for consistent trace ID across the full trace. +// It also asserts each span has a corresponding generated SpanID, and makes +// sure in the case of a server span and a client span, the server span points +// to the client span as its parent. This is assumed to be called with spans +// from the same RPC (thus the same trace). 
If called with spanInformation slice +// of length 2, it assumes first span is a server span which points to second +// span as parent and second span is a client span. These assertions are +// orthogonal to pure equality assertions, as this data is generated at runtime, +// so can only test relations between IDs (i.e. this part of the data has the +// same ID as this part of the data). +// +// Returns an error in the case of a failing assertion, non nil error otherwise. +func validateTraceAndSpanIDs(sis []spanInformation) error { + var traceID trace.TraceID + for i, si := range sis { + // Trace IDs should all be consistent across every span, since this + // function assumes called with Span from one RPC, which all fall under + // one trace. + if i == 0 { + traceID = si.sc.TraceID + } else { + if !cmp.Equal(si.sc.TraceID, traceID) { + return fmt.Errorf("TraceIDs should all be consistent: %v, %v", si.sc.TraceID, traceID) + } + } + // Due to the span IDs being 8 bytes, the documentation states that it + // is practically a mathematical uncertainty in practice to create two + // colliding IDs. Thus, for a presence check (the ID was actually + // generated, I will simply compare to the zero value, even though a + // zero value is a theoretical possibility of generation). This is + // because in practice, this zero value defined by this test will never + // collide with the generated ID. + if cmp.Equal(si.sc.SpanID, trace.SpanID{}) { + return errors.New("span IDs should be populated from the creation of the span") + } + } + // If the length of spans of an RPC is 2, it means there is a server span + // which exports first and a client span which exports second. Thus, the + // server span should point to the client span as its parent, represented + // by its ID. + if len(sis) == 2 { + if !cmp.Equal(sis[0].parentSpanID, sis[1].sc.SpanID) { + return fmt.Errorf("server span should point to the client span as its parent. 
parentSpanID: %v, clientSpanID: %v", sis[0].parentSpanID, sis[1].sc.SpanID) + } + } + return nil +} + +// Equal compares the constant data of the exported span information that is +// important for correctness known before runtime. +func (si spanInformation) Equal(si2 spanInformation) bool { + if !compareSpanContext(si.sc, si2.sc) { + return false + } + + if si.spanKind != si2.spanKind { + return false + } + if si.name != si2.name { + return false + } + if si.message != si2.message { + return false + } + // Ignore attribute comparison because Java doesn't even populate any so not + // important for correctness. + if !compareMessageEvents(si.messageEvents, si2.messageEvents) { + return false + } + if !cmp.Equal(si.status, si2.status) { + return false + } + // compare link type as link type child is important. + if !compareLinks(si.links, si2.links) { + return false + } + if si.hasRemoteParent != si2.hasRemoteParent { + return false + } + return si.childSpanCount == si2.childSpanCount +} + +func (fe *fakeExporter) ExportSpan(sd *trace.SpanData) { + fe.mu.Lock() + defer fe.mu.Unlock() + + // Persist the subset of data received that is important for correctness and + // to make various assertions on later. Keep the ordering as ordering of + // spans is deterministic in the context of one RPC. + gotSI := spanInformation{ + sc: sd.SpanContext, + parentSpanID: sd.ParentSpanID, + spanKind: sd.SpanKind, + name: sd.Name, + message: sd.Message, + // annotations - ignore + // attributes - ignore, I just left them in from previous but no spec + // for correctness so no need to test. Java doesn't even have any + // attributes. + messageEvents: sd.MessageEvents, + status: sd.Status, + links: sd.Links, + hasRemoteParent: sd.HasRemoteParent, + childSpanCount: sd.ChildSpanCount, + } + fe.seenSpans = append(fe.seenSpans, gotSI) +} + +// TestSpan tests emitted spans from gRPC. 
It configures a system with a gRPC +// Client and gRPC server with the OpenCensus Dial and Server Option configured, +// and makes a Unary RPC and a Streaming RPC. This should cause spans with +// certain information to be emitted from client and server side for each RPC. +func (s) TestSpan(t *testing.T) { + fe := &fakeExporter{ + t: t, + } + trace.RegisterExporter(fe) + defer trace.UnregisterExporter(fe) + + so := TraceOptions{ + TS: trace.ProbabilitySampler(1), + DisableTrace: false, + } + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + } + }, + } + if err := ss.Start([]grpc.ServerOption{ServerOption(so)}, DialOption(so)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Make a Unary RPC. This should cause a span with message events + // corresponding to the request message and response message to be emitted + // both from the client and the server. Note that RPCs trigger exports of + // corresponding span data synchronously, thus the Span Data is guaranteed + // to have been read by exporter and is ready to make assertions on. + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + + // The spans received are server first, then client. This is due to the RPC + // finishing on the server first. The ordering of message events for a Unary + // Call is as follows: (client send, server recv), (server send (server span + // end), client recv (client span end)). 
+ wantSI := []spanInformation{ + { + // Sampling rate of 100 percent, so this should populate every span + // with the information that this span is being sampled. Here and + // every other span emitted in this test. + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindServer, + name: "grpc.testing.TestService.UnaryCall", + // message id - "must be calculated as two different counters + // starting from 1 one for sent messages and one for received + // message. This way we guarantee that the values will be consistent + // between different implementations. In case of unary calls only + // one sent and one received message will be recorded for both + // client and server spans." + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 (see comment above) + UncompressedByteSize: 2, + CompressedByteSize: 7, + }, + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 (see comment above) + CompressedByteSize: 5, + }, + }, + links: []trace.Link{ + { + Type: trace.LinkTypeChild, + }, + }, + // For some reason, status isn't populated in the data sent to the + // exporter. This seems wrong, but it didn't send status in old + // instrumentation code, so I'm iffy on it but fine. 
+ hasRemoteParent: true, + }, + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindClient, + name: "grpc.testing.TestService.UnaryCall", + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 (see comment above) + UncompressedByteSize: 2, + CompressedByteSize: 7, + }, + { + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 (see comment above) + CompressedByteSize: 5, + }, + }, + hasRemoteParent: false, + }, + } + if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { + t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) + } + fe.mu.Lock() + if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { + fe.mu.Unlock() + t.Fatalf("Error in runtime data assertions: %v", err) + } + fe.seenSpans = nil + fe.mu.Unlock() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %v", err) + } + // Send two messages. This should be recorded in the emitted spans message + // events, with message IDs which increase for each message. 
+ if err := stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send failed: %v", err) + } + if err := stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send failed: %v", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + wantSI = []spanInformation{ + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindServer, + name: "grpc.testing.TestService.FullDuplexCall", + links: []trace.Link{ + { + Type: trace.LinkTypeChild, + }, + }, + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 + CompressedByteSize: 5, + }, + { + EventType: trace.MessageEventTypeRecv, + MessageID: 2, // Second msg recv so 2 + CompressedByteSize: 5, + }, + }, + hasRemoteParent: true, + }, + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindClient, + name: "grpc.testing.TestService.FullDuplexCall", + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 + CompressedByteSize: 5, + }, + { + EventType: trace.MessageEventTypeSent, + MessageID: 2, // Second msg send so 2 + CompressedByteSize: 5, + }, + }, + hasRemoteParent: false, + }, + } + if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { + t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) + } + fe.mu.Lock() + defer fe.mu.Unlock() + if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { + t.Fatalf("Error in runtime data assertions: %v", err) + } +} diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index fe5cdf08cc3f..64369deae863 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -89,6 +89,24 @@ func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.Clie return streamer(ctx, desc, cc, method, 
opts...) } +type rpcInfo struct { + mi *metricsInfo + ti *traceInfo +} + +type rpcInfoKey struct{} + +func setRPCInfo(ctx context.Context, ri *rpcInfo) context.Context { + return context.WithValue(ctx, rpcInfoKey{}, ri) +} + +// getSpanWithMsgCount returns the rpcInfo stored in the context, or nil +// if there isn't one. +func getRPCInfo(ctx context.Context) *rpcInfo { + ri, _ := ctx.Value(rpcInfoKey{}).(*rpcInfo) + return ri +} + type clientStatsHandler struct { to TraceOptions } @@ -103,12 +121,28 @@ func (csh *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {} // TagRPC implements per RPC attempt context management. func (csh *clientStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = csh.statsTagRPC(ctx, rti) - return ctx + ctx, mi := csh.statsTagRPC(ctx, rti) + var ti *traceInfo + if !csh.to.DisableTrace { + ctx, ti = csh.traceTagRPC(ctx, rti) + } + ri := &rpcInfo{ + mi: mi, + ti: ti, + } + return setRPCInfo(ctx, ri) } func (csh *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - recordRPCData(ctx, rs) + ri := getRPCInfo(ctx) + if ri == nil { + // Shouldn't happen because TagRPC populates this information. + return + } + recordRPCData(ctx, rs, ri.mi) + if !csh.to.DisableTrace { + populateSpan(ctx, rs, ri.ti) + } } type serverStatsHandler struct { @@ -125,11 +159,27 @@ func (ssh *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {} // TagRPC implements per RPC context management. func (ssh *serverStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = ssh.statsTagRPC(ctx, rti) - return ctx + ctx, mi := ssh.statsTagRPC(ctx, rti) + var ti *traceInfo + if !ssh.to.DisableTrace { + ctx, ti = ssh.traceTagRPC(ctx, rti) + } + ri := &rpcInfo{ + mi: mi, + ti: ti, + } + return setRPCInfo(ctx, ri) } // HandleRPC implements per RPC tracing and stats implementation. 
func (ssh *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - recordRPCData(ctx, rs) + ri := getRPCInfo(ctx) + if ri == nil { + // Shouldn't happen because TagRPC populates this information. + return + } + recordRPCData(ctx, rs, ri.mi) + if !ssh.to.DisableTrace { + populateSpan(ctx, rs, ri.ti) + } } diff --git a/stats/opencensus/stats.go b/stats/opencensus/stats.go index 35a929393d4c..163b2bba161b 100644 --- a/stats/opencensus/stats.go +++ b/stats/opencensus/stats.go @@ -37,12 +37,6 @@ var logger = grpclog.Component("opencensus-instrumentation") var canonicalString = internal.CanonicalString.(func(codes.Code) string) -type rpcDataKey struct{} - -func setRPCData(ctx context.Context, d *rpcData) context.Context { - return context.WithValue(ctx, rpcDataKey{}, d) -} - var ( // bounds separate variable for testing purposes. bytesDistributionBounds = []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} @@ -56,9 +50,9 @@ func removeLeadingSlash(mn string) string { return strings.TrimLeft(mn, "/") } -// rpcData is data about the rpc attempt client side, and the overall rpc server -// side. -type rpcData struct { +// metricsInfo is data used for recording metrics about the rpc attempt client +// side, and the overall rpc server side. +type metricsInfo struct { // access these counts atomically for hedging in the future // number of messages sent from side (client || server) sentMsgs int64 @@ -79,27 +73,29 @@ type rpcData struct { // context). It also populates the gRPC Metadata within the context with any // opencensus specific tags set by the application in the context, binary // encoded to send across the wire. 
-func (csh *clientStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - d := &rpcData{ +func (csh *clientStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) (context.Context, *metricsInfo) { + mi := &metricsInfo{ startTime: time.Now(), method: info.FullMethodName, } + // Populate gRPC Metadata with OpenCensus tag map if set by application. if tm := tag.FromContext(ctx); tm != nil { ctx = stats.SetTags(ctx, tag.Encode(tm)) } - return setRPCData(ctx, d) + return ctx, mi } // statsTagRPC creates a recording object to derive measurements from in the // context, scoping the recordings to per RPC server side (scope of the // context). It also deserializes the opencensus tags set in the context's gRPC // Metadata, and adds a server method tag to the opencensus tags. -func (ssh *serverStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - d := &rpcData{ +func (ssh *serverStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) (context.Context, *metricsInfo) { + mi := &metricsInfo{ startTime: time.Now(), method: info.FullMethodName, } + if tagsBin := stats.Tags(ctx); tagsBin != nil { if tags, err := tag.Decode(tagsBin); err == nil { ctx = tag.NewContext(ctx, tags) @@ -110,14 +106,14 @@ func (ssh *serverStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCT // layer won't get this key server method information in the tag map, but // this instrumentation code will function as normal. 
ctx, _ = tag.New(ctx, tag.Upsert(keyServerMethod, removeLeadingSlash(info.FullMethodName))) - return setRPCData(ctx, d) + return ctx, mi } -func recordRPCData(ctx context.Context, s stats.RPCStats) { - d, ok := ctx.Value(rpcDataKey{}).(*rpcData) - if !ok { - // Shouldn't happen, as gRPC calls TagRPC which populates the rpcData in +func recordRPCData(ctx context.Context, s stats.RPCStats, mi *metricsInfo) { + if mi == nil { + // Shouldn't happen, as gRPC calls TagRPC which populates the metricsInfo in // context. + logger.Error("ctx passed into stats handler metrics event handling has no metrics data present") return } switch st := s.(type) { @@ -126,13 +122,13 @@ func recordRPCData(ctx context.Context, s stats.RPCStats) { // measures concern number of messages and bytes for messages. This // aligns with flow control. case *stats.Begin: - recordDataBegin(ctx, d, st) + recordDataBegin(ctx, mi, st) case *stats.OutPayload: - recordDataOutPayload(d, st) + recordDataOutPayload(mi, st) case *stats.InPayload: - recordDataInPayload(d, st) + recordDataInPayload(mi, st) case *stats.End: - recordDataEnd(ctx, d, st) + recordDataEnd(ctx, mi, st) default: // Shouldn't happen. gRPC calls into stats handler, and will never not // be one of the types above. @@ -142,40 +138,40 @@ func recordRPCData(ctx context.Context, s stats.RPCStats) { // recordDataBegin takes a measurement related to the RPC beginning, // client/server started RPCs dependent on the caller. 
-func recordDataBegin(ctx context.Context, d *rpcData, b *stats.Begin) { +func recordDataBegin(ctx context.Context, mi *metricsInfo, b *stats.Begin) { if b.Client { ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(keyClientMethod, removeLeadingSlash(d.method))), + ocstats.WithTags(tag.Upsert(keyClientMethod, removeLeadingSlash(mi.method))), ocstats.WithMeasurements(clientStartedRPCs.M(1))) return } ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(keyServerMethod, removeLeadingSlash(d.method))), + ocstats.WithTags(tag.Upsert(keyServerMethod, removeLeadingSlash(mi.method))), ocstats.WithMeasurements(serverStartedRPCs.M(1))) } // recordDataOutPayload records the length in bytes of outgoing messages and // increases total count of sent messages both stored in the RPCs (attempt on // client side) context for use in taking measurements at RPC end. -func recordDataOutPayload(d *rpcData, op *stats.OutPayload) { - atomic.AddInt64(&d.sentMsgs, 1) - atomic.AddInt64(&d.sentBytes, int64(op.Length)) +func recordDataOutPayload(mi *metricsInfo, op *stats.OutPayload) { + atomic.AddInt64(&mi.sentMsgs, 1) + atomic.AddInt64(&mi.sentBytes, int64(op.Length)) } // recordDataInPayload records the length in bytes of incoming messages and // increases total count of sent messages both stored in the RPCs (attempt on // client side) context for use in taking measurements at RPC end. -func recordDataInPayload(d *rpcData, ip *stats.InPayload) { - atomic.AddInt64(&d.recvMsgs, 1) - atomic.AddInt64(&d.recvBytes, int64(ip.Length)) +func recordDataInPayload(mi *metricsInfo, ip *stats.InPayload) { + atomic.AddInt64(&mi.recvMsgs, 1) + atomic.AddInt64(&mi.recvBytes, int64(ip.Length)) } // recordDataEnd takes per RPC measurements derived from information derived // from the lifetime of the RPC (RPC attempt client side). 
-func recordDataEnd(ctx context.Context, d *rpcData, e *stats.End) { +func recordDataEnd(ctx context.Context, mi *metricsInfo, e *stats.End) { // latency bounds for distribution data (speced millisecond bounds) have // fractions, thus need a float. - latency := float64(time.Since(d.startTime)) / float64(time.Millisecond) + latency := float64(time.Since(mi.startTime)) / float64(time.Millisecond) var st string if e.Error != nil { s, _ := status.FromError(e.Error) @@ -189,13 +185,13 @@ func recordDataEnd(ctx context.Context, d *rpcData, e *stats.End) { if e.Client { ocstats.RecordWithOptions(ctx, ocstats.WithTags( - tag.Upsert(keyClientMethod, removeLeadingSlash(d.method)), + tag.Upsert(keyClientMethod, removeLeadingSlash(mi.method)), tag.Upsert(keyClientStatus, st)), ocstats.WithMeasurements( - clientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - clientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentMsgs)), - clientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvMsgs)), - clientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + clientSentBytesPerRPC.M(atomic.LoadInt64(&mi.sentBytes)), + clientSentMessagesPerRPC.M(atomic.LoadInt64(&mi.sentMsgs)), + clientReceivedMessagesPerRPC.M(atomic.LoadInt64(&mi.recvMsgs)), + clientReceivedBytesPerRPC.M(atomic.LoadInt64(&mi.recvBytes)), clientRoundtripLatency.M(latency), clientServerLatency.M(latency), )) @@ -203,13 +199,13 @@ func recordDataEnd(ctx context.Context, d *rpcData, e *stats.End) { } ocstats.RecordWithOptions(ctx, ocstats.WithTags( - tag.Upsert(keyServerMethod, removeLeadingSlash(d.method)), + tag.Upsert(keyServerMethod, removeLeadingSlash(mi.method)), tag.Upsert(keyServerStatus, st), ), ocstats.WithMeasurements( - serverSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - serverSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentMsgs)), - serverReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvMsgs)), - serverReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + 
serverSentBytesPerRPC.M(atomic.LoadInt64(&mi.sentBytes)), + serverSentMessagesPerRPC.M(atomic.LoadInt64(&mi.sentMsgs)), + serverReceivedMessagesPerRPC.M(atomic.LoadInt64(&mi.recvMsgs)), + serverReceivedBytesPerRPC.M(atomic.LoadInt64(&mi.recvBytes)), serverLatency.M(latency))) } diff --git a/stats/opencensus/trace.go b/stats/opencensus/trace.go new file mode 100644 index 000000000000..c2ad518545b8 --- /dev/null +++ b/stats/opencensus/trace.go @@ -0,0 +1,119 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "context" + "strings" + "sync/atomic" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// traceInfo is data used for recording traces. +type traceInfo struct { + span *trace.Span + countSentMsg uint32 + countRecvMsg uint32 +} + +// traceTagRPC populates context with a new span, and serializes information +// about this span into gRPC Metadata. +func (csh *clientStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) (context.Context, *traceInfo) { + // TODO: get consensus on whether this method name of "s.m" is correct. 
+ mn := strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) + _, span := trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS), trace.WithSpanKind(trace.SpanKindClient)) + + tcBin := propagation.Binary(span.SpanContext()) + return stats.SetTrace(ctx, tcBin), &traceInfo{ + span: span, + countSentMsg: 0, // msg events scoped to scope of context, per attempt client side + countRecvMsg: 0, + } +} + +// traceTagRPC populates context with new span data, with a parent based on the +// spanContext deserialized from context passed in (wire data in gRPC metadata) +// if present. +func (ssh *serverStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) (context.Context, *traceInfo) { + mn := strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) + + var span *trace.Span + if sc, ok := propagation.FromBinary(stats.Trace(ctx)); ok { + // Returned context is ignored because will populate context with data + // that wraps the span instead. + _, span = trace.StartSpanWithRemoteParent(ctx, mn, sc, trace.WithSpanKind(trace.SpanKindServer), trace.WithSampler(ssh.to.TS)) + span.AddLink(trace.Link{TraceID: sc.TraceID, SpanID: sc.SpanID, Type: trace.LinkTypeChild}) + } else { + // Returned context is ignored because will populate context with data + // that wraps the span instead. + _, span = trace.StartSpan(ctx, mn, trace.WithSpanKind(trace.SpanKindServer), trace.WithSampler(ssh.to.TS)) + } + + return ctx, &traceInfo{ + span: span, + countSentMsg: 0, + countRecvMsg: 0, + } +} + +// populateSpan populates span information based on stats passed in (invariants +// of the RPC lifecycle), and also ends span which triggers the span to be +// exported. +func populateSpan(ctx context.Context, rs stats.RPCStats, ti *traceInfo) { + if ti == nil || ti.span == nil { + // Shouldn't happen, tagRPC call comes before this function gets called + // which populates this information. 
+ logger.Error("ctx passed into stats handler tracing event handling has no span present") + return + } + span := ti.span + + switch rs := rs.(type) { + case *stats.Begin: + // Note: Go always added these attributes even though they are not + // defined by the OpenCensus gRPC spec. Thus, they are unimportant for + // correctness. + span.AddAttributes( + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast), + ) + case *stats.InPayload: + // message id - "must be calculated as two different counters starting + // from 1 one for sent messages and one for received messages." + mi := atomic.AddUint32(&ti.countRecvMsg, 1) + span.AddMessageReceiveEvent(int64(mi), int64(rs.Length), int64(rs.WireLength)) + case *stats.OutPayload: + mi := atomic.AddUint32(&ti.countSentMsg, 1) + span.AddMessageSendEvent(int64(mi), int64(rs.Length), int64(rs.WireLength)) + case *stats.End: + if rs.Error != nil { + // "The mapping between gRPC canonical codes and OpenCensus codes + // can be found here", which implies 1:1 mapping to gRPC statuses + // (OpenCensus statuses are based off gRPC statuses and a subset). 
+ s := status.Convert(rs.Error) + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: trace.StatusCodeOK}) // could get rid of this else conditional and just leave as 0 value, but this makes it explicit + } + span.End() + } +} From 081499f2e8a45710778f3011b896371fcde3cba9 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 14 Feb 2023 13:35:52 -0800 Subject: [PATCH 778/998] xds: remove support for v2 Transport API (#6013) --- admin/test/utils.go | 1 - internal/testutils/xds/bootstrap/bootstrap.go | 21 +- .../xds/e2e/setup_management_server.go | 1 - test/xds/xds_client_federation_test.go | 2 - xds/csds/csds.go | 36 +-- xds/csds/csds_e2e_test.go | 1 - xds/csds/csds_test.go | 92 -------- xds/googledirectpath/googlec2p.go | 8 +- xds/googledirectpath/googlec2p_test.go | 6 +- xds/internal/httpfilter/fault/fault_test.go | 1 - xds/internal/resolver/xds_resolver_test.go | 21 +- xds/internal/test/e2e/controlplane.go | 1 - xds/internal/xdsclient/authority.go | 1 + xds/internal/xdsclient/bootstrap/bootstrap.go | 114 ++-------- .../xdsclient/bootstrap/bootstrap_test.go | 212 ++++++++---------- .../xdsclient/e2e_test/authority_test.go | 14 +- .../xdsclient/e2e_test/cds_watchers_test.go | 15 +- .../xdsclient/e2e_test/eds_watchers_test.go | 15 +- .../e2e_test/federation_watchers_test.go | 1 - .../xdsclient/e2e_test/lds_watchers_test.go | 15 +- .../xdsclient/e2e_test/rds_watchers_test.go | 15 +- .../e2e_test/resource_update_test.go | 37 ++- xds/internal/xdsclient/loadreport_test.go | 24 +- xds/internal/xdsclient/singleton.go | 6 +- xds/internal/xdsclient/singleton_test.go | 1 - .../xdsclient/transport/loadreport_test.go | 10 +- xds/internal/xdsclient/transport/transport.go | 10 +- .../transport/transport_ack_nack_test.go | 27 +-- .../transport/transport_backoff_test.go | 31 ++- .../xdsclient/transport/transport_new_test.go | 28 +-- .../transport/transport_resource_test.go | 9 +- 
.../xdsclient/transport/transport_test.go | 8 +- .../xdsclient/xdsresource/filter_chain.go | 2 +- xds/internal/xdsclient/xdsresource/type.go | 66 +----- .../xdsresource/unmarshal_cds_test.go | 45 +--- .../xdsclient/xdsresource/unmarshal_lds.go | 17 +- .../xdsresource/unmarshal_lds_test.go | 41 +--- .../xdsclient/xdsresource/unmarshal_rds.go | 41 ++-- .../xdsresource/unmarshal_rds_test.go | 84 +------ .../xdsclient/xdsresource/version/version.go | 24 -- xds/server_test.go | 8 +- 41 files changed, 255 insertions(+), 857 deletions(-) delete mode 100644 xds/csds/csds_test.go diff --git a/admin/test/utils.go b/admin/test/utils.go index e85ab9240e1a..0ff3d00279ea 100644 --- a/admin/test/utils.go +++ b/admin/test/utils.go @@ -55,7 +55,6 @@ type ExpectedStatusCodes struct { func RunRegisterTests(t *testing.T, ec ExpectedStatusCodes) { nodeID := uuid.New().String() bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: "no.need.for.a.server", }) diff --git a/internal/testutils/xds/bootstrap/bootstrap.go b/internal/testutils/xds/bootstrap/bootstrap.go index 195f1bea0c6e..88790716bf2a 100644 --- a/internal/testutils/xds/bootstrap/bootstrap.go +++ b/internal/testutils/xds/bootstrap/bootstrap.go @@ -30,20 +30,8 @@ import ( var logger = grpclog.Component("internal/xds") -// TransportAPI refers to the API version for xDS transport protocol. -type TransportAPI int - -const ( - // TransportV2 refers to the v2 xDS transport protocol. - TransportV2 TransportAPI = iota - // TransportV3 refers to the v3 xDS transport protocol. - TransportV3 -) - // Options wraps the parameters used to generate bootstrap configuration. type Options struct { - // Version is the xDS transport protocol version. - Version TransportAPI // NodeID is the node identifier of the gRPC client/server node in the // proxyless service mesh. 
NodeID string @@ -119,14 +107,7 @@ func Contents(opts Options) ([]byte, error) { ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate, ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, } - switch opts.Version { - case TransportV2: - // TODO: Add any v2 specific fields. - case TransportV3: - cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "xds_v3") - default: - return nil, fmt.Errorf("unsupported xDS transport protocol version: %v", opts.Version) - } + cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "xds_v3") auths := make(map[string]authority) if envconfig.XDSFederation { diff --git a/internal/testutils/xds/e2e/setup_management_server.go b/internal/testutils/xds/e2e/setup_management_server.go index 5ba730722401..877cdea2c58d 100644 --- a/internal/testutils/xds/e2e/setup_management_server.go +++ b/internal/testutils/xds/e2e/setup_management_server.go @@ -79,7 +79,6 @@ func SetupManagementServer(t *testing.T, opts ManagementServerOptions) (*Managem // Create a bootstrap file in a temporary directory. nodeID := uuid.New().String() bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: server.Address, CertificateProviders: cpc, diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index b6d99de34ea8..0bd16779e1e1 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -74,7 +74,6 @@ func (s) TestClientSideFederation(t *testing.T) { // Create a bootstrap file in a temporary directory. 
nodeID := uuid.New().String() bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: serverDefaultAuth.Address, ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, @@ -220,7 +219,6 @@ func (s) TestFederation_UnknownAuthorityInReceivedResponse(t *testing.T) { nodeID := uuid.New().String() bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: mgmtServer.Address, ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 551757b80069..8d03124811a4 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -29,7 +29,6 @@ import ( "io" "sync" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -39,8 +38,6 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) @@ -126,7 +123,7 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp ret := &v3statuspb.ClientStatusResponse{ Config: []*v3statuspb.ClientConfig{ { - Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().XDSServer.NodeProto, s.logger), + Node: s.xdsClient.BootstrapConfig().NodeProto, GenericXdsConfigs: dumpToGenericXdsConfig(dump), }, }, @@ -141,37 +138,6 @@ func (s *ClientStatusDiscoveryServer) Close() { } } -// nodeProtoToV3 converts the given proto into a v3.Node. 
n is from bootstrap -// config, it can be either v2.Node or v3.Node. -// -// If n is already a v3.Node, return it. -// If n is v2.Node, marshal and unmarshal it to v3. -// Otherwise, return nil. -// -// The default case (not v2 or v3) is nil, instead of error, because the -// resources in the response are more important than the node. The worst case is -// that the user will receive no Node info, but will still get resources. -func nodeProtoToV3(n proto.Message, logger *internalgrpclog.PrefixLogger) *v3corepb.Node { - var node *v3corepb.Node - switch nn := n.(type) { - case *v3corepb.Node: - node = nn - case *v2corepb.Node: - v2, err := proto.Marshal(nn) - if err != nil { - logger.Warningf("Failed to marshal node (%v): %v", n, err) - break - } - node = new(v3corepb.Node) - if err := proto.Unmarshal(v2, node); err != nil { - logger.Warningf("Failed to unmarshal node (%v): %v", v2, err) - } - default: - logger.Warningf("node from bootstrap is %#v, only v2.Node and v3.Node are supported", nn) - } - return node -} - func dumpToGenericXdsConfig(dump map[string]map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig { var ret []*v3statuspb.ClientConfig_GenericXdsConfig for typeURL, updates := range dump { diff --git a/xds/csds/csds_e2e_test.go b/xds/csds/csds_e2e_test.go index ba3e58399f60..481e93929fa2 100644 --- a/xds/csds/csds_e2e_test.go +++ b/xds/csds/csds_e2e_test.go @@ -125,7 +125,6 @@ func (s) TestCSDS(t *testing.T) { // Create a bootstrap file in a temporary directory. bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: mgmtServer.Address, }) diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go deleted file mode 100644 index 48002b8501e0..000000000000 --- a/xds/csds/csds_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package csds - -import ( - "testing" - - "github.com/golang/protobuf/proto" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/protobuf/testing/protocmp" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -func (s) Test_nodeProtoToV3(t *testing.T) { - const ( - testID = "test-id" - testCluster = "test-cluster" - testZone = "test-zone" - ) - tests := []struct { - name string - n proto.Message - want *v3corepb.Node - }{ - { - name: "v3", - n: &v3corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v3corepb.Locality{Zone: testZone}, - }, - want: &v3corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v3corepb.Locality{Zone: testZone}, - }, - }, - { - name: "v2", - n: &v2corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v2corepb.Locality{Zone: testZone}, - }, - want: &v3corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v3corepb.Locality{Zone: testZone}, - }, - }, - { - name: "not node", - n: &v2corepb.Locality{Zone: testZone}, - want: nil, // Input is not a node, should return nil. 
- }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := nodeProtoToV3(tt.n, nil) - if diff := cmp.Diff(got, tt.want, protocmp.Transform()); diff != "" { - t.Errorf("nodeProtoToV3() got unexpected result, diff (-got, +want): %v", diff) - } - }) - } -} diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 5bc17b03e5b2..c8989c87bf49 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -41,7 +41,6 @@ import ( _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/structpb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -118,10 +117,8 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts balancerName = tdURL } serverConfig := &bootstrap.ServerConfig{ - ServerURI: balancerName, - Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), - TransportAPI: version.TransportV3, - NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), + ServerURI: balancerName, + Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), } config := &bootstrap.Config{ XDSServer: serverConfig, @@ -131,6 +128,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts XDSServer: serverConfig, }, }, + NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), } // Create singleton xds client with this config. 
The xds client will be diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 1eb1b18cf760..f5e8f2d93c71 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -32,7 +32,6 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" @@ -214,9 +213,7 @@ func TestBuildXDS(t *testing.T) { } } serverConfig := &bootstrap.ServerConfig{ - ServerURI: tdURL, - TransportAPI: version.TransportV3, - NodeProto: wantNode, + ServerURI: tdURL, } wantConfig := &bootstrap.Config{ XDSServer: serverConfig, @@ -226,6 +223,7 @@ func TestBuildXDS(t *testing.T) { XDSServer: serverConfig, }, }, + NodeProto: wantNode, } if tt.tdURI != "" { wantConfig.XDSServer.ServerURI = tt.tdURI diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 6bf00771f413..beeaf43321ad 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -106,7 +106,6 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { // Create a bootstrap file in a temporary directory. 
bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: fs.Address, ServerListenerResourceNameTemplate: "grpc/server", diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 952a7f67f9cc..d97993d86e1f 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -209,9 +209,8 @@ func (s) TestResolverBuilder_DifferentBootstrapConfigs(t *testing.T) { // Add top-level xDS server config corresponding to the above // management server. test.bootstrapCfg.XDSServer = &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), } // Override xDS client creation to use bootstrap configuration @@ -418,7 +417,6 @@ func (s) TestResolverResourceName(t *testing.T) { // Create a bootstrap configuration with test options. 
opts := xdsbootstrap.Options{ ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, ClientDefaultListenerResourceNameTemplate: tt.listenerResourceNameTemplate, } if tt.extraAuthority != "" { @@ -481,7 +479,6 @@ func (s) TestResolverWatchCallbackAfterClose(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -534,9 +531,8 @@ func (s) TestResolverWatchCallbackAfterClose(t *testing.T) { func (s) TestResolverCloseClosesXDSClient(t *testing.T) { bootstrapCfg := &bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy-management-server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, + ServerURI: "dummy-management-server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, } @@ -583,7 +579,6 @@ func (s) TestResolverBadServiceUpdate(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -694,7 +689,6 @@ func (s) TestResolverGoodServiceUpdate(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -941,7 +935,6 @@ func (s) TestResolverRequestHash(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -1040,7 +1033,6 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -1174,7 +1166,6 @@ func (s) 
TestResolverRemovedResource(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -1314,7 +1305,6 @@ func (s) TestResolverWRR(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -1411,7 +1401,6 @@ func (s) TestResolverMaxStreamDuration(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -1580,7 +1569,6 @@ func (s) TestResolverDelayedOnCommitted(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) @@ -1777,7 +1765,6 @@ func (s) TestResolverMultipleLDSUpdates(t *testing.T) { cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ NodeID: nodeID, ServerURI: mgmtServer.Address, - Version: xdsbootstrap.TransportV3, }) if err != nil { t.Fatal(err) diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go index eee2b13819ea..98030dd448f5 100644 --- a/xds/internal/test/e2e/controlplane.go +++ b/xds/internal/test/e2e/controlplane.go @@ -40,7 +40,6 @@ func newControlPlane() (*controlPlane, error) { nodeID := uuid.New().String() bootstrapContentBytes, err := bootstrap.Contents(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: server.Address, ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 8e53bd824d68..173edd9d1278 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -120,6 +120,7 @@ 
func newAuthority(args authorityArgs) (*authority, error) { UpdateHandler: ret.handleResourceUpdate, StreamErrorHandler: ret.newConnectionError, Logger: args.logger, + NodeProto: args.bootstrapCfg.NodeProto, }) if err != nil { return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 989ec4999748..2a920b94a312 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -28,8 +28,8 @@ import ( "os" "strings" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/google" @@ -39,10 +39,6 @@ import ( "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) const ( @@ -61,8 +57,6 @@ func init() { bootstrap.RegisterCredentials(&googleDefaultCredsBuilder{}) } -var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) - // For overriding in unit tests. var bootstrapFileReadFunc = os.ReadFile @@ -103,17 +97,6 @@ type ServerConfig struct { Creds grpc.DialOption // CredsType is the type of the creds. It will be used to dedup servers. CredsType string - // TransportAPI indicates the API version of xDS transport protocol to use. - // This describes the xDS gRPC endpoint and version of - // DiscoveryRequest/Response used on the wire. - TransportAPI version.TransportAPI - // NodeProto contains the Node proto to be used in xDS requests. 
The actual - // type depends on the transport protocol version used. - // - // Note that it's specified in the bootstrap globally for all the servers, - // but we keep it in each server config so that its type (e.g. *v2pb.Node or - // *v3pb.Node) is consistent with the transport API version. - NodeProto proto.Message } // String returns the string representation of the ServerConfig. @@ -126,13 +109,7 @@ type ServerConfig struct { // content. It doesn't cover NodeProto because NodeProto isn't used by // federation. func (sc *ServerConfig) String() string { - var ver string - switch sc.TransportAPI { - case version.TransportV3: - ver = "xDSv3" - case version.TransportV2: - ver = "xDSv2" - } + var ver = "xDSv3" return strings.Join([]string{sc.ServerURI, sc.CredsType, ver}, "-") } @@ -142,9 +119,7 @@ func (sc ServerConfig) MarshalJSON() ([]byte, error) { ServerURI: sc.ServerURI, ChannelCreds: []channelCreds{{Type: sc.CredsType, Config: nil}}, } - if sc.TransportAPI == version.TransportV3 { - server.ServerFeatures = []string{serverFeaturesV3} - } + server.ServerFeatures = []string{serverFeaturesV3} return json.Marshal(server) } @@ -169,11 +144,6 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { sc.Creds = grpc.WithCredentialsBundle(bundle) break } - for _, f := range server.ServerFeatures { - if f == serverFeaturesV3 { - sc.TransportAPI = version.TransportV3 - } - } return nil } @@ -277,7 +247,6 @@ type Config struct { // // Defaults to "%s". ClientDefaultListenerResourceNameTemplate string - // Authorities is a map of authority name to corresponding configuration. // // This is used in the following cases: @@ -292,6 +261,9 @@ type Config struct { // In any of those cases, it is an error if the specified authority is // not present in this map. Authorities map[string]*Authority + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. 
+ NodeProto *v3corepb.Node } type channelCreds struct { @@ -373,12 +345,6 @@ func newConfigFromContents(data []byte) (*Config, error) { for k, v := range jsonData { switch k { case "node": - // We unconditionally convert the JSON into a v3.Node proto. The v3 - // proto does not contain the deprecated field "build_version" from - // the v2 proto. We do not expect the bootstrap file to contain the - // "build_version" field. In any case, the unmarshal will succeed - // because we have set the `AllowUnknownFields` option on the - // unmarshaler. node = &v3corepb.Node{} if err := m.Unmarshal(bytes.NewReader(v), node); err != nil { return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) @@ -473,66 +439,16 @@ func newConfigFromContents(data []byte) (*Config, error) { } } - if err := config.updateNodeProto(node); err != nil { - return nil, err + // Performing post-production on the node information. Some additional fields + // which are not expected to be set in the bootstrap file are populated here. + if node == nil { + node = &v3corepb.Node{} } + node.UserAgentName = gRPCUserAgentName + node.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} + node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) + config.NodeProto = node + logger.Debugf("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) return config, nil } - -// updateNodeProto updates the node proto read from the bootstrap file. -// -// The input node is a v3.Node protobuf message corresponding to the JSON -// contents found in the bootstrap file. This method performs some post -// processing on it: -// 1. If the node is nil, we create an empty one here. That way, callers of this -// function can always expect that the NodeProto field is non-nil. -// 2. 
Some additional fields which are not expected to be set in the bootstrap -// file are populated here. -// 3. For each server config (both top level and in each authority), we set its -// node field to the v3.Node, or a v2.Node with the same content, depending on -// the server's transport API version. -func (c *Config) updateNodeProto(node *v3corepb.Node) error { - v3 := node - if v3 == nil { - v3 = &v3corepb.Node{} - } - v3.UserAgentName = gRPCUserAgentName - v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) - - v3bytes, err := proto.Marshal(v3) - if err != nil { - return fmt.Errorf("xds: proto.Marshal(%v): %v", v3, err) - } - v2 := &v2corepb.Node{} - if err := proto.Unmarshal(v3bytes, v2); err != nil { - return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3bytes, err) - } - // BuildVersion is deprecated, and is replaced by user_agent_name and - // user_agent_version. But the management servers are still using the old - // field, so we will keep both set. 
- v2.BuildVersion = gRPCVersion - v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - - switch c.XDSServer.TransportAPI { - case version.TransportV2: - c.XDSServer.NodeProto = v2 - case version.TransportV3: - c.XDSServer.NodeProto = v3 - } - - for _, a := range c.Authorities { - if a.XDSServer == nil { - continue - } - switch a.XDSServer.TransportAPI { - case version.TransportV2: - a.XDSServer.NodeProto = v2 - case version.TransportV3: - a.XDSServer.NodeProto = v3 - } - } - - return nil -} diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 6aa047d6ddbc..0ff7813f493b 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -28,23 +28,53 @@ import ( "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc" "google.golang.org/grpc/credentials/google" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" structpb "github.com/golang/protobuf/ptypes/struct" ) var ( - v2BootstrapFileMap = map[string]string{ + v3BootstrapFileMap = map[string]string{ + "serverFeaturesIncludesXDSV3": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ + { "type": "google_default" } + 
], + "server_features" : ["foo", "bar"] + }] + }`, + "serverFeaturesExcludesXDSV3": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ + { "type": "google_default" } + ], + "server_features" : ["foo", "bar", "xds_v3"] + }] + }`, "emptyNodeProto": ` { "xds_servers" : [{ @@ -153,40 +183,6 @@ var ( ] }`, } - v3BootstrapFileMap = map[string]string{ - "serverDoesNotSupportsV3": ` - { - "node": { - "id": "ENVOY_NODE_ID", - "metadata": { - "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" - } - }, - "xds_servers" : [{ - "server_uri": "trafficdirector.googleapis.com:443", - "channel_creds": [ - { "type": "google_default" } - ], - "server_features" : ["foo", "bar"] - }] - }`, - "serverSupportsV3": ` - { - "node": { - "id": "ENVOY_NODE_ID", - "metadata": { - "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" - } - }, - "xds_servers" : [{ - "server_uri": "trafficdirector.googleapis.com:443", - "channel_creds": [ - { "type": "google_default" } - ], - "server_features" : ["foo", "bar", "xds_v3"] - }] - }`, - } metadata = &structpb.Struct{ Fields: map[string]*structpb.Value{ "TRAFFICDIRECTOR_GRPC_HOSTNAME": { @@ -194,14 +190,6 @@ var ( }, }, } - v2NodeProto = &v2corepb.Node{ - Id: "ENVOY_NODE_ID", - Metadata: metadata, - BuildVersion: gRPCVersion, - UserAgentName: gRPCUserAgentName, - UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, - } v3NodeProto = &v3corepb.Node{ Id: "ENVOY_NODE_ID", Metadata: metadata, @@ -209,32 +197,22 @@ var ( UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, } - nilCredsConfigV2 = &Config{ + nilCredsConfigV3 = &Config{ XDSServer: 
&ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), CredsType: "insecure", - NodeProto: v2NodeProto, }, + NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "%s", } - nonNilCredsConfigV2 = &Config{ + nonNilCredsConfigV3 = &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), CredsType: "google_default", - NodeProto: v2NodeProto, - }, - ClientDefaultListenerResourceNameTemplate: "%s", - } - nonNilCredsConfigV3 = &Config{ - XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV3, - NodeProto: v3NodeProto, }, + NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "%s", } ) @@ -314,9 +292,9 @@ func testNewConfigWithFileContentEnv(t *testing.T, fileName string, wantError bo } } -// TestNewConfigV2ProtoFailure exercises the functionality in NewConfig with +// TestNewConfigV3ProtoFailure exercises the functionality in NewConfig with // different bootstrap file contents which are expected to fail. -func TestNewConfigV2ProtoFailure(t *testing.T) { +func TestNewConfigV3ProtoFailure(t *testing.T) { bootstrapFileMap := map[string]string{ "empty": "", "badJSON": `["test": 123]`, @@ -380,11 +358,11 @@ func TestNewConfigV2ProtoFailure(t *testing.T) { } } -// TestNewConfigV2ProtoSuccess exercises the functionality in NewConfig with +// TestNewConfigV3ProtoSuccess exercises the functionality in NewConfig with // different bootstrap file contents. It overrides the fileReadFunc by returning // bootstrap file contents defined in this test, instead of reading from a file. 
-func TestNewConfigV2ProtoSuccess(t *testing.T) { - cancel := setupBootstrapOverride(v2BootstrapFileMap) +func TestNewConfigV3ProtoSuccess(t *testing.T) { + cancel := setupBootstrapOverride(v3BootstrapFileMap) defer cancel() tests := []struct { @@ -397,22 +375,21 @@ func TestNewConfigV2ProtoSuccess(t *testing.T) { ServerURI: "trafficdirector.googleapis.com:443", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), CredsType: "insecure", - NodeProto: &v2corepb.Node{ - BuildVersion: gRPCVersion, - UserAgentName: gRPCUserAgentName, - UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, - }, + }, + NodeProto: &v3corepb.Node{ + UserAgentName: gRPCUserAgentName, + UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, + ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, }, ClientDefaultListenerResourceNameTemplate: "%s", }, }, - {"unknownTopLevelFieldInFile", nilCredsConfigV2}, - {"unknownFieldInNodeProto", nilCredsConfigV2}, - {"unknownFieldInXdsServer", nilCredsConfigV2}, - {"multipleChannelCreds", nonNilCredsConfigV2}, - {"goodBootstrap", nonNilCredsConfigV2}, - {"multipleXDSServers", nonNilCredsConfigV2}, + {"unknownTopLevelFieldInFile", nilCredsConfigV3}, + {"unknownFieldInNodeProto", nilCredsConfigV3}, + {"unknownFieldInXdsServer", nilCredsConfigV3}, + {"multipleChannelCreds", nonNilCredsConfigV3}, + {"goodBootstrap", nonNilCredsConfigV3}, + {"multipleXDSServers", nonNilCredsConfigV3}, } for _, test := range tests { @@ -434,8 +411,8 @@ func TestNewConfigV3Support(t *testing.T) { name string wantConfig *Config }{ - {"serverDoesNotSupportsV3", nonNilCredsConfigV2}, - {"serverSupportsV3", nonNilCredsConfigV3}, + {"serverFeaturesIncludesXDSV3", nonNilCredsConfigV3}, + {"serverFeaturesExcludesXDSV3", nonNilCredsConfigV3}, } for _, test := range tests { @@ -454,14 
+431,14 @@ func TestNewConfigV3Support(t *testing.T) { func TestNewConfigBootstrapEnvPriority(t *testing.T) { oldFileReadFunc := bootstrapFileReadFunc bootstrapFileReadFunc = func(filename string) ([]byte, error) { - return fileReadFromFileMap(v2BootstrapFileMap, filename) + return fileReadFromFileMap(v3BootstrapFileMap, filename) } defer func() { bootstrapFileReadFunc = oldFileReadFunc }() - goodFileName1 := "goodBootstrap" - goodConfig1 := nonNilCredsConfigV2 + goodFileName1 := "serverFeaturesIncludesXDSV3" + goodConfig1 := nonNilCredsConfigV3 - goodFileName2 := "serverSupportsV3" + goodFileName2 := "serverFeaturesExcludesXDSV3" goodFileContent2 := v3BootstrapFileMap[goodFileName2] goodConfig2 := nonNilCredsConfigV3 @@ -664,12 +641,11 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { goodConfig := &Config{ XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV3, - NodeProto: v3NodeProto, + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + CredsType: "google_default", }, + NodeProto: v3NodeProto, CertProviderConfigs: map[string]*certprovider.BuildableConfig{ "fakeProviderInstance": wantCfg, }, @@ -758,12 +734,11 @@ func TestNewConfigWithServerListenerResourceNameTemplate(t *testing.T) { name: "goodServerListenerResourceNameTemplate", wantConfig: &Config{ XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV2, - NodeProto: v2NodeProto, + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + CredsType: "google_default", }, + NodeProto: v3NodeProto, ServerListenerResourceNameTemplate: 
"grpc/server?xds.resource.listening_address=%s", ClientDefaultListenerResourceNameTemplate: "%s", }, @@ -908,23 +883,20 @@ func TestNewConfigWithFederation(t *testing.T) { name: "good", wantConfig: &Config{ XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV2, - NodeProto: v2NodeProto, + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + CredsType: "google_default", }, + NodeProto: v3NodeProto, ServerListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/grpc/server?listening_address=%s", ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", Authorities: map[string]*Authority{ "xds.td.com": { ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", XDSServer: &ServerConfig{ - ServerURI: "td.com", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV3, - NodeProto: v3NodeProto, + ServerURI: "td.com", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + CredsType: "google_default", }, }, }, @@ -934,12 +906,11 @@ func TestNewConfigWithFederation(t *testing.T) { name: "goodWithDefaultDefaultClientListenerTemplate", wantConfig: &Config{ XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV2, - NodeProto: v2NodeProto, + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + CredsType: "google_default", }, + NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: 
"%s", }, }, @@ -947,12 +918,11 @@ func TestNewConfigWithFederation(t *testing.T) { name: "goodWithDefaultClientListenerTemplatePerAuthority", wantConfig: &Config{ XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV2, - NodeProto: v2NodeProto, + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + CredsType: "google_default", }, + NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", Authorities: map[string]*Authority{ "xds.td.com": { @@ -968,12 +938,11 @@ func TestNewConfigWithFederation(t *testing.T) { name: "goodWithNoServerPerAuthority", wantConfig: &Config{ XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - TransportAPI: version.TransportV2, - NodeProto: v2NodeProto, + ServerURI: "trafficdirector.googleapis.com:443", + Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), + CredsType: "google_default", }, + NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", Authorities: map[string]*Authority{ "xds.td.com": { @@ -998,10 +967,9 @@ func TestNewConfigWithFederation(t *testing.T) { func TestServerConfigMarshalAndUnmarshal(t *testing.T) { c := ServerConfig{ - ServerURI: "test-server", - Creds: nil, - CredsType: "test-creds", - TransportAPI: version.TransportV3, + ServerURI: "test-server", + Creds: nil, + CredsType: "test-creds", } bs, err := json.Marshal(c) diff --git a/xds/internal/xdsclient/e2e_test/authority_test.go b/xds/internal/xdsclient/e2e_test/authority_test.go index fdda7291aa18..f2cb06e300ab 100644 --- 
a/xds/internal/xdsclient/e2e_test/authority_test.go +++ b/xds/internal/xdsclient/e2e_test/authority_test.go @@ -32,7 +32,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -96,20 +95,17 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: defaultAuthorityServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: defaultAuthorityServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{Id: nodeID}, Authorities: map[string]*bootstrap.Authority{ testAuthority1: {}, testAuthority2: {}, testAuthority3: { XDSServer: &bootstrap.ServerConfig{ - ServerURI: nonDefaultAuthorityServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: nonDefaultAuthorityServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, }, }, diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go index 7119344296e6..4f2d9a0b381a 100644 --- a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go @@ -36,7 +36,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" 
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -534,11 +533,10 @@ func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Create an xDS client talking to a non-existent management server. client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{}, + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -581,11 +579,10 @@ func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) diff --git a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go index 2b3216c268cc..3b5e7c4b3fdf 100644 --- a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go +++ 
b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go @@ -37,7 +37,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/wrapperspb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -591,11 +590,10 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Create an xDS client talking to a non-existent management server. client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{}, + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -638,11 +636,10 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) diff --git a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go index 
e2e6d5dabc31..1218a4527a5a 100644 --- a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/federation_watchers_test.go @@ -62,7 +62,6 @@ func setupForFederationWatchersTest(t *testing.T) (*e2e.ManagementServer, string nodeID := uuid.New().String() bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ - Version: bootstrap.TransportV3, NodeID: nodeID, ServerURI: serverDefaultAuthority.Address, ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index ac186e3d25ff..0da626dda3d7 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -39,7 +39,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" @@ -583,11 +582,10 @@ func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Create an xDS client talking to a non-existent management server. 
client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{}, + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -630,11 +628,10 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go index 79d2bd7edbe6..6bf3f7a27c5e 100644 --- a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go @@ -36,7 +36,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/wrapperspb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -624,11 +623,10 @@ func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t 
*testing.T) { // Create an xDS client talking to a non-existent management server. client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{}, + ServerURI: "dummy management server address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -671,11 +669,10 @@ func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) diff --git a/xds/internal/xdsclient/e2e_test/resource_update_test.go b/xds/internal/xdsclient/e2e_test/resource_update_test.go index 7bef342feb3d..415c21998bae 100644 --- a/xds/internal/xdsclient/e2e_test/resource_update_test.go +++ b/xds/internal/xdsclient/e2e_test/resource_update_test.go @@ -37,7 +37,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/testing/protocmp" 
"google.golang.org/protobuf/types/known/anypb" @@ -245,12 +244,11 @@ func (s) TestHandleListenerResponseFromManagementServer(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -512,12 +510,11 @@ func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -755,12 +752,11 @@ func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: 
grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -1081,12 +1077,11 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", }, + NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 631793454b85..23fb2d4cf46c 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -52,11 +51,10 @@ func (s) TestLRSClient(t *testing.T) { xdsC, close, err := NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: fs.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{}, + ServerURI: fs.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + 
NodeProto: &v3corepb.Node{}, }, defaultClientWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) @@ -66,11 +64,9 @@ func (s) TestLRSClient(t *testing.T) { // Report to the same address should not create new ClientConn. store1, lrsCancel1 := xdsC.ReportLoad( &bootstrap.ServerConfig{ - ServerURI: fs.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{}, + ServerURI: fs.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", }, ) defer lrsCancel1() @@ -96,11 +92,9 @@ func (s) TestLRSClient(t *testing.T) { // Report to a different address should create new ClientConn. store2, lrsCancel2 := xdsC.ReportLoad( &bootstrap.ServerConfig{ - ServerURI: fs2.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV2, - NodeProto: &v3corepb.Node{}, + ServerURI: fs2.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", }, ) defer lrsCancel2() diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go index 4c42ae424971..96db8ef51387 100644 --- a/xds/internal/xdsclient/singleton.go +++ b/xds/internal/xdsclient/singleton.go @@ -94,11 +94,7 @@ func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func( singletonClient = &clientRefCounted{clientImpl: c, refCount: 1} singletonClientImplCreateHook() - nodeID := "" - if node, ok := config.XDSServer.NodeProto.(interface{ GetId() string }); ok { - nodeID = node.GetId() - } - logger.Infof("xDS node ID: %s", nodeID) + logger.Infof("xDS node ID: %s", config.NodeProto.GetId()) return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil } diff --git a/xds/internal/xdsclient/singleton_test.go b/xds/internal/xdsclient/singleton_test.go index 
491dc037d969..1875ea118d09 100644 --- a/xds/internal/xdsclient/singleton_test.go +++ b/xds/internal/xdsclient/singleton_test.go @@ -36,7 +36,6 @@ func (s) TestClientNewSingleton(t *testing.T) { cleanup, err := bootstrap.CreateFile(bootstrap.Options{ NodeID: nodeID, ServerURI: "non-existent-server-address", - Version: bootstrap.TransportV3, }) if err != nil { t.Fatal(err) diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go index 815ca25b27b7..3babfe3fbd28 100644 --- a/xds/internal/xdsclient/transport/loadreport_test.go +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/durationpb" @@ -47,16 +46,15 @@ func (s) TestReportLoad(t *testing.T) { // Construct the server config to represent the management server. nodeProto := &v3corepb.Node{Id: uuid.New().String()} serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: nodeProto, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a transport to the fake management server. tr, err := transport.New(transport.Options{ ServerCfg: serverCfg, + NodeProto: nodeProto, UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No ADS validation. StreamErrorHandler: func(error) {}, // No ADS stream error handling. Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. 
diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 814ca5f87263..10c863c5e6bd 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -143,6 +143,9 @@ type Options struct { Backoff func(retries int) time.Duration // Logger does logging with a prefix. Logger *grpclog.PrefixLogger + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node } // For overriding in unit tests. @@ -161,11 +164,6 @@ func New(opts Options) (*Transport, error) { return nil, errors.New("missing stream error handler when creating a new transport") } - node, ok := opts.ServerCfg.NodeProto.(*v3corepb.Node) - if !ok { - return nil, fmt.Errorf("unexpected type %T for NodeProto, want %T", opts.ServerCfg.NodeProto, &v3corepb.Node{}) - } - // Dial the xDS management with the passed in credentials. dopts := []grpc.DialOption{ opts.ServerCfg.Creds, @@ -193,7 +191,7 @@ func New(opts Options) (*Transport, error) { adsStreamErrHandler: opts.StreamErrorHandler, lrsStore: load.NewStore(), backoff: boff, - nodeProto: node, + nodeProto: opts.NodeProto, logger: opts.Logger, adsStreamCh: make(chan adsStream, 1), diff --git a/xds/internal/xdsclient/transport/transport_ack_nack_test.go b/xds/internal/xdsclient/transport/transport_ack_nack_test.go index 30b56674ed03..90f3ec1983be 100644 --- a/xds/internal/xdsclient/transport/transport_ack_nack_test.go +++ b/xds/internal/xdsclient/transport/transport_ack_nack_test.go @@ -137,11 +137,9 @@ func (s) TestSimpleAckAndNack(t *testing.T) { // Construct the server config to represent the management server. 
serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a new transport. @@ -149,6 +147,7 @@ func (s) TestSimpleAckAndNack(t *testing.T) { ServerCfg: serverCfg, UpdateHandler: dataModelValidator, StreamErrorHandler: func(err error) {}, + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -324,11 +323,9 @@ func (s) TestInvalidFirstResponse(t *testing.T) { // Construct the server config to represent the management server. serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a new transport. @@ -336,6 +333,7 @@ func (s) TestInvalidFirstResponse(t *testing.T) { ServerCfg: serverCfg, UpdateHandler: dataModelValidator, StreamErrorHandler: func(err error) {}, + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -453,11 +451,9 @@ func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { // Construct the server config to represent the management server. 
serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a new transport. @@ -465,6 +461,7 @@ func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { ServerCfg: serverCfg, UpdateHandler: dataModelValidator, StreamErrorHandler: func(err error) {}, + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) diff --git a/xds/internal/xdsclient/transport/transport_backoff_test.go b/xds/internal/xdsclient/transport/transport_backoff_test.go index 2753e459c366..a7726bbc509a 100644 --- a/xds/internal/xdsclient/transport/transport_backoff_test.go +++ b/xds/internal/xdsclient/transport/transport_backoff_test.go @@ -101,11 +101,9 @@ func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { // Construct the server config to represent the management server. nodeID := uuid.New().String() serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a new transport. 
Since we are only testing backoff behavior here, @@ -119,7 +117,8 @@ func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { default: } }, - Backoff: transportBackoff, + Backoff: transportBackoff, + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -269,11 +268,9 @@ func (s) TestTransport_RetriesAfterBrokenStream(t *testing.T) { // Construct the server config to represent the management server. serverCfg := bootstrap.ServerConfig{ - ServerURI: lis.Addr().String(), - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: lis.Addr().String(), + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a new transport. Since we are only testing backoff behavior here, @@ -287,7 +284,8 @@ func (s) TestTransport_RetriesAfterBrokenStream(t *testing.T) { default: } }, - Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -408,11 +406,9 @@ func (s) TestTransport_ResourceRequestedBeforeStreamCreation(t *testing.T) { // Construct the server config to represent the management server. nodeID := uuid.New().String() serverCfg := bootstrap.ServerConfig{ - ServerURI: lis.Addr().String(), - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerURI: lis.Addr().String(), + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a new transport. 
Since we are only testing backoff behavior here, @@ -422,6 +418,7 @@ func (s) TestTransport_ResourceRequestedBeforeStreamCreation(t *testing.T) { UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. StreamErrorHandler: func(error) {}, // No stream error handling. Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) diff --git a/xds/internal/xdsclient/transport/transport_new_test.go b/xds/internal/xdsclient/transport/transport_new_test.go index 60286d9eb156..5cbcb5da5d55 100644 --- a/xds/internal/xdsclient/transport/transport_new_test.go +++ b/xds/internal/xdsclient/transport/transport_new_test.go @@ -25,9 +25,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) @@ -54,8 +52,9 @@ func (s) TestNew(t *testing.T) { opts: transport.Options{ServerCfg: bootstrap.ServerConfig{ ServerURI: "server-address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + }, NodeProto: &v3corepb.Node{}, - }}, + }, wantErrStr: "missing update handler when creating a new transport", }, { @@ -64,35 +63,20 @@ func (s) TestNew(t *testing.T) { ServerCfg: bootstrap.ServerConfig{ ServerURI: "server-address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v3corepb.Node{}, }, + NodeProto: &v3corepb.Node{}, UpdateHandler: func(transport.ResourceUpdate) error { return nil }, }, wantErrStr: "missing stream error handler when creating a new transport", }, - { - name: "node proto version mismatch for v3", - opts: 
transport.Options{ - ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v2corepb.Node{}, - TransportAPI: version.TransportV3, - }, - UpdateHandler: func(transport.ResourceUpdate) error { return nil }, - StreamErrorHandler: func(error) {}, - }, - wantErrStr: "unexpected type *core.Node for NodeProto, want *corev3.Node", - }, { name: "happy case", opts: transport.Options{ ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v3corepb.Node{}, - TransportAPI: version.TransportV3, + ServerURI: "server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{}, UpdateHandler: func(transport.ResourceUpdate) error { return nil }, StreamErrorHandler: func(error) {}, }, diff --git a/xds/internal/xdsclient/transport/transport_resource_test.go b/xds/internal/xdsclient/transport/transport_resource_test.go index 62149fa4bf0b..eb050f639f58 100644 --- a/xds/internal/xdsclient/transport/transport_resource_test.go +++ b/xds/internal/xdsclient/transport/transport_resource_test.go @@ -178,11 +178,9 @@ func (s) TestHandleResponseFromManagementServer(t *testing.T) { // Construct the server config to represent the management server. serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - TransportAPI: version.TransportV3, - NodeProto: &v3corepb.Node{Id: uuid.New().String()}, + ServerURI: mgmtServer.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", } // Create a new transport. @@ -200,6 +198,7 @@ func (s) TestHandleResponseFromManagementServer(t *testing.T) { }, StreamErrorHandler: func(error) {}, // No stream error handling. 
Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + NodeProto: &v3corepb.Node{Id: uuid.New().String()}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go index 41ebc1b4fe55..80b44aa3cc0f 100644 --- a/xds/internal/xdsclient/transport/transport_test.go +++ b/xds/internal/xdsclient/transport/transport_test.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) type s struct { @@ -50,11 +49,10 @@ func (s) TestNewWithGRPCDial(t *testing.T) { // Create a new transport and ensure that the custom dialer was called. opts := Options{ ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v3corepb.Node{}, - TransportAPI: version.TransportV3, + ServerURI: "server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, + NodeProto: &v3corepb.Node{}, UpdateHandler: func(ResourceUpdate) error { return nil }, StreamErrorHandler: func(error) {}, } diff --git a/xds/internal/xdsclient/xdsresource/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go index f748cbc8ce29..0390412fdc89 100644 --- a/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -649,7 +649,7 @@ func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) // server-side." - A36 // Can specify v3 here, as will never get to this function // if v2. 
- routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig(), false) + routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig()) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go index d9c78997cffb..c629380d508b 100644 --- a/xds/internal/xdsclient/xdsresource/type.go +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -51,38 +51,38 @@ type UpdateMetadata struct { // IsListenerResource returns true if the provider URL corresponds to an xDS // Listener resource. func IsListenerResource(url string) bool { - return url == version.V2ListenerURL || url == version.V3ListenerURL + return url == version.V3ListenerURL } // IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS // HTTPConnManager resource. func IsHTTPConnManagerResource(url string) bool { - return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL + return url == version.V3HTTPConnManagerURL } // IsRouteConfigResource returns true if the provider URL corresponds to an xDS // RouteConfig resource. func IsRouteConfigResource(url string) bool { - return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL + return url == version.V3RouteConfigURL } // IsClusterResource returns true if the provider URL corresponds to an xDS // Cluster resource. func IsClusterResource(url string) bool { - return url == version.V2ClusterURL || url == version.V3ClusterURL + return url == version.V3ClusterURL } // IsEndpointsResource returns true if the provider URL corresponds to an xDS // Endpoints resource. func IsEndpointsResource(url string) bool { - return url == version.V2EndpointsURL || url == version.V3EndpointsURL + return url == version.V3EndpointsURL } // unwrapResource unwraps and returns the inner resource if it's in a resource // wrapper. 
The original resource is returned if it's not wrapped. func unwrapResource(r *anypb.Any) (*anypb.Any, error) { url := r.GetTypeUrl() - if url != version.V2ResourceWrapperURL && url != version.V3ResourceWrapperURL { + if url != version.V3ResourceWrapperURL { // Not wrapped. return r, nil } @@ -165,57 +165,3 @@ func (r ResourceType) String() string { return "UnknownResource" } } - -var v2ResourceTypeToURL = map[ResourceType]string{ - ListenerResource: version.V2ListenerURL, - HTTPConnManagerResource: version.V2HTTPConnManagerURL, - RouteConfigResource: version.V2RouteConfigURL, - ClusterResource: version.V2ClusterURL, - EndpointsResource: version.V2EndpointsURL, -} -var v3ResourceTypeToURL = map[ResourceType]string{ - ListenerResource: version.V3ListenerURL, - HTTPConnManagerResource: version.V3HTTPConnManagerURL, - RouteConfigResource: version.V3RouteConfigURL, - ClusterResource: version.V3ClusterURL, - EndpointsResource: version.V3EndpointsURL, -} - -// URL returns the transport protocol specific resource type URL. 
-func (r ResourceType) URL(v version.TransportAPI) string { - var mapping map[ResourceType]string - switch v { - case version.TransportV2: - mapping = v2ResourceTypeToURL - case version.TransportV3: - mapping = v3ResourceTypeToURL - default: - return "UnknownResource" - } - if url, ok := mapping[r]; ok { - return url - } - return "UnknownResource" -} - -var urlToResourceType = map[string]ResourceType{ - version.V2ListenerURL: ListenerResource, - version.V2RouteConfigURL: RouteConfigResource, - version.V2ClusterURL: ClusterResource, - version.V2EndpointsURL: EndpointsResource, - version.V2HTTPConnManagerURL: HTTPConnManagerResource, - version.V3ListenerURL: ListenerResource, - version.V3RouteConfigURL: RouteConfigResource, - version.V3ClusterURL: ClusterResource, - version.V3EndpointsURL: EndpointsResource, - version.V3HTTPConnManagerURL: HTTPConnManagerResource, -} - -// ResourceTypeFromURL returns the xDS resource type associated with the given -// resource type URL. -func ResourceTypeFromURL(url string) ResourceType { - if typ, ok := urlToResourceType[url]; ok { - return typ - } - return UnknownResource -} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 74af95e627fb..632ab8fb06e9 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -34,8 +34,6 @@ import ( "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/wrapperspb" - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" @@ -1382,31 
+1380,10 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { func (s) TestUnmarshalCluster(t *testing.T) { const ( - v2ClusterName = "v2clusterName" v3ClusterName = "v3clusterName" - v2Service = "v2Service" - v3Service = "v2Service" + v3Service = "v3Service" ) var ( - v2ClusterAny = testutils.MarshalAny(&v2xdspb.Cluster{ - Name: v2ClusterName, - ClusterDiscoveryType: &v2xdspb.Cluster_Type{Type: v2xdspb.Cluster_EDS}, - EdsClusterConfig: &v2xdspb.Cluster_EdsClusterConfig{ - EdsConfig: &v2corepb.ConfigSource{ - ConfigSourceSpecifier: &v2corepb.ConfigSource_Ads{ - Ads: &v2corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: v2Service, - }, - LbPolicy: v2xdspb.Cluster_ROUND_ROBIN, - LrsServer: &v2corepb.ConfigSource{ - ConfigSourceSpecifier: &v2corepb.ConfigSource_Self{ - Self: &v2corepb.SelfConfigSource{}, - }, - }, - }) - v3ClusterAny = testutils.MarshalAny(&v3clusterpb.Cluster{ Name: v3ClusterName, ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, @@ -1496,26 +1473,6 @@ func (s) TestUnmarshalCluster(t *testing.T) { wantName: "test", wantErr: true, }, - { - name: "v2 cluster", - resource: v2ClusterAny, - wantName: v2ClusterName, - wantUpdate: ClusterUpdate{ - ClusterName: v2ClusterName, - EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v2ClusterAny, - }, - }, - { - name: "v2 cluster wrapped", - resource: testutils.MarshalAny(&v2xdspb.Resource{Resource: v2ClusterAny}), - wantName: v2ClusterName, - wantUpdate: ClusterUpdate{ - ClusterName: v2ClusterName, - EDSServiceName: v2Service, LRSServerConfig: ClusterLRSServerSelf, - Raw: v2ClusterAny, - }, - }, { name: "v3 cluster", resource: v3ClusterAny, diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index e1b49c217873..ffcf9477f514 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -30,7 +30,6 @@ import ( 
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) @@ -43,14 +42,12 @@ func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { if !IsListenerResource(r.GetTypeUrl()) { return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2ListenerURL lis := &v3listenerpb.Listener{} if err := proto.Unmarshal(r.GetValue(), lis); err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - lu, err := processListener(lis, v2) + lu, err := processListener(lis) if err != nil { return lis.GetName(), ListenerUpdate{}, err } @@ -58,16 +55,16 @@ func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { return lis.GetName(), *lu, nil } -func processListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { +func processListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { if lis.GetApiListener() != nil { - return processClientSideListener(lis, v2) + return processClientSideListener(lis) } return processServerSideListener(lis) } // processClientSideListener checks if the provided Listener proto meets // the expected criteria. If so, it returns a non-empty routeConfigName. 
-func processClientSideListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { +func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { update := &ListenerUpdate{} apiLisAny := lis.GetApiListener().GetApiListener() @@ -99,7 +96,7 @@ func processClientSideListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUp } update.RouteConfigName = name case *v3httppb.HttpConnectionManager_RouteConfig: - routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig(), v2) + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig()) if err != nil { return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) } @@ -110,10 +107,6 @@ func processClientSideListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUp return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) } - if v2 { - return update, nil - } - // The following checks and fields only apply to xDS protocol versions v3+. update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index f753bd00a8cc..d2ce5ac34424 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/envconfig" @@ -35,11 +36,7 @@ import ( v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v3corepb 
"github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v2httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" - v2listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" rpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -58,30 +55,12 @@ import ( func (s) TestUnmarshalListener_ClientSide(t *testing.T) { const ( - v2LDSTarget = "lds.target.good:2222" v3LDSTarget = "lds.target.good:3333" - v2RouteConfigName = "v2RouteConfig" v3RouteConfigName = "v3RouteConfig" routeName = "routeName" - testVersion = "test-version-lds-client" ) var ( - v2Lis = testutils.MarshalAny(&v2xdspb.Listener{ - Name: v2LDSTarget, - ApiListener: &v2listenerpb.ApiListener{ - ApiListener: testutils.MarshalAny(&v2httppb.HttpConnectionManager{ - RouteSpecifier: &v2httppb.HttpConnectionManager_Rds{ - Rds: &v2httppb.Rds{ - ConfigSource: &v2corepb.ConfigSource{ - ConfigSourceSpecifier: &v2corepb.ConfigSource_Ads{Ads: &v2corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: v2RouteConfigName, - }, - }, - }), - }, - }) customFilter = &v3httppb.HttpFilter{ Name: "customFilter", ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, @@ -521,24 +500,6 @@ func (s) TestUnmarshalListener_ClientSide(t *testing.T) { Raw: v3LisWithFilters(unknownOptionalFilter), }, }, - { - name: "v2 listener resource", - resource: v2Lis, - wantName: v2LDSTarget, - wantUpdate: ListenerUpdate{ - RouteConfigName: v2RouteConfigName, - Raw: v2Lis, - }, - }, - { - name: "v2 listener resource wrapped", - resource: testutils.MarshalAny(&v2xdspb.Resource{Resource: v2Lis}), - wantName: v2LDSTarget, - wantUpdate: ListenerUpdate{ - RouteConfigName: v2RouteConfigName, - Raw: v2Lis, - }, 
- }, { name: "v3 listener resource", resource: v3LisWithFilters(), diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index edcbeaa2454b..057b1c7a3440 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/xds/internal/clusterspecifier" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) @@ -47,9 +46,7 @@ func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, erro return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2RouteConfigURL - u, err := generateRDSUpdateFromRouteConfiguration(rc, v2) + u, err := generateRDSUpdateFromRouteConfiguration(rc) if err != nil { return rc.GetName(), RouteConfigUpdate{}, err } @@ -73,7 +70,7 @@ func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, erro // field must be empty and whose route field must be set. Inside that route // message, the cluster field will contain the clusterName or weighted clusters // we are looking for. -func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, v2 bool) (RouteConfigUpdate, error) { +func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration) (RouteConfigUpdate, error) { vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) csps := make(map[string]clusterspecifier.BalancerConfig) if envconfig.XDSRLS { @@ -88,7 +85,7 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, v // ignored and not emitted by the xdsclient. 
var cspNames = make(map[string]bool) for _, vh := range rc.GetVirtualHosts() { - routes, cspNs, err := routesProtoToSlice(vh.Routes, csps, v2) + routes, cspNs, err := routesProtoToSlice(vh.Routes, csps) if err != nil { return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) } @@ -104,13 +101,11 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, v Routes: routes, RetryConfig: rc, } - if !v2 { - cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) - } - vhOut.HTTPFilterConfigOverride = cfgs + cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) } + vhOut.HTTPFilterConfigOverride = cfgs vhs = append(vhs, vhOut) } @@ -213,7 +208,7 @@ func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { return cfg, nil } -func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig, v2 bool) ([]*Route, map[string]bool, error) { +func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig) ([]*Route, map[string]bool, error) { var routesRet []*Route var cspNames = make(map[string]bool) for _, r := range routes { @@ -325,13 +320,11 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif continue } wc := WeightedCluster{Weight: w} - if !v2 { - cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) - if err != nil { - return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) - } - wc.HTTPFilterConfigOverride = cfgs + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) } + wc.HTTPFilterConfigOverride = cfgs route.WeightedClusters[c.GetName()] = wc totalWeight += w } @@ 
-409,13 +402,11 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif route.ActionType = RouteActionUnsupported } - if !v2 { - cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) - if err != nil { - return nil, nil, fmt.Errorf("route %+v: %v", r, err) - } - route.HTTPFilterConfigOverride = cfgs + cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v: %v", r, err) } + route.HTTPFilterConfigOverride = cfgs routesRet = append(routesRet, &route) } return routesRet, cspNames, nil diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index a74ce555a681..e24e16e5b028 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -37,8 +37,6 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/durationpb" - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" rpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -778,7 +776,7 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { envconfig.XDSRLS = test.rlsEnabled - gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, false) + gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc) if (gotError != nil) != test.wantError || !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty(), cmp.Transformer("FilterConfig", func(fc httpfilter.FilterConfig) string { @@ -836,45 
+834,11 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { ldsTarget = "lds.target.good:1111" uninterestingDomain = "uninteresting.domain" uninterestingClusterName = "uninterestingClusterName" - v2RouteConfigName = "v2RouteConfig" v3RouteConfigName = "v3RouteConfig" - v2ClusterName = "v2Cluster" v3ClusterName = "v3Cluster" ) var ( - v2VirtualHost = []*v2routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*v2routepb.Route{ - { - Match: &v2routepb.RouteMatch{PathSpecifier: &v2routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v2routepb.Route_Route{ - Route: &v2routepb.RouteAction{ - ClusterSpecifier: &v2routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - }, - }, - }, - }, - }, - { - Domains: []string{ldsTarget}, - Routes: []*v2routepb.Route{ - { - Match: &v2routepb.RouteMatch{PathSpecifier: &v2routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v2routepb.Route_Route{ - Route: &v2routepb.RouteAction{ - ClusterSpecifier: &v2routepb.RouteAction_Cluster{Cluster: v2ClusterName}, - }, - }, - }, - }, - }, - } - v2RouteConfig = testutils.MarshalAny(&v2xdspb.RouteConfiguration{ - Name: v2RouteConfigName, - VirtualHosts: v2VirtualHost, - }) v3VirtualHost = []*v3routepb.VirtualHost{ { Domains: []string{uninterestingDomain}, @@ -929,50 +893,6 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { }, wantErr: true, }, - { - name: "v2 routeConfig resource", - resource: v2RouteConfig, - wantName: v2RouteConfigName, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - }, - Raw: v2RouteConfig, - }, - }, - { - name: "v2 
routeConfig resource wrapped", - resource: testutils.MarshalAny(&v2xdspb.Resource{Resource: v2RouteConfig}), - wantName: v2RouteConfigName, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), - WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}, - ActionType: RouteActionRoute}}, - }, - }, - Raw: v2RouteConfig, - }, - }, { name: "v3 routeConfig resource", resource: v3RouteConfig, @@ -1608,7 +1528,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { defer func() { envconfig.XDSRingHash = oldRingHashSupport }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, _, err := routesProtoToSlice(tt.routes, nil, false) + got, _, err := routesProtoToSlice(tt.routes, nil) if (err != nil) != tt.wantErr { t.Fatalf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/xds/internal/xdsclient/xdsresource/version/version.go b/xds/internal/xdsclient/xdsresource/version/version.go index 2c4819abddc0..82ad5fe52c70 100644 --- a/xds/internal/xdsclient/xdsresource/version/version.go +++ b/xds/internal/xdsclient/xdsresource/version/version.go @@ -20,35 +20,11 @@ // versions. package version -// TransportAPI refers to the API version for xDS transport protocol. This -// describes the xDS gRPC endpoint and version of DiscoveryRequest/Response used -// on the wire. -type TransportAPI int - -const ( - // TransportV2 refers to the v2 xDS transport protocol. - TransportV2 TransportAPI = iota - // TransportV3 refers to the v3 xDS transport protocol. - TransportV3 -) - // Resource URLs. We need to be able to accept either version of the resource // regardless of the version of the transport protocol in use. 
const ( googleapiPrefix = "type.googleapis.com/" - V2ListenerType = "envoy.api.v2.Listener" - V2RouteConfigType = "envoy.api.v2.RouteConfiguration" - V2ClusterType = "envoy.api.v2.Cluster" - V2EndpointsType = "envoy.api.v2.ClusterLoadAssignment" - - V2ResourceWrapperURL = googleapiPrefix + "envoy.api.v2.Resource" - V2ListenerURL = googleapiPrefix + V2ListenerType - V2RouteConfigURL = googleapiPrefix + V2RouteConfigType - V2ClusterURL = googleapiPrefix + V2ClusterType - V2EndpointsURL = googleapiPrefix + V2EndpointsType - V2HTTPConnManagerURL = googleapiPrefix + "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" - V3ListenerType = "envoy.config.listener.v3.Listener" V3RouteConfigType = "envoy.config.route.v3.RouteConfiguration" V3ClusterType = "envoy.config.cluster.v3.Cluster" diff --git a/xds/server_test.go b/xds/server_test.go index a0983a6508ea..a965fb6ea315 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -326,8 +326,8 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummyBalancer", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, }, + NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, CertProviderConfigs: certProviderConfigs, }) @@ -358,8 +358,8 @@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummyBalancer", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, }, + NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, } if includeCertProviderCfg { @@ -607,8 +607,8 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummyBalancer", Creds: 
grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, }, + NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, }, }, @@ -618,8 +618,8 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { XDSServer: &bootstrap.ServerConfig{ ServerURI: "dummyBalancer", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV3, }, + NodeProto: xdstestutils.EmptyNodeProtoV3, CertProviderConfigs: certProviderConfigs, }, }, From 30d8c0a043db44fb4ca58d8a34a94d53f161b956 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 14 Feb 2023 22:57:10 -0500 Subject: [PATCH 779/998] xds/internal/xdsclient: NACK empty clusters in aggregate clusters (#6023) --- .../xdsclient/xdsresource/unmarshal_cds.go | 3 ++ .../xdsresource/unmarshal_cds_test.go | 32 +++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 49adefc4561c..e6d7261e59b9 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -177,6 +177,9 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } + if len(clusters.Clusters) == 0 { + return ClusterUpdate{}, fmt.Errorf("xds: aggregate cluster has empty clusters field in response: %+v", cluster) + } ret.ClusterType = ClusterTypeAggregate ret.PrioritizedClusterNames = clusters.Clusters return ret, nil diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 632ab8fb06e9..533fd85c3984 100644 --- 
a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -194,6 +194,38 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantUpdate: emptyUpdate, wantErr: true, }, + { + name: "aggregate-nil-clusters", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{}), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "aggregate-empty-clusters", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: []string{}, + }), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, } oldAggregateAndDNSSupportEnv := envconfig.XDSAggregateAndDNS From 6d612a3e6778b36fe50935f3fada6265f891631c Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 15 Feb 2023 08:51:43 -0800 Subject: [PATCH 780/998] resolver: update Resolver.Scheme() docstring to mention requirement of lowercase scheme names (#6014) --- clientconn.go | 3 ++ resolver/resolver.go | 11 ++++-- resolver_test.go | 93 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 103 insertions(+), 4 deletions(-) create mode 100644 resolver_test.go diff --git a/clientconn.go b/clientconn.go index a2afb4668e8d..b50c698a0557 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1537,6 +1537,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. 
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { diff --git a/resolver/resolver.go b/resolver/resolver.go index eb6a4690930c..6215e5ef2b02 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -41,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -289,8 +290,10 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. 
Scheme() string } diff --git a/resolver_test.go b/resolver_test.go new file mode 100644 index 000000000000..5b1e40c2a3dc --- /dev/null +++ b/resolver_test.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "testing" + + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/resolver" +) + +type wrapResolverBuilder struct { + resolver.Builder + scheme string +} + +func (w *wrapResolverBuilder) Scheme() string { + return w.scheme +} + +func init() { + resolver.Register(&wrapResolverBuilder{Builder: resolver.Get("passthrough"), scheme: "casetest"}) + resolver.Register(&wrapResolverBuilder{Builder: resolver.Get("dns"), scheme: "caseTest"}) +} + +func (s) TestResolverCaseSensitivity(t *testing.T) { + // This should find the "casetest" resolver instead of the "caseTest" + // resolver, even though the latter was registered later. "casetest" is + // "passthrough" and "caseTest" is "dns". With "passthrough" the dialer + // should see the target's address directly, but "dns" would be converted + // into a loopback IP (v4 or v6) address. 
+ target := "caseTest:///localhost:1234" + addrCh := make(chan string, 1) + customDialer := func(ctx context.Context, addr string) (net.Conn, error) { + select { + case addrCh <- addr: + default: + } + return nil, fmt.Errorf("not dialing with custom dialer") + } + + cc, err := Dial(target, WithContextDialer(customDialer), WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Unexpected Dial(%q) error: %v", target, err) + } + cc.Connect() + if got, want := <-addrCh, "localhost:1234"; got != want { + cc.Close() + t.Fatalf("Dialer got address %q; wanted %q", got, want) + } + cc.Close() + + // Clear addrCh for future use. + select { + case <-addrCh: + default: + } + + res := &wrapResolverBuilder{Builder: resolver.Get("dns"), scheme: "caseTest2"} + // This should not find the injected resolver due to the case not matching. + // This results in "passthrough" being used with the address as the whole + // target. + target = "caseTest2:///localhost:1234" + cc, err = Dial(target, WithContextDialer(customDialer), WithResolvers(res), WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Unexpected Dial(%q) error: %v", target, err) + } + cc.Connect() + if got, want := <-addrCh, target; got != want { + cc.Close() + t.Fatalf("Dialer got address %q; wanted %q", got, want) + } + cc.Close() +} From 0f02ca5cc927d2bc9dba043c9468779a2391ed4d Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 15 Feb 2023 14:44:45 -0500 Subject: [PATCH 781/998] gcp/observability: Switch observability module to use new opencensus instrumentation code (#6021) --- gcp/observability/go.mod | 3 ++- gcp/observability/go.sum | 2 ++ gcp/observability/opencensus.go | 10 +++++----- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index acbfadb8e1f7..ffd5bfa9f455 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ 
-9,7 +9,8 @@ require ( github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 golang.org/x/oauth2 v0.4.0 - google.golang.org/grpc v1.51.0 + google.golang.org/grpc v1.52.0 + google.golang.org/grpc/stats/opencensus v0.0.0-20230214213552-081499f2e8a4 ) require ( diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index b60536b2d605..ea8a84e214a7 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1056,6 +1056,8 @@ google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614G google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opencensus v0.0.0-20230214213552-081499f2e8a4 h1:JfKOhIhejpMhny1RYnvFO5QxXdVOEFSE12OSTgQvFus= +google.golang.org/grpc/stats/opencensus v0.0.0-20230214213552-081499f2e8a4/go.mod h1:l7+BYcyrDJFQo8nh4v8h5TJ6VfQ9QGBfFqVO7xoqQzI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 482c8d4f0078..760faaf2a1fc 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -30,6 +30,7 @@ import ( "go.opencensus.io/trace" "google.golang.org/grpc" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats/opencensus" ) var ( @@ -96,9 +97,9 @@ func startOpenCensus(config *config) error { return err } - var so trace.StartOptions + var to opencensus.TraceOptions if 
config.CloudTrace != nil { - so.Sampler = trace.ProbabilitySampler(config.CloudTrace.SamplingRate) + to.TS = trace.ProbabilitySampler(config.CloudTrace.SamplingRate) trace.RegisterExporter(exporter.(trace.Exporter)) logger.Infof("Start collecting and exporting trace spans with global_trace_sampling_rate=%.2f", config.CloudTrace.SamplingRate) } @@ -115,9 +116,8 @@ func startOpenCensus(config *config) error { logger.Infof("Start collecting and exporting metrics") } - // Only register default StatsHandlers if other things are setup correctly. - internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(grpc.StatsHandler(&ocgrpc.ServerHandler{StartOptions: so})) - internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(grpc.WithStatsHandler(&ocgrpc.ClientHandler{StartOptions: so})) + internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(opencensus.ServerOption(to)) + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(opencensus.DialOption(to)) logger.Infof("Enabled OpenCensus StatsHandlers for clients and servers") return nil From abff344ead8f49f3a89ae8be68b1538611950ec4 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 16 Feb 2023 17:33:17 -0500 Subject: [PATCH 782/998] stats/opencensus: Add per call latency metric (#6017) --- rpc_util.go | 36 +++++++++++++++++++++++ stats/opencensus/client_metrics.go | 14 +++++++++ stats/opencensus/e2e_test.go | 7 ++++- stats/opencensus/opencensus.go | 47 +++++++++++++++++++++++++++--- stream.go | 3 ++ 5 files changed, 102 insertions(+), 5 deletions(-) diff --git a/rpc_util.go b/rpc_util.go index cb7020ebecd7..0c65336ef6b9 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -159,6 +159,7 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -295,6 +296,41 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) 
after(c *callInfo, attempt *csAttempt) {} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly used to be used by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default // 4MB. 
diff --git a/stats/opencensus/client_metrics.go b/stats/opencensus/client_metrics.go index 3ea5e1c2c424..08bb9a8a4373 100644 --- a/stats/opencensus/client_metrics.go +++ b/stats/opencensus/client_metrics.go @@ -38,6 +38,8 @@ var ( clientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) clientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "The total number of client RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) clientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) + // Per call measure: + clientAPILatency = stats.Float64("grpc.io/client/api_latency", "The end-to-end time the gRPC library takes to complete an RPC from the application’s perspective", stats.UnitMilliseconds) ) var ( @@ -103,6 +105,18 @@ var ( TagKeys: []tag.Key{keyClientMethod}, Aggregation: millisecondsDistribution, } + + // The following metric is per call: + + // ClientAPILatencyView is the distribution of client api latency for the + // full RPC call, keyed on method and status. 
+ ClientAPILatencyView = &view.View{ + Measure: clientAPILatency, + Name: "grpc.io/client/api_latency", + Description: "Distribution of client api latency, by method and status", + TagKeys: []tag.Key{keyClientMethod, keyClientStatus}, + Aggregation: millisecondsDistribution, + } ) // DefaultClientViews is the set of client views which are considered the diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 501f13b586ef..156c3708e39d 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -244,6 +244,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { ServerReceivedMessagesPerRPCView, ClientRoundtripLatencyView, ServerLatencyView, + ClientAPILatencyView, } view.Register(allViews...) // Unregister unconditionally in this defer to correctly cleanup globals in @@ -760,6 +761,10 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { { metric: ServerLatencyView, }, + // Per call metrics: + { + metric: ClientAPILatencyView, + }, } // Unregister all the views. Unregistering a view causes a synchronous // upload of any collected data for the view to any registered exporters. @@ -780,7 +785,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { // declare the exact data you want, make sure the latency // measurement points for the two RPCs above fall within buckets // that fall into less than 5 seconds, which is the rpc timeout. - if metricName == "grpc.io/client/roundtrip_latency" || metricName == "grpc.io/server/server_latency" { + if metricName == "grpc.io/client/roundtrip_latency" || metricName == "grpc.io/server/server_latency" || metricName == "grpc.io/client/api_latency" { // RPCs have a context timeout of 5s, so all the recorded // measurements (one per RPC - two total) should fall within 5 // second buckets. 
diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index 64369deae863..13f8caa8bec5 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -20,12 +20,16 @@ package opencensus import ( "context" + "time" + ocstats "go.opencensus.io/stats" + "go.opencensus.io/tag" "go.opencensus.io/trace" "google.golang.org/grpc" "google.golang.org/grpc/internal" "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" ) var ( @@ -78,15 +82,50 @@ func ServerOption(to TraceOptions) grpc.ServerOption { } // unaryInterceptor handles per RPC context management. It also handles per RPC -// tracing and stats. +// tracing and stats, and records the latency for the full RPC call. func unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - return invoker(ctx, method, req, reply, cc, opts...) + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + callLatency := float64(time.Since(startTime)) / float64(time.Millisecond) + + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(keyClientMethod, removeLeadingSlash(method)), + tag.Upsert(keyClientStatus, canonicalString(status.Code(err))), + ), + ocstats.WithMeasurements( + clientAPILatency.M(callLatency), + ), + ) + + return err } // streamInterceptor handles per RPC context management. It also handles per RPC -// tracing and stats. +// tracing and stats, and records the latency for the full RPC call. func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - return streamer(ctx, desc, cc, method, opts...) 
+ startTime := time.Now() + + callback := func(err error) { + callLatency := float64(time.Since(startTime)) / float64(time.Millisecond) + ocstats.RecordWithOptions(context.Background(), + ocstats.WithTags( + tag.Upsert(keyClientMethod, method), + tag.Upsert(keyClientStatus, canonicalString(status.Code(err))), + ), + ocstats.WithMeasurements( + clientAPILatency.M(callLatency), + ), + ) + } + + opts = append([]grpc.CallOption{grpc.OnFinish(callback)}, opts...) + + s, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + return nil, err + } + return s, nil } type rpcInfo struct { diff --git a/stream.go b/stream.go index 89936a4f1665..ad14c74b39e5 100644 --- a/stream.go +++ b/stream.go @@ -971,6 +971,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) From 85b95dc6f9e313726b8591d5c64e4a0bcb1a5f9a Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:50:44 -0500 Subject: [PATCH 783/998] gcp/observability: Register new views (#6026) --- gcp/observability/observability_test.go | 6 ++++++ gcp/observability/opencensus.go | 5 ++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index e58a46452757..07e78be56e6f 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -388,6 +388,12 @@ func (s) TestOpenCensusIntegration(t *testing.T) { if value := fe.SeenViews["grpc.io/server/completed_rpcs"]; value != TypeOpenCensusViewCount { errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) } + if value := fe.SeenViews["grpc.io/client/roundtrip_latency"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, 
fmt.Errorf("unexpected type for grpc.io/client/completed_rpcs: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/server/server_latency"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("grpc.io/server/server_latency: %s != %s", value, TypeOpenCensusViewDistribution)) + } if fe.SeenSpans <= 0 { errs = append(errs, fmt.Errorf("unexpected number of seen spans: %v <= 0", fe.SeenSpans)) } diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 760faaf2a1fc..782e8333cfc2 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -25,7 +25,6 @@ import ( "contrib.go.opencensus.io/exporter/stackdriver" "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" - "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" "go.opencensus.io/trace" "google.golang.org/grpc" @@ -105,10 +104,10 @@ func startOpenCensus(config *config) error { } if config.CloudMonitoring != nil { - if err := view.Register(ocgrpc.ServerStartedRPCsView, ocgrpc.ClientCompletedRPCsView); err != nil { + if err := view.Register(opencensus.ClientStartedRPCsView, opencensus.ClientCompletedRPCsView, opencensus.ClientRoundtripLatencyView); err != nil { return fmt.Errorf("failed to register default client views: %v", err) } - if err := view.Register(ocgrpc.ClientStartedRPCsView, ocgrpc.ServerCompletedRPCsView); err != nil { + if err := view.Register(opencensus.ServerStartedRPCsView, opencensus.ServerCompletedRPCsView, opencensus.ServerLatencyView); err != nil { return fmt.Errorf("failed to register default server views: %v", err) } view.SetReportingPeriod(defaultMetricsReportingInterval) From 8702a2ebf4b01eb7389c8db9fe038063bfc8e2bb Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:51:28 -0500 Subject: [PATCH 784/998] stats/opencensus: Add top level call span (#6030) --- stats/opencensus/e2e_test.go | 
35 +++++++++++++++++-- stats/opencensus/opencensus.go | 63 ++++++++++++++++++++-------------- stats/opencensus/trace.go | 2 +- 3 files changed, 72 insertions(+), 28 deletions(-) diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 156c3708e39d..120996cf2f60 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -1209,7 +1209,7 @@ func (s) TestSpan(t *testing.T) { TraceOptions: 1, }, spanKind: trace.SpanKindClient, - name: "grpc.testing.TestService.UnaryCall", + name: "Attempt.grpc.testing.TestService.UnaryCall", messageEvents: []trace.MessageEvent{ { EventType: trace.MessageEventTypeSent, @@ -1225,6 +1225,15 @@ func (s) TestSpan(t *testing.T) { }, hasRemoteParent: false, }, + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindClient, + name: "Sent.grpc.testing.TestService.UnaryCall", + hasRemoteParent: false, + childSpanCount: 1, + }, } if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) @@ -1234,6 +1243,13 @@ func (s) TestSpan(t *testing.T) { fe.mu.Unlock() t.Fatalf("Error in runtime data assertions: %v", err) } + if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[1].sc.SpanID) { + t.Fatalf("server span should point to the client attempt span as its parent. parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[1].sc.SpanID) + } + if !cmp.Equal(fe.seenSpans[1].parentSpanID, fe.seenSpans[2].sc.SpanID) { + t.Fatalf("client attempt span should point to the client call span as its parent. 
parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[1].parentSpanID, fe.seenSpans[2].sc.SpanID) + } + fe.seenSpans = nil fe.mu.Unlock() @@ -1281,12 +1297,21 @@ func (s) TestSpan(t *testing.T) { }, hasRemoteParent: true, }, + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindClient, + name: "Sent.grpc.testing.TestService.FullDuplexCall", + hasRemoteParent: false, + childSpanCount: 1, + }, { sc: trace.SpanContext{ TraceOptions: 1, }, spanKind: trace.SpanKindClient, - name: "grpc.testing.TestService.FullDuplexCall", + name: "Attempt.grpc.testing.TestService.FullDuplexCall", messageEvents: []trace.MessageEvent{ { EventType: trace.MessageEventTypeSent, @@ -1310,4 +1335,10 @@ func (s) TestSpan(t *testing.T) { if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { t.Fatalf("Error in runtime data assertions: %v", err) } + if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) { + t.Fatalf("server span should point to the client attempt span as its parent. parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) + } + if !cmp.Equal(fe.seenSpans[2].parentSpanID, fe.seenSpans[1].sc.SpanID) { + t.Fatalf("client attempt span should point to the client call span as its parent. parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[2].parentSpanID, fe.seenSpans[1].sc.SpanID) + } } diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index 13f8caa8bec5..350cebfb4aca 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -20,6 +20,7 @@ package opencensus import ( "context" + "strings" "time" ocstats "go.opencensus.io/stats" @@ -61,7 +62,8 @@ type TraceOptions struct { // conjunction, and do not work standalone. It is not supported to use this // alongside another stats handler dial option. 
func DialOption(to TraceOptions) grpc.DialOption { - return joinDialOptions(grpc.WithChainUnaryInterceptor(unaryInterceptor), grpc.WithChainStreamInterceptor(streamInterceptor), grpc.WithStatsHandler(&clientStatsHandler{to: to})) + csh := &clientStatsHandler{to: to} + return joinDialOptions(grpc.WithChainUnaryInterceptor(csh.unaryInterceptor), grpc.WithChainStreamInterceptor(csh.streamInterceptor), grpc.WithStatsHandler(csh)) } // ServerOption returns a server option which enables OpenCensus instrumentation @@ -81,46 +83,57 @@ func ServerOption(to TraceOptions) grpc.ServerOption { return grpc.StatsHandler(&serverStatsHandler{to: to}) } -// unaryInterceptor handles per RPC context management. It also handles per RPC -// tracing and stats, and records the latency for the full RPC call. -func unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - startTime := time.Now() - err := invoker(ctx, method, req, reply, cc, opts...) - callLatency := float64(time.Since(startTime)) / float64(time.Millisecond) +// createCallSpan creates a call span if tracing is enabled, which will be put +// in the context provided if created. +func (csh *clientStatsHandler) createCallSpan(ctx context.Context, method string) (context.Context, *trace.Span) { + var span *trace.Span + if !csh.to.DisableTrace { + mn := "Sent." + strings.Replace(removeLeadingSlash(method), "/", ".", -1) + ctx, span = trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS), trace.WithSpanKind(trace.SpanKindClient)) + } + return ctx, span +} - ocstats.RecordWithOptions(ctx, +// perCallTracesAndMetrics records per call spans and metrics. 
+func perCallTracesAndMetrics(err error, span *trace.Span, startTime time.Time, method string) { + s := status.Convert(err) + if span != nil { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + span.End() + } + callLatency := float64(time.Since(startTime)) / float64(time.Millisecond) + ocstats.RecordWithOptions(context.Background(), ocstats.WithTags( - tag.Upsert(keyClientMethod, removeLeadingSlash(method)), - tag.Upsert(keyClientStatus, canonicalString(status.Code(err))), + tag.Upsert(keyClientMethod, method), + tag.Upsert(keyClientStatus, canonicalString(s.Code())), ), ocstats.WithMeasurements( clientAPILatency.M(callLatency), ), ) +} +// unaryInterceptor handles per RPC context management. It also handles per RPC +// tracing and stats by creating a top level call span and recording the latency +// for the full RPC call. +func (csh *clientStatsHandler) unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + startTime := time.Now() + ctx, span := csh.createCallSpan(ctx, method) + err := invoker(ctx, method, req, reply, cc, opts...) + perCallTracesAndMetrics(err, span, startTime, method) return err } // streamInterceptor handles per RPC context management. It also handles per RPC -// tracing and stats, and records the latency for the full RPC call. -func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { +// tracing and stats by creating a top level call span and recording the latency +// for the full RPC call. 
+func (csh *clientStatsHandler) streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { startTime := time.Now() - + ctx, span := csh.createCallSpan(ctx, method) callback := func(err error) { - callLatency := float64(time.Since(startTime)) / float64(time.Millisecond) - ocstats.RecordWithOptions(context.Background(), - ocstats.WithTags( - tag.Upsert(keyClientMethod, method), - tag.Upsert(keyClientStatus, canonicalString(status.Code(err))), - ), - ocstats.WithMeasurements( - clientAPILatency.M(callLatency), - ), - ) + perCallTracesAndMetrics(err, span, startTime, method) } - opts = append([]grpc.CallOption{grpc.OnFinish(callback)}, opts...) - s, err := streamer(ctx, desc, cc, method, opts...) if err != nil { return nil, err diff --git a/stats/opencensus/trace.go b/stats/opencensus/trace.go index c2ad518545b8..afe6729c3c86 100644 --- a/stats/opencensus/trace.go +++ b/stats/opencensus/trace.go @@ -39,7 +39,7 @@ type traceInfo struct { // about this span into gRPC Metadata. func (csh *clientStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) (context.Context, *traceInfo) { // TODO: get consensus on whether this method name of "s.m" is correct. - mn := strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) + mn := "Attempt." 
+ strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) _, span := trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS), trace.WithSpanKind(trace.SpanKindClient)) tcBin := propagation.Binary(span.SpanContext()) From 5353eaa44095b31752e57f89034137f31fc7f763 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 21 Feb 2023 19:30:13 -0800 Subject: [PATCH 785/998] testing: add helpers to configure cluster specifier plugin type (#5977) --- internal/testutils/xds/e2e/clientresources.go | 116 ++++ .../xds_rls_clusterspecifier_plugin_test.go | 38 +- .../resolver/cluster_specifier_plugin_test.go | 655 ++++++++++-------- xds/internal/resolver/xds_resolver_test.go | 151 +--- 4 files changed, 536 insertions(+), 424 deletions(-) diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index 9d9012e23838..7e54d0a95461 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -26,6 +26,7 @@ import ( "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/golang/protobuf/proto" "google.golang.org/grpc/internal/testutils" + "google.golang.org/protobuf/types/known/anypb" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -310,6 +311,121 @@ func DefaultRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.Rou } } +// RouteConfigClusterSpecifierType determines the cluster specifier type for the +// route actions configured in the returned RouteConfiguration resource. +type RouteConfigClusterSpecifierType int + +const ( + // RouteConfigClusterSpecifierTypeCluster results in the cluster specifier + // being set to a RouteAction_Cluster. 
+ RouteConfigClusterSpecifierTypeCluster RouteConfigClusterSpecifierType = iota + // RouteConfigClusterSpecifierTypeWeightedCluster results in the cluster + // specifier being set to RouteAction_WeightedClusters. + RouteConfigClusterSpecifierTypeWeightedCluster + // RouteConfigClusterSpecifierTypeClusterSpecifierPlugin results in the + // cluster specifier being set to a RouteAction_ClusterSpecifierPlugin. + RouteConfigClusterSpecifierTypeClusterSpecifierPlugin +) + +// RouteConfigOptions contains options to configure a RouteConfiguration +// resource. +type RouteConfigOptions struct { + // RouteConfigName is the name of the RouteConfiguration resource. + RouteConfigName string + // ListenerName is the name of the Listener resource which uses this + // RouteConfiguration. + ListenerName string + // ClusterSpecifierType determines the cluster specifier type. + ClusterSpecifierType RouteConfigClusterSpecifierType + + // ClusterName is name of the cluster resource used when the cluster + // specifier type is set to RouteConfigClusterSpecifierTypeCluster. + // + // Default value of "A" is used if left unspecified. + ClusterName string + // WeightedClusters is a map from cluster name to weights, and is used when + // the cluster specifier type is set to + // RouteConfigClusterSpecifierTypeWeightedCluster. + // + // Default value of {"A": 75, "B": 25} is used if left unspecified. + WeightedClusters map[string]int + // The below two fields specify the name of the cluster specifier plugin and + // its configuration, and are used when the cluster specifier type is set to + // RouteConfigClusterSpecifierTypeClusterSpecifierPlugin. Tests are expected + // to provide valid values for these fields when appropriate. + ClusterSpecifierPluginName string + ClusterSpecifierPluginConfig *anypb.Any +} + +// RouteConfigResourceWithOptions returns a RouteConfiguration resource +// configured with the provided options. 
+func RouteConfigResourceWithOptions(opts RouteConfigOptions) *v3routepb.RouteConfiguration { + switch opts.ClusterSpecifierType { + case RouteConfigClusterSpecifierTypeCluster: + clusterName := opts.ClusterName + if clusterName == "" { + clusterName = "A" + } + return &v3routepb.RouteConfiguration{ + Name: opts.RouteConfigName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{opts.ListenerName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}, + }}, + }}, + } + case RouteConfigClusterSpecifierTypeWeightedCluster: + weightedClusters := opts.WeightedClusters + if weightedClusters == nil { + weightedClusters = map[string]int{"A": 75, "B": 25} + } + clusters := []*v3routepb.WeightedCluster_ClusterWeight{} + for name, weight := range weightedClusters { + clusters = append(clusters, &v3routepb.WeightedCluster_ClusterWeight{ + Name: name, + Weight: &wrapperspb.UInt32Value{Value: uint32(weight)}, + }) + } + return &v3routepb.RouteConfiguration{ + Name: opts.RouteConfigName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{opts.ListenerName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{Clusters: clusters}}, + }}, + }}, + }}, + } + case RouteConfigClusterSpecifierTypeClusterSpecifierPlugin: + return &v3routepb.RouteConfiguration{ + Name: opts.RouteConfigName, + ClusterSpecifierPlugins: []*v3routepb.ClusterSpecifierPlugin{{ + Extension: &v3corepb.TypedExtensionConfig{ + Name: opts.ClusterSpecifierPluginName, + TypedConfig: opts.ClusterSpecifierPluginConfig, + }}, + }, + VirtualHosts: 
[]*v3routepb.VirtualHost{{ + Domains: []string{opts.ListenerName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: opts.ClusterSpecifierPluginName}, + }}, + }}, + }}, + } + default: + panic(fmt.Sprintf("unsupported cluster specifier plugin type: %v", opts.ClusterSpecifierType)) + } +} + // DefaultCluster returns a basic xds Cluster resource. func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) *v3clusterpb.Cluster { return ClusterResourceWithOptions(&ClusterOptions{ diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index 13d5ba43670c..8bc021c91fae 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -33,7 +33,6 @@ import ( "google.golang.org/protobuf/types/known/durationpb" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -53,7 +52,15 @@ func defaultClientResourcesWithRLSCSP(lb e2e.LoadBalancingPolicy, params e2e.Res return e2e.UpdateOptions{ NodeID: params.NodeID, Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, - Routes: []*v3routepb.RouteConfiguration{defaultRouteConfigWithRLSCSP(routeConfigName, params.DialTarget, rlsProto)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + 
RouteConfigName: routeConfigName, + ListenerName: params.DialTarget, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "rls-csp", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: rlsProto, + }), + })}, Clusters: []*v3clusterpb.Cluster{e2e.ClusterResourceWithOptions(&e2e.ClusterOptions{ ClusterName: clusterName, ServiceName: endpointsName, @@ -64,33 +71,6 @@ func defaultClientResourcesWithRLSCSP(lb e2e.LoadBalancingPolicy, params e2e.Res } } -// defaultRouteConfigWithRLSCSP returns a basic xds RouteConfig resource with an -// RLS Cluster Specifier Plugin configured as the route. -func defaultRouteConfigWithRLSCSP(routeName, ldsTarget string, rlsProto *rlspb.RouteLookupConfig) *v3routepb.RouteConfiguration { - return &v3routepb.RouteConfiguration{ - Name: routeName, - ClusterSpecifierPlugins: []*v3routepb.ClusterSpecifierPlugin{ - { - Extension: &v3corepb.TypedExtensionConfig{ - Name: "rls-csp", - TypedConfig: testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ - RouteLookupConfig: rlsProto, - }), - }, - }, - }, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: "rls-csp"}, - }}, - }}, - }}, - } -} - // TestRLSinxDS tests an xDS configured system with an RLS Balancer present. 
// // This test sets up the RLS Balancer using the RLS Cluster Specifier Plugin, diff --git a/xds/internal/resolver/cluster_specifier_plugin_test.go b/xds/internal/resolver/cluster_specifier_plugin_test.go index 9ac2ca2b52a0..2a01beaeebc6 100644 --- a/xds/internal/resolver/cluster_specifier_plugin_test.go +++ b/xds/internal/resolver/cluster_specifier_plugin_test.go @@ -20,190 +20,248 @@ package resolver import ( "context" + "encoding/json" + "fmt" "testing" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" "github.com/google/go-cmp/cmp" + "github.com/google/uuid" "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/testutils" + xdsbootstrap "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clustermanager" "google.golang.org/grpc/xds/internal/clusterspecifier" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" ) func init() { - balancer.Register(cspB{}) + balancer.Register(cspBalancerBuilder{}) + clusterspecifier.Register(testClusterSpecifierPlugin{}) } -type cspB struct{} +// cspBalancerBuilder is a no-op LB policy which is referenced by the +// testClusterSpecifierPlugin. 
+type cspBalancerBuilder struct{} -func (cspB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (cspBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { return nil } -func (cspB) Name() string { +func (cspBalancerBuilder) Name() string { return "csp_experimental" } -type cspConfig struct { +type cspBalancerConfig struct { + serviceconfig.LoadBalancingConfig ArbitraryField string `json:"arbitrary_field"` } -// TestXDSResolverClusterSpecifierPlugin tests that cluster specifier plugins -// produce the correct service config, and that the config selector routes to a -// cluster specifier plugin supported by this service config (i.e. prefixed with -// a cluster specifier plugin prefix). -func (s) TestXDSResolverClusterSpecifierPlugin(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() +func (cspBalancerBuilder) ParseConfig(lbCfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &cspBalancerConfig{} + if err := json.Unmarshal(lbCfg, cfg); err != nil { + return nil, err + } + return cfg, nil + +} + +// testClusterSpecifierPlugin is a test cluster specifier plugin which returns +// an LB policy configuration specifying the cspBalancer. +type testClusterSpecifierPlugin struct { +} + +func (testClusterSpecifierPlugin) TypeURLs() []string { + // The config for this plugin contains a wrapperspb.StringValue, and since + // we marshal that proto as an Any proto, the type URL on the latter gets + // set to "type.googleapis.com/google.protobuf.StringValue". If we wanted a + // more descriptive type URL for this test plugin, we would have to define a + // proto package with a message for the configuration. That would be + // overkill for a test. Therefore, this seems to be an acceptable tradeoff. 
+ return []string{"type.googleapis.com/google.protobuf.StringValue"} +} +func (testClusterSpecifierPlugin) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("testClusterSpecifierPlugin: nil configuration message provided") + } + any, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("testClusterSpecifierPlugin: error parsing config %v: got type %T, want *anypb.Any", cfg, cfg) + } + lbCfg := new(wrapperspb.StringValue) + if err := ptypes.UnmarshalAny(any, lbCfg); err != nil { + return nil, fmt.Errorf("testClusterSpecifierPlugin: error parsing config %v: %v", cfg, err) + } + return []map[string]interface{}{{"csp_experimental": cspBalancerConfig{ArbitraryField: lbCfg.GetValue()}}}, nil +} + +// TestResolverClusterSpecifierPlugin tests the case where a route configuration +// containing cluster specifier plugins is sent by the management server. The +// test verifies that the service config output by the resolver contains the LB +// policy specified by the cluster specifier plugin, and the config selector +// returns the cluster associated with the cluster specifier plugin. +// +// The test also verifies that a change in the cluster specifier plugin config +// result in appropriate change in the service config pushed by the resolver. +func (s) TestResolverClusterSpecifierPlugin(t *testing.T) { + // Env var GRPC_EXPERIMENTAL_XDS_RLS_LB controls whether the xDS client + // allows routes with cluster specifier plugin as their route action. + oldRLS := envconfig.XDSRLS + envconfig.XDSRLS = true + defer func() { + envconfig.XDSRLS = oldRLS + }() + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + // Configure listener and route configuration resources on the management + // server. + const serviceName = "my-service-client-side-xds" + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspA", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "anything"}), + })}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, - }, - }, - // Top level csp config here - the value of cspA should get directly - // placed as a child policy of xds cluster manager. 
- ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anything"}}}}, - }, nil) - - gotState, err := tcc.stateCh.Receive(ctx) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Wait for an update from the resolver, and verify the service config. + val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) - } - wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster_specifier_plugin:cspA":{ - "childPolicy":[{"csp_experimental":{"arbitrary_field":"anything"}}] - } - } - }}]}` - - wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anything" + } + } + ] + } + } + } + } + ] +}`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service 
config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } - cluster := clustermanager.GetPickedClusterForTesting(res.Context) - clusterWant := clusterSpecifierPluginPrefix + "cspA" - if cluster != clusterWant { - t.Fatalf("cluster: %+v, want: %+v", cluster, clusterWant) + gotCluster := clustermanager.GetPickedClusterForTesting(res.Context) + wantCluster := "cluster_specifier_plugin:cspA" + if gotCluster != wantCluster { + t.Fatalf("config selector returned cluster: %v, want: %v", gotCluster, wantCluster) } -} -// TestXDSResolverClusterSpecifierPluginConfigUpdate tests that cluster -// specifier plugins produce the correct service config, and that on an update -// to the CSP Configuration, the new config is accounted for in the output -// service config. -func (s) TestXDSResolverClusterSpecifierPluginConfigUpdate(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() + // Change the cluster specifier plugin configuration. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspA", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "changed"}), + })}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, - }, - }, - // Top level csp config here - the value of cspA should get directly - // placed as a child policy of xds cluster manager. - ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anything"}}}}, - }, nil) - - gotState, err := tcc.stateCh.Receive(ctx) + // Wait for an update from the resolver, and verify the service config. 
+ val, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) - } - wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster_specifier_plugin:cspA":{ - "childPolicy":[{"csp_experimental":{"arbitrary_field":"anything"}}] - } - } - }}]}` - - wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) - } - - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, - }, - }, - // Top level csp config here - the value of cspA should get directly - // placed as a child policy of xds cluster manager. 
- ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "changed"}}}}, - }, nil) - - gotState, err = tcc.stateCh.Receive(ctx) - if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - rState = gotState.(resolver.State) - if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) - } - wantJSON = `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster_specifier_plugin:cspA":{ - "childPolicy":[{"csp_experimental":{"arbitrary_field":"changed"}}] - } - } - }}]}` - - wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "changed" + } + } + ] + } + } + } + } + ] +}`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } } @@ -211,158 +269,215 @@ func (s) TestXDSResolverClusterSpecifierPluginConfigUpdate(t *testing.T) { // their corresponding configurations remain in service config if RPCs are in // flight. 
func (s) TestXDSResolverDelayedOnCommittedCSP(t *testing.T) { - xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) - defer xdsR.Close() - defer cancel() - + // Env var GRPC_EXPERIMENTAL_XDS_RLS_LB controls whether the xDS client + // allows routes with cluster specifier plugin as their route action. + oldRLS := envconfig.XDSRLS + envconfig.XDSRLS = true + defer func() { + envconfig.XDSRLS = oldRLS + }() + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + // Configure listener and route configuration resources on the management + // server. + const serviceName = "my-service-client-side-xds" + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspA", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "anythingA"}), + })}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - xdsC.InvokeWatchRouteConfigCallback("", 
xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspA"}}, - }, - }, - // Top level csp config here - the value of cspA should get directly - // placed as a child policy of xds cluster manager. - ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspA": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingA"}}}}, - }, nil) - - gotState, err := tcc.stateCh.Receive(ctx) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Wait for an update from the resolver, and verify the service config. + val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) - } - wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster_specifier_plugin:cspA":{ - "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingA"}}] - } - } - }}]}` - - wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingA" + } + } + ] + } + } + } + } + ] +}`) 
if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + resOld, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) - } - - cluster := clustermanager.GetPickedClusterForTesting(res.Context) - clusterWant := clusterSpecifierPluginPrefix + "cspA" - if cluster != clusterWant { - t.Fatalf("cluster: %+v, want: %+v", cluster, clusterWant) - } - // delay res.OnCommitted() - - // Perform TWO updates to ensure the old config selector does not hold a reference to cspA - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspB"}}, - }, - }, - // Top level csp config here - the value of cspB should get directly - // placed as a child policy of xds cluster manager. - ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspB": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingB"}}}}, - }, nil) - tcc.stateCh.Receive(ctx) // Ignore the first update. 
- - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspB"}}, - }, - }, - // Top level csp config here - the value of cspB should get directly - // placed as a child policy of xds cluster manager. - ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspB": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingB"}}}}, - }, nil) - - gotState, err = tcc.stateCh.Receive(ctx) + t.Fatalf("cs.SelectConfig(): %v", err) + } + + gotCluster := clustermanager.GetPickedClusterForTesting(resOld.Context) + wantCluster := "cluster_specifier_plugin:cspA" + if gotCluster != wantCluster { + t.Fatalf("config selector returned cluster: %v, want: %v", gotCluster, wantCluster) + } + + // Delay resOld.OnCommitted(). As long as there are pending RPCs to removed + // clusters, they still appear in the service config. + + // Change the cluster specifier plugin configuration. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspB", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "anythingB"}), + })}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for an update from the resolver, and verify the service config. 
+ val, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) - } - wantJSON2 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster_specifier_plugin:cspA":{ - "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingA"}}] - }, - "cluster_specifier_plugin:cspB":{ - "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingB"}}] - } - } - }}]}` - - wantSCParsed2 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON2) - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed2.Config)) - } - - // Invoke OnCommitted; should lead to a service config update that deletes + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingA" + } + } + ] + }, + "cluster_specifier_plugin:cspB": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingB" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, 
wantSCParsed.Config)) + } + + // Perform an RPC and ensure that it is routed to the new cluster. + cs = iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + resNew, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) + if err != nil { + t.Fatalf("cs.SelectConfig(): %v", err) + } + + gotCluster = clustermanager.GetPickedClusterForTesting(resNew.Context) + wantCluster = "cluster_specifier_plugin:cspB" + if gotCluster != wantCluster { + t.Fatalf("config selector returned cluster: %v, want: %v", gotCluster, wantCluster) + } + + // Invoke resOld.OnCommitted; should lead to a service config update that deletes // cspA. - res.OnCommitted() - - xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ - VirtualHosts: []*xdsresource.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*xdsresource.Route{{Prefix: newStringP(""), ClusterSpecifierPlugin: "cspB"}}, - }, - }, - // Top level csp config here - the value of cspB should get directly - // placed as a child policy of xds cluster manager. 
- ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{"cspB": []map[string]interface{}{{"csp_experimental": cspConfig{ArbitraryField: "anythingB"}}}}, - }, nil) - gotState, err = tcc.stateCh.Receive(ctx) + resOld.OnCommitted() + + val, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("Error waiting for UpdateState to be called: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) - } - wantJSON3 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster_specifier_plugin:cspB":{ - "childPolicy":[{"csp_experimental":{"arbitrary_field":"anythingB"}}] - } - } - }}]}` - - wantSCParsed3 := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantJSON3) - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed3.Config)) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspB": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingB" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } } diff --git a/xds/internal/resolver/xds_resolver_test.go 
b/xds/internal/resolver/xds_resolver_test.go index d97993d86e1f..80f7fb75b126 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -82,7 +82,6 @@ const ( var target = resolver.Target{URL: *testutils.MustParseURL("xds:///" + targetStr)} var routerFilter = xdsresource.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} -var routerFilterList = []xdsresource.HTTPFilter{routerFilter} type s struct { grpctest.Tester @@ -108,11 +107,17 @@ type testClientConn struct { } func (t *testClientConn) UpdateState(s resolver.State) error { - t.stateCh.Replace(s) + // Tests should ideally consume all state updates, and if one happens + // unexpectedly, tests should catch it. Hence using `Send()` here. + t.stateCh.Send(s) return nil } func (t *testClientConn) ReportError(err error) { + // When used with a go-control-plane management server that continuously + // resends resources which are NACKed by the xDS client, using a `Replace()` + // here simplifies tests which will have access to the most recently + // received error. t.errorCh.Replace(err) } @@ -708,25 +713,12 @@ func (s) TestResolverGoodServiceUpdate(t *testing.T) { }{ { // A route configuration with a single cluster. 
- routeConfig: &v3routepb.RouteConfiguration{ - Name: rdsName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsName}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - { - Name: "test-cluster-1", - Weight: &wrapperspb.UInt32Value{Value: 100}, - }, - }, - }}, - }}, - }}, - }}, - }, + routeConfig: e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: ldsName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeCluster, + ClusterName: "test-cluster-1", + }), wantServiceConfig: ` { "loadBalancingConfig": [{ @@ -747,29 +739,12 @@ func (s) TestResolverGoodServiceUpdate(t *testing.T) { }, { // A route configuration with a two new clusters. 
- routeConfig: &v3routepb.RouteConfiguration{ - Name: rdsName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsName}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - { - Name: "cluster_1", - Weight: &wrapperspb.UInt32Value{Value: 75}, - }, - { - Name: "cluster_2", - Weight: &wrapperspb.UInt32Value{Value: 25}, - }, - }, - }}, - }}, - }}, - }}, - }, + routeConfig: e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: ldsName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeWeightedCluster, + WeightedClusters: map[string]int{"cluster_1": 75, "cluster_2": 25}, + }), // This update contains the cluster from the previous update as well // as this update, as the previous config selector still references // the old cluster when the new one is pushed. @@ -802,65 +777,10 @@ func (s) TestResolverGoodServiceUpdate(t *testing.T) { } } }] -}`, - wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, - }, - { - // A redundant route configuration update. - // TODO(easwars): Do we need this, or can we do something else? Because the xds client might swallow this update. 
- routeConfig: &v3routepb.RouteConfiguration{ - Name: rdsName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsName}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - { - Name: "cluster_1", - Weight: &wrapperspb.UInt32Value{Value: 75}, - }, - { - Name: "cluster_2", - Weight: &wrapperspb.UInt32Value{Value: 25}, - }, - }, - }}, - }}, - }}, - }}, - }, - // With this redundant update, the old config selector has been - // stopped, so there are no more references to the first cluster. - // Only the second update's clusters should remain. - wantServiceConfig: ` -{ - "loadBalancingConfig": [{ - "xds_cluster_manager_experimental": { - "children": { - "cluster:cluster_1": { - "childPolicy": [{ - "cds_experimental": { - "cluster": "cluster_1" - } - }] - }, - "cluster:cluster_2": { - "childPolicy": [{ - "cds_experimental": { - "cluster": "cluster_2" - } - }] - } - } - } - }] }`, wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, } { - // Configure the management server with a good listener resource and a // route configuration resource, as specified by the test case. 
resources := e2e.UpdateOptions{ @@ -887,9 +807,7 @@ func (s) TestResolverGoodServiceUpdate(t *testing.T) { wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(tt.wantServiceConfig) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("Received unexpected service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } cs := iresolver.GetConfigSelector(rState) @@ -1322,29 +1240,12 @@ func (s) TestResolverWRR(t *testing.T) { resources := e2e.UpdateOptions{ NodeID: nodeID, Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, - Routes: []*v3routepb.RouteConfiguration{{ - Name: rdsName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsName}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - { - Name: "A", - Weight: &wrapperspb.UInt32Value{Value: 75}, - }, - { - Name: "B", - Weight: &wrapperspb.UInt32Value{Value: 25}, - }, - }, - }}, - }}, - }}, - }}, - }}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: ldsName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeWeightedCluster, + WeightedClusters: map[string]int{"A": 75, "B": 25}, + })}, SkipValidation: true, } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) From 6fe609daff9b3b7a9620eb3b746c2bc0a1113bbc Mon Sep 17 00:00:00 2001 From: Easwar 
Swaminathan Date: Fri, 24 Feb 2023 13:12:12 -0800 Subject: [PATCH 786/998] xdsclient: minor cleanup in eds parsing (#6055) --- xds/internal/xdsclient/xdsresource/unmarshal_eds.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index ecb40f38fa88..89386f6e2680 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -152,7 +152,7 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, ret.Localities = append(ret.Localities, Locality{ ID: lid, Endpoints: endpoints, - Weight: locality.GetLoadBalancingWeight().GetValue(), + Weight: weight, Priority: priority, }) } From 3775f633ce208a524fd882c9b4678b95b8a5a4d4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 24 Feb 2023 13:13:13 -0800 Subject: [PATCH 787/998] xdsclient/transport: reduce chattiness of logs (#5992) --- internal/grpclog/prefixLogger.go | 12 +++++++ .../xdsclient/transport/loadreport.go | 22 +++++++----- xds/internal/xdsclient/transport/transport.go | 35 +++++++++++++------ 3 files changed, 50 insertions(+), 19 deletions(-) diff --git a/internal/grpclog/prefixLogger.go b/internal/grpclog/prefixLogger.go index 82af70e96f15..02224b42ca86 100644 --- a/internal/grpclog/prefixLogger.go +++ b/internal/grpclog/prefixLogger.go @@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. 
if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/xds/internal/xdsclient/transport/loadreport.go b/xds/internal/xdsclient/transport/loadreport.go index 58a2e5dedb6a..89ffc4fcec66 100644 --- a/xds/internal/xdsclient/transport/loadreport.go +++ b/xds/internal/xdsclient/transport/loadreport.go @@ -120,19 +120,19 @@ func (t *Transport) lrsRunner(ctx context.Context) { defer cancel() stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) if err != nil { - t.logger.Warningf("Failed to create LRS stream: %v", err) + t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) return false } - t.logger.Infof("Created LRS stream to server: %s", t.serverURI) + t.logger.Infof("Created LRS stream to server %q", t.serverURI) if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { - t.logger.Warningf("Failed to send first LRS request: %v", err) + t.logger.Warningf("Sending first LRS request failed: %v", err) return false } clusters, interval, err := t.recvFirstLoadStatsResponse(stream) if err != nil { - t.logger.Warningf("Failed to read from LRS stream: %v", err) + t.logger.Warningf("Reading from LRS stream failed: %v", err) return false } @@ -160,7 +160,7 @@ func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterName return } if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil { - t.logger.Warningf("Failed to write to 
LRS stream: %v", err) + t.logger.Warningf("Writing to LRS stream failed: %v", err) return } } @@ -168,7 +168,9 @@ func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterName func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error { req := &v3lrspb.LoadStatsRequest{Node: node} - t.logger.Debugf("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + } err := stream.Send(req) if err == io.EOF { return getStreamError(stream) @@ -181,7 +183,9 @@ func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time if err != nil { return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err) } - t.logger.Debugf("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) + } interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) if err != nil { @@ -251,7 +255,9 @@ func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) e } req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} - t.logger.Debugf("Sending LRS loads: %s", pretty.ToJSON(req)) + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) + } err := stream.Send(req) if err == io.EOF { return getStreamError(stream) diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 10c863c5e6bd..1a0fed7a4bd9 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -45,6 +45,12 @@ import ( statuspb "google.golang.org/genproto/googleapis/rpc/status" ) +// Any per-RPC level logs which print complete request or response messages +// should be gated at this verbosity level. 
Other per-RPC level logs which print +// terse output should be at `INFO` and verbosity 2, which corresponds to using +// the `Debugf` method on the logger. +const perRPCVerbosityLevel = 9 + type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient // Transport provides a resource-type agnostic implementation of the xDS @@ -264,19 +270,26 @@ func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, reso } } if err := stream.Send(req); err != nil { - return fmt.Errorf("sending ADS request %s failed: %v", pretty.ToJSON(req), err) + return err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) + } else { + t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) } - t.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) return nil } func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { resp, err := stream.Recv() if err != nil { - return nil, "", "", "", fmt.Errorf("failed to read ADS response: %v", err) + return nil, "", "", "", err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) + } else { + t.logger.Debugf("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) } - t.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - t.logger.Debugf("ADS response received: %v", pretty.ToJSON(resp)) return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil } @@ -307,7 +320,7 @@ func (t *Transport) adsRunner(ctx context.Context) { stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) if err != nil { t.adsStreamErrHandler(err) - t.logger.Warningf("ADS stream creation failed: %v", err) + t.logger.Warningf("Creating new ADS stream 
failed: %v", err) return false } t.logger.Infof("ADS stream created") @@ -377,7 +390,7 @@ func (t *Transport) send(ctx context.Context) { continue } if err := t.sendAggregatedDiscoveryServiceRequest(stream, resources, url, version, nonce, nackErr); err != nil { - t.logger.Warningf("ADS request for {resources: %q, url: %v, version: %q, nonce: %q} failed: %v", resources, url, version, nonce, err) + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) // Send failed, clear the current stream. stream = nil } @@ -410,7 +423,7 @@ func (t *Transport) sendExisting(stream adsStream) bool { for url, resources := range t.resources { if err := t.sendAggregatedDiscoveryServiceRequest(stream, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { - t.logger.Warningf("ADS request failed: %v", err) + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) return false } } @@ -427,7 +440,7 @@ func (t *Transport) recv(stream adsStream) bool { resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) if err != nil { t.adsStreamErrHandler(err) - t.logger.Warningf("ADS stream is closed with error: %v", err) + t.logger.Warningf("ADS stream closed: %v", err) return msgReceived } msgReceived = true @@ -454,7 +467,7 @@ func (t *Transport) recv(stream adsStream) bool { nackErr: err, }) t.mu.Unlock() - t.logger.Warningf("Sending NACK for resource type: %v, version: %v, nonce: %v, reason: %v", url, rVersion, nonce, err) + t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) continue } t.adsRequestCh.Put(&ackRequest{ @@ -463,7 +476,7 @@ func (t *Transport) recv(stream adsStream) bool { stream: stream, version: rVersion, }) - t.logger.Infof("Sending ACK for resource type: %v, version: %v, nonce: %v", url, 
rVersion, nonce) + t.logger.Debugf("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) } } From 1093d3ac0ad2f833f914301efb3171619a662d51 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 27 Feb 2023 16:34:15 -0800 Subject: [PATCH 788/998] channelz: remove dependency on testing package (#6050) --- channelz/service/func_linux.go | 62 ++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 22 deletions(-) diff --git a/channelz/service/func_linux.go b/channelz/service/func_linux.go index 2e52d5f5a98f..ae759aa74212 100644 --- a/channelz/service/func_linux.go +++ b/channelz/service/func_linux.go @@ -25,7 +25,6 @@ import ( durpb "github.com/golang/protobuf/ptypes/duration" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/testutils" ) func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { @@ -35,32 +34,47 @@ func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOption { var opts []*channelzpb.SocketOption if skopts.Linger != nil { - opts = append(opts, &channelzpb.SocketOption{ - Name: "SO_LINGER", - Additional: testutils.MarshalAny(&channelzpb.SocketOptionLinger{ - Active: skopts.Linger.Onoff != 0, - Duration: convertToPtypesDuration(int64(skopts.Linger.Linger), 0), - }), + additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionLinger{ + Active: skopts.Linger.Onoff != 0, + Duration: convertToPtypesDuration(int64(skopts.Linger.Linger), 0), }) + if err == nil { + opts = append(opts, &channelzpb.SocketOption{ + Name: "SO_LINGER", + Additional: additional, + }) + } else { + logger.Warningf("Failed to marshal socket options linger %+v: %v", skopts.Linger, err) + } } if skopts.RecvTimeout != nil { - opts = append(opts, &channelzpb.SocketOption{ - Name: "SO_RCVTIMEO", - Additional: 
testutils.MarshalAny(&channelzpb.SocketOptionTimeout{ - Duration: convertToPtypesDuration(int64(skopts.RecvTimeout.Sec), int64(skopts.RecvTimeout.Usec)), - }), + additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ + Duration: convertToPtypesDuration(int64(skopts.RecvTimeout.Sec), int64(skopts.RecvTimeout.Usec)), }) + if err == nil { + opts = append(opts, &channelzpb.SocketOption{ + Name: "SO_RCVTIMEO", + Additional: additional, + }) + } else { + logger.Warningf("Failed to marshal socket options receive timeout %+v: %v", skopts.RecvTimeout, err) + } } if skopts.SendTimeout != nil { - opts = append(opts, &channelzpb.SocketOption{ - Name: "SO_SNDTIMEO", - Additional: testutils.MarshalAny(&channelzpb.SocketOptionTimeout{ - Duration: convertToPtypesDuration(int64(skopts.SendTimeout.Sec), int64(skopts.SendTimeout.Usec)), - }), + additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ + Duration: convertToPtypesDuration(int64(skopts.SendTimeout.Sec), int64(skopts.SendTimeout.Usec)), }) + if err == nil { + opts = append(opts, &channelzpb.SocketOption{ + Name: "SO_SNDTIMEO", + Additional: additional, + }) + } else { + logger.Warningf("Failed to marshal socket options send timeout %+v: %v", skopts.SendTimeout, err) + } } if skopts.TCPInfo != nil { - additional := testutils.MarshalAny(&channelzpb.SocketOptionTcpInfo{ + additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTcpInfo{ TcpiState: uint32(skopts.TCPInfo.State), TcpiCaState: uint32(skopts.TCPInfo.Ca_state), TcpiRetransmits: uint32(skopts.TCPInfo.Retransmits), @@ -91,10 +105,14 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio TcpiAdvmss: skopts.TCPInfo.Advmss, TcpiReordering: skopts.TCPInfo.Reordering, }) - opts = append(opts, &channelzpb.SocketOption{ - Name: "TCP_INFO", - Additional: additional, - }) + if err == nil { + opts = append(opts, &channelzpb.SocketOption{ + Name: "TCP_INFO", + Additional: additional, + }) + } else { + 
logger.Warningf("Failed to marshal socket options TCP info %+v: %v", skopts.TCPInfo, err) + } } return opts } From 681b13383c4e01c9fce03a50aaa356d84320033b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 27 Feb 2023 16:57:44 -0800 Subject: [PATCH 789/998] admin/test: split channelz imports (#6058) --- admin/test/utils.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/admin/test/utils.go b/admin/test/utils.go index 0ff3d00279ea..086ba2e6e476 100644 --- a/admin/test/utils.go +++ b/admin/test/utils.go @@ -36,6 +36,7 @@ import ( v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) @@ -98,7 +99,7 @@ func RunRegisterTests(t *testing.T, ec ExpectedStatusCodes) { // RunChannelz makes a channelz RPC. func RunChannelz(conn *grpc.ClientConn) error { - c := channelzpb.NewChannelzClient(conn) + c := channelzgrpc.NewChannelzClient(conn) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() _, err := c.GetTopChannels(ctx, &channelzpb.GetTopChannelsRequest{}, grpc.WaitForReady(true)) From 7437662fd5b853bd604201ddcc4f26496690ed97 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 27 Feb 2023 20:31:24 -0500 Subject: [PATCH 790/998] internal/transport: Fix flaky keep alive test (#6059) --- internal/transport/keepalive_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index 6ef0764cc6ad..c745ed497b78 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -363,6 +363,7 @@ func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { server, client, cancel := 
setUpWithOptions(t, 0, &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ + MinTime: 50 * time.Millisecond, PermitWithoutStream: true, }, }, From 0586c51d1b09895a829cf393518f498f1675db6a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 28 Feb 2023 09:34:05 -0800 Subject: [PATCH 791/998] internal/transport: reduce running time of test from 5s to 1s (#6061) --- internal/transport/keepalive_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index c745ed497b78..4a904703ef91 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -622,13 +622,13 @@ func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, + MinTime: 100 * time.Millisecond, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 10 * time.Millisecond, + Timeout: 10 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) @@ -639,7 +639,7 @@ func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T }() // No active streams on the client. Give keepalive enough time. 
- time.Sleep(5 * time.Second) + time.Sleep(1 * time.Second) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() From 75bed1de3d3c89e988dc6f3b43cefce6eca6fcb7 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 28 Feb 2023 09:36:06 -0800 Subject: [PATCH 792/998] test: move e2e health checking tests out of end2end_test.go (#6062) --- test/end2end_test.go | 310 -------------------------------------- test/healthcheck_test.go | 312 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 311 insertions(+), 311 deletions(-) diff --git a/test/end2end_test.go b/test/end2end_test.go index d3c339ccb8d9..c7ea9eef9033 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -2183,316 +2183,6 @@ func testTap(t *testing.T, e env) { } } -// healthCheck is a helper function to make a unary health check RPC and return -// the response. -func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) { - ctx, cancel := context.WithTimeout(context.Background(), d) - defer cancel() - hc := healthgrpc.NewHealthClient(cc) - return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service}) -} - -// verifyHealthCheckStatus is a helper function to verify that the current -// health status of the service matches the one passed in 'wantStatus'. -func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) { - t.Helper() - resp, err := healthCheck(d, cc, service) - if err != nil { - t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) - } - if resp.Status != wantStatus { - t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus) - } -} - -// verifyHealthCheckErrCode is a helper function to verify that a unary health -// check RPC returns an error with a code set to 'wantCode'. 
-func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) { - t.Helper() - if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode { - t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode) - } -} - -// newHealthCheckStream is a helper function to start a health check streaming -// RPC, and returns the stream. -func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) { - t.Helper() - ctx, cancel := context.WithCancel(context.Background()) - hc := healthgrpc.NewHealthClient(cc) - stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service}) - if err != nil { - t.Fatalf("hc.Watch(_, %v) failed: %v", service, err) - } - return stream, cancel -} - -// healthWatchChecker is a helper function to verify that the next health -// status returned on the given stream matches the one passed in 'wantStatus'. -func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) { - t.Helper() - response, err := stream.Recv() - if err != nil { - t.Fatalf("stream.Recv() failed: %v", err) - } - if response.Status != wantStatus { - t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus) - } -} - -// TestHealthCheckSuccess invokes the unary Check() RPC on the health server in -// a successful case. 
-func (s) TestHealthCheckSuccess(t *testing.T) { - for _, e := range listTestEnv() { - testHealthCheckSuccess(t, e) - } -} - -func testHealthCheckSuccess(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - defer te.tearDown() - - verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK) -} - -// TestHealthCheckFailure invokes the unary Check() RPC on the health server -// with an expired context and expects the RPC to fail. -func (s) TestHealthCheckFailure(t *testing.T) { - for _, e := range listTestEnv() { - testHealthCheckFailure(t, e) - } -} - -func testHealthCheckFailure(t *testing.T, e env) { - te := newTest(t, e) - te.declareLogNoise( - "Failed to dial ", - "grpc: the client connection is closing; please retry", - ) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - defer te.tearDown() - - verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded) - awaitNewConnLogOutput() -} - -// TestHealthCheckOff makes a unary Check() RPC on the health server where the -// health status of the defaultHealthService is not set, and therefore expects -// an error code 'codes.NotFound'. -func (s) TestHealthCheckOff(t *testing.T) { - for _, e := range listTestEnv() { - // TODO(bradfitz): Temporarily skip this env due to #619. 
- if e.name == "handler-tls" { - continue - } - testHealthCheckOff(t, e) - } -} - -func testHealthCheckOff(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound) -} - -// TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health -// server with multiple clients and expects the same status on both streams. -func (s) TestHealthWatchMultipleClients(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchMultipleClients(t, e) - } -} - -func testHealthWatchMultipleClients(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService) - defer cf1() - healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - - stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService) - defer cf2() - healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING) -} - -// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server -// and makes sure that the health status of the server is as expected after -// multiple calls to SetServingStatus with the same status. 
-func (s) TestHealthWatchSameStatus(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchSameStatus(t, e) - } -} - -func testHealthWatchSameStatus(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) -} - -// TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server -// on which the health status for the defaultService is set before the gRPC -// server is started, and expects the correct health status to be returned. 
-func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchSetServiceStatusBeforeStartingServer(t, e) - } -} - -func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) { - hs := health.NewServer() - te := newTest(t, e) - te.healthServer = hs - hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) -} - -// TestHealthWatchDefaultStatusChange verifies the simple case where the -// service starts off with a SERVICE_UNKNOWN status (because SetServingStatus -// hasn't been called yet) and then moves to SERVING after SetServingStatus is -// called. -func (s) TestHealthWatchDefaultStatusChange(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchDefaultStatusChange(t, e) - } -} - -func testHealthWatchDefaultStatusChange(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) -} - -// TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case -// where the health status is set to SERVING before the client calls Watch(). 
-func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e) - } -} - -func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) -} - -// TestHealthWatchOverallServerHealthChange verifies setting the overall status -// of the server by using the empty service name. -func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchOverallServerHealthChange(t, e) - } -} - -func testHealthWatchOverallServerHealthChange(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), "") - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) -} - -// TestUnknownHandler verifies that an expected error is returned (by setting -// the unknownHandler on the server) for a service which is not exposed to the -// client. -func (s) TestUnknownHandler(t *testing.T) { - // An example unknownHandler that returns a different code and a different - // method, making sure that we do not expose what methods are implemented to - // a client that is not authenticated. 
- unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { - return status.Error(codes.Unauthenticated, "user unauthenticated") - } - for _, e := range listTestEnv() { - // TODO(bradfitz): Temporarily skip this env due to #619. - if e.name == "handler-tls" { - continue - } - testUnknownHandler(t, e, unknownHandler) - } -} - -func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) { - te := newTest(t, e) - te.unknownHandler = unknownHandler - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated) -} - -// TestHealthCheckServingStatus makes a streaming Watch() RPC on the health -// server and verifies a bunch of health status transitions. -func (s) TestHealthCheckServingStatus(t *testing.T) { - for _, e := range listTestEnv() { - testHealthCheckServingStatus(t, e) - } -} - -func testHealthCheckServingStatus(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING) - verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) - verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) -} - func (s) TestEmptyUnaryWithUserAgent(t *testing.T) { for _, e := range listTestEnv() { testEmptyUnaryWithUserAgent(t, e) diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index 0c4f1a324db4..f4daaaa77c6b 100644 --- a/test/healthcheck_test.go +++ 
b/test/healthcheck_test.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" - _ "google.golang.org/grpc/health" + "google.golang.org/grpc/health" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" @@ -867,3 +867,313 @@ func (s) TestHealthCheckChannelzCountingCallFailure(t *testing.T) { t.Fatal(err) } } + +// healthCheck is a helper function to make a unary health check RPC and return +// the response. +func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) { + ctx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + hc := healthgrpc.NewHealthClient(cc) + return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service}) +} + +// verifyHealthCheckStatus is a helper function to verify that the current +// health status of the service matches the one passed in 'wantStatus'. +func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) { + t.Helper() + resp, err := healthCheck(d, cc, service) + if err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } + if resp.Status != wantStatus { + t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus) + } +} + +// verifyHealthCheckErrCode is a helper function to verify that a unary health +// check RPC returns an error with a code set to 'wantCode'. 
+func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) { + t.Helper() + if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode { + t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode) + } +} + +// newHealthCheckStream is a helper function to start a health check streaming +// RPC, and returns the stream. +func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + hc := healthgrpc.NewHealthClient(cc) + stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service}) + if err != nil { + t.Fatalf("hc.Watch(_, %v) failed: %v", service, err) + } + return stream, cancel +} + +// healthWatchChecker is a helper function to verify that the next health +// status returned on the given stream matches the one passed in 'wantStatus'. +func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) { + t.Helper() + response, err := stream.Recv() + if err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + if response.Status != wantStatus { + t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus) + } +} + +// TestHealthCheckSuccess invokes the unary Check() RPC on the health server in +// a successful case. 
+func (s) TestHealthCheckSuccess(t *testing.T) { + for _, e := range listTestEnv() { + testHealthCheckSuccess(t, e) + } +} + +func testHealthCheckSuccess(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + defer te.tearDown() + + verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK) +} + +// TestHealthCheckFailure invokes the unary Check() RPC on the health server +// with an expired context and expects the RPC to fail. +func (s) TestHealthCheckFailure(t *testing.T) { + for _, e := range listTestEnv() { + testHealthCheckFailure(t, e) + } +} + +func testHealthCheckFailure(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise( + "Failed to dial ", + "grpc: the client connection is closing; please retry", + ) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + defer te.tearDown() + + verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded) + awaitNewConnLogOutput() +} + +// TestHealthCheckOff makes a unary Check() RPC on the health server where the +// health status of the defaultHealthService is not set, and therefore expects +// an error code 'codes.NotFound'. +func (s) TestHealthCheckOff(t *testing.T) { + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. 
+ if e.name == "handler-tls" { + continue + } + testHealthCheckOff(t, e) + } +} + +func testHealthCheckOff(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound) +} + +// TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health +// server with multiple clients and expects the same status on both streams. +func (s) TestHealthWatchMultipleClients(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchMultipleClients(t, e) + } +} + +func testHealthWatchMultipleClients(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService) + defer cf1() + healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + + stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService) + defer cf2() + healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING) +} + +// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server +// and makes sure that the health status of the server is as expected after +// multiple calls to SetServingStatus with the same status. 
+func (s) TestHealthWatchSameStatus(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchSameStatus(t, e) + } +} + +func testHealthWatchSameStatus(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) +} + +// TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server +// on which the health status for the defaultService is set before the gRPC +// server is started, and expects the correct health status to be returned. 
+func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchSetServiceStatusBeforeStartingServer(t, e) + } +} + +func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) { + hs := health.NewServer() + te := newTest(t, e) + te.healthServer = hs + hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) +} + +// TestHealthWatchDefaultStatusChange verifies the simple case where the +// service starts off with a SERVICE_UNKNOWN status (because SetServingStatus +// hasn't been called yet) and then moves to SERVING after SetServingStatus is +// called. +func (s) TestHealthWatchDefaultStatusChange(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchDefaultStatusChange(t, e) + } +} + +func testHealthWatchDefaultStatusChange(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) +} + +// TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case +// where the health status is set to SERVING before the client calls Watch(). 
+func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e) + } +} + +func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) +} + +// TestHealthWatchOverallServerHealthChange verifies setting the overall status +// of the server by using the empty service name. +func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchOverallServerHealthChange(t, e) + } +} + +func testHealthWatchOverallServerHealthChange(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), "") + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) +} + +// TestUnknownHandler verifies that an expected error is returned (by setting +// the unknownHandler on the server) for a service which is not exposed to the +// client. +func (s) TestUnknownHandler(t *testing.T) { + // An example unknownHandler that returns a different code and a different + // method, making sure that we do not expose what methods are implemented to + // a client that is not authenticated. 
+ unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { + return status.Error(codes.Unauthenticated, "user unauthenticated") + } + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. + if e.name == "handler-tls" { + continue + } + testUnknownHandler(t, e, unknownHandler) + } +} + +func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) { + te := newTest(t, e) + te.unknownHandler = unknownHandler + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated) +} + +// TestHealthCheckServingStatus makes a streaming Watch() RPC on the health +// server and verifies a bunch of health status transitions. +func (s) TestHealthCheckServingStatus(t *testing.T) { + for _, e := range listTestEnv() { + testHealthCheckServingStatus(t, e) + } +} + +func testHealthCheckServingStatus(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING) + verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) + verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) +} From dba41efd93f8cecb12ae47e5a1461636dbd45f9e Mon Sep 17 00:00:00 2001 From: KT <39397413+ktalg@users.noreply.github.com> Date: Wed, 1 Mar 2023 02:43:56 +0800 Subject: [PATCH 793/998] metadata: fix validation issues (#6001) --- internal/metadata/metadata.go | 62 
++++++++++++-------- internal/metadata/metadata_test.go | 4 ++ stream.go | 11 +++- test/metadata_test.go | 91 ++++++++++++++++++++---------- 4 files changed, 113 insertions(+), 55 deletions(-) diff --git a/internal/metadata/metadata.go b/internal/metadata/metadata.go index b2980f8ac44a..c82e608e0773 100644 --- a/internal/metadata/metadata.go +++ b/internal/metadata/metadata.go @@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { return addr } -// Validate returns an error if the input md contains invalid keys or values. -// -// If the header is not a pseudo-header, the following items are checked: -// - header names must contain one or more characters from this set [0-9 a-z _ - .]. -// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. -// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +// Validate validates every pair in md with ValidatePair. func Validate(md metadata.MD) error { for k, vals := range md { - // pseudo-header will be ignored - if k[0] == ':' { - continue - } - // check key, for i that saving a conversion if not using for range - for i := 0; i < len(k); i++ { - r := k[i] - if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { - return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) - } - } - if strings.HasSuffix(k, "-bin") { - continue - } - // check value - for _, val := range vals { - if hasNotPrintable(val) { - return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) - } + if err := ValidatePair(k, vals...); err != nil { + return err } } return nil @@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool { } return false } + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. 
+// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/internal/metadata/metadata_test.go b/internal/metadata/metadata_test.go index 80f1a44bb6ac..8f0e430e5ed4 100644 --- a/internal/metadata/metadata_test.go +++ b/internal/metadata/metadata_test.go @@ -100,6 +100,10 @@ func TestValidate(t *testing.T) { md: map[string][]string{"test": {string(rune(0x19))}}, want: errors.New("header key \"test\" contains value with non-printable ASCII characters"), }, + { + md: map[string][]string{"": {"valid"}}, + want: errors.New("there is an empty key in the header"), + }, { md: map[string][]string{"test-bin": {string(rune(0x19))}}, want: nil, diff --git a/stream.go b/stream.go index ad14c74b39e5..368da22acd57 100644 --- a/stream.go +++ b/stream.go @@ -168,10 +168,19 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if md, _, ok 
:= metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } } if channelz.IsOn() { cc.incrCallsStarted() diff --git a/test/metadata_test.go b/test/metadata_test.go index ad2b12cfc77f..a15e5cb1c6e7 100644 --- a/test/metadata_test.go +++ b/test/metadata_test.go @@ -36,29 +36,55 @@ import ( ) func (s) TestInvalidMetadata(t *testing.T) { - grpctest.TLogger.ExpectErrorN("stream: failed to validate md when setting trailer", 2) + grpctest.TLogger.ExpectErrorN("stream: failed to validate md when setting trailer", 5) tests := []struct { - md metadata.MD - want error - recv error + name string + md metadata.MD + appendMD []string + want error + recv error }{ { + name: "invalid key", md: map[string][]string{string(rune(0x19)): {"testVal"}}, want: status.Error(codes.Internal, "header key \"\\x19\" contains illegal characters not in [0-9a-z-_.]"), recv: status.Error(codes.Internal, "invalid header field"), }, { + name: "invalid value", md: map[string][]string{"test": {string(rune(0x19))}}, want: status.Error(codes.Internal, "header key \"test\" contains value with non-printable ASCII characters"), recv: status.Error(codes.Internal, "invalid header field"), }, { + name: "invalid appended value", + md: map[string][]string{"test": {"test"}}, + appendMD: []string{"/", "value"}, + want: status.Error(codes.Internal, "header key \"/\" contains illegal characters not in [0-9a-z-_.]"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "empty appended key", + md: map[string][]string{"test": {"test"}}, + appendMD: []string{"", "value"}, + want: status.Error(codes.Internal, 
"there is an empty key in the header"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "empty key", + md: map[string][]string{"": {"test"}}, + want: status.Error(codes.Internal, "there is an empty key in the header"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "-bin key with arbitrary value", md: map[string][]string{"test-bin": {string(rune(0x19))}}, want: nil, recv: io.EOF, }, { + name: "valid key and value", md: map[string][]string{"test": {"value"}}, want: nil, recv: io.EOF, @@ -77,13 +103,16 @@ func (s) TestInvalidMetadata(t *testing.T) { } test := tests[testNum] testNum++ - if err := stream.SetHeader(test.md); !reflect.DeepEqual(test.want, err) { - return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + // merge original md and added md. + md := metadata.Join(test.md, metadata.Pairs(test.appendMD...)) + + if err := stream.SetHeader(md); !reflect.DeepEqual(test.want, err) { + return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", md, err, test.want) } - if err := stream.SendHeader(test.md); !reflect.DeepEqual(test.want, err) { - return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + if err := stream.SendHeader(md); !reflect.DeepEqual(test.want, err) { + return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", md, err, test.want) } - stream.SetTrailer(test.md) + stream.SetTrailer(md) return nil }, } @@ -93,29 +122,33 @@ func (s) TestInvalidMetadata(t *testing.T) { defer ss.Stop() for _, test := range tests { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - ctx = metadata.NewOutgoingContext(ctx, test.md) - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !reflect.DeepEqual(test.want, err) { - 
t.Errorf("call ss.Client.EmptyCall() validate metadata which is %v got err :%v, want err :%v", test.md, err, test.want) - } + t.Run("unary "+test.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + ctx = metadata.NewOutgoingContext(ctx, test.md) + ctx = metadata.AppendToOutgoingContext(ctx, test.appendMD...) + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !reflect.DeepEqual(test.want, err) { + t.Errorf("call ss.Client.EmptyCall() validate metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + } + }) } // call the stream server's api to drive the server-side unit testing for _, test := range tests { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - stream, err := ss.Client.FullDuplexCall(ctx) - defer cancel() - if err != nil { - t.Errorf("call ss.Client.FullDuplexCall(context.Background()) will success but got err :%v", err) - continue - } - if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { - t.Errorf("call ss.Client stream Send(nil) will success but got err :%v", err) - } - if _, err := stream.Recv(); status.Code(err) != status.Code(test.recv) || !strings.Contains(err.Error(), test.recv.Error()) { - t.Errorf("stream.Recv() = _, get err :%v, want err :%v", err, test.recv) - } + t.Run("streaming "+test.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Errorf("call ss.Client.FullDuplexCall(context.Background()) will success but got err :%v", err) + return + } + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Errorf("call ss.Client stream Send(nil) will success but got err :%v", err) + } + if _, err := stream.Recv(); status.Code(err) != status.Code(test.recv) || !strings.Contains(err.Error(), test.recv.Error()) { + t.Errorf("stream.Recv() = _, get err :%v, 
want err :%v", err, test.recv) + } + }) } } From d53f0ec3181c3d7309e1acff4e1bbdf5af5aac83 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 28 Feb 2023 11:30:48 -0800 Subject: [PATCH 794/998] test: move compressor tests out of end2end_test.go (#6063) --- test/compressor_test.go | 706 ++++++++++++++++++++++++++++++++++++++++ test/end2end_test.go | 668 ------------------------------------- 2 files changed, 706 insertions(+), 668 deletions(-) create mode 100644 test/compressor_test.go diff --git a/test/compressor_test.go b/test/compressor_test.go new file mode 100644 index 000000000000..9ca498bbba40 --- /dev/null +++ b/test/compressor_test.go @@ -0,0 +1,706 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "bytes" + "compress/gzip" + "context" + "io" + "reflect" + "strings" + "sync/atomic" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +func (s) TestCompressServerHasNoSupport(t *testing.T) { + for _, e := range listTestEnv() { + testCompressServerHasNoSupport(t, e) + } +} + +func testCompressServerHasNoSupport(t *testing.T, e env) { + te := newTest(t, e) + te.serverCompression = false + te.clientCompression = false + te.clientNopCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: respSize, + Payload: payload, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.Unimplemented { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented) + } + // Streaming RPC + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented { + t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented) + } +} + +func (s) TestCompressOK(t *testing.T) { + for _, e := range listTestEnv() { + testCompressOK(t, e) + } +} + +func testCompressOK(t *testing.T, e env) { + te := newTest(t, e) + 
te.serverCompression = true + te.clientCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + // Unary call + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: respSize, + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: 31415, + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + stream.CloseSend() + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) + } +} + +func (s) TestIdentityEncoding(t *testing.T) { + for _, e := range listTestEnv() { + testIdentityEncoding(t, e) + } +} + +func testIdentityEncoding(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + // Unary call + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 
5) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: 10, + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity")) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: []*testpb.ResponseParameters{{Size: 10}}, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + stream.CloseSend() + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) + } +} + +// renameCompressor is a grpc.Compressor wrapper that allows customizing the +// Type() of another compressor. +type renameCompressor struct { + grpc.Compressor + name string +} + +func (r *renameCompressor) Type() string { return r.name } + +// renameDecompressor is a grpc.Decompressor wrapper that allows customizing the +// Type() of another Decompressor. 
+type renameDecompressor struct { + grpc.Decompressor + name string +} + +func (r *renameDecompressor) Type() string { return r.name } + +func (s) TestClientForwardsGrpcAcceptEncodingHeader(t *testing.T) { + wantGrpcAcceptEncodingCh := make(chan []string, 1) + defer close(wantGrpcAcceptEncodingCh) + + compressor := renameCompressor{Compressor: grpc.NewGZIPCompressor(), name: "testgzip"} + decompressor := renameDecompressor{Decompressor: grpc.NewGZIPDecompressor(), name: "testgzip"} + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Internal, "no metadata in context") + } + if got, want := md["grpc-accept-encoding"], <-wantGrpcAcceptEncodingCh; !reflect.DeepEqual(got, want) { + return nil, status.Errorf(codes.Internal, "got grpc-accept-encoding=%q; want [%q]", got, want) + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.RPCDecompressor(&decompressor)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantGrpcAcceptEncodingCh <- []string{"gzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + wantGrpcAcceptEncodingCh <- []string{"gzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.UseCompressor("gzip")); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + // Use compressor directly which is not registered via + // encoding.RegisterCompressor. 
+ if err := ss.StartClient(grpc.WithCompressor(&compressor)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + wantGrpcAcceptEncodingCh <- []string{"gzip,testgzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } +} + +// wrapCompressor is a wrapper of encoding.Compressor which maintains count of +// Compressor method invokes. +type wrapCompressor struct { + encoding.Compressor + compressInvokes int32 +} + +func (wc *wrapCompressor) Compress(w io.Writer) (io.WriteCloser, error) { + atomic.AddInt32(&wc.compressInvokes, 1) + return wc.Compressor.Compress(w) +} + +func setupGzipWrapCompressor(t *testing.T) *wrapCompressor { + oldC := encoding.GetCompressor("gzip") + c := &wrapCompressor{Compressor: oldC} + encoding.RegisterCompressor(c) + t.Cleanup(func() { + encoding.RegisterCompressor(oldC) + }) + return c +} + +func (s) TestSetSendCompressorSuccess(t *testing.T) { + for _, tt := range []struct { + name string + desc string + dialOpts []grpc.DialOption + resCompressor string + wantCompressInvokes int32 + }{ + { + name: "identity_request_and_gzip_response", + desc: "request is uncompressed and response is gzip compressed", + resCompressor: "gzip", + wantCompressInvokes: 1, + }, + { + name: "gzip_request_and_identity_response", + desc: "request is gzip compressed and response is uncompressed with identity", + resCompressor: "identity", + dialOpts: []grpc.DialOption{ + // Use WithCompressor instead of UseCompressor to avoid counting + // the client's compressor usage. 
+ grpc.WithCompressor(grpc.NewGZIPCompressor()), + }, + wantCompressInvokes: 0, + }, + } { + t.Run(tt.name, func(t *testing.T) { + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) + }) + }) + } +} + +func testUnarySetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { + wc := setupGzipWrapCompressor(t) + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { + return nil, err + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil, dialOpts...); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) + } + + compressInvokes := atomic.LoadInt32(&wc.compressInvokes) + if compressInvokes != wantCompressInvokes { + t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) + } +} + +func testStreamSetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { + wc := setupGzipWrapCompressor(t) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + + if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { + return err + } + + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil, dialOpts...); err != nil { + 
t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) + } + + if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); err != nil { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: nil", err) + } + + compressInvokes := atomic.LoadInt32(&wc.compressInvokes) + if compressInvokes != wantCompressInvokes { + t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) + } +} + +func (s) TestUnregisteredSetSendCompressorFailure(t *testing.T) { + resCompressor := "snappy2" + wantErr := status.Error(codes.Unknown, "unable to set send compressor: compressor not registered \"snappy2\"") + + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorFailure(t, resCompressor, wantErr) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorFailure(t, resCompressor, wantErr) + }) +} + +func (s) TestUnadvertisedSetSendCompressorFailure(t *testing.T) { + // Disable client compressor advertisement. 
+ defer func(b bool) { envconfig.AdvertiseCompressors = b }(envconfig.AdvertiseCompressors) + envconfig.AdvertiseCompressors = false + + resCompressor := "gzip" + wantErr := status.Error(codes.Unknown, "unable to set send compressor: client does not support compressor \"gzip\"") + + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorFailure(t, resCompressor, wantErr) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorFailure(t, resCompressor, wantErr) + }) +} + +func testUnarySetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { + return nil, err + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { + t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) + } +} + +func testStreamSetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + + if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { + return err + } + + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v, want: nil", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, 
got: %v, want: nil", err) + } + + if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); !equalError(err, wantErr) { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: nil", err) + } +} + +func (s) TestUnarySetSendCompressorAfterHeaderSendFailure(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + // Send headers early and then set send compressor. + grpc.SendHeader(ctx, metadata.MD{}) + err := grpc.SetSendCompressor(ctx, "gzip") + if err == nil { + t.Error("Wanted set send compressor error") + return &testpb.Empty{}, nil + } + return nil, err + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { + t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) + } +} + +func (s) TestStreamSetSendCompressorAfterHeaderSendFailure(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + // Send headers early and then set send compressor. 
+ grpc.SendHeader(stream.Context(), metadata.MD{}) + err := grpc.SetSendCompressor(stream.Context(), "gzip") + if err == nil { + t.Error("Wanted set send compressor error") + } + return err + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); !equalError(err, wantErr) { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: %v", err, wantErr) + } +} + +func (s) TestClientSupportedCompressors(t *testing.T) { + for _, tt := range []struct { + desc string + ctx context.Context + want []string + }{ + { + desc: "No additional grpc-accept-encoding header", + ctx: context.Background(), + want: []string{"gzip"}, + }, + { + desc: "With additional grpc-accept-encoding header", + ctx: metadata.AppendToOutgoingContext(context.Background(), + "grpc-accept-encoding", "test-compressor-1", + "grpc-accept-encoding", "test-compressor-2", + ), + want: []string{"gzip", "test-compressor-1", "test-compressor-2"}, + }, + { + desc: "With additional empty grpc-accept-encoding header", + ctx: metadata.AppendToOutgoingContext(context.Background(), + "grpc-accept-encoding", "", + ), + want: []string{"gzip"}, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + got, err := grpc.ClientSupportedCompressors(ctx) + if err != nil { + return nil, err + } + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("unexpected client compressors got: %v, want: %v", got, tt.want) + } + + return &testpb.Empty{}, nil + }, + } + if err := 
ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v, want: nil", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(tt.ctx, defaultTestTimeout) + defer cancel() + + _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) + if err != nil { + t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) + } + }) + } +} + +func (s) TestCompressorRegister(t *testing.T) { + for _, e := range listTestEnv() { + testCompressorRegister(t, e) + } +} + +func testCompressorRegister(t *testing.T, e env) { + te := newTest(t, e) + te.clientCompression = false + te.serverCompression = false + te.clientUseCompression = true + + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + // Unary call + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: respSize, + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: 31415, + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); 
err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } +} + +type badGzipCompressor struct{} + +func (badGzipCompressor) Do(w io.Writer, p []byte) error { + buf := &bytes.Buffer{} + gzw := gzip.NewWriter(buf) + if _, err := gzw.Write(p); err != nil { + return err + } + err := gzw.Close() + bs := buf.Bytes() + if len(bs) >= 6 { + bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte + } + w.Write(bs) + return err +} + +func (badGzipCompressor) Type() string { + return "gzip" +} + +func (s) TestGzipBadChecksum(t *testing.T) { + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024)) + if err != nil { + t.Fatalf("Unexpected error from newPayload: %v", err) + } + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil || + status.Code(err) != codes.Internal || + !strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) { + t.Errorf("ss.Client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum) + } +} diff --git a/test/end2end_test.go b/test/end2end_test.go index c7ea9eef9033..9de88ebcc46f 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -21,7 +21,6 @@ package test import ( "bufio" "bytes" - "compress/gzip" "context" "crypto/tls" "errors" @@ -59,7 +58,6 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" 
"google.golang.org/grpc/internal/stubserver" @@ -3705,165 +3703,6 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { } } -func (s) TestCompressServerHasNoSupport(t *testing.T) { - for _, e := range listTestEnv() { - testCompressServerHasNoSupport(t, e) - } -} - -func testCompressServerHasNoSupport(t *testing.T, e env) { - te := newTest(t, e) - te.serverCompression = false - te.clientCompression = false - te.clientNopCompression = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - const argSize = 271828 - const respSize = 314159 - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: respSize, - Payload: payload, - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.Unimplemented { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented) - } - // Streaming RPC - stream, err := tc.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented { - t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented) - } -} - -func (s) TestCompressOK(t *testing.T) { - for _, e := range listTestEnv() { - testCompressOK(t, e) - } -} - -func testCompressOK(t *testing.T, e env) { - te := newTest(t, e) - te.serverCompression = true - te.clientCompression = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - // Unary call - const argSize = 271828 - const respSize = 314159 - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) - if err != nil { - 
t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: respSize, - Payload: payload, - } - ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - // Streaming RPC - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := tc.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - respParam := []*testpb.ResponseParameters{ - { - Size: 31415, - }, - } - payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) - if err != nil { - t.Fatal(err) - } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - stream.CloseSend() - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = %v, want ", stream, err) - } - if _, err := stream.Recv(); err != io.EOF { - t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) - } -} - -func (s) TestIdentityEncoding(t *testing.T) { - for _, e := range listTestEnv() { - testIdentityEncoding(t, e) - } -} - -func testIdentityEncoding(t *testing.T, e env) { - te := newTest(t, e) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - // Unary call - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: 10, - Payload: payload, - } - ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) - if _, err := tc.UnaryCall(ctx, req); err != nil { - 
t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - // Streaming RPC - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity")) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) - if err != nil { - t.Fatal(err) - } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: []*testpb.ResponseParameters{{Size: 10}}, - Payload: payload, - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - stream.CloseSend() - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = %v, want ", stream, err) - } - if _, err := stream.Recv(); err != io.EOF { - t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) - } -} - func (s) TestUnaryClientInterceptor(t *testing.T) { for _, e := range listTestEnv() { testUnaryClientInterceptor(t, e) @@ -4705,406 +4544,6 @@ func (s) TestForceServerCodec(t *testing.T) { } } -// renameCompressor is a grpc.Compressor wrapper that allows customizing the -// Type() of another compressor. -type renameCompressor struct { - grpc.Compressor - name string -} - -func (r *renameCompressor) Type() string { return r.name } - -// renameDecompressor is a grpc.Decompressor wrapper that allows customizing the -// Type() of another Decompressor. 
-type renameDecompressor struct { - grpc.Decompressor - name string -} - -func (r *renameDecompressor) Type() string { return r.name } - -func (s) TestClientForwardsGrpcAcceptEncodingHeader(t *testing.T) { - wantGrpcAcceptEncodingCh := make(chan []string, 1) - defer close(wantGrpcAcceptEncodingCh) - - compressor := renameCompressor{Compressor: grpc.NewGZIPCompressor(), name: "testgzip"} - decompressor := renameDecompressor{Decompressor: grpc.NewGZIPDecompressor(), name: "testgzip"} - - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, status.Errorf(codes.Internal, "no metadata in context") - } - if got, want := md["grpc-accept-encoding"], <-wantGrpcAcceptEncodingCh; !reflect.DeepEqual(got, want) { - return nil, status.Errorf(codes.Internal, "got grpc-accept-encoding=%q; want [%q]", got, want) - } - return &testpb.Empty{}, nil - }, - } - if err := ss.Start([]grpc.ServerOption{grpc.RPCDecompressor(&decompressor)}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - wantGrpcAcceptEncodingCh <- []string{"gzip"} - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) - } - - wantGrpcAcceptEncodingCh <- []string{"gzip"} - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.UseCompressor("gzip")); err != nil { - t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) - } - - // Use compressor directly which is not registered via - // encoding.RegisterCompressor. 
- if err := ss.StartClient(grpc.WithCompressor(&compressor)); err != nil { - t.Fatalf("Error starting client: %v", err) - } - wantGrpcAcceptEncodingCh <- []string{"gzip,testgzip"} - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) - } -} - -// wrapCompressor is a wrapper of encoding.Compressor which maintains count of -// Compressor method invokes. -type wrapCompressor struct { - encoding.Compressor - compressInvokes int32 -} - -func (wc *wrapCompressor) Compress(w io.Writer) (io.WriteCloser, error) { - atomic.AddInt32(&wc.compressInvokes, 1) - return wc.Compressor.Compress(w) -} - -func setupGzipWrapCompressor(t *testing.T) *wrapCompressor { - oldC := encoding.GetCompressor("gzip") - c := &wrapCompressor{Compressor: oldC} - encoding.RegisterCompressor(c) - t.Cleanup(func() { - encoding.RegisterCompressor(oldC) - }) - return c -} - -func (s) TestSetSendCompressorSuccess(t *testing.T) { - for _, tt := range []struct { - name string - desc string - dialOpts []grpc.DialOption - resCompressor string - wantCompressInvokes int32 - }{ - { - name: "identity_request_and_gzip_response", - desc: "request is uncompressed and response is gzip compressed", - resCompressor: "gzip", - wantCompressInvokes: 1, - }, - { - name: "gzip_request_and_identity_response", - desc: "request is gzip compressed and response is uncompressed with identity", - resCompressor: "identity", - dialOpts: []grpc.DialOption{ - // Use WithCompressor instead of UseCompressor to avoid counting - // the client's compressor usage. 
- grpc.WithCompressor(grpc.NewGZIPCompressor()), - }, - wantCompressInvokes: 0, - }, - } { - t.Run(tt.name, func(t *testing.T) { - t.Run("unary", func(t *testing.T) { - testUnarySetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) - }) - - t.Run("stream", func(t *testing.T) { - testStreamSetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) - }) - }) - } -} - -func testUnarySetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { - wc := setupGzipWrapCompressor(t) - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { - return nil, err - } - return &testpb.Empty{}, nil - }, - } - if err := ss.Start(nil, dialOpts...); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) - } - - compressInvokes := atomic.LoadInt32(&wc.compressInvokes) - if compressInvokes != wantCompressInvokes { - t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) - } -} - -func testStreamSetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { - wc := setupGzipWrapCompressor(t) - ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { - if _, err := stream.Recv(); err != nil { - return err - } - - if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { - return err - } - - return stream.Send(&testpb.StreamingOutputCallResponse{}) - }, - } - if err := ss.Start(nil, dialOpts...); err != nil { - 
t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - s, err := ss.Client.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) - } - - if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { - t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) - } - - if _, err := s.Recv(); err != nil { - t.Fatalf("Unexpected full duplex recv error, got: %v, want: nil", err) - } - - compressInvokes := atomic.LoadInt32(&wc.compressInvokes) - if compressInvokes != wantCompressInvokes { - t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) - } -} - -func (s) TestUnregisteredSetSendCompressorFailure(t *testing.T) { - resCompressor := "snappy2" - wantErr := status.Error(codes.Unknown, "unable to set send compressor: compressor not registered \"snappy2\"") - - t.Run("unary", func(t *testing.T) { - testUnarySetSendCompressorFailure(t, resCompressor, wantErr) - }) - - t.Run("stream", func(t *testing.T) { - testStreamSetSendCompressorFailure(t, resCompressor, wantErr) - }) -} - -func (s) TestUnadvertisedSetSendCompressorFailure(t *testing.T) { - // Disable client compressor advertisement. 
- defer func(b bool) { envconfig.AdvertiseCompressors = b }(envconfig.AdvertiseCompressors) - envconfig.AdvertiseCompressors = false - - resCompressor := "gzip" - wantErr := status.Error(codes.Unknown, "unable to set send compressor: client does not support compressor \"gzip\"") - - t.Run("unary", func(t *testing.T) { - testUnarySetSendCompressorFailure(t, resCompressor, wantErr) - }) - - t.Run("stream", func(t *testing.T) { - testStreamSetSendCompressorFailure(t, resCompressor, wantErr) - }) -} - -func testUnarySetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { - return nil, err - } - return &testpb.Empty{}, nil - }, - } - if err := ss.Start(nil); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { - t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) - } -} - -func testStreamSetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { - ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { - if _, err := stream.Recv(); err != nil { - return err - } - - if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { - return err - } - - return stream.Send(&testpb.StreamingOutputCallResponse{}) - }, - } - if err := ss.Start(nil); err != nil { - t.Fatalf("Error starting endpoint server: %v, want: nil", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - s, err := ss.Client.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("Unexpected full duplex call error, 
got: %v, want: nil", err) - } - - if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { - t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) - } - - if _, err := s.Recv(); !equalError(err, wantErr) { - t.Fatalf("Unexpected full duplex recv error, got: %v, want: nil", err) - } -} - -func (s) TestUnarySetSendCompressorAfterHeaderSendFailure(t *testing.T) { - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - // Send headers early and then set send compressor. - grpc.SendHeader(ctx, metadata.MD{}) - err := grpc.SetSendCompressor(ctx, "gzip") - if err == nil { - t.Error("Wanted set send compressor error") - return &testpb.Empty{}, nil - } - return nil, err - }, - } - if err := ss.Start(nil); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { - t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) - } -} - -func (s) TestStreamSetSendCompressorAfterHeaderSendFailure(t *testing.T) { - ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { - // Send headers early and then set send compressor. 
- grpc.SendHeader(stream.Context(), metadata.MD{}) - err := grpc.SetSendCompressor(stream.Context(), "gzip") - if err == nil { - t.Error("Wanted set send compressor error") - } - return err - }, - } - if err := ss.Start(nil); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") - s, err := ss.Client.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) - } - - if _, err := s.Recv(); !equalError(err, wantErr) { - t.Fatalf("Unexpected full duplex recv error, got: %v, want: %v", err, wantErr) - } -} - -func (s) TestClientSupportedCompressors(t *testing.T) { - for _, tt := range []struct { - desc string - ctx context.Context - want []string - }{ - { - desc: "No additional grpc-accept-encoding header", - ctx: context.Background(), - want: []string{"gzip"}, - }, - { - desc: "With additional grpc-accept-encoding header", - ctx: metadata.AppendToOutgoingContext(context.Background(), - "grpc-accept-encoding", "test-compressor-1", - "grpc-accept-encoding", "test-compressor-2", - ), - want: []string{"gzip", "test-compressor-1", "test-compressor-2"}, - }, - { - desc: "With additional empty grpc-accept-encoding header", - ctx: metadata.AppendToOutgoingContext(context.Background(), - "grpc-accept-encoding", "", - ), - want: []string{"gzip"}, - }, - } { - t.Run(tt.desc, func(t *testing.T) { - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - got, err := grpc.ClientSupportedCompressors(ctx) - if err != nil { - return nil, err - } - - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("unexpected client compressors got: %v, want: %v", got, tt.want) - } - - return &testpb.Empty{}, nil - }, - } - if err := 
ss.Start(nil); err != nil { - t.Fatalf("Error starting endpoint server: %v, want: nil", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(tt.ctx, defaultTestTimeout) - defer cancel() - - _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) - if err != nil { - t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) - } - }) - } -} - func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { const mdkey = "somedata" @@ -6153,67 +5592,6 @@ func (s) TestInterceptorCanAccessCallOptions(t *testing.T) { } } -func (s) TestCompressorRegister(t *testing.T) { - for _, e := range listTestEnv() { - testCompressorRegister(t, e) - } -} - -func testCompressorRegister(t *testing.T, e env) { - te := newTest(t, e) - te.clientCompression = false - te.serverCompression = false - te.clientUseCompression = true - - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - // Unary call - const argSize = 271828 - const respSize = 314159 - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: respSize, - Payload: payload, - } - ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - // Streaming RPC - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := tc.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - respParam := []*testpb.ResponseParameters{ - { - Size: 31415, - }, - } - payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) - if err != nil { - t.Fatal(err) - } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - 
ResponseParameters: respParam, - Payload: payload, - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = %v, want ", stream, err) - } -} - func (s) TestServeExitsWhenListenerClosed(t *testing.T) { ss := &stubserver.StubServer{ EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { @@ -6949,52 +6327,6 @@ func (s) TestClientCancellationPropagatesUnary(t *testing.T) { wg.Wait() } -type badGzipCompressor struct{} - -func (badGzipCompressor) Do(w io.Writer, p []byte) error { - buf := &bytes.Buffer{} - gzw := gzip.NewWriter(buf) - if _, err := gzw.Write(p); err != nil { - return err - } - err := gzw.Close() - bs := buf.Bytes() - if len(bs) >= 6 { - bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte - } - w.Write(bs) - return err -} - -func (badGzipCompressor) Type() string { - return "gzip" -} - -func (s) TestGzipBadChecksum(t *testing.T) { - ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil - }, - } - if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024)) - if err != nil { - t.Fatalf("Unexpected error from newPayload: %v", err) - } - if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil || - status.Code(err) != codes.Internal || - !strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) { - t.Errorf("ss.Client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum) - } -} - // When an RPC is canceled, it's possible that the last Recv() returns before // all call 
options' after are executed. func (s) TestCanceledRPCCallOptionRace(t *testing.T) { From 28b6bcf9baef5f8a9a410f43eff8ecc8ca10b497 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 28 Feb 2023 11:44:37 -0800 Subject: [PATCH 795/998] xds/xdsclient: improve failure mode behavior (gRFC A57) (#5996) --- xds/internal/internal.go | 3 + xds/internal/xdsclient/authority.go | 113 +++++-- xds/internal/xdsclient/authority_test.go | 305 ++++++++++++++++++ xds/internal/xdsclient/client_test.go | 2 +- .../xdsclient/e2e_test/cds_watchers_test.go | 11 +- .../xdsclient/e2e_test/eds_watchers_test.go | 11 +- .../xdsclient/e2e_test/lds_watchers_test.go | 11 +- .../xdsclient/e2e_test/rds_watchers_test.go | 10 +- .../xdsclient/transport/loadreport_test.go | 11 +- xds/internal/xdsclient/transport/transport.go | 112 ++++--- .../transport/transport_ack_nack_test.go | 27 +- .../transport/transport_backoff_test.go | 29 +- .../xdsclient/transport/transport_new_test.go | 32 +- .../transport/transport_resource_test.go | 9 +- .../xdsclient/transport/transport_test.go | 7 +- .../xdsresource/cluster_resource_type.go | 3 +- .../xdsresource/endpoints_resource_type.go | 3 +- xds/internal/xdsclient/xdsresource/errors.go | 3 + .../xdsresource/listener_resource_type.go | 3 +- .../xdsclient/xdsresource/resource_type.go | 10 + .../xdsresource/route_config_resource_type.go | 3 +- 21 files changed, 593 insertions(+), 125 deletions(-) create mode 100644 xds/internal/xdsclient/authority_test.go diff --git a/xds/internal/internal.go b/xds/internal/internal.go index 8df20a1f9c0a..ba6fa3d78807 100644 --- a/xds/internal/internal.go +++ b/xds/internal/internal.go @@ -80,3 +80,6 @@ func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { addr.BalancerAttributes = addr.BalancerAttributes.WithValue(localityKey, l) return addr } + +// ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. 
+var ResourceTypeMapForTesting map[string]interface{} diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 173edd9d1278..9a46161931be 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -36,10 +36,11 @@ import ( type watchState int const ( - watchStateStarted watchState = iota - watchStateRespReceived - watchStateTimeout - watchStateCanceled + watchStateStarted watchState = iota // Watch started, request not yet set. + watchStateRequested // Request sent for resource being watched. + watchStateReceived // Response received for resource being watched. + watchStateTimeout // Watch timer expired, no response. + watchStateCanceled // Watch cancelled. ) type resourceState struct { @@ -116,18 +117,34 @@ func newAuthority(args authorityArgs) (*authority, error) { } tr, err := transport.New(transport.Options{ - ServerCfg: *args.serverCfg, - UpdateHandler: ret.handleResourceUpdate, - StreamErrorHandler: ret.newConnectionError, - Logger: args.logger, - NodeProto: args.bootstrapCfg.NodeProto, + ServerCfg: *args.serverCfg, + OnRecvHandler: ret.handleResourceUpdate, + OnErrorHandler: ret.newConnectionError, + OnSendHandler: ret.transportOnSendHandler, + Logger: args.logger, + NodeProto: args.bootstrapCfg.NodeProto, }) if err != nil { return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) } ret.transport = tr return ret, nil +} +// transportOnSendHandler is called by the underlying transport when it sends a +// resource request successfully. Timers are activated for resources waiting for +// a response. +func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { + rType := a.resourceTypeGetter(u.URL) + // Resource type not found is not expected under normal circumstances, since + // the resource type url passed to the transport is determined by the authority. 
+ if rType == nil { + a.logger.Warningf("Unknown resource type url: %s.", u.URL) + return + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + a.startWatchTimersLocked(rType, u.ResourceNames) } func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error { @@ -152,8 +169,10 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // Cancel the expiry timer associated with the resource once a // response is received, irrespective of whether the update is a // good one or not. - state.wTimer.Stop() - state.wState = watchStateRespReceived + if state.wState == watchStateRequested { + state.wTimer.Stop() + state.wState = watchStateReceived + } if uErr.err != nil { // On error, keep previous version of the resource. But update @@ -295,17 +314,71 @@ func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, return ret, md, errRet } +// startWatchTimersLocked is invoked upon transport.OnSend() callback with resources +// requested on the underlying ADS stream. This satisfies the conditions to start +// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist] +// +// Caller must hold a.resourcesMu. +func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) { + resourceStates := a.resources[rType] + for _, resourceName := range resourceNames { + if state, ok := resourceStates[resourceName]; ok { + if state.wState != watchStateStarted { + continue + } + state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { + a.handleWatchTimerExpiry(rType, resourceName, state) + }) + state.wState = watchStateRequested + } + } +} + +// stopWatchTimersLocked is invoked upon connection errors to stops watch timers +// for resources that have been requested, but not yet responded to by the management +// server. +// +// Caller must hold a.resourcesMu. 
+func (a *authority) stopWatchTimersLocked() { + for _, rType := range a.resources { + for resourceName, state := range rType { + if state.wState != watchStateRequested { + continue + } + if !state.wTimer.Stop() { + // If the timer has already fired, it means that the timer watch expiry + // callback is blocked on the same lock that we currently hold. Don't change + // the watch state and instead let the watch expiry callback handle it. + a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName) + continue + } + state.wTimer = nil + state.wState = watchStateStarted + } + } +} + // newConnectionError is called by the underlying transport when it receives a // connection error. The error will be forwarded to all the resource watchers. func (a *authority) newConnectionError(err error) { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() - // For all resource types, for all resources within each resource type, and - // for all the watchers for every resource, propagate the connection error - // from the transport layer. + a.stopWatchTimersLocked() + + // We do not consider it an error if the ADS stream was closed after having received + // a response on the stream. This is because there are legitimate reasons why the server + // may need to close the stream during normal operations, such as needing to rebalance + // load or the underlying connection hitting its max connection age limit. + // See gRFC A57 for more details. + if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { + a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + return + } + for _, rType := range a.resources { for _, state := range rType { + // Propagate the connection error from the transport layer to all watchers. 
for watcher := range state.watchers { watcher := watcher a.serializer.Schedule(func(context.Context) { @@ -355,9 +428,6 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, wState: watchStateStarted, } - state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { - a.handleWatchTimerExpiry(rType, resourceName, state) - }) resources[resourceName] = state a.sendDiscoveryRequestLocked(rType, resources) } @@ -399,7 +469,14 @@ func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName a.resourcesMu.Lock() defer a.resourcesMu.Unlock() - if state.wState == watchStateCanceled { + switch state.wState { + case watchStateRequested: + // This is the only state where we need to handle the timer expiry by + // invoking appropriate watch callbacks. This is handled outside the switch. + case watchStateCanceled: + return + default: + a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName) return } diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go new file mode 100644 index 000000000000..fa2661fcfd8d --- /dev/null +++ b/xds/internal/xdsclient/authority_test.go @@ -0,0 +1,305 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package xdsclient + +import ( + "context" + "fmt" + "testing" + "time" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + _ "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" +) + +var emptyServerOpts = e2e.ManagementServerOptions{} + +type testResourceWatcher struct { + updateCh chan *xdsresource.ResourceData + errorCh chan error +} + +func (w *testResourceWatcher) OnUpdate(data xdsresource.ResourceData) { + select { + case w.updateCh <- &data: + default: + } +} + +func (w *testResourceWatcher) OnError(err error) { + select { + case w.errorCh <- err: + default: + } +} + +func (w *testResourceWatcher) OnResourceDoesNotExist() {} + +func newTestResourceWatcher() *testResourceWatcher { + return &testResourceWatcher{ + updateCh: make(chan *xdsresource.ResourceData), + errorCh: make(chan error), + } +} + +var ( + // Listener resource type implementation retrieved from the resource type map + // in the internal package, which is initialized when the individual resource + // types are created. + listenerResourceType = internal.ResourceTypeMapForTesting[version.V3ListenerURL].(xdsresource.Type) + rtRegistry = newResourceTypeRegistry() +) + +func init() { + // Simulating maybeRegister for listenerResourceType. The getter to this registry + // is passed to the authority for accessing the resource type. 
+ rtRegistry.types[listenerResourceType.TypeURL()] = listenerResourceType +} + +func setupTest(ctx context.Context, t *testing.T, opts e2e.ManagementServerOptions, watchExpiryTimeout time.Duration) (*authority, *e2e.ManagementServer, string) { + t.Helper() + nodeID := uuid.New().String() + ms, err := e2e.StartManagementServer(opts) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %q", err) + } + + a, err := newAuthority(authorityArgs{ + serverCfg: &bootstrap.ServerConfig{ + ServerURI: ms.Address, + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + CredsType: "insecure", + }, + bootstrapCfg: &bootstrap.Config{ + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + serializer: newCallbackSerializer(ctx), + resourceTypeGetter: rtRegistry.get, + watchExpiryTimeout: watchExpiryTimeout, + logger: nil, + }) + if err != nil { + t.Fatalf("Failed to create authority: %q", err) + } + return a, ms, nodeID +} + +// This tests verifies watch and timer state for the scenario where a watch for +// an LDS resource is registered and the management server sends an update the +// same resource. +func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setting up a mgmt server with a done channel when OnStreamRequest is invoked. 
+ serverOnReqDoneCh := make(chan struct{}) + serverOpt := e2e.ManagementServerOptions{ + OnStreamRequest: func(int64, *v3discoverypb.DiscoveryRequest) error { + select { + case serverOnReqDoneCh <- struct{}{}: + default: + } + return nil + }, + } + a, ms, nodeID := setupTest(ctx, t, serverOpt, defaultTestTimeout) + defer ms.Stop() + defer a.close() + + rn := "xdsclient-test-lds-resource" + w := newTestResourceWatcher() + cancelResource := a.watchResource(listenerResourceType, rn, w) + defer cancelResource() + + if err := compareWatchState(a, rn, watchStateStarted); err != nil { + t.Fatal(err) + } + + // This blocking read is to verify that the underlying transport has successfully + // sent the request to the server, hence the onSend callback was already invoked. + // onSend callback should transition the watchState to `watchStateRequested`. + <-serverOnReqDoneCh + if err := compareWatchState(a, rn, watchStateRequested); err != nil { + t.Fatal(err) + } + + // Updating mgmt server with the same lds resource. Blocking on watcher's update + // ch to verify the watch state transition to `watchStateReceived`. + if err := updateResourceInServer(ctx, ms, rn, nodeID); err != nil { + t.Fatalf("Failed to update server with resource: %q; err: %q", rn, err) + } + select { + case <-ctx.Done(): + t.Fatal("Test timed out before watcher received an update from server.") + case err := <-w.errorCh: + t.Fatalf("Watch got an unexpected error update: %q. Want valid updates.", err) + case <-w.updateCh: + // This means the OnUpdate callback was invoked and the watcher was notified. + } + if err := compareWatchState(a, rn, watchStateReceived); err != nil { + t.Fatal(err) + } +} + +// This tests the resource's watch state transition when the ADS stream is closed +// by the management server. 
After the test calls `watchResource` api to register +// a watch for a resource, it stops the management server, and verifies the resource's +// watch state transitions to `watchStateStarted` and timer ready to be restarted. +func (s) TestTimerAndWatchStateOnErrorCallback(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + a, ms, _ := setupTest(ctx, t, emptyServerOpts, defaultTestTimeout) + defer a.close() + + rn := "xdsclient-test-lds-resource" + w := newTestResourceWatcher() + cancelResource := a.watchResource(listenerResourceType, rn, w) + defer cancelResource() + + // Stopping the server and blocking on watcher's err channel to be notified. + // This means the onErr callback should be invoked which transitions the watch + // state to `watchStateStarted`. + ms.Stop() + + select { + case <-ctx.Done(): + t.Fatal("Test timed out before verifying error propagation.") + case err := <-w.errorCh: + if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { + t.Fatal("Connection error not propagated to watchers.") + } + } + + if err := compareWatchState(a, rn, watchStateStarted); err != nil { + t.Fatal(err) + } +} + +// This tests the case where the ADS stream breaks after successfully receiving +// a message on the stream. The test performs the following: +// - configures the management server with resourceA. +// - registers a watch for resourceA and verifies that the watcher's update +// callback is invoked. +// - registers a watch for resourceB and verifies that the watcher's update +// callback is not invoked. This is because the management server does not +// contain resourceB. +// - stops the management server to verify that the error propagated to the +// watcher is a connection error. This happens when the authority attempts +// to create a new stream. 
+func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Using a shorter expiry timeout to verify that the watch timeout was never fired. + a, ms, nodeID := setupTest(ctx, t, emptyServerOpts, defaultTestWatchExpiryTimeout) + defer a.close() + + nameA := "xdsclient-test-lds-resourceA" + watcherA := newTestResourceWatcher() + cancelA := a.watchResource(listenerResourceType, nameA, watcherA) + + if err := updateResourceInServer(ctx, ms, nameA, nodeID); err != nil { + t.Fatalf("Failed to update server with resource: %q; err: %q", nameA, err) + } + + // Blocking on resource A watcher's update channel to verify that there is + // more than one msg(s) received on the ADS stream. + select { + case <-ctx.Done(): + t.Fatal("Test timed out before watcher received the update.") + case err := <-watcherA.errorCh: + t.Fatalf("Watch got an unexpected error update: %q; want: valid update.", err) + case <-watcherA.updateCh: + } + + nameB := "xdsclient-test-lds-resourceB" + watcherB := newTestResourceWatcher() + cancelB := a.watchResource(listenerResourceType, nameB, watcherB) + defer cancelB() + + // Blocking on resource B watcher's error channel. This error should be due to + // connectivity issue when reconnecting because the mgmt server had already been + // stopped. All other errors or an update will fail the test. + cancelA() + ms.Stop() + select { + case <-ctx.Done(): + t.Fatal("Test timed out before mgmt server got the request.") + case u := <-watcherB.updateCh: + t.Fatalf("Watch got an unexpected resource update: %v.", u) + case gotErr := <-watcherB.errorCh: + wantErr := xdsresource.ErrorTypeConnection + if xdsresource.ErrType(gotErr) != wantErr { + t.Fatalf("Watch got an unexpected error:%q. Want: %q.", gotErr, wantErr) + } + } + + // Since there was already a response on the stream, the timer for resource B + // should not fire. 
If the timer did fire, watch state would be in `watchStateTimeout`. + <-time.After(defaultTestWatchExpiryTimeout) + if err := compareWatchState(a, nameB, watchStateStarted); err != nil { + t.Fatalf("Invalid watch state: %v.", err) + } + +} + +func compareWatchState(a *authority, rn string, wantState watchState) error { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + gotState := a.resources[listenerResourceType][rn].wState + if gotState != wantState { + return fmt.Errorf("%v. Want: %v", gotState, wantState) + } + + wTimer := a.resources[listenerResourceType][rn].wTimer + switch gotState { + case watchStateRequested: + if wTimer == nil { + return fmt.Errorf("got nil timer, want active timer") + } + case watchStateStarted: + if wTimer != nil { + return fmt.Errorf("got active timer, want nil timer") + } + default: + if wTimer.Stop() { + // This means that the timer was running but could be successfully stopped. + return fmt.Errorf("got active timer, want stopped timer") + } + } + return nil +} + +func updateResourceInServer(ctx context.Context, ms *e2e.ManagementServer, rn string, nID string) error { + l := e2e.DefaultClientListener(rn, "new-rds-resource") + resources := e2e.UpdateOptions{ + NodeID: nID, + Listeners: []*v3listenerpb.Listener{l}, + SkipValidation: true, + } + return ms.Update(ctx, resources) +} diff --git a/xds/internal/xdsclient/client_test.go b/xds/internal/xdsclient/client_test.go index 272292d1f520..f32688850e4f 100644 --- a/xds/internal/xdsclient/client_test.go +++ b/xds/internal/xdsclient/client_test.go @@ -34,7 +34,7 @@ func Test(t *testing.T) { } const ( - defaultTestWatchExpiryTimeout = 500 * time.Millisecond + defaultTestWatchExpiryTimeout = 100 * time.Millisecond defaultTestTimeout = 5 * time.Second defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. 
) diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go index 4f2d9a0b381a..88306b7be08d 100644 --- a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/cds_watchers_test.go @@ -527,13 +527,16 @@ func (s) TestCDSWatch_ResourceCaching(t *testing.T) { // verifies that the watch callback is invoked with an error once the // watchExpiryTimer fires. func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { - // No need to spin up a management server since we don't want the client to - // receive a response for the watch being registered by the test. + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() - // Create an xDS client talking to a non-existent management server. client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", + ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, NodeProto: &v3corepb.Node{}, diff --git a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go index 3b5e7c4b3fdf..fac63379166e 100644 --- a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/eds_watchers_test.go @@ -584,13 +584,16 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { // verifies that the watch callback is invoked with an error once the // watchExpiryTimer fires. func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { - // No need to spin up a management server since we don't want the client to - // receive a response for the watch being registered by the test. 
+ overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() - // Create an xDS client talking to a non-existent management server. client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", + ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, NodeProto: &v3corepb.Node{}, diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go index 0da626dda3d7..07eb066f5d00 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/lds_watchers_test.go @@ -576,13 +576,16 @@ func (s) TestLDSWatch_ResourceCaching(t *testing.T) { // verifies that the watch callback is invoked with an error once the // watchExpiryTimer fires. func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { - // No need to spin up a management server since we don't want the client to - // receive a response for the watch being registered by the test. + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() - // Create an xDS client talking to a non-existent management server. 
client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", + ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, NodeProto: &v3corepb.Node{}, diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go index 6bf3f7a27c5e..93425a1f74e4 100644 --- a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go +++ b/xds/internal/xdsclient/e2e_test/rds_watchers_test.go @@ -617,13 +617,17 @@ func (s) TestRDSWatch_ResourceCaching(t *testing.T) { // verifies that the watch callback is invoked with an error once the // watchExpiryTimer fires. func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { - // No need to spin up a management server since we don't want the client to - // receive a response for the watch being registered by the test. + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() // Create an xDS client talking to a non-existent management server. client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy management server address", + ServerURI: mgmtServer.Address, Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, NodeProto: &v3corepb.Node{}, diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go index 3babfe3fbd28..91fef7ae0991 100644 --- a/xds/internal/xdsclient/transport/loadreport_test.go +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -53,11 +53,12 @@ func (s) TestReportLoad(t *testing.T) { // Create a transport to the fake management server. 
tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, - NodeProto: nodeProto, - UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No ADS validation. - StreamErrorHandler: func(error) {}, // No ADS stream error handling. - Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + ServerCfg: serverCfg, + NodeProto: nodeProto, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No ADS validation. + OnErrorHandler: func(error) {}, // No ADS stream error handling. + OnSendHandler: func(*transport.ResourceSendInfo) {}, // No ADS stream update handling. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 1a0fed7a4bd9..6dea512abf94 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -63,17 +63,18 @@ type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesC // protocol version. type Transport struct { // These fields are initialized at creation time and are read-only afterwards. - cc *grpc.ClientConn // ClientConn to the mangement server. - serverURI string // URI of the management server. - updateHandler UpdateHandlerFunc // Resource update handler. xDS data model layer. - adsStreamErrHandler func(error) // To report underlying stream errors. - lrsStore *load.Store // Store returned to user for pushing loads. - backoff func(int) time.Duration // Backoff after stream failures. - nodeProto *v3corepb.Node // Identifies the gRPC application. - logger *grpclog.PrefixLogger // Prefix logger for transport logs. - adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. - adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. 
- lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. + cc *grpc.ClientConn // ClientConn to the mangement server. + serverURI string // URI of the management server. + onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer. + onErrorHandler func(error) // To report underlying stream errors. + onSendHandler OnSendHandlerFunc // To report resources requested on ADS stream. + lrsStore *load.Store // Store returned to user for pushing loads. + backoff func(int) time.Duration // Backoff after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + logger *grpclog.PrefixLogger // Prefix logger for transport logs. + adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. + adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. + lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. // These channels enable synchronization amongst the different goroutines // spawned by the transport, and between asynchorous events resulting from @@ -102,7 +103,7 @@ type Transport struct { lrsRefCount int // Reference count on the load store. } -// UpdateHandlerFunc is the implementation at the xDS data model layer, which +// OnRecvHandlerFunc is the implementation at the xDS data model layer, which // determines if the configuration received from the management server can be // applied locally or not. // @@ -111,7 +112,11 @@ type Transport struct { // cause the transport layer to send an ACK to the management server. A non-nil // error is returned from this function when the data model layer believes // otherwise, and this will cause the transport layer to send a NACK. -type UpdateHandlerFunc func(update ResourceUpdate) error +type OnRecvHandlerFunc func(update ResourceUpdate) error + +// OnSendHandlerFunc is the implementation at the authority, which handles state +// changes for the resource watch and stop watch timers accordingly. 
+type OnSendHandlerFunc func(update *ResourceSendInfo) // ResourceUpdate is a representation of the configuration update received from // the management server. It only contains fields which are useful to the data @@ -131,17 +136,27 @@ type Options struct { // ServerCfg contains all the configuration required to connect to the xDS // management server. ServerCfg bootstrap.ServerConfig - // UpdateHandler is the component which makes ACK/NACK decisions based on + // OnRecvHandler is the component which makes ACK/NACK decisions based on // the received resources. // // Invoked inline and implementations must not block. - UpdateHandler UpdateHandlerFunc - // StreamErrorHandler provides a way for the transport layer to report + OnRecvHandler OnRecvHandlerFunc + // OnErrorHandler provides a way for the transport layer to report // underlying stream errors. These can be bubbled all the way up to the user // of the xdsClient. // // Invoked inline and implementations must not block. - StreamErrorHandler func(error) + OnErrorHandler func(error) + // OnSendHandler provides a way for the transport layer to report underlying + // resource requests sent on the stream. However, Send() on the ADS stream will + // return successfully as long as: + // 1. there is enough flow control quota to send the message. + // 2. the message is added to the send buffer. + // However, the connection may fail after the callback is invoked and before + // the message is actually sent on the wire. This is accepted. + // + // Invoked inline and implementations must not block. + OnSendHandler func(*ResourceSendInfo) // Backoff controls the amount of time to backoff before recreating failed // ADS streams. If unspecified, a default exponential backoff implementation // is used. 
For more details, see: @@ -164,10 +179,12 @@ func New(opts Options) (*Transport, error) { return nil, errors.New("missing server URI when creating a new transport") case opts.ServerCfg.Creds == nil: return nil, errors.New("missing credentials when creating a new transport") - case opts.UpdateHandler == nil: - return nil, errors.New("missing update handler when creating a new transport") - case opts.StreamErrorHandler == nil: - return nil, errors.New("missing stream error handler when creating a new transport") + case opts.OnRecvHandler == nil: + return nil, errors.New("missing OnRecv callback handler when creating a new transport") + case opts.OnErrorHandler == nil: + return nil, errors.New("missing OnError callback handler when creating a new transport") + case opts.OnSendHandler == nil: + return nil, errors.New("missing OnSend callback handler when creating a new transport") } // Dial the xDS management with the passed in credentials. @@ -191,14 +208,15 @@ func New(opts Options) (*Transport, error) { boff = backoff.DefaultExponential.Backoff } ret := &Transport{ - cc: cc, - serverURI: opts.ServerCfg.ServerURI, - updateHandler: opts.UpdateHandler, - adsStreamErrHandler: opts.StreamErrorHandler, - lrsStore: load.NewStore(), - backoff: boff, - nodeProto: opts.NodeProto, - logger: opts.Logger, + cc: cc, + serverURI: opts.ServerCfg.ServerURI, + onRecvHandler: opts.OnRecvHandler, + onErrorHandler: opts.OnErrorHandler, + onSendHandler: opts.OnSendHandler, + lrsStore: load.NewStore(), + backoff: boff, + nodeProto: opts.NodeProto, + logger: opts.Logger, adsStreamCh: make(chan adsStream, 1), adsRequestCh: buffer.NewUnbounded(), @@ -247,13 +265,16 @@ func (t *Transport) SendRequest(url string, resources []string) { func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { // The transport retries the stream with an exponential backoff whenever the - // stream breaks. 
But if the channel is broken, we don't want the backoff - // logic to continuously retry the stream. Setting WaitForReady() blocks the - // stream creation until the channel is READY. - // - // TODO(easwars): Make changes required to comply with A57: - // https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true)) + // stream breaks without ever having seen a response. + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) +} + +// ResourceSendInfo wraps the names and url of resources sent to the management +// server. This is used by the `authority` type to start/stop the watch timer +// associated with every resource in the update. +type ResourceSendInfo struct { + ResourceNames []string + URL string } func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { @@ -277,6 +298,7 @@ func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, reso } else { t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) } + t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) return nil } @@ -301,9 +323,6 @@ func (t *Transport) adsRunner(ctx context.Context) { go t.send(ctx) - // TODO: start a goroutine monitoring ClientConn's connectivity state, and - // report error (and log) when stats is transient failure. 
- backoffAttempt := 0 backoffTimer := time.NewTimer(0) for ctx.Err() == nil { @@ -319,7 +338,7 @@ func (t *Transport) adsRunner(ctx context.Context) { resetBackoff := func() bool { stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) if err != nil { - t.adsStreamErrHandler(err) + t.onErrorHandler(err) t.logger.Warningf("Creating new ADS stream failed: %v", err) return false } @@ -439,13 +458,22 @@ func (t *Transport) recv(stream adsStream) bool { for { resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) if err != nil { - t.adsStreamErrHandler(err) + // Note that we do not consider it an error if the ADS stream was closed + // after having received a response on the stream. This is because there + // are legitimate reasons why the server may need to close the stream during + // normal operations, such as needing to rebalance load or the underlying + // connection hitting its max connection age limit. + // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). + if msgReceived { + err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) + } + t.onErrorHandler(err) t.logger.Warningf("ADS stream closed: %v", err) return msgReceived } msgReceived = true - err = t.updateHandler(ResourceUpdate{ + err = t.onRecvHandler(ResourceUpdate{ Resources: resources, URL: url, Version: rVersion, diff --git a/xds/internal/xdsclient/transport/transport_ack_nack_test.go b/xds/internal/xdsclient/transport/transport_ack_nack_test.go index 90f3ec1983be..acaec84229bf 100644 --- a/xds/internal/xdsclient/transport/transport_ack_nack_test.go +++ b/xds/internal/xdsclient/transport/transport_ack_nack_test.go @@ -144,10 +144,11 @@ func (s) TestSimpleAckAndNack(t *testing.T) { // Create a new transport. 
tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, - UpdateHandler: dataModelValidator, - StreamErrorHandler: func(err error) {}, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerCfg: serverCfg, + OnRecvHandler: dataModelValidator, + OnErrorHandler: func(err error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -330,10 +331,11 @@ func (s) TestInvalidFirstResponse(t *testing.T) { // Create a new transport. tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, - UpdateHandler: dataModelValidator, - StreamErrorHandler: func(err error) {}, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerCfg: serverCfg, + NodeProto: &v3corepb.Node{Id: nodeID}, + OnRecvHandler: dataModelValidator, + OnErrorHandler: func(err error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -458,10 +460,11 @@ func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { // Create a new transport. 
tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, - UpdateHandler: dataModelValidator, - StreamErrorHandler: func(err error) {}, - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerCfg: serverCfg, + NodeProto: &v3corepb.Node{Id: nodeID}, + OnRecvHandler: dataModelValidator, + OnErrorHandler: func(err error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) diff --git a/xds/internal/xdsclient/transport/transport_backoff_test.go b/xds/internal/xdsclient/transport/transport_backoff_test.go index a7726bbc509a..34ecc5da9782 100644 --- a/xds/internal/xdsclient/transport/transport_backoff_test.go +++ b/xds/internal/xdsclient/transport/transport_backoff_test.go @@ -110,15 +110,16 @@ func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { // we can pass a no-op data model layer implementation. tr, err := transport.New(transport.Options{ ServerCfg: serverCfg, - UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. - StreamErrorHandler: func(err error) { + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + OnErrorHandler: func(err error) { select { case streamErrCh <- err: default: } }, - Backoff: transportBackoff, - NodeProto: &v3corepb.Node{Id: nodeID}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + Backoff: transportBackoff, + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -277,15 +278,16 @@ func (s) TestTransport_RetriesAfterBrokenStream(t *testing.T) { // we can pass a no-op data model layer implementation. tr, err := transport.New(transport.Options{ ServerCfg: serverCfg, - UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. 
- StreamErrorHandler: func(err error) { + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + OnErrorHandler: func(err error) { select { case streamErrCh <- err: default: } }, - Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. - NodeProto: &v3corepb.Node{Id: nodeID}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) @@ -414,11 +416,12 @@ func (s) TestTransport_ResourceRequestedBeforeStreamCreation(t *testing.T) { // Create a new transport. Since we are only testing backoff behavior here, // we can pass a no-op data model layer implementation. tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, - UpdateHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. - StreamErrorHandler: func(error) {}, // No stream error handling. - Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. - NodeProto: &v3corepb.Node{Id: nodeID}, + ServerCfg: serverCfg, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + OnErrorHandler: func(error) {}, // No stream error handling. + OnSendHandler: func(*transport.ResourceSendInfo) {}, // No on send handler + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. 
+ NodeProto: &v3corepb.Node{Id: nodeID}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) diff --git a/xds/internal/xdsclient/transport/transport_new_test.go b/xds/internal/xdsclient/transport/transport_new_test.go index 5cbcb5da5d55..a4c0da03d633 100644 --- a/xds/internal/xdsclient/transport/transport_new_test.go +++ b/xds/internal/xdsclient/transport/transport_new_test.go @@ -48,26 +48,41 @@ func (s) TestNew(t *testing.T) { wantErrStr: "missing credentials when creating a new transport", }, { - name: "missing update handler", + name: "missing onRecv handler", opts: transport.Options{ServerCfg: bootstrap.ServerConfig{ ServerURI: "server-address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, NodeProto: &v3corepb.Node{}, }, - wantErrStr: "missing update handler when creating a new transport", + wantErrStr: "missing OnRecv callback handler when creating a new transport", }, { - name: "missing stream error handler", + name: "missing onError handler", opts: transport.Options{ ServerCfg: bootstrap.ServerConfig{ ServerURI: "server-address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, NodeProto: &v3corepb.Node{}, - UpdateHandler: func(transport.ResourceUpdate) error { return nil }, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, + OnSendHandler: func(*transport.ResourceSendInfo) {}, }, - wantErrStr: "missing stream error handler when creating a new transport", + wantErrStr: "missing OnError callback handler when creating a new transport", + }, + + { + name: "missing onSend handler", + opts: transport.Options{ + ServerCfg: bootstrap.ServerConfig{ + ServerURI: "server-address", + Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + }, + NodeProto: &v3corepb.Node{}, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, + OnErrorHandler: func(error) {}, + }, + wantErrStr: "missing OnSend callback handler when creating a new transport", }, { name: "happy 
case", @@ -76,9 +91,10 @@ func (s) TestNew(t *testing.T) { ServerURI: "server-address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, - NodeProto: &v3corepb.Node{}, - UpdateHandler: func(transport.ResourceUpdate) error { return nil }, - StreamErrorHandler: func(error) {}, + NodeProto: &v3corepb.Node{}, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, + OnErrorHandler: func(error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, }, }, } diff --git a/xds/internal/xdsclient/transport/transport_resource_test.go b/xds/internal/xdsclient/transport/transport_resource_test.go index eb050f639f58..ed07e999fc7d 100644 --- a/xds/internal/xdsclient/transport/transport_resource_test.go +++ b/xds/internal/xdsclient/transport/transport_resource_test.go @@ -188,7 +188,7 @@ func (s) TestHandleResponseFromManagementServer(t *testing.T) { tr, err := transport.New(transport.Options{ ServerCfg: serverCfg, // No validation. Simply push received resources on a channel. - UpdateHandler: func(update transport.ResourceUpdate) error { + OnRecvHandler: func(update transport.ResourceUpdate) error { resourcesCh.Send(&resourcesWithTypeURL{ resources: update.Resources, url: update.URL, @@ -196,9 +196,10 @@ func (s) TestHandleResponseFromManagementServer(t *testing.T) { }) return nil }, - StreamErrorHandler: func(error) {}, // No stream error handling. - Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. - NodeProto: &v3corepb.Node{Id: uuid.New().String()}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, // No onSend handling. + OnErrorHandler: func(error) {}, // No stream error handling. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. 
+ NodeProto: &v3corepb.Node{Id: uuid.New().String()}, }) if err != nil { t.Fatalf("Failed to create xDS transport: %v", err) diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go index 80b44aa3cc0f..de8d665080b6 100644 --- a/xds/internal/xdsclient/transport/transport_test.go +++ b/xds/internal/xdsclient/transport/transport_test.go @@ -52,9 +52,10 @@ func (s) TestNewWithGRPCDial(t *testing.T) { ServerURI: "server-address", Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), }, - NodeProto: &v3corepb.Node{}, - UpdateHandler: func(ResourceUpdate) error { return nil }, - StreamErrorHandler: func(error) {}, + NodeProto: &v3corepb.Node{}, + OnRecvHandler: func(ResourceUpdate) error { return nil }, + OnErrorHandler: func(error) {}, + OnSendHandler: func(*ResourceSendInfo) {}, } c, err := New(opts) if err != nil { diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 433cfcacb69e..0c9b4473ce2f 100644 --- a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -19,6 +19,7 @@ package xdsresource import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -31,7 +32,7 @@ var ( // Singleton instantiation of the resource type implementation. 
clusterType = clusterResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + typeURL: version.V3ClusterURL, typeEnum: ClusterResource, allResourcesRequiredInSotW: true, }, diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index b778a4a5340c..caa728376099 100644 --- a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -19,6 +19,7 @@ package xdsresource import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -31,7 +32,7 @@ var ( // Singleton instantiation of the resource type implementation. endpointsType = endpointsResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + typeURL: version.V3EndpointsURL, typeEnum: EndpointsResource, allResourcesRequiredInSotW: false, }, diff --git a/xds/internal/xdsclient/xdsresource/errors.go b/xds/internal/xdsclient/xdsresource/errors.go index 2d1b179db130..00ef9310481a 100644 --- a/xds/internal/xdsclient/xdsresource/errors.go +++ b/xds/internal/xdsclient/xdsresource/errors.go @@ -37,6 +37,9 @@ const ( // ErrorTypeResourceTypeUnsupported indicates the receipt of a message from // the management server with resources of an unsupported resource type. ErrorTypeResourceTypeUnsupported + // ErrTypeStreamFailedAfterRecv indicates an ADS stream error, after + // successful receipt of at least one message from the server. 
+ ErrTypeStreamFailedAfterRecv ) type xdsClientError struct { diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 81ba72fd0a23..2e230d857bd4 100644 --- a/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -22,6 +22,7 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -34,7 +35,7 @@ var ( // Singleton instantiation of the resource type implementation. listenerType = listenerResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + typeURL: version.V3ListenerURL, typeEnum: ListenerResource, allResourcesRequiredInSotW: true, }, diff --git a/xds/internal/xdsclient/xdsresource/resource_type.go b/xds/internal/xdsclient/xdsresource/resource_type.go index 5a1714660bf7..8d07b2bb8668 100644 --- a/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/xds/internal/xdsclient/xdsresource/resource_type.go @@ -25,10 +25,20 @@ package xdsresource import ( + "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) +func init() { + internal.ResourceTypeMapForTesting = make(map[string]interface{}) + internal.ResourceTypeMapForTesting[version.V3ListenerURL] = listenerType + internal.ResourceTypeMapForTesting[version.V3RouteConfigURL] = routeConfigType + internal.ResourceTypeMapForTesting[version.V3ClusterURL] = clusterType + internal.ResourceTypeMapForTesting[version.V3EndpointsURL] = endpointsType +} + // Producer contains a single method to discover resource configuration from a // 
remote management server using xDS APIs. // diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 70bbbf387593..448d122d49d0 100644 --- a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -19,6 +19,7 @@ package xdsresource import ( "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -31,7 +32,7 @@ var ( // Singleton instantiation of the resource type implementation. routeConfigType = routeConfigResourceType{ resourceTypeState: resourceTypeState{ - typeURL: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + typeURL: version.V3RouteConfigURL, typeEnum: RouteConfigResource, allResourcesRequiredInSotW: false, }, From 832ecc257487a70e4d0fc1108fa9bb3c76fe3d86 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 28 Feb 2023 13:29:59 -0800 Subject: [PATCH 796/998] channelz: use protocmp.Transform() to compare protos (#6065) --- channelz/service/service_sktopt_test.go | 8 ++++---- channelz/service/service_test.go | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/channelz/service/service_sktopt_test.go b/channelz/service/service_sktopt_test.go index 7302d9105a4c..8ec5341cb030 100644 --- a/channelz/service/service_sktopt_test.go +++ b/channelz/service/service_sktopt_test.go @@ -32,13 +32,13 @@ import ( "testing" "github.com/golang/protobuf/ptypes" - durpb "github.com/golang/protobuf/ptypes/duration" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/sys/unix" - channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" "google.golang.org/protobuf/testing/protocmp" + + 
durpb "github.com/golang/protobuf/ptypes/duration" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) func init() { @@ -153,7 +153,7 @@ func (s) TestGetSocketOptions(t *testing.T) { for i, s := range ss { resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i].Int()}) got, want := resp.GetSocket().GetRef(), &channelzpb.SocketRef{SocketId: ids[i].Int(), Name: strconv.Itoa(i)} - if !cmp.Equal(got, want, cmpopts.IgnoreUnexported(channelzpb.SocketRef{})) { + if !cmp.Equal(got, want, protocmp.Transform()) { t.Fatalf("resp.GetSocket() returned metrics.GetRef() = %#v, want %#v", got, want) } socket, err := socketProtoToStruct(resp.GetSocket()) diff --git a/channelz/service/service_test.go b/channelz/service/service_test.go index e03832a401b9..94ca6b8b35b7 100644 --- a/channelz/service/service_test.go +++ b/channelz/service/service_test.go @@ -30,13 +30,13 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/protobuf/testing/protocmp" + + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) func init() { @@ -748,7 +748,7 @@ func (s) TestGetSocket(t *testing.T) { for i, s := range ss { resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i].Int()}) got, want := resp.GetSocket().GetRef(), &channelzpb.SocketRef{SocketId: ids[i].Int(), Name: strconv.Itoa(i)} - if !cmp.Equal(got, want, cmpopts.IgnoreUnexported(channelzpb.SocketRef{})) { + if !cmp.Equal(got, want, protocmp.Transform()) { t.Fatalf("resp.GetSocket() returned metrics.GetRef() = %#v, want %#v", got, want) } socket, err := 
socketProtoToStruct(resp.GetSocket()) From a1693ec5d2b165a14705859618e70895315967f9 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 28 Feb 2023 15:04:46 -0800 Subject: [PATCH 797/998] fakeserver: remove ADS and LRS v2 support (#6068) --- xds/internal/testutils/fakeserver/server.go | 102 -------------------- 1 file changed, 102 deletions(-) diff --git a/xds/internal/testutils/fakeserver/server.go b/xds/internal/testutils/fakeserver/server.go index 1d7e6b482228..8030f923428c 100644 --- a/xds/internal/testutils/fakeserver/server.go +++ b/xds/internal/testutils/fakeserver/server.go @@ -35,12 +35,8 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" - v2discoverypb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - v2lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - v2lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" ) @@ -100,9 +96,7 @@ type Server struct { Address string // The underlying fake implementation of xDS and LRS. 
- xdsV2 *xdsServerV2 xdsV3 *xdsServerV3 - lrsV2 *lrsServerV2 lrsV3 *lrsServerV3 } @@ -139,9 +133,7 @@ func StartServer() (*Server, func(), error) { LRSStreamCloseChan: testutils.NewChannel(), Address: lis.Addr().String(), } - s.xdsV2 = &xdsServerV2{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} s.xdsV3 = &xdsServerV3{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} - s.lrsV2 = &lrsServerV2{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan, streamOpenChan: s.LRSStreamOpenChan, streamCloseChan: s.LRSStreamCloseChan} s.lrsV3 = &lrsServerV3{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan, streamOpenChan: s.LRSStreamOpenChan, streamCloseChan: s.LRSStreamCloseChan} wp := &wrappedListener{ Listener: lis, @@ -149,8 +141,6 @@ func StartServer() (*Server, func(), error) { } server := grpc.NewServer() - v2lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsV2) - v2discoverygrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsV2) v3lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsV3) v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsV3) go server.Serve(wp) @@ -158,57 +148,6 @@ func StartServer() (*Server, func(), error) { return s, func() { server.Stop() }, nil } -type xdsServerV2 struct { - reqChan *testutils.Channel - respChan chan *Response -} - -func (xdsS *xdsServerV2) StreamAggregatedResources(s v2discoverygrpc.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { - errCh := make(chan error, 2) - go func() { - for { - req, err := s.Recv() - if err != nil { - errCh <- err - return - } - xdsS.reqChan.Send(&Request{req, err}) - } - }() - go func() { - var retErr error - defer func() { - errCh <- retErr - }() - - for { - select { - case r := <-xdsS.respChan: - if r.Err != nil { - retErr = r.Err - return - } - if err := s.Send(r.Resp.(*v2discoverypb.DiscoveryResponse)); err != nil { - retErr = err - return - } - case <-s.Context().Done(): - retErr = s.Context().Err() - return - } - } - }() - - if 
err := <-errCh; err != nil { - return err - } - return nil -} - -func (xdsS *xdsServerV2) DeltaAggregatedResources(v2discoverygrpc.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { - return status.Error(codes.Unimplemented, "") -} - type xdsServerV3 struct { reqChan *testutils.Channel respChan chan *Response @@ -260,47 +199,6 @@ func (xdsS *xdsServerV3) DeltaAggregatedResources(v3discoverygrpc.AggregatedDisc return status.Error(codes.Unimplemented, "") } -type lrsServerV2 struct { - reqChan *testutils.Channel - respChan chan *Response - streamOpenChan *testutils.Channel - streamCloseChan *testutils.Channel -} - -func (lrsS *lrsServerV2) StreamLoadStats(s v2lrsgrpc.LoadReportingService_StreamLoadStatsServer) error { - lrsS.streamOpenChan.Send(nil) - defer lrsS.streamCloseChan.Send(nil) - - req, err := s.Recv() - lrsS.reqChan.Send(&Request{req, err}) - if err != nil { - return err - } - - select { - case r := <-lrsS.respChan: - if r.Err != nil { - return r.Err - } - if err := s.Send(r.Resp.(*v2lrspb.LoadStatsResponse)); err != nil { - return err - } - case <-s.Context().Done(): - return s.Context().Err() - } - - for { - req, err := s.Recv() - lrsS.reqChan.Send(&Request{req, err}) - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } -} - type lrsServerV3 struct { reqChan *testutils.Channel respChan chan *Response From 8ba23be9613c672d40ae261d2a1335d639bdd59b Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 1 Mar 2023 09:58:20 -0800 Subject: [PATCH 798/998] cmd/protoc-gen-go-grpc: bump -version to 1.3.0 for release (#6064) --- balancer/grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- .../grpclb/grpc_lb_v1/load_balancer_grpc.pb.go | 4 ++-- binarylog/grpc_binarylog_v1/binarylog.pb.go | 2 +- channelz/grpc_channelz_v1/channelz.pb.go | 2 +- channelz/grpc_channelz_v1/channelz_grpc.pb.go | 4 ++-- cmd/protoc-gen-go-grpc/main.go | 2 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- 
.../alts/internal/proto/grpc_gcp/handshaker.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker_grpc.pb.go | 4 ++-- .../grpc_gcp/transport_security_common.pb.go | 2 +- examples/features/proto/echo/echo.pb.go | 2 +- examples/features/proto/echo/echo_grpc.pb.go | 4 ++-- examples/helloworld/helloworld/helloworld.pb.go | 2 +- .../helloworld/helloworld/helloworld_grpc.pb.go | 4 ++-- examples/route_guide/routeguide/route_guide.pb.go | 2 +- .../route_guide/routeguide/route_guide_grpc.pb.go | 4 ++-- health/grpc_health_v1/health.pb.go | 2 +- health/grpc_health_v1/health_grpc.pb.go | 4 ++-- internal/proto/grpc_lookup_v1/rls.pb.go | 2 +- internal/proto/grpc_lookup_v1/rls_config.pb.go | 2 +- internal/proto/grpc_lookup_v1/rls_grpc.pb.go | 4 ++-- interop/grpc_testing/benchmark_service.pb.go | 2 +- interop/grpc_testing/benchmark_service_grpc.pb.go | 4 ++-- interop/grpc_testing/control.pb.go | 2 +- interop/grpc_testing/core/stats.pb.go | 2 +- interop/grpc_testing/empty.pb.go | 2 +- interop/grpc_testing/messages.pb.go | 2 +- interop/grpc_testing/payloads.pb.go | 2 +- .../report_qps_scenario_service.pb.go | 2 +- .../report_qps_scenario_service_grpc.pb.go | 4 ++-- interop/grpc_testing/stats.pb.go | 2 +- interop/grpc_testing/test.pb.go | 2 +- interop/grpc_testing/test_grpc.pb.go | 4 ++-- interop/grpc_testing/worker_service.pb.go | 2 +- interop/grpc_testing/worker_service_grpc.pb.go | 4 ++-- profiling/proto/service.pb.go | 2 +- profiling/proto/service_grpc.pb.go | 4 ++-- reflection/grpc_reflection_v1/reflection.pb.go | 2 +- .../grpc_reflection_v1/reflection_grpc.pb.go | 4 ++-- .../grpc_reflection_v1alpha/reflection.pb.go | 2 +- .../grpc_reflection_v1alpha/reflection_grpc.pb.go | 4 ++-- reflection/grpc_testing/proto2.pb.go | 2 +- reflection/grpc_testing/proto2_ext.pb.go | 2 +- reflection/grpc_testing/proto2_ext2.pb.go | 2 +- reflection/grpc_testing/test.pb.go | 2 +- reflection/grpc_testing/test_grpc.pb.go | 4 ++-- stress/grpc_testing/metrics.pb.go | 2 +- 
stress/grpc_testing/metrics_grpc.pb.go | 4 ++-- test/codec_perf/perf.pb.go | 2 +- test/grpc_testing/test.pb.go | 2 +- test/grpc_testing/test_grpc.pb.go | 4 ++-- vet.sh | 15 +++------------ 52 files changed, 72 insertions(+), 81 deletions(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 1205aff23f79..6620ed11c786 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -20,7 +20,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 93e5e133b561..00d0954b38a5 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 diff --git a/binarylog/grpc_binarylog_v1/binarylog.pb.go b/binarylog/grpc_binarylog_v1/binarylog.pb.go index 66d141fce707..8cd89dab9047 100644 --- a/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index 0bf728630e6b..3aa7c4c59f70 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -22,7 +22,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/channelz/v1/channelz.proto package grpc_channelz_v1 diff --git a/channelz/grpc_channelz_v1/channelz_grpc.pb.go b/channelz/grpc_channelz_v1/channelz_grpc.pb.go index 04c9b1e52b5f..070f787ca527 100644 --- a/channelz/grpc_channelz_v1/channelz_grpc.pb.go +++ b/channelz/grpc_channelz_v1/channelz_grpc.pb.go @@ -21,8 +21,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/channelz/v1/channelz.proto package grpc_channelz_v1 diff --git a/cmd/protoc-gen-go-grpc/main.go b/cmd/protoc-gen-go-grpc/main.go index e1943024ac53..340eaf3ee7bf 100644 --- a/cmd/protoc-gen-go-grpc/main.go +++ b/cmd/protoc-gen-go-grpc/main.go @@ -41,7 +41,7 @@ import ( "google.golang.org/protobuf/types/pluginpb" ) -const version = "1.2.0" +const version = "1.3.0" var requireUnimplemented *bool diff --git a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 1a40e17e8d3f..16e814b9b910 100644 --- a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/gcp/altscontext.proto package grpc_gcp diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 50eefa53830e..258a130a9d04 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/gcp/handshaker.proto package grpc_gcp diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index 46d320c9760a..39ecccf878ee 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/gcp/handshaker.proto package grpc_gcp diff --git a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index b07412f18585..cc9a27059964 100644 --- a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto package grpc_gcp diff --git a/examples/features/proto/echo/echo.pb.go b/examples/features/proto/echo/echo.pb.go index f30af166bd1c..8b4c26ad7eca 100644 --- a/examples/features/proto/echo/echo.pb.go +++ b/examples/features/proto/echo/echo.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: examples/features/proto/echo/echo.proto package echo diff --git a/examples/features/proto/echo/echo_grpc.pb.go b/examples/features/proto/echo/echo_grpc.pb.go index e39c6fd16168..7efd51403fb9 100644 --- a/examples/features/proto/echo/echo_grpc.pb.go +++ b/examples/features/proto/echo/echo_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: examples/features/proto/echo/echo.proto package echo diff --git a/examples/helloworld/helloworld/helloworld.pb.go b/examples/helloworld/helloworld/helloworld.pb.go index d75336b204e2..7142f9bfc3ef 100644 --- a/examples/helloworld/helloworld/helloworld.pb.go +++ b/examples/helloworld/helloworld/helloworld.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: examples/helloworld/helloworld/helloworld.proto package helloworld diff --git a/examples/helloworld/helloworld/helloworld_grpc.pb.go b/examples/helloworld/helloworld/helloworld_grpc.pb.go index 002a8a283880..55e4f31df3ca 100644 --- a/examples/helloworld/helloworld/helloworld_grpc.pb.go +++ b/examples/helloworld/helloworld/helloworld_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: examples/helloworld/helloworld/helloworld.proto package helloworld diff --git a/examples/route_guide/routeguide/route_guide.pb.go b/examples/route_guide/routeguide/route_guide.pb.go index 9c42c50b004e..024f81f06067 100644 --- a/examples/route_guide/routeguide/route_guide.pb.go +++ b/examples/route_guide/routeguide/route_guide.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. 
DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: examples/route_guide/routeguide/route_guide.proto package routeguide diff --git a/examples/route_guide/routeguide/route_guide_grpc.pb.go b/examples/route_guide/routeguide/route_guide_grpc.pb.go index f78ac40de314..08012c0f4bcf 100644 --- a/examples/route_guide/routeguide/route_guide_grpc.pb.go +++ b/examples/route_guide/routeguide/route_guide_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: examples/route_guide/routeguide/route_guide.proto package routeguide diff --git a/health/grpc_health_v1/health.pb.go b/health/grpc_health_v1/health.pb.go index 8e29a62f164f..0b1abc6467c2 100644 --- a/health/grpc_health_v1/health.pb.go +++ b/health/grpc_health_v1/health.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/health/grpc_health_v1/health_grpc.pb.go b/health/grpc_health_v1/health_grpc.pb.go index 99308c4a167d..a01a1b4d54bd 100644 --- a/health/grpc_health_v1/health_grpc.pb.go +++ b/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/internal/proto/grpc_lookup_v1/rls.pb.go b/internal/proto/grpc_lookup_v1/rls.pb.go index 21b6429d6521..a5f053b92926 100644 --- a/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/internal/proto/grpc_lookup_v1/rls.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 diff --git a/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go index 05a307092a80..1c33ce64ebf1 100644 --- a/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/lookup/v1/rls_config.proto package grpc_lookup_v1 diff --git a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 4c1cf2849916..2435fbc9a9b9 100644 --- a/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 diff --git a/interop/grpc_testing/benchmark_service.pb.go b/interop/grpc_testing/benchmark_service.pb.go index 3df10459c572..26d54d9929e8 100644 --- a/interop/grpc_testing/benchmark_service.pb.go +++ b/interop/grpc_testing/benchmark_service.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/benchmark_service.proto package grpc_testing diff --git a/interop/grpc_testing/benchmark_service_grpc.pb.go b/interop/grpc_testing/benchmark_service_grpc.pb.go index b740b6829304..84cd44e4d45d 100644 --- a/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/testing/benchmark_service.proto package grpc_testing diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index 5bd5aaffb5b5..8cf2fb07a4f2 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/control.proto package grpc_testing diff --git a/interop/grpc_testing/core/stats.pb.go b/interop/grpc_testing/core/stats.pb.go index d2649da373d7..6780b71481c3 100644 --- a/interop/grpc_testing/core/stats.pb.go +++ b/interop/grpc_testing/core/stats.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/core/stats.proto package core diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index d7671accfffd..bdbe3f8680fe 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/empty.proto package grpc_testing diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index 3e09a8aa55c9..6d6f5cd20304 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/messages.proto package grpc_testing diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index 2b8388c7e7ed..da19e9dcabe7 100644 --- a/interop/grpc_testing/payloads.pb.go +++ b/interop/grpc_testing/payloads.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/payloads.proto package grpc_testing diff --git a/interop/grpc_testing/report_qps_scenario_service.pb.go b/interop/grpc_testing/report_qps_scenario_service.pb.go index 118dfd720ecc..40086a564d30 100644 --- a/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing diff --git a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index e09b5b240511..33392bc6ae33 100644 --- a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing diff --git a/interop/grpc_testing/stats.pb.go b/interop/grpc_testing/stats.pb.go index 8ae929219109..a76771d6c624 100644 --- a/interop/grpc_testing/stats.pb.go +++ b/interop/grpc_testing/stats.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/stats.proto package grpc_testing diff --git a/interop/grpc_testing/test.pb.go b/interop/grpc_testing/test.pb.go index 266bed6f64b4..cf85993c7899 100644 --- a/interop/grpc_testing/test.pb.go +++ b/interop/grpc_testing/test.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/test.proto package grpc_testing diff --git a/interop/grpc_testing/test_grpc.pb.go b/interop/grpc_testing/test_grpc.pb.go index 23189f0dc8f5..fcf87509acff 100644 --- a/interop/grpc_testing/test_grpc.pb.go +++ b/interop/grpc_testing/test_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/testing/test.proto package grpc_testing diff --git a/interop/grpc_testing/worker_service.pb.go b/interop/grpc_testing/worker_service.pb.go index bc045a3a0577..25bd944fb0e1 100644 --- a/interop/grpc_testing/worker_service.pb.go +++ b/interop/grpc_testing/worker_service.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/testing/worker_service.proto package grpc_testing diff --git a/interop/grpc_testing/worker_service_grpc.pb.go b/interop/grpc_testing/worker_service_grpc.pb.go index 10e153efa6f7..1de7f09f841a 100644 --- a/interop/grpc_testing/worker_service_grpc.pb.go +++ b/interop/grpc_testing/worker_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/testing/worker_service.proto package grpc_testing diff --git a/profiling/proto/service.pb.go b/profiling/proto/service.pb.go index fc2d51bed131..384290deaf94 100644 --- a/profiling/proto/service.pb.go +++ b/profiling/proto/service.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: profiling/proto/service.proto package proto diff --git a/profiling/proto/service_grpc.pb.go b/profiling/proto/service_grpc.pb.go index 0fc93f0c1fe0..5d696a26f924 100644 --- a/profiling/proto/service_grpc.pb.go +++ b/profiling/proto/service_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: profiling/proto/service.proto package proto diff --git a/reflection/grpc_reflection_v1/reflection.pb.go b/reflection/grpc_reflection_v1/reflection.pb.go index 4e7cbd5dcf55..cee1e1d08a60 100644 --- a/reflection/grpc_reflection_v1/reflection.pb.go +++ b/reflection/grpc_reflection_v1/reflection.pb.go @@ -22,7 +22,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/reflection/v1/reflection.proto package grpc_reflection_v1 diff --git a/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/reflection/grpc_reflection_v1/reflection_grpc.pb.go index f1228329edef..62b56a8be0e6 100644 --- a/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -21,8 +21,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: grpc/reflection/v1/reflection.proto package grpc_reflection_v1 diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index ee4b04caf0da..444f2a6f4f73 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha diff --git a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 46032f00d25a..367a029be6b3 100644 --- a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha diff --git a/reflection/grpc_testing/proto2.pb.go b/reflection/grpc_testing/proto2.pb.go index c475c197be98..000220b9850c 100644 --- a/reflection/grpc_testing/proto2.pb.go +++ b/reflection/grpc_testing/proto2.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: reflection/grpc_testing/proto2.proto package grpc_testing diff --git a/reflection/grpc_testing/proto2_ext.pb.go b/reflection/grpc_testing/proto2_ext.pb.go index bd33e953bbfc..c38d5c4c7aba 100644 --- a/reflection/grpc_testing/proto2_ext.pb.go +++ b/reflection/grpc_testing/proto2_ext.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: reflection/grpc_testing/proto2_ext.proto package grpc_testing diff --git a/reflection/grpc_testing/proto2_ext2.pb.go b/reflection/grpc_testing/proto2_ext2.pb.go index c54e5f76ecbf..625d7a490b27 100644 --- a/reflection/grpc_testing/proto2_ext2.pb.go +++ b/reflection/grpc_testing/proto2_ext2.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: reflection/grpc_testing/proto2_ext2.proto package grpc_testing diff --git a/reflection/grpc_testing/test.pb.go b/reflection/grpc_testing/test.pb.go index 09408daf9410..40b35fda1ef4 100644 --- a/reflection/grpc_testing/test.pb.go +++ b/reflection/grpc_testing/test.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: reflection/grpc_testing/test.proto package grpc_testing diff --git a/reflection/grpc_testing/test_grpc.pb.go b/reflection/grpc_testing/test_grpc.pb.go index ebd5e5089e9e..d130f6f3dae3 100644 --- a/reflection/grpc_testing/test_grpc.pb.go +++ b/reflection/grpc_testing/test_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: reflection/grpc_testing/test.proto package grpc_testing diff --git a/stress/grpc_testing/metrics.pb.go b/stress/grpc_testing/metrics.pb.go index fdcaf94fbb70..a7e49565a189 100644 --- a/stress/grpc_testing/metrics.pb.go +++ b/stress/grpc_testing/metrics.pb.go @@ -22,7 +22,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: stress/grpc_testing/metrics.proto package grpc_testing diff --git a/stress/grpc_testing/metrics_grpc.pb.go b/stress/grpc_testing/metrics_grpc.pb.go index dc4f9feb4df6..4e2f985bdf16 100644 --- a/stress/grpc_testing/metrics_grpc.pb.go +++ b/stress/grpc_testing/metrics_grpc.pb.go @@ -21,8 +21,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: stress/grpc_testing/metrics.proto package grpc_testing diff --git a/test/codec_perf/perf.pb.go b/test/codec_perf/perf.pb.go index 7ef3b7a58242..2602db7d3a22 100644 --- a/test/codec_perf/perf.pb.go +++ b/test/codec_perf/perf.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: test/codec_perf/perf.proto package codec_perf diff --git a/test/grpc_testing/test.pb.go b/test/grpc_testing/test.pb.go index 3ee33a8e74c3..1ad453ab61e7 100644 --- a/test/grpc_testing/test.pb.go +++ b/test/grpc_testing/test.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: test/grpc_testing/test.proto package grpc_testing diff --git a/test/grpc_testing/test_grpc.pb.go b/test/grpc_testing/test_grpc.pb.go index 222987b583aa..897e70dafffa 100644 --- a/test/grpc_testing/test_grpc.pb.go +++ b/test/grpc_testing/test_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.14.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 // source: test/grpc_testing/test.proto package grpc_testing diff --git a/vet.sh b/vet.sh index 3728aed04fc7..a8e4732b3d20 100755 --- a/vet.sh +++ b/vet.sh @@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -68,8 +60,7 @@ fi # - Check that generated proto files are up to date. 
if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ + make proto && git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi From 1d16ef5bd8fc1728072ffb2056d78c8659a5f5c5 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 1 Mar 2023 20:47:18 -0500 Subject: [PATCH 799/998] metadata: Lowercase appended metadata (#6071) --- metadata/metadata.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/metadata/metadata.go b/metadata/metadata.go index 77fc055935a9..a2cdcaf12a87 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -175,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } From 8c374f7607ea9c0dd888b4b7e8a91f44b01046eb Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 2 Mar 2023 08:58:05 -0800 Subject: [PATCH 800/998] clusterresolver: cleanup resource resolver implementation (#6052) --- .../clusterresolver/clusterresolver.go | 150 ++++++------- .../clusterresolver/resource_resolver.go | 205 ++++++++---------- .../clusterresolver/resource_resolver_dns.go | 67 ++++-- .../clusterresolver/resource_resolver_eds.go | 77 +++++++ 4 files changed, 286 insertions(+), 213 deletions(-) create mode 100644 xds/internal/balancer/clusterresolver/resource_resolver_eds.go diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go 
b/xds/internal/balancer/clusterresolver/clusterresolver.go index b4a37f60c011..e068d1014790 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -16,7 +16,9 @@ * */ -// Package clusterresolver contains EDS balancer implementation. +// Package clusterresolver contains the implementation of the +// xds_cluster_resolver_experimental LB policy which resolves endpoint addresses +// using a list of one or more discovery mechanisms. package clusterresolver import ( @@ -62,12 +64,12 @@ type bb struct{} func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { priorityBuilder := balancer.Get(priority.Name) if priorityBuilder == nil { - logger.Errorf("priority balancer is needed but not registered") + logger.Errorf("%q LB policy is needed but not registered", priority.Name) return nil } priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) if !ok { - logger.Errorf("priority balancer builder is not a config parser") + logger.Errorf("%q LB policy does not implement a config parser", priority.Name) return nil } @@ -108,15 +110,14 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err return &cfg, nil } -// ccUpdate wraps a clientConn update received from gRPC (pushed from the -// xdsResolver). +// ccUpdate wraps a clientConn update received from gRPC. type ccUpdate struct { state balancer.ClientConnState err error } // scUpdate wraps a subConn update received from gRPC. This is directly passed -// on to the child balancer. +// on to the child policy. type scUpdate struct { subConn balancer.SubConn state balancer.SubConnState @@ -124,10 +125,8 @@ type scUpdate struct { type exitIdle struct{} -// clusterResolverBalancer manages xdsClient and the actual EDS balancer implementation that -// does load balancing. -// -// It currently has only an clusterResolverBalancer. Later, we may add fallback. 
+// clusterResolverBalancer resolves endpoint addresses using a list of one or +// more discovery mechanisms. type clusterResolverBalancer struct { cc balancer.ClientConn bOpts balancer.BuildOptions @@ -150,22 +149,21 @@ type clusterResolverBalancer struct { watchUpdateReceived bool } -// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good -// updates lead to registration of EDS and DNS watches. Updates with error lead -// to cancellation of existing watch and propagation of the same error to the -// child balancer. +// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. +// +// A good update results in creation of endpoint resolvers for the configured +// discovery mechanisms. An update with an error results in cancellation of any +// existing endpoint resolution and propagation of the same to the child policy. func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { - // We first handle errors, if any, and then proceed with handling the - // update, only if the status quo has changed. if err := update.err; err != nil { b.handleErrorFromUpdate(err, true) return } - b.logger.Infof("Receive update from resolver, balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) cfg, _ := update.state.BalancerConfig.(*LBConfig) if cfg == nil { - b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", update.state.BalancerConfig) + b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) return } @@ -173,23 +171,19 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { b.configRaw = update.state.ResolverState.ServiceConfig b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) + // The child policy is created only after all configured discovery + // mechanisms have been successfully returned endpoints. 
If that is not the + // case, we return early. if !b.watchUpdateReceived { - // If update was not received, wait for it. return } - // If eds resp was received before this, the child policy was created. We - // need to generate a new balancer config and send it to the child, because - // certain fields (unrelated to EDS watch) might have changed. - if err := b.updateChildConfig(); err != nil { - b.logger.Warningf("failed to update child policy config: %v", err) - } + b.updateChildConfig() } -// handleWatchUpdate handles a watch update from the xDS Client. Good updates -// lead to clientConn updates being invoked on the underlying child balancer. -func (b *clusterResolverBalancer) handleWatchUpdate(update *resourceUpdate) { +// handleResourceUpdate handles a resource update or error from the resource +// resolver by propagating the same to the child LB policy. +func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { if err := update.err; err != nil { - b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) b.handleErrorFromUpdate(err, false) return } @@ -197,80 +191,77 @@ func (b *clusterResolverBalancer) handleWatchUpdate(update *resourceUpdate) { b.watchUpdateReceived = true b.priorities = update.priorities - // A new EDS update triggers new child configs (e.g. different priorities - // for the priority balancer), and new addresses (the endpoints come from - // the EDS response). - if err := b.updateChildConfig(); err != nil { - b.logger.Warningf("failed to update child policy's balancer config: %v", err) - } + // An update from the resource resolver contains resolved endpoint addresses + // for all configured discovery mechanisms ordered by priority. This is used + // to generate configuration for the priority LB policy. + b.updateChildConfig() } -// updateChildConfig builds a balancer config from eb's cached eds resp and -// service config, and sends that to the child balancer. 
Note that it also -// generates the addresses, because the endpoints come from the EDS resp. +// updateChildConfig builds child policy configuration using endpoint addresses +// returned by the resource resolver and child policy configuration provided by +// parent LB policy. // -// If child balancer doesn't already exist, one will be created. -func (b *clusterResolverBalancer) updateChildConfig() error { - // Child was build when the first EDS resp was received, so we just build - // the config and addresses. +// A child policy is created if one doesn't already exist. The newly built +// configuration is then pushed to the child policy. +func (b *clusterResolverBalancer) updateChildConfig() { if b.child == nil { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, b.config.XDSLBPolicy) if err != nil { - return fmt.Errorf("failed to build priority balancer config: %v", err) + b.logger.Warningf("Failed to build child policy config: %v", err) + return } childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) if err != nil { - return fmt.Errorf("failed to parse generated priority balancer config, this should never happen because the config is generated: %v", err) + b.logger.Warningf("Failed to parse child policy config. 
This should never happen because the config was generated: %v", err) + return } - b.logger.Infof("build balancer config: %v", pretty.ToJSON(childCfg)) - return b.child.UpdateClientConnState(balancer.ClientConnState{ + b.logger.Infof("Built child policy config: %v", pretty.ToJSON(childCfg)) + + if err := b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addrs, ServiceConfig: b.configRaw, Attributes: b.attrsWithClient, }, BalancerConfig: childCfg, - }) + }); err != nil { + b.logger.Warningf("Failed to push config to child policy: %v", err) + } } -// handleErrorFromUpdate handles both the error from parent ClientConn (from CDS -// balancer) and the error from xds client (from the watcher). fromParent is -// true if error is from parent ClientConn. -// -// If the error is connection error, it should be handled for fallback purposes. -// -// If the error is resource-not-found: -// - If it's from CDS balancer (shows as a resolver error), it means LDS or CDS -// resources were removed. The EDS watch should be canceled. -// - If it's from xds client, it means EDS resource were removed. The EDS -// watcher should keep watching. -// In both cases, the sub-balancers will be receive the error. +// handleErrorFromUpdate handles errors from the parent LB policy and endpoint +// resolvers. fromParent is true if error is from the parent LB policy. In both +// cases, the error is propagated to the child policy, if one exists. func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { b.logger.Warningf("Received error: %v", err) + + // A resource-not-found error from the parent LB policy means that the LDS + // or CDS resource was removed. This should result in endpoint resolvers + // being stopped here. + // + // A resource-not-found error from the EDS endpoint resolver means that the + // EDS resource was removed. 
No action needs to be taken for this, and we + // should continue watching the same EDS resource. if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { - // This is an error from the parent ClientConn (can be the parent CDS - // balancer), and is a resource-not-found error. This means the resource - // (can be either LDS or CDS) was removed. Stop the EDS watch. b.resourceWatcher.stop() } + if b.child != nil { b.child.ResolverError(err) - } else { - // If eds balancer was never created, fail the RPCs with errors. - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(err), - }) + return } - + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) } -// run is a long-running goroutine which handles all updates from gRPC and -// xdsClient. All methods which are invoked directly by gRPC or xdsClient simply -// push an update onto a channel which is read and acted upon right here. +// run is a long-running goroutine that handles updates from gRPC and endpoint +// resolvers. The methods handling the individual updates simply push them onto +// a channel which is read and acted upon from here. func (b *clusterResolverBalancer) run() { for { select { @@ -283,7 +274,7 @@ func (b *clusterResolverBalancer) run() { // SubConn updates are simply handed over to the underlying // child balancer. 
if b.child == nil { - b.logger.Errorf("xds: received scUpdate {%+v} with no child balancer", update) + b.logger.Errorf("Received a SubConn update {%+v} with no child policy", update) break } b.child.UpdateSubConnState(update.subConn, update.state) @@ -301,9 +292,9 @@ func (b *clusterResolverBalancer) run() { } } case u := <-b.resourceWatcher.updateChannel: - b.handleWatchUpdate(u) + b.handleResourceUpdate(u) - // Close results in cancellation of the EDS watch and closing of the + // Close results in stopping the endpoint resolvers and closing the // underlying child policy and is the only way to exit this goroutine. case <-b.closed.Done(): b.resourceWatcher.stop() @@ -322,12 +313,9 @@ func (b *clusterResolverBalancer) run() { // Following are methods to implement the balancer interface. -// UpdateClientConnState receives the serviceConfig (which contains the -// clusterName to watch for in CDS) and the xdsClient object from the -// xdsResolver. func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after clusterResolverBalancer was closed", state) + b.logger.Warningf("Received update from gRPC {%+v} after close", state) return errBalancerClosed } @@ -347,7 +335,7 @@ func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientCon // ResolverError handles errors reported by the xdsResolver. func (b *clusterResolverBalancer) ResolverError(err error) { if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%v} after clusterResolverBalancer was closed", err) + b.logger.Warningf("Received resolver error {%v} after close", err) return } b.updateCh.Put(&ccUpdate{err: err}) @@ -356,7 +344,7 @@ func (b *clusterResolverBalancer) ResolverError(err error) { // UpdateSubConnState handles subConn updates from gRPC. 
func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { if b.closed.HasFired() { - b.logger.Warningf("xds: received subConn update {%v, %v} after clusterResolverBalancer was closed", sc, state) + b.logger.Warningf("Received subConn update {%v, %v} after close", sc, state) return } b.updateCh.Put(&scUpdate{subConn: sc, state: state}) diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 9c2fc6e7c797..e47aaf1ceba1 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -21,7 +21,6 @@ package clusterresolver import ( "sync" - "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -32,9 +31,33 @@ type resourceUpdate struct { err error } -type discoveryMechanism interface { +// topLevelResolver is used by concrete endpointsResolver implementations for +// reporting updates and errors. The `resourceResolver` type implements this +// interface and takes appropriate actions upon receipt of updates and errors +// from underlying concrete resolvers. +type topLevelResolver interface { + onUpdate() + onError(error) +} + +// endpointsResolver wraps the functionality to resolve a given resource name to +// a set of endpoints. The mechanism used by concrete implementations depend on +// the supported discovery mechanism type. +type endpointsResolver interface { + // lastUpdate returns endpoint results from the most recent resolution. + // + // The type of the first return result is dependent on the resolver + // implementation. + // + // The second return result indicates whether the resolver was able to + // successfully resolve the resource name to endpoints. If set to false, the + // first return result is invalid and must not be used. 
lastUpdate() (interface{}, bool) + + // resolverNow triggers re-resolution of the resource. resolveNow() + + // stop stops resolution of the resource. stop() } @@ -47,14 +70,13 @@ type discoveryMechanismKey struct { name string } -// resolverMechanismTuple is needed to keep the resolver and the discovery -// mechanism together, because resolvers can be shared. And we need the -// mechanism for fields like circuit breaking, LRS etc when generating the +// discoveryMechanismAndResolver is needed to keep the resolver and the +// discovery mechanism together, because resolvers can be shared. And we need +// the mechanism for fields like circuit breaking, LRS etc when generating the // balancer config. -type resolverMechanismTuple struct { - dm DiscoveryMechanism - dmKey discoveryMechanismKey - r discoveryMechanism +type discoveryMechanismAndResolver struct { + dm DiscoveryMechanism + r endpointsResolver childNameGen *nameGenerator } @@ -66,14 +88,14 @@ type resourceResolver struct { // mu protects the slice and map, and content of the resolvers in the slice. mu sync.Mutex mechanisms []DiscoveryMechanism - children []resolverMechanismTuple + children []discoveryMechanismAndResolver // childrenMap's value only needs the resolver implementation (type // discoveryMechanism) and the childNameGen. The other two fields are not // used. // // TODO(cleanup): maybe we can make a new type with just the necessary // fields, and use it here instead. - childrenMap map[discoveryMechanismKey]resolverMechanismTuple + childrenMap map[discoveryMechanismKey]discoveryMechanismAndResolver // Each new discovery mechanism needs a child name generator to reuse child // policy names. But to make sure the names across discover mechanism // doesn't conflict, we need a seq ID. 
This ID is incremented for each new @@ -85,7 +107,7 @@ func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver { return &resourceResolver{ parent: parent, updateChannel: make(chan *resourceUpdate, 1), - childrenMap: make(map[discoveryMechanismKey]resolverMechanismTuple), + childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), } } @@ -102,6 +124,21 @@ func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { return true } +func discoveryMechanismToKey(dm DiscoveryMechanism) discoveryMechanismKey { + switch dm.Type { + case DiscoveryMechanismTypeEDS: + nameToWatch := dm.EDSServiceName + if nameToWatch == "" { + nameToWatch = dm.Cluster + } + return discoveryMechanismKey{typ: dm.Type, name: nameToWatch} + case DiscoveryMechanismTypeLogicalDNS: + return discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} + default: + return discoveryMechanismKey{} + } +} + func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { rr.mu.Lock() defer rr.mu.Unlock() @@ -109,65 +146,45 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { return } rr.mechanisms = mechanisms - rr.children = make([]resolverMechanismTuple, len(mechanisms)) + rr.children = make([]discoveryMechanismAndResolver, len(mechanisms)) newDMs := make(map[discoveryMechanismKey]bool) // Start one watch for each new discover mechanism {type+resource_name}. for i, dm := range mechanisms { + dmKey := discoveryMechanismToKey(dm) + newDMs[dmKey] = true + dmAndResolver, ok := rr.childrenMap[dmKey] + if ok { + // If this is not new, keep the fields (especially childNameGen), + // and only update the DiscoveryMechanism. + // + // Note that the same dmKey doesn't mean the same + // DiscoveryMechanism. There are fields (e.g. + // MaxConcurrentRequests) in DiscoveryMechanism that are not copied + // to dmKey, we need to keep those updated. 
+ dmAndResolver.dm = dm + rr.children[i] = dmAndResolver + continue + } + + // Create resolver for a newly seen resource. + var resolver endpointsResolver switch dm.Type { case DiscoveryMechanismTypeEDS: - // If EDSServiceName is not set, use the cluster name as EDS service - // name to watch. - nameToWatch := dm.EDSServiceName - if nameToWatch == "" { - nameToWatch = dm.Cluster - } - dmKey := discoveryMechanismKey{typ: dm.Type, name: nameToWatch} - newDMs[dmKey] = true - - r, ok := rr.childrenMap[dmKey] - if !ok { - r = resolverMechanismTuple{ - dm: dm, - dmKey: dmKey, - r: newEDSResolver(nameToWatch, rr.parent.xdsClient, rr), - childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), - } - rr.childrenMap[dmKey] = r - rr.childNameGeneratorSeqID++ - } else { - // If this is not new, keep the fields (especially - // childNameGen), and only update the DiscoveryMechanism. - // - // Note that the same dmKey doesn't mean the same - // DiscoveryMechanism. There are fields (e.g. - // MaxConcurrentRequests) in DiscoveryMechanism that are not - // copied to dmKey, we need to keep those updated. - r.dm = dm - } - rr.children[i] = r + resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr) case DiscoveryMechanismTypeLogicalDNS: - // Name to resolve in DNS is the hostname, not the ClientConn - // target. 
- dmKey := discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} - newDMs[dmKey] = true - - r, ok := rr.childrenMap[dmKey] - if !ok { - r = resolverMechanismTuple{ - dm: dm, - dmKey: dmKey, - r: newDNSResolver(dm.DNSHostname, rr), - childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), - } - rr.childrenMap[dmKey] = r - rr.childNameGeneratorSeqID++ - } else { - r.dm = dm - } - rr.children[i] = r + resolver = newDNSResolver(dmKey.name, rr) + } + dmAndResolver = discoveryMechanismAndResolver{ + dm: dm, + r: resolver, + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), } + rr.childrenMap[dmKey] = dmAndResolver + rr.children[i] = dmAndResolver + rr.childNameGeneratorSeqID++ } + // Stop the resources that were removed. for dm, r := range rr.childrenMap { if !newDMs[dm] { @@ -177,7 +194,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { } // Regenerate even if there's no change in discovery mechanism, in case // priority order changed. - rr.generate() + rr.generateLocked() } // resolveNow is typically called to trigger re-resolve of DNS. The EDS @@ -199,7 +216,7 @@ func (rr *resourceResolver) stop() { // be removed entirely, but a future use case might want to reuse the // policy instead. cm := rr.childrenMap - rr.childrenMap = make(map[discoveryMechanismKey]resolverMechanismTuple) + rr.childrenMap = make(map[discoveryMechanismKey]discoveryMechanismAndResolver) rr.mechanisms = nil rr.children = nil rr.mu.Unlock() @@ -209,13 +226,12 @@ func (rr *resourceResolver) stop() { } } -// generate collects all the updates from all the resolvers, and push the -// combined result into the update channel. It only pushes the update when all -// the child resolvers have received at least one update, otherwise it will -// wait. +// generateLocked collects updates from all resolvers. It pushes the combined +// result on the update channel if all child resolvers have received at least +// one update. Otherwise it returns early. 
// // caller must hold rr.mu. -func (rr *resourceResolver) generate() { +func (rr *resourceResolver) generateLocked() { var ret []priorityConfig for _, rDM := range rr.children { u, ok := rDM.r.lastUpdate() @@ -238,49 +254,16 @@ func (rr *resourceResolver) generate() { rr.updateChannel <- &resourceUpdate{priorities: ret} } -type edsDiscoveryMechanism struct { - cancel func() - - update xdsresource.EndpointsUpdate - updateReceived bool -} - -func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { - if !er.updateReceived { - return nil, false - } - return er.update, true -} - -func (er *edsDiscoveryMechanism) resolveNow() { -} - -func (er *edsDiscoveryMechanism) stop() { - er.cancel() +func (rr *resourceResolver) onUpdate() { + rr.mu.Lock() + rr.generateLocked() + rr.mu.Unlock() } -// newEDSResolver starts the EDS watch on the given xds client. -func newEDSResolver(nameToWatch string, xdsc xdsclient.XDSClient, topLevelResolver *resourceResolver) *edsDiscoveryMechanism { - ret := &edsDiscoveryMechanism{} - topLevelResolver.parent.logger.Infof("EDS watch started on %v", nameToWatch) - cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsresource.EndpointsUpdate, err error) { - topLevelResolver.mu.Lock() - defer topLevelResolver.mu.Unlock() - if err != nil { - select { - case <-topLevelResolver.updateChannel: - default: - } - topLevelResolver.updateChannel <- &resourceUpdate{err: err} - return - } - ret.update = update - ret.updateReceived = true - topLevelResolver.generate() - }) - ret.cancel = func() { - topLevelResolver.parent.logger.Infof("EDS watch canceled on %v", nameToWatch) - cancel() +func (rr *resourceResolver) onError(err error) { + select { + case <-rr.updateChannel: + default: } - return ret + rr.updateChannel <- &resourceUpdate{err: err} } diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 703b00811dfa..4ce70e0fe6bb 100644 --- 
a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -20,8 +20,9 @@ package clusterresolver import ( "fmt" + "net/url" + "sync" - "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -39,31 +40,54 @@ var ( // It implements resolver.ClientConn interface to work with the DNS resolver. type dnsDiscoveryMechanism struct { target string - topLevelResolver *resourceResolver - r resolver.Resolver + topLevelResolver topLevelResolver + dnsR resolver.Resolver + mu sync.Mutex addrs []string updateReceived bool } -func newDNSResolver(target string, topLevelResolver *resourceResolver) *dnsDiscoveryMechanism { +// newDNSResolver creates an endpoints resolver which uses a DNS resolver under +// the hood. +// +// An error in parsing the provided target string or an error in creating a DNS +// resolver means that we will never be able to resolve the provided target +// strings to endpoints. The topLevelResolver propagates address updates to the +// clusterresolver LB policy **only** after it receives updates from all its +// child resolvers. Therefore, an error here means that the topLevelResolver +// will never send address updates to the clusterresolver LB policy. +// +// Calling the onError() callback will ensure that this error is +// propagated to the child policy which eventually move the channel to +// transient failure. +// +// The `dnsR` field is unset if we run into erros in this function. Therefore, a +// nil check is required wherever we access that field. 
+func newDNSResolver(target string, topLevelResolver topLevelResolver) *dnsDiscoveryMechanism { ret := &dnsDiscoveryMechanism{ target: target, topLevelResolver: topLevelResolver, } - r, err := newDNS(resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + target)}, ret, resolver.BuildOptions{}) + u, err := url.Parse("dns:///" + target) + if err != nil { + topLevelResolver.onError(fmt.Errorf("failed to parse dns hostname %q in clusterresolver LB policy", target)) + return ret + } + + r, err := newDNS(resolver.Target{Scheme: "dns", URL: *u}, ret, resolver.BuildOptions{}) if err != nil { - select { - case <-topLevelResolver.updateChannel: - default: - } - topLevelResolver.updateChannel <- &resourceUpdate{err: err} + topLevelResolver.onError(fmt.Errorf("failed to build DNS resolver for target %q: %v", target, err)) + return ret } - ret.r = r + ret.dnsR = r return ret } func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + dr.mu.Lock() + defer dr.mu.Unlock() + if !dr.updateReceived { return nil, false } @@ -71,35 +95,36 @@ func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) { } func (dr *dnsDiscoveryMechanism) resolveNow() { - dr.r.ResolveNow(resolver.ResolveNowOptions{}) + if dr.dnsR != nil { + dr.dnsR.ResolveNow(resolver.ResolveNowOptions{}) + } } func (dr *dnsDiscoveryMechanism) stop() { - dr.r.Close() + if dr.dnsR != nil { + dr.dnsR.Close() + } } // dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive // updates from the real DNS resolver. 
func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { - dr.topLevelResolver.mu.Lock() - defer dr.topLevelResolver.mu.Unlock() + dr.mu.Lock() addrs := make([]string, len(state.Addresses)) for i, a := range state.Addresses { addrs[i] = a.Addr } dr.addrs = addrs dr.updateReceived = true - dr.topLevelResolver.generate() + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate() return nil } func (dr *dnsDiscoveryMechanism) ReportError(err error) { - select { - case <-dr.topLevelResolver.updateChannel: - default: - } - dr.topLevelResolver.updateChannel <- &resourceUpdate{err: err} + dr.topLevelResolver.onError(err) } func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go new file mode 100644 index 000000000000..62d932b85d5a --- /dev/null +++ b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterresolver + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type edsResourceWatcher interface { + WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() +} + +type edsDiscoveryMechanism struct { + cancel func() + topLevelResolver topLevelResolver + + mu sync.Mutex + update xdsresource.EndpointsUpdate + updateReceived bool +} + +func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + er.mu.Lock() + defer er.mu.Unlock() + + if !er.updateReceived { + return nil, false + } + return er.update, true +} + +func (er *edsDiscoveryMechanism) resolveNow() { +} + +func (er *edsDiscoveryMechanism) stop() { + er.cancel() +} + +func (er *edsDiscoveryMechanism) handleEndpointsUpdate(update xdsresource.EndpointsUpdate, err error) { + if err != nil { + er.topLevelResolver.onError(err) + return + } + + er.mu.Lock() + er.update = update + er.updateReceived = true + er.mu.Unlock() + + er.topLevelResolver.onUpdate() +} + +// newEDSResolver returns an implementation of the endpointsResolver interface +// that uses EDS to resolve the given name to endpoints. 
+func newEDSResolver(nameToWatch string, watcher edsResourceWatcher, topLevelResolver topLevelResolver) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{topLevelResolver: topLevelResolver} + ret.cancel = watcher.WatchEndpoints(nameToWatch, ret.handleEndpointsUpdate) + return ret +} From 20141c25965dcb81f94ff08f6e21e0f04858ae6e Mon Sep 17 00:00:00 2001 From: Borja Lazaro Toralles Date: Thu, 2 Mar 2023 17:17:20 +0000 Subject: [PATCH 801/998] examples: add an example to illustrate authorization (authz) support (#5920) --- examples/examples_test.sh | 3 + examples/features/authz/README.md | 40 +++++ examples/features/authz/client/main.go | 132 +++++++++++++++ examples/features/authz/server/main.go | 215 +++++++++++++++++++++++++ examples/features/authz/token/token.go | 53 ++++++ 5 files changed, 443 insertions(+) create mode 100644 examples/features/authz/README.md create mode 100644 examples/features/authz/client/main.go create mode 100644 examples/features/authz/server/main.go create mode 100644 examples/features/authz/token/token.go diff --git a/examples/examples_test.sh b/examples/examples_test.sh index e9985d678096..812a46556bf0 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -52,6 +52,7 @@ EXAMPLES=( "helloworld" "route_guide" "features/authentication" + "features/authz" "features/cancellation" "features/compression" "features/deadline" @@ -101,6 +102,7 @@ declare -A EXPECTED_SERVER_OUTPUT=( ["helloworld"]="Received: world" ["route_guide"]="" ["features/authentication"]="server starting on port 50051..." 
+ ["features/authz"]="unary echoing message \"hello world\"" ["features/cancellation"]="server: error receiving from stream: rpc error: code = Canceled desc = context canceled" ["features/compression"]="UnaryEcho called with message \"compress\"" ["features/deadline"]="" @@ -120,6 +122,7 @@ declare -A EXPECTED_CLIENT_OUTPUT=( ["helloworld"]="Greeting: Hello world" ["route_guide"]="Feature: name: \"\", point:(416851321, -742674555)" ["features/authentication"]="UnaryEcho: hello world" + ["features/authz"]="UnaryEcho: hello world" ["features/cancellation"]="cancelling context" ["features/compression"]="UnaryEcho call returned \"compress\", " ["features/deadline"]="wanted = DeadlineExceeded, got = DeadlineExceeded" diff --git a/examples/features/authz/README.md b/examples/features/authz/README.md new file mode 100644 index 000000000000..498beb367f1e --- /dev/null +++ b/examples/features/authz/README.md @@ -0,0 +1,40 @@ +# RBAC authorization + +This example uses the `StaticInterceptor` from the `google.golang.org/grpc/authz` +package. It uses a header based RBAC policy to match each gRPC method to a +required role. For simplicity, the context is injected with mock metadata which +includes the required roles, but this should be fetched from an appropriate +service based on the authenticated context. + +## Try it + +Server requires the following roles on an authenticated user to authorize usage +of these methods: + +- `UnaryEcho` requires the role `UNARY_ECHO:W` +- `BidirectionalStreamingEcho` requires the role `STREAM_ECHO:RW` + +Upon receiving a request, the server first checks that a token was supplied, +decodes it and checks that a secret is correctly set (hardcoded to `super-secret` +for simplicity, this should use a proper ID provider in production). 
+ +If the above is successful, it uses the username in the token to set appropriate +roles (hardcoded to the 2 required roles above if the username matches `super-user` +for simplicity, these roles should be supplied externally as well). + +Start the server with: + +``` +go run server/main.go +``` + +The client implementation shows how using a valid token (setting username and +secret) with each of the endpoints will return successfully. It also exemplifies +how using a bad token will result in `codes.PermissionDenied` being returned +from the service. + +Start the client with: + +``` +go run client/main.go +``` diff --git a/examples/features/authz/client/main.go b/examples/features/authz/client/main.go new file mode 100644 index 000000000000..85d085d24580 --- /dev/null +++ b/examples/features/authz/client/main.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client. 
+package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "time" + + "golang.org/x/oauth2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/examples/data" + "google.golang.org/grpc/examples/features/authz/token" + ecpb "google.golang.org/grpc/examples/features/proto/echo" + "google.golang.org/grpc/status" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func callUnaryEcho(ctx context.Context, client ecpb.EchoClient, message string, opts ...grpc.CallOption) error { + resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}, opts...) + if err != nil { + return status.Errorf(status.Code(err), "UnaryEcho RPC failed: %v", err) + } + fmt.Println("UnaryEcho: ", resp.Message) + return nil +} + +func callBidiStreamingEcho(ctx context.Context, client ecpb.EchoClient, opts ...grpc.CallOption) error { + c, err := client.BidirectionalStreamingEcho(ctx, opts...) 
+ if err != nil { + return status.Errorf(status.Code(err), "BidirectionalStreamingEcho RPC failed: %v", err) + } + for i := 0; i < 5; i++ { + if err := c.Send(&ecpb.EchoRequest{Message: fmt.Sprintf("Request %d", i+1)}); err != nil { + return status.Errorf(status.Code(err), "sending StreamingEcho message: %v", err) + } + } + c.CloseSend() + for { + resp, err := c.Recv() + if err == io.EOF { + break + } + if err != nil { + return status.Errorf(status.Code(err), "receiving StreamingEcho message: %v", err) + } + fmt.Println("BidiStreaming Echo: ", resp.Message) + } + return nil +} + +func newCredentialsCallOption(t token.Token) grpc.CallOption { + tokenBase64, err := t.Encode() + if err != nil { + log.Fatalf("encoding token: %v", err) + } + oath2Token := oauth2.Token{AccessToken: tokenBase64} + return grpc.PerRPCCredentials(oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(&oath2Token)}) +} + +func main() { + flag.Parse() + + // Create tls based credential. + creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "x.test.example.com") + if err != nil { + log.Fatalf("failed to load credentials: %v", err) + } + // Set up a connection to the server. + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds)) + if err != nil { + log.Fatalf("grpc.Dial(%q): %v", *addr, err) + } + defer conn.Close() + + // Make an echo client and send RPCs. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + client := ecpb.NewEchoClient(conn) + + // Make RPCs as an authorized user and expect them to succeed. 
+ authorisedUserTokenCallOption := newCredentialsCallOption(token.Token{Username: "super-user", Secret: "super-secret"}) + if err := callUnaryEcho(ctx, client, "hello world", authorisedUserTokenCallOption); err != nil { + log.Fatalf("Unary RPC by authorized user failed: %v", err) + } + if err := callBidiStreamingEcho(ctx, client, authorisedUserTokenCallOption); err != nil { + log.Fatalf("Bidirectional RPC by authorized user failed: %v", err) + } + + // Make RPCs as an unauthorized user and expect them to fail with status code PermissionDenied. + unauthorisedUserTokenCallOption := newCredentialsCallOption(token.Token{Username: "bad-actor", Secret: "super-secret"}) + if err := callUnaryEcho(ctx, client, "hello world", unauthorisedUserTokenCallOption); err != nil { + switch c := status.Code(err); c { + case codes.PermissionDenied: + log.Printf("Unary RPC by unauthorized user failed as expected: %v", err) + default: + log.Fatalf("Unary RPC by unauthorized user failed unexpectedly: %v, %v", c, err) + } + } + if err := callBidiStreamingEcho(ctx, client, unauthorisedUserTokenCallOption); err != nil { + switch c := status.Code(err); c { + case codes.PermissionDenied: + log.Printf("Bidirectional RPC by unauthorized user failed as expected: %v", err) + default: + log.Fatalf("Bidirectional RPC by unauthorized user failed unexpectedly: %v", err) + } + } +} diff --git a/examples/features/authz/server/main.go b/examples/features/authz/server/main.go new file mode 100644 index 000000000000..e06ddbdf153e --- /dev/null +++ b/examples/features/authz/server/main.go @@ -0,0 +1,215 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/authz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/examples/data" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "google.golang.org/grpc/examples/features/authz/token" + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +const ( + unaryEchoWriterRole = "UNARY_ECHO:W" + streamEchoReadWriterRole = "STREAM_ECHO:RW" + authzPolicy = ` + { + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryEcho", + "request": { + "paths": ["/grpc.examples.echo.Echo/UnaryEcho"], + "headers": [ + { + "key": "UNARY_ECHO:W", + "values": ["true"] + } + ] + } + }, + { + "name": "allow_BidirectionalStreamingEcho", + "request": { + "paths": ["/grpc.examples.echo.Echo/BidirectionalStreamingEcho"], + "headers": [ + { + "key": "STREAM_ECHO:RW", + "values": ["true"] + } + ] + } + } + ], + "deny_rules": [] + } + ` +) + +var ( + port = flag.Int("port", 50051, "the port to serve on") + + errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") +) + +func newContextWithRoles(ctx context.Context, username string) context.Context { + md := metadata.MD{} + if username == "super-user" { + md.Set(unaryEchoWriterRole, "true") + md.Set(streamEchoReadWriterRole, "true") + } + return metadata.NewIncomingContext(ctx, md) +} + +type server struct { + 
pb.UnimplementedEchoServer +} + +func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { + fmt.Printf("unary echoing message %q\n", in.Message) + return &pb.EchoResponse{Message: in.Message}, nil +} + +func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error { + for { + in, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + fmt.Printf("Receiving message from stream: %v\n", err) + return err + } + fmt.Printf("bidi echoing message %q\n", in.Message) + stream.Send(&pb.EchoResponse{Message: in.Message}) + } +} + +// isAuthenticated validates the authorization. +func isAuthenticated(authorization []string) (username string, err error) { + if len(authorization) < 1 { + return "", errors.New("received empty authorization token from client") + } + tokenBase64 := strings.TrimPrefix(authorization[0], "Bearer ") + // Perform the token validation here. For the sake of this example, the code + // here forgoes any of the usual OAuth2 token validation and instead checks + // for a token matching an arbitrary string. + var token token.Token + err = token.Decode(tokenBase64) + if err != nil { + return "", fmt.Errorf("base64 decoding of received token %q: %v", tokenBase64, err) + } + if token.Secret != "super-secret" { + return "", fmt.Errorf("received token %q does not match expected %q", token.Secret, "super-secret") + } + return token.Username, nil +} + +// authUnaryInterceptor looks up the authorization header from the incoming RPC context, +// retrieves the username from it and creates a new context with the username before invoking +// the provided handler. 
+func authUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMissingMetadata + } + username, err := isAuthenticated(md["authorization"]) + if err != nil { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + return handler(newContextWithRoles(ctx, username), req) +} + +// wrappedStream wraps a grpc.ServerStream associated with an incoming RPC, and +// a custom context containing the username derived from the authorization header +// specified in the incoming RPC metadata +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} + +func newWrappedStream(ctx context.Context, s grpc.ServerStream) grpc.ServerStream { + return &wrappedStream{s, ctx} +} + +// authStreamInterceptor looks up the authorization header from the incoming RPC context, +// retrieves the username from it and creates a new context with the username before invoking +// the provided handler. +func authStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return errMissingMetadata + } + username, err := isAuthenticated(md["authorization"]) + if err != nil { + return status.Error(codes.Unauthenticated, err.Error()) + } + return handler(srv, newWrappedStream(newContextWithRoles(ss.Context(), username), ss)) +} + +func main() { + flag.Parse() + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("Listening on local port %q: %v", *port, err) + } + + // Create tls based credential. 
+ creds, err := credentials.NewServerTLSFromFile(data.Path("x509/server_cert.pem"), data.Path("x509/server_key.pem")) + if err != nil { + log.Fatalf("Loading credentials: %v", err) + } + + // Create an authorization interceptor using a static policy. + staticInteceptor, err := authz.NewStatic(authzPolicy) + if err != nil { + log.Fatalf("Creating a static authz interceptor: %v", err) + } + unaryInts := grpc.ChainUnaryInterceptor(authUnaryInterceptor, staticInteceptor.UnaryInterceptor) + streamInts := grpc.ChainStreamInterceptor(authStreamInterceptor, staticInteceptor.StreamInterceptor) + s := grpc.NewServer(grpc.Creds(creds), unaryInts, streamInts) + + // Register EchoServer on the server. + pb.RegisterEchoServer(s, &server{}) + + if err := s.Serve(lis); err != nil { + log.Fatalf("Serving Echo service on local port: %v", err) + } +} diff --git a/examples/features/authz/token/token.go b/examples/features/authz/token/token.go new file mode 100644 index 000000000000..8447c13992b6 --- /dev/null +++ b/examples/features/authz/token/token.go @@ -0,0 +1,53 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package token + +import ( + "encoding/base64" + "encoding/json" +) + +// Token is a mock authorization token sent by the client as part of the RPC headers, +// and used by the server for authorization against a predefined policy. 
+type Token struct { + // Secret is used by the server to authenticate the user + Secret string `json:"secret"` + // Username is used by the server to assign roles in the metadata for authorization + Username string `json:"username"` +} + +// Encode returns a base64 encoded version of the JSON representation of token. +func (t *Token) Encode() (string, error) { + barr, err := json.Marshal(t) + if err != nil { + return "", err + } + s := base64.StdEncoding.EncodeToString(barr) + return s, nil +} + +// Decode updates the internals of Token using the passed in base64 +// encoded version of the JSON representation of token. +func (t *Token) Decode(s string) error { + barr, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return err + } + return json.Unmarshal(barr, t) +} From a8b32264c62f4e9a7388afa832d46fdd48385737 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 2 Mar 2023 17:01:58 -0500 Subject: [PATCH 802/998] gcp/observability: Disable logging and traces on channels to cloud ops backends (#6022) --- gcp/observability/exporting.go | 21 ++++++++++- gcp/observability/go.mod | 2 +- gcp/observability/logging.go | 5 --- gcp/observability/logging_test.go | 59 ------------------------------- gcp/observability/opencensus.go | 2 ++ 5 files changed, 23 insertions(+), 66 deletions(-) diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go index 73eac40b5ed0..862014640deb 100644 --- a/gcp/observability/exporting.go +++ b/gcp/observability/exporting.go @@ -22,9 +22,28 @@ import ( "context" "fmt" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats/opencensus" + gcplogging "cloud.google.com/go/logging" ) +// cOptsDisableLogTrace are client options for the go client libraries which are +// used to configure connections to GCP exporting backends. 
These disable global +// dial and server options set by this module, which configure logging, metrics, +// and tracing on all created grpc.ClientConn's and grpc.Server's. These options +// turn on only metrics, and also disable the client libraries behavior of +// plumbing in the older opencensus instrumentation code. +var cOptsDisableLogTrace = []option.ClientOption{ + option.WithTelemetryDisabled(), + option.WithGRPCDialOption(internal.DisableGlobalDialOptions.(func() grpc.DialOption)()), + option.WithGRPCDialOption(opencensus.DialOption(opencensus.TraceOptions{ + DisableTrace: true, + })), +} + // loggingExporter is the interface of logging exporter for gRPC Observability. // In future, we might expose this to allow users provide custom exporters. But // now, it exists for testing purposes. @@ -42,7 +61,7 @@ type cloudLoggingExporter struct { } func newCloudLoggingExporter(ctx context.Context, config *config) (loggingExporter, error) { - c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", config.ProjectID)) + c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", config.ProjectID), cOptsDisableLogTrace...) 
if err != nil { return nil, fmt.Errorf("failed to create cloudLoggingExporter: %v", err) } diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index ffd5bfa9f455..a52a353ec3c1 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -9,6 +9,7 @@ require ( github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 golang.org/x/oauth2 v0.4.0 + google.golang.org/api v0.103.0 google.golang.org/grpc v1.52.0 google.golang.org/grpc/stats/opencensus v0.0.0-20230214213552-081499f2e8a4 ) @@ -32,7 +33,6 @@ require ( golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.4.0 // indirect golang.org/x/text v0.6.0 // indirect - google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index bf46ffa6fe3d..6c6d8bf0aada 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -340,11 +340,6 @@ type binaryLogger struct { } func (bl *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { - // Prevent logging from logging, traces, and metrics API calls. 
- if strings.HasPrefix(methodName, "/google.logging.v2.LoggingServiceV2/") || strings.HasPrefix(methodName, "/google.monitoring.v3.MetricService/") || - strings.HasPrefix(methodName, "/google.devtools.cloudtrace.v2.TraceService/") { - return nil - } s, _, err := grpcutil.ParseMethod(methodName) if err != nil { logger.Infof("binarylogging: failed to parse %q: %v", methodName, err) diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index 0265c45ddc04..a02233387407 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -1062,65 +1062,6 @@ func (s) TestTranslateMetadata(t *testing.T) { } } -// TestCloudLoggingAPICallsFiltered tests that the observability plugin does not -// emit logs for cloud logging API calls. -func (s) TestCloudLoggingAPICallsFiltered(t *testing.T) { - fle := &fakeLoggingExporter{ - t: t, - } - - defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { - newLoggingExporter = ne - }(newLoggingExporter) - - newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { - return fle, nil - } - configLogAll := &config{ - ProjectID: "fake", - CloudLogging: &cloudLogging{ - ClientRPCEvents: []clientRPCEvents{ - { - Methods: []string{"*"}, - MaxMetadataBytes: 30, - MaxMessageBytes: 30, - }, - }, - }, - } - cleanup, err := setupObservabilitySystemWithConfig(configLogAll) - if err != nil { - t.Fatalf("error setting up observability %v", err) - } - defer cleanup() - - ss := &stubserver.StubServer{} - if err := ss.Start(nil); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Any of the three cloud logging API calls should not cause any logs to be - // emitted, even though the configuration specifies to log any rpc - // regardless of method. 
- req := &grpc_testing.SimpleRequest{} - resp := &grpc_testing.SimpleResponse{} - - ss.CC.Invoke(ctx, "/google.logging.v2.LoggingServiceV2/some-method", req, resp) - ss.CC.Invoke(ctx, "/google.monitoring.v3.MetricService/some-method", req, resp) - ss.CC.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/some-method", req, resp) - // The exporter should have received no new log entries due to these three - // calls, as they should be filtered out. - fle.mu.Lock() - defer fle.mu.Unlock() - if len(fle.entries) != 0 { - t.Fatalf("Unexpected length of entries %v, want 0", len(fle.entries)) - } -} - func (s) TestMarshalJSON(t *testing.T) { logEntry := &grpcLogEntry{ CallID: "300-300-300", diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 782e8333cfc2..d80d2d9aa05e 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -74,6 +74,8 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { MonitoredResource: mr, DefaultMonitoringLabels: labelsToMonitoringLabels(config.Labels), DefaultTraceAttributes: labelsToTraceAttributes(config.Labels), + MonitoringClientOptions: cOptsDisableLogTrace, + TraceClientOptions: cOptsDisableLogTrace, }) if err != nil { return nil, fmt.Errorf("failed to create Stackdriver exporter: %v", err) From d8f80bb0a326eca1c59051a422433b4f3ae5d85c Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 2 Mar 2023 17:13:50 -0500 Subject: [PATCH 803/998] stats/opencensus: Added client api latency and upgrade go.mod (#6042) --- gcp/observability/go.mod | 2 +- gcp/observability/go.sum | 4 ++-- gcp/observability/observability_test.go | 3 +++ gcp/observability/opencensus.go | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index a52a353ec3c1..e730895d3aa4 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -11,7 +11,7 @@ 
require ( golang.org/x/oauth2 v0.4.0 google.golang.org/api v0.103.0 google.golang.org/grpc v1.52.0 - google.golang.org/grpc/stats/opencensus v0.0.0-20230214213552-081499f2e8a4 + google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0 ) require ( diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index ea8a84e214a7..109fef979a6b 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1056,8 +1056,8 @@ google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614G google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/stats/opencensus v0.0.0-20230214213552-081499f2e8a4 h1:JfKOhIhejpMhny1RYnvFO5QxXdVOEFSE12OSTgQvFus= -google.golang.org/grpc/stats/opencensus v0.0.0-20230214213552-081499f2e8a4/go.mod h1:l7+BYcyrDJFQo8nh4v8h5TJ6VfQ9QGBfFqVO7xoqQzI= +google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0 h1:v7h+HONu0plE0b3y9fBiOWlsqTdQQ5A9l9Ag2LXbEoE= +google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0/go.mod h1:l7+BYcyrDJFQo8nh4v8h5TJ6VfQ9QGBfFqVO7xoqQzI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 07e78be56e6f..87e9668eefdf 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -375,6 +375,9 @@ func (s) 
TestOpenCensusIntegration(t *testing.T) { for ctx.Err() == nil { errs = nil fe.mu.RLock() + if value := fe.SeenViews["grpc.io/client/api_latency"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/api_latency: %s != %s", value, TypeOpenCensusViewDistribution)) + } if value := fe.SeenViews["grpc.io/client/started_rpcs"]; value != TypeOpenCensusViewCount { errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/started_rpcs: %s != %s", value, TypeOpenCensusViewCount)) } diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index d80d2d9aa05e..386b5ec55b4a 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -106,7 +106,7 @@ func startOpenCensus(config *config) error { } if config.CloudMonitoring != nil { - if err := view.Register(opencensus.ClientStartedRPCsView, opencensus.ClientCompletedRPCsView, opencensus.ClientRoundtripLatencyView); err != nil { + if err := view.Register(opencensus.ClientAPILatencyView, opencensus.ClientStartedRPCsView, opencensus.ClientCompletedRPCsView, opencensus.ClientRoundtripLatencyView); err != nil { return fmt.Errorf("failed to register default client views: %v", err) } if err := view.Register(opencensus.ServerStartedRPCsView, opencensus.ServerCompletedRPCsView, opencensus.ServerLatencyView); err != nil { From 52dcd1470d11b6576efb5b85551712a25daa9789 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 2 Mar 2023 14:15:02 -0800 Subject: [PATCH 804/998] xdsclient: move tests from `e2e_test` to `tests` directory (#6073) --- xds/internal/xdsclient/{e2e_test => tests}/authority_test.go | 2 +- xds/internal/xdsclient/{e2e_test => tests}/cds_watchers_test.go | 2 +- xds/internal/xdsclient/{e2e_test => tests}/dump_test.go | 2 +- xds/internal/xdsclient/{e2e_test => tests}/eds_watchers_test.go | 2 +- .../xdsclient/{e2e_test => tests}/federation_watchers_test.go | 2 +- xds/internal/xdsclient/{e2e_test => 
tests}/lds_watchers_test.go | 2 +- .../xdsclient/{e2e_test => tests}/misc_watchers_test.go | 2 +- xds/internal/xdsclient/{e2e_test => tests}/rds_watchers_test.go | 2 +- .../xdsclient/{e2e_test => tests}/resource_update_test.go | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) rename xds/internal/xdsclient/{e2e_test => tests}/authority_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/cds_watchers_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/dump_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/eds_watchers_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/federation_watchers_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/lds_watchers_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/misc_watchers_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/rds_watchers_test.go (99%) rename xds/internal/xdsclient/{e2e_test => tests}/resource_update_test.go (99%) diff --git a/xds/internal/xdsclient/e2e_test/authority_test.go b/xds/internal/xdsclient/tests/authority_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/authority_test.go rename to xds/internal/xdsclient/tests/authority_test.go index f2cb06e300ab..b14d0d1cdfe7 100644 --- a/xds/internal/xdsclient/e2e_test/authority_test.go +++ b/xds/internal/xdsclient/tests/authority_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go b/xds/internal/xdsclient/tests/cds_watchers_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/cds_watchers_test.go rename to xds/internal/xdsclient/tests/cds_watchers_test.go index 88306b7be08d..9cbff4af83d7 100644 --- a/xds/internal/xdsclient/e2e_test/cds_watchers_test.go +++ b/xds/internal/xdsclient/tests/cds_watchers_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" diff --git 
a/xds/internal/xdsclient/e2e_test/dump_test.go b/xds/internal/xdsclient/tests/dump_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/dump_test.go rename to xds/internal/xdsclient/tests/dump_test.go index f71b94c683e3..4fe4ffb0e864 100644 --- a/xds/internal/xdsclient/e2e_test/dump_test.go +++ b/xds/internal/xdsclient/tests/dump_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go b/xds/internal/xdsclient/tests/eds_watchers_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/eds_watchers_test.go rename to xds/internal/xdsclient/tests/eds_watchers_test.go index fac63379166e..1389862f4596 100644 --- a/xds/internal/xdsclient/e2e_test/eds_watchers_test.go +++ b/xds/internal/xdsclient/tests/eds_watchers_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go b/xds/internal/xdsclient/tests/federation_watchers_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/federation_watchers_test.go rename to xds/internal/xdsclient/tests/federation_watchers_test.go index 1218a4527a5a..974e6221aab9 100644 --- a/xds/internal/xdsclient/e2e_test/federation_watchers_test.go +++ b/xds/internal/xdsclient/tests/federation_watchers_test.go @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package e2e_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go b/xds/internal/xdsclient/tests/lds_watchers_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/lds_watchers_test.go rename to xds/internal/xdsclient/tests/lds_watchers_test.go index 07eb066f5d00..67da0843ed66 100644 --- a/xds/internal/xdsclient/e2e_test/lds_watchers_test.go +++ b/xds/internal/xdsclient/tests/lds_watchers_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go b/xds/internal/xdsclient/tests/misc_watchers_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/misc_watchers_test.go rename to xds/internal/xdsclient/tests/misc_watchers_test.go index fd08e33bbaab..1b67f547e93c 100644 --- a/xds/internal/xdsclient/e2e_test/misc_watchers_test.go +++ b/xds/internal/xdsclient/tests/misc_watchers_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go b/xds/internal/xdsclient/tests/rds_watchers_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/rds_watchers_test.go rename to xds/internal/xdsclient/tests/rds_watchers_test.go index 93425a1f74e4..2617622c7b34 100644 --- a/xds/internal/xdsclient/e2e_test/rds_watchers_test.go +++ b/xds/internal/xdsclient/tests/rds_watchers_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" diff --git a/xds/internal/xdsclient/e2e_test/resource_update_test.go b/xds/internal/xdsclient/tests/resource_update_test.go similarity index 99% rename from xds/internal/xdsclient/e2e_test/resource_update_test.go rename to xds/internal/xdsclient/tests/resource_update_test.go index 415c21998bae..70496f54d4c3 100644 --- a/xds/internal/xdsclient/e2e_test/resource_update_test.go +++ 
b/xds/internal/xdsclient/tests/resource_update_test.go @@ -16,7 +16,7 @@ * */ -package e2e_test +package xdsclient_test import ( "context" From ae4a23150b4c2f6103843c9f7930140604501e71 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 2 Mar 2023 17:47:45 -0800 Subject: [PATCH 805/998] ringhash: ensure addresses are consistently hashed across updates (#6066) --- xds/internal/balancer/ringhash/ring.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/xds/internal/balancer/ringhash/ring.go b/xds/internal/balancer/ringhash/ring.go index 3e35556d8a73..4d7fdb35e722 100644 --- a/xds/internal/balancer/ringhash/ring.go +++ b/xds/internal/balancer/ringhash/ring.go @@ -92,16 +92,19 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, log // // A hash is generated for each item, and later the results will be sorted // based on the hash. - var ( - idx int - targetIdx float64 - ) + var currentHashes, targetHashes float64 for _, scw := range normalizedWeights { - targetIdx += scale * scw.weight - for float64(idx) < targetIdx { - h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(idx)) - items = append(items, &ringEntry{idx: idx, hash: h, sc: scw.sc}) + targetHashes += scale * scw.weight + // This index ensures that ring entries corresponding to the same + // address hash to different values. And since this index is + // per-address, these entries hash to the same value across address + // updates.
+ idx := 0 + for currentHashes < targetHashes { + h := xxhash.Sum64String(scw.sc.addr + "_" + strconv.Itoa(idx)) + items = append(items, &ringEntry{hash: h, sc: scw.sc}) idx++ + currentHashes++ } } From b9e6d59a1a5f4674ea4b9a4c38af15f6bd186ae6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 3 Mar 2023 13:07:40 -0800 Subject: [PATCH 806/998] xdsclient: send Node proto only on first discovery request on ADS stream (#6078) --- xds/internal/testutils/fakeserver/server.go | 18 +- xds/internal/testutils/resource_watcher.go | 64 +++++++ xds/internal/xdsclient/authority_test.go | 52 ++--- xds/internal/xdsclient/loadreport_test.go | 4 +- .../xdsclient/tests/misc_watchers_test.go | 180 ++++++++++++++++++ .../xdsclient/tests/resource_update_test.go | 2 +- xds/internal/xdsclient/transport/transport.go | 30 ++- .../transport/transport_resource_test.go | 2 +- 8 files changed, 298 insertions(+), 54 deletions(-) create mode 100644 xds/internal/testutils/resource_watcher.go diff --git a/xds/internal/testutils/fakeserver/server.go b/xds/internal/testutils/fakeserver/server.go index 8030f923428c..48c2e0b8ef4c 100644 --- a/xds/internal/testutils/fakeserver/server.go +++ b/xds/internal/testutils/fakeserver/server.go @@ -114,13 +114,17 @@ func (wl *wrappedListener) Accept() (net.Conn, error) { return c, err } -// StartServer makes a new Server and gets it to start listening on a local -// port for gRPC requests. The returned cancel function should be invoked by -// the caller upon completion of the test. -func StartServer() (*Server, func(), error) { - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err) +// StartServer makes a new Server and gets it to start listening on the given +// net.Listener. If the given net.Listener is nil, a new one is created on a +// local port for gRPC requests. The returned cancel function should be invoked +// by the caller upon completion of the test. 
+func StartServer(lis net.Listener) (*Server, func(), error) { + if lis == nil { + var err error + lis, err = net.Listen("tcp", "localhost:0") + if err != nil { + return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err) + } + } } s := &Server{ diff --git a/xds/internal/testutils/resource_watcher.go b/xds/internal/testutils/resource_watcher.go new file mode 100644 index 000000000000..01235b0ba66f --- /dev/null +++ b/xds/internal/testutils/resource_watcher.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package testutils + +import "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + +// TestResourceWatcher implements the xdsresource.ResourceWatcher interface, +// used to receive updates on watches registered with the xDS client, when using +// the resource-type agnostic WatchResource API. +// +// Tests can use the channels provided by this type to get access to updates and +// errors sent by the xDS client. +type TestResourceWatcher struct { + // UpdateCh is the channel on which xDS client updates are delivered. + UpdateCh chan *xdsresource.ResourceData + // ErrorCh is the channel on which errors from the xDS client are delivered. + ErrorCh chan error +} + +// OnUpdate is invoked by the xDS client to report an update on the resource +// being watched.
+func (w *TestResourceWatcher) OnUpdate(data xdsresource.ResourceData) { + select { + case w.UpdateCh <- &data: + default: + } +} + +// OnError is invoked by the xDS client to report errors. +func (w *TestResourceWatcher) OnError(err error) { + select { + case w.ErrorCh <- err: + default: + } +} + +// OnResourceDoesNotExist is used by the xDS client to report that the resource +// being watched no longer exists. +func (w *TestResourceWatcher) OnResourceDoesNotExist() {} + +// NewTestResourceWatcher returns a TestResourceWatcher to watch for resources +// via the xDS client. +func NewTestResourceWatcher() *TestResourceWatcher { + return &TestResourceWatcher{ + UpdateCh: make(chan *xdsresource.ResourceData), + ErrorCh: make(chan error), + } +} diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index fa2661fcfd8d..4e8ea305921b 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -15,6 +15,7 @@ * limitations under the License. 
* */ + package xdsclient import ( @@ -32,6 +33,7 @@ import ( "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal" _ "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -39,34 +41,6 @@ import ( var emptyServerOpts = e2e.ManagementServerOptions{} -type testResourceWatcher struct { - updateCh chan *xdsresource.ResourceData - errorCh chan error -} - -func (w *testResourceWatcher) OnUpdate(data xdsresource.ResourceData) { - select { - case w.updateCh <- &data: - default: - } -} - -func (w *testResourceWatcher) OnError(err error) { - select { - case w.errorCh <- err: - default: - } -} - -func (w *testResourceWatcher) OnResourceDoesNotExist() {} - -func newTestResourceWatcher() *testResourceWatcher { - return &testResourceWatcher{ - updateCh: make(chan *xdsresource.ResourceData), - errorCh: make(chan error), - } -} - var ( // Listener resource type implementation retrieved from the resource type map // in the internal package, which is initialized when the individual resource @@ -131,7 +105,7 @@ func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { defer a.close() rn := "xdsclient-test-lds-resource" - w := newTestResourceWatcher() + w := testutils.NewTestResourceWatcher() cancelResource := a.watchResource(listenerResourceType, rn, w) defer cancelResource() @@ -155,9 +129,9 @@ func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { select { case <-ctx.Done(): t.Fatal("Test timed out before watcher received an update from server.") - case err := <-w.errorCh: + case err := <-w.ErrorCh: t.Fatalf("Watch got an unexpected error update: %q. Want valid updates.", err) - case <-w.updateCh: + case <-w.UpdateCh: // This means the OnUpdate callback was invoked and the watcher was notified. 
} if err := compareWatchState(a, rn, watchStateReceived); err != nil { @@ -176,7 +150,7 @@ func (s) TestTimerAndWatchStateOnErrorCallback(t *testing.T) { defer a.close() rn := "xdsclient-test-lds-resource" - w := newTestResourceWatcher() + w := testutils.NewTestResourceWatcher() cancelResource := a.watchResource(listenerResourceType, rn, w) defer cancelResource() @@ -188,7 +162,7 @@ func (s) TestTimerAndWatchStateOnErrorCallback(t *testing.T) { select { case <-ctx.Done(): t.Fatal("Test timed out before verifying error propagation.") - case err := <-w.errorCh: + case err := <-w.ErrorCh: if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { t.Fatal("Connection error not propagated to watchers.") } @@ -219,7 +193,7 @@ func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { defer a.close() nameA := "xdsclient-test-lds-resourceA" - watcherA := newTestResourceWatcher() + watcherA := testutils.NewTestResourceWatcher() cancelA := a.watchResource(listenerResourceType, nameA, watcherA) if err := updateResourceInServer(ctx, ms, nameA, nodeID); err != nil { @@ -231,13 +205,13 @@ func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { select { case <-ctx.Done(): t.Fatal("Test timed out before watcher received the update.") - case err := <-watcherA.errorCh: + case err := <-watcherA.ErrorCh: t.Fatalf("Watch got an unexpected error update: %q; want: valid update.", err) - case <-watcherA.updateCh: + case <-watcherA.UpdateCh: } nameB := "xdsclient-test-lds-resourceB" - watcherB := newTestResourceWatcher() + watcherB := testutils.NewTestResourceWatcher() cancelB := a.watchResource(listenerResourceType, nameB, watcherB) defer cancelB() @@ -249,9 +223,9 @@ func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { select { case <-ctx.Done(): t.Fatal("Test timed out before mgmt server got the request.") - case u := <-watcherB.updateCh: + case u := <-watcherB.UpdateCh: t.Fatalf("Watch got an unexpected resource 
update: %v.", u) - case gotErr := <-watcherB.errorCh: + case gotErr := <-watcherB.ErrorCh: wantErr := xdsresource.ErrorTypeConnection if xdsresource.ErrType(gotErr) != wantErr { t.Fatalf("Watch got an unexpected error:%q. Want: %q.", gotErr, wantErr) diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 23fb2d4cf46c..92450ce747e0 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -43,7 +43,7 @@ const ( ) func (s) TestLRSClient(t *testing.T) { - fs, sCleanup, err := fakeserver.StartServer() + fs, sCleanup, err := fakeserver.StartServer(nil) if err != nil { t.Fatalf("failed to start fake xDS server: %v", err) } @@ -83,7 +83,7 @@ func (s) TestLRSClient(t *testing.T) { t.Errorf("unexpected NewConn: %v, %v, want channel recv timeout", u, err) } - fs2, sCleanup2, err := fakeserver.StartServer() + fs2, sCleanup2, err := fakeserver.StartServer(nil) if err != nil { t.Fatalf("failed to start fake xDS server: %v", err) } diff --git a/xds/internal/xdsclient/tests/misc_watchers_test.go b/xds/internal/xdsclient/tests/misc_watchers_test.go index 1b67f547e93c..0d09a921f08b 100644 --- a/xds/internal/xdsclient/tests/misc_watchers_test.go +++ b/xds/internal/xdsclient/tests/misc_watchers_test.go @@ -20,14 +20,31 @@ package xdsclient_test import ( "context" + "fmt" "testing" + "github.com/google/uuid" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" v3routepb 
"github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +var ( + // Resource type implementations retrieved from the resource type map in the + // internal package, which is initialized when the individual resource types + // are created. + listenerResourceType = internal.ResourceTypeMapForTesting[version.V3ListenerURL].(xdsresource.Type) + routeConfigResourceType = internal.ResourceTypeMapForTesting[version.V3RouteConfigURL].(xdsresource.Type) ) // TestWatchCallAnotherWatch tests the scenario where a watch is registered for @@ -129,3 +146,166 @@ func (s) TestWatchCallAnotherWatch(t *testing.T) { t.Fatal(err) } } + +// TestNodeProtoSentOnlyInFirstRequest verifies that a non-empty node proto gets +// sent only on the first discovery request message on the ADS stream. +// +// It also verifies the same behavior holds after a stream restart. +func (s) TestNodeProtoSentOnlyInFirstRequest(t *testing.T) { + overrideFedEnvVar(t) + + // Create a restartable listener which can close existing connections. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis := testutils.NewRestartableListener(l) + + // Start a fake xDS management server with the above restartable listener. + // + // We are unable to use the go-control-plane server here, because it caches + // the node proto received in the first request message and adds it to + // subsequent requests before invoking the OnStreamRequest() callback. + // Therefore we cannot verify what is sent by the xDS client. + mgmtServer, cleanup, err := fakeserver.StartServer(lis) + if err != nil { + t.Fatalf("Failed to start fake xDS server: %v", err) + } + defer cleanup() + + // Create a bootstrap file in a temporary directory. 
+ nodeID := uuid.New().String() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatalf("Failed to create bootstrap file: %v", err) + } + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Configure a listener resource on the fake xDS server. + const ( + serviceName = "my-service-client-side-xds" + routeConfigName = "route-" + serviceName + clusterName = "cluster-" + serviceName + ) + lisAny, err := anypb.New(e2e.DefaultClientListener(serviceName, routeConfigName)) + if err != nil { + t.Fatalf("Failed to marshal listener resource into an Any proto: %v", err) + } + mgmtServer.XDSResponseChan <- &fakeserver.Response{ + Resp: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{lisAny}, + }, + } + + // Register a watch for the Listener resource. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + watcher := xdstestutils.NewTestResourceWatcher() + client.WatchResource(listenerResourceType, serviceName, watcher) + + // The first request on the stream must contain a non-empty node proto. + if err := readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // The xDS client is expected to ACK the Listener resource. The discovery + // request corresponding to the ACK must contain a nil node proto. + if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // Configure the route configuration resource on the fake xDS server. 
+ rcAny, err := anypb.New(e2e.DefaultRouteConfig(routeConfigName, serviceName, clusterName)) + if err != nil { + t.Fatalf("Failed to marshal route configuration resource into an Any proto: %v", err) + } + mgmtServer.XDSResponseChan <- &fakeserver.Response{ + Resp: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{rcAny}, + }, + } + + // Register a watch for a RouteConfiguration resource. Ensure that the + // discovery requests for the route configuration resource and the + // subsequent ACK contains an empty node proto. + client.WatchResource(routeConfigResourceType, routeConfigName, watcher) + if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // Stop the management server and expect the error callback to be invoked. + lis.Stop() + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for the connection error to be propagated to the watcher") + case <-watcher.ErrorCh: + } + + // Restart the management server. + lis.Restart() + + // The xDS client is expected to re-request previously requested resources. + // Hence, we expect two DiscoveryRequest messages (one for the Listener and + // one for the RouteConfiguration resource). The first message should + // contain a non-nil node proto and second one should contain a nil-proto. + // + // And since we don't push any responses on the response channel of the fake + // server, we do not expect any ACKs here. + if err := readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // The xDS client is expected to ACK the Listener resource. The discovery + // request corresponding to the ACK must contain a nil node proto. 
+ if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } +} + +// readDiscoveryResponseAndCheckForEmptyNodeProto reads a discovery request +// message out of the provided reqCh. It returns an error if it fails to read a +// message before the context deadline expires, or if the read message contains +// a non-empty node proto. +func readDiscoveryResponseAndCheckForEmptyNodeProto(ctx context.Context, reqCh *testutils.Channel) error { + v, err := reqCh.Receive(ctx) + if err != nil { + return fmt.Errorf("Timeout when waiting for a DiscoveryRequest message") + } + req := v.(*fakeserver.Request).Req.(*v3discoverypb.DiscoveryRequest) + if node := req.GetNode(); node != nil { + return fmt.Errorf("Node proto received in DiscoveryRequest message is %v, want empty node proto", node) + } + return nil +} + +// readDiscoveryResponseAndCheckForNonEmptyNodeProto reads a discovery request +// message out of the provided reqCh. It returns an error if it fails to read a +// message before the context deadline expires, or if the read message contains +// an empty node proto. +func readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx context.Context, reqCh *testutils.Channel) error { + v, err := reqCh.Receive(ctx) + if err != nil { + return fmt.Errorf("Timeout when waiting for a DiscoveryRequest message") + } + req := v.(*fakeserver.Request).Req.(*v3discoverypb.DiscoveryRequest) + if node := req.GetNode(); node == nil { + return fmt.Errorf("Empty node proto received in DiscoveryRequest message, want non-empty node proto") + } + return nil +} diff --git a/xds/internal/xdsclient/tests/resource_update_test.go b/xds/internal/xdsclient/tests/resource_update_test.go index 70496f54d4c3..31ae8eda8d1a 100644 --- a/xds/internal/xdsclient/tests/resource_update_test.go +++ b/xds/internal/xdsclient/tests/resource_update_test.go @@ -57,7 +57,7 @@ import ( // cleanup function to close the fake server. 
func startFakeManagementServer(t *testing.T) (*fakeserver.Server, func()) { t.Helper() - fs, sCleanup, err := fakeserver.StartServer() + fs, sCleanup, err := fakeserver.StartServer(nil) if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 6dea512abf94..0ef46b8548d0 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -277,14 +277,16 @@ type ResourceSendInfo struct { URL string } -func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { +func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { req := &v3discoverypb.DiscoveryRequest{ - Node: t.nodeProto, TypeUrl: resourceURL, ResourceNames: resourceNames, VersionInfo: version, ResponseNonce: nonce, } + if sendNodeProto { + req.Node = t.nodeProto + } if nackErr != nil { req.ErrorDetail = &statuspb.Status{ Code: int32(codes.InvalidArgument), Message: nackErr.Error(), @@ -372,16 +374,32 @@ func (t *Transport) adsRunner(ctx context.Context) { // there are new streams) and the appropriate request is sent out. func (t *Transport) send(ctx context.Context) { var stream adsStream + // The xDS protocol only requires that we send the node proto in the first + // discovery request on every stream. Sending the node proto in every + // request message wastes CPU resources on the client and the server. + sendNodeProto := true for { select { case <-ctx.Done(): return case stream = <-t.adsStreamCh: + // We have a new stream and we've to ensure that the node proto gets + // sent out in the first request on the stream. At this point, we + // might not have any registered watches. 
Setting this field to true + // here will ensure that the node proto gets sent out along with the + // discovery request when the first watch is registered. + if len(t.resources) == 0 { + sendNodeProto = true + continue + } + if !t.sendExisting(stream) { // Send failed, clear the current stream. Attempt to resend will // only be made after a new stream is created. stream = nil + continue } + sendNodeProto = false case u := <-t.adsRequestCh.Get(): t.adsRequestCh.Load() @@ -408,11 +426,12 @@ func (t *Transport) send(ctx context.Context) { // sending response back). continue } - if err := t.sendAggregatedDiscoveryServiceRequest(stream, resources, url, version, nonce, nackErr); err != nil { + if err := t.sendAggregatedDiscoveryServiceRequest(stream, sendNodeProto, resources, url, version, nonce, nackErr); err != nil { t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) // Send failed, clear the current stream. stream = nil } + sendNodeProto = false } } } @@ -440,11 +459,14 @@ func (t *Transport) sendExisting(stream adsStream) bool { // seen by the client on the previous stream t.nonces = make(map[string]string) + // Send node proto only in the first request on the stream. 
+ sendNodeProto := true for url, resources := range t.resources { - if err := t.sendAggregatedDiscoveryServiceRequest(stream, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { + if err := t.sendAggregatedDiscoveryServiceRequest(stream, sendNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) return false } + sendNodeProto = false } return true diff --git a/xds/internal/xdsclient/transport/transport_resource_test.go b/xds/internal/xdsclient/transport/transport_resource_test.go index ed07e999fc7d..ceb5a7f67bf4 100644 --- a/xds/internal/xdsclient/transport/transport_resource_test.go +++ b/xds/internal/xdsclient/transport/transport_resource_test.go @@ -60,7 +60,7 @@ const ( // cleanup function to close the fake server. func startFakeManagementServer(t *testing.T) (*fakeserver.Server, func()) { t.Helper() - fs, sCleanup, err := fakeserver.StartServer() + fs, sCleanup, err := fakeserver.StartServer(nil) if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } From cc320bf820f54711da93f6d2b9d20596a8a72f0f Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 3 Mar 2023 17:20:54 -0500 Subject: [PATCH 807/998] grpc: Log server trailers before writing status (#6076) --- server.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/server.go b/server.go index fa7f8b580be6..0b92f44d8473 100644 --- a/server.go +++ b/server.go @@ -1443,7 +1443,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? 
- err = t.WriteStatus(stream, statusOK) if len(binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), @@ -1453,7 +1452,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. binlog.Log(st) } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1659,7 +1658,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), @@ -1669,6 +1667,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp binlog.Log(st) } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1677,7 +1676,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), @@ -1687,7 +1685,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp binlog.Log(st) } } - return err + return t.WriteStatus(ss.s, statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { From f31168468f371461d89277c470e597c7c882b396 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 3 Mar 2023 17:21:40 -0500 Subject: [PATCH 808/998] stats/opencensus: New uncompressed metrics and align with tracing spec (#6051) --- rpc_util.go | 17 +-- server.go | 11 +- stats/opencensus/client_metrics.go | 34 +++-- stats/opencensus/e2e_test.go | 196 +++++++++++++++++++++++++---- stats/opencensus/server_metrics.go | 34 +++-- stats/opencensus/stats.go | 11 ++ stats/opencensus/trace.go | 4 +- 
stats/stats.go | 22 +++- stats/stats_test.go | 4 +- stream.go | 14 ++- 10 files changed, 283 insertions(+), 64 deletions(-) diff --git a/rpc_util.go b/rpc_util.go index 0c65336ef6b9..2030736a306b 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -694,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, } } @@ -720,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. + compressedLength int // The compressed length got from wire. uncompressedBytes []byte } @@ -730,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { diff --git a/server.go b/server.go index 0b92f44d8473..8d573dc6075b 100644 --- a/server.go +++ b/server.go @@ -1320,11 +1320,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength + headerLen, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } if len(binlogs) != 0 { diff --git a/stats/opencensus/client_metrics.go b/stats/opencensus/client_metrics.go index 08bb9a8a4373..9286d95289a1 100644 --- a/stats/opencensus/client_metrics.go +++ b/stats/opencensus/client_metrics.go @@ -31,13 +31,15 @@ var ( // nature of how stats handlers are called on gRPC's client side, the per rpc // unit is actually per attempt throughout this definition file. var ( - clientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - clientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) - clientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - clientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) - clientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) - clientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "The total number of client RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) - clientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, 
stats.UnitMilliseconds) + clientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + clientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) + clientSentCompressedBytesPerRPC = stats.Int64("grpc.io/client/sent_compressed_message_bytes_per_rpc", "Total compressed bytes sent across all request messages per RPC.", stats.UnitBytes) + clientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + clientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) + clientReceivedCompressedBytesPerRPC = stats.Int64("grpc.io/client/received_compressed_message_bytes_per_rpc", "Total compressed bytes received across all response messages per RPC.", stats.UnitBytes) + clientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + clientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "The total number of client RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) + clientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) // Per call measure: clientAPILatency = stats.Float64("grpc.io/client/api_latency", "The end-to-end time the gRPC library takes to complete an RPC from the application’s perspective", stats.UnitMilliseconds) ) @@ -70,6 +72,15 @@ var ( TagKeys: []tag.Key{keyClientMethod}, Aggregation: bytesDistribution, } + // 
ClientSentCompressedBytesPerRPCView is the distribution of compressed + // sent bytes per RPC, keyed on method. + ClientSentCompressedBytesPerRPCView = &view.View{ + Measure: clientSentCompressedBytesPerRPC, + Name: "grpc.io/client/sent_compressed_bytes_per_rpc", + Description: "Distribution of sent compressed bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } // ClientReceivedBytesPerRPCView is the distribution of received bytes per // RPC, keyed on method. ClientReceivedBytesPerRPCView = &view.View{ @@ -79,6 +90,15 @@ var ( TagKeys: []tag.Key{keyClientMethod}, Aggregation: bytesDistribution, } + // ClientReceivedCompressedBytesPerRPCView is the distribution of compressed + // received bytes per RPC, keyed on method. + ClientReceivedCompressedBytesPerRPCView = &view.View{ + Measure: clientReceivedCompressedBytesPerRPC, + Name: "grpc.io/client/received_compressed_bytes_per_rpc", + Description: "Distribution of received compressed bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } // ClientStartedRPCsView is the count of opened RPCs, keyed on method. 
ClientStartedRPCsView = &view.View{ Measure: clientStartedRPCs, diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 120996cf2f60..251a75cdac54 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -33,6 +33,7 @@ import ( "go.opencensus.io/trace" "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/stubserver" @@ -235,9 +236,13 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { ClientCompletedRPCsView, ServerCompletedRPCsView, ClientSentBytesPerRPCView, + ClientSentCompressedBytesPerRPCView, ServerSentBytesPerRPCView, + ServerSentCompressedBytesPerRPCView, ClientReceivedBytesPerRPCView, + ClientReceivedCompressedBytesPerRPCView, ServerReceivedBytesPerRPCView, + ServerReceivedCompressedBytesPerRPCView, ClientSentMessagesPerRPCView, ServerSentMessagesPerRPCView, ClientReceivedMessagesPerRPCView, @@ -259,7 +264,9 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { ss := &stubserver.StubServer{ UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + return &grpc_testing.SimpleResponse{Payload: &grpc_testing.Payload{ + Body: make([]byte, 10000), + }}, nil }, FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { for { @@ -278,7 +285,9 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { defer cancel() // Make two RPC's, a unary RPC and a streaming RPC. These should cause // certain metrics to be emitted. 
- if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{ + Body: make([]byte, 10000), + }}, grpc.UseCompressor(gzip.Name)); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } stream, err := ss.Client.FullDuplexCall(ctx) @@ -467,6 +476,43 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { tagKeys: []tag.Key{ cmtk, }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientSentCompressedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of sent compressed bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, rows: []*view.Row{ { Tags: []tag.Tag{ @@ -504,6 +550,43 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { tagKeys: []tag.Key{ smtk, }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerSentCompressedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: 
bytesDistributionBounds, + desc: "Distribution of sent compressed bytes per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, rows: []*view.Row{ { Tags: []tag.Tag{ @@ -532,7 +615,6 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { }, }, }, - { metric: ClientReceivedBytesPerRPCView, wantVI: &viewInformation{ @@ -542,6 +624,43 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { tagKeys: []tag.Key{ cmtk, }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientReceivedCompressedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received compressed bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, rows: []*view.Row{ { Tags: []tag.Tag{ @@ -579,6 +698,43 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { tagKeys: []tag.Key{ smtk, }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerReceivedCompressedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received compressed bytes per 
RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, rows: []*view.Row{ { Tags: []tag.Tag{ @@ -1186,12 +1342,11 @@ func (s) TestSpan(t *testing.T) { EventType: trace.MessageEventTypeRecv, MessageID: 1, // First msg recv so 1 (see comment above) UncompressedByteSize: 2, - CompressedByteSize: 7, + CompressedByteSize: 2, }, { - EventType: trace.MessageEventTypeSent, - MessageID: 1, // First msg send so 1 (see comment above) - CompressedByteSize: 5, + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 (see comment above) }, }, links: []trace.Link{ @@ -1215,12 +1370,11 @@ func (s) TestSpan(t *testing.T) { EventType: trace.MessageEventTypeSent, MessageID: 1, // First msg send so 1 (see comment above) UncompressedByteSize: 2, - CompressedByteSize: 7, + CompressedByteSize: 2, }, { - EventType: trace.MessageEventTypeRecv, - MessageID: 1, // First msg recv so 1 (see comment above) - CompressedByteSize: 5, + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 (see comment above) }, }, hasRemoteParent: false, @@ -1285,14 +1439,12 @@ func (s) TestSpan(t *testing.T) { }, messageEvents: []trace.MessageEvent{ { - EventType: trace.MessageEventTypeRecv, - MessageID: 1, // First msg recv so 1 - CompressedByteSize: 5, + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 }, { - EventType: trace.MessageEventTypeRecv, - MessageID: 2, // Second msg recv so 2 - CompressedByteSize: 5, + EventType: trace.MessageEventTypeRecv, + MessageID: 2, // Second msg recv so 2 }, }, hasRemoteParent: true, @@ -1314,14 +1466,12 @@ func (s) TestSpan(t *testing.T) { name: "Attempt.grpc.testing.TestService.FullDuplexCall", messageEvents: []trace.MessageEvent{ { - EventType: trace.MessageEventTypeSent, - MessageID: 1, // First msg send so 1 - CompressedByteSize: 5, + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 }, { - EventType: trace.MessageEventTypeSent, - MessageID: 2, // Second msg send so 2 - 
CompressedByteSize: 5, + EventType: trace.MessageEventTypeSent, + MessageID: 2, // Second msg send so 2 }, }, hasRemoteParent: false, diff --git a/stats/opencensus/server_metrics.go b/stats/opencensus/server_metrics.go index 9268271c3694..49a54232b7b6 100644 --- a/stats/opencensus/server_metrics.go +++ b/stats/opencensus/server_metrics.go @@ -31,12 +31,14 @@ var ( // server side, the per rpc unit is truly per rpc, as there is no concept of a // rpc attempt server side. var ( - serverReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) // the collection/measurement point of this measure handles the /rpc aspect of it - serverReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) - serverSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - serverSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) - serverStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "The total number of server RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) - serverLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) + serverReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) // the collection/measurement point of this measure handles the /rpc aspect of it + serverReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) + serverReceivedCompressedBytesPerRPC = stats.Int64("grpc.io/server/received_compressed_bytes_per_rpc", "Total compressed bytes received across all messages per RPC.", stats.UnitBytes) + serverSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + serverSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + serverSentCompressedBytesPerRPC = stats.Int64("grpc.io/server/sent_compressed_bytes_per_rpc", "Total compressed bytes sent in across all response messages per RPC.", stats.UnitBytes) + serverStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "The total number of server RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) + serverLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) ) var ( @@ -61,12 +63,21 @@ var ( // ServerSentBytesPerRPCView is the distribution of received bytes per RPC, // keyed on method. ServerSentBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_bytes_per_rpc", + Name: "grpc.io/server/sent_compressed_bytes_per_rpc", Description: "Distribution of sent bytes per RPC, by method.", Measure: serverSentBytesPerRPC, TagKeys: []tag.Key{keyServerMethod}, Aggregation: bytesDistribution, } + // ServerSentCompressedBytesPerRPCView is the distribution of received + // compressed bytes per RPC, keyed on method. 
+ ServerSentCompressedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_bytes_per_rpc", + Description: "Distribution of sent compressed bytes per RPC, by method.", + Measure: serverSentCompressedBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } // ServerReceivedBytesPerRPCView is the distribution of sent bytes per RPC, // keyed on method. ServerReceivedBytesPerRPCView = &view.View{ @@ -76,6 +87,15 @@ var ( TagKeys: []tag.Key{keyServerMethod}, Aggregation: bytesDistribution, } + // ServerReceivedCompressedBytesPerRPCView is the distribution of sent bytes + // per RPC, keyed on method. + ServerReceivedCompressedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_compressed_bytes_per_rpc", + Description: "Distribution of received compressed bytes per RPC, by method.", + Measure: serverReceivedCompressedBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } // ServerStartedRPCsView is the count of opened RPCs, keyed on method. 
ServerStartedRPCsView = &view.View{ Measure: serverStartedRPCs, diff --git a/stats/opencensus/stats.go b/stats/opencensus/stats.go index 163b2bba161b..01cd0b9b157d 100644 --- a/stats/opencensus/stats.go +++ b/stats/opencensus/stats.go @@ -58,11 +58,16 @@ type metricsInfo struct { sentMsgs int64 // number of bytes sent (within each message) from side (client || server) sentBytes int64 + // number of bytes after compression (within each message) from side (client || server) + sentCompressedBytes int64 // number of messages received on side (client || server) recvMsgs int64 // number of bytes received (within each message) received on side (client // || server) recvBytes int64 + // number of compressed bytes received (within each message) received on + // side (client || server) + recvCompressedBytes int64 startTime time.Time method string @@ -156,6 +161,7 @@ func recordDataBegin(ctx context.Context, mi *metricsInfo, b *stats.Begin) { func recordDataOutPayload(mi *metricsInfo, op *stats.OutPayload) { atomic.AddInt64(&mi.sentMsgs, 1) atomic.AddInt64(&mi.sentBytes, int64(op.Length)) + atomic.AddInt64(&mi.sentCompressedBytes, int64(op.CompressedLength)) } // recordDataInPayload records the length in bytes of incoming messages and @@ -164,6 +170,7 @@ func recordDataOutPayload(mi *metricsInfo, op *stats.OutPayload) { func recordDataInPayload(mi *metricsInfo, ip *stats.InPayload) { atomic.AddInt64(&mi.recvMsgs, 1) atomic.AddInt64(&mi.recvBytes, int64(ip.Length)) + atomic.AddInt64(&mi.recvCompressedBytes, int64(ip.CompressedLength)) } // recordDataEnd takes per RPC measurements derived from information derived @@ -189,9 +196,11 @@ func recordDataEnd(ctx context.Context, mi *metricsInfo, e *stats.End) { tag.Upsert(keyClientStatus, st)), ocstats.WithMeasurements( clientSentBytesPerRPC.M(atomic.LoadInt64(&mi.sentBytes)), + clientSentCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.sentCompressedBytes)), clientSentMessagesPerRPC.M(atomic.LoadInt64(&mi.sentMsgs)), 
clientReceivedMessagesPerRPC.M(atomic.LoadInt64(&mi.recvMsgs)), clientReceivedBytesPerRPC.M(atomic.LoadInt64(&mi.recvBytes)), + clientReceivedCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.recvCompressedBytes)), clientRoundtripLatency.M(latency), clientServerLatency.M(latency), )) @@ -204,8 +213,10 @@ func recordDataEnd(ctx context.Context, mi *metricsInfo, e *stats.End) { ), ocstats.WithMeasurements( serverSentBytesPerRPC.M(atomic.LoadInt64(&mi.sentBytes)), + serverSentCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.sentCompressedBytes)), serverSentMessagesPerRPC.M(atomic.LoadInt64(&mi.sentMsgs)), serverReceivedMessagesPerRPC.M(atomic.LoadInt64(&mi.recvMsgs)), serverReceivedBytesPerRPC.M(atomic.LoadInt64(&mi.recvBytes)), + serverReceivedCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.recvCompressedBytes)), serverLatency.M(latency))) } diff --git a/stats/opencensus/trace.go b/stats/opencensus/trace.go index afe6729c3c86..2c8a93551fa7 100644 --- a/stats/opencensus/trace.go +++ b/stats/opencensus/trace.go @@ -100,10 +100,10 @@ func populateSpan(ctx context.Context, rs stats.RPCStats, ti *traceInfo) { // message id - "must be calculated as two different counters starting // from 1 one for sent messages and one for received messages." 
mi := atomic.AddUint32(&ti.countRecvMsg, 1) - span.AddMessageReceiveEvent(int64(mi), int64(rs.Length), int64(rs.WireLength)) + span.AddMessageReceiveEvent(int64(mi), int64(rs.Length), int64(rs.CompressedLength)) case *stats.OutPayload: mi := atomic.AddUint32(&ti.countSentMsg, 1) - span.AddMessageSendEvent(int64(mi), int64(rs.Length), int64(rs.WireLength)) + span.AddMessageSendEvent(int64(mi), int64(rs.Length), int64(rs.CompressedLength)) case *stats.End: if rs.Error != nil { // "The mapping between gRPC canonical codes and OpenCensus codes diff --git a/stats/stats.go b/stats/stats.go index 0285dcc6a268..7a552a9b7871 100644 --- a/stats/stats.go +++ b/stats/stats.go @@ -67,10 +67,18 @@ type InPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -129,9 +137,15 @@ type OutPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. 
+ CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/stats/stats_test.go b/stats/stats_test.go index 0b7276e48cb7..b0b3df70c9d3 100644 --- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -568,9 +568,9 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) { } // Below are sanity checks that WireLength and RecvTime are populated. // TODO: check values of WireLength and RecvTime. - if len(st.Data) > 0 && st.WireLength == 0 { + if len(st.Data) > 0 && st.CompressedLength == 0 { t.Fatalf("st.WireLength = %v with non-empty data, want ", - st.WireLength) + st.CompressedLength) } if st.RecvTime.IsZero() { t.Fatalf("st.ReceivedTime = %v, want ", st.RecvTime) diff --git a/stream.go b/stream.go index 368da22acd57..0926cd4f5755 100644 --- a/stream.go +++ b/stream.go @@ -1093,9 +1093,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1716,9 +1717,10 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, }) } } From b46bdef165183076a0322fdf24142644735a6957 Mon Sep 17 00:00:00 2001 From: Stanley Cheung Date: Fri, 3 Mar 2023 15:00:20 -0800 Subject: [PATCH 809/998] interop/observability: add GCP Observability Testing Client/Server (#5979) --- interop/observability/Dockerfile | 53 ++ interop/observability/build_docker.sh | 26 + interop/observability/client/client.go | 78 ++ interop/observability/go.mod | 46 + interop/observability/go.sum | 1093 ++++++++++++++++++++++++ interop/observability/run.sh | 41 + interop/observability/server/server.go | 55 ++ 7 files changed, 1392 insertions(+) create mode 100644 interop/observability/Dockerfile create mode 100755 interop/observability/build_docker.sh create mode 100644 interop/observability/client/client.go create mode 100644 interop/observability/go.mod create mode 100644 interop/observability/go.sum create mode 100755 interop/observability/run.sh create mode 100644 interop/observability/server/server.go diff --git a/interop/observability/Dockerfile b/interop/observability/Dockerfile new file mode 100644 index 000000000000..7fcfc6df7865 --- /dev/null +++ b/interop/observability/Dockerfile @@ -0,0 +1,53 @@ +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# Stage 1: Build the interop test client and server +# + +FROM golang:1.17.13-bullseye as build + +WORKDIR /grpc-go +COPY . . + +WORKDIR /grpc-go/interop/observability +RUN go build -o server/ server/server.go && \ + go build -o client/ client/client.go + + + +# +# Stage 2: +# +# - Copy only the necessary files to reduce Docker image size. +# - Have an ENTRYPOINT script which will launch the interop test client or server +# with the given parameters. +# + +FROM golang:1.17.13-bullseye + +ENV GRPC_GO_LOG_SEVERITY_LEVEL info +ENV GRPC_GO_LOG_VERBOSITY_LEVEL 2 + +WORKDIR /grpc-go/interop/observability/server +COPY --from=build /grpc-go/interop/observability/server/server . + +WORKDIR /grpc-go/interop/observability/client +COPY --from=build /grpc-go/interop/observability/client/client . + +WORKDIR /grpc-go/interop/observability +COPY --from=build /grpc-go/interop/observability/run.sh . + +ENTRYPOINT ["/grpc-go/interop/observability/run.sh"] diff --git a/interop/observability/build_docker.sh b/interop/observability/build_docker.sh new file mode 100755 index 000000000000..ed7a1811e923 --- /dev/null +++ b/interop/observability/build_docker.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex +cd "$(dirname "$0")"/../.. 
+ +# Environment Variables: +# +# TAG_NAME: the docker image tag name +# + +echo Building ${TAG_NAME} + +docker build --no-cache -t ${TAG_NAME} -f ./interop/observability/Dockerfile . diff --git a/interop/observability/client/client.go b/interop/observability/client/client.go new file mode 100644 index 000000000000..d8cf72fa76c9 --- /dev/null +++ b/interop/observability/client/client.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "context" + "flag" + "log" + "net" + "strconv" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/gcp/observability" + "google.golang.org/grpc/interop" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + serverHost = flag.String("server_host", "localhost", "The server host name") + serverPort = flag.Int("server_port", 10000, "The server port number") + testCase = flag.String("test_case", "large_unary", "The action to perform") + numTimes = flag.Int("num_times", 1, "Number of times to run the test case") +) + +func main() { + err := observability.Start(context.Background()) + if err != nil { + log.Fatalf("observability start failed: %v", err) + } + defer observability.End() + flag.Parse() + serverAddr := *serverHost + if *serverPort != 0 { + serverAddr = net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) + } + conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + tc := testgrpc.NewTestServiceClient(conn) + for i := 0; i < *numTimes; i++ { + if *testCase == "ping_pong" { + interop.DoPingPong(tc) + } else if *testCase == "large_unary" { + interop.DoLargeUnaryCall(tc) + } else if *testCase == "custom_metadata" { + interop.DoCustomMetadata(tc) + } else { + log.Fatalf("Invalid test case: %s", *testCase) + } + } + // TODO(stanleycheung): remove this once the observability exporter plugin is able to + // gracefully flush observability data to cloud at shutdown + // TODO(stanleycheung): see if we can reduce the number 65 + const exporterSleepDuration = 65 * time.Second + log.Printf("Sleeping %v before closing...", exporterSleepDuration) + time.Sleep(exporterSleepDuration) +} diff --git a/interop/observability/go.mod b/interop/observability/go.mod new file mode 100644 index 000000000000..783ec9c24c43 --- /dev/null +++ 
b/interop/observability/go.mod @@ -0,0 +1,46 @@ +module google.golang.org/grpc/interop/observability + +go 1.17 + +require ( + google.golang.org/grpc v1.53.0 + google.golang.org/grpc/gcp/observability v0.0.0-20230214181353-f4feddb37523 +) + +require ( + cloud.google.com/go v0.107.0 // indirect + cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/logging v1.6.1 // indirect + cloud.google.com/go/longrunning v0.3.0 // indirect + cloud.google.com/go/monitoring v1.8.0 // indirect + cloud.google.com/go/trace v1.4.0 // indirect + contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect + github.com/aws/aws-sdk-go v1.44.162 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/prometheus/prometheus v2.5.0+incompatible // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/oauth2 v0.4.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect + google.golang.org/api v0.103.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) + +replace google.golang.org/grpc => ../.. 
+ +replace google.golang.org/grpc/gcp/observability => ../../gcp/observability + +replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/interop/observability/go.sum b/interop/observability/go.sum new file mode 100644 index 000000000000..b60536b2d605 --- /dev/null +++ b/interop/observability/go.sum @@ -0,0 +1,1093 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod 
h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= 
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= 
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= 
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod 
h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod 
h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod 
h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= 
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= 
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= 
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod 
h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= 
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= 
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= 
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod 
h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= 
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod 
h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0 h1:qO9eLn2esajC9sxpqp1YKX37nXC3L4BfGnPS0Cx9dYo= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= 
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= +github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 
v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0 
h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= 
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod 
h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod 
h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod 
h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto 
v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/interop/observability/run.sh b/interop/observability/run.sh new file mode 100755 index 000000000000..d2634980da47 --- /dev/null +++ b/interop/observability/run.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex +cd "$(dirname "$0")"/../.. 
+ +# TODO(stanleycheung): replace positional parameters with explicit parameters +# +# $1: server | client +# +# For server: $2: server_port +# +# For client: $2: server_host +# $3: server_port +# $4: test_case +# $5: num_times + +if [ "$1" = "server" ] ; then + /grpc-go/interop/observability/server/server --port $2 + +elif [ "$1" = "client" ] ; then + /grpc-go/interop/observability/client/client \ + --server_host=$2 --server_port=$3 \ + --test_case=$4 --num_times=$5 + +else + echo "Invalid action: $1" + exit 1 +fi diff --git a/interop/observability/server/server.go b/interop/observability/server/server.go new file mode 100644 index 000000000000..7efab04bba3f --- /dev/null +++ b/interop/observability/server/server.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/gcp/observability" + "google.golang.org/grpc/interop" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + port = flag.Int("port", 10000, "The server port") +) + +func main() { + err := observability.Start(context.Background()) + if err != nil { + log.Fatalf("observability start failed: %v", err) + } + defer observability.End() + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + server := grpc.NewServer() + defer server.Stop() + testgrpc.RegisterTestServiceServer(server, interop.NewTestServer()) + log.Printf("Observability interop server listening on %v", lis.Addr()) + server.Serve(lis) +} From e83e34be0b72a3015b03725947702ee3352eb80a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 6 Mar 2023 13:57:32 -0800 Subject: [PATCH 810/998] xds/resolver/test: use a non-blocking send instead of closing the channel (#6082) --- xds/internal/resolver/xds_resolver_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 80f7fb75b126..1e7ae998aebd 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -463,12 +463,15 @@ func (s) TestResolverWatchCallbackAfterClose(t *testing.T) { // it receives a discovery request for a route configuration resource. And // the test goroutine signals the management server when the resolver is // closed. 
- waitForRouteConfigDiscoveryReqCh := make(chan struct{}) + waitForRouteConfigDiscoveryReqCh := make(chan struct{}, 1) waitForResolverCloseCh := make(chan struct{}) mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { if req.GetTypeUrl() == version.V3RouteConfigURL { - close(waitForRouteConfigDiscoveryReqCh) + select { + case waitForRouteConfigDiscoveryReqCh <- struct{}{}: + default: + } <-waitForResolverCloseCh } return nil From 3292193519c3a61e19232348f543a9b318f80d50 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 6 Mar 2023 15:45:45 -0800 Subject: [PATCH 811/998] xdsclient: handle race with watch timer handling (#6086) --- xds/internal/xdsclient/authority.go | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 9a46161931be..1ea1d532e3d8 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -169,8 +169,32 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // Cancel the expiry timer associated with the resource once a // response is received, irrespective of whether the update is a // good one or not. - if state.wState == watchStateRequested { - state.wTimer.Stop() + // + // We check for watch states `started` and `requested` here to + // accommodate for a race which can happen in the following + // scenario: + // - When a watch is registered, it is possible that the ADS stream + // is not yet created. In this case, the request for the resource + // is not sent out immediately. An entry in the `resourceStates` + // map is created with a watch state of `started`. + // - Once the stream is created, it is possible that the management + // server might respond with the requested resource before we send + // out request for the same. 
If we don't check for `started` here, + // and move the state to `received`, we will end up starting the + // timer when the request gets sent out. And since the mangement + // server already sent us the resource, there is a good chance + // that it will not send it again. This would eventually lead to + // the timer firing, even though we have the resource in the + // cache. + if state.wState == watchStateStarted || state.wState == watchStateRequested { + // It is OK to ignore the return value from Stop() here because + // if the timer has already fired, it means that the timer watch + // expiry callback is blocked on the same lock that we currently + // hold. Since we move the state to `received` here, the timer + // callback will be a no-op. + if state.wTimer != nil { + state.wTimer.Stop() + } state.wState = watchStateReceived } From 33df9fc43d365166e8a68c62fb9f4496f1492f09 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 7 Mar 2023 10:03:02 -0800 Subject: [PATCH 812/998] credentials/xds: improve error message upon SAN matching failure (#6080) --- credentials/xds/xds.go | 6 ++++-- credentials/xds/xds_client_test.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/credentials/xds/xds.go b/credentials/xds/xds.go index 680ea9cfa109..fd402b81655d 100644 --- a/credentials/xds/xds.go +++ b/credentials/xds/xds.go @@ -162,8 +162,10 @@ func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawCo } // The SANs sent by the MeshCA are encoded as SPIFFE IDs. We need to // only look at the SANs on the leaf cert. - if !hi.MatchingSANExists(certs[0]) { - return fmt.Errorf("SANs received in leaf certificate %+v does not match any of the accepted SANs", certs[0]) + if cert := certs[0]; !hi.MatchingSANExists(cert) { + // TODO: Print the complete certificate once the x509 package + // supports a String() method on the Certificate type. 
+ return fmt.Errorf("Received SANs {DNSNames: %v, EmailAddresses: %v, IPAddresses: %v, URIs: %v} do not match any of the accepted SANs", cert.DNSNames, cert.EmailAddresses, cert.IPAddresses, cert.URIs) } return nil } diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index 456af3454842..2fd2e21cdd73 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -476,7 +476,7 @@ func (s) TestClientCredsHandshakeFailure(t *testing.T) { handshakeFunc: testServerTLSHandshake, rootProvider: makeRootProvider(t, "x509/server_ca_cert.pem"), san: "bad-san", - wantErr: "does not match any of the accepted SANs", + wantErr: "do not match any of the accepted SANs", }, } From 558e1b6f7f8e705c486fe12ebdbbfec064d7941d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 7 Mar 2023 14:50:03 -0800 Subject: [PATCH 813/998] examples/authz: add token package docstring (#6095) --- examples/features/authz/token/token.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/features/authz/token/token.go b/examples/features/authz/token/token.go index 8447c13992b6..4899f3b08658 100644 --- a/examples/features/authz/token/token.go +++ b/examples/features/authz/token/token.go @@ -16,6 +16,8 @@ * */ +// Package token implements an example of authorization token encoding/decoding +// that can be used in RPC headers. 
package token import ( From ad4057fcc57e701b94eaf87483bd356a7bc47416 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 8 Mar 2023 13:40:47 -0800 Subject: [PATCH 814/998] transport: stop returning errors that are always nil (#6098) --- internal/transport/controlbuf.go | 34 +++++++++++++------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 9097385e1a6a..6bee9485a6e4 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -581,11 +581,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. 
if str, ok := l.estdStreams[w.streamID]; ok { @@ -593,10 +593,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -604,13 +603,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -618,7 +615,6 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { @@ -720,10 +716,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. 
@@ -732,7 +728,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -743,9 +738,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -826,7 +820,7 @@ func (l *loopyWriter) closeConnectionHandler() error { func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -836,7 +830,7 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -844,21 +838,22 @@ func (l *loopyWriter) handle(i interface{}) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) case closeConnection: return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } + return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) error { +func (l *loopyWriter) applySettings(ss []http2.Setting) { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -877,7 +872,6 @@ func (l *loopyWriter) applySettings(ss 
[]http2.Setting) error { updateHeaderTblSize(l.hEnc, s.Val) } } - return nil } // processData removes the first stream from active streams, writes out at most 16KB From 55d8783479c7cc1a72a435340432702a67d809aa Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 9 Mar 2023 13:56:23 -0500 Subject: [PATCH 815/998] gcp/observability: Link logs and traces by logging Trace and Span IDs (#6056) --- gcp/observability/go.mod | 2 + gcp/observability/go.sum | 2 - gcp/observability/logging.go | 60 ++- gcp/observability/logging_test.go | 9 + gcp/observability/observability_test.go | 568 +++++++++++++++++++++++ internal/binarylog/binarylog.go | 8 +- internal/binarylog/binarylog_test.go | 1 - internal/binarylog/method_logger.go | 5 +- internal/binarylog/method_logger_test.go | 3 +- server.go | 24 +- stats/opencensus/e2e_test.go | 4 +- stats/opencensus/opencensus.go | 13 +- stats/opencensus/trace.go | 2 + stream.go | 24 +- test/end2end_test.go | 2 +- 15 files changed, 673 insertions(+), 54 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index e730895d3aa4..2f9b5e0bc002 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -39,3 +39,5 @@ require ( ) replace google.golang.org/grpc => ../../ + +replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 109fef979a6b..b60536b2d605 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1056,8 +1056,6 @@ google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614G google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= 
-google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0 h1:v7h+HONu0plE0b3y9fBiOWlsqTdQQ5A9l9Ag2LXbEoE= -google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0/go.mod h1:l7+BYcyrDJFQo8nh4v8h5TJ6VfQ9QGBfFqVO7xoqQzI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index 6c6d8bf0aada..7be05975146c 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -29,6 +29,7 @@ import ( gcplogging "cloud.google.com/go/logging" "github.com/google/uuid" + "go.opencensus.io/trace" "google.golang.org/grpc" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" @@ -36,6 +37,7 @@ import ( "google.golang.org/grpc/internal/binarylog" iblog "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/stats/opencensus" ) var lExporter loggingExporter @@ -237,13 +239,16 @@ type methodLoggerBuilder interface { } type binaryMethodLogger struct { - callID, serviceName, methodName, authority string + callID, serviceName, methodName, authority, projectID string - mlb methodLoggerBuilder - exporter loggingExporter + mlb methodLoggerBuilder + exporter loggingExporter + clientSide bool } -func (bml *binaryMethodLogger) Log(c iblog.LogEntryConfig) { +// buildGCPLoggingEntry converts the binary log log entry into a gcp logging +// entry. 
+func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog.LogEntryConfig) gcplogging.Entry { binLogEntry := bml.mlb.Build(c) grpcLogEntry := &grpcLogEntry{ @@ -305,9 +310,6 @@ func (bml *binaryMethodLogger) Log(c iblog.LogEntryConfig) { setPeerIfPresent(binLogEntry, grpcLogEntry) case binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL: grpcLogEntry.Type = eventTypeCancel - default: - logger.Infof("Unknown event type: %v", binLogEntry.Type) - return } grpcLogEntry.ServiceName = bml.serviceName grpcLogEntry.MethodName = bml.methodName @@ -318,8 +320,25 @@ func (bml *binaryMethodLogger) Log(c iblog.LogEntryConfig) { Severity: 100, Payload: grpcLogEntry, } + if bml.clientSide { + // client side span, populated through opencensus trace package. + if span := trace.FromContext(ctx); span != nil { + sc := span.SpanContext() + gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + fmt.Sprintf("%x", sc.TraceID) + gcploggingEntry.SpanID = fmt.Sprintf("%x", sc.SpanID) + } + } else { + // server side span, populated through stats/opencensus package. 
+ if tID, sID, ok := opencensus.GetTraceAndSpanID(ctx); ok { + gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + fmt.Sprintf("%x", tID) + gcploggingEntry.SpanID = fmt.Sprintf("%x", sID) + } + } + return gcploggingEntry +} - bml.exporter.EmitGcpLoggingEntry(gcploggingEntry) +func (bml *binaryMethodLogger) Log(ctx context.Context, c iblog.LogEntryConfig) { + bml.exporter.EmitGcpLoggingEntry(bml.buildGCPLoggingEntry(ctx, c)) } type eventConfig struct { @@ -336,7 +355,9 @@ type eventConfig struct { type binaryLogger struct { EventConfigs []eventConfig + projectID string exporter loggingExporter + clientSide bool } func (bl *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { @@ -352,9 +373,11 @@ func (bl *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { } return &binaryMethodLogger{ - exporter: bl.exporter, - mlb: iblog.NewTruncatingMethodLogger(eventConfig.HeaderBytes, eventConfig.MessageBytes), - callID: uuid.NewString(), + exporter: bl.exporter, + mlb: iblog.NewTruncatingMethodLogger(eventConfig.HeaderBytes, eventConfig.MessageBytes), + callID: uuid.NewString(), + projectID: bl.projectID, + clientSide: bl.clientSide, } } } @@ -372,7 +395,8 @@ func parseMethod(method string) (string, string, error) { return method[:pos], method[pos+1:], nil } -func registerClientRPCEvents(clientRPCEvents []clientRPCEvents, exporter loggingExporter) { +func registerClientRPCEvents(config *config, exporter loggingExporter) { + clientRPCEvents := config.CloudLogging.ClientRPCEvents if len(clientRPCEvents) == 0 { return } @@ -405,11 +429,14 @@ func registerClientRPCEvents(clientRPCEvents []clientRPCEvents, exporter logging clientSideLogger := &binaryLogger{ EventConfigs: eventConfigs, exporter: exporter, + projectID: config.ProjectID, + clientSide: true, } internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl binarylog.Logger) grpc.DialOption)(clientSideLogger)) } -func 
registerServerRPCEvents(serverRPCEvents []serverRPCEvents, exporter loggingExporter) { +func registerServerRPCEvents(config *config, exporter loggingExporter) { + serverRPCEvents := config.CloudLogging.ServerRPCEvents if len(serverRPCEvents) == 0 { return } @@ -442,6 +469,8 @@ func registerServerRPCEvents(serverRPCEvents []serverRPCEvents, exporter logging serverSideLogger := &binaryLogger{ EventConfigs: eventConfigs, exporter: exporter, + projectID: config.ProjectID, + clientSide: false, } internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl binarylog.Logger) grpc.ServerOption)(serverSideLogger)) } @@ -456,9 +485,8 @@ func startLogging(ctx context.Context, config *config) error { return fmt.Errorf("unable to create CloudLogging exporter: %v", err) } - cl := config.CloudLogging - registerClientRPCEvents(cl.ClientRPCEvents, lExporter) - registerServerRPCEvents(cl.ServerRPCEvents, lExporter) + registerClientRPCEvents(config, lExporter) + registerServerRPCEvents(config, lExporter) return nil } diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index a02233387407..a42b1da550fd 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -68,6 +68,8 @@ type fakeLoggingExporter struct { mu sync.Mutex entries []*grpcLogEntry + + idsSeen []*traceAndSpanIDString } func (fle *fakeLoggingExporter) EmitGcpLoggingEntry(entry gcplogging.Entry) { @@ -76,6 +78,13 @@ func (fle *fakeLoggingExporter) EmitGcpLoggingEntry(entry gcplogging.Entry) { if entry.Severity != 100 { fle.t.Errorf("entry.Severity is not 100, this should be hardcoded") } + + ids := &traceAndSpanIDString{ + traceID: entry.Trace, + spanID: entry.SpanID, + } + fle.idsSeen = append(fle.idsSeen, ids) + grpcLogEntry, ok := entry.Payload.(*grpcLogEntry) if !ok { fle.t.Errorf("payload passed in isn't grpcLogEntry") diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 
87e9668eefdf..fa8cba1d38df 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -21,19 +21,24 @@ package observability import ( "context" "encoding/json" + "errors" "fmt" "io" "os" + "strings" "sync" "testing" "time" + "github.com/google/go-cmp/cmp" "go.opencensus.io/stats/view" "go.opencensus.io/trace" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/metadata" "google.golang.org/grpc/test/grpc_testing" ) @@ -80,6 +85,8 @@ type fakeOpenCensusExporter struct { // Number of spans SeenSpans int + idCh *testutils.Channel + t *testing.T mu sync.RWMutex } @@ -102,7 +109,39 @@ func (fe *fakeOpenCensusExporter) ExportView(vd *view.Data) { } } +type traceAndSpanID struct { + spanName string + traceID trace.TraceID + spanID trace.SpanID +} + +type traceAndSpanIDString struct { + traceID string + spanID string +} + +// idsToString is a helper that converts from generated trace and span IDs to +// the string version stored in trace message events. (hex 16 lowercase encoded, +// and extra data attached to trace id). +func idsToString(tasi traceAndSpanID, projectID string) traceAndSpanIDString { + return traceAndSpanIDString{ + traceID: "projects/" + projectID + "/traces/" + fmt.Sprintf("%x", tasi.traceID), + spanID: fmt.Sprintf("%x", tasi.spanID), + } +} + func (fe *fakeOpenCensusExporter) ExportSpan(vd *trace.SpanData) { + if fe.idCh != nil { + // This is what export span sees representing the trace/span ID which + // will populate different contexts throughout the system, convert in + // caller to string version as the logging code does. 
+ fe.idCh.Send(traceAndSpanID{ + spanName: vd.Name, + traceID: vd.TraceID, + spanID: vd.SpanID, + }) + } + fe.mu.Lock() defer fe.mu.Unlock() fe.SeenSpans++ @@ -487,3 +526,532 @@ func (s) TestStartErrorsThenEnd(t *testing.T) { } End() } + +// TestLoggingLinkedWithTraceClientSide tests that client side logs get the +// trace and span id corresponding to the created Call Level Span for the RPC. +func (s) TestLoggingLinkedWithTraceClientSide(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + oldNewLoggingExporter := newLoggingExporter + defer func() { + newLoggingExporter = oldNewLoggingExporter + }() + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + idCh := testutils.NewChannel() + + fe := &fakeOpenCensusExporter{ + t: t, + idCh: idCh, + } + oldNewExporter := newExporter + defer func() { + newExporter = oldNewExporter + }() + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + + const projectID = "project-id" + tracesAndLogsConfig := &config{ + ProjectID: projectID, + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := 
context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Spawn a goroutine to receive the trace and span ids received by the + // exporter corresponding to a Unary RPC. + readerErrCh := testutils.NewChannel() + unaryDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + <-unaryDone.Done() + var tasiSent traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "Sent.") { + tasiSent = idsToString(tasi, projectID) + continue + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{})); diff != "" { + readerErrCh.Send(errors.New("got unexpected id, should be a client span")) + } + } + + fle.entries = nil + fle.mu.Unlock() + readerErrCh.Send(nil) + }() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + unaryDone.Fire() + if chErr, err := 
readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } +} + +// TestLoggingLinkedWithTraceServerSide tests that server side logs get the +// trace and span id corresponding to the created Server Span for the RPC. +func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + oldNewLoggingExporter := newLoggingExporter + defer func() { + newLoggingExporter = oldNewLoggingExporter + }() + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + idCh := testutils.NewChannel() + + fe := &fakeOpenCensusExporter{ + t: t, + idCh: idCh, + } + oldNewExporter := newExporter + defer func() { + newExporter = oldNewExporter + }() + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + + const projectID = "project-id" + tracesAndLogsConfig := &config{ + ProjectID: projectID, + CloudLogging: &cloudLogging{ + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Spawn a goroutine to receive the trace and span ids received by the + // exporter corresponding to a Unary RPC. + readerErrCh := testutils.NewChannel() + unaryDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + <-unaryDone.Done() + var tasiServer traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "grpc.") { + tasiServer = idsToString(tasi, projectID) + continue + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{})); diff != "" { + readerErrCh.Send(errors.New("got unexpected id, should be a server span")) + } + } + + fle.entries = nil + fle.mu.Unlock() + readerErrCh.Send(nil) + }() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + unaryDone.Fire() + if 
chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } +} + +// TestLoggingLinkedWithTrace tests that client and server side logs get the +// trace and span id corresponding to either the Call Level Span or Server Span +// (no determinism, so can only assert one or the other), for Unary and +// Streaming RPCs. +func (s) TestLoggingLinkedWithTrace(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + oldNewLoggingExporter := newLoggingExporter + defer func() { + newLoggingExporter = oldNewLoggingExporter + }() + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + idCh := testutils.NewChannel() + + fe := &fakeOpenCensusExporter{ + t: t, + idCh: idCh, + } + oldNewExporter := newExporter + defer func() { + newExporter = oldNewExporter + }() + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + + const projectID = "project-id" + tracesAndLogsConfig := &config{ + ProjectID: projectID, + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + return &grpc_testing.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream 
grpc_testing.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Spawn a goroutine to receive the trace and span ids received by the + // exporter corresponding to a Unary RPC. + readerErrCh := testutils.NewChannel() + unaryDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + <-unaryDone.Done() + var tasiSent traceAndSpanIDString + var tasiServer traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "Sent.") { + tasiSent = idsToString(tasi, projectID) + continue + } + if strings.HasPrefix(tasi.spanName, "grpc.") { + tasiServer = idsToString(tasi, projectID) + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{})); 
diff != "" { + if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{})); diff2 != "" { + readerErrCh.Send(errors.New("got unexpected id, should be client or server span")) + } + } + } + + fle.entries = nil + fle.mu.Unlock() + readerErrCh.Send(nil) + }() + if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + unaryDone.Fire() + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } + + fle.mu.Lock() + fle.idsSeen = nil + fle.mu.Unlock() + + // Test streaming. Spawn a goroutine to receive the trace and span ids + // received by the exporter corresponding to a streaming RPC. + readerErrCh = testutils.NewChannel() + streamDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = 
append(traceAndSpanIDs, tasi) + <-streamDone.Done() + var tasiSent traceAndSpanIDString + var tasiServer traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "Sent.") { + tasiSent = idsToString(tasi, projectID) + continue + } + if strings.HasPrefix(tasi.spanName, "grpc.") { + tasiServer = idsToString(tasi, projectID) + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{})); diff != "" { + if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{})); diff2 != "" { + readerErrCh.Send(errors.New("got unexpected id, should be client or server span")) + } + } + } + + fle.entries = nil + fle.mu.Unlock() + readerErrCh.Send(nil) + }() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + streamDone.Fire() + + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } +} diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index 809d73ccafb0..af03a40d990b 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -28,8 +28,10 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. 
type Logger interface { GetMethodLogger(methodName string) MethodLogger } @@ -40,8 +42,6 @@ type Logger interface { // It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - // SetLogger sets the binary logger. // // Only call this at init time. diff --git a/internal/binarylog/binarylog_test.go b/internal/binarylog/binarylog_test.go index 05138f8f309f..47f6a541e767 100644 --- a/internal/binarylog/binarylog_test.go +++ b/internal/binarylog/binarylog_test.go @@ -98,7 +98,6 @@ func (s) TestGetMethodLogger(t *testing.T) { t.Errorf("in: %q, method logger is nil, want non-nil", tc.in) continue } - if ml.headerMaxLen != tc.hdr || ml.messageMaxLen != tc.msg { t.Errorf("in: %q, want header: %v, message: %v, got header: %v, message: %v", tc.in, tc.hdr, tc.msg, ml.headerMaxLen, ml.messageMaxLen) } diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index d71e441778f4..56fcf008d3de 100644 --- a/internal/binarylog/method_logger.go +++ b/internal/binarylog/method_logger.go @@ -19,6 +19,7 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" @@ -49,7 +50,7 @@ var idGen callIDGenerator // MethodLogger is the sub-logger for each method. type MethodLogger interface { - Log(LogEntryConfig) + Log(context.Context, LogEntryConfig) } // TruncatingMethodLogger is a method logger that truncates headers and messages @@ -98,7 +99,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/internal/binarylog/method_logger_test.go b/internal/binarylog/method_logger_test.go index 5d1e09a39658..11255bb338b4 100644 --- a/internal/binarylog/method_logger_test.go +++ b/internal/binarylog/method_logger_test.go @@ -20,6 +20,7 @@ package binarylog import ( "bytes" + "context" "fmt" "net" "testing" @@ -335,7 +336,7 @@ func (s) TestLog(t *testing.T) { for i, tc := range testCases { buf.Reset() tc.want.SequenceIdWithinCall = uint64(i + 1) - ml.Log(tc.config) + ml.Log(context.Background(), tc.config) inSink := new(binlogpb.GrpcLogEntry) if err := proto.Unmarshal(buf.Bytes()[4:], inSink); err != nil { t.Errorf("failed to unmarshal bytes in sink to proto: %v", err) diff --git a/server.go b/server.go index 8d573dc6075b..087b9ad7c1f6 100644 --- a/server.go +++ b/server.go @@ -1253,7 +1253,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. logEntry.PeerAddr = peer.Addr } for _, binlog := range binlogs { - binlog.Log(logEntry) + binlog.Log(ctx, logEntry) } } @@ -1333,7 +1333,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(cm) + binlog.Log(stream.Context(), cm) } } if trInfo != nil { @@ -1366,7 +1366,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(sh) + binlog.Log(stream.Context(), sh) } } st := &binarylog.ServerTrailer{ @@ -1374,7 +1374,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } return appErr @@ -1416,8 +1416,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(st) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) } } return err @@ -1431,8 +1431,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(sm) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) } } if channelz.IsOn() { @@ -1450,7 +1450,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } return t.WriteStatus(stream, statusOK) @@ -1587,7 +1587,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(logEntry) + binlog.Log(stream.Context(), logEntry) } } @@ -1665,7 +1665,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } t.WriteStatus(ss.s, appStatus) @@ -1683,7 +1683,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } return t.WriteStatus(ss.s, statusOK) diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 251a75cdac54..943713f154d2 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -1477,11 +1477,11 @@ func (s) TestSpan(t *testing.T) { hasRemoteParent: false, }, } + fe.mu.Lock() + defer fe.mu.Unlock() if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) } - fe.mu.Lock() - defer fe.mu.Unlock() if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { t.Fatalf("Error in runtime data assertions: 
%v", err) } diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index 350cebfb4aca..fc7ee341ea63 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -152,13 +152,24 @@ func setRPCInfo(ctx context.Context, ri *rpcInfo) context.Context { return context.WithValue(ctx, rpcInfoKey{}, ri) } -// getSpanWithMsgCount returns the rpcInfo stored in the context, or nil +// getRPCInfo returns the rpcInfo stored in the context, or nil // if there isn't one. func getRPCInfo(ctx context.Context) *rpcInfo { ri, _ := ctx.Value(rpcInfoKey{}).(*rpcInfo) return ri } +// GetTraceAndSpanID returns the trace and span ID of the span in the context. +// Returns true if IDs present and false if IDs not present. +func GetTraceAndSpanID(ctx context.Context) (trace.TraceID, trace.SpanID, bool) { + ri, ok := ctx.Value(rpcInfoKey{}).(*rpcInfo) + if !ok { + return trace.TraceID{}, trace.SpanID{}, false + } + sc := ri.ti.span.SpanContext() + return sc.TraceID, sc.SpanID, true +} + type clientStatsHandler struct { to TraceOptions } diff --git a/stats/opencensus/trace.go b/stats/opencensus/trace.go index 2c8a93551fa7..afd5b4fd8912 100644 --- a/stats/opencensus/trace.go +++ b/stats/opencensus/trace.go @@ -40,6 +40,8 @@ type traceInfo struct { func (csh *clientStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) (context.Context, *traceInfo) { // TODO: get consensus on whether this method name of "s.m" is correct. mn := "Attempt." + strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) + // Returned context is ignored because will populate context with data + // that wraps the span instead. 
_, span := trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS), trace.WithSpanKind(trace.SpanKindClient)) tcBin := propagation.Binary(span.SpanContext()) diff --git a/stream.go b/stream.go index 0926cd4f5755..d1226a4120f8 100644 --- a/stream.go +++ b/stream.go @@ -361,7 +361,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } } for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } @@ -809,7 +809,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { } cs.serverHeaderBinlogged = true for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } return m, nil @@ -890,7 +890,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Message: data, } for _, binlog := range cs.binlogs { - binlog.Log(cm) + binlog.Log(cs.ctx, cm) } } return err @@ -914,7 +914,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { Message: recvInfo.uncompressedBytes, } for _, binlog := range cs.binlogs { - binlog.Log(sm) + binlog.Log(cs.ctx, sm) } } if err != nil || !cs.desc.ServerStreams { @@ -935,7 +935,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { logEntry.PeerAddr = peer.Addr } for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } } @@ -962,7 +962,7 @@ func (cs *clientStream) CloseSend() error { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(chc) + binlog.Log(cs.ctx, chc) } } // We never returned an error here for reasons. 
@@ -1004,7 +1004,7 @@ func (cs *clientStream) finish(err error) { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(c) + binlog.Log(cs.ctx, c) } } if err == nil { @@ -1573,7 +1573,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } return err @@ -1646,14 +1646,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } sm := &binarylog.ServerMessage{ Message: data, } for _, binlog := range ss.binlogs { - binlog.Log(sm) + binlog.Log(ss.ctx, sm) } } if len(ss.statsHandler) != 0 { @@ -1701,7 +1701,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} for _, binlog := range ss.binlogs { - binlog.Log(chc) + binlog.Log(ss.ctx, chc) } } return err @@ -1729,7 +1729,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { Message: payInfo.uncompressedBytes, } for _, binlog := range ss.binlogs { - binlog.Log(cm) + binlog.Log(ss.ctx, cm) } } return nil diff --git a/test/end2end_test.go b/test/end2end_test.go index 9de88ebcc46f..a6d992286894 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -6799,7 +6799,7 @@ type mockMethodLogger struct { events uint64 } -func (mml *mockMethodLogger) Log(binarylog.LogEntryConfig) { +func (mml *mockMethodLogger) Log(context.Context, binarylog.LogEntryConfig) { atomic.AddUint64(&mml.events, 1) } From d02039b6859b42161dff237c7d01d04c970cc599 Mon Sep 17 00:00:00 2001 From: Luwei Ge Date: Thu, 9 Mar 2023 12:28:57 -0800 Subject: [PATCH 816/998] Deflake the integration test. (#6093) The short test timeout was causing the DialContext to return an error even if it was non-blocking when a large number of tests are executed simultaneously. 
The way I think we should do with is to stick with the normal time out but cancel the context promptly, instead of deferring it at the end to release resources. --- .../advancedtls_integration_test.go | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index d5a620d14f96..2e9076759857 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ b/security/advancedtls/advancedtls_integration_test.go @@ -41,8 +41,6 @@ import ( const ( // Default timeout for normal connections. defaultTestTimeout = 5 * time.Second - // Default timeout for failed connections. - defaultTestShortTimeout = 10 * time.Millisecond // Intervals that set to monitor the credential updates. credRefreshingInterval = 200 * time.Millisecond // Time we wait for the credential updates to be picked up. @@ -400,18 +398,19 @@ func (s) TestEnd2End(t *testing.T) { } // ------------------------Scenario 3------------------------------------ // stage = 1, new connection should fail - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - conn2, greetClient, err := callAndVerifyWithClientConn(shortCtx, addr, "rpc call 3", clientTLSCreds, true) + ctx2, cancel2 := context.WithTimeout(context.Background(), defaultTestTimeout) + conn2, _, err := callAndVerifyWithClientConn(ctx2, addr, "rpc call 3", clientTLSCreds, true) if err != nil { t.Fatal(err) } defer conn2.Close() + // Immediately cancel the context so the dialing won't drag the entire timeout still it stops. 
+ cancel2() // ---------------------------------------------------------------------- stage.increase() // ------------------------Scenario 4------------------------------------ // stage = 2, new connection should succeed - conn3, greetClient, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 4", clientTLSCreds, false) + conn3, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 4", clientTLSCreds, false) if err != nil { t.Fatal(err) } @@ -691,7 +690,7 @@ func (s) TestPEMFileProviderEnd2End(t *testing.T) { } // New connections should still be good, because the Provider didn't pick // up the changes due to key-cert mismatch. - conn2, greetClient, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 3", clientTLSCreds, false) + conn2, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 3", clientTLSCreds, false) if err != nil { t.Fatal(err) } @@ -703,20 +702,21 @@ func (s) TestPEMFileProviderEnd2End(t *testing.T) { // New connections should fail now, because the Provider picked the // change, and *_cert_2.pem is not trusted by *_trust_cert_1.pem on the // other side. - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - conn3, greetClient, err := callAndVerifyWithClientConn(shortCtx, addr, "rpc call 4", clientTLSCreds, true) + ctx2, cancel2 := context.WithTimeout(context.Background(), defaultTestTimeout) + conn3, _, err := callAndVerifyWithClientConn(ctx2, addr, "rpc call 4", clientTLSCreds, true) if err != nil { t.Fatal(err) } defer conn3.Close() + // Immediately cancel the context so the dialing won't drag the entire timeout still it stops. + cancel2() // Make the trust cert change on the other side, and wait 1 second for // the provider to pick up the change. test.trustCertUpdateFunc() time.Sleep(sleepInterval) // New connections should be good, because the other side is using // *_trust_cert_2.pem now. 
- conn4, greetClient, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 5", clientTLSCreds, false) + conn4, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 5", clientTLSCreds, false) if err != nil { t.Fatal(err) } From 92d9e77ac70e4942b23996680cab13ce32018d97 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Thu, 9 Mar 2023 14:34:15 -0800 Subject: [PATCH 817/998] xds: NACK route configuration if sum of weights of weighted clusters exceeds uint32_max (#6085) --- .../xdsclient/xdsresource/unmarshal_rds.go | 18 ++--- .../xdsresource/unmarshal_rds_test.go | 70 ++----------------- 2 files changed, 10 insertions(+), 78 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index 057b1c7a3440..be7f6624deda 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -19,6 +19,7 @@ package xdsresource import ( "fmt" + "math" "regexp" "strings" "time" @@ -313,12 +314,16 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} case *v3routepb.RouteAction_WeightedClusters: wcs := a.WeightedClusters - var totalWeight uint32 + var totalWeight uint64 for _, c := range wcs.Clusters { w := c.GetWeight().GetValue() if w == 0 { continue } + totalWeight += uint64(w) + if totalWeight > math.MaxUint32 { + return nil, nil, fmt.Errorf("xds: total weight of clusters exceeds MaxUint32") + } wc := WeightedCluster{Weight: w} cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) if err != nil { @@ -326,16 +331,6 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif } wc.HTTPFilterConfigOverride = cfgs route.WeightedClusters[c.GetName()] = wc - totalWeight += w - } - // envoy xds doc - // default TotalWeight 
https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight - wantTotalWeight := uint32(100) - if tw := wcs.GetTotalWeight(); tw != nil { - wantTotalWeight = tw.GetValue() - } - if totalWeight != wantTotalWeight { - return nil, nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) } if totalWeight == 0 { return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) @@ -347,7 +342,6 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif // cluster_specifier: // - Can be Cluster // - Can be Weighted_clusters - // - The sum of weights must add up to the total_weight. // - Can be unset or an unsupported field. The route containing // this action will be ignored. // diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index e24e16e5b028..5dd4e042d72d 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -20,6 +20,7 @@ package xdsresource import ( "errors" "fmt" + "math" "regexp" "testing" "time" @@ -411,38 +412,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { }, }, }, - { - // weights not add up to total-weight. 
- name: "route-config-with-weighted_clusters_weights_not_add_up", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}}, - {Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}}, - {Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 30}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - wantError: true, - }, { name: "good-route-config-with-weighted_clusters", rc: &v3routepb.RouteConfiguration{ @@ -462,7 +431,6 @@ func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { {Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}}, {Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 10}, }, }, }, @@ -971,7 +939,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}, TypedPerFilterConfig: cfgs}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, }}}}, TypedPerFilterConfig: cfgs, }} @@ -1016,7 +983,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, }}}}, }}, wantRoutes: []*Route{{ @@ -1056,7 +1022,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, }, - TotalWeight: 
&wrapperspb.UInt32Value{Value: 100}, }}}}, }, }, @@ -1102,7 +1067,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, }}}}, }, }, @@ -1136,7 +1100,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, }}}}, }, { @@ -1252,14 +1215,13 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 0}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 0}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 0}, }}}}, }, }, wantErr: true, }, { - name: "totalWeight is nil in weighted clusters action", + name: "The sum of all weighted clusters is more than uint32", routes: []*v3routepb.Route{ { Match: &v3routepb.RouteMatch{ @@ -1270,30 +1232,9 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ WeightedClusters: &v3routepb.WeightedCluster{ Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 20}}, - {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 30}}, - }, - }}}}, - }, - }, - wantErr: true, - }, - { - name: "The sum of all weighted clusters is not equal totalWeight", - routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 50}}, - {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 20}}, + {Name: "B", Weight: 
&wrapperspb.UInt32Value{Value: math.MaxUint32}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: math.MaxUint32}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, }}}}, }, }, @@ -1353,7 +1294,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 30}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 20}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 50}, }}}}, }, }, @@ -1394,7 +1334,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, }}, HashPolicy: []*v3routepb.RouteAction_HashPolicy{ {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "io.grpc.channel_id"}}}, @@ -1452,7 +1391,6 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, }}, HashPolicy: []*v3routepb.RouteAction_HashPolicy{ {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{Header: &v3routepb.RouteAction_HashPolicy_Header{HeaderName: ":path"}}}, From 60a1aa38f83816590e3407a32160d3439c21e81f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 9 Mar 2023 14:35:40 -0800 Subject: [PATCH 818/998] testutils: add support for creating endpoint resources with options (#6103) --- internal/testutils/xds/e2e/clientresources.go | 57 +++++++++++++++++-- .../xds_rls_clusterspecifier_plugin_test.go | 2 +- 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index 7e54d0a95461..b38d27b24963 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -36,6 +36,7 @@ import ( 
v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" ) @@ -428,7 +429,7 @@ func RouteConfigResourceWithOptions(opts RouteConfigOptions) *v3routepb.RouteCon // DefaultCluster returns a basic xds Cluster resource. func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) *v3clusterpb.Cluster { - return ClusterResourceWithOptions(&ClusterOptions{ + return ClusterResourceWithOptions(ClusterOptions{ ClusterName: clusterName, ServiceName: edsServiceName, Policy: LoadBalancingPolicyRoundRobin, @@ -463,7 +464,7 @@ type ClusterOptions struct { // ClusterResourceWithOptions returns an xDS Cluster resource configured with // the provided options. -func ClusterResourceWithOptions(opts *ClusterOptions) *v3clusterpb.Cluster { +func ClusterResourceWithOptions(opts ClusterOptions) *v3clusterpb.Cluster { var tlsContext *v3tlspb.UpstreamTlsContext switch opts.SecurityLevel { case SecurityLevelNone: @@ -523,23 +524,50 @@ func ClusterResourceWithOptions(opts *ClusterOptions) *v3clusterpb.Cluster { return cluster } +// EndpointOptions contains options to configure an Endpoint (or +// ClusterLoadAssignment) resource. +type EndpointOptions struct { + // ClusterName is the name of the Cluster resource (or EDS service name) + // containing the endpoints specified below. + ClusterName string + // Host is the hostname of the endpoints. In our e2e tests, hostname must + // always be "localhost". + Host string + // Ports is a set of ports on "localhost" where the endpoints corresponding + // to this resource reside. 
+ Ports []uint32 + // DropPercents is a map from drop category to a drop percentage. If unset, + // no drops are configured. + DropPercents map[string]int +} + // DefaultEndpoint returns a basic xds Endpoint resource. func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpointpb.ClusterLoadAssignment { + return EndpointResourceWithOptions(EndpointOptions{ + ClusterName: clusterName, + Host: host, + Ports: ports, + }) +} + +// EndpointResourceWithOptions returns an xds Endpoint resource configured with +// the provided options. +func EndpointResourceWithOptions(opts EndpointOptions) *v3endpointpb.ClusterLoadAssignment { var lbEndpoints []*v3endpointpb.LbEndpoint - for _, port := range ports { + for _, port := range opts.Ports { lbEndpoints = append(lbEndpoints, &v3endpointpb.LbEndpoint{ HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ SocketAddress: &v3corepb.SocketAddress{ Protocol: v3corepb.SocketAddress_TCP, - Address: host, + Address: opts.Host, PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, }}, }}, }) } - return &v3endpointpb.ClusterLoadAssignment{ - ClusterName: clusterName, + cla := &v3endpointpb.ClusterLoadAssignment{ + ClusterName: opts.ClusterName, Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ Locality: &v3corepb.Locality{SubZone: "subzone"}, LbEndpoints: lbEndpoints, @@ -547,4 +575,21 @@ func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpoin Priority: 0, }}, } + + var drops []*v3endpointpb.ClusterLoadAssignment_Policy_DropOverload + for category, val := range opts.DropPercents { + drops = append(drops, &v3endpointpb.ClusterLoadAssignment_Policy_DropOverload{ + Category: category, + DropPercentage: &v3typepb.FractionalPercent{ + Numerator: uint32(val), + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }) + } + if len(drops) != 0 { + cla.Policy = 
&v3endpointpb.ClusterLoadAssignment_Policy{ + DropOverloads: drops, + } + } + return cla } diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index 8bc021c91fae..afc30bc82133 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -61,7 +61,7 @@ func defaultClientResourcesWithRLSCSP(lb e2e.LoadBalancingPolicy, params e2e.Res RouteLookupConfig: rlsProto, }), })}, - Clusters: []*v3clusterpb.Cluster{e2e.ClusterResourceWithOptions(&e2e.ClusterOptions{ + Clusters: []*v3clusterpb.Cluster{e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, ServiceName: endpointsName, Policy: lb, From 22608213b80d50f51d0d4597d68bb041178dd0af Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 9 Mar 2023 16:30:30 -0800 Subject: [PATCH 819/998] go.mod: upgrade golang.org/x/net to address CVE-2022-41723 (#6106) --- examples/go.mod | 6 +++--- examples/go.sum | 16 +++++++++++++--- gcp/observability/go.mod | 6 +++--- gcp/observability/go.sum | 16 +++++++++++++--- go.mod | 6 +++--- go.sum | 12 ++++++------ interop/observability/go.mod | 6 +++--- interop/observability/go.sum | 16 +++++++++++++--- security/advancedtls/examples/go.mod | 6 +++--- security/advancedtls/examples/go.sum | 12 ++++++------ security/advancedtls/go.mod | 6 +++--- security/advancedtls/go.sum | 12 ++++++------ security/authorization/go.mod | 3 ++- security/authorization/go.sum | 9 +++++---- stats/opencensus/go.mod | 6 +++--- stats/opencensus/go.sum | 16 +++++++++++++--- test/tools/go.mod | 3 ++- test/tools/go.sum | 19 ++++++++++--------- 18 files changed, 110 insertions(+), 66 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index 19b85d4f25aa..eec23182551e 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -19,9 +19,9 @@ require ( github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect 
github.com/envoyproxy/go-control-plane v0.10.3 // indirect github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/examples/go.sum b/examples/go.sum index ec75a892562e..6df1d79573e0 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -609,6 +609,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -658,8 +659,10 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 
h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -765,12 +768,16 @@ golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -782,8 +789,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -842,6 +851,7 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 
2f9b5e0bc002..21fc31eeec88 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -29,10 +29,10 @@ require ( github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect - golang.org/x/net v0.5.0 // indirect + golang.org/x/net v0.8.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index b60536b2d605..f5aaa975652c 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -630,6 +630,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -680,8 +681,10 @@ golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= 
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -792,13 +795,17 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod 
h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -810,8 +817,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -870,6 +879,7 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/go.mod b/go.mod index 8948f1390e37..866a7f24c344 100644 --- a/go.mod +++ b/go.mod @@ -11,9 +11,9 @@ require ( github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 - golang.org/x/net v0.5.0 + golang.org/x/net v0.8.0 golang.org/x/oauth2 v0.4.0 - golang.org/x/sys v0.4.0 + golang.org/x/sys v0.6.0 google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f google.golang.org/protobuf v1.28.1 ) @@ -23,6 +23,6 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/go.sum b/go.sum index 4fdfdcb98e46..c7633c20ea1c 100644 --- a/go.sum +++ b/go.sum @@ -250,8 +250,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod 
h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -299,8 +299,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -310,8 +310,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text 
v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/interop/observability/go.mod b/interop/observability/go.mod index 783ec9c24c43..e4ab61e5a503 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -27,11 +27,11 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/net v0.5.0 // indirect + golang.org/x/net v0.8.0 // indirect golang.org/x/oauth2 v0.4.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect diff --git a/interop/observability/go.sum b/interop/observability/go.sum index b60536b2d605..f5aaa975652c 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -630,6 +630,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -680,8 +681,10 @@ golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -792,13 +795,17 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys 
v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -810,8 +817,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -870,6 +879,7 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index 77379f44ae47..34ab57d1fbfa 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -11,9 +11,9 @@ require ( require ( github.com/golang/protobuf v1.5.2 // indirect golang.org/x/crypto v0.5.0 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 3e95b40aa1a9..73641bde1039 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -6,12 +6,12 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= golang.org/x/crypto v0.5.0 
h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 96dfca921d0f..d7f44b15adbc 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -11,9 +11,9 @@ require ( require ( github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf 
v1.28.1 // indirect ) diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 2a30c3dac779..e9dfc37082a5 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -7,12 +7,12 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= diff --git a/security/authorization/go.mod b/security/authorization/go.mod index cf40541a13f6..06259f41e754 100644 --- a/security/authorization/go.mod +++ 
b/security/authorization/go.mod @@ -17,5 +17,6 @@ require ( github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/text v0.8.0 // indirect ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index 5e0656ec0083..ce3d88ce2187 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -243,7 +243,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -289,7 +290,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 
h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -299,8 +300,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index 9f28368d940b..cc895a5ae393 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -11,9 +11,9 @@ require ( require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect google.golang.org/genproto 
v0.0.0-20230110181048-76db0878b65f // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 7361a2d8ac99..ee791270f20f 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -603,6 +603,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -652,8 +653,10 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -758,12 +761,16 @@ golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -775,8 +782,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -835,6 +844,7 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/test/tools/go.mod b/test/tools/go.mod index b1dcc440964f..3a148eb8f738 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -8,7 +8,8 @@ require ( github.com/golang/protobuf v1.5.2 golang.org/x/exp/typeparams v0.0.0-20230108222341-4b8118a2686a // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/tools v0.5.0 + golang.org/x/sys v0.6.0 // 
indirect + golang.org/x/tools v0.6.0 google.golang.org/protobuf v1.28.1 // indirect honnef.co/go/tools v0.3.3 ) diff --git a/test/tools/go.sum b/test/tools/go.sum index 8728e287ec57..4c4b60ffc647 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -20,14 +20,14 @@ golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhp golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -41,23 +41,24 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= From 0558239af019be196084aaecc8b01eac305777b1 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Fri, 10 Mar 2023 09:28:07 -0800 Subject: [PATCH 820/998] Update CONTRIBUTING.md (#6089) --- CONTRIBUTING.md | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 52338d004ce3..8e001134da20 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,6 +20,19 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. 
These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -32,14 +45,18 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. 
PRs with messy commit history are difficult to review and won't be merged. Use From 3633361c264f63ece7e37a6a5f3ce7f837f50bda Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 10 Mar 2023 10:00:13 -0800 Subject: [PATCH 821/998] tests: support LRS on the same port as ADS (#6102) --- internal/testutils/xds/e2e/server.go | 36 ++++++++++++---- .../testutils/xds}/fakeserver/server.go | 41 +++++++++++-------- xds/internal/xdsclient/loadreport_test.go | 2 +- .../xdsclient/tests/misc_watchers_test.go | 2 +- .../xdsclient/tests/resource_update_test.go | 2 +- .../xdsclient/transport/loadreport_test.go | 2 +- .../transport/transport_resource_test.go | 2 +- 7 files changed, 58 insertions(+), 29 deletions(-) rename {xds/internal/testutils => internal/testutils/xds}/fakeserver/server.go (88%) diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go index 001544d141d7..9a1a4ec45490 100644 --- a/internal/testutils/xds/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -26,17 +26,20 @@ import ( "reflect" "strconv" + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "google.golang.org/grpc" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/envoyproxy/go-control-plane/pkg/cache/types" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" v3cache 
"github.com/envoyproxy/go-control-plane/pkg/cache/v3" v3resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" v3server "github.com/envoyproxy/go-control-plane/pkg/server/v3" - "google.golang.org/grpc" ) // ManagementServer is a thin wrapper around the xDS control plane @@ -46,6 +49,11 @@ type ManagementServer struct { // new connections. Address string + // LRSServer points to the fake LRS server implementation. Set only if the + // SupportLoadReportingService option was set to true when creating this + // management server. + LRSServer *fakeserver.Server + cancel context.CancelFunc // To stop the v3 ADS service. xs v3server.Server // v3 implementation of ADS. gs *grpc.Server // gRPC server which exports the ADS service. @@ -60,6 +68,10 @@ type ManagementServerOptions struct { // will be created and used. Listener net.Listener + // SupportLoadReportingService, if set, results in the load reporting + // service being registered on the same port as that of ADS. + SupportLoadReportingService bool + // AllowResourceSubSet allows the management server to respond to requests // before all configured resources are explicitly named in the request. The // default behavior that we want is for the management server to wait for @@ -138,18 +150,26 @@ func StartManagementServer(opts ManagementServerOptions) (*ManagementServer, err v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(gs, xs) logger.Infof("Registered Aggregated Discovery Service (ADS)...") - // Start serving. 
- go gs.Serve(lis) - logger.Infof("xDS management server serving at: %v...", lis.Addr().String()) - - return &ManagementServer{ + mgmtServer := &ManagementServer{ Address: lis.Addr().String(), cancel: cancel, version: 0, gs: gs, xs: xs, cache: cache, - }, nil + } + if opts.SupportLoadReportingService { + lrs := fakeserver.NewServer(lis.Addr().String()) + v3lrsgrpc.RegisterLoadReportingServiceServer(gs, lrs) + mgmtServer.LRSServer = lrs + logger.Infof("Registered Load Reporting Service (LRS)...") + } + + // Start serving. + go gs.Serve(lis) + logger.Infof("xDS management server serving at: %v...", lis.Addr().String()) + + return mgmtServer, nil } // UpdateOptions wraps parameters to be passed to the Update() method. diff --git a/xds/internal/testutils/fakeserver/server.go b/internal/testutils/xds/fakeserver/server.go similarity index 88% rename from xds/internal/testutils/fakeserver/server.go rename to internal/testutils/xds/fakeserver/server.go index 48c2e0b8ef4c..e2f2fb39e0dd 100644 --- a/xds/internal/testutils/fakeserver/server.go +++ b/internal/testutils/xds/fakeserver/server.go @@ -96,8 +96,8 @@ type Server struct { Address string // The underlying fake implementation of xDS and LRS. - xdsV3 *xdsServerV3 - lrsV3 *lrsServerV3 + *xdsServerV3 + *lrsServerV3 } type wrappedListener struct { @@ -127,31 +127,40 @@ func StartServer(lis net.Listener) (*Server, func(), error) { } } - s := &Server{ - XDSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - LRSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - NewConnChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - XDSResponseChan: make(chan *Response, defaultChannelBufferSize), - LRSResponseChan: make(chan *Response, 1), // The server only ever sends one response. 
- LRSStreamOpenChan: testutils.NewChannel(), - LRSStreamCloseChan: testutils.NewChannel(), - Address: lis.Addr().String(), - } - s.xdsV3 = &xdsServerV3{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} - s.lrsV3 = &lrsServerV3{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan, streamOpenChan: s.LRSStreamOpenChan, streamCloseChan: s.LRSStreamCloseChan} + s := NewServer(lis.Addr().String()) wp := &wrappedListener{ Listener: lis, server: s, } server := grpc.NewServer() - v3lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsV3) - v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsV3) + v3lrsgrpc.RegisterLoadReportingServiceServer(server, s) + v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(server, s) go server.Serve(wp) return s, func() { server.Stop() }, nil } +// NewServer returns a new instance of Server, set to accept requests on addr. +// It is the responsibility of the caller to register the exported ADS and LRS +// services on an appropriate gRPC server. Most usages should prefer +// StartServer() instead of this. +func NewServer(addr string) *Server { + s := &Server{ + XDSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + LRSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + NewConnChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + XDSResponseChan: make(chan *Response, defaultChannelBufferSize), + LRSResponseChan: make(chan *Response, 1), // The server only ever sends one response. 
+ LRSStreamOpenChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + LRSStreamCloseChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + Address: addr, + } + s.xdsServerV3 = &xdsServerV3{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} + s.lrsServerV3 = &lrsServerV3{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan, streamOpenChan: s.LRSStreamOpenChan, streamCloseChan: s.LRSStreamCloseChan} + return s +} + type xdsServerV3 struct { reqChan *testutils.Channel respChan chan *Response diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index 92450ce747e0..e4e21e412374 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -27,8 +27,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" diff --git a/xds/internal/xdsclient/tests/misc_watchers_test.go b/xds/internal/xdsclient/tests/misc_watchers_test.go index 0d09a921f08b..4bb1d2df8497 100644 --- a/xds/internal/xdsclient/tests/misc_watchers_test.go +++ b/xds/internal/xdsclient/tests/misc_watchers_test.go @@ -27,9 +27,9 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/xds/internal" xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" diff 
--git a/xds/internal/xdsclient/tests/resource_update_test.go b/xds/internal/xdsclient/tests/resource_update_test.go index 31ae8eda8d1a..aeaa5c448374 100644 --- a/xds/internal/xdsclient/tests/resource_update_test.go +++ b/xds/internal/xdsclient/tests/resource_update_test.go @@ -32,8 +32,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go index 91fef7ae0991..039780ae7f13 100644 --- a/xds/internal/xdsclient/transport/loadreport_test.go +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -27,7 +27,7 @@ import ( "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/protobuf/testing/protocmp" diff --git a/xds/internal/xdsclient/transport/transport_resource_test.go b/xds/internal/xdsclient/transport/transport_resource_test.go index ceb5a7f67bf4..b4c44f4d6160 100644 --- a/xds/internal/xdsclient/transport/transport_resource_test.go +++ b/xds/internal/xdsclient/transport/transport_resource_test.go @@ -30,7 +30,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" + 
"google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" From abd4db22a7ccd3dcc2ed576df1fdcb511bc840fe Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 10 Mar 2023 10:27:04 -0800 Subject: [PATCH 822/998] xdsclient/tests: fix flaky test NodeProtoSentOnlyInFirstRequest (#6108) --- .../xdsclient/tests/misc_watchers_test.go | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/xds/internal/xdsclient/tests/misc_watchers_test.go b/xds/internal/xdsclient/tests/misc_watchers_test.go index 4bb1d2df8497..19cf7daba3fe 100644 --- a/xds/internal/xdsclient/tests/misc_watchers_test.go +++ b/xds/internal/xdsclient/tests/misc_watchers_test.go @@ -190,12 +190,24 @@ func (s) TestNodeProtoSentOnlyInFirstRequest(t *testing.T) { } defer close() - // Configure a listener resource on the fake xDS server. const ( serviceName = "my-service-client-side-xds" routeConfigName = "route-" + serviceName clusterName = "cluster-" + serviceName ) + + // Register a watch for the Listener resource. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + watcher := xdstestutils.NewTestResourceWatcher() + client.WatchResource(listenerResourceType, serviceName, watcher) + + // Ensure the watch results in a discovery request with an empty node proto. + if err := readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // Configure a listener resource on the fake xDS server. 
lisAny, err := anypb.New(e2e.DefaultClientListener(serviceName, routeConfigName)) if err != nil { t.Fatalf("Failed to marshal listener resource into an Any proto: %v", err) @@ -208,19 +220,16 @@ func (s) TestNodeProtoSentOnlyInFirstRequest(t *testing.T) { }, } - // Register a watch for the Listener resource. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - watcher := xdstestutils.NewTestResourceWatcher() - client.WatchResource(listenerResourceType, serviceName, watcher) - - // The first request on the stream must contain a non-empty node proto. - if err := readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + // The xDS client is expected to ACK the Listener resource. The discovery + // request corresponding to the ACK must contain a nil node proto. + if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { t.Fatal(err) } - // The xDS client is expected to ACK the Listener resource. The discovery - // request corresponding to the ACK must contain a nil node proto. + // Register a watch for a RouteConfiguration resource. + client.WatchResource(routeConfigResourceType, routeConfigName, watcher) + + // Ensure the watch results in a discovery request with an empty node proto. if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { t.Fatal(err) } @@ -238,13 +247,7 @@ func (s) TestNodeProtoSentOnlyInFirstRequest(t *testing.T) { }, } - // Register a watch for a RouteConfiguration resource. Ensure that the - // discovery requests for the route configuration resource and the - // subsequent ACK contains an empty node proto. 
- client.WatchResource(routeConfigResourceType, routeConfigName, watcher) - if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { - t.Fatal(err) - } + // Ensure the discovery request for the ACK contains an empty node proto. if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { t.Fatal(err) } @@ -262,17 +265,14 @@ func (s) TestNodeProtoSentOnlyInFirstRequest(t *testing.T) { // The xDS client is expected to re-request previously requested resources. // Hence, we expect two DiscoveryRequest messages (one for the Listener and - // one for the RouteConfiguration resource). The first message should - // contain a non-nil node proto and second one should contain a nil-proto. + // one for the RouteConfiguration resource). The first message should contain + // a non-nil node proto and the second should contain a nil-proto. // // And since we don't push any responses on the response channel of the fake // server, we do not expect any ACKs here. if err := readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { t.Fatal(err) } - - // The xDS client is expected to ACK the Listener resource. The discovery - // request corresponding to the ACK must contain a nil node proto. 
if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { t.Fatal(err) } From 5796c409ee39ad7bba2db674852401eebfa51495 Mon Sep 17 00:00:00 2001 From: Stanley Cheung Date: Fri, 10 Mar 2023 10:44:04 -0800 Subject: [PATCH 823/998] interop/observability: Pass interop parameters to client/server as-is (#6111) --- interop/observability/run.sh | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/interop/observability/run.sh b/interop/observability/run.sh index d2634980da47..b0494668bf84 100755 --- a/interop/observability/run.sh +++ b/interop/observability/run.sh @@ -16,26 +16,14 @@ set -ex cd "$(dirname "$0")"/../.. -# TODO(stanleycheung): replace positional parameters with explicit parameters -# -# $1: server | client -# -# For server: $2: server_port -# -# For client: $2: server_host -# $3: server_port -# $4: test_case -# $5: num_times - if [ "$1" = "server" ] ; then - /grpc-go/interop/observability/server/server --port $2 + /grpc-go/interop/observability/server/server "${@:2}" elif [ "$1" = "client" ] ; then - /grpc-go/interop/observability/client/client \ - --server_host=$2 --server_port=$3 \ - --test_case=$4 --num_times=$5 + /grpc-go/interop/observability/client/client "${@:2}" else - echo "Invalid action: $1" + echo "Invalid action: $1. Usage:" + echo " $ .../run.sh [server|client] --server_host= --server_port= ..." 
exit 1 fi From 11e2506cb63b4f75d2190dad016070337628975c Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 14 Mar 2023 10:40:00 -0700 Subject: [PATCH 824/998] tests: Scale down keepalive test timings (#6088) --- internal/transport/keepalive_test.go | 307 ++++++++++++--------------- 1 file changed, 136 insertions(+), 171 deletions(-) diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index 4a904703ef91..a020fecdc65d 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -45,7 +45,7 @@ const defaultTestTimeout = 10 * time.Second func (s) TestMaxConnectionIdle(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionIdle: 2 * time.Second, + MaxConnectionIdle: 30 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) @@ -63,28 +63,24 @@ func (s) TestMaxConnectionIdle(t *testing.T) { } client.CloseStream(stream, io.EOF) - // Wait for the server's MaxConnectionIdle timeout to kick in, and for it - // to send a GoAway. - timeout := time.NewTimer(time.Second * 4) + // Verify the server sends a GoAway to client after MaxConnectionIdle timeout + // kicks in. select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } + case <-ctx.Done(): + t.Fatalf("context expired before receiving GoAway from the server.") + case <-client.GoAway(): if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } - case <-timeout.C: - t.Fatalf("MaxConnectionIdle timeout expired, expected a GoAway from the server.") } } -// TestMaxConenctionIdleBusyClient tests that a server will not send GoAway to +// TestMaxConnectionIdleBusyClient tests that a server will not send GoAway to // a busy client. 
func (s) TestMaxConnectionIdleBusyClient(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionIdle: 2 * time.Second, + MaxConnectionIdle: 100 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) @@ -101,26 +97,25 @@ func (s) TestMaxConnectionIdleBusyClient(t *testing.T) { t.Fatalf("client.NewStream() failed: %v", err) } - // Wait for double the MaxConnectionIdle time to make sure the server does - // not send a GoAway, as the client has an open stream. - timeout := time.NewTimer(time.Second * 4) + // Verify the server does not send a GoAway to client even after MaxConnectionIdle + // timeout kicks in. + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + defer cancel() select { case <-client.GoAway(): - if !timeout.Stop() { - <-timeout.C - } - t.Fatalf("A non-idle client received a GoAway.") - case <-timeout.C: + t.Fatalf("A busy client received a GoAway.") + case <-ctx.Done(): } } // TestMaxConnectionAge tests that a server will send GoAway after a duration // of MaxConnectionAge. func (s) TestMaxConnectionAge(t *testing.T) { + maxConnAge := 100 * time.Millisecond serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionAge: 1 * time.Second, - MaxConnectionAgeGrace: 1 * time.Second, + MaxConnectionAge: maxConnAge, + MaxConnectionAgeGrace: 10 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) @@ -132,24 +127,19 @@ func (s) TestMaxConnectionAge(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - _, err := client.NewStream(ctx, &CallHdr{}) - if err != nil { + if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { t.Fatalf("client.NewStream() failed: %v", err) } - // Wait for the server's MaxConnectionAge timeout to kick in, and for it - // to send a GoAway. 
- timeout := time.NewTimer(4 * time.Second) + // Verify the server sends a GoAway to client even after client remains idle + // for more than MaxConnectionIdle time. select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } + case <-client.GoAway(): if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } - case <-timeout.C: - t.Fatalf("MaxConnectionAge timeout expired, expected a GoAway from the server.") + case <-ctx.Done(): + t.Fatalf("timed out before getting a GoAway from the server.") } } @@ -166,8 +156,8 @@ const ( func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 100 * time.Millisecond, + Timeout: 10 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) @@ -208,15 +198,16 @@ func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { // Server waits for KeepaliveParams.Time seconds before sending out a ping, // and then waits for KeepaliveParams.Timeout for a ping ack. 
- timeout := time.NewTimer(4 * time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() select { case err := <-errCh: if err != io.EOF { t.Fatalf("client.Read(_) = _,%v, want io.EOF", err) } - case <-timeout.C: - t.Fatalf("keepalive timeout expired, server should have closed the connection.") + case <-ctx.Done(): + t.Fatalf("Test timed out before server closed the connection.") } } @@ -225,8 +216,8 @@ func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 100 * time.Millisecond, + Timeout: 100 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) @@ -237,13 +228,10 @@ func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) { }() // Give keepalive logic some time by sleeping. - time.Sleep(4 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. 
- if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -257,8 +245,8 @@ func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { copts := ConnectOptions{ ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 10 * time.Millisecond, + Timeout: 10 * time.Millisecond, PermitWithoutStream: true, }, } @@ -272,14 +260,8 @@ func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { } defer conn.Close() - // Sleep for keepalive to close the connection. - time.Sleep(4 * time.Second) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is not healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := pollForStreamCreationError(client); err != nil { + t.Fatal(err) } } @@ -293,8 +275,8 @@ func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { copts := ConnectOptions{ ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 10 * time.Millisecond, + Timeout: 10 * time.Millisecond, }, } client, cancel := setUpWithNoPingServer(t, copts, connCh) @@ -308,13 +290,10 @@ func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { defer conn.Close() // Give keepalive some time. - time.Sleep(4 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. 
- if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -326,10 +305,12 @@ func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { copts := ConnectOptions{ ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 500 * time.Millisecond, + Timeout: 500 * time.Millisecond, }, } + // TODO(i/6099): Setup a server which can ping and no-ping based on a flag to + // reduce the flakiness in this test. client, cancel := setUpWithNoPingServer(t, copts, connCh) defer cancel() defer client.Close(fmt.Errorf("closed manually by test")) @@ -344,15 +325,11 @@ func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { defer cancel() // Create a stream, but send no data on it. if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + t.Fatalf("Stream creation failed: %v", err) } - // Give keepalive some time. - time.Sleep(4 * time.Second) - - // Make sure the client transport is not healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := pollForStreamCreationError(client); err != nil { + t.Fatal(err) } } @@ -370,8 +347,8 @@ func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { normal, ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 55 * time.Millisecond, + Timeout: time.Second, PermitWithoutStream: true, }}) defer func() { @@ -381,19 +358,16 @@ func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { }() // Give keepalive some time. 
- time.Sleep(4 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } // TestKeepaliveClientFrequency creates a server which expects at most 1 client -// ping for every 1.2 seconds, while the client is configured to send a ping -// every 1 second. So, this configuration should end up with the client +// ping for every 100 ms, while the client is configured to send a ping +// every 50 ms. So, this configuration should end up with the client // transport being closed. But we had a bug wherein the client was sending one // ping every [Time+Timeout] instead of every [Time] period, and this test // explicitly makes sure the fix works and the client sends a ping every [Time] @@ -403,14 +377,14 @@ func (s) TestKeepaliveClientFrequency(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 1200 * time.Millisecond, // 1.2 seconds + MinTime: 100 * time.Millisecond, PermitWithoutStream: true, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 2 * time.Second, + Time: 50 * time.Millisecond, + Timeout: time.Second, PermitWithoutStream: true, }, } @@ -421,24 +395,8 @@ func (s) TestKeepaliveClientFrequency(t *testing.T) { cancel() }() - timeout := time.NewTimer(6 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") - } - - ctx, 
cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is not healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := waitForGoAwayTooManyPings(client); err != nil { + t.Fatal(err) } } @@ -451,13 +409,13 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, + MinTime: time.Second, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 20 * time.Millisecond, + Timeout: 100 * time.Millisecond, PermitWithoutStream: true, }, } @@ -468,24 +426,8 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { cancel() }() - timeout := time.NewTimer(4 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is not healthy. 
- if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := waitForGoAwayTooManyPings(client); err != nil { + t.Fatal(err) } } @@ -498,13 +440,13 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, + MinTime: time.Second, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, + Timeout: 100 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) @@ -517,25 +459,11 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) - } - - timeout := time.NewTimer(4 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") + t.Fatalf("Stream creation failed: %v", err) } - // Make sure the client transport is not healthy. 
- if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := waitForGoAwayTooManyPings(client); err != nil { + t.Fatal(err) } } @@ -546,14 +474,14 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 100 * time.Millisecond, + MinTime: 40 * time.Millisecond, PermitWithoutStream: true, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 101 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 50 * time.Millisecond, + Timeout: time.Second, PermitWithoutStream: true, }, } @@ -564,14 +492,12 @@ func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { cancel() }() - // Give keepalive enough time. - time.Sleep(3 * time.Second) + // Sleep for client to send ~10 keepalive pings. + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + // Verify that the server does not close the client transport. 
+ if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -582,13 +508,12 @@ func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 100 * time.Millisecond, + MinTime: 40 * time.Millisecond, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 101 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 50 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) @@ -598,18 +523,15 @@ func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { cancel() }() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } // Give keepalive enough time. - time.Sleep(3 * time.Second) + time.Sleep(500 * time.Millisecond) - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -639,13 +561,10 @@ func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T }() // No active streams on the client. Give keepalive enough time. - time.Sleep(1 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. 
- if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -749,3 +668,49 @@ func (s) TestTCPUserTimeout(t *testing.T) { } } } + +// checkForHealthyStream attempts to create a stream and return error if any. +// The stream created is closed right after to avoid any leakages. +func checkForHealthyStream(client *http2Client) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := client.NewStream(ctx, &CallHdr{}) + client.CloseStream(stream, err) + return err +} + +func pollForStreamCreationError(client *http2Client) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for { + if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { + break + } + time.Sleep(50 * time.Millisecond) + } + if ctx.Err() != nil { + return fmt.Errorf("test timed out before stream creation returned an error") + } + return nil +} + +// waitForGoAwayTooManyPings waits for client to receive a GoAwayTooManyPings +// from server. It also asserts that stream creation fails after receiving a +// GoAway. 
+func waitForGoAwayTooManyPings(client *http2Client) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-client.GoAway(): + if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { + return fmt.Errorf("goAwayReason is %v, want %v", reason, GoAwayTooManyPings) + } + case <-ctx.Done(): + return fmt.Errorf("test timed out before getting GoAway with reason:GoAwayTooManyPings from server") + } + + if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { + return fmt.Errorf("stream creation succeeded after receiving a GoAway from the server") + } + return nil +} From b458a4f11afbced02ab3db241362c60f75972705 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 14 Mar 2023 13:32:25 -0700 Subject: [PATCH 825/998] transport: stop always closing connections when loopy returns (#6110) --- internal/transport/controlbuf.go | 48 +++++++++++++++++++----------- internal/transport/http2_client.go | 11 ++----- internal/transport/http2_server.go | 12 ++------ internal/transport/http_util.go | 24 ++++++++++++++- 4 files changed, 57 insertions(+), 38 deletions(-) diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 6bee9485a6e4..c343c23a530e 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -22,6 +22,7 @@ import ( "bytes" "errors" "fmt" + "net" "runtime" "strconv" "sync" @@ -486,12 +487,13 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. 
bdpEst *bdpEstimator draining bool + conn net.Conn // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -504,6 +506,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, + conn: conn, } return l } @@ -521,15 +524,27 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. 
func (l *loopyWriter) run() (err error) { - // Always flush the writer before exiting in case there are pending frames - // to be sent. - defer l.framer.writer.Flush() + defer func() { + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exiting with error: %v", err) + } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() + }() for { it, err := l.cbuf.get(true) if err != nil { @@ -757,6 +772,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. return errors.New("finished processing active streams while in draining mode") } return nil @@ -792,6 +808,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. return errors.New("received GOAWAY with no active streams") } } @@ -810,13 +827,6 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) closeConnectionHandler() error { - // Exit loopyWriter entirely by returning an error here. This will lead to - // the transport closing the connection, and, ultimately, transport - // closure. - return ErrConnClosing -} - func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -846,7 +856,9 @@ func (l *loopyWriter) handle(i interface{}) error { case *outFlowControlSizeRequest: l.outFlowControlSizeRequestHandler(i) case closeConnection: - return l.closeConnectionHandler() + // Just return a non-I/O error and run() will flush and close the + // connection. 
+ return ErrConnClosing default: return fmt.Errorf("transport: unknown control message type %T", i) } @@ -905,7 +917,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil + return false, err } } else { l.activeStreams.enqueue(str) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index b0964d7ddfb1..9826feb8c699 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -444,15 +444,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) - } - // Do not close the transport. Let reader goroutine handle it since - // there might be data in the buffers. - t.conn.Close() - t.controlBuf.finish() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn) + t.loopy.run() close(t.writerDone) }() return t, nil diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 8629b7ecc6d0..99ae1a737469 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -331,14 +331,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - err := t.loopy.run() - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter exited. Closing connection. 
Err: %v", err) - } - t.conn.Close() - t.controlBuf.finish() + t.loopy.run() close(t.writerDone) }() go t.keepalive() @@ -1355,9 +1350,6 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } if retErr != nil { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() return false, retErr } return true, nil diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index 2c601a864d99..8fcae4f4d079 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -21,6 +21,7 @@ package transport import ( "bufio" "encoding/base64" + "errors" "fmt" "io" "math" @@ -330,7 +331,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - return w.conn.Write(b) + n, err = w.conn.Write(b) + return n, toIOError(err) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -352,10 +354,30 @@ func (w *bufWriter) Flush() error { return nil } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer From 16c3b7df7f78df4061703be769832de44216bfc1 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 14 Mar 2023 14:01:16 -0700 Subject: [PATCH 826/998] examples: add example for ORCA load reporting (#6114) --- examples/examples_test.sh | 4 + examples/features/orca/README.md | 48 ++++++++ examples/features/orca/client/main.go | 153 ++++++++++++++++++++++++++ examples/features/orca/server/main.go | 92 ++++++++++++++++ examples/go.mod | 2 +- internal/internal.go | 3 + orca/service.go | 6 
+- 7 files changed, 305 insertions(+), 3 deletions(-) create mode 100644 examples/features/orca/README.md create mode 100644 examples/features/orca/client/main.go create mode 100644 examples/features/orca/server/main.go diff --git a/examples/examples_test.sh b/examples/examples_test.sh index 812a46556bf0..9ae49d37c5e9 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -64,6 +64,7 @@ EXAMPLES=( "features/metadata_interceptor" "features/multiplex" "features/name_resolving" + "features/orca" "features/retry" "features/unix_abstract" ) @@ -75,6 +76,7 @@ declare -A SERVER_ARGS=( declare -A CLIENT_ARGS=( ["features/unix_abstract"]="-addr $UNIX_ADDR" + ["features/orca"]="-test=true" ["default"]="-addr localhost:$SERVER_PORT" ) @@ -114,6 +116,7 @@ declare -A EXPECTED_SERVER_OUTPUT=( ["features/metadata_interceptor"]="key1 from metadata: " ["features/multiplex"]=":50051" ["features/name_resolving"]="serving on localhost:50051" + ["features/orca"]="Server listening" ["features/retry"]="request succeeded count: 4" ["features/unix_abstract"]="serving on @abstract-unix-socket" ) @@ -134,6 +137,7 @@ declare -A EXPECTED_CLIENT_OUTPUT=( ["features/metadata_interceptor"]="BidiStreaming Echo: hello world" ["features/multiplex"]="Greeting: Hello multiplex" ["features/name_resolving"]="calling helloworld.Greeter/SayHello to \"example:///resolver.example.grpc.io\"" + ["features/orca"]="Per-call load report received: map\[db_queries:10\]" ["features/retry"]="UnaryEcho reply: message:\"Try and Success\"" ["features/unix_abstract"]="calling echo.Echo/UnaryEcho to unix-abstract:abstract-unix-socket" ) diff --git a/examples/features/orca/README.md b/examples/features/orca/README.md new file mode 100644 index 000000000000..ef99aa255ba5 --- /dev/null +++ b/examples/features/orca/README.md @@ -0,0 +1,48 @@ +# ORCA Load Reporting + +ORCA is a protocol for reporting load between servers and clients. 
This +example shows how to implement this from both the client and server side. For +more details, please see [gRFC +A51](https://github.com/grpc/proposal/blob/master/A51-custom-backend-metrics.md) + +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +gRPC ORCA support provides two different ways to report load data to clients +from servers: out-of-band and per-RPC. Out-of-band metrics are reported +regularly at some interval on a stream, while per-RPC metrics are reported +along with the trailers at the end of a call. Both of these mechanisms are +optional and work independently. + +The full ORCA API documentation is available here: +https://pkg.go.dev/google.golang.org/grpc/orca + +### Out-of-band Metrics + +The server registers an ORCA service that is used for out-of-band metrics. It +does this by using `orca.Register()` and then setting metrics on the returned +`orca.Service` using its methods. + +The client receives out-of-band metrics via the LB policy. It receives +callbacks to a listener by registering the listener on a `SubConn` via +`orca.RegisterOOBListener`. + +### Per-RPC Metrics + +The server is set up to report query cost metrics in its RPC handler. For +per-RPC metrics to be reported, the gRPC server must be created with the +`orca.CallMetricsServerOption()` option, and metrics are set by calling methods +on the returned `orca.CallMetricRecorder` from +`orca.CallMetricRecorderFromContext()`. + +The client performs one RPC per second. Per-RPC metrics are available for each +call via the `Done()` callback returned from the LB policy's picker. diff --git a/examples/features/orca/client/main.go b/examples/features/orca/client/main.go new file mode 100644 index 000000000000..f295cfd3866a --- /dev/null +++ b/examples/features/orca/client/main.go @@ -0,0 +1,153 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/orca" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") +var test = flag.Bool("test", false, "if set, only 1 RPC is performed before exiting") + +func main() { + flag.Parse() + + // Set up a connection to the server. Configure to use our custom LB + // policy which will receive all the ORCA load reports. + conn, err := grpc.Dial(*addr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"orca_example":{}}]}`), + ) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + + c := pb.NewEchoClient(conn) + + // Perform RPCs once per second. + ticker := time.NewTicker(time.Second) + for range ticker.C { + func() { + // Use an anonymous function to ensure context cancelation via defer. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "test echo message"}); err != nil { + log.Fatalf("Error from UnaryEcho call: %v", err) + } + }() + if *test { + return + } + } + +} + +// Register an ORCA load balancing policy to receive per-call metrics and +// out-of-band metrics. +func init() { + balancer.Register(orcaLBBuilder{}) +} + +type orcaLBBuilder struct{} + +func (orcaLBBuilder) Name() string { return "orca_example" } +func (orcaLBBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &orcaLB{cc: cc} +} + +// orcaLB is an incomplete LB policy designed to show basic ORCA load reporting +// functionality. It collects per-call metrics in the `Done` callback returned +// by its picker, and it collects out-of-band metrics by registering a listener +// when its SubConn is created. It does not follow general LB policy best +// practices and makes assumptions about the simple test environment it is +// designed to run within. +type orcaLB struct { + cc balancer.ClientConn +} + +func (o *orcaLB) UpdateClientConnState(ccs balancer.ClientConnState) error { + // We assume only one update, ever, containing exactly one address, given + // the use of the "passthrough" (default) name resolver. + + addrs := ccs.ResolverState.Addresses + if len(addrs) != 1 { + return fmt.Errorf("orcaLB: expected 1 address; received: %v", addrs) + } + + // Create one SubConn for the address and connect it. + sc, err := o.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + return fmt.Errorf("orcaLB: error creating SubConn: %v", err) + } + sc.Connect() + + // Register a simple ORCA OOB listener on the SubConn. We request a 1 + // second report interval, but in this example the server indicated the + // minimum interval it will allow is 3 seconds, so reports will only be + // sent that often. 
+ orca.RegisterOOBListener(sc, orcaLis{}, orca.OOBListenerOptions{ReportInterval: time.Second}) + + return nil +} + +func (o *orcaLB) ResolverError(error) {} + +func (o *orcaLB) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + if scs.ConnectivityState == connectivity.Ready { + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &picker{sc}}) + } +} + +func (o *orcaLB) Close() {} + +type picker struct { + sc balancer.SubConn +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{ + SubConn: p.sc, + Done: func(di balancer.DoneInfo) { + fmt.Println("Per-call load report received:", di.ServerLoad.(*v3orcapb.OrcaLoadReport).GetRequestCost()) + }, + }, nil +} + +// orcaLis is the out-of-band load report listener that we pass to +// orca.RegisterOOBListener to receive periodic load report information. +type orcaLis struct{} + +func (orcaLis) OnLoadReport(lr *v3orcapb.OrcaLoadReport) { + fmt.Println("Out-of-band load report received:", lr) +} diff --git a/examples/features/orca/server/main.go b/examples/features/orca/server/main.go new file mode 100644 index 000000000000..5d4bdb163a17 --- /dev/null +++ b/examples/features/orca/server/main.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. 
+package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/status" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +type server struct { + pb.UnimplementedEchoServer +} + +func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { + // Report a sample cost for this query. + cmr := orca.CallMetricRecorderFromContext(ctx) + if cmr == nil { + return nil, status.Errorf(codes.Internal, "unable to retrieve call metric recorder (missing ORCA ServerOption?)") + } + cmr.SetRequestCost("db_queries", 10) + + return &pb.EchoResponse{Message: in.Message}, nil +} + +func main() { + flag.Parse() + + lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) + if err != nil { + log.Fatalf("Failed to listen: %v", err) + } + fmt.Printf("Server listening at %v\n", lis.Addr()) + + // Create the gRPC server with the orca.CallMetricsServerOption() option, + // which will enable per-call metric recording. + s := grpc.NewServer(orca.CallMetricsServerOption()) + pb.RegisterEchoServer(s, &server{}) + + // Register the orca service for out-of-band metric reporting, and set the + // minimum reporting interval to 3 seconds. Note that, by default, the + // minimum interval must be at least 30 seconds, but 3 seconds is set via + // an internal-only option for illustration purposes only. + opts := orca.ServiceOptions{MinReportingInterval: 3 * time.Second} + internal.ORCAAllowAnyMinReportingInterval.(func(so *orca.ServiceOptions))(&opts) + orcaSvc, err := orca.Register(s, opts) + if err != nil { + log.Fatalf("Failed to register ORCA service: %v", err) + } + + // Simulate CPU utilization reporting. 
+ go func() { + for { + orcaSvc.SetCPUUtilization(.5) + time.Sleep(2 * time.Second) + orcaSvc.SetCPUUtilization(.9) + time.Sleep(2 * time.Second) + } + }() + + s.Serve(lis) +} diff --git a/examples/go.mod b/examples/go.mod index eec23182551e..e4fd33eb6ba1 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -3,6 +3,7 @@ module google.golang.org/grpc/examples go 1.17 require ( + github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b github.com/golang/protobuf v1.5.2 golang.org/x/oauth2 v0.4.0 google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f @@ -16,7 +17,6 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect github.com/envoyproxy/go-control-plane v0.10.3 // indirect github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect golang.org/x/net v0.8.0 // indirect diff --git a/internal/internal.go b/internal/internal.go index cd68fb3bb929..836b6a3b3e78 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -137,6 +137,9 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
diff --git a/orca/service.go b/orca/service.go index 7816dcc1eca1..9400ae0c7e64 100644 --- a/orca/service.go +++ b/orca/service.go @@ -24,7 +24,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/internal" + ointernal "google.golang.org/grpc/orca/internal" "google.golang.org/grpc/status" v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" @@ -33,9 +34,10 @@ import ( ) func init() { - internal.AllowAnyMinReportingInterval = func(so *ServiceOptions) { + ointernal.AllowAnyMinReportingInterval = func(so *ServiceOptions) { so.allowAnyMinReportingInterval = true } + internal.ORCAAllowAnyMinReportingInterval = ointernal.AllowAnyMinReportingInterval } // minReportingInterval is the absolute minimum value supported for From 7507ea6bcbef3e25822b2bedccdc923ad1134b59 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 14 Mar 2023 20:20:09 -0400 Subject: [PATCH 827/998] gcp/observability: Change logging schema and set queue size limit for logs and batching delay (#6118) --- gcp/observability/exporting.go | 3 ++- gcp/observability/logging.go | 25 ++++++++++++++----------- gcp/observability/logging_test.go | 28 ++++++++++++++++++---------- 3 files changed, 34 insertions(+), 22 deletions(-) diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go index 862014640deb..3c27b3533e04 100644 --- a/gcp/observability/exporting.go +++ b/gcp/observability/exporting.go @@ -21,6 +21,7 @@ package observability import ( "context" "fmt" + "time" "google.golang.org/api/option" "google.golang.org/grpc" @@ -72,7 +73,7 @@ func newCloudLoggingExporter(ctx context.Context, config *config) (loggingExport return &cloudLoggingExporter{ projectID: config.ProjectID, client: c, - logger: c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(config.Labels)), + logger: 
c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(config.Labels), gcplogging.BufferedByteLimit(1024*1024*50), gcplogging.DelayThreshold(time.Second*10)), }, nil } diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index 7be05975146c..b04c60ba0430 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" iblog "google.golang.org/grpc/internal/binarylog" @@ -44,6 +45,8 @@ var lExporter loggingExporter var newLoggingExporter = newCloudLoggingExporter +var canonicalString = internal.CanonicalString.(func(codes.Code) string) + // translateMetadata translates the metadata from Binary Logging format to // its GrpcLogEntry equivalent. func translateMetadata(m *binlogpb.Metadata) map[string]string { @@ -153,7 +156,7 @@ type payload struct { // Timeout is the RPC timeout value. Timeout time.Duration `json:"timeout,omitempty"` // StatusCode is the gRPC status code. - StatusCode uint32 `json:"statusCode,omitempty"` + StatusCode string `json:"statusCode,omitempty"` // StatusMessage is the gRPC status message. 
StatusMessage string `json:"statusMessage,omitempty"` // StatusDetails is the value of the grpc-status-details-bin metadata key, @@ -170,9 +173,9 @@ type addrType int const ( typeUnknown addrType = iota // `json:"TYPE_UNKNOWN"` - typeIPv4 // `json:"TYPE_IPV4"` - typeIPv6 // `json:"TYPE_IPV6"` - typeUnix // `json:"TYPE_UNIX"` + ipv4 // `json:"IPV4"` + ipv6 // `json:"IPV6"` + unix // `json:"UNIX"` ) func (at addrType) MarshalJSON() ([]byte, error) { @@ -180,12 +183,12 @@ func (at addrType) MarshalJSON() ([]byte, error) { switch at { case typeUnknown: buffer.WriteString("TYPE_UNKNOWN") - case typeIPv4: - buffer.WriteString("TYPE_IPV4") - case typeIPv6: - buffer.WriteString("TYPE_IPV6") - case typeUnix: - buffer.WriteString("TYPE_UNIX") + case ipv4: + buffer.WriteString("IPV4") + case ipv6: + buffer.WriteString("IPV6") + case unix: + buffer.WriteString("UNIX") } buffer.WriteString(`"`) return buffer.Bytes(), nil @@ -303,7 +306,7 @@ func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER: grpcLogEntry.Type = eventTypeServerTrailer grpcLogEntry.Payload.Metadata = translateMetadata(binLogEntry.GetTrailer().Metadata) - grpcLogEntry.Payload.StatusCode = binLogEntry.GetTrailer().GetStatusCode() + grpcLogEntry.Payload.StatusCode = canonicalString(codes.Code(binLogEntry.GetTrailer().GetStatusCode())) grpcLogEntry.Payload.StatusMessage = binLogEntry.GetTrailer().GetStatusMessage() grpcLogEntry.Payload.StatusDetails = binLogEntry.GetTrailer().GetStatusDetails() grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index a42b1da550fd..e76ba386d235 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -231,7 +231,8 @@ func (s) TestClientRPCEventsLogAll(t *testing.T) { SequenceID: 5, Authority: ss.Address, Payload: payload{ - Metadata: map[string]string{}, + Metadata: 
map[string]string{}, + StatusCode: "OK", }, }, } @@ -319,7 +320,8 @@ func (s) TestClientRPCEventsLogAll(t *testing.T) { Authority: ss.Address, SequenceID: 6, Payload: payload{ - Metadata: map[string]string{}, + Metadata: map[string]string{}, + StatusCode: "OK", }, }, } @@ -438,7 +440,8 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) { SequenceID: 5, Authority: ss.Address, Payload: payload{ - Metadata: map[string]string{}, + Metadata: map[string]string{}, + StatusCode: "OK", }, }, } @@ -525,7 +528,8 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) { Authority: ss.Address, SequenceID: 6, Payload: payload{ - Metadata: map[string]string{}, + Metadata: map[string]string{}, + StatusCode: "OK", }, }, } @@ -745,7 +749,8 @@ func (s) TestClientRPCEventsTruncateHeaderAndMetadata(t *testing.T) { SequenceID: 5, Authority: ss.Address, Payload: payload{ - Metadata: map[string]string{}, + Metadata: map[string]string{}, + StatusCode: "OK", }, }, } @@ -892,7 +897,8 @@ func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { SequenceID: 5, Authority: ss.Address, Payload: payload{ - Metadata: map[string]string{}, + Metadata: map[string]string{}, + StatusCode: "OK", }, }, } @@ -959,7 +965,8 @@ func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { Authority: ss.Address, SequenceID: 3, Payload: payload{ - Metadata: map[string]string{}, + Metadata: map[string]string{}, + StatusCode: "OK", }, }, } @@ -1080,14 +1087,14 @@ func (s) TestMarshalJSON(t *testing.T) { Payload: payload{ Metadata: map[string]string{"header1": "value1"}, Timeout: 20, - StatusCode: 3, + StatusCode: "UNKNOWN", StatusMessage: "ok", StatusDetails: []byte("ok"), MessageLength: 3, Message: []byte("wow"), }, Peer: address{ - Type: typeIPv4, + Type: ipv4, Address: "localhost", IPPort: 16000, }, @@ -1214,7 +1221,8 @@ func (s) TestMetadataTruncationAccountsKey(t *testing.T) { SequenceID: 5, Authority: ss.Address, Payload: payload{ - Metadata: map[string]string{}, + Metadata: map[string]string{}, + 
StatusCode: "OK", }, }, } From 52ca9571068dd4433b4aed49f303abe0a3776468 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 14 Mar 2023 18:37:14 -0700 Subject: [PATCH 828/998] xds: make comparison of server configs in bootstrap more reliable (#6112) --- xds/googledirectpath/googlec2p.go | 15 +- xds/googledirectpath/googlec2p_test.go | 20 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 4 +- .../balancer/clusterimpl/balancer_test.go | 8 +- .../balancer/clusterimpl/clusterimpl.go | 2 +- .../clusterimpl/tests/balancer_test.go | 166 ++++++++++++++++ .../balancer/clusterresolver/config_test.go | 4 +- xds/internal/resolver/xds_resolver_test.go | 12 +- xds/internal/testutils/testutils.go | 21 ++ xds/internal/xdsclient/authority_test.go | 19 +- xds/internal/xdsclient/bootstrap/bootstrap.go | 118 ++++++++++-- .../xdsclient/bootstrap/bootstrap_test.go | 182 +++++++++--------- xds/internal/xdsclient/loadreport_test.go | 32 +-- .../xdsclient/tests/authority_test.go | 14 +- .../xdsclient/tests/cds_watchers_test.go | 13 +- .../xdsclient/tests/eds_watchers_test.go | 13 +- .../xdsclient/tests/lds_watchers_test.go | 13 +- .../xdsclient/tests/rds_watchers_test.go | 13 +- .../xdsclient/tests/resource_update_test.go | 27 +-- .../xdsclient/transport/loadreport_test.go | 15 +- xds/internal/xdsclient/transport/transport.go | 4 +- .../transport/transport_ack_nack_test.go | 31 +-- .../transport/transport_backoff_test.go | 35 +--- .../xdsclient/transport/transport_new_test.go | 24 +-- .../transport/transport_resource_test.go | 13 +- .../xdsclient/transport/transport_test.go | 8 +- xds/server_test.go | 32 +-- 27 files changed, 497 insertions(+), 361 deletions(-) create mode 100644 xds/internal/balancer/clusterimpl/tests/balancer_test.go diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index c8989c87bf49..fa7b770d878b 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -31,19 +31,19 @@ import ( "time" 
"google.golang.org/grpc" - "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/googlecloud" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" - _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/types/known/structpb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + + _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. ) const ( @@ -116,9 +116,14 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts if balancerName == "" { balancerName = tdURL } - serverConfig := &bootstrap.ServerConfig{ - ServerURI: balancerName, - Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), + serverConfig, err := bootstrap.ServerConfigFromJSON([]byte(fmt.Sprintf(` + { + "server_uri": "%s", + "channel_creds": [{"type": "google_default"}], + "server_features": ["xds_v3"] + }`, balancerName))) + if err != nil { + return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err) } config := &bootstrap.Config{ XDSServer: serverConfig, diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index f5e8f2d93c71..961f6546bf41 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -19,6 +19,7 @@ package googledirectpath import ( + "fmt" "strconv" "strings" "testing" @@ -212,15 +213,20 @@ func TestBuildXDS(t *testing.T) { }, } } - serverConfig := &bootstrap.ServerConfig{ - ServerURI: tdURL, + wantServerConfig, err := bootstrap.ServerConfigFromJSON([]byte(fmt.Sprintf(`{ + "server_uri": "%s", + "channel_creds": [{"type": "google_default"}], + "server_features": ["xds_v3"] 
+ }`, tdURL))) + if err != nil { + t.Fatalf("Failed to build server bootstrap config: %v", err) } wantConfig := &bootstrap.Config{ - XDSServer: serverConfig, + XDSServer: wantServerConfig, ClientDefaultListenerResourceNameTemplate: "%s", Authorities: map[string]*bootstrap.Authority{ "traffic-director-c2p.xds.googleapis.com": { - XDSServer: serverConfig, + XDSServer: wantServerConfig, }, }, NodeProto: wantNode, @@ -234,9 +240,9 @@ func TestBuildXDS(t *testing.T) { protocmp.Transform(), } select { - case c := <-configCh: - if diff := cmp.Diff(c, wantConfig, cmpOpts); diff != "" { - t.Fatalf("%v", diff) + case gotConfig := <-configCh: + if diff := cmp.Diff(wantConfig, gotConfig, cmpOpts); diff != "" { + t.Fatalf("Unexpected diff in bootstrap config (-want +got):\n%s", diff) } case <-time.After(time.Second): t.Fatalf("timeout waiting for client config") diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 132f1002a8cb..6e6ed1d3d4ce 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -53,7 +53,9 @@ const ( var ( defaultTestAuthorityServerConfig = &bootstrap.ServerConfig{ ServerURI: "self_server", - CredsType: "self_creds", + Creds: bootstrap.ChannelCreds{ + Type: "insecure", + }, } noopODLBCfg = outlierdetection.LBConfig{ Interval: 1<<63 - 1, diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index c4fcd84e55be..9a36db4dc7d8 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -59,7 +59,9 @@ var ( } testLRSServerConfig = &bootstrap.ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - CredsType: "google_default", + Creds: bootstrap.ChannelCreds{ + Type: "google_default", + }, } cmpOpts = cmp.Options{ @@ -720,7 +722,9 @@ func (s) TestUpdateLRSServer(t *testing.T) { 
testLRSServerConfig2 := &bootstrap.ServerConfig{ ServerURI: "trafficdirector-another.googleapis.com:443", - CredsType: "google_default", + Creds: bootstrap.ChannelCreds{ + Type: "google_default", + }, } // Update LRS server to a different name. if err := b.UpdateClientConnState(balancer.ClientConnState{ diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index b79b941ec79a..25090a5bb420 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -186,7 +186,7 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { } else { // Old is not nil, new is not nil, compare string values, if // different, stop old and start new. - if *b.lrsServer != *newConfig.LoadReportingServer { + if !b.lrsServer.Equal(newConfig.LoadReportingServer) { b.lrsServer = newConfig.LoadReportingServer stopOldLoadReport = true startNewLoadReport = true diff --git a/xds/internal/balancer/clusterimpl/tests/balancer_test.go b/xds/internal/balancer/clusterimpl/tests/balancer_test.go new file mode 100644 index 000000000000..57fa21166796 --- /dev/null +++ b/xds/internal/balancer/clusterimpl/tests/balancer_test.go @@ -0,0 +1,166 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterimpl_test + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/status" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" + + _ "google.golang.org/grpc/xds" +) + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 100 * time.Millisecond +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestConfigUpdateWithSameLoadReportingServerConfig tests the scenario where +// the clusterimpl LB policy receives a config update with no change in the load +// reporting server configuration. The test verifies that the existing load +// repoting stream is not terminated and that a new load reporting stream is not +// created. +func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { + // Create an xDS management server that serves ADS and LRS requests. + opts := e2e.ManagementServerOptions{SupportLoadReportingService: true} + mgmtServer, nodeID, _, resolver, mgmtServerCleanup := e2e.SetupManagementServer(t, opts) + defer mgmtServerCleanup() + + // Start a server backend exposing the test service. + backend := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + backend.StartServer() + defer backend.Stop() + + // Extract the host and port where the server backend is running. 
+ _, p, err := net.SplitHostPort(backend.Address) + if err != nil { + t.Fatalf("Invalid serving address for server backend: %v", err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("Invalid serving port for server backend: %v", err) + } + t.Logf("Started server backend at %q", backend.Address) + + // Configure the xDS management server with default resources. Override the + // default cluster to include an LRS server config pointing to self. + const serviceName = "my-test-xds-service" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: uint32(port), + SecLevel: e2e.SecurityLevelNone, + }) + resources.Clusters[0].LrsServer = &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Ensure that an LRS stream is created. + if _, err := mgmtServer.LRSServer.LRSStreamOpenChan.Receive(ctx); err != nil { + t.Fatalf("Failure when waiting for an LRS stream to be opened: %v", err) + } + + // Configure a new resource on the management server with drop config that + // drops all RPCs, but with no change in the load reporting server config. 
+ resources.Endpoints = []*v3endpointpb.ClusterLoadAssignment{ + e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ + ClusterName: "endpoints-" + serviceName, + Host: "localhost", + Ports: []uint32{uint32(port)}, + DropPercents: map[string]int{"test-drop-everything": 100}, + }), + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Repeatedly send RPCs until we sees that they are getting dropped, or the + // test context deadline expires. The former indicates that new config with + // drops has been applied. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if err != nil && status.Code(err) == codes.Unavailable && strings.Contains(err.Error(), "RPC is dropped") { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be dropped after config update") + } + + // Ensure that the old LRS stream is not closed. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := mgmtServer.LRSServer.LRSStreamCloseChan.Receive(sCtx); err == nil { + t.Fatal("LRS stream closed when expected not to") + } + + // Also ensure that a new LRS stream is not created. 
+ sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := mgmtServer.LRSServer.LRSStreamOpenChan.Receive(sCtx); err == nil { + t.Fatal("New LRS stream created when expected not to") + } +} diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index 76f2d744e19d..2455b88d8079 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -161,7 +161,9 @@ const ( var testLRSServerConfig = &bootstrap.ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - CredsType: "google_default", + Creds: bootstrap.ChannelCreds{ + Type: "google_default", + }, } func TestParseConfig(t *testing.T) { diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 1e7ae998aebd..43b835fada93 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -31,7 +31,6 @@ import ( "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/google/go-cmp/cmp" "github.com/google/uuid" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -53,6 +52,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -213,10 +213,7 @@ func (s) TestResolverBuilder_DifferentBootstrapConfigs(t *testing.T) { // Add top-level xDS server config corresponding to the above // management server. 
- test.bootstrapCfg.XDSServer = &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - } + test.bootstrapCfg.XDSServer = xdstestutils.ServerConfigForAddress(t, mgmtServer.Address) // Override xDS client creation to use bootstrap configuration // specified by the test. @@ -538,10 +535,7 @@ func (s) TestResolverWatchCallbackAfterClose(t *testing.T) { // closes the xDS client. func (s) TestResolverCloseClosesXDSClient(t *testing.T) { bootstrapCfg := &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummy-management-server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, "dummy-management-server-address"), } // Override xDS client creation to use bootstrap configuration pointing to a diff --git a/xds/internal/testutils/testutils.go b/xds/internal/testutils/testutils.go index bab5871105f4..a07f5a1171b3 100644 --- a/xds/internal/testutils/testutils.go +++ b/xds/internal/testutils/testutils.go @@ -19,6 +19,10 @@ package testutils import ( + "fmt" + "testing" + + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" ) @@ -45,3 +49,20 @@ func BuildResourceName(typ xdsresource.ResourceType, auth, id string, ctxParams ContextParams: ctxParams, }).String() } + +// ServerConfigForAddress returns a bootstrap.ServerConfig for the given address +// with default values of insecure channel_creds and v3 server_features. 
+func ServerConfigForAddress(t *testing.T, addr string) *bootstrap.ServerConfig { + t.Helper() + + jsonCfg := fmt.Sprintf(`{ + "server_uri": "%s", + "channel_creds": [{"type": "insecure"}], + "server_features": ["xds_v3"] + }`, addr) + sc, err := bootstrap.ServerConfigFromJSON([]byte(jsonCfg)) + if err != nil { + t.Fatalf("Failed to create server config from JSON %s: %v", jsonCfg, err) + } + return sc +} diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 4e8ea305921b..928e83f6c878 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -24,19 +24,20 @@ import ( "testing" "time" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal" - _ "google.golang.org/grpc/xds/internal/httpfilter/router" "google.golang.org/grpc/xds/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. 
) var emptyServerOpts = e2e.ManagementServerOptions{} @@ -64,11 +65,7 @@ func setupTest(ctx context.Context, t *testing.T, opts e2e.ManagementServerOptio } a, err := newAuthority(authorityArgs{ - serverCfg: &bootstrap.ServerConfig{ - ServerURI: ms.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - }, + serverCfg: xdstestutils.ServerConfigForAddress(t, ms.Address), bootstrapCfg: &bootstrap.Config{ NodeProto: &v3corepb.Node{Id: nodeID}, }, diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 2a920b94a312..896ef347cafe 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -84,19 +84,66 @@ func (d *googleDefaultCredsBuilder) Name() string { return "google_default" } +// ChannelCreds contains the credentials to be used while communicating with an +// xDS server. It is also used to dedup servers with the same server URI. +type ChannelCreds struct { + // Type contains a unique name identifying the credentials type. The only + // supported types currently are "google_default" and "insecure". + Type string + // Config contains the JSON configuration associated with the credentials. + Config json.RawMessage +} + +// Equal reports whether cc and other are considered equal. +func (cc ChannelCreds) Equal(other ChannelCreds) bool { + return cc.Type == other.Type && bytes.Equal(cc.Config, other.Config) +} + +// String returns a string representation of the credentials. It contains the +// type and the config (if non-nil) separated by a "-". +func (cc ChannelCreds) String() string { + if cc.Config == nil { + return cc.Type + } + + // We do not expect the Marshal call to fail since we wrote to cc.Config + // after a successful unmarshaling from JSON configuration. Therefore, + // it is safe to ignore the error here. 
+ b, _ := json.Marshal(cc.Config) + return cc.Type + "-" + string(b) +} + // ServerConfig contains the configuration to connect to a server, including // URI, creds, and transport API version (e.g. v2 or v3). +// +// It contains unexported fields that are initialized when unmarshaled from JSON +// using either the UnmarshalJSON() method or the ServerConfigFromJSON() +// function. Hence users are strongly encouraged not to use a literal struct +// initialization to create an instance of this type, but instead unmarshal from +// JSON using one of the two available options. type ServerConfig struct { // ServerURI is the management server to connect to. // // The bootstrap file contains an ordered list of xDS servers to contact for // this authority. The first one is picked. ServerURI string - // Creds contains the credentials to be used while talking to the xDS - // server, as a grpc.DialOption. - Creds grpc.DialOption - // CredsType is the type of the creds. It will be used to dedup servers. - CredsType string + // Creds contains the credentials to be used while communicationg with this + // xDS server. It is also used to dedup servers with the same server URI. + Creds ChannelCreds + // ServerFeatures contains a list of features supported by this xDS server. + // It is also used to dedup servers with the same server URI and creds. + ServerFeatures []string + + // As part of unmarshaling the JSON config into this struct, we ensure that + // the credentials config is valid by building an instance of the specified + // credentials and store it here as a grpc.DialOption for easy access when + // dialing this xDS server. + credsDialOption grpc.DialOption +} + +// CredsDialOption returns the configured credentials as a grpc dial option. +func (sc *ServerConfig) CredsDialOption() grpc.DialOption { + return sc.credsDialOption } // String returns the string representation of the ServerConfig. @@ -109,17 +156,17 @@ type ServerConfig struct { // content. 
It doesn't cover NodeProto because NodeProto isn't used by // federation. func (sc *ServerConfig) String() string { - var ver = "xDSv3" - return strings.Join([]string{sc.ServerURI, sc.CredsType, ver}, "-") + features := strings.Join(sc.ServerFeatures, "-") + return strings.Join([]string{sc.ServerURI, sc.Creds.String(), features}, "-") } // MarshalJSON marshals the ServerConfig to json. func (sc ServerConfig) MarshalJSON() ([]byte, error) { server := xdsServer{ - ServerURI: sc.ServerURI, - ChannelCreds: []channelCreds{{Type: sc.CredsType, Config: nil}}, + ServerURI: sc.ServerURI, + ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}}, + ServerFeatures: sc.ServerFeatures, } - server.ServerFeatures = []string{serverFeaturesV3} return json.Marshal(server) } @@ -129,10 +176,11 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &server); err != nil { return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err) } + sc.ServerURI = server.ServerURI + sc.ServerFeatures = server.ServerFeatures for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. - sc.CredsType = cc.Type c := bootstrap.GetCredentials(cc.Type) if c == nil { continue @@ -141,12 +189,56 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) } - sc.Creds = grpc.WithCredentialsBundle(bundle) + sc.Creds = ChannelCreds{ + Type: cc.Type, + Config: cc.Config, + } + sc.credsDialOption = grpc.WithCredentialsBundle(bundle) break } return nil } +// ServerConfigFromJSON creates a new ServerConfig from the given JSON +// configuration. This is the preferred way of creating a ServerConfig when +// hand-crafting the JSON configuration. 
+func ServerConfigFromJSON(data []byte) (*ServerConfig, error) { + sc := new(ServerConfig) + if err := sc.UnmarshalJSON(data); err != nil { + return nil, err + } + return sc, nil +} + +// Equal reports whether sc and other are considered equal. +func (sc *ServerConfig) Equal(other *ServerConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + case sc.ServerURI != other.ServerURI: + return false + case !sc.Creds.Equal(other.Creds): + return false + case !equalStringSlice(sc.ServerFeatures, other.ServerFeatures): + return false + } + return true +} + +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + // unmarshalJSONServerConfigSlice unmarshals JSON to a slice. func unmarshalJSONServerConfigSlice(data []byte) ([]*ServerConfig, error) { var servers []*ServerConfig @@ -422,7 +514,7 @@ func newConfigFromContents(data []byte) (*Config, error) { if config.XDSServer.ServerURI == "" { return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"]) } - if config.XDSServer.Creds == nil { + if config.XDSServer.CredsDialOption() == nil { return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) } // Post-process the authorities' client listener resource template field: diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 0ff7813f493b..7aa409b24c88 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -28,10 +28,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - 
"google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" @@ -56,7 +53,7 @@ var ( "channel_creds": [ { "type": "google_default" } ], - "server_features" : ["foo", "bar"] + "server_features" : ["xds_v3"] }] }`, "serverFeaturesExcludesXDSV3": ` @@ -71,8 +68,7 @@ var ( "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "google_default" } - ], - "server_features" : ["foo", "bar", "xds_v3"] + ] }] }`, "emptyNodeProto": ` @@ -145,7 +141,8 @@ var ( "channel_creds": [ { "type": "not-google-default" }, { "type": "google_default" } - ] + ], + "server_features": ["xds_v3"] }] }`, "goodBootstrap": ` @@ -160,7 +157,8 @@ var ( "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "google_default" } - ] + ], + "server_features": ["xds_v3"] }] }`, "multipleXDSServers": ` @@ -174,7 +172,8 @@ var ( "xds_servers" : [ { "server_uri": "trafficdirector.googleapis.com:443", - "channel_creds": [{ "type": "google_default" }] + "channel_creds": [{ "type": "google_default" }], + "server_features": ["xds_v3"] }, { "server_uri": "backup.never.use.com:1234", @@ -198,19 +197,35 @@ var ( ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, } nilCredsConfigV3 = &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "insecure"}, + ServerFeatures: []string{"xds_v3"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "%s", + } + nilCredsConfigNoServerFeatures = &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", + Creds: ChannelCreds{Type: "insecure"}, }, NodeProto: v3NodeProto, 
ClientDefaultListenerResourceNameTemplate: "%s", } nonNilCredsConfigV3 = &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + ServerFeatures: []string{"xds_v3"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "%s", + } + nonNilCredsConfigNoServerFeatures = &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", + Creds: ChannelCreds{Type: "google_default"}, }, NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "%s", @@ -218,14 +233,13 @@ var ( ) func (c *Config) compare(want *Config) error { - if diff := cmp.Diff(c, want, + if diff := cmp.Diff(want, c, cmpopts.EquateEmpty(), - cmp.AllowUnexported(ServerConfig{}), cmp.Comparer(proto.Equal), cmp.Comparer(func(a, b grpc.DialOption) bool { return (a != nil) == (b != nil) }), cmp.Transformer("certproviderconfigstring", func(a *certprovider.BuildableConfig) string { return a.String() }), ); diff != "" { - return fmt.Errorf("diff: %v", diff) + return fmt.Errorf("unexpected diff in config (-want, +got):\n%s", diff) } return nil } @@ -251,7 +265,6 @@ func setupBootstrapOverride(bootstrapFileMap map[string]string) func() { // This function overrides the bootstrap file NAME env variable, to test the // code that reads file with the given fileName. 
func testNewConfigWithFileNameEnv(t *testing.T, fileName string, wantError bool, wantConfig *Config) { - t.Helper() origBootstrapFileName := envconfig.XDSBootstrapFileName envconfig.XDSBootstrapFileName = fileName defer func() { envconfig.XDSBootstrapFileName = origBootstrapFileName }() @@ -373,8 +386,7 @@ func TestNewConfigV3ProtoSuccess(t *testing.T) { "emptyNodeProto", &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", + Creds: ChannelCreds{Type: "insecure"}, }, NodeProto: &v3corepb.Node{ UserAgentName: gRPCUserAgentName, @@ -384,9 +396,9 @@ func TestNewConfigV3ProtoSuccess(t *testing.T) { ClientDefaultListenerResourceNameTemplate: "%s", }, }, - {"unknownTopLevelFieldInFile", nilCredsConfigV3}, - {"unknownFieldInNodeProto", nilCredsConfigV3}, - {"unknownFieldInXdsServer", nilCredsConfigV3}, + {"unknownTopLevelFieldInFile", nilCredsConfigNoServerFeatures}, + {"unknownFieldInNodeProto", nilCredsConfigNoServerFeatures}, + {"unknownFieldInXdsServer", nilCredsConfigNoServerFeatures}, {"multipleChannelCreds", nonNilCredsConfigV3}, {"goodBootstrap", nonNilCredsConfigV3}, {"multipleXDSServers", nonNilCredsConfigV3}, @@ -400,34 +412,12 @@ func TestNewConfigV3ProtoSuccess(t *testing.T) { } } -// TestNewConfigV3Support verifies bootstrap functionality involving support for -// the xDS v3 transport protocol. Here the client ends up using v2 or v3 based -// on what the server supports. 
-func TestNewConfigV3Support(t *testing.T) { - cancel := setupBootstrapOverride(v3BootstrapFileMap) - defer cancel() - - tests := []struct { - name string - wantConfig *Config - }{ - {"serverFeaturesIncludesXDSV3", nonNilCredsConfigV3}, - {"serverFeaturesExcludesXDSV3", nonNilCredsConfigV3}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testNewConfigWithFileNameEnv(t, test.name, false, test.wantConfig) - testNewConfigWithFileContentEnv(t, test.name, false, test.wantConfig) - }) - } -} - // TestNewConfigBootstrapEnvPriority tests that the two env variables are read // in correct priority. // -// the case where the bootstrap file -// environment variable is not set. +// "GRPC_XDS_BOOTSTRAP" which specifies the file name containing the bootstrap +// configuration takes precedence over "GRPC_XDS_BOOTSTRAP_CONFIG", which +// directly specifies the bootstrap configuration in itself. func TestNewConfigBootstrapEnvPriority(t *testing.T) { oldFileReadFunc := bootstrapFileReadFunc bootstrapFileReadFunc = func(filename string) ([]byte, error) { @@ -440,7 +430,7 @@ func TestNewConfigBootstrapEnvPriority(t *testing.T) { goodFileName2 := "serverFeaturesExcludesXDSV3" goodFileContent2 := v3BootstrapFileMap[goodFileName2] - goodConfig2 := nonNilCredsConfigV3 + goodConfig2 := nonNilCredsConfigNoServerFeatures origBootstrapFileName := envconfig.XDSBootstrapFileName envconfig.XDSBootstrapFileName = "" @@ -458,21 +448,33 @@ func TestNewConfigBootstrapEnvPriority(t *testing.T) { // When one of them is set, it should be used. 
envconfig.XDSBootstrapFileName = goodFileName1 envconfig.XDSBootstrapFileContent = "" - if c, err := NewConfig(); err != nil || c.compare(goodConfig1) != nil { - t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) + c, err := NewConfig() + if err != nil { + t.Errorf("NewConfig() failed: %v", err) + } + if err := c.compare(goodConfig1); err != nil { + t.Error(err) } envconfig.XDSBootstrapFileName = "" envconfig.XDSBootstrapFileContent = goodFileContent2 - if c, err := NewConfig(); err != nil || c.compare(goodConfig2) != nil { - t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) + c, err = NewConfig() + if err != nil { + t.Errorf("NewConfig() failed: %v", err) + } + if err := c.compare(goodConfig2); err != nil { + t.Error(err) } // Set both, file name should be read. envconfig.XDSBootstrapFileName = goodFileName1 envconfig.XDSBootstrapFileContent = goodFileContent2 - if c, err := NewConfig(); err != nil || c.compare(goodConfig1) != nil { - t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) + c, err = NewConfig() + if err != nil { + t.Errorf("NewConfig() failed: %v", err) + } + if err := c.compare(goodConfig1); err != nil { + t.Error(err) } } @@ -559,7 +561,7 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { "channel_creds": [ { "type": "google_default" } ], - "server_features" : ["foo", "bar", "xds_v3"] + "server_features" : ["xds_v3"] }], "certificate_providers": { "unknownProviderInstance1": { @@ -585,7 +587,7 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { "channel_creds": [ { "type": "google_default" } ], - "server_features" : ["foo", "bar", "xds_v3"], + "server_features" : ["xds_v3"], }], "certificate_providers": { "unknownProviderInstance": { @@ -609,9 +611,9 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ - { "type": "google_default" } + { "type": "insecure" } ], - 
"server_features" : ["foo", "bar", "xds_v3"] + "server_features" : ["xds_v3"] }], "certificate_providers": { "unknownProviderInstance": { @@ -639,12 +641,19 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { cancel := setupBootstrapOverride(bootstrapFileMap) defer cancel() + // Cannot use xdstestutils.ServerConfigForAddress here, as it would lead to + // a cyclic dependency. + jsonCfg := `{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [{"type": "insecure"}], + "server_features": ["xds_v3"] + }` + serverCfg, err := ServerConfigFromJSON([]byte(jsonCfg)) + if err != nil { + t.Fatalf("Failed to create server config from JSON %s: %v", jsonCfg, err) + } goodConfig := &Config{ - XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", - }, + XDSServer: serverCfg, NodeProto: v3NodeProto, CertProviderConfigs: map[string]*certprovider.BuildableConfig{ "fakeProviderInstance": wantCfg, @@ -735,8 +744,7 @@ func TestNewConfigWithServerListenerResourceNameTemplate(t *testing.T) { wantConfig: &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", + Creds: ChannelCreds{Type: "google_default"}, }, NodeProto: v3NodeProto, ServerListenerResourceNameTemplate: "grpc/server?xds.resource.listening_address=%s", @@ -801,7 +809,7 @@ func TestNewConfigWithFederation(t *testing.T) { "xds_servers": [{ "server_uri": "td.com", "channel_creds": [ { "type": "google_default" } ], - "server_features" : ["foo", "bar", "xds_v3"] + "server_features" : ["xds_v3"] }] } } @@ -884,8 +892,7 @@ func TestNewConfigWithFederation(t *testing.T) { wantConfig: &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - 
CredsType: "google_default", + Creds: ChannelCreds{Type: "google_default"}, }, NodeProto: v3NodeProto, ServerListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/grpc/server?listening_address=%s", @@ -894,9 +901,9 @@ func TestNewConfigWithFederation(t *testing.T) { "xds.td.com": { ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", XDSServer: &ServerConfig{ - ServerURI: "td.com", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", + ServerURI: "td.com", + Creds: ChannelCreds{Type: "google_default"}, + ServerFeatures: []string{"xds_v3"}, }, }, }, @@ -907,8 +914,7 @@ func TestNewConfigWithFederation(t *testing.T) { wantConfig: &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", + Creds: ChannelCreds{Type: "google_default"}, }, NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "%s", @@ -919,8 +925,7 @@ func TestNewConfigWithFederation(t *testing.T) { wantConfig: &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", + Creds: ChannelCreds{Type: "google_default"}, }, NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", @@ -939,8 +944,7 @@ func TestNewConfigWithFederation(t *testing.T) { wantConfig: &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - CredsType: "google_default", + Creds: ChannelCreds{Type: "google_default"}, }, NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", @@ -966,22 
+970,26 @@ func TestNewConfigWithFederation(t *testing.T) { } func TestServerConfigMarshalAndUnmarshal(t *testing.T) { - c := ServerConfig{ - ServerURI: "test-server", - Creds: nil, - CredsType: "test-creds", + jsonCfg := `{ + "server_uri": "test-server", + "channel_creds": [{"type": "insecure"}], + "server_features": ["xds_v3"] + }` + origConfig, err := ServerConfigFromJSON([]byte(jsonCfg)) + if err != nil { + t.Fatalf("Failed to create server config from JSON %s: %v", jsonCfg, err) } - - bs, err := json.Marshal(c) + bs, err := json.Marshal(origConfig) if err != nil { t.Fatalf("failed to marshal: %v", err) } - var cUnmarshal ServerConfig - if err := json.Unmarshal(bs, &cUnmarshal); err != nil { + + unmarshaledConfig := new(ServerConfig) + if err := json.Unmarshal(bs, unmarshaledConfig); err != nil { t.Fatalf("failed to unmarshal: %v", err) } - if diff := cmp.Diff(cUnmarshal, c); diff != "" { - t.Fatalf("diff (-got +want): %v", diff) + if diff := cmp.Diff(origConfig, unmarshaledConfig); diff != "" { + t.Fatalf("Unexpected diff in server config (-want, +got):\n%s", diff) } } diff --git a/xds/internal/xdsclient/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go index e4e21e412374..06e58acdd2dc 100644 --- a/xds/internal/xdsclient/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -24,11 +24,10 @@ import ( "time" "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/status" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" @@ -43,17 +42,15 @@ const ( ) func (s) TestLRSClient(t *testing.T) { - fs, sCleanup, err := fakeserver.StartServer(nil) + fs1, sCleanup, err := fakeserver.StartServer(nil) if err != nil { t.Fatalf("failed to start fake xDS 
server: %v", err) } defer sCleanup() + serverCfg1 := xdstestutils.ServerConfigForAddress(t, fs1.Address) xdsC, close, err := NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: fs.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: serverCfg1, NodeProto: &v3corepb.Node{}, }, defaultClientWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -62,24 +59,18 @@ func (s) TestLRSClient(t *testing.T) { defer close() // Report to the same address should not create new ClientConn. - store1, lrsCancel1 := xdsC.ReportLoad( - &bootstrap.ServerConfig{ - ServerURI: fs.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - }, - ) + store1, lrsCancel1 := xdsC.ReportLoad(serverCfg1) defer lrsCancel1() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if u, err := fs.NewConnChan.Receive(ctx); err != nil { + if u, err := fs1.NewConnChan.Receive(ctx); err != nil { t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) } sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if u, err := fs.NewConnChan.Receive(sCtx); err != context.DeadlineExceeded { + if u, err := fs1.NewConnChan.Receive(sCtx); err != context.DeadlineExceeded { t.Errorf("unexpected NewConn: %v, %v, want channel recv timeout", u, err) } @@ -90,13 +81,8 @@ func (s) TestLRSClient(t *testing.T) { defer sCleanup2() // Report to a different address should create new ClientConn. 
- store2, lrsCancel2 := xdsC.ReportLoad( - &bootstrap.ServerConfig{ - ServerURI: fs2.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - }, - ) + serverCgf2 := xdstestutils.ServerConfigForAddress(t, fs2.Address) + store2, lrsCancel2 := xdsC.ReportLoad(serverCgf2) defer lrsCancel2() if u, err := fs2.NewConnChan.Receive(ctx); err != nil { t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) diff --git a/xds/internal/xdsclient/tests/authority_test.go b/xds/internal/xdsclient/tests/authority_test.go index b14d0d1cdfe7..527e2650d256 100644 --- a/xds/internal/xdsclient/tests/authority_test.go +++ b/xds/internal/xdsclient/tests/authority_test.go @@ -24,8 +24,6 @@ import ( "time" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" xdstestutils "google.golang.org/grpc/xds/internal/testutils" @@ -94,20 +92,12 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. // config, which points to the above management server. 
nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: defaultAuthorityServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, defaultAuthorityServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, Authorities: map[string]*bootstrap.Authority{ testAuthority1: {}, testAuthority2: {}, - testAuthority3: { - XDSServer: &bootstrap.ServerConfig{ - ServerURI: nonDefaultAuthorityServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, - }, + testAuthority3: {XDSServer: xdstestutils.ServerConfigForAddress(t, nonDefaultAuthorityServer.Address)}, }, }, defaultTestWatchExpiryTimeout, idleTimeout) if err != nil { diff --git a/xds/internal/xdsclient/tests/cds_watchers_test.go b/xds/internal/xdsclient/tests/cds_watchers_test.go index 9cbff4af83d7..d6d02724fc91 100644 --- a/xds/internal/xdsclient/tests/cds_watchers_test.go +++ b/xds/internal/xdsclient/tests/cds_watchers_test.go @@ -28,11 +28,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -535,10 +534,7 @@ func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { defer mgmtServer.Stop() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: 
grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -581,10 +577,7 @@ func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { diff --git a/xds/internal/xdsclient/tests/eds_watchers_test.go b/xds/internal/xdsclient/tests/eds_watchers_test.go index 1389862f4596..9b220fc59f2c 100644 --- a/xds/internal/xdsclient/tests/eds_watchers_test.go +++ b/xds/internal/xdsclient/tests/eds_watchers_test.go @@ -28,12 +28,11 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -592,10 +591,7 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { defer mgmtServer.Stop() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - 
Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -638,10 +634,7 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { diff --git a/xds/internal/xdsclient/tests/lds_watchers_test.go b/xds/internal/xdsclient/tests/lds_watchers_test.go index 67da0843ed66..7e41a81361ff 100644 --- a/xds/internal/xdsclient/tests/lds_watchers_test.go +++ b/xds/internal/xdsclient/tests/lds_watchers_test.go @@ -29,13 +29,12 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -584,10 +583,7 @@ func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { defer mgmtServer.Stop() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: 
&bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -630,10 +626,7 @@ func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { diff --git a/xds/internal/xdsclient/tests/rds_watchers_test.go b/xds/internal/xdsclient/tests/rds_watchers_test.go index 2617622c7b34..b03b9ce259bc 100644 --- a/xds/internal/xdsclient/tests/rds_watchers_test.go +++ b/xds/internal/xdsclient/tests/rds_watchers_test.go @@ -28,11 +28,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -626,10 +625,7 @@ func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Create an xDS client talking to a non-existent management server. 
client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -672,10 +668,7 @@ func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { diff --git a/xds/internal/xdsclient/tests/resource_update_test.go b/xds/internal/xdsclient/tests/resource_update_test.go index aeaa5c448374..dfb285de3a8d 100644 --- a/xds/internal/xdsclient/tests/resource_update_test.go +++ b/xds/internal/xdsclient/tests/resource_update_test.go @@ -28,12 +28,11 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/xds/internal" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -243,11 +242,7 @@ func (s) TestHandleListenerResponseFromManagementServer(t 
*testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -509,11 +504,7 @@ func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -751,11 +742,7 @@ func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { // Create an xDS client talking to the above management server. nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { @@ -1076,11 +1063,7 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { // Create an xDS client talking to the above management server. 
nodeID := uuid.New().String() client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, }, defaultTestWatchExpiryTimeout, time.Duration(0)) if err != nil { diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go index 039780ae7f13..c3cdfede5cb6 100644 --- a/xds/internal/xdsclient/transport/loadreport_test.go +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -25,10 +25,8 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/google/go-cmp/cmp" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils/xds/fakeserver" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/durationpb" @@ -43,17 +41,10 @@ func (s) TestReportLoad(t *testing.T) { defer cleanup() t.Logf("Started xDS management server on %s", mgmtServer.Address) - // Construct the server config to represent the management server. - nodeProto := &v3corepb.Node{Id: uuid.New().String()} - serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a transport to the fake management server. 
+ nodeProto := &v3corepb.Node{Id: uuid.New().String()} tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *testutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: nodeProto, OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No ADS validation. OnErrorHandler: func(error) {}, // No ADS stream error handling. diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 0ef46b8548d0..f02416f37c48 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -177,7 +177,7 @@ func New(opts Options) (*Transport, error) { switch { case opts.ServerCfg.ServerURI == "": return nil, errors.New("missing server URI when creating a new transport") - case opts.ServerCfg.Creds == nil: + case opts.ServerCfg.CredsDialOption() == nil: return nil, errors.New("missing credentials when creating a new transport") case opts.OnRecvHandler == nil: return nil, errors.New("missing OnRecv callback handler when creating a new transport") @@ -189,7 +189,7 @@ func New(opts Options) (*Transport, error) { // Dial the xDS management with the passed in credentials. dopts := []grpc.DialOption{ - opts.ServerCfg.Creds, + opts.ServerCfg.CredsDialOption(), grpc.WithKeepaliveParams(keepalive.ClientParameters{ // We decided to use these sane defaults in all languages, and // kicked the can down the road as far making these configurable. 
diff --git a/xds/internal/xdsclient/transport/transport_ack_nack_test.go b/xds/internal/xdsclient/transport/transport_ack_nack_test.go index acaec84229bf..f887ae1de0bd 100644 --- a/xds/internal/xdsclient/transport/transport_ack_nack_test.go +++ b/xds/internal/xdsclient/transport/transport_ack_nack_test.go @@ -26,12 +26,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" - "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/proto" @@ -135,16 +133,9 @@ func (s) TestSimpleAckAndNack(t *testing.T) { SkipValidation: true, }) - // Construct the server config to represent the management server. - serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a new transport. tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), OnRecvHandler: dataModelValidator, OnErrorHandler: func(err error) {}, OnSendHandler: func(*transport.ResourceSendInfo) {}, @@ -322,16 +313,9 @@ func (s) TestInvalidFirstResponse(t *testing.T) { SkipValidation: true, }) - // Construct the server config to represent the management server. - serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a new transport. 
tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, OnRecvHandler: dataModelValidator, OnErrorHandler: func(err error) {}, @@ -451,16 +435,9 @@ func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { SkipValidation: true, }) - // Construct the server config to represent the management server. - serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a new transport. tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), NodeProto: &v3corepb.Node{Id: nodeID}, OnRecvHandler: dataModelValidator, OnErrorHandler: func(err error) {}, diff --git a/xds/internal/xdsclient/transport/transport_backoff_test.go b/xds/internal/xdsclient/transport/transport_backoff_test.go index 34ecc5da9782..2838eae7554e 100644 --- a/xds/internal/xdsclient/transport/transport_backoff_test.go +++ b/xds/internal/xdsclient/transport/transport_backoff_test.go @@ -27,12 +27,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/uuid" - "google.golang.org/grpc" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" @@ -98,18 +96,11 @@ func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { return 0 } - // Construct the server config to represent the 
management server. - nodeID := uuid.New().String() - serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a new transport. Since we are only testing backoff behavior here, // we can pass a no-op data model layer implementation. + nodeID := uuid.New().String() tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. OnErrorHandler: func(err error) { select { @@ -267,17 +258,10 @@ func (s) TestTransport_RetriesAfterBrokenStream(t *testing.T) { SkipValidation: true, }) - // Construct the server config to represent the management server. - serverCfg := bootstrap.ServerConfig{ - ServerURI: lis.Addr().String(), - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a new transport. Since we are only testing backoff behavior here, // we can pass a no-op data model layer implementation. tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. OnErrorHandler: func(err error) { select { @@ -405,18 +389,11 @@ func (s) TestTransport_ResourceRequestedBeforeStreamCreation(t *testing.T) { // stream to the management server. lis.Stop() - // Construct the server config to represent the management server. - nodeID := uuid.New().String() - serverCfg := bootstrap.ServerConfig{ - ServerURI: lis.Addr().String(), - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a new transport. Since we are only testing backoff behavior here, // we can pass a no-op data model layer implementation. 
+ nodeID := uuid.New().String() tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. OnErrorHandler: func(error) {}, // No stream error handling. OnSendHandler: func(*transport.ResourceSendInfo) {}, // No on send handler diff --git a/xds/internal/xdsclient/transport/transport_new_test.go b/xds/internal/xdsclient/transport/transport_new_test.go index a4c0da03d633..2f6c3148a267 100644 --- a/xds/internal/xdsclient/transport/transport_new_test.go +++ b/xds/internal/xdsclient/transport/transport_new_test.go @@ -21,8 +21,7 @@ import ( "strings" "testing" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport" @@ -49,10 +48,8 @@ func (s) TestNew(t *testing.T) { }, { name: "missing onRecv handler", - opts: transport.Options{ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + opts: transport.Options{ + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), NodeProto: &v3corepb.Node{}, }, wantErrStr: "missing OnRecv callback handler when creating a new transport", @@ -60,10 +57,7 @@ func (s) TestNew(t *testing.T) { { name: "missing onError handler", opts: transport.Options{ - ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), NodeProto: &v3corepb.Node{}, OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, OnSendHandler: func(*transport.ResourceSendInfo) {}, @@ -74,10 +68,7 @@ func (s) TestNew(t *testing.T) { { name: "missing onSend 
handler", opts: transport.Options{ - ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), NodeProto: &v3corepb.Node{}, OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, OnErrorHandler: func(error) {}, @@ -87,10 +78,7 @@ func (s) TestNew(t *testing.T) { { name: "happy case", opts: transport.Options{ - ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), NodeProto: &v3corepb.Node{}, OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, OnErrorHandler: func(error) {}, diff --git a/xds/internal/xdsclient/transport/transport_resource_test.go b/xds/internal/xdsclient/transport/transport_resource_test.go index b4c44f4d6160..0824af77f4ff 100644 --- a/xds/internal/xdsclient/transport/transport_resource_test.go +++ b/xds/internal/xdsclient/transport/transport_resource_test.go @@ -26,12 +26,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/fakeserver" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/testing/protocmp" @@ -176,17 +174,10 @@ func (s) TestHandleResponseFromManagementServer(t *testing.T) { t.Logf("Started xDS management server on %s", mgmtServer.Address) mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} - // 
Construct the server config to represent the management server. - serverCfg := bootstrap.ServerConfig{ - ServerURI: mgmtServer.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - CredsType: "insecure", - } - // Create a new transport. resourcesCh := testutils.NewChannel() tr, err := transport.New(transport.Options{ - ServerCfg: serverCfg, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), // No validation. Simply push received resources on a channel. OnRecvHandler: func(update transport.ResourceUpdate) error { resourcesCh.Send(&resourcesWithTypeURL{ diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go index de8d665080b6..50dc74dd0ba2 100644 --- a/xds/internal/xdsclient/transport/transport_test.go +++ b/xds/internal/xdsclient/transport/transport_test.go @@ -22,9 +22,8 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" ) type s struct { @@ -48,10 +47,7 @@ func (s) TestNewWithGRPCDial(t *testing.T) { // Create a new transport and ensure that the custom dialer was called. 
opts := Options{ - ServerCfg: bootstrap.ServerConfig{ - ServerURI: "server-address", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + ServerCfg: *xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: &v3corepb.Node{}, OnRecvHandler: func(ResourceUpdate) error { return nil }, OnErrorHandler: func(error) {}, diff --git a/xds/server_test.go b/xds/server_test.go index a965fb6ea315..e20f0400ee9a 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -317,16 +317,13 @@ func (p *fakeProvider) Close() { // setupOverrides sets up overrides for bootstrap config, new xdsClient creation // and new gRPC.Server creation. -func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { +func setupOverrides(t *testing.T) (*fakeGRPCServer, *testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() c.SetBootstrapConfig(&bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, CertProviderConfigs: certProviderConfigs, @@ -349,16 +346,13 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { // one. Tests that use xdsCredentials need a real grpc.Server instead of a fake // one, because the xDS-enabled server needs to read configured creds from the // underlying grpc.Server to confirm whether xdsCreds were configured. 
-func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, func()) { +func setupOverridesForXDSCreds(t *testing.T, includeCertProviderCfg bool) (*testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() bc := &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, } @@ -382,7 +376,7 @@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, // 4. Push a good response from the xdsClient, and make sure that Serve() on the // underlying grpc.Server is called. func (s) TestServeSuccess(t *testing.T) { - fs, clientCh, cleanup := setupOverrides() + fs, clientCh, cleanup := setupOverrides(t) defer cleanup() // Create a new xDS-enabled gRPC server and pass it a server option to get @@ -505,7 +499,7 @@ func (s) TestServeSuccess(t *testing.T) { // is received. This should cause Serve() to exit before calling Serve() on the // underlying grpc.Server. 
func (s) TestServeWithStop(t *testing.T) { - fs, clientCh, cleanup := setupOverrides() + fs, clientCh, cleanup := setupOverrides(t) defer cleanup() // Note that we are not deferring the Stop() here since we explicitly call @@ -604,10 +598,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { { desc: "certificate provider config is missing", bootstrapConfig: &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, }, @@ -615,10 +606,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { { desc: "server_listener_resource_name_template is missing", bootstrapConfig: &bootstrap.Config{ - XDSServer: &bootstrap.ServerConfig{ - ServerURI: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, CertProviderConfigs: certProviderConfigs, }, @@ -708,7 +696,7 @@ func (s) TestServeNewClientFailure(t *testing.T) { // server is not configured with xDS credentials. Verifies that the security // config received as part of a Listener update is not acted upon. func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { - fs, clientCh, cleanup := setupOverrides() + fs, clientCh, cleanup := setupOverrides(t) defer cleanup() // Create a server option to get notified about serving mode changes. We don't @@ -828,7 +816,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { // server is configured with xDS credentials, but receives a Listener update // with an error. Verifies that no certificate providers are created. 
func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { - clientCh, cleanup := setupOverridesForXDSCreds(true) + clientCh, cleanup := setupOverridesForXDSCreds(t, true) defer cleanup() xdsCreds, err := xds.NewServerCredentials(xds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) From 36fd0a43969cee5d7e0410231f8bee284bf766e7 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 14 Mar 2023 22:50:56 -0400 Subject: [PATCH 829/998] gcp/observability: Add compressed metrics to observability module and synchronize View data with exporter (#6105) --- gcp/observability/observability_test.go | 12 ++++++++++++ gcp/observability/opencensus.go | 25 ++++++++++++++++++++----- stats/opencensus/server_metrics.go | 4 ++-- 3 files changed, 34 insertions(+), 7 deletions(-) diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index fa8cba1d38df..b9edf89fac6b 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -436,6 +436,18 @@ func (s) TestOpenCensusIntegration(t *testing.T) { if value := fe.SeenViews["grpc.io/server/server_latency"]; value != TypeOpenCensusViewDistribution { errs = append(errs, fmt.Errorf("grpc.io/server/server_latency: %s != %s", value, TypeOpenCensusViewDistribution)) } + if value := fe.SeenViews["grpc.io/client/sent_compressed_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/sent_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/client/received_compressed_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/received_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/server/sent_compressed_bytes_per_rpc"]; value != 
TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/sent_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/server/received_compressed_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/received_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } if fe.SeenSpans <= 0 { errs = append(errs, fmt.Errorf("unexpected number of seen spans: %v <= 0", fe.SeenSpans)) } diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 386b5ec55b4a..be8a615e5b0b 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -35,6 +35,19 @@ import ( var ( // It's a variable instead of const to speed up testing defaultMetricsReportingInterval = time.Second * 30 + defaultViews = []*view.View{ + opencensus.ClientStartedRPCsView, + opencensus.ClientCompletedRPCsView, + opencensus.ClientRoundtripLatencyView, + opencensus.ClientSentCompressedBytesPerRPCView, + opencensus.ClientReceivedCompressedBytesPerRPCView, + opencensus.ClientAPILatencyView, + opencensus.ServerStartedRPCsView, + opencensus.ServerCompletedRPCsView, + opencensus.ServerSentCompressedBytesPerRPCView, + opencensus.ServerReceivedCompressedBytesPerRPCView, + opencensus.ServerLatencyView, + } ) func labelsToMonitoringLabels(labels map[string]string) *stackdriver.Labels { @@ -106,11 +119,8 @@ func startOpenCensus(config *config) error { } if config.CloudMonitoring != nil { - if err := view.Register(opencensus.ClientAPILatencyView, opencensus.ClientStartedRPCsView, opencensus.ClientCompletedRPCsView, opencensus.ClientRoundtripLatencyView); err != nil { - return fmt.Errorf("failed to register default client views: %v", err) - } - if err := view.Register(opencensus.ServerStartedRPCsView, opencensus.ServerCompletedRPCsView, opencensus.ServerLatencyView); err != nil { 
- return fmt.Errorf("failed to register default server views: %v", err) + if err := view.Register(defaultViews...); err != nil { + return fmt.Errorf("failed to register observability views: %v", err) } view.SetReportingPeriod(defaultMetricsReportingInterval) view.RegisterExporter(exporter.(view.Exporter)) @@ -130,11 +140,16 @@ func stopOpenCensus() { if exporter != nil { internal.ClearGlobalDialOptions() internal.ClearGlobalServerOptions() + // This Unregister call guarantees the data recorded gets sent to + // exporter, synchronising the view package and exporter. Doesn't matter + // if views not registered, will be a noop if not registered. + view.Unregister(defaultViews...) // Call these unconditionally, doesn't matter if not registered, will be // a noop if not registered. trace.UnregisterExporter(exporter) view.UnregisterExporter(exporter) + // This Flush call makes sure recorded telemetry get sent to backend. exporter.Flush() exporter.Close() } diff --git a/stats/opencensus/server_metrics.go b/stats/opencensus/server_metrics.go index 49a54232b7b6..c9763bed441b 100644 --- a/stats/opencensus/server_metrics.go +++ b/stats/opencensus/server_metrics.go @@ -63,7 +63,7 @@ var ( // ServerSentBytesPerRPCView is the distribution of received bytes per RPC, // keyed on method. ServerSentBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_compressed_bytes_per_rpc", + Name: "grpc.io/server/sent_bytes_per_rpc", Description: "Distribution of sent bytes per RPC, by method.", Measure: serverSentBytesPerRPC, TagKeys: []tag.Key{keyServerMethod}, @@ -72,7 +72,7 @@ var ( // ServerSentCompressedBytesPerRPCView is the distribution of received // compressed bytes per RPC, keyed on method. 
ServerSentCompressedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_bytes_per_rpc", + Name: "grpc.io/server/sent_compressed_bytes_per_rpc", Description: "Distribution of sent compressed bytes per RPC, by method.", Measure: serverSentCompressedBytesPerRPC, TagKeys: []tag.Key{keyServerMethod}, From a1e657ce53ba59e6e4076edaa2c68d398e7f7e4f Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Wed, 15 Mar 2023 11:19:01 -0600 Subject: [PATCH 830/998] client: log last error on subchannel connectivity change (#6109) --- clientconn.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/clientconn.go b/clientconn.go index b50c698a0557..b9cc055075ed 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1113,7 +1113,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) return } ac.state = s - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } From 6f44ae89b1ab4e492516fd8f1770ad16451a9f02 Mon Sep 17 00:00:00 2001 From: wenxuwan Date: Thu, 16 Mar 2023 04:19:40 +0800 Subject: [PATCH 831/998] metadata: add benchmark test for FromIncomingContext and ValueFromIncomingContext (#6117) --- metadata/metadata_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/metadata/metadata_test.go b/metadata/metadata_test.go index 57763cd6a973..9277f2d6c84f 100644 --- a/metadata/metadata_test.go +++ b/metadata/metadata_test.go @@ -335,3 +335,21 @@ func BenchmarkFromOutgoingContext(b *testing.B) { FromOutgoingContext(ctx) } } + +func BenchmarkFromIncomingContext(b *testing.B) { + md := Pairs("X-My-Header-1", "42") + ctx := NewIncomingContext(context.Background(), md) + b.ResetTimer() + for n := 0; n < b.N; 
n++ { + FromIncomingContext(ctx) + } +} + +func BenchmarkValueFromIncomingContext(b *testing.B) { + md := Pairs("X-My-Header-1", "42") + ctx := NewIncomingContext(context.Background(), md) + b.ResetTimer() + for n := 0; n < b.N; n++ { + ValueFromIncomingContext(ctx, "x-my-header-1") + } +} From c84a5005d97fb43bbc01b7d1484c0e4c844bec4c Mon Sep 17 00:00:00 2001 From: Matthew Stevenson <52979934+matthewstevenson88@users.noreply.github.com> Date: Fri, 17 Mar 2023 09:09:42 -0700 Subject: [PATCH 832/998] credentials/alts: defer ALTS stream creation until handshake time (#6077) --- .../alts/internal/handshaker/handshaker.go | 50 ++++++++++----- .../internal/handshaker/handshaker_test.go | 64 +++++++++++++++++++ 2 files changed, 97 insertions(+), 17 deletions(-) diff --git a/credentials/alts/internal/handshaker/handshaker.go b/credentials/alts/internal/handshaker/handshaker.go index 7b953a520e5b..c8a307531429 100644 --- a/credentials/alts/internal/handshaker/handshaker.go +++ b/credentials/alts/internal/handshaker/handshaker.go @@ -138,7 +138,7 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { // and server options (server options struct does not exist now. When // caller can provide endpoints, it should be created. -// altsHandshaker is used to complete a ALTS handshaking between client and +// altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. type altsHandshaker struct { @@ -146,6 +146,8 @@ type altsHandshaker struct { stream altsgrpc.HandshakerService_DoHandshakeClient // the connection to the peer. conn net.Conn + // a virtual connection to the ALTS handshaker service. + clientConn *grpc.ClientConn // client handshake options. clientOpts *ClientHandshakerOptions // server handshake options. 
@@ -154,39 +156,33 @@ type altsHandshaker struct { side core.Side } -// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewClientHandshaker creates a core.Handshaker that performs a client-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, clientOpts: opts, side: core.ClientSide, }, nil } -// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewServerHandshaker creates a core.Handshaker that performs a server-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, serverOpts: opts, side: core.ServerSide, }, nil } -// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once +// ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. 
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -198,6 +194,16 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. + if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + // Create target identities from service account list. targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) for _, account := range h.clientOpts.TargetServiceAccounts { @@ -229,7 +235,7 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return conn, authInfo, nil } -// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once +// ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -241,6 +247,16 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. 
+ if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + p := make([]byte, frameLimit) n, err := h.conn.Read(p) if err != nil { diff --git a/credentials/alts/internal/handshaker/handshaker_test.go b/credentials/alts/internal/handshaker/handshaker_test.go index 14a0721054f2..53aee6423158 100644 --- a/credentials/alts/internal/handshaker/handshaker_test.go +++ b/credentials/alts/internal/handshaker/handshaker_test.go @@ -25,6 +25,8 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" grpc "google.golang.org/grpc" core "google.golang.org/grpc/credentials/alts/internal" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" @@ -283,3 +285,65 @@ func (s) TestPeerNotResponding(t *testing.T) { t.Errorf("ClientHandshake() = %v, want %v", got, want) } } + +func (s) TestNewClientHandshaker(t *testing.T) { + conn := testutil.NewTestConn(nil, nil) + clientConn := &grpc.ClientConn{} + opts := &ClientHandshakerOptions{} + hs, err := NewClientHandshaker(context.Background(), clientConn, conn, opts) + if err != nil { + t.Errorf("NewClientHandshaker returned unexpected error: %v", err) + } + expectedHs := &altsHandshaker{ + stream: nil, + conn: conn, + clientConn: clientConn, + clientOpts: opts, + serverOpts: nil, + side: core.ClientSide, + } + cmpOpts := []cmp.Option{ + cmp.AllowUnexported(altsHandshaker{}), + cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), + } + if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ + t.Errorf("NewClientHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) + } + if hs.(*altsHandshaker).stream != nil { + t.Errorf("NewClientHandshaker() returned handshaker with non-nil stream") + } + if hs.(*altsHandshaker).clientConn != clientConn { + t.Errorf("NewClientHandshaker() returned handshaker with unexpected clientConn") + } +} + +func (s) TestNewServerHandshaker(t *testing.T) { + conn := testutil.NewTestConn(nil, nil) + clientConn := &grpc.ClientConn{} + opts := &ServerHandshakerOptions{} + hs, err := NewServerHandshaker(context.Background(), clientConn, conn, opts) + if err != nil { + t.Errorf("NewServerHandshaker returned unexpected error: %v", err) + } + expectedHs := &altsHandshaker{ + stream: nil, + conn: conn, + clientConn: clientConn, + clientOpts: nil, + serverOpts: opts, + side: core.ServerSide, + } + cmpOpts := []cmp.Option{ + cmp.AllowUnexported(altsHandshaker{}), + cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), + } + if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ + t.Errorf("NewServerHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) + } + if hs.(*altsHandshaker).stream != nil { + t.Errorf("NewServerHandshaker() returned handshaker with non-nil stream") + } + if hs.(*altsHandshaker).clientConn != clientConn { + t.Errorf("NewServerHandshaker() returned handshaker with unexpected clientConn") + } +} From b638faff2204da74b93bb4aee28051b15a7a33bd Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 17 Mar 2023 14:34:52 -0400 Subject: [PATCH 833/998] stats/opencensus: Add message prefix to metrics names (#6126) --- gcp/observability/observability_test.go | 16 ++++++++-------- gcp/observability/opencensus.go | 8 ++++---- stats/opencensus/client_metrics.go | 20 ++++++++++---------- stats/opencensus/e2e_test.go | 24 ++++++++++++------------ stats/opencensus/server_metrics.go | 20 ++++++++++---------- 5 files changed, 44 insertions(+), 44 deletions(-) diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index b9edf89fac6b..bb58440d43ed 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -436,17 +436,17 @@ func (s) TestOpenCensusIntegration(t *testing.T) { if value := fe.SeenViews["grpc.io/server/server_latency"]; value != TypeOpenCensusViewDistribution { errs = append(errs, fmt.Errorf("grpc.io/server/server_latency: %s != %s", value, TypeOpenCensusViewDistribution)) } - if value := fe.SeenViews["grpc.io/client/sent_compressed_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { - errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/sent_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + if value := fe.SeenViews["grpc.io/client/sent_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for 
grpc.io/client/sent_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) } - if value := fe.SeenViews["grpc.io/client/received_compressed_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { - errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/received_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + if value := fe.SeenViews["grpc.io/client/received_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/received_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) } - if value := fe.SeenViews["grpc.io/server/sent_compressed_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { - errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/sent_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + if value := fe.SeenViews["grpc.io/server/sent_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/sent_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) } - if value := fe.SeenViews["grpc.io/server/received_compressed_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { - errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/received_compressed_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + if value := fe.SeenViews["grpc.io/server/received_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/received_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) } if fe.SeenSpans <= 0 { errs = append(errs, fmt.Errorf("unexpected number of seen spans: %v <= 0", fe.SeenSpans)) diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go 
index be8a615e5b0b..a46b7665c343 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -39,13 +39,13 @@ var ( opencensus.ClientStartedRPCsView, opencensus.ClientCompletedRPCsView, opencensus.ClientRoundtripLatencyView, - opencensus.ClientSentCompressedBytesPerRPCView, - opencensus.ClientReceivedCompressedBytesPerRPCView, + opencensus.ClientSentCompressedMessageBytesPerRPCView, + opencensus.ClientReceivedCompressedMessageBytesPerRPCView, opencensus.ClientAPILatencyView, opencensus.ServerStartedRPCsView, opencensus.ServerCompletedRPCsView, - opencensus.ServerSentCompressedBytesPerRPCView, - opencensus.ServerReceivedCompressedBytesPerRPCView, + opencensus.ServerSentCompressedMessageBytesPerRPCView, + opencensus.ServerReceivedCompressedMessageBytesPerRPCView, opencensus.ServerLatencyView, } ) diff --git a/stats/opencensus/client_metrics.go b/stats/opencensus/client_metrics.go index 9286d95289a1..4d45845f8c56 100644 --- a/stats/opencensus/client_metrics.go +++ b/stats/opencensus/client_metrics.go @@ -72,12 +72,12 @@ var ( TagKeys: []tag.Key{keyClientMethod}, Aggregation: bytesDistribution, } - // ClientSentCompressedBytesPerRPCView is the distribution of compressed - // sent bytes per RPC, keyed on method. - ClientSentCompressedBytesPerRPCView = &view.View{ + // ClientSentCompressedMessageBytesPerRPCView is the distribution of + // compressed sent message bytes per RPC, keyed on method. 
+ ClientSentCompressedMessageBytesPerRPCView = &view.View{ Measure: clientSentCompressedBytesPerRPC, - Name: "grpc.io/client/sent_compressed_bytes_per_rpc", - Description: "Distribution of sent compressed bytes per RPC, by method.", + Name: "grpc.io/client/sent_compressed_message_bytes_per_rpc", + Description: "Distribution of sent compressed message bytes per RPC, by method.", TagKeys: []tag.Key{keyClientMethod}, Aggregation: bytesDistribution, } @@ -90,12 +90,12 @@ var ( TagKeys: []tag.Key{keyClientMethod}, Aggregation: bytesDistribution, } - // ClientReceivedCompressedBytesPerRPCView is the distribution of compressed - // received bytes per RPC, keyed on method. - ClientReceivedCompressedBytesPerRPCView = &view.View{ + // ClientReceivedCompressedMessageBytesPerRPCView is the distribution of + // compressed received message bytes per RPC, keyed on method. + ClientReceivedCompressedMessageBytesPerRPCView = &view.View{ Measure: clientReceivedCompressedBytesPerRPC, - Name: "grpc.io/client/received_compressed_bytes_per_rpc", - Description: "Distribution of received compressed bytes per RPC, by method.", + Name: "grpc.io/client/received_compressed_message_bytes_per_rpc", + Description: "Distribution of received compressed message bytes per RPC, by method.", TagKeys: []tag.Key{keyClientMethod}, Aggregation: bytesDistribution, } diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 943713f154d2..3fd2f0614a15 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -236,13 +236,13 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { ClientCompletedRPCsView, ServerCompletedRPCsView, ClientSentBytesPerRPCView, - ClientSentCompressedBytesPerRPCView, + ClientSentCompressedMessageBytesPerRPCView, ServerSentBytesPerRPCView, - ServerSentCompressedBytesPerRPCView, + ServerSentCompressedMessageBytesPerRPCView, ClientReceivedBytesPerRPCView, - ClientReceivedCompressedBytesPerRPCView, + ClientReceivedCompressedMessageBytesPerRPCView, 
ServerReceivedBytesPerRPCView, - ServerReceivedCompressedBytesPerRPCView, + ServerReceivedCompressedMessageBytesPerRPCView, ClientSentMessagesPerRPCView, ServerSentMessagesPerRPCView, ClientReceivedMessagesPerRPCView, @@ -505,11 +505,11 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { }, }, { - metric: ClientSentCompressedBytesPerRPCView, + metric: ClientSentCompressedMessageBytesPerRPCView, wantVI: &viewInformation{ aggType: view.AggTypeDistribution, aggBuckets: bytesDistributionBounds, - desc: "Distribution of sent compressed bytes per RPC, by method.", + desc: "Distribution of sent compressed message bytes per RPC, by method.", tagKeys: []tag.Key{ cmtk, }, @@ -579,11 +579,11 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { }, }, { - metric: ServerSentCompressedBytesPerRPCView, + metric: ServerSentCompressedMessageBytesPerRPCView, wantVI: &viewInformation{ aggType: view.AggTypeDistribution, aggBuckets: bytesDistributionBounds, - desc: "Distribution of sent compressed bytes per RPC, by method.", + desc: "Distribution of sent compressed message bytes per RPC, by method.", tagKeys: []tag.Key{ smtk, }, @@ -653,11 +653,11 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { }, }, { - metric: ClientReceivedCompressedBytesPerRPCView, + metric: ClientReceivedCompressedMessageBytesPerRPCView, wantVI: &viewInformation{ aggType: view.AggTypeDistribution, aggBuckets: bytesDistributionBounds, - desc: "Distribution of received compressed bytes per RPC, by method.", + desc: "Distribution of received compressed message bytes per RPC, by method.", tagKeys: []tag.Key{ cmtk, }, @@ -727,11 +727,11 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { }, }, { - metric: ServerReceivedCompressedBytesPerRPCView, + metric: ServerReceivedCompressedMessageBytesPerRPCView, wantVI: &viewInformation{ aggType: view.AggTypeDistribution, aggBuckets: bytesDistributionBounds, - desc: "Distribution of received compressed bytes per RPC, by method.", + desc: "Distribution of received 
compressed message bytes per RPC, by method.", tagKeys: []tag.Key{ smtk, }, diff --git a/stats/opencensus/server_metrics.go b/stats/opencensus/server_metrics.go index c9763bed441b..4f087c2209a7 100644 --- a/stats/opencensus/server_metrics.go +++ b/stats/opencensus/server_metrics.go @@ -69,11 +69,11 @@ var ( TagKeys: []tag.Key{keyServerMethod}, Aggregation: bytesDistribution, } - // ServerSentCompressedBytesPerRPCView is the distribution of received - // compressed bytes per RPC, keyed on method. - ServerSentCompressedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_compressed_bytes_per_rpc", - Description: "Distribution of sent compressed bytes per RPC, by method.", + // ServerSentCompressedMessageBytesPerRPCView is the distribution of + // received compressed message bytes per RPC, keyed on method. + ServerSentCompressedMessageBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_compressed_message_bytes_per_rpc", + Description: "Distribution of sent compressed message bytes per RPC, by method.", Measure: serverSentCompressedBytesPerRPC, TagKeys: []tag.Key{keyServerMethod}, Aggregation: bytesDistribution, @@ -87,11 +87,11 @@ var ( TagKeys: []tag.Key{keyServerMethod}, Aggregation: bytesDistribution, } - // ServerReceivedCompressedBytesPerRPCView is the distribution of sent bytes - // per RPC, keyed on method. - ServerReceivedCompressedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/received_compressed_bytes_per_rpc", - Description: "Distribution of received compressed bytes per RPC, by method.", + // ServerReceivedCompressedMessageBytesPerRPCView is the distribution of + // sent compressed message bytes per RPC, keyed on method. 
+ ServerReceivedCompressedMessageBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_compressed_message_bytes_per_rpc", + Description: "Distribution of received compressed message bytes per RPC, by method.", Measure: serverReceivedCompressedBytesPerRPC, TagKeys: []tag.Key{keyServerMethod}, Aggregation: bytesDistribution, From a75fd73d616bea96cfa1853b5bcd0aa762be0b7b Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 17 Mar 2023 13:38:15 -0700 Subject: [PATCH 834/998] Change version to 1.55.0-dev (#6131) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 1a3ff5289096..99487240565d 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.54.0-dev" +const Version = "1.55.0-dev" From 66e35339a4b76a807ab757d5897b9b8a3e554edc Mon Sep 17 00:00:00 2001 From: Rusakov Andrei <42193836+psyhatter@users.noreply.github.com> Date: Sat, 18 Mar 2023 06:21:22 +0700 Subject: [PATCH 835/998] status: handle wrapped errors (#6031) --- status/status.go | 21 ++++++-------- status/status_test.go | 65 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 12 deletions(-) diff --git a/status/status.go b/status/status.go index 623be39f26ba..7577625b0c40 100644 --- a/status/status.go +++ b/status/status.go @@ -77,7 +77,8 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// *Status`, or if err wraps a type satisfying this, the appropriate Status is +// returned. // // - If err is nil, a Status is returned with codes.OK and no message. 
// @@ -88,9 +89,8 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { + var se interface{ GRPCStatus() *Status } + if errors.As(err, &se) { return se.GRPCStatus(), true } return New(codes.Unknown, err.Error()), false @@ -103,19 +103,16 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } // FromContextError converts a context error or wrapped context error into a diff --git a/status/status_test.go b/status/status_test.go index 420fb6b8102c..244cb8151fd5 100644 --- a/status/status_test.go +++ b/status/status_test.go @@ -32,6 +32,7 @@ import ( cpb "google.golang.org/genproto/googleapis/rpc/code" epb "google.golang.org/genproto/googleapis/rpc/errdetails" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/status" @@ -192,6 +193,70 @@ func (s) TestFromErrorUnknownError(t *testing.T) { } } +func (s) TestFromErrorWrapped(t *testing.T) { + const code, message = codes.Internal, "test description" + err := fmt.Errorf("wrapped error: %w", Error(code, message)) + s, ok := FromError(err) + if !ok || s.Code() != code || s.Message() != message || s.Err() == nil { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) + } +} + +func (s) 
TestFromErrorImplementsInterfaceWrapped(t *testing.T) { + const code, message = codes.Internal, "test description" + err := fmt.Errorf("wrapped error: %w", customError{Code: code, Message: message}) + s, ok := FromError(err) + if !ok || s.Code() != code || s.Message() != message || s.Err() == nil { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) + } +} + +func (s) TestCode(t *testing.T) { + const code = codes.Internal + err := Error(code, "test description") + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) TestCodeOK(t *testing.T) { + if s, code := Code(nil), codes.OK; s != code { + t.Fatalf("Code(%v) = %v; want ", nil, s, code) + } +} + +func (s) TestCodeImplementsInterface(t *testing.T) { + const code = codes.Internal + err := customError{Code: code, Message: "test description"} + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) TestCodeUnknownError(t *testing.T) { + const code = codes.Unknown + err := errors.New("unknown error") + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) TestCodeWrapped(t *testing.T) { + const code = codes.Internal + err := fmt.Errorf("wrapped: %w", Error(code, "test description")) + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) TestCodeImplementsInterfaceWrapped(t *testing.T) { + const code = codes.Internal + err := fmt.Errorf("wrapped: %w", customError{Code: code, Message: "test description"}) + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + func (s) TestConvertKnownError(t *testing.T) { code, message := codes.Internal, "test description" err := Error(code, message) From 70c52915099a3b30848d0cb22e2f8951dd5aed7f Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 17 Mar 2023 20:55:52 -0400 Subject: [PATCH 
836/998] observability: remove import replace directive and switch it to point to latest commit (#6122) --- gcp/observability/go.mod | 30 +- gcp/observability/go.sum | 621 +++------------------- interop/observability/go.mod | 24 +- interop/observability/go.sum | 33 +- stats/opencensus/go.mod | 8 +- stats/opencensus/go.sum | 996 +---------------------------------- 6 files changed, 153 insertions(+), 1559 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 21fc31eeec88..db221876ecee 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -8,24 +8,24 @@ require ( github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 - golang.org/x/oauth2 v0.4.0 - google.golang.org/api v0.103.0 - google.golang.org/grpc v1.52.0 - google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0 + golang.org/x/oauth2 v0.5.0 + google.golang.org/api v0.109.0 + google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba + google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 ) require ( - cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go v0.109.0 // indirect + cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/longrunning v0.3.0 // indirect - cloud.google.com/go/monitoring v1.8.0 // indirect - cloud.google.com/go/trace v1.4.0 // indirect + cloud.google.com/go/longrunning v0.4.0 // indirect + cloud.google.com/go/monitoring v1.12.0 // indirect + cloud.google.com/go/trace v1.8.0 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - 
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect @@ -34,10 +34,6 @@ require ( golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect + google.golang.org/protobuf v1.29.1 // indirect ) - -replace google.golang.org/grpc => ../../ - -replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index f5aaa975652c..ab1d5ef81a05 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1,8 +1,8 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,7 +15,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY 
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -26,378 +25,46 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= 
-cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= 
-cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= +cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/clouddms v1.3.0/go.mod 
h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= 
+cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= 
-cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow 
v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod 
h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam 
v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= -cloud.google.com/go/longrunning v0.3.0/go.mod 
h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/longrunning v0.4.0 h1:v+X4EwhHl6xE+TG1XgXj4T1XpKKs7ZevcAJ3FOu0YmY= +cloud.google.com/go/longrunning v0.4.0/go.mod h1:eF3Qsw58iX/bkKtVjMTYpH0LRjQ2goDkjkNQTlzq/ZM= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod 
h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/oslogin 
v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= 
-cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod 
h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod 
h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod 
h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0 h1:qO9eLn2esajC9sxpqp1YKX37nXC3L4BfGnPS0Cx9dYo= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/videointelligence 
v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows 
v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= @@ -406,37 +73,37 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -461,8 +128,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf 
v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -478,8 +146,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -495,7 +161,6 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -505,29 +170,17 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod 
h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -537,33 +190,24 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp 
v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -583,16 +227,12 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -605,6 +245,7 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -627,11 +268,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -660,31 +299,16 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net 
v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -700,18 +324,9 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -722,11 +337,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -758,13 +372,11 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -772,40 +384,18 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -815,18 +405,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -870,7 +456,6 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -878,16 +463,10 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -919,29 +498,9 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod 
h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= +google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -949,6 +508,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -983,13 +543,10 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1011,61 +568,38 @@ google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= 
-google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod 
h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa h1:qQPhfbPO23fwm/9lQr91L1u62Zo6cm+zI+slZT+uf+o= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc 
v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba h1:puuDphNHQZRngQpzUGvfXMBFBv6DuahfWMZaj0jVtjw= +google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 h1:MeDNVH2KmQ9Z3AbXKsvU9UcbRR8LfpZVLmZAVWIX0nI= +google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204/go.mod h1:Dg7VaOjf0r9QhRn/YpwSf3vKQz1ixulffTlhEarxEXA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1079,9 +613,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/interop/observability/go.mod b/interop/observability/go.mod index e4ab61e5a503..adda4920a6e7 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -8,35 +8,35 @@ require ( ) require ( - cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go v0.109.0 // indirect + cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/logging v1.6.1 // indirect - cloud.google.com/go/longrunning v0.3.0 // indirect - cloud.google.com/go/monitoring v1.8.0 // indirect - cloud.google.com/go/trace v1.4.0 // indirect + cloud.google.com/go/longrunning v0.4.0 // indirect + cloud.google.com/go/monitoring v1.12.0 // indirect + cloud.google.com/go/trace v1.8.0 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect go.opencensus.io v0.24.0 
// indirect golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect + golang.org/x/oauth2 v0.5.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - google.golang.org/api v0.103.0 // indirect + google.golang.org/api v0.109.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/grpc/stats/opencensus v0.0.0-20230221205128-8702a2ebf4b0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect + google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 // indirect + google.golang.org/protobuf v1.29.1 // indirect ) replace google.golang.org/grpc => ../.. diff --git a/interop/observability/go.sum b/interop/observability/go.sum index f5aaa975652c..b2c181710f6a 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -33,8 +33,9 @@ cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+ cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= +cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod 
h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= @@ -116,8 +117,9 @@ cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -225,8 +227,9 @@ cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6 cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.0 h1:v+X4EwhHl6xE+TG1XgXj4T1XpKKs7ZevcAJ3FOu0YmY= +cloud.google.com/go/longrunning v0.4.0/go.mod h1:eF3Qsw58iX/bkKtVjMTYpH0LRjQ2goDkjkNQTlzq/ZM= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod 
h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= @@ -242,8 +245,9 @@ cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSox cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -363,8 +367,9 @@ cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0 h1:qO9eLn2esajC9sxpqp1YKX37nXC3L4BfGnPS0Cx9dYo= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= 
cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= @@ -461,8 +466,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -507,8 +513,9 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= 
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -710,8 +717,9 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -940,8 +948,9 @@ google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= google.golang.org/api v0.100.0/go.mod 
h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ= google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= +google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1063,8 +1072,9 @@ google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZV google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa h1:qQPhfbPO23fwm/9lQr91L1u62Zo6cm+zI+slZT+uf+o= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ 
-1080,8 +1090,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index cc895a5ae393..a1980f2c43c7 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -5,17 +5,15 @@ go 1.17 require ( github.com/google/go-cmp v0.5.9 go.opencensus.io v0.24.0 - google.golang.org/grpc v1.52.0 + google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba ) require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect golang.org/x/net v0.8.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.29.1 // indirect ) - -replace google.golang.org/grpc => ../../ diff --git a/stats/opencensus/go.sum 
b/stats/opencensus/go.sum index ee791270f20f..df04653cfc1d 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -1,1028 +1,99 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= 
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics 
v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads 
v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/billing v1.4.0/go.mod 
h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute 
v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= 
-cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore 
v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod 
h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod 
h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod 
h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod 
h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= 
-cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= 
-cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= 
-cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter 
v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage 
v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= 
-cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= 
-cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod 
h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 
v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term 
v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= 
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= 
-google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto 
v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto 
v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
-google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto 
v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba 
h1:puuDphNHQZRngQpzUGvfXMBFBv6DuahfWMZaj0jVtjw= +google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1031,28 +102,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= From 78099db03fb90b47ec868fa87ac4696560b0d81a Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 20 Mar 2023 16:32:08 -0400 Subject: [PATCH 837/998] gcp/observability: Switch hex encoding to string() method (#6138) --- gcp/observability/logging.go | 8 ++++---- gcp/observability/observability_test.go | 7 +++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index b04c60ba0430..bcf7a6a7cf8b 100644 --- a/gcp/observability/logging.go +++ 
b/gcp/observability/logging.go @@ -327,14 +327,14 @@ func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog // client side span, populated through opencensus trace package. if span := trace.FromContext(ctx); span != nil { sc := span.SpanContext() - gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + fmt.Sprintf("%x", sc.TraceID) - gcploggingEntry.SpanID = fmt.Sprintf("%x", sc.SpanID) + gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + sc.TraceID.String() + gcploggingEntry.SpanID = sc.SpanID.String() } } else { // server side span, populated through stats/opencensus package. if tID, sID, ok := opencensus.GetTraceAndSpanID(ctx); ok { - gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + fmt.Sprintf("%x", tID) - gcploggingEntry.SpanID = fmt.Sprintf("%x", sID) + gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + tID.String() + gcploggingEntry.SpanID = sID.String() } } return gcploggingEntry diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index bb58440d43ed..96372bea6a38 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -121,12 +121,11 @@ type traceAndSpanIDString struct { } // idsToString is a helper that converts from generated trace and span IDs to -// the string version stored in trace message events. (hex 16 lowercase encoded, -// and extra data attached to trace id). +// the string version stored in trace message events. 
func idsToString(tasi traceAndSpanID, projectID string) traceAndSpanIDString { return traceAndSpanIDString{ - traceID: "projects/" + projectID + "/traces/" + fmt.Sprintf("%x", tasi.traceID), - spanID: fmt.Sprintf("%x", tasi.spanID), + traceID: "projects/" + projectID + "/traces/" + tasi.traceID.String(), + spanID: tasi.spanID.String(), } } From 4efec30eb3dfc40cf69f1f987349d4774758d753 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 20 Mar 2023 19:31:30 -0400 Subject: [PATCH 838/998] stats/opencensus: remove leading slash for per call metrics (#6141) --- stats/opencensus/e2e_test.go | 73 +++++++++++++++++++++++++++++++--- stats/opencensus/opencensus.go | 2 +- 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 3fd2f0614a15..0b86ede4c2d5 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -193,7 +193,7 @@ func (vi *viewInformation) Equal(vi2 *viewInformation) bool { // less. This must be called with non nil view information that is aggregated // with distribution data. Returns a nil error if correct count information // found, non nil error if correct information not found. -func distributionDataLatencyCount(vi *viewInformation, countWant int64) error { +func distributionDataLatencyCount(vi *viewInformation, countWant int64, wantTags [][]tag.Tag) error { var totalCount int64 var largestIndexWithFive int for i, bucket := range vi.aggBuckets { @@ -204,9 +204,21 @@ func distributionDataLatencyCount(vi *viewInformation, countWant int64) error { break } } + // Sort rows by string name. This is to take away non determinism in the row + // ordering passed to the Exporter, while keeping the row data. + sort.Slice(vi.rows, func(i, j int) bool { + return vi.rows[i].String() > vi.rows[j].String() + }) // Iterating through rows sums up data points for all methods. 
In this case, // a data point for the unary and for the streaming RPC. - for _, row := range vi.rows { + for i, row := range vi.rows { + // The method names corresponding to unary and streaming call should + // have the leading slash removed. + if diff := cmp.Diff(row.Tags, wantTags[i], cmp.Comparer(func(a tag.Key, b tag.Key) bool { + return a.Name() == b.Name() + })); diff != "" { + return fmt.Errorf("wrong tag keys for unary method -got, +want: %v", diff) + } // This could potentially have an extra measurement in buckets above 5s, // but that's fine. Count of buckets that could contain up to 5s is a // good enough assertion. @@ -305,8 +317,9 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { cstk := tag.MustNewKey("grpc_client_status") sstk := tag.MustNewKey("grpc_server_status") wantMetrics := []struct { - metric *view.View - wantVI *viewInformation + metric *view.View + wantVI *viewInformation + wantTags [][]tag.Tag // for non determinstic (i.e. latency) metrics. First dimension represents rows. }{ { metric: ClientStartedRPCsView, @@ -913,13 +926,63 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { }, { metric: ClientRoundtripLatencyView, + wantTags: [][]tag.Tag{ + { + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + { + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + }, }, { metric: ServerLatencyView, + wantTags: [][]tag.Tag{ + { + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + { + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + }, }, // Per call metrics: { metric: ClientAPILatencyView, + wantTags: [][]tag.Tag{ + { + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + { + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + }, }, } // Unregister all the views. 
Unregistering a view causes a synchronous @@ -945,7 +1008,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { // RPCs have a context timeout of 5s, so all the recorded // measurements (one per RPC - two total) should fall within 5 // second buckets. - if err := distributionDataLatencyCount(vi, 2); err != nil { + if err := distributionDataLatencyCount(vi, 2, wantMetric.wantTags); err != nil { t.Fatalf("Invalid OpenCensus export view data for metric %v: %v", metricName, err) } continue diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index fc7ee341ea63..4f63b8c24b39 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -104,7 +104,7 @@ func perCallTracesAndMetrics(err error, span *trace.Span, startTime time.Time, m callLatency := float64(time.Since(startTime)) / float64(time.Millisecond) ocstats.RecordWithOptions(context.Background(), ocstats.WithTags( - tag.Upsert(keyClientMethod, method), + tag.Upsert(keyClientMethod, removeLeadingSlash(method)), tag.Upsert(keyClientStatus, canonicalString(s.Code())), ), ocstats.WithMeasurements( From a2ca46c4840562b0efae643b370b7eac316828ca Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 21 Mar 2023 13:19:15 -0700 Subject: [PATCH 839/998] examples: organize READMEs better (#6121) --- examples/README.md | 33 +++++++-------------------------- examples/helloworld/README.md | 29 +++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 26 deletions(-) create mode 100644 examples/helloworld/README.md diff --git a/examples/README.md b/examples/README.md index bb2138f26ffb..2a5c88cd1cbe 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,29 +1,10 @@ -# gRPC Hello World +# Examples -Follow these setup to run the [quick start][] example: +The following examples are provided to help users get started with gRPC-Go. +They are arranged as follows: - 1. 
Get the code: +* `helloworld` - a simple example showing a basic client and server +* `routeguide` - a more complicated example showing different types of streaming RPCs +* `features` - a collection of examples, each focused on a single gRPC feature - ```console - $ go get google.golang.org/grpc/examples/helloworld/greeter_client - $ go get google.golang.org/grpc/examples/helloworld/greeter_server - ``` - - 2. Run the server: - - ```console - $ $(go env GOPATH)/bin/greeter_server & - ``` - - 3. Run the client: - - ```console - $ $(go env GOPATH)/bin/greeter_client - Greeting: Hello world - ``` - -For more details (including instructions for making a small change to the -example code) or if you're having trouble running this example, see [Quick -Start][]. - -[quick start]: https://grpc.io/docs/languages/go/quickstart +`data` is a directory containing data used by the examples, e.g. TLS certificates. diff --git a/examples/helloworld/README.md b/examples/helloworld/README.md new file mode 100644 index 000000000000..bb2138f26ffb --- /dev/null +++ b/examples/helloworld/README.md @@ -0,0 +1,29 @@ +# gRPC Hello World + +Follow these setup to run the [quick start][] example: + + 1. Get the code: + + ```console + $ go get google.golang.org/grpc/examples/helloworld/greeter_client + $ go get google.golang.org/grpc/examples/helloworld/greeter_server + ``` + + 2. Run the server: + + ```console + $ $(go env GOPATH)/bin/greeter_server & + ``` + + 3. Run the client: + + ```console + $ $(go env GOPATH)/bin/greeter_client + Greeting: Hello world + ``` + +For more details (including instructions for making a small change to the +example code) or if you're having trouble running this example, see [Quick +Start][]. 
+ +[quick start]: https://grpc.io/docs/languages/go/quickstart From 7651e620904800be4b20667fba9687b87f730924 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 21 Mar 2023 13:58:51 -0700 Subject: [PATCH 840/998] transport: add a draining state check before creating streams (#6142) --- internal/transport/http2_client.go | 2 +- internal/transport/transport_test.go | 28 +++++++++++++++++----------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 9826feb8c699..c0c9ca3bb8e4 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -782,7 +782,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() - if t.activeStreams == nil { // Can be niled from Close(). + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 0c5e00a75cce..c0d85b2a88d8 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -802,6 +802,9 @@ func (s) TestLargeMessageWithDelayRead(t *testing.T) { } } +// TestGracefulClose ensures that GracefulClose allows in-flight streams to +// proceed until they complete naturally, while not allowing creation of new +// streams during this window. func (s) TestGracefulClose(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, pingpong) defer cancel() @@ -817,6 +820,9 @@ func (s) TestGracefulClose(t *testing.T) { }() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) defer cancel() + + // Create a stream that will exist for this whole test and confirm basic + // functionality. 
s, err := ct.NewStream(ctx, &CallHdr{}) if err != nil { t.Fatalf("NewStream(_, _) = _, %v, want _, ", err) @@ -837,31 +843,31 @@ func (s) TestGracefulClose(t *testing.T) { if _, err := s.Read(recvMsg); err != nil { t.Fatalf("Error while reading: %v", err) } + + // Gracefully close the transport, which should not affect the existing + // stream. ct.GracefulClose() + var wg sync.WaitGroup - // Expect the failure for all the follow-up streams because ct has been closed gracefully. + // Expect errors creating new streams because the client transport has been + // gracefully closed. for i := 0; i < 200; i++ { wg.Add(1) go func() { defer wg.Done() - str, err := ct.NewStream(ctx, &CallHdr{}) - if err != nil && err.(*NewStreamError).Err == ErrConnClosing { + _, err := ct.NewStream(ctx, &CallHdr{}) + if err != nil && err.(*NewStreamError).Err == ErrConnClosing && err.(*NewStreamError).AllowTransparentRetry { return - } else if err != nil { - t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing) - return - } - ct.Write(str, nil, nil, &Options{Last: true}) - if _, err := str.Read(make([]byte, 8)); err != errStreamDrain && err != ErrConnClosing { - t.Errorf("_.Read(_) = _, %v, want _, %v or %v", err, errStreamDrain, ErrConnClosing) } + t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing) }() } + + // Confirm the existing stream still functions as expected. ct.Write(s, nil, nil, &Options{Last: true}) if _, err := s.Read(incomingHeader); err != io.EOF { t.Fatalf("Client expected EOF from the server. Got: %v", err) } - // The stream which was created before graceful close can still proceed. 
wg.Wait() } From cdab8ae5c4544528261b36476c2d7c154c84f813 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 21 Mar 2023 15:37:39 -0700 Subject: [PATCH 841/998] clusterresolver: push empty config to child policy upon removal of cluster resource (#6125) --- .../clusterresolver/clusterresolver_test.go | 188 -------- .../clusterresolver/e2e_test/balancer_test.go | 450 ++++++++++++++++++ .../clusterresolver/e2e_test/eds_impl_test.go | 3 +- .../clusterresolver/resource_resolver.go | 17 +- .../clusterresolver/resource_resolver_dns.go | 6 + .../clusterresolver/resource_resolver_eds.go | 25 +- xds/internal/xdsclient/authority.go | 1 + xds/internal/xdsclient/client.go | 5 + xds/internal/xdsclient/clientimpl_watchers.go | 16 - 9 files changed, 501 insertions(+), 210 deletions(-) create mode 100644 xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 940de0aacbe2..f327c8cf5fc5 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -147,22 +147,6 @@ func (f *fakeChildBalancer) waitForClientConnStateChangeVerifyBalancerConfig(ctx return nil } -func (f *fakeChildBalancer) waitForClientConnStateChange(ctx context.Context) error { - _, err := f.clientConnState.Receive(ctx) - if err != nil { - return err - } - return nil -} - -func (f *fakeChildBalancer) waitForResolverError(ctx context.Context) error { - _, err := f.resolverError.Receive(ctx) - if err != nil { - return err - } - return nil -} - func (f *fakeChildBalancer) waitForSubConnStateChange(ctx context.Context, wantState *scStateChange) error { val, err := f.subConnState.Receive(ctx) if err != nil { @@ -258,178 +242,6 @@ func (s) TestSubConnStateChange(t *testing.T) { } } -// TestErrorFromXDSClientUpdate verifies that an error from xdsClient update is -// handled 
correctly. -// -// If it's resource-not-found, watch will NOT be canceled, the EDS impl will -// receive an empty EDS update, and new RPCs will fail. -// -// If it's connection error, nothing will happen. This will need to change to -// handle fallback. -func (s) TestErrorFromXDSClientUpdate(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", Name) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSService), - }); err != nil { - t.Fatal(err) - } - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) - edsLB, err := waitForNewChildLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - if err := edsLB.waitForClientConnStateChange(ctx); err != nil { - t.Fatalf("EDS impl got unexpected update: %v", err) - } - - connectionErr := xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "connection error") - xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, connectionErr) - - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if _, err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { - t.Fatal("watch was canceled, want not canceled (timeout error)") - } - - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { - t.Fatal(err) - } - if err := 
edsLB.waitForResolverError(ctx); err != nil { - t.Fatalf("want resolver error, got %v", err) - } - - resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") - xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, resourceErr) - // Even if error is resource not found, watch shouldn't be canceled, because - // this is an EDS resource removed (and xds client actually never sends this - // error, but we still handles it). - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if _, err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { - t.Fatal("watch was canceled, want not canceled (timeout error)") - } - if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { - t.Fatal(err) - } - if err := edsLB.waitForResolverError(ctx); err != nil { - t.Fatalf("want resolver error, got %v", err) - } - - // An update with the same service name should not trigger a new watch. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSService), - }); err != nil { - t.Fatal(err) - } - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if _, err := xdsC.WaitForWatchEDS(sCtx); err != context.DeadlineExceeded { - t.Fatal("got unexpected new EDS watch") - } -} - -// TestErrorFromResolver verifies that resolver errors are handled correctly. -// -// If it's resource-not-found, watch will be canceled, the EDS impl will receive -// an empty EDS update, and new RPCs will fail. -// -// If it's connection error, nothing will happen. This will need to change to -// handle fallback. 
-func (s) TestErrorFromResolver(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", Name) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSService), - }); err != nil { - t.Fatal(err) - } - - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback("", xdsresource.EndpointsUpdate{}, nil) - edsLB, err := waitForNewChildLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - if err := edsLB.waitForClientConnStateChange(ctx); err != nil { - t.Fatalf("EDS impl got unexpected update: %v", err) - } - - connectionErr := xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "connection error") - edsB.ResolverError(connectionErr) - - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if _, err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { - t.Fatal("watch was canceled, want not canceled (timeout error)") - } - - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { - t.Fatal("eds impl got EDS resp, want timeout error") - } - if err := edsLB.waitForResolverError(ctx); err != nil { - t.Fatalf("want resolver error, got %v", err) - } - - resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "clusterResolverBalancer resource not found error") - edsB.ResolverError(resourceErr) - if 
_, err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { - t.Fatalf("want watch to be canceled, waitForCancel failed: %v", err) - } - if err := edsLB.waitForClientConnStateChange(sCtx); err != context.DeadlineExceeded { - t.Fatal(err) - } - if err := edsLB.waitForResolverError(ctx); err != nil { - t.Fatalf("want resolver error, got %v", err) - } - - // An update with the same service name should trigger a new watch, because - // the previous watch was canceled. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSService), - }); err != nil { - t.Fatal(err) - } - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } -} - // Given a list of resource names, verifies that EDS requests for the same are // sent by the EDS balancer, through the fake xDS client. func verifyExpectedRequests(ctx context.Context, fc *fakeclient.Client, resourceNames ...string) error { diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go new file mode 100644 index 000000000000..1513940c940b --- /dev/null +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -0,0 +1,450 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package e2e_test + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" + + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the "cds_experimental" LB policy. +) + +// TestErrorFromParentLB_ConnectionError tests the case where the parent of the +// clusterresolver LB policy sends its a connection error. The parent policy, +// CDS LB policy, sends a connection error when the ADS stream to the management +// server breaks. The test verifies that there is no perceivable effect because +// of this connection error, and that RPCs continue to work (because the LB +// policies are expected to use previously received xDS resources). +func (s) TestErrorFromParentLB_ConnectionError(t *testing.T) { + // Create a listener to be used by the management server. The test will + // close this listener to simulate ADS stream breakage. 
+ lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + // Start an xDS management server with the above restartable listener, and + // push a channel when the stream is closed. + streamClosedCh := make(chan struct{}, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + Listener: lis, + OnStreamClosed: func(int64) { + select { + case streamClosedCh <- struct{}{}: + default: + } + }, + }) + defer cleanup() + + // Start a test backend and extract its host and port. + backend := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + backend.StartServer() + defer backend.Stop() + _, p, err := net.SplitHostPort(backend.Address) + if err != nil { + t.Fatalf("Failed to split test backend address %q: %v", backend.Address, err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("Failed to parse test backend port %q: %v", backend.Address, err) + } + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS xdsClient for use by the cluster_resolver LB policy. 
+ xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cds LB policy as the top-level LB policy, and a corresponding config + // with a single cluster. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cds_experimental":{ + "cluster": "%s" + } + }] + }`, clusterName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Close the listener and ensure that the ADS stream breaks. + lis.Close() + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for ADS stream to close") + default: + } + + // Ensure that RPCs continue to succeed for the next one second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + } +} + +// TestErrorFromParentLB_ResourceNotFound tests the case where the parent of the +// clusterresolver LB policy sends it a resource-not-found error. The parent +// policy, CDS LB policy, sends a resource-not-found error when the cluster +// resource associated with these LB policies is removed by the management +// server. 
The test verifies that the associated EDS is canceled and RPCs fail. +// It also ensures that when the Cluster resource is added back, the EDS +// resource is re-requested and RPCs being to succeed. +func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { + // Start an xDS management server that uses a couple of channels to + // notify the test about the following events: + // - an EDS requested with the expected resource name is requested + // - EDS resource is unrequested, i.e, an EDS request with no resource name + // is received, which indicates that we are not longer interested in that + // resource. + edsResourceRequestedCh := make(chan struct{}, 1) + edsResourceCanceledCh := make(chan struct{}, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() == version.V3EndpointsURL { + switch len(req.GetResourceNames()) { + case 0: + select { + case edsResourceCanceledCh <- struct{}{}: + default: + } + case 1: + if req.GetResourceNames()[0] == edsServiceName { + select { + case edsResourceRequestedCh <- struct{}{}: + default: + } + } + default: + t.Errorf("Unexpected number of resources, %d, in an EDS request", len(req.GetResourceNames())) + } + } + return nil + }, + }) + defer cleanup() + + // Start a test backend and extract its host and port. 
+ backend := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + backend.StartServer() + defer backend.Stop() + _, p, err := net.SplitHostPort(backend.Address) + if err != nil { + t.Fatalf("Failed to split test backend address %q: %v", backend.Address, err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("Failed to parse test backend port %q: %v", backend.Address, err) + } + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS xdsClient for use by the cluster_resolver LB policy. + xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cds LB policy as the top-level LB policy, and a corresponding config + // with a single cluster. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cds_experimental":{ + "cluster": "%s" + } + }] + }`, clusterName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn that kick starts the xDS workflow. 
+ cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Wait for the EDS resource to be requested. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS resource to be requested") + case <-edsResourceRequestedCh: + } + + // Ensure that a successful RPC can be made. + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Delete the cluster resource from the mangement server. + resources.Clusters = nil + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for the EDS resource to be not requested anymore. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS resource to not requested") + case <-edsResourceCanceledCh: + } + + // Ensure that RPCs start to fail with expected error. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + _, err := client.EmptyCall(sCtx, &testpb.Empty{}) + if status.Code(err) == codes.Unavailable && strings.Contains(err.Error(), "all priorities are removed") { + break + } + if err != nil { + t.Logf("EmptyCall RPC failed: %v", err) + } + } + if ctx.Err() != nil { + t.Fatalf("RPCs did not fail after removal of Cluster resource") + } + + // Ensure that the ClientConn is in TransientFailure. + if state := cc.GetState(); state != connectivity.TransientFailure { + t.Fatalf("Unexpected connectivity state for ClientConn, got: %s, want %s", state, connectivity.TransientFailure) + } + + // Configure cluster and endpoints resources in the management server. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for the EDS resource to be requested again. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS resource to be requested") + case <-edsResourceRequestedCh: + } + + // Ensure that a successful RPC can be made. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); err != nil { + t.Logf("EmptyCall RPC failed: %v", err) + continue + } + break + } + if ctx.Err() != nil { + t.Fatalf("RPCs did not fail after removal of Cluster resource") + } +} + +// TestEDSResourceRemoved tests the case where the EDS resource requested by the +// clusterresolver LB policy is removed from the management server. The test +// verifies that the EDS watch is not canceled and that RPCs continue to succeed +// with the previously received configuration. +func (s) TestEDSResourceRemoved(t *testing.T) { + // Start an xDS management server that uses a couple of channels to + // notify the test about the following events: + // - an EDS requested with the expected resource name is requested + // - EDS resource is unrequested, i.e, an EDS request with no resource name + // is received, which indicates that we are not longer interested in that + // resource. 
+ edsResourceRequestedCh := make(chan struct{}, 1) + edsResourceCanceledCh := make(chan struct{}, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() == version.V3EndpointsURL { + switch len(req.GetResourceNames()) { + case 0: + select { + case edsResourceCanceledCh <- struct{}{}: + default: + } + case 1: + if req.GetResourceNames()[0] == edsServiceName { + select { + case edsResourceRequestedCh <- struct{}{}: + default: + } + } + default: + t.Errorf("Unexpected number of resources, %d, in an EDS request", len(req.GetResourceNames())) + } + } + return nil + }, + }) + defer cleanup() + + // Start a test backend and extract its host and port. + backend := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + backend.StartServer() + defer backend.Stop() + _, p, err := net.SplitHostPort(backend.Address) + if err != nil { + t.Fatalf("Failed to split test backend address %q: %v", backend.Address, err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("Failed to parse test backend port %q: %v", backend.Address, err) + } + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS xdsClient for use by the cluster_resolver LB policy. 
+	xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
+	if err != nil {
+		t.Fatalf("Failed to create xDS client: %v", err)
+	}
+	defer close()
+
+	// Create a manual resolver and push a service config specifying the use of
+	// the cds LB policy as the top-level LB policy, and a corresponding config
+	// with a single cluster.
+	r := manual.NewBuilderWithScheme("whatever")
+	jsonSC := fmt.Sprintf(`{
+			"loadBalancingConfig":[{
+				"cds_experimental":{
+					"cluster": "%s"
+				}
+			}]
+		}`, clusterName)
+	scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC)
+	r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient))
+
+	// Create a ClientConn and make a successful RPC.
+	cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))
+	if err != nil {
+		t.Fatalf("failed to dial local test server: %v", err)
+	}
+	defer cc.Close()
+
+	client := testgrpc.NewTestServiceClient(cc)
+	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
+		t.Fatalf("rpc EmptyCall() failed: %v", err)
+	}
+
+	// Delete the endpoints resource from the management server.
+	resources.Endpoints = nil
+	if err := managementServer.Update(ctx, resources); err != nil {
+		t.Fatal(err)
+	}
+
+	// Ensure that RPCs continue to succeed for the next one second, and that the EDS watch is not canceled.
+ for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + select { + case <-edsResourceCanceledCh: + t.Fatal("EDS watch canceled when not expected to be canceled") + default: + } + } +} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 83289ce40df2..ce497c1706d7 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -58,7 +58,8 @@ const ( localityName2 = "my-locality-2" localityName3 = "my-locality-3" - defaultTestTimeout = 5 * time.Second + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond ) type s struct { diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index e47aaf1ceba1..580734a02154 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -57,7 +57,8 @@ type endpointsResolver interface { // resolverNow triggers re-resolution of the resource. resolveNow() - // stop stops resolution of the resource. + // stop stops resolution of the resource. Implementations must not invoke + // any methods on the topLevelResolver interface once `stop()` returns. stop() } @@ -224,6 +225,20 @@ func (rr *resourceResolver) stop() { for _, r := range cm { r.r.stop() } + + // stop() is called when the LB policy is closed or when the underlying + // cluster resource is removed by the management server. In the latter case, + // an empty config update needs to be pushed to the child policy to ensure + // that a picker that fails RPCs is sent up to the channel. 
+ // + // Resource resolver implementations are expected to not send any updates + // after they are stopped. Therefore, we don't have to worry about another + // write to this channel happening at the same time as this one. + select { + case <-rr.updateChannel: + default: + } + rr.updateChannel <- &resourceUpdate{} } // generateLocked collects updates from all resolvers. It pushes the combined diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 4ce70e0fe6bb..06af9cc6df32 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -100,6 +100,12 @@ func (dr *dnsDiscoveryMechanism) resolveNow() { } } +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. The +// underlying dns resolver does not send any updates to the resolver.ClientConn +// interface passed to it (implemented by dnsDiscoveryMechanism in this case) +// after its `Close()` returns. Therefore, we can guarantee that no methods of +// the topLevelResolver are invoked after we return from this method. 
func (dr *dnsDiscoveryMechanism) stop() { if dr.dnsR != nil { dr.dnsR.Close() diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go index 62d932b85d5a..2517cf49159c 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -21,6 +21,7 @@ package clusterresolver import ( "sync" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -29,8 +30,9 @@ type edsResourceWatcher interface { } type edsDiscoveryMechanism struct { - cancel func() + cancelWatch func() topLevelResolver topLevelResolver + stopped *grpcsync.Event mu sync.Mutex update xdsresource.EndpointsUpdate @@ -50,11 +52,23 @@ func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { func (er *edsDiscoveryMechanism) resolveNow() { } +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. func (er *edsDiscoveryMechanism) stop() { - er.cancel() + // Canceling a watch with the xDS client can race with an xDS response + // received around the same time, and can result in the watch callback being + // invoked after the watch is canceled. Callers need to handle this race, + // and we fire the stopped event here to ensure that a watch callback + // invocation around the same time becomes a no-op. + er.stopped.Fire() + er.cancelWatch() } func (er *edsDiscoveryMechanism) handleEndpointsUpdate(update xdsresource.EndpointsUpdate, err error) { + if er.stopped.HasFired() { + return + } + if err != nil { er.topLevelResolver.onError(err) return @@ -71,7 +85,10 @@ func (er *edsDiscoveryMechanism) handleEndpointsUpdate(update xdsresource.Endpoi // newEDSResolver returns an implementation of the endpointsResolver interface // that uses EDS to resolve the given name to endpoints. 
func newEDSResolver(nameToWatch string, watcher edsResourceWatcher, topLevelResolver topLevelResolver) *edsDiscoveryMechanism { - ret := &edsDiscoveryMechanism{topLevelResolver: topLevelResolver} - ret.cancel = watcher.WatchEndpoints(nameToWatch, ret.handleEndpointsUpdate) + ret := &edsDiscoveryMechanism{ + topLevelResolver: topLevelResolver, + stopped: grpcsync.NewEvent(), + } + ret.cancelWatch = watcher.WatchEndpoints(nameToWatch, ret.handleEndpointsUpdate) return ret } diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 1ea1d532e3d8..0a03e43a3148 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -483,6 +483,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // There are no more watchers for this resource, delete the state // associated with it, and instruct the transport to send a request // which does not include this resource name. + a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeEnum(), resourceName) delete(resources, resourceName) a.sendDiscoveryRequestLocked(rType, resources) } diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index 6e380b27d543..cc39fb2e4d16 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -45,6 +45,11 @@ type XDSClient interface { // instead use a resource-type-specific wrapper API provided by the relevant // resource type implementation. // + // + // During a race (e.g. an xDS response is received while the user is calling + // cancel()), there's a small window where the callback can be called after + // the watcher is canceled. Callers need to handle this case. + // // TODO: Once this generic client API is fully implemented and integrated, // delete the resource type specific watch APIs on this interface. 
WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) diff --git a/xds/internal/xdsclient/clientimpl_watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go index 77c4a614a228..3a2505db43e9 100644 --- a/xds/internal/xdsclient/clientimpl_watchers.go +++ b/xds/internal/xdsclient/clientimpl_watchers.go @@ -48,10 +48,6 @@ func (l *listenerWatcher) OnResourceDoesNotExist() { // WatchListener uses LDS to discover information about the Listener resource // identified by resourceName. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchListener(resourceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) { watcher := &listenerWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchListener(c, resourceName, watcher) @@ -80,10 +76,6 @@ func (r *routeConfigWatcher) OnResourceDoesNotExist() { // WatchRouteConfig uses RDS to discover information about the // RouteConfiguration resource identified by resourceName. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchRouteConfig(resourceName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) { watcher := &routeConfigWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchRouteConfig(c, resourceName, watcher) @@ -115,10 +107,6 @@ func (c *clusterWatcher) OnResourceDoesNotExist() { // // WatchCluster can be called multiple times, with same or different // clusterNames. Each call will start an independent watcher for the resource. -// -// Note that during race (e.g. 
an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { watcher := &clusterWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchCluster(c, resourceName, watcher) @@ -150,10 +138,6 @@ func (c *endpointsWatcher) OnResourceDoesNotExist() { // // WatchEndpoints can be called multiple times, with same or different // clusterNames. Each call will start an independent watcher for the resource. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. func (c *clientImpl) WatchEndpoints(resourceName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { watcher := &endpointsWatcher{resourceName: resourceName, cb: cb} return xdsresource.WatchEndpoints(c, resourceName, watcher) From a02aae6168aa683a1f106e285ec10eb6bc1f6ded Mon Sep 17 00:00:00 2001 From: Knut Zuidema Date: Wed, 22 Mar 2023 00:28:53 +0100 Subject: [PATCH 842/998] CONTRIBUTING.md: remove duplicated bullet point (#6139) --- CONTRIBUTING.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8e001134da20..608aa6e1ac5e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,10 +20,6 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. -- For speculative changes, consider opening an issue and discussing it first. If - you are suggesting a behavioral or API change, consider starting with a [gRFC - proposal](https://github.com/grpc/proposal). 
- - If you are searching for features to work on, issues labeled [Status: Help Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) is a great place to start. These issues are well-documented and usually can be From 9c25653be0d34eefdd9748b2e4e05e30ed0e9192 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 22 Mar 2023 09:19:57 -0700 Subject: [PATCH 843/998] cdsbalancer: improve log messages (#6134) --- .../balancer/cdsbalancer/cdsbalancer.go | 26 +++++++++---------- .../balancer/cdsbalancer/cdsbalancer_test.go | 2 ++ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 4a0beab131d1..9b55ee0f10ab 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -48,7 +48,7 @@ const ( ) var ( - errBalancerClosed = errors.New("cdsBalancer is closed") + errBalancerClosed = errors.New("cds_experimental LB policy is closed") // newChildBalancer is a helper function to build a new cluster_resolver // balancer and will be overridden in unittests. @@ -327,7 +327,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { return } - b.logger.Infof("Watch update from xds-client %p, content: %+v, security config: %v", b.xdsClient, pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) + b.logger.Infof("Received Cluster resource contains content: %s, security config: %s", pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) // Process the security config from the received update before building the // child policy or forwarding the update to it. 
We do this because the child @@ -338,7 +338,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { // If the security config is invalid, for example, if the provider // instance is not found in the bootstrap config, we need to put the // channel in transient failure. - b.logger.Warningf("Invalid security config update from xds-client %p: %v", b.xdsClient, err) + b.logger.Warningf("Received Cluster resource contains invalid security config: %v", err) b.handleErrorFromUpdate(err, false) return } @@ -388,7 +388,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { DNSHostname: cu.DNSHostName, } default: - b.logger.Infof("unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) + b.logger.Infof("Unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) } if envconfig.XDSOutlierDetection { dms[i].OutlierDetection = outlierDetectionToConfig(cu.OutlierDetection) @@ -416,7 +416,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { BalancerConfig: lbCfg, } if err := b.childLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("xds: cluster_resolver balancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) + b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) } } @@ -435,13 +435,13 @@ func (b *cdsBalancer) run() { // SubConn updates are passthrough and are simply handed over to // the underlying cluster_resolver balancer. 
if b.childLB == nil { - b.logger.Errorf("xds: received scUpdate {%+v} with no cluster_resolver balancer", update) + b.logger.Errorf("Received SubConn update with no child policy: %+v", update) break } b.childLB.UpdateSubConnState(update.subConn, update.state) case exitIdle: if b.childLB == nil { - b.logger.Errorf("xds: received ExitIdle with no child balancer") + b.logger.Errorf("Received ExitIdle with no child policy") break } // This implementation assumes the child balancer supports @@ -515,7 +515,7 @@ func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { // xdsResolver. func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after cdsBalancer was closed", state) + b.logger.Errorf("Received balancer config after close") return errBalancerClosed } @@ -526,18 +526,18 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro } b.xdsClient = c } + b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig)) - b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(state.BalancerConfig)) // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not // something that is received on the wire. 
lbCfg, ok := state.BalancerConfig.(*lbConfig) if !ok { - b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", state.BalancerConfig) + b.logger.Warningf("Received unexpected balancer config type: %T", state.BalancerConfig) return balancer.ErrBadResolverState } if lbCfg.ClusterName == "" { - b.logger.Warningf("xds: no clusterName found in LoadBalancingConfig: %+v", lbCfg) + b.logger.Warningf("Received balancer config with no cluster name") return balancer.ErrBadResolverState } b.updateCh.Put(&ccUpdate{clusterName: lbCfg.ClusterName}) @@ -547,7 +547,7 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro // ResolverError handles errors reported by the xdsResolver. func (b *cdsBalancer) ResolverError(err error) { if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%v} after cdsBalancer was closed", err) + b.logger.Warningf("Received resolver error after close: %v", err) return } b.updateCh.Put(&ccUpdate{err: err}) @@ -556,7 +556,7 @@ func (b *cdsBalancer) ResolverError(err error) { // UpdateSubConnState handles subConn updates from gRPC. func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { if b.closed.HasFired() { - b.logger.Warningf("xds: received subConn update {%v, %v} after cdsBalancer was closed", sc, state) + b.logger.Warningf("Received subConn update after close: {%v, %v}", sc, state) return } b.updateCh.Put(&scUpdate{subConn: sc, state: state}) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 6e6ed1d3d4ce..27b2f15b4652 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -692,6 +692,8 @@ func (s) TestCircuitBreaking(t *testing.T) { // TestClose verifies the Close() method in the CDS balancer. 
func (s) TestClose(t *testing.T) { + grpctest.TLogger.ExpectError("cds-lb.*Received balancer config after close") + // This creates a CDS balancer, pushes a ClientConnState update with a fake // xdsClient, and makes sure that the CDS balancer registers a watch on the // provided xdsClient. From a8a25ce994c279845c34b6457ad856999ca58c37 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 22 Mar 2023 09:20:36 -0700 Subject: [PATCH 844/998] transport: use prefix logging (#6135) --- internal/transport/controlbuf.go | 17 +++++---- internal/transport/handler_server.go | 9 +++-- internal/transport/http2_client.go | 23 ++++++------ internal/transport/http2_server.go | 53 +++++++++++++++------------- internal/transport/http_util.go | 2 -- internal/transport/logging.go | 40 +++++++++++++++++++++ test/end2end_test.go | 2 +- 7 files changed, 99 insertions(+), 47 deletions(-) create mode 100644 internal/transport/logging.go diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index c343c23a530e..be5a9c81eb97 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -30,6 +30,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -488,12 +489,13 @@ type loopyWriter struct { bdpEst *bdpEstimator draining bool conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -507,6 +509,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, conn: 
conn, + logger: logger, } return l } @@ -536,8 +539,8 @@ const minBatchSize = 1000 // left open to allow the I/O error to be encountered by the reader instead. func (l *loopyWriter) run() (err error) { defer func() { - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter exiting with error: %v", err) + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) } if !isIOError(err) { l.framer.writer.Flush() @@ -636,8 +639,8 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -692,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index e6626bf96e7c..fbee581b8660 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -83,6 +84,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := 
decodeTimeout(v) @@ -150,13 +152,14 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger } func (ht *serverHandlerTransport) Close(err error) { ht.closeOnce.Do(func() { - if logger.V(logLevel) { - logger.Infof("Closing serverHandlerTransport: %v", err) + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) } close(ht.closedCh) }) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index c0c9ca3bb8e4..5216998a88ba 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" @@ -145,6 +146,7 @@ type http2Client struct { bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { @@ -244,7 +246,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. if logger.V(logLevel) { - logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + logger.Infof("Aborting due to connect deadline expiring: %v", err) } conn.Close() } @@ -346,6 +348,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts bufferPool: newBufferPool(), onClose: onClose, } + t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. 
t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -444,7 +447,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.run() close(t.writerDone) }() @@ -859,8 +862,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } } if transportDrainRequired { - if logger.V(logLevel) { - logger.Infof("transport: t.nextID > MaxStreamID. Draining") + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") } t.GracefulClose() } @@ -952,8 +955,8 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: closing: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. 
@@ -1009,8 +1012,8 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: GracefulClose called") + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") } t.onClose(GoAwayInvalid) t.state = draining @@ -1174,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 99ae1a737469..4b406b8cb011 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -35,7 +35,9 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" @@ -129,6 +131,8 @@ type http2Server struct { // This lock may not be taken if mu is already held. maxStreamMu sync.Mutex maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration @@ -267,6 +271,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) // Add peer information to the http2server context. 
t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -331,7 +336,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() close(t.writerDone) @@ -425,8 +430,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // "Transports must consider requests containing the Connection header // as malformed." - A41 case "connection": - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") } protocolError = true default: @@ -436,7 +441,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) - logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) @@ -450,8 +455,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // error, this takes precedence over a client not speaking gRPC. 
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if logger.V(logLevel) { - logger.Errorf("transport: %v", errMsg) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: http.StatusBadRequest, @@ -545,9 +550,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() - errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) - if logger.V(logLevel) { - logger.Infof("transport: %v", errMsg) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: 405, @@ -563,8 +568,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( var err error if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) } stat, ok := status.FromError(err) if !ok { @@ -638,8 +643,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -682,8 +687,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) } } } @@ -942,8 +947,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -1056,7 +1061,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -1171,8 +1176,8 @@ func (t *http2Server) keepalive() { select { case <-ageTimer.C: // Close the connection after grace period. 
- if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } t.controlBuf.put(closeConnection{}) case <-t.done: @@ -1223,8 +1228,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() return } - if logger.V(logLevel) { - logger.Infof("transport: closing: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } t.state = closing streams := t.activeStreams @@ -1232,8 +1237,8 @@ func (t *http2Server) Close(err error) { t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && logger.V(logLevel) { - logger.Infof("transport: error closing conn during Close: %v", err) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } channelz.RemoveEntry(t.channelzID) // Cancel all active streams. diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index 8fcae4f4d079..19cbb18f5ab4 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -38,7 +38,6 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -86,7 +85,6 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) // isReservedHeader checks whether hdr belongs to HTTP2 headers diff --git a/internal/transport/logging.go b/internal/transport/logging.go new file mode 100644 index 000000000000..42ed2b07af66 --- /dev/null +++ b/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/test/end2end_test.go b/test/end2end_test.go index a6d992286894..4009f0515017 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -5668,7 +5668,7 @@ func (s) TestStatusInvalidUTF8Message(t *testing.T) { // will fail to marshal the status because of the invalid utf8 message. Details // will be dropped when sending. 
func (s) TestStatusInvalidUTF8Details(t *testing.T) { - grpctest.TLogger.ExpectError("transport: failed to marshal rpc status") + grpctest.TLogger.ExpectError("Failed to marshal rpc status") var ( origMsg = string([]byte{0xff, 0xfe, 0xfd}) From 1d20f1b5008bf741af59797c3766b0ac4d482c25 Mon Sep 17 00:00:00 2001 From: Gregory Cooke Date: Thu, 23 Mar 2023 13:34:01 -0400 Subject: [PATCH 845/998] security/advancedtls: swap from deprecated pkix.CertificateList to x509.RevocationList (#6054) Swap from deprecated pkix.CertificateList to x509.RevocationList pkix.CertificateList is deprecated. We have an internal wrapper around this for representing CRLs. This PR updates that wrapper to use the preferred x509.RevocationList. This also replaces x509.ParseCRL (deprecated) with x509.ParseRevocationList. The former supported PEM input, while the latter requires DER, so I added a utility function parseRevocationList that does the PEM -> DER conversion if needed, taken from the x509.ParseCRL implementation. The one issue here is that x509.RevocationList was introduced in golang 1.19. We are still supporting 1.18. To solve this, I've put build restrictions on crl.go and crl_test.go to only build on 1.19+. Also, I also added the files crl_deprecated.go and crl_deprecated_test.go, which are identical copies to the crl.go and crl_test.go files before this PR. 
They have the build restriction of 0 { + var block *pem.Block + block, rest = pem.Decode(rest) + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatalf("ParseCertificate error %v", err) + } + t.Logf("Parsed Cert sub = %v iss = %v", c.Subject, c.Issuer) + certChain = append(certChain, c) + } + return certChain +} + +func loadCRL(t *testing.T, path string) *certificateListExt { + b, err := os.ReadFile(path) + if err != nil { + t.Fatalf("readFile(%v) failed err = %v", path, err) + } + crl, err := x509.ParseCRL(b) + if err != nil { + t.Fatalf("ParseCrl(%v) failed err = %v", path, err) + } + crlExt, err := parseCRLExtensions(crl) + if err != nil { + t.Fatalf("parseCRLExtensions(%v) failed err = %v", path, err) + } + crlExt.RawIssuer, err = extractCRLIssuer(b) + if err != nil { + t.Fatalf("extractCRLIssuer(%v) failed err= %v", path, err) + } + return crlExt +} + +func TestCachedCRL(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + tests := []struct { + desc string + val interface{} + ok bool + }{ + { + desc: "Valid", + val: &certificateListExt{ + CertList: &pkix.CertificateList{ + TBSCertList: pkix.TBSCertificateList{ + NextUpdate: time.Now().Add(time.Hour), + }, + }}, + ok: true, + }, + { + desc: "Expired", + val: &certificateListExt{ + CertList: &pkix.CertificateList{ + TBSCertList: pkix.TBSCertificateList{ + NextUpdate: time.Now().Add(-time.Hour), + }, + }}, + ok: false, + }, + { + desc: "Wrong Type", + val: "string", + ok: false, + }, + { + desc: "Empty", + val: nil, + ok: false, + }, + } + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if tt.val != nil { + cache.Add(hex.EncodeToString([]byte(tt.desc)), tt.val) + } + _, ok := cachedCrl([]byte(tt.desc), cache) + if tt.ok != ok { + t.Errorf("Cache ok error expected %v vs %v", tt.ok, ok) + } + }) + } +} + +func TestGetIssuerCRLCache(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", 
err) + } + + tests := []struct { + desc string + rawIssuer []byte + certs []*x509.Certificate + }{ + { + desc: "Valid", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + }, + { + desc: "Unverified", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + }, + { + desc: "Not Found", + rawIssuer: []byte("not_found"), + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + cache.Purge() + _, err := fetchIssuerCRL(tt.rawIssuer, tt.certs, RevocationConfig{ + RootDir: testdata.Path("."), + Cache: cache, + }) + if err == nil && cache.Len() == 0 { + t.Error("Verified CRL not added to cache") + } + if err != nil && cache.Len() != 0 { + t.Error("Unverified CRL added to cache") + } + }) + } +} + +func TestVerifyCrl(t *testing.T) { + tampered := loadCRL(t, testdata.Path("crl/1.crl")) + // Change the signature so it won't verify + tampered.CertList.SignatureValue.Bytes[0]++ + + verifyTests := []struct { + desc string + crl *certificateListExt + certs []*x509.Certificate + cert *x509.Certificate + errWant string + }{ + { + desc: "Pass intermediate", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "", + }, + { + desc: "Pass leaf", + crl: loadCRL(t, testdata.Path("crl/2.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[2], + errWant: "", + }, + { + desc: "Fail wrong cert chain", + crl: loadCRL(t, testdata.Path("crl/3.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/revokedInt.pem"))[1], + errWant: "No certificates mached", + }, + { + desc: "Fail no certs", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: []*x509.Certificate{}, + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], 
+ errWant: "No certificates mached", + }, + { + desc: "Fail Tampered signature", + crl: tampered, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "verification failure", + }, + } + + for _, tt := range verifyTests { + t.Run(tt.desc, func(t *testing.T) { + err := verifyCRL(tt.crl, tt.cert.RawIssuer, tt.certs) + switch { + case tt.errWant == "" && err != nil: + t.Errorf("Valid CRL did not verify err = %v", err) + case tt.errWant != "" && err == nil: + t.Error("Invalid CRL verified") + case tt.errWant != "" && !strings.Contains(err.Error(), tt.errWant): + t.Errorf("fetchIssuerCRL(_, %v, %v, _) = %v; want Contains(%v)", tt.cert.RawIssuer, tt.certs, err, tt.errWant) + } + }) + } +} + +func TestRevokedCert(t *testing.T) { + revokedIntChain := makeChain(t, testdata.Path("crl/revokedInt.pem")) + revokedLeafChain := makeChain(t, testdata.Path("crl/revokedLeaf.pem")) + validChain := makeChain(t, testdata.Path("crl/unrevoked.pem")) + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + var revocationTests = []struct { + desc string + in tls.ConnectionState + revoked bool + allowUndetermined bool + }{ + { + desc: "Single unrevoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain}}, + revoked: false, + }, + { + desc: "Single revoked intermediate", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedIntChain}}, + revoked: true, + }, + { + desc: "Single revoked leaf", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedLeafChain}}, + revoked: true, + }, + { + desc: "Multi one revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, revokedLeafChain}}, + revoked: false, + }, + { + desc: "Multi revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedLeafChain, revokedIntChain}}, + revoked: true, + }, + { + desc: "Multi unrevoked", + in: 
tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, validChain}}, + revoked: false, + }, + { + desc: "Undetermined revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: true, + }, + { + desc: "Undetermined allowed", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: false, + allowUndetermined: true, + }, + } + + for _, tt := range revocationTests { + t.Run(tt.desc, func(t *testing.T) { + err := CheckRevocation(tt.in, RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: tt.allowUndetermined, + Cache: cache, + }) + t.Logf("CheckRevocation err = %v", err) + if tt.revoked && err == nil { + t.Error("Revoked certificate chain was allowed") + } else if !tt.revoked && err != nil { + t.Error("Unrevoked certificate not allowed") + } + }) + } +} + +func setupTLSConn(t *testing.T) (net.Listener, *x509.Certificate, *ecdsa.PrivateKey) { + t.Helper() + templ := x509.Certificate{ + SerialNumber: big.NewInt(5), + BasicConstraintsValid: true, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + Subject: pkix.Name{CommonName: "test-cert"}, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + IPAddresses: []net.IP{net.ParseIP("::1")}, + CRLDistributionPoints: []string{"http://static.corp.google.com/crl/campus-sln/borg"}, + } + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey failed err = %v", err) + } + rawCert, err := x509.CreateCertificate(rand.Reader, &templ, &templ, key.Public(), key) + if err != nil { + t.Fatalf("x509.CreateCertificate failed err = %v", err) + } + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + t.Fatalf("x509.ParseCertificate failed err = 
%v", err) + } + + srvCfg := tls.Config{ + Certificates: []tls.Certificate{ + { + Certificate: [][]byte{cert.Raw}, + PrivateKey: key, + }, + }, + } + l, err := tls.Listen("tcp6", "[::1]:0", &srvCfg) + if err != nil { + t.Fatalf("tls.Listen failed err = %v", err) + } + return l, cert, key +} + +// TestVerifyConnection will setup a client/server connection and check revocation in the real TLS dialer +func TestVerifyConnection(t *testing.T) { + lis, cert, key := setupTLSConn(t) + defer func() { + lis.Close() + }() + + var handshakeTests = []struct { + desc string + revoked []pkix.RevokedCertificate + success bool + }{ + { + desc: "Empty CRL", + revoked: []pkix.RevokedCertificate{}, + success: true, + }, + { + desc: "Revoked Cert", + revoked: []pkix.RevokedCertificate{ + { + SerialNumber: cert.SerialNumber, + RevocationTime: time.Now(), + }, + }, + success: false, + }, + } + for _, tt := range handshakeTests { + t.Run(tt.desc, func(t *testing.T) { + // Accept one connection. + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("tls.Accept failed err = %v", err) + } else { + conn.Write([]byte("Hello, World!")) + conn.Close() + } + }() + + dir, err := os.MkdirTemp("", "crl_dir") + if err != nil { + t.Fatalf("os.MkdirTemp failed err = %v", err) + } + defer os.RemoveAll(dir) + + crl, err := cert.CreateCRL(rand.Reader, key, tt.revoked, time.Now(), time.Now().Add(time.Hour)) + if err != nil { + t.Fatalf("templ.CreateCRL failed err = %v", err) + } + + err = os.WriteFile(path.Join(dir, fmt.Sprintf("%s.r0", x509NameHash(cert.Subject.ToRDNSequence()))), crl, 0777) + if err != nil { + t.Fatalf("os.WriteFile failed err = %v", err) + } + + cp := x509.NewCertPool() + cp.AddCert(cert) + cliCfg := tls.Config{ + RootCAs: cp, + VerifyConnection: func(cs tls.ConnectionState) error { + return CheckRevocation(cs, RevocationConfig{RootDir: dir}) + }, + } + conn, err := tls.Dial(lis.Addr().Network(), lis.Addr().String(), &cliCfg) + t.Logf("tls.Dial err = %v", err) + if 
tt.success && err != nil { + t.Errorf("Expected success got err = %v", err) + } + if !tt.success && err == nil { + t.Error("Expected error, but got success") + } + if err == nil { + conn.Close() + } + }) + } +} + +func TestIssuerNonPrintableString(t *testing.T) { + rawIssuer, err := hex.DecodeString("300c310a300806022a030c023a29") + if err != nil { + t.Fatalf("failed to decode issuer: %s", err) + } + _, err = fetchCRL(rawIssuer, RevocationConfig{RootDir: testdata.Path("crl")}) + if err != nil { + t.Fatalf("fetchCRL failed: %s", err) + } +} + +// TestCRLCacheExpirationReloading tests the basic expiration and reloading of a +// cached CRL. The setup places an empty CRL in the cache, and a corresponding +// CRL with a revocation in the CRL directory. We then validate the certificate +// to verify that the certificate is not revoked. Then, we modify the +// NextUpdate time to be in the past so that when we next check for revocation, +// the existing cache entry should be seen as expired, and the CRL in the +// directory showing `revokedInt.pem` as revoked will be loaded, resulting in +// the check returning `RevocationRevoked`. 
+func TestCRLCacheExpirationReloading(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("Creating cache failed") + } + + var certs = makeChain(t, testdata.Path("crl/revokedInt.pem")) + // Certs[1] has the same issuer as the revoked cert + rawIssuer := certs[1].RawIssuer + + // `3.crl`` revokes `revokedInt.pem` + crl := loadCRL(t, testdata.Path("crl/3.crl")) + // Modify the crl so that the cert is NOT revoked and add it to the cache + crl.CertList.TBSCertList.RevokedCertificates = nil + crl.CertList.TBSCertList.NextUpdate = time.Now().Add(time.Hour) + cache.Add(hex.EncodeToString(rawIssuer), crl) + var cfg = RevocationConfig{RootDir: testdata.Path("crl"), Cache: cache} + revocationStatus := checkChain(certs, cfg) + if revocationStatus != RevocationUnrevoked { + t.Fatalf("Certificate check should be RevocationUnrevoked, was %v", revocationStatus) + } + + // Modify the entry in the cache so that the cache will be refreshed + crl.CertList.TBSCertList.NextUpdate = time.Now() + cache.Add(hex.EncodeToString(rawIssuer), crl) + + revocationStatus = checkChain(certs, cfg) + if revocationStatus != RevocationRevoked { + t.Fatalf("A certificate should have been `RevocationRevoked` but was %v", revocationStatus) + } +} diff --git a/security/advancedtls/crl_test.go b/security/advancedtls/crl_test.go index 65ef3ca1b738..021f10d35ae5 100644 --- a/security/advancedtls/crl_test.go +++ b/security/advancedtls/crl_test.go @@ -1,3 +1,6 @@ +// TODO(@gregorycooke) - Remove when only golang 1.19+ is supported +//go:build go1.19 + /* * * Copyright 2021 gRPC authors. 
@@ -185,7 +188,7 @@ qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - crl, err := x509.ParseCRL(tt.in) + crl, err := parseRevocationList(tt.in) if err != nil { t.Fatal(err) } @@ -216,13 +219,12 @@ XmcN4lG1e4nx+xjzp7MySYO42NRY3LkphVzJhu3dRBYhBKViRJxw9hLttChitJpF 6Kh6a0QzrEY/QDJGhE1VrAD2c5g/SKnHPDVoCWo4ACIICi76KQQSIWfIdp4W/SY3 qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 -----END X509 CRL-----`) - crl, err := x509.ParseCRL(dummyCrlFile) + crl, err := parseRevocationList(dummyCrlFile) if err != nil { - t.Fatalf("x509.ParseCRL(dummyCrlFile) failed: %v", err) + t.Fatalf("parseRevocationList(dummyCrlFile) failed: %v", err) } crlExt := &certificateListExt{CertList: crl} - var crlIssuer pkix.Name - crlIssuer.FillFromRDNSequence(&crl.TBSCertList.Issuer) + var crlIssuer pkix.Name = crl.Issuer var revocationTests = []struct { desc string @@ -341,9 +343,9 @@ func loadCRL(t *testing.T, path string) *certificateListExt { if err != nil { t.Fatalf("readFile(%v) failed err = %v", path, err) } - crl, err := x509.ParseCRL(b) + crl, err := parseRevocationList(b) if err != nil { - t.Fatalf("ParseCrl(%v) failed err = %v", path, err) + t.Fatalf("parseCrl(%v) failed err = %v", path, err) } crlExt, err := parseCRLExtensions(crl) if err != nil { @@ -370,20 +372,16 @@ func TestCachedCRL(t *testing.T) { { desc: "Valid", val: &certificateListExt{ - CertList: &pkix.CertificateList{ - TBSCertList: pkix.TBSCertificateList{ - NextUpdate: time.Now().Add(time.Hour), - }, + CertList: &x509.RevocationList{ + NextUpdate: time.Now().Add(time.Hour), }}, ok: true, }, { desc: "Expired", val: &certificateListExt{ - CertList: &pkix.CertificateList{ - TBSCertList: pkix.TBSCertificateList{ - NextUpdate: time.Now().Add(-time.Hour), - }, + CertList: &x509.RevocationList{ + NextUpdate: time.Now().Add(-time.Hour), }}, ok: false, }, @@ -457,7 +455,7 @@ func TestGetIssuerCRLCache(t *testing.T) { func TestVerifyCrl(t *testing.T) { tampered := loadCRL(t, 
testdata.Path("crl/1.crl")) // Change the signature so it won't verify - tampered.CertList.SignatureValue.Bytes[0]++ + tampered.CertList.Signature[0]++ verifyTests := []struct { desc string @@ -606,7 +604,7 @@ func setupTLSConn(t *testing.T) (net.Listener, *x509.Certificate, *ecdsa.Private NotAfter: time.Now().Add(time.Hour), IsCA: true, Subject: pkix.Name{CommonName: "test-cert"}, - KeyUsage: x509.KeyUsageCertSign, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, IPAddresses: []net.IP{net.ParseIP("::1")}, CRLDistributionPoints: []string{"http://static.corp.google.com/crl/campus-sln/borg"}, @@ -687,9 +685,15 @@ func TestVerifyConnection(t *testing.T) { } defer os.RemoveAll(dir) - crl, err := cert.CreateCRL(rand.Reader, key, tt.revoked, time.Now(), time.Now().Add(time.Hour)) + template := &x509.RevocationList{ + RevokedCertificates: tt.revoked, + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(time.Hour), + Number: big.NewInt(1), + } + crl, err := x509.CreateRevocationList(rand.Reader, template, cert, key) if err != nil { - t.Fatalf("templ.CreateCRL failed err = %v", err) + t.Fatalf("templ.CreateRevocationList failed err = %v", err) } err = os.WriteFile(path.Join(dir, fmt.Sprintf("%s.r0", x509NameHash(cert.Subject.ToRDNSequence()))), crl, 0777) @@ -752,8 +756,8 @@ func TestCRLCacheExpirationReloading(t *testing.T) { // `3.crl`` revokes `revokedInt.pem` crl := loadCRL(t, testdata.Path("crl/3.crl")) // Modify the crl so that the cert is NOT revoked and add it to the cache - crl.CertList.TBSCertList.RevokedCertificates = nil - crl.CertList.TBSCertList.NextUpdate = time.Now().Add(time.Hour) + crl.CertList.RevokedCertificates = nil + crl.CertList.NextUpdate = time.Now().Add(time.Hour) cache.Add(hex.EncodeToString(rawIssuer), crl) var cfg = RevocationConfig{RootDir: testdata.Path("crl"), Cache: cache} revocationStatus := checkChain(certs, cfg) @@ -762,7 +766,7 @@ func 
TestCRLCacheExpirationReloading(t *testing.T) { } // Modify the entry in the cache so that the cache will be refreshed - crl.CertList.TBSCertList.NextUpdate = time.Now() + crl.CertList.NextUpdate = time.Now() cache.Add(hex.EncodeToString(rawIssuer), crl) revocationStatus = checkChain(certs, cfg) From 0fdfd40215dcd7a1da0a94755841d612ed5be7bf Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 23 Mar 2023 17:33:06 -0400 Subject: [PATCH 846/998] gcp/observability: Generate unique process identifier unconditionally (#6144) --- gcp/observability/opencensus.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index a46b7665c343..54ad84fa4796 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -20,6 +20,8 @@ package observability import ( "fmt" + "os" + "strconv" "time" "contrib.go.opencensus.io/exporter/stackdriver" @@ -82,6 +84,11 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { mr := monitoredresource.Autodetect() logger.Infof("Detected MonitoredResource:: %+v", mr) var err error + // Custom labels completly overwrite any labels generated in the OpenCensus + // library, including their label that uniquely identifies the process. + // Thus, generate a unique process identifier here to uniquely identify + // process. + config.Labels["opencensus_task"] = generateUniqueProcessIdentifier() exporter, err := stackdriver.NewExporter(stackdriver.Options{ ProjectID: config.ProjectID, MonitoredResource: mr, @@ -96,6 +103,18 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { return exporter, nil } +// generateUniqueProcessIdentifier returns a unique process identifier for the +// process this code is running in. This is the same way the OpenCensus library +// generates the unique process identifier, in the format of +// "go-@". 
+func generateUniqueProcessIdentifier() string { + hostname, err := os.Hostname() + if err != nil { + hostname = "localhost" + } + return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname +} + // This method accepts config and exporter; the exporter argument is exposed to // assist unit testing of the OpenCensus behavior. func startOpenCensus(config *config) error { From 277bb6429a052d97ac0759fad795410595b8e66c Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 23 Mar 2023 19:34:27 -0400 Subject: [PATCH 847/998] Revert "credentials/alts: defer ALTS stream creation until handshake time (#6077)" (#6148) This reverts commit c84a5005d97fb43bbc01b7d1484c0e4c844bec4c. --- .../alts/internal/handshaker/handshaker.go | 50 +++++---------- .../internal/handshaker/handshaker_test.go | 64 ------------------- 2 files changed, 17 insertions(+), 97 deletions(-) diff --git a/credentials/alts/internal/handshaker/handshaker.go b/credentials/alts/internal/handshaker/handshaker.go index c8a307531429..7b953a520e5b 100644 --- a/credentials/alts/internal/handshaker/handshaker.go +++ b/credentials/alts/internal/handshaker/handshaker.go @@ -138,7 +138,7 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { // and server options (server options struct does not exist now. When // caller can provide endpoints, it should be created. -// altsHandshaker is used to complete an ALTS handshake between client and +// altsHandshaker is used to complete a ALTS handshaking between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. type altsHandshaker struct { @@ -146,8 +146,6 @@ type altsHandshaker struct { stream altsgrpc.HandshakerService_DoHandshakeClient // the connection to the peer. conn net.Conn - // a virtual connection to the ALTS handshaker service. - clientConn *grpc.ClientConn // client handshake options. clientOpts *ClientHandshakerOptions // server handshake options. 
@@ -156,33 +154,39 @@ type altsHandshaker struct { side core.Side } -// NewClientHandshaker creates a core.Handshaker that performs a client-side -// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker +// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC +// stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) + if err != nil { + return nil, err + } return &altsHandshaker{ - stream: nil, + stream: stream, conn: c, - clientConn: conn, clientOpts: opts, side: core.ClientSide, }, nil } -// NewServerHandshaker creates a core.Handshaker that performs a server-side -// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker +// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC +// stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) + if err != nil { + return nil, err + } return &altsHandshaker{ - stream: nil, + stream: stream, conn: c, - clientConn: conn, serverOpts: opts, side: core.ServerSide, }, nil } -// ClientHandshake starts and completes a client ALTS handshake for GCP. Once +// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once // done, ClientHandshake returns a secure connection. 
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -194,16 +198,6 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") } - // TODO(matthewstevenson88): Change unit tests to use public APIs so - // that h.stream can unconditionally be set based on h.clientConn. - if h.stream == nil { - stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) - if err != nil { - return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) - } - h.stream = stream - } - // Create target identities from service account list. targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) for _, account := range h.clientOpts.TargetServiceAccounts { @@ -235,7 +229,7 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return conn, authInfo, nil } -// ServerHandshake starts and completes a server ALTS handshake for GCP. Once +// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -247,16 +241,6 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") } - // TODO(matthewstevenson88): Change unit tests to use public APIs so - // that h.stream can unconditionally be set based on h.clientConn. 
- if h.stream == nil { - stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) - if err != nil { - return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) - } - h.stream = stream - } - p := make([]byte, frameLimit) n, err := h.conn.Read(p) if err != nil { diff --git a/credentials/alts/internal/handshaker/handshaker_test.go b/credentials/alts/internal/handshaker/handshaker_test.go index 53aee6423158..14a0721054f2 100644 --- a/credentials/alts/internal/handshaker/handshaker_test.go +++ b/credentials/alts/internal/handshaker/handshaker_test.go @@ -25,8 +25,6 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" grpc "google.golang.org/grpc" core "google.golang.org/grpc/credentials/alts/internal" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" @@ -285,65 +283,3 @@ func (s) TestPeerNotResponding(t *testing.T) { t.Errorf("ClientHandshake() = %v, want %v", got, want) } } - -func (s) TestNewClientHandshaker(t *testing.T) { - conn := testutil.NewTestConn(nil, nil) - clientConn := &grpc.ClientConn{} - opts := &ClientHandshakerOptions{} - hs, err := NewClientHandshaker(context.Background(), clientConn, conn, opts) - if err != nil { - t.Errorf("NewClientHandshaker returned unexpected error: %v", err) - } - expectedHs := &altsHandshaker{ - stream: nil, - conn: conn, - clientConn: clientConn, - clientOpts: opts, - serverOpts: nil, - side: core.ClientSide, - } - cmpOpts := []cmp.Option{ - cmp.AllowUnexported(altsHandshaker{}), - cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), - } - if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ - t.Errorf("NewClientHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) - } - if hs.(*altsHandshaker).stream != nil { - t.Errorf("NewClientHandshaker() returned handshaker with non-nil stream") - } - if hs.(*altsHandshaker).clientConn != clientConn { - t.Errorf("NewClientHandshaker() returned handshaker with unexpected clientConn") - } -} - -func (s) TestNewServerHandshaker(t *testing.T) { - conn := testutil.NewTestConn(nil, nil) - clientConn := &grpc.ClientConn{} - opts := &ServerHandshakerOptions{} - hs, err := NewServerHandshaker(context.Background(), clientConn, conn, opts) - if err != nil { - t.Errorf("NewServerHandshaker returned unexpected error: %v", err) - } - expectedHs := &altsHandshaker{ - stream: nil, - conn: conn, - clientConn: clientConn, - clientOpts: nil, - serverOpts: opts, - side: core.ServerSide, - } - cmpOpts := []cmp.Option{ - cmp.AllowUnexported(altsHandshaker{}), - cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), - } - if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ - t.Errorf("NewServerHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) - } - if hs.(*altsHandshaker).stream != nil { - t.Errorf("NewServerHandshaker() returned handshaker with non-nil stream") - } - if hs.(*altsHandshaker).clientConn != clientConn { - t.Errorf("NewServerHandshaker() returned handshaker with unexpected clientConn") - } -} From c018273e5353ca8d4106a9d8917f904983b9dbeb Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 23 Mar 2023 19:57:26 -0400 Subject: [PATCH 848/998] examples: Add observability example (#6149) --- examples/features/observability/README.md | 3 + .../observability/client/clientConfig.json | 17 + .../features/observability/client/main.go | 70 ++ examples/features/observability/go.mod | 41 ++ examples/features/observability/go.sum | 640 ++++++++++++++++++ .../features/observability/server/main.go | 89 +++ .../observability/server/serverConfig.json | 17 + 7 files changed, 877 insertions(+) create mode 100644 examples/features/observability/README.md create mode 100644 examples/features/observability/client/clientConfig.json create mode 100644 examples/features/observability/client/main.go create mode 100644 examples/features/observability/go.mod create mode 100644 examples/features/observability/go.sum create mode 100644 examples/features/observability/server/main.go create mode 100644 examples/features/observability/server/serverConfig.json diff --git a/examples/features/observability/README.md b/examples/features/observability/README.md new file mode 100644 index 000000000000..f2aa52101069 --- /dev/null +++ b/examples/features/observability/README.md @@ -0,0 +1,3 @@ +This example is the Hello World example instrumented for logs, metrics, and tracing. + +Please refer to Microservices Observability user guide for setup. 
diff --git a/examples/features/observability/client/clientConfig.json b/examples/features/observability/client/clientConfig.json new file mode 100644 index 000000000000..b98ae25e1b77 --- /dev/null +++ b/examples/features/observability/client/clientConfig.json @@ -0,0 +1,17 @@ +{ + "cloud_monitoring": {}, + "cloud_trace": { + "sampling_rate": 1.0 + }, + "cloud_logging": { + "client_rpc_events": [{ + "methods": ["*"] + }], + "server_rpc_events": [{ + "methods": ["*"] + }] + }, + "labels": { + "environment" : "example-client" + } +} diff --git a/examples/features/observability/client/main.go b/examples/features/observability/client/main.go new file mode 100644 index 000000000000..4c1d994a30dd --- /dev/null +++ b/examples/features/observability/client/main.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package main implements a client for Greeter service. +package main + +import ( + "context" + "flag" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "google.golang.org/grpc/gcp/observability" +) + +const ( + defaultName = "world" +) + +var ( + addr = flag.String("addr", "localhost:50051", "the address to connect to") + name = flag.String("name", defaultName, "Name to greet") +) + +func main() { + // Turn on global telemetry for the whole binary. 
If a configuration is + // specified, any created gRPC Client Conn's or Servers will emit telemetry + // data according the the configuration. + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + err := observability.Start(ctx) + if err != nil { + log.Fatalf("observability.Start() failed: %v", err) + } + defer observability.End() + + flag.Parse() + // Set up a connection to the server. + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + // Contact the server and print out its response. + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + log.Printf("Greeting: %s", r.GetMessage()) +} diff --git a/examples/features/observability/go.mod b/examples/features/observability/go.mod new file mode 100644 index 000000000000..aabfc0c024f7 --- /dev/null +++ b/examples/features/observability/go.mod @@ -0,0 +1,41 @@ +module google.golang.org/grpc/examples/features/observability + +go 1.17 + +require ( + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc + google.golang.org/grpc/gcp/observability v0.0.0-20230323213306-0fdfd40215dc +) + +require ( + cloud.google.com/go v0.109.0 // indirect + cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/logging v1.6.1 // indirect + cloud.google.com/go/longrunning v0.4.0 // indirect + cloud.google.com/go/monitoring v1.12.0 // indirect + cloud.google.com/go/trace v1.8.0 // indirect + contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect + github.com/aws/aws-sdk-go v1.44.162 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/prometheus/prometheus v2.5.0+incompatible // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/oauth2 v0.5.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect + google.golang.org/api v0.109.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect + google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 // indirect + google.golang.org/protobuf v1.29.1 // indirect +) diff --git a/examples/features/observability/go.sum b/examples/features/observability/go.sum new file mode 100644 index 000000000000..8cf76ce1d480 --- /dev/null +++ b/examples/features/observability/go.sum @@ -0,0 +1,640 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go 
v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= +cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.4.0 h1:v+X4EwhHl6xE+TG1XgXj4T1XpKKs7ZevcAJ3FOu0YmY= +cloud.google.com/go/longrunning v0.4.0/go.mod h1:eF3Qsw58iX/bkKtVjMTYpH0LRjQ2goDkjkNQTlzq/ZM= +cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod 
h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= +github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= +google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto 
v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa h1:qQPhfbPO23fwm/9lQr91L1u62Zo6cm+zI+slZT+uf+o= +google.golang.org/genproto 
v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc h1:H58v4RmBwciuKpwU6NFUn3w2hPZNL78HedaJUitCdpI= +google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc/go.mod h1:EXfxRt8PpWkTFBAXaWXB0Xgb1S/FFBXvFRry0nr2bHQ= +google.golang.org/grpc/gcp/observability v0.0.0-20230323213306-0fdfd40215dc h1:Zglt738ox4oYU2Ghlyz1ifPgRkyXNv+DvQNSNnIQgFU= +google.golang.org/grpc/gcp/observability v0.0.0-20230323213306-0fdfd40215dc/go.mod h1:5pEder79lE7uwh+IMQnYSAo7RrzRBH7RwxpXkPYiTnM= +google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 h1:MeDNVH2KmQ9Z3AbXKsvU9UcbRR8LfpZVLmZAVWIX0nI= +google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204/go.mod h1:Dg7VaOjf0r9QhRn/YpwSf3vKQz1ixulffTlhEarxEXA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/examples/features/observability/server/main.go b/examples/features/observability/server/main.go new file mode 100644 index 000000000000..0aae0699342d --- /dev/null +++ b/examples/features/observability/server/main.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package main implements a server for Greeter service. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + "os" + "os/signal" + "syscall" + "time" + + "google.golang.org/grpc" + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "google.golang.org/grpc/gcp/observability" +) + +var ( + port = flag.Int("port", 50051, "The server port") +) + +// server is used to implement helloworld.GreeterServer. 
+type server struct { + pb.UnimplementedGreeterServer +} + +// SayHello implements helloworld.GreeterServer +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + log.Printf("Received: %v", in.GetName()) + return &pb.HelloReply{Message: "Hello " + in.GetName()}, nil +} + +func main() { + // Turn on global telemetry for the whole binary. If a configuration is + // specified, any created gRPC Client Conn's or Servers will emit telemetry + // data according the the configuration. + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + err := observability.Start(ctx) + if err != nil { + log.Fatalf("observability.Start() failed: %v", err) + } + defer observability.End() + + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + log.Printf("server listening at %v", lis.Addr()) + + // This server can potentially be terminated by an external signal from the + // Operating System. The following catches those signals and calls s.Stop(). + // This causes the s.Serve() call to return and run main()'s defers, + // including the observability.End() call that ensures any pending + // observability data is sent to Cloud Operations. 
+ c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + s.Stop() + }() + + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/examples/features/observability/server/serverConfig.json b/examples/features/observability/server/serverConfig.json new file mode 100644 index 000000000000..a2bf7f6b6e74 --- /dev/null +++ b/examples/features/observability/server/serverConfig.json @@ -0,0 +1,17 @@ +{ + "cloud_monitoring": {}, + "cloud_trace": { + "sampling_rate": 1.0 + }, + "cloud_logging": { + "client_rpc_events": [{ + "methods": ["*"] + }], + "server_rpc_events": [{ + "methods": ["*"] + }] + }, + "labels": { + "environment" : "example-server" + } +} From 44cebb8ff581b9be17a4b0b4a6ba313e2ab89ca1 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Mon, 27 Mar 2023 15:23:22 -0700 Subject: [PATCH 849/998] xds: enable XDS federation by default (#6151) --- internal/envconfig/xds.go | 2 +- xds/internal/xdsclient/xdsresource/name_test.go | 16 +++++----------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 04136882c7bc..3b17705ba097 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -79,7 +79,7 @@ var ( // XDSFederation indicates whether federation support is enabled, which can // be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". 
- XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) // XDSRLS indicates whether processing of Cluster Specifier plugins and // support for the RLS CLuster Specifier is enabled, which can be enabled by diff --git a/xds/internal/xdsclient/xdsresource/name_test.go b/xds/internal/xdsclient/xdsresource/name_test.go index cfab669c54bc..a30b437658f5 100644 --- a/xds/internal/xdsclient/xdsresource/name_test.go +++ b/xds/internal/xdsclient/xdsresource/name_test.go @@ -92,13 +92,11 @@ func TestParseName(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if tt.env { - defer func() func() { - oldEnv := envconfig.XDSFederation - envconfig.XDSFederation = true - return func() { envconfig.XDSFederation = oldEnv } - }()() - } + defer func() func() { + oldEnv := envconfig.XDSFederation + envconfig.XDSFederation = tt.env + return func() { envconfig.XDSFederation = oldEnv } + }()() got := ParseName(tt.in) if !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(Name{}, "processingDirective")) { t.Errorf("ParseName() = %#v, want %#v", got, tt.want) @@ -113,10 +111,6 @@ func TestParseName(t *testing.T) { // TestNameStringCtxParamsOrder covers the case that if two names differ only in // context parameter __order__, the parsed name.String() has the same value. 
func TestNameStringCtxParamsOrder(t *testing.T) { - oldEnv := envconfig.XDSFederation - envconfig.XDSFederation = true - defer func() { envconfig.XDSFederation = oldEnv }() - const ( a = "xdstp://auth/type/id?a=1&b=2" b = "xdstp://auth/type/id?b=2&a=1" From a357bafad15583c588602cbd8ee67f22440b9652 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 27 Mar 2023 15:36:22 -0700 Subject: [PATCH 850/998] status: FromError: return entire error message text for wrapped errors (#6150) --- status/status.go | 15 +++++++++++---- status/status_test.go | 4 ++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/status/status.go b/status/status.go index 7577625b0c40..53910fb7c901 100644 --- a/status/status.go +++ b/status/status.go @@ -78,7 +78,8 @@ func FromProto(s *spb.Status) *Status { // // - If err was produced by this package or implements the method `GRPCStatus() // *Status`, or if err wraps a type satisfying this, the appropriate Status is -// returned. +// returned. For wrapped errors, the message returned contains the entire +// err.Error() text and not just the wrapped status. // // - If err is nil, a Status is returned with codes.OK and no message. 
// @@ -89,9 +90,15 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - var se interface{ GRPCStatus() *Status } - if errors.As(err, &se) { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + return gs.GRPCStatus(), true + } + var gs grpcstatus + if errors.As(err, &gs) { + p := gs.GRPCStatus().Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } diff --git a/status/status_test.go b/status/status_test.go index 244cb8151fd5..b0bb3fcb67cc 100644 --- a/status/status_test.go +++ b/status/status_test.go @@ -197,7 +197,7 @@ func (s) TestFromErrorWrapped(t *testing.T) { const code, message = codes.Internal, "test description" err := fmt.Errorf("wrapped error: %w", Error(code, message)) s, ok := FromError(err) - if !ok || s.Code() != code || s.Message() != message || s.Err() == nil { + if !ok || s.Code() != code || s.Message() != err.Error() || s.Err() == nil { t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) } } @@ -206,7 +206,7 @@ func (s) TestFromErrorImplementsInterfaceWrapped(t *testing.T) { const code, message = codes.Internal, "test description" err := fmt.Errorf("wrapped error: %w", customError{Code: code, Message: message}) s, ok := FromError(err) - if !ok || s.Code() != code || s.Message() != message || s.Err() == nil { + if !ok || s.Code() != code || s.Message() != err.Error() || s.Err() == nil { t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) } } From 415ccdf15456d5e31c163367dd5d040796cb38be Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 28 Mar 2023 16:03:41 -0700 Subject: [PATCH 851/998] go.mod: update all dependencies after 1.54 branch cut (#6132) --- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- binarylog/grpc_binarylog_v1/binarylog.pb.go | 2 +- channelz/grpc_channelz_v1/channelz.pb.go | 2 +- 
cmd/protoc-gen-go-grpc/go.mod | 2 +- cmd/protoc-gen-go-grpc/go.sum | 4 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 2 +- .../grpc_gcp/transport_security_common.pb.go | 2 +- credentials/xds/xds.go | 2 +- examples/features/proto/echo/echo.pb.go | 2 +- examples/go.mod | 18 +- examples/go.sum | 328 +++++++++++++- .../helloworld/helloworld/helloworld.pb.go | 2 +- .../route_guide/routeguide/route_guide.pb.go | 2 +- gcp/observability/go.mod | 2 +- gcp/observability/go.sum | 4 +- go.mod | 18 +- go.sum | 419 +---------------- health/grpc_health_v1/health.pb.go | 2 +- internal/proto/grpc_lookup_v1/rls.pb.go | 2 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 2 +- internal/testutils/xds/e2e/server.go | 3 +- interop/grpc_testing/benchmark_service.pb.go | 2 +- interop/grpc_testing/control.pb.go | 2 +- interop/grpc_testing/core/stats.pb.go | 2 +- interop/grpc_testing/empty.pb.go | 2 +- interop/grpc_testing/messages.pb.go | 14 +- interop/grpc_testing/payloads.pb.go | 2 +- .../report_qps_scenario_service.pb.go | 2 +- interop/grpc_testing/stats.pb.go | 2 +- interop/grpc_testing/test.pb.go | 2 +- interop/grpc_testing/worker_service.pb.go | 2 +- interop/observability/go.mod | 14 +- interop/observability/go.sum | 324 +++++++++++-- profiling/proto/service.pb.go | 2 +- .../grpc_reflection_v1/reflection.pb.go | 2 +- .../grpc_reflection_v1alpha/reflection.pb.go | 85 +++- reflection/grpc_testing/proto2.pb.go | 2 +- reflection/grpc_testing/proto2_ext.pb.go | 2 +- reflection/grpc_testing/proto2_ext2.pb.go | 2 +- reflection/grpc_testing/test.pb.go | 2 +- security/advancedtls/examples/go.mod | 14 +- security/advancedtls/examples/go.sum | 16 +- security/advancedtls/go.mod | 10 +- security/advancedtls/go.sum | 16 +- security/authorization/go.mod | 19 +- security/authorization/go.sum | 426 ++---------------- stats/opencensus/go.mod | 2 +- stats/opencensus/go.sum | 4 +- stress/grpc_testing/metrics.pb.go | 2 +- test/codec_perf/perf.pb.go | 2 
+- test/grpc_testing/test.pb.go | 2 +- test/tools/go.mod | 12 +- test/tools/go.sum | 41 +- test/xds/xds_client_ack_nack_test.go | 3 +- .../clusterresolver/e2e_test/balancer_test.go | 3 +- xds/internal/xdsclient/bootstrap/bootstrap.go | 10 +- .../xdsclient/bootstrap/bootstrap_test.go | 9 - .../transport/transport_backoff_test.go | 2 +- 59 files changed, 869 insertions(+), 1015 deletions(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 6620ed11c786..f070878bd993 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/binarylog/grpc_binarylog_v1/binarylog.pb.go b/binarylog/grpc_binarylog_v1/binarylog.pb.go index 8cd89dab9047..ec2c2fa14dd3 100644 --- a/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index 3aa7c4c59f70..1c19e6b52c27 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/channelz/v1/channelz.proto diff --git a/cmd/protoc-gen-go-grpc/go.mod b/cmd/protoc-gen-go-grpc/go.mod index 7d0b7794dcad..d33b4f1d8c6f 100644 --- a/cmd/protoc-gen-go-grpc/go.mod +++ b/cmd/protoc-gen-go-grpc/go.mod @@ -2,4 +2,4 @@ module google.golang.org/grpc/cmd/protoc-gen-go-grpc go 1.17 -require google.golang.org/protobuf v1.28.1 +require google.golang.org/protobuf v1.30.0 diff --git a/cmd/protoc-gen-go-grpc/go.sum b/cmd/protoc-gen-go-grpc/go.sum index 00f5993c956c..1838366909dd 100644 --- a/cmd/protoc-gen-go-grpc/go.sum +++ b/cmd/protoc-gen-go-grpc/go.sum @@ -4,5 +4,5 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 16e814b9b910..83e3bae37b17 100644 --- a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/gcp/altscontext.proto diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 258a130a9d04..0b0093328bff 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/gcp/handshaker.proto diff --git a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index cc9a27059964..c2e564c7ded4 100644 --- a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto diff --git a/credentials/xds/xds.go b/credentials/xds/xds.go index fd402b81655d..d232e6786746 100644 --- a/credentials/xds/xds.go +++ b/credentials/xds/xds.go @@ -165,7 +165,7 @@ func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawCo if cert := certs[0]; !hi.MatchingSANExists(cert) { // TODO: Print the complete certificate once the x509 package // supports a String() method on the Certificate type. 
- return fmt.Errorf("Received SANs {DNSNames: %v, EmailAddresses: %v, IPAddresses: %v, URIs: %v} do not match any of the accepted SANs", cert.DNSNames, cert.EmailAddresses, cert.IPAddresses, cert.URIs) + return fmt.Errorf("xds: received SANs {DNSNames: %v, EmailAddresses: %v, IPAddresses: %v, URIs: %v} do not match any of the accepted SANs", cert.DNSNames, cert.EmailAddresses, cert.IPAddresses, cert.URIs) } return nil } diff --git a/examples/features/proto/echo/echo.pb.go b/examples/features/proto/echo/echo.pb.go index 8b4c26ad7eca..c46af9a08bc8 100644 --- a/examples/features/proto/echo/echo.pb.go +++ b/examples/features/proto/echo/echo.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: examples/features/proto/echo/echo.proto diff --git a/examples/go.mod b/examples/go.mod index e4fd33eb6ba1..642f3e8f0117 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -3,22 +3,22 @@ module google.golang.org/grpc/examples go 1.17 require ( - github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b - github.com/golang/protobuf v1.5.2 - golang.org/x/oauth2 v0.4.0 - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f - google.golang.org/grpc v1.52.0 - google.golang.org/protobuf v1.28.1 + github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 + github.com/golang/protobuf v1.5.3 + golang.org/x/oauth2 v0.6.0 + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 + google.golang.org/grpc v1.53.0 + google.golang.org/protobuf v1.30.0 ) require ( - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go 
v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/envoyproxy/go-control-plane v0.10.3 // indirect - github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect + github.com/envoyproxy/go-control-plane v0.11.0 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect golang.org/x/net v0.8.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect diff --git a/examples/go.sum b/examples/go.sum index 6df1d79573e0..3a9c20845207 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -34,47 +34,67 @@ cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34h cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod 
h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod 
h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= cloud.google.com/go/baremetalsolution v0.3.0/go.mod 
h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -84,26 +104,34 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= 
cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod 
h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -115,8 +143,8 @@ cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -124,30 +152,42 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= cloud.google.com/go/container v1.6.0/go.mod 
h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod 
h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= @@ -155,187 +195,254 @@ cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZx cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod 
h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= 
cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming 
v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= 
cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod 
h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod 
h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= 
cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= cloud.google.com/go/oslogin v1.7.0/go.mod 
h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= 
cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager 
v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod 
h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= 
cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -345,50 +452,78 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod 
h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/video 
v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod 
h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck 
v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= @@ -406,23 +541,40 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
-github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= 
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= 
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -450,11 +602,14 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -475,6 +630,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -497,6 +653,8 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= 
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -519,28 +677,52 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod 
h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi 
v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= 
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -552,7 +734,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -562,7 +747,7 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -572,18 +757,34 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -606,10 +807,11 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -645,7 +847,7 
@@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -659,8 +861,9 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -687,8 +890,9 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod 
h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -701,6 +905,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -715,6 +920,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -735,6 +941,7 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -748,9 +955,10 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -767,15 +975,17 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -789,7 +999,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -798,7 +1008,9 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -811,6 +1023,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -839,6 +1052,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -849,9 +1063,11 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -860,6 +1076,14 @@ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod 
h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -911,6 +1135,10 @@ google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91 google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -987,7 +1215,6 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -1023,13 +1250,26 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod 
h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1045,10 +1285,12 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1061,6 +1303,42 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 
v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod 
h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/examples/helloworld/helloworld/helloworld.pb.go b/examples/helloworld/helloworld/helloworld.pb.go index 7142f9bfc3ef..3fe5c1f2863d 100644 --- a/examples/helloworld/helloworld/helloworld.pb.go +++ b/examples/helloworld/helloworld/helloworld.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: examples/helloworld/helloworld/helloworld.proto diff --git a/examples/route_guide/routeguide/route_guide.pb.go b/examples/route_guide/routeguide/route_guide.pb.go index 024f81f06067..482ce5ff6bf1 100644 --- a/examples/route_guide/routeguide/route_guide.pb.go +++ b/examples/route_guide/routeguide/route_guide.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: examples/route_guide/routeguide/route_guide.proto diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index db221876ecee..6412012b1e2a 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -35,5 +35,5 @@ require ( golang.org/x/text v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect - google.golang.org/protobuf v1.29.1 // indirect + google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index ab1d5ef81a05..bbe99db6276d 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -613,8 +613,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/go.mod b/go.mod index 866a7f24c344..aa79385c4bfe 100644 --- a/go.mod +++ b/go.mod @@ -5,24 +5,24 @@ go 1.17 require ( github.com/cespare/xxhash/v2 v2.2.0 
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe - github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b - github.com/envoyproxy/go-control-plane v0.10.3 - github.com/golang/glog v1.0.0 - github.com/golang/protobuf v1.5.2 + github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 + github.com/envoyproxy/go-control-plane v0.11.0 + github.com/golang/glog v1.1.0 + github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 golang.org/x/net v0.8.0 - golang.org/x/oauth2 v0.4.0 + golang.org/x/oauth2 v0.6.0 golang.org/x/sys v0.6.0 - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f - google.golang.org/protobuf v1.28.1 + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 + google.golang.org/protobuf v1.30.0 ) require ( - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect golang.org/x/text v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/go.sum b/go.sum index c7633c20ea1c..d882cea260b9 100644 --- a/go.sum +++ b/go.sum @@ -1,470 +1,93 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod 
h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= 
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod 
h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/pkg/errors 
v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= 
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= 
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= 
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 
v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/health/grpc_health_v1/health.pb.go b/health/grpc_health_v1/health.pb.go index 0b1abc6467c2..142d35f753e9 100644 --- a/health/grpc_health_v1/health.pb.go +++ b/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff --git a/internal/proto/grpc_lookup_v1/rls.pb.go b/internal/proto/grpc_lookup_v1/rls.pb.go index a5f053b92926..df4cd5484e4e 100644 --- a/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/lookup/v1/rls.proto diff --git a/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go index 1c33ce64ebf1..317a35a390c4 100644 --- a/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/lookup/v1/rls_config.proto diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go index 9a1a4ec45490..94ecd80eb907 100644 --- a/internal/testutils/xds/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/internal/testutils/xds/fakeserver" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -93,7 +94,7 @@ type ManagementServerOptions struct { // OnStreamClosed is called immediately prior to closing an xDS stream. The // callback is invoked with the stream ID of the stream being closed. 
- OnStreamClosed func(int64) + OnStreamClosed func(int64, *v3corepb.Node) // OnStreamRequest is called when a request is received on the stream. The // callback is invoked with the stream ID of the stream on which the request diff --git a/interop/grpc_testing/benchmark_service.pb.go b/interop/grpc_testing/benchmark_service.pb.go index 26d54d9929e8..78d9e20c52b2 100644 --- a/interop/grpc_testing/benchmark_service.pb.go +++ b/interop/grpc_testing/benchmark_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/benchmark_service.proto diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index 8cf2fb07a4f2..5524c8e972e8 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/control.proto diff --git a/interop/grpc_testing/core/stats.pb.go b/interop/grpc_testing/core/stats.pb.go index 6780b71481c3..f6cc0f1daff0 100644 --- a/interop/grpc_testing/core/stats.pb.go +++ b/interop/grpc_testing/core/stats.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/core/stats.proto diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index bdbe3f8680fe..d23993223dba 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/empty.proto diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index 6d6f5cd20304..21d7854de481 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/messages.proto @@ -1195,17 +1195,17 @@ type LoadBalancerAccumulatedStatsResponse struct { // The total number of RPCs have ever issued for each type. // Deprecated: use stats_per_method.rpcs_started instead. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in grpc/testing/messages.proto. NumRpcsStartedByMethod map[string]int32 `protobuf:"bytes,1,rep,name=num_rpcs_started_by_method,json=numRpcsStartedByMethod,proto3" json:"num_rpcs_started_by_method,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // The total number of RPCs have ever completed successfully for each type. // Deprecated: use stats_per_method.result instead. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in grpc/testing/messages.proto. NumRpcsSucceededByMethod map[string]int32 `protobuf:"bytes,2,rep,name=num_rpcs_succeeded_by_method,json=numRpcsSucceededByMethod,proto3" json:"num_rpcs_succeeded_by_method,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // The total number of RPCs have ever failed for each type. // Deprecated: use stats_per_method.result instead. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in grpc/testing/messages.proto. 
NumRpcsFailedByMethod map[string]int32 `protobuf:"bytes,3,rep,name=num_rpcs_failed_by_method,json=numRpcsFailedByMethod,proto3" json:"num_rpcs_failed_by_method,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // Per-method RPC statistics. The key is the RpcType in string form; e.g. // 'EMPTY_CALL' or 'UNARY_CALL' @@ -1244,7 +1244,7 @@ func (*LoadBalancerAccumulatedStatsResponse) Descriptor() ([]byte, []int) { return file_grpc_testing_messages_proto_rawDescGZIP(), []int{15} } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in grpc/testing/messages.proto. func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsStartedByMethod() map[string]int32 { if x != nil { return x.NumRpcsStartedByMethod @@ -1252,7 +1252,7 @@ func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsStartedByMethod() map[s return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in grpc/testing/messages.proto. func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsSucceededByMethod() map[string]int32 { if x != nil { return x.NumRpcsSucceededByMethod @@ -1260,7 +1260,7 @@ func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsSucceededByMethod() map return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in grpc/testing/messages.proto. func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsFailedByMethod() map[string]int32 { if x != nil { return x.NumRpcsFailedByMethod diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index da19e9dcabe7..6c4af8a1aab8 100644 --- a/interop/grpc_testing/payloads.pb.go +++ b/interop/grpc_testing/payloads.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/payloads.proto diff --git a/interop/grpc_testing/report_qps_scenario_service.pb.go b/interop/grpc_testing/report_qps_scenario_service.pb.go index 40086a564d30..831c38daaba7 100644 --- a/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/report_qps_scenario_service.proto diff --git a/interop/grpc_testing/stats.pb.go b/interop/grpc_testing/stats.pb.go index a76771d6c624..9d184f978fbb 100644 --- a/interop/grpc_testing/stats.pb.go +++ b/interop/grpc_testing/stats.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/stats.proto diff --git a/interop/grpc_testing/test.pb.go b/interop/grpc_testing/test.pb.go index cf85993c7899..b1921f8565de 100644 --- a/interop/grpc_testing/test.pb.go +++ b/interop/grpc_testing/test.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/test.proto diff --git a/interop/grpc_testing/worker_service.pb.go b/interop/grpc_testing/worker_service.pb.go index 25bd944fb0e1..40b94e1dd231 100644 --- a/interop/grpc_testing/worker_service.pb.go +++ b/interop/grpc_testing/worker_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/testing/worker_service.proto diff --git a/interop/observability/go.mod b/interop/observability/go.mod index adda4920a6e7..c964c954f168 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -8,11 +8,11 @@ require ( ) require ( - cloud.google.com/go v0.109.0 // indirect + cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/logging v1.6.1 // indirect - cloud.google.com/go/longrunning v0.4.0 // indirect + cloud.google.com/go/logging v1.7.0 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect cloud.google.com/go/monitoring v1.12.0 // indirect cloud.google.com/go/trace v1.8.0 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect @@ -28,15 +28,15 @@ require ( github.com/prometheus/prometheus v2.5.0+incompatible // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.5.0 // indirect + golang.org/x/oauth2 v0.6.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - google.golang.org/api v0.109.0 // indirect + google.golang.org/api v0.110.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 // indirect - google.golang.org/protobuf v1.29.1 // indirect + google.golang.org/protobuf v1.30.0 // indirect ) replace google.golang.org/grpc => ../.. 
diff --git a/interop/observability/go.sum b/interop/observability/go.sum index b2c181710f6a..f6008bb2ae1d 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -34,49 +34,68 @@ cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34h cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= -cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod 
h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= cloud.google.com/go/artifactregistry v1.9.0/go.mod 
h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod 
h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -86,26 +105,34 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= 
cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= 
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -117,7 +144,6 @@ cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= @@ -127,30 +153,42 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod 
h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= cloud.google.com/go/dataplex v1.3.0/go.mod 
h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= @@ -158,91 +196,123 @@ cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZx cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= 
cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts 
v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= 
cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= 
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= +cloud.google.com/go/lifesciences v0.8.0/go.mod 
h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.0 h1:v+X4EwhHl6xE+TG1XgXj4T1XpKKs7ZevcAJ3FOu0YmY= -cloud.google.com/go/longrunning v0.4.0/go.mod h1:eF3Qsw58iX/bkKtVjMTYpH0LRjQ2goDkjkNQTlzq/ZM= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= cloud.google.com/go/memcache v1.7.0/go.mod 
h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= @@ -252,100 +322,133 @@ cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/ cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= 
cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= cloud.google.com/go/oslogin v1.6.0/go.mod 
h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= cloud.google.com/go/recaptchaenterprise 
v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= cloud.google.com/go/resourcemanager v1.3.0/go.mod 
h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= 
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= cloud.google.com/go/servicemanagement v1.4.0/go.mod 
h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -355,16 +458,21 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= 
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= @@ -372,41 +480,63 @@ cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/translate v1.3.0/go.mod 
h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod 
h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg 
v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= @@ -422,21 +552,38 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= 
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= 
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -470,8 +617,10 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -492,6 +641,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -514,6 +664,7 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod 
h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -543,13 +694,32 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= @@ -557,17 +727,22 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod 
h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -579,7 +754,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -590,7 +768,7 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -600,18 +778,34 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= 
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image 
v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -634,10 +828,11 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -673,7 +868,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -688,8 +883,9 @@ golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -717,9 +913,9 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -732,6 +928,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -747,6 +944,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -767,6 +965,7 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -780,11 +979,11 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -801,9 +1000,11 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 
h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -811,7 +1012,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -825,7 +1026,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -834,7 +1035,9 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -847,6 +1050,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -875,6 +1079,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -885,9 +1090,11 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -896,6 +1103,14 @@ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -949,8 +1164,11 @@ google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91 google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= -google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= 
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1031,7 +1249,6 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -1067,14 +1284,26 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa h1:qQPhfbPO23fwm/9lQr91L1u62Zo6cm+zI+slZT+uf+o= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 
h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1091,10 +1320,11 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1109,6 +1339,42 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= 
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod 
h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/profiling/proto/service.pb.go b/profiling/proto/service.pb.go index 384290deaf94..6ab6632c2198 100644 --- a/profiling/proto/service.pb.go +++ b/profiling/proto/service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: profiling/proto/service.proto diff --git a/reflection/grpc_reflection_v1/reflection.pb.go b/reflection/grpc_reflection_v1/reflection.pb.go index cee1e1d08a60..ececdb89c977 100644 --- a/reflection/grpc_reflection_v1/reflection.pb.go +++ b/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: grpc/reflection/v1/reflection.proto diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index 444f2a6f4f73..d54c07676d5b 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. @@ -39,11 +39,14 @@ const ( ) // The message sent by the client when calling ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. 
The server distinguishes requests by their @@ -91,6 +94,7 @@ func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetHost() string { if x != nil { return x.Host @@ -105,6 +109,7 @@ func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_ return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileByFilename() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { return x.FileByFilename @@ -112,6 +117,7 @@ func (x *ServerReflectionRequest) GetFileByFilename() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingSymbol() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { return x.FileContainingSymbol @@ -119,6 +125,7 @@ func (x *ServerReflectionRequest) GetFileContainingSymbol() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { return x.FileContainingExtension @@ -126,6 +133,7 @@ func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { return x.AllExtensionNumbersOfType @@ -133,6 +141,7 @@ func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetListServices() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { return x.ListServices @@ -146,6 +155,8 @@ type isServerReflectionRequest_MessageRequest interface { type ServerReflectionRequest_FileByFilename struct { // Find a proto file by the file name. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` } @@ -153,12 +164,16 @@ type ServerReflectionRequest_FileContainingSymbol struct { // Find the proto file that declares the given fully-qualified symbol name. // This field should be a fully-qualified symbol name // (e.g. .[.] or .). + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` } type ServerReflectionRequest_FileContainingExtension struct { // Find the proto file which defines an extension extending the given // message type with the given field number. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` } @@ -171,12 +186,16 @@ type ServerReflectionRequest_AllExtensionNumbersOfType struct { // StatusCode::UNIMPLEMENTED if it's not implemented. 
// This field should be a fully-qualified type name. The format is // . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` } type ServerReflectionRequest_ListServices struct { // List the full names of registered services. The content will not be // checked. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` } @@ -193,14 +212,19 @@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // The type name and extension number sent by the client when requesting // file_containing_extension. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Fully-qualified type name. The format should be . - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } func (x *ExtensionRequest) Reset() { @@ -235,6 +259,7 @@ func (*ExtensionRequest) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionRequest) GetContainingType() string { if x != nil { return x.ContainingType @@ -242,6 +267,7 @@ func (x *ExtensionRequest) GetContainingType() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionRequest) GetExtensionNumber() int32 { if x != nil { return x.ExtensionNumber @@ -250,12 +276,16 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { } // The message sent by the server to answer ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The server set one of the following fields according to the message_request // in the request. 
@@ -301,6 +331,7 @@ func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetValidHost() string { if x != nil { return x.ValidHost @@ -308,6 +339,7 @@ func (x *ServerReflectionResponse) GetValidHost() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { if x != nil { return x.OriginalRequest @@ -322,6 +354,7 @@ func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionRespon return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { return x.FileDescriptorResponse @@ -329,6 +362,7 @@ func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorRe return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { return x.AllExtensionNumbersResponse @@ -336,6 +370,7 @@ func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNu return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { return x.ListServicesResponse @@ -343,6 +378,7 @@ func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceRespons return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { return x.ErrorResponse @@ -361,21 +397,29 @@ type ServerReflectionResponse_FileDescriptorResponse struct { // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` } type ServerReflectionResponse_AllExtensionNumbersResponse struct { // This message is used to answer all_extension_numbers_of_type requst. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { // This message is used to answer list_services request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } type ServerReflectionResponse_ErrorResponse struct { // This message is used when an error occurs. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` } @@ -392,6 +436,8 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // Serialized FileDescriptorProto messages sent by the server answering // a file_by_filename, file_containing_symbol, or file_containing_extension // request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type FileDescriptorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -400,6 +446,8 @@ type FileDescriptorResponse struct { // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` } @@ -435,6 +483,7 @@ func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { if x != nil { return x.FileDescriptorProto @@ -444,6 +493,8 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. 
+// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionNumberResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -451,7 +502,10 @@ type ExtensionNumberResponse struct { // Full name of the base type, including the package name. The format // is . - BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } @@ -487,6 +541,7 @@ func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetBaseTypeName() string { if x != nil { return x.BaseTypeName @@ -494,6 +549,7 @@ func (x *ExtensionNumberResponse) GetBaseTypeName() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { if x != nil { return x.ExtensionNumber @@ -502,6 +558,8 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { } // A list of ServiceResponse sent by the server answering list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
type ListServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -509,6 +567,8 @@ type ListServiceResponse struct { // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` } @@ -544,6 +604,7 @@ func (*ListServiceResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ListServiceResponse) GetService() []*ServiceResponse { if x != nil { return x.Service @@ -553,6 +614,8 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // The information of a single service used by ListServiceResponse to answer // list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -560,6 +623,8 @@ type ServiceResponse struct { // Full name of a registered service, including its package name. The format // is . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -595,6 +660,7 @@ func (*ServiceResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServiceResponse) GetName() string { if x != nil { return x.Name @@ -603,13 +669,18 @@ func (x *ServiceResponse) GetName() string { } // The error code and error message sent by the server when an error occurs. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ErrorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // This field uses the error codes defined in grpc::StatusCode. - ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } @@ -645,6 +716,7 @@ func (*ErrorResponse) Descriptor() ([]byte, []int) { return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ErrorResponse) GetErrorCode() int32 { if x != nil { return x.ErrorCode @@ -652,6 +724,7 @@ func (x *ErrorResponse) GetErrorCode() int32 { return 0 } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ErrorResponse) GetErrorMessage() string { if x != nil { return x.ErrorMessage diff --git a/reflection/grpc_testing/proto2.pb.go b/reflection/grpc_testing/proto2.pb.go index 000220b9850c..b4471a86c5ef 100644 --- a/reflection/grpc_testing/proto2.pb.go +++ b/reflection/grpc_testing/proto2.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: reflection/grpc_testing/proto2.proto diff --git a/reflection/grpc_testing/proto2_ext.pb.go b/reflection/grpc_testing/proto2_ext.pb.go index c38d5c4c7aba..0b2147df6b2d 100644 --- a/reflection/grpc_testing/proto2_ext.pb.go +++ b/reflection/grpc_testing/proto2_ext.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: reflection/grpc_testing/proto2_ext.proto diff --git a/reflection/grpc_testing/proto2_ext2.pb.go b/reflection/grpc_testing/proto2_ext2.pb.go index 625d7a490b27..8776f65e2872 100644 --- a/reflection/grpc_testing/proto2_ext2.pb.go +++ b/reflection/grpc_testing/proto2_ext2.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: reflection/grpc_testing/proto2_ext2.proto diff --git a/reflection/grpc_testing/test.pb.go b/reflection/grpc_testing/test.pb.go index 40b35fda1ef4..6c9cd21f8f50 100644 --- a/reflection/grpc_testing/test.pb.go +++ b/reflection/grpc_testing/test.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: reflection/grpc_testing/test.proto diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index 34ab57d1fbfa..af92a29b0f01 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -3,19 +3,19 @@ module google.golang.org/grpc/security/advancedtls/examples go 1.17 require ( - google.golang.org/grpc v1.52.0 - google.golang.org/grpc/examples v0.0.0-20230111003119-9b73c42daa31 - google.golang.org/grpc/security/advancedtls v0.0.0-20230111003119-9b73c42daa31 + google.golang.org/grpc v1.53.0 + google.golang.org/grpc/examples v0.0.0-20230318005552-70c52915099a + google.golang.org/grpc/security/advancedtls v0.0.0-20230318005552-70c52915099a ) require ( - github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/crypto v0.5.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + golang.org/x/crypto v0.7.0 // indirect golang.org/x/net v0.8.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/protobuf v1.30.0 // indirect ) replace google.golang.org/grpc => ../../.. 
diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 73641bde1039..02a09bda89bd 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,11 +1,11 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= @@ -13,9 +13,9 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f 
h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index d7f44b15adbc..e9888f020960 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -4,18 +4,18 @@ go 1.17 require ( github.com/hashicorp/golang-lru v0.5.4 - golang.org/x/crypto v0.5.0 - google.golang.org/grpc v1.52.0 + golang.org/x/crypto v0.7.0 + google.golang.org/grpc v1.53.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) require ( - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect golang.org/x/net v0.8.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/protobuf v1.30.0 // indirect ) replace google.golang.org/grpc => ../../ diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum 
index e9dfc37082a5..3e6e64f444b5 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,12 +1,12 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= @@ -14,9 +14,9 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f 
h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/security/authorization/go.mod b/security/authorization/go.mod index 06259f41e754..737244a2696b 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -3,20 +3,19 @@ module google.golang.org/grpc/security/authorization go 1.17 require ( - github.com/envoyproxy/go-control-plane v0.10.3 - github.com/google/cel-go v0.12.5 + github.com/envoyproxy/go-control-plane v0.11.0 + github.com/google/cel-go v0.13.0 github.com/google/go-cmp v0.5.9 - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f - google.golang.org/grpc v1.52.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 + google.golang.org/grpc v1.53.0 + google.golang.org/protobuf v1.30.0 ) require ( github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect - github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect - github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect - 
github.com/golang/protobuf v1.5.2 // indirect - github.com/stoewer/go-strcase v1.2.0 // indirect - golang.org/x/net v0.8.0 // indirect + github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/stoewer/go-strcase v1.2.1 // indirect golang.org/x/text v0.8.0 // indirect ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index ce3d88ce2187..dc3b5eaeb6eb 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -1,460 +1,88 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery 
v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod 
h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cel-go v0.12.5 h1:DmzaiSgoaqGCjtpPQWl26/gND+yRpim56H1jCVev6d8= -github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/cel-go v0.13.0 h1:z+8OBOcmh7IeKyqwT/6IlnMvy621fYUqnTVPEdegGlU= +github.com/google/cel-go v0.13.0/go.mod h1:K2hpQgEjDp18J76a2DKFRlPBPpgRZgi6EbnpDgIhJ8s= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lyft/protoc-gen-star 
v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stoewer/go-strcase v1.2.1 h1:/1JWd+AcWPzkcGLEmjUCka99YqGOtTnp1H/wcP+uap4= +github.com/stoewer/go-strcase v1.2.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= 
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf 
v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index a1980f2c43c7..9bce90a995e6 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -15,5 +15,5 @@ require ( golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/protobuf v1.29.1 // indirect + google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index df04653cfc1d..da89ea72944f 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -105,8 +105,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/stress/grpc_testing/metrics.pb.go b/stress/grpc_testing/metrics.pb.go index a7e49565a189..83d76795fa74 100644 --- a/stress/grpc_testing/metrics.pb.go +++ 
b/stress/grpc_testing/metrics.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: stress/grpc_testing/metrics.proto diff --git a/test/codec_perf/perf.pb.go b/test/codec_perf/perf.pb.go index 2602db7d3a22..c166c79e3db9 100644 --- a/test/codec_perf/perf.pb.go +++ b/test/codec_perf/perf.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: test/codec_perf/perf.proto diff --git a/test/grpc_testing/test.pb.go b/test/grpc_testing/test.pb.go index 1ad453ab61e7..641640f012dc 100644 --- a/test/grpc_testing/test.pb.go +++ b/test/grpc_testing/test.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v4.22.0 // source: test/grpc_testing/test.proto diff --git a/test/tools/go.mod b/test/tools/go.mod index 3a148eb8f738..01937569f6db 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -3,13 +3,11 @@ module google.golang.org/grpc/test/tools go 1.14 require ( - github.com/BurntSushi/toml v1.2.1 // indirect github.com/client9/misspell v0.3.4 - github.com/golang/protobuf v1.5.2 - golang.org/x/exp/typeparams v0.0.0-20230108222341-4b8118a2686a // indirect + github.com/golang/protobuf v1.5.3 + golang.org/x/exp/typeparams v0.0.0-20230315142452-642cacee5cc0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/sys v0.6.0 // indirect - golang.org/x/tools v0.6.0 - google.golang.org/protobuf v1.28.1 // indirect - honnef.co/go/tools v0.3.3 + golang.org/x/tools v0.7.0 + google.golang.org/protobuf v1.30.0 // indirect + honnef.co/go/tools v0.4.3 ) diff --git a/test/tools/go.sum b/test/tools/go.sum index 4c4b60ffc647..807d022fa890 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -1,71 
+1,74 @@ -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20230108222341-4b8118a2686a h1:uDSAx3XXnfrX4V3OAQGdxSR6CWIHAls7RJdpfCYaakI= 
-golang.org/x/exp/typeparams v0.0.0-20230108222341-4b8118a2686a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230315142452-642cacee5cc0 h1:cW6TvM2r3dR3P6P9tWdY51IZSMyVyh9ArUVnEmImTDM= +golang.org/x/exp/typeparams v0.0.0-20230315142452-642cacee5cc0/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net 
v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod 
h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.4.1-0.20221208213631-3f74d914ae6d/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 
h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= -honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= +honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= diff --git a/test/xds/xds_client_ack_nack_test.go b/test/xds/xds_client_ack_nack_test.go index 4ba750a05025..3b2a4329e17d 100644 --- a/test/xds/xds_client_ack_nack_test.go +++ b/test/xds/xds_client_ack_nack_test.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" testgrpc "google.golang.org/grpc/test/grpc_testing" testpb "google.golang.org/grpc/test/grpc_testing" @@ -116,7 +117,7 @@ func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { } return nil }, - OnStreamClosed: func(int64) { + OnStreamClosed: func(int64, *v3corepb.Node) { streamRestarted.Fire() }, }) diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go index 
1513940c940b..a69799aa730a 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" testgrpc "google.golang.org/grpc/test/grpc_testing" @@ -68,7 +69,7 @@ func (s) TestErrorFromParentLB_ConnectionError(t *testing.T) { streamClosedCh := make(chan struct{}, 1) managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ Listener: lis, - OnStreamClosed: func(int64) { + OnStreamClosed: func(int64, *v3corepb.Node) { select { case streamClosedCh <- struct{}{}: default: diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 896ef347cafe..2a953cf45202 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -42,11 +42,6 @@ import ( ) const ( - // The "server_features" field in the bootstrap file contains a list of - // features supported by the server. A value of "xds_v3" indicates that the - // server supports the v3 version of the xDS transport protocol. 
- serverFeaturesV3 = "xds_v3" - gRPCUserAgentName = "gRPC Go" clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" clientFeatureResourceWrapper = "xds.config.resource-in-sotw" @@ -189,10 +184,7 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) } - sc.Creds = ChannelCreds{ - Type: cc.Type, - Config: cc.Config, - } + sc.Creds = ChannelCreds(cc) sc.credsDialOption = grpc.WithCredentialsBundle(bundle) break } diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 7aa409b24c88..4ad4739ace5a 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -196,15 +196,6 @@ var ( UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, } - nilCredsConfigV3 = &Config{ - XDSServer: &ServerConfig{ - ServerURI: "trafficdirector.googleapis.com:443", - Creds: ChannelCreds{Type: "insecure"}, - ServerFeatures: []string{"xds_v3"}, - }, - NodeProto: v3NodeProto, - ClientDefaultListenerResourceNameTemplate: "%s", - } nilCredsConfigNoServerFeatures = &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", diff --git a/xds/internal/xdsclient/transport/transport_backoff_test.go b/xds/internal/xdsclient/transport/transport_backoff_test.go index 2838eae7554e..db7587ca7c30 100644 --- a/xds/internal/xdsclient/transport/transport_backoff_test.go +++ b/xds/internal/xdsclient/transport/transport_backoff_test.go @@ -62,7 +62,7 @@ func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { streamErr := errors.New("ADS stream error") mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ // Push on a channel whenever the stream is closed. 
- OnStreamClosed: func(int64) { + OnStreamClosed: func(int64, *v3corepb.Node) { select { case streamCloseCh <- struct{}{}: default: From 42dd7ac9d9dfb163e922fe5236c6cef59967ed1d Mon Sep 17 00:00:00 2001 From: ulas <45449532+ulascansenturk@users.noreply.github.com> Date: Thu, 30 Mar 2023 03:04:12 +0300 Subject: [PATCH 852/998] Use anypb.New instead of ptypes.MarshalAny (#6074) --- channelz/service/func_linux.go | 9 +++++---- channelz/service/service.go | 7 +++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/channelz/service/func_linux.go b/channelz/service/func_linux.go index ae759aa74212..0873603c8520 100644 --- a/channelz/service/func_linux.go +++ b/channelz/service/func_linux.go @@ -25,6 +25,7 @@ import ( durpb "github.com/golang/protobuf/ptypes/duration" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" + "google.golang.org/protobuf/types/known/anypb" ) func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { @@ -34,7 +35,7 @@ func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOption { var opts []*channelzpb.SocketOption if skopts.Linger != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionLinger{ + additional, err := anypb.New(&channelzpb.SocketOptionLinger{ Active: skopts.Linger.Onoff != 0, Duration: convertToPtypesDuration(int64(skopts.Linger.Linger), 0), }) @@ -48,7 +49,7 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio } } if skopts.RecvTimeout != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ + additional, err := anypb.New(&channelzpb.SocketOptionTimeout{ Duration: convertToPtypesDuration(int64(skopts.RecvTimeout.Sec), int64(skopts.RecvTimeout.Usec)), }) if err == nil { @@ -61,7 +62,7 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio 
} } if skopts.SendTimeout != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ + additional, err := anypb.New(&channelzpb.SocketOptionTimeout{ Duration: convertToPtypesDuration(int64(skopts.SendTimeout.Sec), int64(skopts.SendTimeout.Usec)), }) if err == nil { @@ -74,7 +75,7 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio } } if skopts.TCPInfo != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTcpInfo{ + additional, err := anypb.New(&channelzpb.SocketOptionTcpInfo{ TcpiState: uint32(skopts.TCPInfo.State), TcpiCaState: uint32(skopts.TCPInfo.Ca_state), TcpiRetransmits: uint32(skopts.TCPInfo.Retransmits), diff --git a/channelz/service/service.go b/channelz/service/service.go index 9e325376f6cd..ae19ed3792ea 100644 --- a/channelz/service/service.go +++ b/channelz/service/service.go @@ -25,15 +25,18 @@ import ( "github.com/golang/protobuf/ptypes" wrpb "github.com/golang/protobuf/ptypes/wrappers" - "google.golang.org/grpc" channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/status" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/types/known/anypb" ) func init() { @@ -187,7 +190,7 @@ func securityToProto(se credentials.ChannelzSecurityValue) *channelzpb.Security otherSecurity := &channelzpb.Security_OtherSecurity{ Name: v.Name, } - if anyval, err := ptypes.MarshalAny(v.Value); err == nil { + if anyval, err := anypb.New(protoadapt.MessageV2Of(v.Value)); err == nil { otherSecurity.Value = anyval } return &channelzpb.Security{Model: &channelzpb.Security_Other{Other: otherSecurity}} From c3f1d5e59efa9f154cde80d74d0110394c01d464 Mon Sep 17 
00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 30 Mar 2023 15:36:17 -0400 Subject: [PATCH 853/998] gcp/observability: Set the opencensus_task label only for metrics, not tracing and logging (#6155) --- gcp/observability/opencensus.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go index 54ad84fa4796..abd3cb55b803 100644 --- a/gcp/observability/opencensus.go +++ b/gcp/observability/opencensus.go @@ -87,12 +87,16 @@ func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { // Custom labels completly overwrite any labels generated in the OpenCensus // library, including their label that uniquely identifies the process. // Thus, generate a unique process identifier here to uniquely identify - // process. - config.Labels["opencensus_task"] = generateUniqueProcessIdentifier() + // process for metrics exporting to function correctly. 
+ metricsLabels := make(map[string]string, len(config.Labels)+1) + for k, v := range config.Labels { + metricsLabels[k] = v + } + metricsLabels["opencensus_task"] = generateUniqueProcessIdentifier() exporter, err := stackdriver.NewExporter(stackdriver.Options{ ProjectID: config.ProjectID, MonitoredResource: mr, - DefaultMonitoringLabels: labelsToMonitoringLabels(config.Labels), + DefaultMonitoringLabels: labelsToMonitoringLabels(metricsLabels), DefaultTraceAttributes: labelsToTraceAttributes(config.Labels), MonitoringClientOptions: cOptsDisableLogTrace, TraceClientOptions: cOptsDisableLogTrace, From 4a12595692aeee6d0a4366063930c9b709dfef4a Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 30 Mar 2023 15:37:05 -0400 Subject: [PATCH 854/998] stats/opencensus: Switch helper to return Span Context from context (#6156) --- interop/observability/go.mod | 2 -- interop/observability/go.sum | 2 ++ stats/opencensus/opencensus.go | 13 ++++++++----- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/interop/observability/go.mod b/interop/observability/go.mod index c964c954f168..e33455097b1c 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -42,5 +42,3 @@ require ( replace google.golang.org/grpc => ../.. 
replace google.golang.org/grpc/gcp/observability => ../../gcp/observability - -replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/interop/observability/go.sum b/interop/observability/go.sum index f6008bb2ae1d..151bf01079fa 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -1305,6 +1305,8 @@ google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 h1:MeDNVH2KmQ9Z3AbXKsvU9UcbRR8LfpZVLmZAVWIX0nI= +google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204/go.mod h1:Dg7VaOjf0r9QhRn/YpwSf3vKQz1ixulffTlhEarxEXA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index 4f63b8c24b39..f1cd0880d35a 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -159,15 +159,18 @@ func getRPCInfo(ctx context.Context) *rpcInfo { return ri } -// GetTraceAndSpanID returns the trace and span ID of the span in the context. -// Returns true if IDs present and false if IDs not present. -func GetTraceAndSpanID(ctx context.Context) (trace.TraceID, trace.SpanID, bool) { +// SpanContextFromContext returns the Span Context about the Span in the +// context. 
Returns false if no Span in the context. +func SpanContextFromContext(ctx context.Context) (trace.SpanContext, bool) { ri, ok := ctx.Value(rpcInfoKey{}).(*rpcInfo) if !ok { - return trace.TraceID{}, trace.SpanID{}, false + return trace.SpanContext{}, false + } + if ri.ti == nil || ri.ti.span == nil { + return trace.SpanContext{}, false } sc := ri.ti.span.SpanContext() - return sc.TraceID, sc.SpanID, true + return sc, true } type clientStatsHandler struct { From 113d75fb456133e047ecad899ab86ef2649c4b31 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 30 Mar 2023 20:10:51 -0400 Subject: [PATCH 855/998] gcp/observability: Add isSampled bool to log entries (#6160) --- gcp/observability/go.mod | 2 +- gcp/observability/go.sum | 4 +-- gcp/observability/logging.go | 27 ++++++++++-------- gcp/observability/logging_test.go | 5 ++-- gcp/observability/observability_test.go | 38 ++++++++++++++----------- interop/observability/go.mod | 2 +- interop/observability/go.sum | 4 +-- 7 files changed, 45 insertions(+), 37 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 6412012b1e2a..695fa3700c80 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -11,7 +11,7 @@ require ( golang.org/x/oauth2 v0.5.0 google.golang.org/api v0.109.0 google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba - google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 + google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae ) require ( diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index bbe99db6276d..3865c5ce1014 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -598,8 +598,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba h1:puuDphNHQZRngQpzUGvfXMBFBv6DuahfWMZaj0jVtjw= google.golang.org/grpc 
v1.53.0-dev.0.20230315171901-a1e657ce53ba/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 h1:MeDNVH2KmQ9Z3AbXKsvU9UcbRR8LfpZVLmZAVWIX0nI= -google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204/go.mod h1:Dg7VaOjf0r9QhRn/YpwSf3vKQz1ixulffTlhEarxEXA= +google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae h1:40UWCQ40A2NTDabsmbZNznFf9SUftDlaBASj7OCdKDY= +google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae/go.mod h1:qPsHQZhltTPryCUC0naykSpbIJDodlCLM/vNa607CrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index bcf7a6a7cf8b..bec80140275b 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -318,24 +318,27 @@ func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog grpcLogEntry.MethodName = bml.methodName grpcLogEntry.Authority = bml.authority - gcploggingEntry := gcplogging.Entry{ - Timestamp: binLogEntry.GetTimestamp().AsTime(), - Severity: 100, - Payload: grpcLogEntry, - } + var sc trace.SpanContext + var ok bool if bml.clientSide { // client side span, populated through opencensus trace package. 
if span := trace.FromContext(ctx); span != nil { - sc := span.SpanContext() - gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + sc.TraceID.String() - gcploggingEntry.SpanID = sc.SpanID.String() + sc = span.SpanContext() + ok = true } } else { // server side span, populated through stats/opencensus package. - if tID, sID, ok := opencensus.GetTraceAndSpanID(ctx); ok { - gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + tID.String() - gcploggingEntry.SpanID = sID.String() - } + sc, ok = opencensus.SpanContextFromContext(ctx) + } + gcploggingEntry := gcplogging.Entry{ + Timestamp: binLogEntry.GetTimestamp().AsTime(), + Severity: 100, + Payload: grpcLogEntry, + } + if ok { + gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + sc.TraceID.String() + gcploggingEntry.SpanID = sc.SpanID.String() + gcploggingEntry.TraceSampled = sc.IsSampled() } return gcploggingEntry } diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index e76ba386d235..31fe89fe18dd 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -80,8 +80,9 @@ func (fle *fakeLoggingExporter) EmitGcpLoggingEntry(entry gcplogging.Entry) { } ids := &traceAndSpanIDString{ - traceID: entry.Trace, - spanID: entry.SpanID, + traceID: entry.Trace, + spanID: entry.SpanID, + isSampled: entry.TraceSampled, } fle.idsSeen = append(fle.idsSeen, ids) diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 96372bea6a38..a32311f7bb74 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -110,22 +110,25 @@ func (fe *fakeOpenCensusExporter) ExportView(vd *view.Data) { } type traceAndSpanID struct { - spanName string - traceID trace.TraceID - spanID trace.SpanID + spanName string + traceID trace.TraceID + spanID trace.SpanID + isSampled bool } type traceAndSpanIDString struct { - traceID string - spanID string + traceID string + 
spanID string + isSampled bool } // idsToString is a helper that converts from generated trace and span IDs to // the string version stored in trace message events. -func idsToString(tasi traceAndSpanID, projectID string) traceAndSpanIDString { +func (tasi *traceAndSpanID) idsToString(projectID string) traceAndSpanIDString { return traceAndSpanIDString{ - traceID: "projects/" + projectID + "/traces/" + tasi.traceID.String(), - spanID: tasi.spanID.String(), + traceID: "projects/" + projectID + "/traces/" + tasi.traceID.String(), + spanID: tasi.spanID.String(), + isSampled: tasi.isSampled, } } @@ -135,9 +138,10 @@ func (fe *fakeOpenCensusExporter) ExportSpan(vd *trace.SpanData) { // will populate different contexts throughout the system, convert in // caller to string version as the logging code does. fe.idCh.Send(traceAndSpanID{ - spanName: vd.Name, - traceID: vd.TraceID, - spanID: vd.SpanID, + spanName: vd.Name, + traceID: vd.TraceID, + spanID: vd.SpanID, + isSampled: vd.IsSampled(), }) } @@ -650,7 +654,7 @@ func (s) TestLoggingLinkedWithTraceClientSide(t *testing.T) { var tasiSent traceAndSpanIDString for _, tasi := range traceAndSpanIDs { if strings.HasPrefix(tasi.spanName, "Sent.") { - tasiSent = idsToString(tasi, projectID) + tasiSent = tasi.idsToString(projectID) continue } } @@ -792,7 +796,7 @@ func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) { var tasiServer traceAndSpanIDString for _, tasi := range traceAndSpanIDs { if strings.HasPrefix(tasi.spanName, "grpc.") { - tasiServer = idsToString(tasi, projectID) + tasiServer = tasi.idsToString(projectID) continue } } @@ -944,11 +948,11 @@ func (s) TestLoggingLinkedWithTrace(t *testing.T) { var tasiServer traceAndSpanIDString for _, tasi := range traceAndSpanIDs { if strings.HasPrefix(tasi.spanName, "Sent.") { - tasiSent = idsToString(tasi, projectID) + tasiSent = tasi.idsToString(projectID) continue } if strings.HasPrefix(tasi.spanName, "grpc.") { - tasiServer = idsToString(tasi, projectID) + tasiServer 
= tasi.idsToString(projectID) } } @@ -1024,11 +1028,11 @@ func (s) TestLoggingLinkedWithTrace(t *testing.T) { var tasiServer traceAndSpanIDString for _, tasi := range traceAndSpanIDs { if strings.HasPrefix(tasi.spanName, "Sent.") { - tasiSent = idsToString(tasi, projectID) + tasiSent = tasi.idsToString(projectID) continue } if strings.HasPrefix(tasi.spanName, "grpc.") { - tasiServer = idsToString(tasi, projectID) + tasiServer = tasi.idsToString(projectID) } } diff --git a/interop/observability/go.mod b/interop/observability/go.mod index e33455097b1c..6155ccd3af44 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -35,7 +35,7 @@ require ( google.golang.org/api v0.110.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 // indirect + google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 151bf01079fa..603eea94add6 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -1305,8 +1305,8 @@ google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 h1:MeDNVH2KmQ9Z3AbXKsvU9UcbRR8LfpZVLmZAVWIX0nI= -google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204/go.mod h1:Dg7VaOjf0r9QhRn/YpwSf3vKQz1ixulffTlhEarxEXA= +google.golang.org/grpc/stats/opencensus 
v0.0.0-20230330193705-4a12595692ae h1:40UWCQ40A2NTDabsmbZNznFf9SUftDlaBASj7OCdKDY= +google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae/go.mod h1:qPsHQZhltTPryCUC0naykSpbIJDodlCLM/vNa607CrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From c2899dddf5a69caf81b5c784fec6d1c86af54869 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 30 Mar 2023 20:46:44 -0400 Subject: [PATCH 856/998] examples/features/observability: Point o11y example to latest gcp/observability module (#6162) --- examples/features/observability/go.mod | 6 +++--- examples/features/observability/go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/features/observability/go.mod b/examples/features/observability/go.mod index aabfc0c024f7..e6d69f646bcb 100644 --- a/examples/features/observability/go.mod +++ b/examples/features/observability/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( google.golang.org/grpc v1.54.0 google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc - google.golang.org/grpc/gcp/observability v0.0.0-20230323213306-0fdfd40215dc + google.golang.org/grpc/gcp/observability v0.0.0-20230331001051-113d75fb4561 ) require ( @@ -36,6 +36,6 @@ require ( google.golang.org/api v0.109.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect - google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 // indirect - google.golang.org/protobuf v1.29.1 // indirect + google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae // indirect + google.golang.org/protobuf 
v1.30.0 // indirect ) diff --git a/examples/features/observability/go.sum b/examples/features/observability/go.sum index 8cf76ce1d480..d6afd6f4a69d 100644 --- a/examples/features/observability/go.sum +++ b/examples/features/observability/go.sum @@ -600,10 +600,10 @@ google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc h1:H58v4RmBwciuKpwU6NFUn3w2hPZNL78HedaJUitCdpI= google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc/go.mod h1:EXfxRt8PpWkTFBAXaWXB0Xgb1S/FFBXvFRry0nr2bHQ= -google.golang.org/grpc/gcp/observability v0.0.0-20230323213306-0fdfd40215dc h1:Zglt738ox4oYU2Ghlyz1ifPgRkyXNv+DvQNSNnIQgFU= -google.golang.org/grpc/gcp/observability v0.0.0-20230323213306-0fdfd40215dc/go.mod h1:5pEder79lE7uwh+IMQnYSAo7RrzRBH7RwxpXkPYiTnM= -google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204 h1:MeDNVH2KmQ9Z3AbXKsvU9UcbRR8LfpZVLmZAVWIX0nI= -google.golang.org/grpc/stats/opencensus v0.0.0-20230317183452-b638faff2204/go.mod h1:Dg7VaOjf0r9QhRn/YpwSf3vKQz1ixulffTlhEarxEXA= +google.golang.org/grpc/gcp/observability v0.0.0-20230331001051-113d75fb4561 h1:8JM4LQVl9mzgGRprxdWVXGhSZ2QMzG/AcAAOp7vYruU= +google.golang.org/grpc/gcp/observability v0.0.0-20230331001051-113d75fb4561/go.mod h1:53Tjq5tV93OKCXBanbDNe1ZE112hGrkMi2jeC0ArL0A= +google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae h1:40UWCQ40A2NTDabsmbZNznFf9SUftDlaBASj7OCdKDY= +google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae/go.mod h1:qPsHQZhltTPryCUC0naykSpbIJDodlCLM/vNa607CrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -617,8 +617,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= From e97991991ce64c231c298dec9ee1714e0930ba7f Mon Sep 17 00:00:00 2001 From: my4 <38831921+my4-dev@users.noreply.github.com> Date: Sat, 1 Apr 2023 02:13:33 +0900 Subject: [PATCH 857/998] internal/grpcsync: move CallbackSerializer from xdsclient/internal to here (#6153) --- .../grpcsync}/callback_serializer.go | 18 +++++++++--------- .../grpcsync}/callback_serializer_test.go | 13 +++++++++---- xds/internal/xdsclient/authority.go | 5 +++-- xds/internal/xdsclient/authority_test.go | 3 ++- xds/internal/xdsclient/client_new.go | 2 +- xds/internal/xdsclient/clientimpl.go | 2 +- 6 files changed, 25 insertions(+), 18 deletions(-) rename {xds/internal/xdsclient => internal/grpcsync}/callback_serializer.go (76%) rename {xds/internal/xdsclient => internal/grpcsync}/callback_serializer_test.go (95%) diff --git a/xds/internal/xdsclient/callback_serializer.go 
b/internal/grpcsync/callback_serializer.go similarity index 76% rename from xds/internal/xdsclient/callback_serializer.go rename to internal/grpcsync/callback_serializer.go index 4c799e21638c..79993d34375a 100644 --- a/xds/internal/xdsclient/callback_serializer.go +++ b/internal/grpcsync/callback_serializer.go @@ -16,7 +16,7 @@ * */ -package xdsclient +package grpcsync import ( "context" @@ -24,22 +24,22 @@ import ( "google.golang.org/grpc/internal/buffer" ) -// callbackSerializer provides a mechanism to schedule callbacks in a +// CallbackSerializer provides a mechanism to schedule callbacks in a // synchronized manner. It provides a FIFO guarantee on the order of execution // of scheduled callbacks. New callbacks can be scheduled by invoking the // Schedule() method. // // This type is safe for concurrent access. -type callbackSerializer struct { +type CallbackSerializer struct { callbacks *buffer.Unbounded } -// newCallbackSerializer returns a new callbackSerializer instance. The provided +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided // context will be passed to the scheduled callbacks. Users should cancel the -// provided context to shutdown the callbackSerializer. It is guaranteed that no +// provided context to shutdown the CallbackSerializer. It is guaranteed that no // callbacks will be executed once this context is canceled. -func newCallbackSerializer(ctx context.Context) *callbackSerializer { - t := &callbackSerializer{callbacks: buffer.NewUnbounded()} +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + t := &CallbackSerializer{callbacks: buffer.NewUnbounded()} go t.run(ctx) return t } @@ -48,11 +48,11 @@ func newCallbackSerializer(ctx context.Context) *callbackSerializer { // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. 
-func (t *callbackSerializer) Schedule(f func(ctx context.Context)) { +func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) { t.callbacks.Put(f) } -func (t *callbackSerializer) run(ctx context.Context) { +func (t *CallbackSerializer) run(ctx context.Context) { for ctx.Err() == nil { select { case <-ctx.Done(): diff --git a/xds/internal/xdsclient/callback_serializer_test.go b/internal/grpcsync/callback_serializer_test.go similarity index 95% rename from xds/internal/xdsclient/callback_serializer_test.go rename to internal/grpcsync/callback_serializer_test.go index bf5339971be5..6cb1ee52d84a 100644 --- a/xds/internal/xdsclient/callback_serializer_test.go +++ b/internal/grpcsync/callback_serializer_test.go @@ -16,7 +16,7 @@ * */ -package xdsclient +package grpcsync import ( "context" @@ -28,11 +28,16 @@ import ( "github.com/google/go-cmp/cmp" ) +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. +) + // TestCallbackSerializer_Schedule_FIFO verifies that callbacks are executed in // the same order in which they were scheduled. func (s) TestCallbackSerializer_Schedule_FIFO(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - cs := newCallbackSerializer(ctx) + cs := NewCallbackSerializer(ctx) defer cancel() // We have two channels, one to record the order of scheduling, and the @@ -100,7 +105,7 @@ func (s) TestCallbackSerializer_Schedule_FIFO(t *testing.T) { // scheduled callbacks get executed. func (s) TestCallbackSerializer_Schedule_Concurrent(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - cs := newCallbackSerializer(ctx) + cs := NewCallbackSerializer(ctx) defer cancel() // Schedule callbacks concurrently by calling Schedule() from goroutines. 
@@ -136,7 +141,7 @@ func (s) TestCallbackSerializer_Schedule_Concurrent(t *testing.T) { // are not executed once Close() returns. func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - cs := newCallbackSerializer(ctx) + cs := NewCallbackSerializer(ctx) // Schedule a callback which blocks until the context passed to it is // canceled. It also closes a couple of channels to signal that it started diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 0a03e43a3148..af9b84d6cb26 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/transport" @@ -65,7 +66,7 @@ type authority struct { serverCfg *bootstrap.ServerConfig // Server config for this authority bootstrapCfg *bootstrap.Config // Full bootstrap configuration refCount int // Reference count of watches referring to this authority - serializer *callbackSerializer // Callback serializer for invoking watch callbacks + serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup transport *transport.Transport // Underlying xDS transport to the management server watchExpiryTimeout time.Duration // Resource watch expiry timeout @@ -99,7 +100,7 @@ type authorityArgs struct { // the second case. 
serverCfg *bootstrap.ServerConfig bootstrapCfg *bootstrap.Config - serializer *callbackSerializer + serializer *grpcsync.CallbackSerializer resourceTypeGetter func(string) xdsresource.Type watchExpiryTimeout time.Duration logger *grpclog.PrefixLogger diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 928e83f6c878..1931bc2a7f5b 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -25,6 +25,7 @@ import ( "time" "github.com/google/uuid" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" @@ -69,7 +70,7 @@ func setupTest(ctx context.Context, t *testing.T, opts e2e.ManagementServerOptio bootstrapCfg: &bootstrap.Config{ NodeProto: &v3corepb.Node{Id: nodeID}, }, - serializer: newCallbackSerializer(ctx), + serializer: grpcsync.NewCallbackSerializer(ctx), resourceTypeGetter: rtRegistry.get, watchExpiryTimeout: watchExpiryTimeout, logger: nil, diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 8335f9a88a8f..b330c19dfd17 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -69,7 +69,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i done: grpcsync.NewEvent(), config: config, watchExpiryTimeout: watchExpiryTimeout, - serializer: newCallbackSerializer(ctx), + serializer: grpcsync.NewCallbackSerializer(ctx), serializerClose: cancel, resourceTypes: newResourceTypeRegistry(), authorities: make(map[string]*authority), diff --git a/xds/internal/xdsclient/clientimpl.go b/xds/internal/xdsclient/clientimpl.go index 261b6bf48f9a..2c05ea66f5f9 100644 --- a/xds/internal/xdsclient/clientimpl.go +++ b/xds/internal/xdsclient/clientimpl.go @@ -37,7 +37,7 @@ type clientImpl struct { config *bootstrap.Config logger 
*grpclog.PrefixLogger watchExpiryTimeout time.Duration - serializer *callbackSerializer + serializer *grpcsync.CallbackSerializer serializerClose func() resourceTypes *resourceTypeRegistry From a51779dfbf331faa37864cd80fa2166dbc102dfc Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Mon, 3 Apr 2023 11:54:07 -0700 Subject: [PATCH 858/998] xdsclient/test: deflake TestTimerAndWatchStateOnSendCallback (#6169) --- xds/internal/xdsclient/authority_test.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 1931bc2a7f5b..9b02cd7d7618 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -124,17 +124,20 @@ func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { if err := updateResourceInServer(ctx, ms, rn, nodeID); err != nil { t.Fatalf("Failed to update server with resource: %q; err: %q", rn, err) } - select { - case <-ctx.Done(): - t.Fatal("Test timed out before watcher received an update from server.") - case err := <-w.ErrorCh: - t.Fatalf("Watch got an unexpected error update: %q. Want valid updates.", err) - case <-w.UpdateCh: - // This means the OnUpdate callback was invoked and the watcher was notified. - } - if err := compareWatchState(a, rn, watchStateReceived); err != nil { - t.Fatal(err) + for { + select { + case <-ctx.Done(): + t.Fatal("Test timed out before watcher received an update from server.") + case <-w.ErrorCh: + case <-w.UpdateCh: + // This means the OnUpdate callback was invoked and the watcher was notified. 
+ if err := compareWatchState(a, rn, watchStateReceived); err != nil { + t.Fatal(err) + } + return + } } + } // This tests the resource's watch state transition when the ADS stream is closed From ea0a038347fc3c7fc16491f0f434465d8c7c7214 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 4 Apr 2023 10:11:54 -0700 Subject: [PATCH 859/998] xds/xdsclient: ignore resource deletion as per gRFC A53 (#6035) --- internal/testutils/xds/bootstrap/bootstrap.go | 7 + internal/testutils/xds/e2e/server.go | 6 + ...ds_client_ignore_resource_deletion_test.go | 480 ++++++++++++++++++ test/xds/xds_client_integration_test.go | 3 + xds/internal/xdsclient/authority.go | 31 +- xds/internal/xdsclient/authority_test.go | 2 +- xds/internal/xdsclient/bootstrap/bootstrap.go | 26 + .../xdsclient/bootstrap/bootstrap_test.go | 27 + 8 files changed, 574 insertions(+), 8 deletions(-) create mode 100644 test/xds/xds_client_ignore_resource_deletion_test.go diff --git a/internal/testutils/xds/bootstrap/bootstrap.go b/internal/testutils/xds/bootstrap/bootstrap.go index 88790716bf2a..786a6a4d7513 100644 --- a/internal/testutils/xds/bootstrap/bootstrap.go +++ b/internal/testutils/xds/bootstrap/bootstrap.go @@ -37,6 +37,10 @@ type Options struct { NodeID string // ServerURI is the address of the management server. ServerURI string + // IgnoreResourceDeletion, if true, results in a bootstrap config with the + // `server_features` list containing `ignore_resource_deletion`. This results + // in gRPC ignoring resource deletions from the management server, as per A53. + IgnoreResourceDeletion bool // ClientDefaultListenerResourceNameTemplate is the default listener // resource name template to be used on the gRPC client. 
ClientDefaultListenerResourceNameTemplate string @@ -108,6 +112,9 @@ func Contents(opts Options) ([]byte, error) { ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, } cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "xds_v3") + if opts.IgnoreResourceDeletion { + cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "ignore_resource_deletion") + } auths := make(map[string]authority) if envconfig.XDSFederation { diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go index 94ecd80eb907..d81c3405440b 100644 --- a/internal/testutils/xds/e2e/server.go +++ b/internal/testutils/xds/e2e/server.go @@ -81,6 +81,12 @@ type ManagementServerOptions struct { // set to true only for tests which explicitly require the other behavior. AllowResourceSubset bool + // ServerFeaturesIgnoreResourceDeletion, if set, results in a bootstrap config + // where the server features list contains `ignore_resource_deletion`. This + // results in gRPC ignoring resource deletions from the management server, as + // per A53. + ServerFeaturesIgnoreResourceDeletion bool + // The callbacks defined below correspond to the state of the world (sotw) // version of the xDS API on the management server. diff --git a/test/xds/xds_client_ignore_resource_deletion_test.go b/test/xds/xds_client_ignore_resource_deletion_test.go new file mode 100644 index 000000000000..2bb6959ae552 --- /dev/null +++ b/test/xds/xds_client_ignore_resource_deletion_test.go @@ -0,0 +1,480 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "net" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds" + + clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + testgrpc "google.golang.org/grpc/test/grpc_testing" + testpb "google.golang.org/grpc/test/grpc_testing" +) + +const ( + serviceName = "my-service-xds" + rdsName = "route-" + serviceName + cdsName1 = "cluster1-" + serviceName + cdsName2 = "cluster2-" + serviceName + edsName1 = "eds1-" + serviceName + edsName2 = "eds2-" + serviceName +) + +var ( + // This route configuration resource contains two routes: + // - a route for the EmptyCall rpc, to be sent to cluster1 + // - a route for the UnaryCall rpc, to be sent to cluster2 + defaultRouteConfigWithTwoRoutes = &routepb.RouteConfiguration{ + Name: rdsName, + VirtualHosts: []*routepb.VirtualHost{{ + Domains: []string{serviceName}, + 
Routes: []*routepb.Route{ + { + Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/EmptyCall"}}, + Action: &routepb.Route_Route{Route: &routepb.RouteAction{ + ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: cdsName1}, + }}, + }, + { + Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/UnaryCall"}}, + Action: &routepb.Route_Route{Route: &routepb.RouteAction{ + ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: cdsName2}, + }}, + }, + }, + }}, + } +) + +// This test runs subtest each for a Listener resource and a Cluster resource deletion +// in the response from the server for the following cases: +// - testResourceDeletionIgnored: When ignore_resource_deletion is set, the +// xDSClient should not delete the resource. +// - testResourceDeletionNotIgnored: When ignore_resource_deletion is unset, +// the xDSClient should delete the resource. +// +// Resource deletion is only applicable to Listener and Cluster resources. 
+func (s) TestIgnoreResourceDeletionOnClient(t *testing.T) { + port1, cleanup := startTestService(t, nil) + t.Cleanup(cleanup) + + port2, cleanup := startTestService(t, nil) + t.Cleanup(cleanup) + + initialResourceOnServer := func(nodeID string) e2e.UpdateOptions { + return e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*routepb.RouteConfiguration{defaultRouteConfigWithTwoRoutes}, + Clusters: []*clusterpb.Cluster{ + e2e.DefaultCluster(cdsName1, edsName1, e2e.SecurityLevelNone), + e2e.DefaultCluster(cdsName2, edsName2, e2e.SecurityLevelNone), + }, + Endpoints: []*endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(edsName1, "localhost", []uint32{port1}), + e2e.DefaultEndpoint(edsName2, "localhost", []uint32{port2}), + }, + SkipValidation: true, + } + } + + tests := []struct { + name string + updateResource func(r *e2e.UpdateOptions) + }{ + { + name: "listener", + updateResource: func(r *e2e.UpdateOptions) { + r.Listeners = nil + }, + }, + { + name: "cluster", + updateResource: func(r *e2e.UpdateOptions) { + r.Clusters = nil + }, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s resource deletion ignored", test.name), func(t *testing.T) { + testResourceDeletionIgnored(t, initialResourceOnServer, test.updateResource) + }) + t.Run(fmt.Sprintf("%s resource deletion not ignored", test.name), func(t *testing.T) { + testResourceDeletionNotIgnored(t, initialResourceOnServer, test.updateResource) + }) + } +} + +// This subtest tests the scenario where the bootstrap config has "ignore_resource_deletion" +// set in "server_features" field. This subtest verifies that the resource was +// not deleted by the xDSClient when a resource is missing the xDS response and +// RPCs continue to succeed. 
+func testResourceDeletionIgnored(t *testing.T, initialResource func(string) e2e.UpdateOptions, updateResource func(r *e2e.UpdateOptions)) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + t.Cleanup(cancel) + mgmtServer := startManagementServer(t) + nodeID := uuid.New().String() + bs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID) + xdsR := xdsResolverBuilder(t, bs) + resources := initialResource(nodeID) + + // Update the management server with initial resources setup. + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR)) + if err != nil { + t.Fatalf("Failed to dial local test server: %v.", err) + } + t.Cleanup(func() { cc.Close() }) + + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + + // Mutate resource and update on the server. + updateResource(&resources) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Make an RPC every 50ms for the next 500ms. This is to ensure that the + // updated resource is received from the management server and is processed by + // gRPC. Since resource deletions are ignored by the xDS client, we expect RPCs + // to all endpoints to keep succeeding. + timer := time.NewTimer(500 * time.Millisecond) + ticker := time.NewTicker(50 * time.Millisecond) + t.Cleanup(ticker.Stop) + for { + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + return + case <-timer.C: + return + case <-ticker.C: + } + } +} + +// This subtest tests the scenario where the bootstrap config has "ignore_resource_deletion" +// not set in "server_features" field. This subtest verifies that the resource was +// deleted by the xDSClient when a resource is missing the xDS response and subsequent +// RPCs fail. 
+func testResourceDeletionNotIgnored(t *testing.T, initialResource func(string) e2e.UpdateOptions, updateResource func(r *e2e.UpdateOptions)) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout*1000) + t.Cleanup(cancel) + mgmtServer := startManagementServer(t) + nodeID := uuid.New().String() + bs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID) + xdsR := xdsResolverBuilder(t, bs) + resources := initialResource(nodeID) + + // Update the management server with initial resources setup. + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + + // Mutate resource and update on the server. + updateResource(&resources) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Spin up go routines to verify RPCs fail after the update. + client := testgrpc.NewTestServiceClient(cc) + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + for ; ctx.Err() == nil; <-time.After(10 * time.Millisecond) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + return + } + } + }() + go func() { + defer wg.Done() + for ; ctx.Err() == nil; <-time.After(10 * time.Millisecond) { + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + return + } + } + }() + + wg.Wait() + if ctx.Err() != nil { + t.Fatal("Context expired before RPCs failed.") + } +} + +// This helper creates a management server for the test. 
+func startManagementServer(t *testing.T) *e2e.ManagementServer { + t.Helper() + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to start management server: %v", err) + } + t.Cleanup(mgmtServer.Stop) + return mgmtServer +} + +// This helper generates a custom bootstrap config for the test. +func generateBootstrapContents(t *testing.T, serverURI string, ignoreResourceDeletion bool, nodeID string) []byte { + t.Helper() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + NodeID: nodeID, + ServerURI: serverURI, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + IgnoreResourceDeletion: ignoreResourceDeletion, + }) + if err != nil { + t.Fatal(err) + } + return bootstrapContents +} + +// This helper creates an XDS resolver Builder from the bootstrap config passed +// as parameter. +func xdsResolverBuilder(t *testing.T, bs []byte) resolver.Builder { + t.Helper() + resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error)) + xdsR, err := resolverBuilder(bs) + if err != nil { + t.Fatalf("Creating xDS resolver for testing failed for config %q: %v", string(bs), err) + } + return xdsR +} + +// This helper creates an xDS-enabled gRPC server using the listener and the +// bootstrap config passed. It then registers the test service on the newly +// created gRPC server and starts serving. +func setupGRPCServerWithModeChangeChannelAndServe(t *testing.T, bootstrapContents []byte, lis net.Listener) chan connectivity.ServingMode { + t.Helper() + updateCh := make(chan connectivity.ServingMode, 1) + + // Create a server option to get notified about serving mode changes. 
+ modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + updateCh <- args.Mode + }) + server := xds.NewGRPCServer(grpc.Creds(insecure.NewCredentials()), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) + t.Cleanup(server.Stop) + testpb.RegisterTestServiceServer(server, &testService{}) + + // Serve. + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + return updateCh +} + +// This helper creates a new TCP listener. This helper also uses this listener to +// create a resource update with a listener resource. This helper returns the +// resource update and the TCP listener. +func resourceWithListenerForGRPCServer(t *testing.T, nodeID string) (e2e.UpdateOptions, net.Listener) { + t.Helper() + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + t.Cleanup(func() { lis.Close() }) + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("Failed to retrieve host and port of listener at %q: %v", lis.Addr(), err) + } + listener := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*listenerpb.Listener{listener}, + } + return resources, lis +} + +// This test creates a gRPC server which provides server-side xDS functionality +// by talking to a custom management server. This tests the scenario where bootstrap +// config with "server_features" includes "ignore_resource_deletion". In which +// case, when the listener resource is deleted on the management server, the gRPC +// server should continue to serve RPCs. 
+func (s) TestListenerResourceDeletionOnServerIgnored(t *testing.T) {
+	mgmtServer := startManagementServer(t)
+	nodeID := uuid.New().String()
+	bs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID)
+	xdsR := xdsResolverBuilder(t, bs)
+	resources, lis := resourceWithListenerForGRPCServer(t, nodeID)
+	modeChangeCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis)
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if err := mgmtServer.Update(ctx, resources); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the server to update to ServingModeServing mode.
+	select {
+	case <-ctx.Done():
+		t.Fatal("Test timed out waiting for a server to change to ServingModeServing.")
+	case mode := <-modeChangeCh:
+		if mode != connectivity.ServingModeServing {
+			t.Fatalf("Server switched to mode %v, want %v", mode, connectivity.ServingModeServing)
+		}
+	}
+
+	// Create a ClientConn and make successful RPCs.
+	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))
+	if err != nil {
+		t.Fatalf("failed to dial local test server: %v", err)
+	}
+	defer cc.Close()
+
+	if err := verifyRPCtoAllEndpoints(cc); err != nil {
+		t.Fatal(err)
+	}
+
+	// Update without a listener resource.
+	if err := mgmtServer.Update(ctx, e2e.UpdateOptions{
+		NodeID:    nodeID,
+		Listeners: []*listenerpb.Listener{},
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Perform RPCs every 50ms for the next 500ms and verify that the serving
+	// mode does not change on the gRPC server.
+ timer := time.NewTimer(500 * time.Millisecond) + ticker := time.NewTicker(50 * time.Millisecond) + t.Cleanup(ticker.Stop) + for { + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + select { + case <-timer.C: + return + case mode := <-modeChangeCh: + t.Fatalf("Server switched to mode: %v when no switch was expected", mode) + case <-ticker.C: + } + } +} + +// This test creates a gRPC server which provides server-side xDS functionality +// by talking to a custom management server. This tests the scenario where bootstrap +// config with "server_features" does not include "ignore_resource_deletion". In +// which case, when the listener resource is deleted on the management server, the +// gRPC server should stop serving RPCs and switch mode to ServingModeNotServing. +func (s) TestListenerResourceDeletionOnServerNotIgnored(t *testing.T) { + mgmtServer := startManagementServer(t) + nodeID := uuid.New().String() + bs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID) + xdsR := xdsResolverBuilder(t, bs) + resources, lis := resourceWithListenerForGRPCServer(t, nodeID) + updateCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for the listener to move to "serving" mode. + select { + case <-ctx.Done(): + t.Fatal("Test timed out waiting for a mode change update.") + case mode := <-updateCh: + if mode != connectivity.ServingModeServing { + t.Fatalf("Listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + + // Create a ClientConn and make a successful RPCs. 
+	cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR))
+	if err != nil {
+		t.Fatalf("failed to dial local test server: %v", err)
+	}
+	defer cc.Close()
+	if err := verifyRPCtoAllEndpoints(cc); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := mgmtServer.Update(ctx, e2e.UpdateOptions{
+		NodeID:    nodeID,
+		Listeners: []*listenerpb.Listener{}, // empty listener resource
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	select {
+	case <-ctx.Done():
+		t.Fatalf("timed out waiting for a mode change update: %v", err)
+	case mode := <-updateCh:
+		if mode != connectivity.ServingModeNotServing {
+			t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing)
+		}
+	}
+}
+
+// This helper makes both UnaryCall and EmptyCall RPCs using the ClientConn that
+// is passed to this function. This helper returns an error if any of the RPCs fail.
+func verifyRPCtoAllEndpoints(cc grpc.ClientConnInterface) error {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	client := testgrpc.NewTestServiceClient(cc)
+	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
+		return fmt.Errorf("rpc EmptyCall() failed: %v", err)
+	}
+	if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil {
+		return fmt.Errorf("rpc UnaryCall() failed: %v", err)
+	}
+	return nil
+}
diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go
index 4ffdac86ece1..6e0ae23df4bd 100644
--- a/test/xds/xds_client_integration_test.go
+++ b/test/xds/xds_client_integration_test.go
@@ -58,6 +58,9 @@ func startTestService(t *testing.T, server *stubserver.StubServer) (uint32, func
 	if server == nil {
 		server = &stubserver.StubServer{
 			EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil },
+			UnaryCallF: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+				return 
&testpb.SimpleResponse{}, nil + }, } } server.StartServer() diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index af9b84d6cb26..f0fc20100814 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -45,9 +45,10 @@ const ( ) type resourceState struct { - watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource - cache xdsresource.ResourceData // Most recent ACKed update for this resource - md xdsresource.UpdateMetadata // Metadata for the most recent update + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource + cache xdsresource.ResourceData // Most recent ACKed update for this resource + md xdsresource.UpdateMetadata // Metadata for the most recent update + deletionIgnored bool // True if resource deletion was ignored for a prior update // Common watch state for all watchers of this resource. wTimer *time.Timer // Expiry timer @@ -211,8 +212,12 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty } continue } - // If we get here, it means that the update is a valid one. Notify - // watchers only if this is a first time update or it is different + + if state.deletionIgnored { + state.deletionIgnored = false + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeEnum()) + } + // Notify watchers only if this is a first time update or it is different // from the one currently cached. if state.cache == nil || !state.cache.Equal(uErr.resource) { for watcher := range state.watchers { @@ -239,7 +244,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // If this resource type requires that all resources be present in every // SotW response from the server, a response that does not include a // previously seen resource will be interpreted as a deletion of that - // resource. 
+ // resource unless ignore_resource_deletion option was set in the server + // config. if !rType.AllResourcesRequiredInSotW() { return } @@ -270,7 +276,18 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty if state.md.Status == xdsresource.ServiceStatusNotExist { continue } - + // Per A53, resource deletions are ignored if the `ignore_resource_deletion` + // server feature is enabled through the bootstrap configuration. If the + // resource deletion is to be ignored, the resource is not removed from + // the cache and the corresponding OnResourceDoesNotExist() callback is + // not invoked on the watchers. + if a.serverCfg.IgnoreResourceDeletion { + if !state.deletionIgnored { + state.deletionIgnored = true + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeEnum()) + } + continue + } // If resource exists in cache, but not in the new update, delete // the resource from cache, and also send a resource not found error // to indicate resource removed. Metadata for the resource is still diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 9b02cd7d7618..47979f4fa573 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -247,7 +247,7 @@ func compareWatchState(a *authority, rn string, wantState watchState) error { defer a.resourcesMu.Unlock() gotState := a.resources[listenerResourceType][rn].wState if gotState != wantState { - return fmt.Errorf("%v. Want: %v", gotState, wantState) + return fmt.Errorf("Got %v. 
Want: %v", gotState, wantState) } wTimer := a.resources[listenerResourceType][rn].wTimer diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go index 2a953cf45202..aec2fa51f30f 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -42,6 +42,16 @@ import ( ) const ( + // The "server_features" field in the bootstrap file contains a list of + // features supported by the server: + // - A value of "xds_v3" indicates that the server supports the v3 version of + // the xDS transport protocol. + // - A value of "ignore_resource_deletion" indicates that the client should + // ignore deletion of Listener and Cluster resources in updates from the + // server. + serverFeaturesV3 = "xds_v3" + serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" + gRPCUserAgentName = "gRPC Go" clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" clientFeatureResourceWrapper = "xds.config.resource-in-sotw" @@ -134,6 +144,13 @@ type ServerConfig struct { // credentials and store it here as a grpc.DialOption for easy access when // dialing this xDS server. credsDialOption grpc.DialOption + + // IgnoreResourceDeletion controls the behavior of the xDS client when the + // server deletes a previously sent Listener or Cluster resource. If set, the + // xDS client will not invoke the watchers' OnResourceDoesNotExist() method + // when a resource is deleted, nor will it remove the existing resource value + // from its cache. + IgnoreResourceDeletion bool } // CredsDialOption returns the configured credentials as a grpc dial option. 
@@ -162,6 +179,10 @@ func (sc ServerConfig) MarshalJSON() ([]byte, error) { ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}}, ServerFeatures: sc.ServerFeatures, } + server.ServerFeatures = []string{serverFeaturesV3} + if sc.IgnoreResourceDeletion { + server.ServerFeatures = append(server.ServerFeatures, serverFeaturesIgnoreResourceDeletion) + } return json.Marshal(server) } @@ -174,6 +195,11 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { sc.ServerURI = server.ServerURI sc.ServerFeatures = server.ServerFeatures + for _, f := range server.ServerFeatures { + if f == serverFeaturesIgnoreResourceDeletion { + sc.IgnoreResourceDeletion = true + } + } for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. c := bootstrap.GetCredentials(cc.Type) diff --git a/xds/internal/xdsclient/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go index 4ad4739ace5a..026460458b08 100644 --- a/xds/internal/xdsclient/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -181,6 +181,22 @@ var ( } ] }`, + "serverSupportsIgnoreResourceDeletion": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ + { "type": "google_default" } + ], + "server_features" : ["ignore_resource_deletion", "xds_v3"] + }] + }`, } metadata = &structpb.Struct{ Fields: map[string]*structpb.Value{ @@ -213,6 +229,16 @@ var ( NodeProto: v3NodeProto, ClientDefaultListenerResourceNameTemplate: "%s", } + nonNilCredsConfigWithDeletionIgnored = &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + IgnoreResourceDeletion: true, + ServerFeatures: []string{"ignore_resource_deletion", "xds_v3"}, + }, + NodeProto: v3NodeProto, + 
ClientDefaultListenerResourceNameTemplate: "%s", + } nonNilCredsConfigNoServerFeatures = &Config{ XDSServer: &ServerConfig{ ServerURI: "trafficdirector.googleapis.com:443", @@ -393,6 +419,7 @@ func TestNewConfigV3ProtoSuccess(t *testing.T) { {"multipleChannelCreds", nonNilCredsConfigV3}, {"goodBootstrap", nonNilCredsConfigV3}, {"multipleXDSServers", nonNilCredsConfigV3}, + {"serverSupportsIgnoreResourceDeletion", nonNilCredsConfigWithDeletionIgnored}, } for _, test := range tests { From 1d5b73a103b2d1049f643fac4e20ac3643f2caa5 Mon Sep 17 00:00:00 2001 From: Mskxn <118117161+Mskxn@users.noreply.github.com> Date: Wed, 5 Apr 2023 06:19:25 +0800 Subject: [PATCH 860/998] xds: add stop to avoid hanging in TestServeWithStop (#6172) --- xds/server_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/xds/server_test.go b/xds/server_test.go index e20f0400ee9a..fbaee7d2f50e 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -508,6 +508,7 @@ func (s) TestServeWithStop(t *testing.T) { lis, err := testutils.LocalTCPListener() if err != nil { + server.Stop() t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } @@ -525,6 +526,7 @@ func (s) TestServeWithStop(t *testing.T) { defer cancel() c, err := clientCh.Receive(ctx) if err != nil { + server.Stop() t.Fatalf("error when waiting for new xdsClient to be created: %v", err) } client := c.(*fakeclient.Client) From b0a8b1b9c19146dae6836e74ef331db4a1837dd7 Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Tue, 4 Apr 2023 17:25:40 -0700 Subject: [PATCH 861/998] Use string instead of enum for xds resource type (#6163) --- xds/internal/testutils/testutils.go | 16 ++++++---- xds/internal/xdsclient/authority.go | 19 ++++++----- xds/internal/xdsclient/clientimpl_watchers.go | 8 ++--- .../xdsclient/tests/authority_test.go | 8 ++--- .../xdsresource/cluster_resource_type.go | 8 ++++- .../xdsresource/endpoints_resource_type.go | 8 ++++- .../xdsresource/listener_resource_type.go | 8 ++++- 
.../xdsclient/xdsresource/resource_type.go | 20 ++++++------ .../xdsresource/route_config_resource_type.go | 8 ++++- xds/internal/xdsclient/xdsresource/type.go | 32 ------------------- 10 files changed, 65 insertions(+), 70 deletions(-) diff --git a/xds/internal/testutils/testutils.go b/xds/internal/testutils/testutils.go index a07f5a1171b3..44891780e0c4 100644 --- a/xds/internal/testutils/testutils.go +++ b/xds/internal/testutils/testutils.go @@ -29,17 +29,21 @@ import ( // BuildResourceName returns the resource name in the format of an xdstp:// // resource. -func BuildResourceName(typ xdsresource.ResourceType, auth, id string, ctxParams map[string]string) string { +func BuildResourceName(typeName, auth, id string, ctxParams map[string]string) string { var typS string - switch typ { - case xdsresource.ListenerResource: + switch typeName { + case xdsresource.ListenerResourceTypeName: typS = version.V3ListenerType - case xdsresource.RouteConfigResource: + case xdsresource.RouteConfigTypeName: typS = version.V3RouteConfigType - case xdsresource.ClusterResource: + case xdsresource.ClusterResourceTypeName: typS = version.V3ClusterType - case xdsresource.EndpointsResource: + case xdsresource.EndpointsResourceTypeName: typS = version.V3EndpointsType + default: + // If the name doesn't match any of the standard resources fallback + // to the type name. 
+ typS = typeName } return (&xdsresource.Name{ Scheme: "xdstp", diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index f0fc20100814..61adf794e9b7 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -215,7 +215,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty if state.deletionIgnored { state.deletionIgnored = false - a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeEnum()) + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) } // Notify watchers only if this is a first time update or it is different // from the one currently cached. @@ -227,7 +227,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty } } // Sync cache. - a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeEnum().String(), name) + a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeName(), name) state.cache = uErr.resource // Set status to ACK, and clear error state. 
The metadata might be a // NACK metadata because some other resources in the same response @@ -284,7 +284,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty if a.serverCfg.IgnoreResourceDeletion { if !state.deletionIgnored { state.deletionIgnored = true - a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeEnum()) + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) } continue } @@ -345,9 +345,8 @@ func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, return ret, md, nil } - typeStr := rType.TypeEnum().String() md.Status = xdsresource.ServiceStatusNACKed - errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) + errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) md.ErrState = &xdsresource.UpdateErrorMetadata{ Version: update.Version, Err: errRet, @@ -447,7 +446,7 @@ func (a *authority) close() { } func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { - a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeEnum(), resourceName) + a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeName(), resourceName) a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -464,7 +463,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // instruct the transport layer to send a DiscoveryRequest for the same. 
state := resources[resourceName] if state == nil { - a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeEnum(), resourceName) + a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeName(), resourceName) state = &resourceState{ watchers: make(map[xdsresource.ResourceWatcher]bool), md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, @@ -478,7 +477,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // If we have a cached copy of the resource, notify the new watcher. if state.cache != nil { - a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeEnum(), resourceName, state.cache.ToJSON()) + a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) resource := state.cache a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) } @@ -501,14 +500,14 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // There are no more watchers for this resource, delete the state // associated with it, and instruct the transport to send a request // which does not include this resource name. 
- a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeEnum(), resourceName) + a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) delete(resources, resourceName) a.sendDiscoveryRequestLocked(rType, resources) } } func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName string, state *resourceState) { - a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeEnum().String()) + a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) a.resourcesMu.Lock() defer a.resourcesMu.Unlock() diff --git a/xds/internal/xdsclient/clientimpl_watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go index 3a2505db43e9..3c3adad5341c 100644 --- a/xds/internal/xdsclient/clientimpl_watchers.go +++ b/xds/internal/xdsclient/clientimpl_watchers.go @@ -156,12 +156,12 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, // ref-counted client sets its pointer to `nil`. And if any watch APIs are // made on such a closed client, we will get here with a `nil` receiver. 
if c == nil || c.done.HasFired() { - logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeEnum().String(), resourceName) + logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeName(), resourceName) return func() {} } if err := c.resourceTypes.maybeRegister(rType); err != nil { - logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeEnum().String(), resourceName) + logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) return func() {} } @@ -180,7 +180,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, n := xdsresource.ParseName(resourceName) a, unref, err := c.findAuthority(n) if err != nil { - logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeEnum().String(), resourceName, n.Authority) + logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) return func() {} } @@ -216,7 +216,7 @@ func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { url := rType.TypeURL() typ, ok := r.types[url] if ok && typ != rType { - return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeEnum()) + return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeName()) } r.types[url] = rType return nil diff --git a/xds/internal/xdsclient/tests/authority_test.go b/xds/internal/xdsclient/tests/authority_test.go index 527e2650d256..0345f4a4040c 100644 --- a/xds/internal/xdsclient/tests/authority_test.go +++ b/xds/internal/xdsclient/tests/authority_test.go @@ -45,16 +45,16 @@ var ( // These two resources use `testAuthority1`, which contains 
an empty server // config in the bootstrap file, and therefore will use the default // management server. - authorityTestResourceName11 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority1, cdsName+"1", nil) - authorityTestResourceName12 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority1, cdsName+"2", nil) + authorityTestResourceName11 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority1, cdsName+"1", nil) + authorityTestResourceName12 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority1, cdsName+"2", nil) // This resource uses `testAuthority2`, which contains an empty server // config in the bootstrap file, and therefore will use the default // management server. - authorityTestResourceName2 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority2, cdsName+"3", nil) + authorityTestResourceName2 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority2, cdsName+"3", nil) // This resource uses `testAuthority3`, which contains a non-empty server // config in the bootstrap file, and therefore will use the non-default // management server. 
- authorityTestResourceName3 = xdstestutils.BuildResourceName(xdsresource.ClusterResource, testAuthority3, cdsName+"3", nil) + authorityTestResourceName3 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority3, cdsName+"3", nil) ) // setupForAuthorityTests spins up two management servers, one to act as the diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 0c9b4473ce2f..c51d38d3f06d 100644 --- a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -24,6 +24,12 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) +const ( + // ClusterResourceTypeName represents the transport agnostic name for the + // cluster resource. + ClusterResourceTypeName = "ClusterResource" +) + var ( // Compile time interface checks. _ Type = clusterResourceType{} @@ -33,7 +39,7 @@ var ( clusterType = clusterResourceType{ resourceTypeState: resourceTypeState{ typeURL: version.V3ClusterURL, - typeEnum: ClusterResource, + typeName: ClusterResourceTypeName, allResourcesRequiredInSotW: true, }, } diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index caa728376099..5a2dbbd20319 100644 --- a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -24,6 +24,12 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) +const ( + // EndpointsResourceTypeName represents the transport agnostic name for the + // endpoint resource. + EndpointsResourceTypeName = "EndpointsResource" +) + var ( // Compile time interface checks. 
_ Type = endpointsResourceType{} @@ -33,7 +39,7 @@ var ( endpointsType = endpointsResourceType{ resourceTypeState: resourceTypeState{ typeURL: version.V3EndpointsURL, - typeEnum: EndpointsResource, + typeName: "EndpointsResource", allResourcesRequiredInSotW: false, }, } diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 2e230d857bd4..33ebd3efbf10 100644 --- a/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -27,6 +27,12 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) +const ( + // ListenerResourceTypeName represents the transport agnostic name for the + // listener resource. + ListenerResourceTypeName = "ListenerResource" +) + var ( // Compile time interface checks. _ Type = listenerResourceType{} @@ -36,7 +42,7 @@ var ( listenerType = listenerResourceType{ resourceTypeState: resourceTypeState{ typeURL: version.V3ListenerURL, - typeEnum: ListenerResource, + typeName: ListenerResourceTypeName, allResourcesRequiredInSotW: true, }, } diff --git a/xds/internal/xdsclient/xdsresource/resource_type.go b/xds/internal/xdsclient/xdsresource/resource_type.go index 8d07b2bb8668..f67f0ea15325 100644 --- a/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/xds/internal/xdsclient/xdsresource/resource_type.go @@ -84,14 +84,14 @@ type Type interface { // TypeURL is the xDS type URL of this resource type for v3 transport. TypeURL() string - // TypeEnum is an enumerated value for this resource type. This can be used - // for logging/debugging purposes, as well in cases where the resource type - // is to be uniquely identified but the actual functionality provided by the - // resource type is not required. + // TypeName identifies resources in a transport protocol agnostic way. 
This + // can be used for logging/debugging purposes, as well in cases where the + // resource type name is to be uniquely identified but the actual + // functionality provided by the resource type is not required. // - // TODO: once Type is renamed to ResourceType, rename ResourceType to - // ResourceTypeEnum. - TypeEnum() ResourceType + // TODO: once Type is renamed to ResourceType, rename TypeName to + // ResourceTypeName. + TypeName() string // AllResourcesRequiredInSotW indicates whether this resource type requires // that all resources be present in every SotW response from the server. If @@ -147,7 +147,7 @@ type DecodeResult struct { // implemented here for free. type resourceTypeState struct { typeURL string - typeEnum ResourceType + typeName string allResourcesRequiredInSotW bool } @@ -155,8 +155,8 @@ func (r resourceTypeState) TypeURL() string { return r.typeURL } -func (r resourceTypeState) TypeEnum() ResourceType { - return r.typeEnum +func (r resourceTypeState) TypeName() string { + return r.typeName } func (r resourceTypeState) AllResourcesRequiredInSotW() bool { diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 448d122d49d0..d06af4ae1aff 100644 --- a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -24,6 +24,12 @@ import ( "google.golang.org/protobuf/types/known/anypb" ) +const ( + // RouteConfigTypeName represents the transport agnostic name for the + // route config resource. + RouteConfigTypeName = "RouteConfigResource" +) + var ( // Compile time interface checks. 
_ Type = routeConfigResourceType{} @@ -33,7 +39,7 @@ var ( routeConfigType = routeConfigResourceType{ resourceTypeState: resourceTypeState{ typeURL: version.V3RouteConfigURL, - typeEnum: RouteConfigResource, + typeName: "RouteConfigResource", allResourcesRequiredInSotW: false, }, } diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go index c629380d508b..1cb49cc1d1b1 100644 --- a/xds/internal/xdsclient/xdsresource/type.go +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -133,35 +133,3 @@ type UpdateWithMD struct { MD UpdateMetadata Raw *anypb.Any } - -// ResourceType identifies resources in a transport protocol agnostic way. These -// will be used in transport version agnostic code, while the versioned API -// clients will map these to appropriate version URLs. -type ResourceType int - -// Version agnostic resource type constants. -const ( - UnknownResource ResourceType = iota - ListenerResource - HTTPConnManagerResource - RouteConfigResource - ClusterResource - EndpointsResource -) - -func (r ResourceType) String() string { - switch r { - case ListenerResource: - return "ListenerResource" - case HTTPConnManagerResource: - return "HTTPConnManagerResource" - case RouteConfigResource: - return "RouteConfigResource" - case ClusterResource: - return "ClusterResource" - case EndpointsResource: - return "EndpointsResource" - default: - return "UnknownResource" - } -} From 10401b9289aec900539213e88ba50886e8d91653 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 5 Apr 2023 17:00:35 -0400 Subject: [PATCH 862/998] stats/opencensus: the backend to Sent. Attempt. and Recv. 
(#6173) --- gcp/observability/go.mod | 2 ++ gcp/observability/go.sum | 2 -- gcp/observability/observability_test.go | 39 ++++++++++++++----------- stats/opencensus/e2e_test.go | 10 +++---- stats/opencensus/opencensus.go | 2 +- stats/opencensus/trace.go | 2 +- 6 files changed, 30 insertions(+), 27 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 695fa3700c80..f04fb0a06c96 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -37,3 +37,5 @@ require ( google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect google.golang.org/protobuf v1.30.0 // indirect ) + +replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 3865c5ce1014..fbc7b2898090 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -598,8 +598,6 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba h1:puuDphNHQZRngQpzUGvfXMBFBv6DuahfWMZaj0jVtjw= google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae h1:40UWCQ40A2NTDabsmbZNznFf9SUftDlaBASj7OCdKDY= -google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae/go.mod h1:qPsHQZhltTPryCUC0naykSpbIJDodlCLM/vNa607CrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/gcp/observability/observability_test.go 
b/gcp/observability/observability_test.go index a32311f7bb74..89e30c19d4ab 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -21,7 +21,6 @@ package observability import ( "context" "encoding/json" - "errors" "fmt" "io" "os" @@ -31,6 +30,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "go.opencensus.io/stats/view" "go.opencensus.io/trace" "google.golang.org/grpc/internal/envconfig" @@ -114,12 +114,15 @@ type traceAndSpanID struct { traceID trace.TraceID spanID trace.SpanID isSampled bool + spanKind int } type traceAndSpanIDString struct { traceID string spanID string isSampled bool + // SpanKind is the type of span. + SpanKind int } // idsToString is a helper that converts from generated trace and span IDs to @@ -129,6 +132,7 @@ func (tasi *traceAndSpanID) idsToString(projectID string) traceAndSpanIDString { traceID: "projects/" + projectID + "/traces/" + tasi.traceID.String(), spanID: tasi.spanID.String(), isSampled: tasi.isSampled, + SpanKind: tasi.spanKind, } } @@ -142,6 +146,7 @@ func (fe *fakeOpenCensusExporter) ExportSpan(vd *trace.SpanData) { traceID: vd.TraceID, spanID: vd.SpanID, isSampled: vd.IsSampled(), + spanKind: vd.SpanKind, }) } @@ -653,7 +658,7 @@ func (s) TestLoggingLinkedWithTraceClientSide(t *testing.T) { <-unaryDone.Done() var tasiSent traceAndSpanIDString for _, tasi := range traceAndSpanIDs { - if strings.HasPrefix(tasi.spanName, "Sent.") { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindClient { tasiSent = tasi.idsToString(projectID) continue } @@ -661,8 +666,8 @@ func (s) TestLoggingLinkedWithTraceClientSide(t *testing.T) { fle.mu.Lock() for _, tasiSeen := range fle.idsSeen { - if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{})); diff != "" { - readerErrCh.Send(errors.New("got unexpected id, should be a client span")) + if diff := cmp.Diff(tasiSeen, 
&tasiSent, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a client span (-got, +want): %v", diff)) } } @@ -795,7 +800,7 @@ func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) { <-unaryDone.Done() var tasiServer traceAndSpanIDString for _, tasi := range traceAndSpanIDs { - if strings.HasPrefix(tasi.spanName, "grpc.") { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindServer { tasiServer = tasi.idsToString(projectID) continue } @@ -803,8 +808,8 @@ func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) { fle.mu.Lock() for _, tasiSeen := range fle.idsSeen { - if diff := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{})); diff != "" { - readerErrCh.Send(errors.New("got unexpected id, should be a server span")) + if diff := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a server span (-got, +want): %v", diff)) } } @@ -947,20 +952,20 @@ func (s) TestLoggingLinkedWithTrace(t *testing.T) { var tasiSent traceAndSpanIDString var tasiServer traceAndSpanIDString for _, tasi := range traceAndSpanIDs { - if strings.HasPrefix(tasi.spanName, "Sent.") { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindClient { tasiSent = tasi.idsToString(projectID) continue } - if strings.HasPrefix(tasi.spanName, "grpc.") { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindServer { tasiServer = tasi.idsToString(projectID) } } fle.mu.Lock() for _, tasiSeen := range fle.idsSeen { - if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{})); diff != "" { - if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{})); diff2 != "" { 
- readerErrCh.Send(errors.New("got unexpected id, should be client or server span")) + if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff2 != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a client or server span (-got, +want): %v, %v", diff, diff2)) } } } @@ -1027,20 +1032,20 @@ func (s) TestLoggingLinkedWithTrace(t *testing.T) { var tasiSent traceAndSpanIDString var tasiServer traceAndSpanIDString for _, tasi := range traceAndSpanIDs { - if strings.HasPrefix(tasi.spanName, "Sent.") { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindClient { tasiSent = tasi.idsToString(projectID) continue } - if strings.HasPrefix(tasi.spanName, "grpc.") { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindServer { tasiServer = tasi.idsToString(projectID) } } fle.mu.Lock() for _, tasiSeen := range fle.idsSeen { - if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{})); diff != "" { - if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{})); diff2 != "" { - readerErrCh.Send(errors.New("got unexpected id, should be client or server span")) + if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff2 != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a client or server span (-got, +want): %v, %v", diff, diff2)) } } } diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 0b86ede4c2d5..3eb6a1c8fa77 
100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -1426,8 +1426,7 @@ func (s) TestSpan(t *testing.T) { sc: trace.SpanContext{ TraceOptions: 1, }, - spanKind: trace.SpanKindClient, - name: "Attempt.grpc.testing.TestService.UnaryCall", + name: "Attempt.grpc.testing.TestService.UnaryCall", messageEvents: []trace.MessageEvent{ { EventType: trace.MessageEventTypeSent, @@ -1447,7 +1446,7 @@ func (s) TestSpan(t *testing.T) { TraceOptions: 1, }, spanKind: trace.SpanKindClient, - name: "Sent.grpc.testing.TestService.UnaryCall", + name: "grpc.testing.TestService.UnaryCall", hasRemoteParent: false, childSpanCount: 1, }, @@ -1517,7 +1516,7 @@ func (s) TestSpan(t *testing.T) { TraceOptions: 1, }, spanKind: trace.SpanKindClient, - name: "Sent.grpc.testing.TestService.FullDuplexCall", + name: "grpc.testing.TestService.FullDuplexCall", hasRemoteParent: false, childSpanCount: 1, }, @@ -1525,8 +1524,7 @@ func (s) TestSpan(t *testing.T) { sc: trace.SpanContext{ TraceOptions: 1, }, - spanKind: trace.SpanKindClient, - name: "Attempt.grpc.testing.TestService.FullDuplexCall", + name: "Attempt.grpc.testing.TestService.FullDuplexCall", messageEvents: []trace.MessageEvent{ { EventType: trace.MessageEventTypeSent, diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go index f1cd0880d35a..8bebb21575e1 100644 --- a/stats/opencensus/opencensus.go +++ b/stats/opencensus/opencensus.go @@ -88,7 +88,7 @@ func ServerOption(to TraceOptions) grpc.ServerOption { func (csh *clientStatsHandler) createCallSpan(ctx context.Context, method string) (context.Context, *trace.Span) { var span *trace.Span if !csh.to.DisableTrace { - mn := "Sent." 
+ strings.Replace(removeLeadingSlash(method), "/", ".", -1) + mn := strings.Replace(removeLeadingSlash(method), "/", ".", -1) ctx, span = trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS), trace.WithSpanKind(trace.SpanKindClient)) } return ctx, span diff --git a/stats/opencensus/trace.go b/stats/opencensus/trace.go index afd5b4fd8912..a7cafb30f4d0 100644 --- a/stats/opencensus/trace.go +++ b/stats/opencensus/trace.go @@ -42,7 +42,7 @@ func (csh *clientStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTa mn := "Attempt." + strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) // Returned context is ignored because will populate context with data // that wraps the span instead. - _, span := trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS), trace.WithSpanKind(trace.SpanKindClient)) + _, span := trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS)) tcBin := propagation.Binary(span.SpanContext()) return stats.SetTrace(ctx, tcBin), &traceInfo{ From bfb57b8b49536cafe75c5692277b88378c55adba Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 5 Apr 2023 17:12:57 -0700 Subject: [PATCH 863/998] testing: delete internal copy of test service proto, and use canonical one (#6164) --- authz/grpc_authz_end2end_test.go | 72 +- balancer/grpclb/grpclb_test.go | 43 +- balancer/rls/helpers_test.go | 4 +- balancer/rls/picker_test.go | 4 +- gcp/observability/go.mod | 16 +- gcp/observability/go.sum | 872 +++++++++++- gcp/observability/logging_test.go | 64 +- gcp/observability/observability_test.go | 36 +- internal/stubserver/stubserver.go | 17 +- internal/testutils/pickfirst/pickfirst.go | 4 +- internal/testutils/roundrobin/roundrobin.go | 4 +- interop/observability/go.mod | 2 + interop/observability/go.sum | 2 - orca/call_metric_recorder_test.go | 6 +- orca/service_test.go | 6 +- stats/opencensus/e2e_test.go | 32 +- stats/opencensus/go.mod | 6 +- stats/opencensus/go.sum | 1268 ++++++++++++++++- status/status_ext_test.go | 9 +- 
test/authority_test.go | 8 +- test/balancer_switching_test.go | 19 +- test/balancer_test.go | 30 +- test/channelz_linux_test.go | 5 +- test/channelz_test.go | 50 +- test/clientconn_test.go | 4 +- test/compressor_test.go | 18 +- test/config_selector_test.go | 2 +- test/context_canceled_test.go | 8 +- test/control_plane_status_test.go | 2 +- test/creds_test.go | 24 +- test/end2end_test.go | 247 ++-- test/goaway_test.go | 34 +- test/gracefulstop_test.go | 12 +- test/grpc_testing/test.pb.go | 924 ------------ test/grpc_testing/test.proto | 156 -- test/grpc_testing/test_grpc.pb.go | 465 ------ test/healthcheck_test.go | 24 +- test/http_header_end2end_test.go | 5 +- test/insecure_creds_test.go | 13 +- test/interceptor_test.go | 6 +- test/invoke_test.go | 2 +- test/local_creds_test.go | 11 +- test/metadata_test.go | 6 +- test/pickfirst_test.go | 6 +- test/resolver_update_test.go | 12 +- test/retry_test.go | 46 +- test/roundrobin_test.go | 15 +- test/server_test.go | 8 +- test/stream_cleanup_test.go | 6 +- test/transport_test.go | 8 +- test/xds/xds_client_ack_nack_test.go | 4 +- test/xds/xds_client_affinity_test.go | 4 +- test/xds/xds_client_federation_test.go | 8 +- ...ds_client_ignore_resource_deletion_test.go | 4 +- test/xds/xds_client_integration_test.go | 4 +- test/xds/xds_client_outlier_detection_test.go | 6 +- test/xds/xds_client_retry_test.go | 4 +- .../xds_rls_clusterspecifier_plugin_test.go | 4 +- test/xds/xds_security_config_nack_test.go | 4 +- test/xds/xds_server_integration_test.go | 6 +- test/xds/xds_server_rbac_test.go | 8 +- test/xds/xds_server_serving_mode_test.go | 6 +- .../clusterimpl/tests/balancer_test.go | 4 +- .../clusterresolver/e2e_test/balancer_test.go | 4 +- .../clusterresolver/e2e_test/eds_impl_test.go | 12 +- .../e2e_test/outlierdetection_test.go | 11 +- .../ringhash/e2e/ringhash_balancer_test.go | 6 +- xds/internal/httpfilter/fault/fault_test.go | 15 +- 68 files changed, 2621 insertions(+), 2136 deletions(-) delete mode 100644 
test/grpc_testing/test.pb.go delete mode 100644 test/grpc_testing/test.proto delete mode 100644 test/grpc_testing/test_grpc.pb.go diff --git a/authz/grpc_authz_end2end_test.go b/authz/grpc_authz_end2end_test.go index c7cf26e122a4..0a4cd1862e98 100644 --- a/authz/grpc_authz_end2end_test.go +++ b/authz/grpc_authz_end2end_test.go @@ -36,23 +36,25 @@ import ( "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - pb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type testServer struct { - pb.UnimplementedTestServiceServer + testgrpc.UnimplementedTestServiceServer } -func (s *testServer) UnaryCall(ctx context.Context, req *pb.SimpleRequest) (*pb.SimpleResponse, error) { - return &pb.SimpleResponse{}, nil +func (s *testServer) UnaryCall(ctx context.Context, req *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil } -func (s *testServer) StreamingInputCall(stream pb.TestService_StreamingInputCallServer) error { +func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error { for { _, err := stream.Recv() if err == io.EOF { - return stream.SendAndClose(&pb.StreamingInputCallResponse{}) + return stream.SendAndClose(&testpb.StreamingInputCallResponse{}) } if err != nil { return err @@ -315,7 +317,7 @@ func (s) TestStaticPolicyEnd2End(t *testing.T) { grpc.ChainUnaryInterceptor(i.UnaryInterceptor), grpc.ChainStreamInterceptor(i.StreamInterceptor)) defer s.Stop() - pb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -329,14 +331,14 @@ func (s) TestStaticPolicyEnd2End(t *testing.T) { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } defer clientConn.Close() - client := 
pb.NewTestServiceClient(clientConn) + client := testgrpc.NewTestServiceClient(clientConn) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() ctx = metadata.NewOutgoingContext(ctx, test.md) // Verifying authorization decision for Unary RPC. - _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { t.Fatalf("[UnaryCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) } @@ -346,8 +348,8 @@ func (s) TestStaticPolicyEnd2End(t *testing.T) { if err != nil { t.Fatalf("failed StreamingInputCall err: %v", err) } - req := &pb.StreamingInputCallRequest{ - Payload: &pb.Payload{ + req := &testpb.StreamingInputCallRequest{ + Payload: &testpb.Payload{ Body: []byte("hi"), }, } @@ -385,7 +387,7 @@ func (s) TestAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection(t * grpc.Creds(creds), grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() - pb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -403,13 +405,13 @@ func (s) TestAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection(t * t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } defer clientConn.Close() - client := pb.NewTestServiceClient(clientConn) + client := testgrpc.NewTestServiceClient(clientConn) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // Verifying authorization decision. 
- if _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}); err != nil { + if _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("client.UnaryCall(_, _) = %v; want nil", err) } } @@ -450,7 +452,7 @@ func (s) TestAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t grpc.Creds(creds), grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() - pb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -481,13 +483,13 @@ func (s) TestAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } defer clientConn.Close() - client := pb.NewTestServiceClient(clientConn) + client := testgrpc.NewTestServiceClient(clientConn) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // Verifying authorization decision. - if _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}); err != nil { + if _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("client.UnaryCall(_, _) = %v; want nil", err) } } @@ -504,7 +506,7 @@ func (s) TestFileWatcherEnd2End(t *testing.T) { grpc.ChainUnaryInterceptor(i.UnaryInterceptor), grpc.ChainStreamInterceptor(i.StreamInterceptor)) defer s.Stop() - pb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -519,14 +521,14 @@ func (s) TestFileWatcherEnd2End(t *testing.T) { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } defer clientConn.Close() - client := pb.NewTestServiceClient(clientConn) + client := testgrpc.NewTestServiceClient(clientConn) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() ctx = metadata.NewOutgoingContext(ctx, test.md) // Verifying authorization decision for Unary RPC. 
- _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { t.Fatalf("[UnaryCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) } @@ -536,8 +538,8 @@ func (s) TestFileWatcherEnd2End(t *testing.T) { if err != nil { t.Fatalf("failed StreamingInputCall err: %v", err) } - req := &pb.StreamingInputCallRequest{ - Payload: &pb.Payload{ + req := &testpb.StreamingInputCallRequest{ + Payload: &testpb.Payload{ Body: []byte("hi"), }, } @@ -552,9 +554,9 @@ func (s) TestFileWatcherEnd2End(t *testing.T) { } } -func retryUntil(ctx context.Context, tsc pb.TestServiceClient, want *status.Status) (lastErr error) { +func retryUntil(ctx context.Context, tsc testgrpc.TestServiceClient, want *status.Status) (lastErr error) { for ctx.Err() == nil { - _, lastErr = tsc.UnaryCall(ctx, &pb.SimpleRequest{}) + _, lastErr = tsc.UnaryCall(ctx, &testpb.SimpleRequest{}) if s := status.Convert(lastErr); s.Code() == want.Code() && s.Message() == want.Message() { return nil } @@ -573,7 +575,7 @@ func (s) TestFileWatcher_ValidPolicyRefresh(t *testing.T) { s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() - pb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -588,13 +590,13 @@ func (s) TestFileWatcher_ValidPolicyRefresh(t *testing.T) { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } defer clientConn.Close() - client := pb.NewTestServiceClient(clientConn) + client := testgrpc.NewTestServiceClient(clientConn) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // Verifying authorization decision. 
- _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) } @@ -621,7 +623,7 @@ func (s) TestFileWatcher_InvalidPolicySkipReload(t *testing.T) { s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() - pb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -636,13 +638,13 @@ func (s) TestFileWatcher_InvalidPolicySkipReload(t *testing.T) { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } defer clientConn.Close() - client := pb.NewTestServiceClient(clientConn) + client := testgrpc.NewTestServiceClient(clientConn) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // Verifying authorization decision. - _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid.wantStatus.Err()) } @@ -656,7 +658,7 @@ func (s) TestFileWatcher_InvalidPolicySkipReload(t *testing.T) { time.Sleep(40 * time.Millisecond) // Verifying authorization decision. 
- _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid.wantStatus.Err()) } @@ -672,7 +674,7 @@ func (s) TestFileWatcher_RecoversFromReloadFailure(t *testing.T) { s := grpc.NewServer( grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) defer s.Stop() - pb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -687,13 +689,13 @@ func (s) TestFileWatcher_RecoversFromReloadFailure(t *testing.T) { t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) } defer clientConn.Close() - client := pb.NewTestServiceClient(clientConn) + client := testgrpc.NewTestServiceClient(clientConn) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // Verifying authorization decision. - _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) } @@ -707,7 +709,7 @@ func (s) TestFileWatcher_RecoversFromReloadFailure(t *testing.T) { time.Sleep(120 * time.Millisecond) // Verifying authorization decision. 
- _, err = client.UnaryCall(ctx, &pb.SimpleRequest{}) + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) } diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index da2df41f2af8..1df63a9366e5 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -54,7 +54,8 @@ import ( durationpb "github.com/golang/protobuf/ptypes/duration" lbgrpc "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( @@ -298,7 +299,7 @@ func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServe } type testServer struct { - testpb.UnimplementedTestServiceServer + testgrpc.UnimplementedTestServiceServer addr string fallback bool @@ -318,7 +319,7 @@ func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.E return &testpb.Empty{}, nil } -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { return nil } @@ -328,7 +329,7 @@ func startBackends(t *testing.T, sn string, fallback bool, lis ...net.Listener) sn: sn, } s := grpc.NewServer(grpc.Creds(creds)) - testpb.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String(), fallback: fallback}) + testgrpc.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String(), fallback: fallback}) servers = append(servers, s) go func(s *grpc.Server, l net.Listener) { s.Serve(l) @@ -466,7 +467,7 @@ func (s) TestGRPCLB_Basic(t *testing.T) { // Make one successful RPC. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } @@ -539,7 +540,7 @@ func (s) TestGRPCLB_Weighted(t *testing.T) { } tss.ls.sls <- &lbpb.ServerList{Servers: backends} - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) if err := roundrobin.CheckWeightedRoundRobinRPCs(ctx, testC, wantAddrs); err != nil { t.Fatal(err) } @@ -599,7 +600,7 @@ func (s) TestGRPCLB_DropRequest(t *testing.T) { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) var ( i int @@ -771,7 +772,7 @@ func (s) TestGRPCLB_BalancerDisconnects(t *testing.T) { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -836,7 +837,7 @@ func (s) TestGRPCLB_Fallback(t *testing.T) { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) // Push an update to the resolver with fallback backend address stored in // the `Addresses` field and an invalid remote balancer address stored in @@ -942,7 +943,7 @@ func (s) TestGRPCLB_ExplicitFallback(t *testing.T) { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -1012,7 +1013,7 @@ func (s) TestGRPCLB_FallBackWithNoServerAddress(t *testing.T) { t.Fatalf("Failed to dial to the backend %v", 
err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -1151,7 +1152,7 @@ func (s) TestGRPCLB_PickFirst(t *testing.T) { } rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, s) r.UpdateState(rs) - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, beServerAddrs[1:]); err != nil { t.Fatal(err) } @@ -1194,7 +1195,7 @@ func (s) TestGRPCLB_BackendConnectionErrorPropagation(t *testing.T) { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) rs := resolver.State{ Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, @@ -1246,7 +1247,7 @@ func testGRPCLBEmptyServerList(t *testing.T, svcfg string) { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) tss.ls.sls <- &lbpb.ServerList{Servers: beServers} @@ -1320,7 +1321,7 @@ func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) // Push a resolver update with grpclb configuration which does not contain the // target_name field. Our fake remote balancer is configured to always @@ -1453,7 +1454,7 @@ const ( func (s) TestGRPCLBStatsUnarySuccess(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. 
@@ -1474,7 +1475,7 @@ func (s) TestGRPCLBStatsUnarySuccess(t *testing.T) { func (s) TestGRPCLBStatsUnaryDrop(t *testing.T) { if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. @@ -1496,7 +1497,7 @@ func (s) TestGRPCLBStatsUnaryDrop(t *testing.T) { func (s) TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. @@ -1518,7 +1519,7 @@ func (s) TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { func (s) TestGRPCLBStatsStreamingSuccess(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. @@ -1553,7 +1554,7 @@ func (s) TestGRPCLBStatsStreamingSuccess(t *testing.T) { func (s) TestGRPCLBStatsStreamingDrop(t *testing.T) { if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. 
@@ -1589,7 +1590,7 @@ func (s) TestGRPCLBStatsStreamingDrop(t *testing.T) { func (s) TestGRPCLBStatsStreamingFailedToSend(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. diff --git a/balancer/rls/helpers_test.go b/balancer/rls/helpers_test.go index fa2029b97ecf..9d2385efd658 100644 --- a/balancer/rls/helpers_test.go +++ b/balancer/rls/helpers_test.go @@ -34,12 +34,12 @@ import ( rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/stubserver" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/protobuf/types/known/durationpb" ) diff --git a/balancer/rls/picker_test.go b/balancer/rls/picker_test.go index 887b855f87b1..a2f84b265b1f 100644 --- a/balancer/rls/picker_test.go +++ b/balancer/rls/picker_test.go @@ -35,8 +35,8 @@ import ( "google.golang.org/protobuf/types/known/durationpb" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // Test verifies the scenario where there is no matching entry in the data cache diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index f04fb0a06c96..2dd34c605ffe 100644 --- 
a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -3,22 +3,22 @@ module google.golang.org/grpc/gcp/observability go 1.17 require ( - cloud.google.com/go/logging v1.6.1 + cloud.google.com/go/logging v1.7.0 contrib.go.opencensus.io/exporter/stackdriver v0.13.12 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 - golang.org/x/oauth2 v0.5.0 - google.golang.org/api v0.109.0 - google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba + golang.org/x/oauth2 v0.6.0 + google.golang.org/api v0.110.0 + google.golang.org/grpc v1.53.0 google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae ) require ( - cloud.google.com/go v0.109.0 // indirect + cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/longrunning v0.4.0 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect cloud.google.com/go/monitoring v1.12.0 // indirect cloud.google.com/go/trace v1.8.0 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect @@ -34,8 +34,10 @@ require ( golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect google.golang.org/protobuf v1.30.0 // indirect ) +replace google.golang.org/grpc => ../.. 
+ replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index fbc7b2898090..f6008bb2ae1d 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1,8 +1,8 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,6 +15,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -25,85 +26,569 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod 
h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= -cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod 
h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod 
h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod 
h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= 
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod 
h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod 
h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= 
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= -cloud.google.com/go/logging v1.6.1 
h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= 
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= 
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod 
h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= 
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/longrunning v0.4.0 h1:v+X4EwhHl6xE+TG1XgXj4T1XpKKs7ZevcAJ3FOu0YmY= -cloud.google.com/go/longrunning v0.4.0/go.mod h1:eF3Qsw58iX/bkKtVjMTYpH0LRjQ2goDkjkNQTlzq/ZM= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod 
h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= cloud.google.com/go/monitoring v1.12.0/go.mod 
h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod 
h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter 
v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recommendationengine v0.5.0/go.mod 
h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod 
h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= 
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell 
v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= 
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= 
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= 
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck 
v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod 
h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod 
h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -132,8 +617,10 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -146,12 +633,15 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= 
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -161,6 +651,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -170,17 +661,31 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 
h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0 
h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -189,25 +694,58 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= 
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/remyoudompheng/bigfft 
v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -216,7 +754,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -227,25 +768,44 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -268,9 +828,12 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -299,16 +862,32 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= 
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -324,9 +903,19 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 
v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -337,10 +926,12 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -353,6 +944,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -372,11 +964,14 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -388,14 +983,38 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -405,14 +1024,20 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 
h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -425,6 +1050,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -453,20 +1079,38 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod 
h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -498,9 +1142,33 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= -google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod 
h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -508,7 +1176,6 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto 
v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -543,10 +1210,13 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -568,35 +1238,72 @@ google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto 
v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa h1:qQPhfbPO23fwm/9lQr91L1u62Zo6cm+zI+slZT+uf+o= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto 
v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= 
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba h1:puuDphNHQZRngQpzUGvfXMBFBv6DuahfWMZaj0jVtjw= -google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba/go.mod 
h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -611,10 +1318,13 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -629,6 +1339,42 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= 
+modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler 
v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go index 31fe89fe18dd..5e2bab44be48 100644 --- a/gcp/observability/logging_test.go +++ b/gcp/observability/logging_test.go @@ -31,11 +31,13 @@ import ( gcplogging "cloud.google.com/go/logging" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/test/grpc_testing" + + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func cmpLoggingEntryList(got []*grpcLogEntry, want []*grpcLogEntry) error { @@ -155,14 +157,14 @@ func (s) TestClientRPCEventsLogAll(t *testing.T) { defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if _, err := stream.Recv(); err != nil { return err } - if err := stream.Send(&grpc_testing.StreamingOutputCallResponse{}); err != nil { + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { return err } if _, err := stream.Recv(); err != io.EOF { @@ -178,7 +180,7 @@ func (s) TestClientRPCEventsLogAll(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := ss.Client.UnaryCall(ctx, 
&grpc_testing.SimpleRequest{}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } @@ -251,7 +253,7 @@ func (s) TestClientRPCEventsLogAll(t *testing.T) { if err != nil { t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) } - if err := stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { t.Fatalf("stream.Send() failed: %v", err) } if _, err := stream.Recv(); err != nil { @@ -365,14 +367,14 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) { defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if _, err := stream.Recv(); err != nil { return err } - if err := stream.Send(&grpc_testing.StreamingOutputCallResponse{}); err != nil { + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { return err } if _, err := stream.Recv(); err != io.EOF { @@ -388,7 +390,7 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } grpcLogEntriesWant := []*grpcLogEntry{ @@ -458,7 +460,7 @@ func (s) TestServerRPCEventsLogAll(t *testing.T) { if err != nil { t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) } - if err := 
stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { t.Fatalf("stream.Send() failed: %v", err) } if _, err := stream.Recv(); err != nil { @@ -587,10 +589,10 @@ func (s) TestBothClientAndServerRPCEvents(t *testing.T) { defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != io.EOF { return err @@ -609,7 +611,7 @@ func (s) TestBothClientAndServerRPCEvents(t *testing.T) { // entries is checked in previous tests). ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } fle.mu.Lock() @@ -671,8 +673,8 @@ func (s) TestClientRPCEventsTruncateHeaderAndMetadata(t *testing.T) { defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, } if err := ss.Start(nil); err != nil { @@ -688,7 +690,7 @@ func (s) TestClientRPCEventsTruncateHeaderAndMetadata(t *testing.T) { "key2": []string{"value2"}, } ctx = metadata.NewOutgoingContext(ctx, md) - if _, err := 
ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: []byte("00000")}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: []byte("00000")}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } grpcLogEntriesWant := []*grpcLogEntry{ @@ -820,13 +822,13 @@ func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { defer cleanup() ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *grpc_testing.Empty) (*grpc_testing.Empty, error) { - return &grpc_testing.Empty{}, nil + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil }, - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != io.EOF { return err @@ -845,7 +847,7 @@ func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { // future. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } grpcLogEntriesWant := []*grpcLogEntry{ @@ -914,7 +916,7 @@ func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { // A unary empty RPC should match with the second event, which has the exclude // flag set. Thus, a unary empty RPC should cause no downstream logs. 
- if _, err := ss.Client.EmptyCall(ctx, &grpc_testing.Empty{}); err != nil { + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("Unexpected error from EmptyCall: %v", err) } // The exporter should have received no new log entries due to this call. @@ -1147,8 +1149,8 @@ func (s) TestMetadataTruncationAccountsKey(t *testing.T) { defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, } if err := ss.Start(nil); err != nil { @@ -1165,7 +1167,7 @@ func (s) TestMetadataTruncationAccountsKey(t *testing.T) { "key": []string{mdValue}, } ctx = metadata.NewOutgoingContext(ctx, md) - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: []byte("00000")}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: []byte("00000")}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go index 89e30c19d4ab..b2030d86b2fa 100644 --- a/gcp/observability/observability_test.go +++ b/gcp/observability/observability_test.go @@ -40,7 +40,9 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type s struct { @@ -381,10 +383,10 @@ func (s) TestOpenCensusIntegration(t *testing.T) { defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { 
- return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { _, err := stream.Recv() if err == io.EOF { @@ -401,7 +403,7 @@ func (s) TestOpenCensusIntegration(t *testing.T) { for i := 0; i < defaultRequestCount; i++ { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: testOkPayload}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } } @@ -599,10 +601,10 @@ func (s) TestLoggingLinkedWithTraceClientSide(t *testing.T) { } defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != io.EOF { return err @@ -675,7 +677,7 @@ func (s) TestLoggingLinkedWithTraceClientSide(t *testing.T) { fle.mu.Unlock() readerErrCh.Send(nil) }() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: testOkPayload}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { t.Fatalf("Unexpected error from 
UnaryCall: %v", err) } unaryDone.Fire() @@ -741,10 +743,10 @@ func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) { } defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != io.EOF { return err @@ -817,7 +819,7 @@ func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) { fle.mu.Unlock() readerErrCh.Send(nil) }() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{Body: testOkPayload}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } unaryDone.Fire() @@ -892,10 +894,10 @@ func (s) TestLoggingLinkedWithTrace(t *testing.T) { } defer cleanup() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != io.EOF { return err @@ -974,7 +976,7 @@ func (s) TestLoggingLinkedWithTrace(t *testing.T) { fle.mu.Unlock() readerErrCh.Send(nil) }() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: 
&grpc_testing.Payload{Body: testOkPayload}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } unaryDone.Fire() diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index 482c96a83b68..9234fc28370e 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -17,7 +17,7 @@ */ // Package stubserver is a stubbable implementation of -// google.golang.org/grpc/test/grpc_testing for testing purposes. +// google.golang.org/grpc/interop/grpc_testing for testing purposes. package stubserver import ( @@ -33,22 +33,23 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // StubServer is a server that is easy to customize within individual test // cases. type StubServer struct { // Guarantees we satisfy this interface; panics if unimplemented methods are called. - testpb.TestServiceServer + testgrpc.TestServiceServer // Customizable implementations of server handlers. EmptyCallF func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) UnaryCallF func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) - FullDuplexCallF func(stream testpb.TestService_FullDuplexCallServer) error + FullDuplexCallF func(stream testgrpc.TestService_FullDuplexCallServer) error // A client connected to this service the test may use. Created in Start(). 
- Client testpb.TestServiceClient + Client testgrpc.TestServiceClient CC *grpc.ClientConn S *grpc.Server @@ -75,7 +76,7 @@ func (ss *StubServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) ( } // FullDuplexCall is the handler for testpb.FullDuplexCall -func (ss *StubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (ss *StubServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { return ss.FullDuplexCallF(stream) } @@ -107,7 +108,7 @@ func (ss *StubServer) StartServer(sopts ...grpc.ServerOption) error { ss.cleanups = append(ss.cleanups, func() { lis.Close() }) s := grpc.NewServer(sopts...) - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) go s.Serve(lis) ss.cleanups = append(ss.cleanups, s.Stop) ss.S = s @@ -137,7 +138,7 @@ func (ss *StubServer) StartClient(dopts ...grpc.DialOption) error { ss.cleanups = append(ss.cleanups, func() { cc.Close() }) - ss.Client = testpb.NewTestServiceClient(cc) + ss.Client = testgrpc.NewTestServiceClient(cc) return nil } diff --git a/internal/testutils/pickfirst/pickfirst.go b/internal/testutils/pickfirst/pickfirst.go index 6ed93948e389..aa90ffc531f4 100644 --- a/internal/testutils/pickfirst/pickfirst.go +++ b/internal/testutils/pickfirst/pickfirst.go @@ -29,8 +29,8 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // CheckRPCsToBackend makes a bunch of RPCs on the given ClientConn and verifies diff --git a/internal/testutils/roundrobin/roundrobin.go b/internal/testutils/roundrobin/roundrobin.go index 8f9e79b5056b..ba595735364d 100644 --- a/internal/testutils/roundrobin/roundrobin.go +++ b/internal/testutils/roundrobin/roundrobin.go @@ -32,8 +32,8 @@ import ( 
"google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) var logger = grpclog.Component("testutils-roundrobin") diff --git a/interop/observability/go.mod b/interop/observability/go.mod index 6155ccd3af44..727f46f29db9 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -42,3 +42,5 @@ require ( replace google.golang.org/grpc => ../.. replace google.golang.org/grpc/gcp/observability => ../../gcp/observability + +replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 603eea94add6..f6008bb2ae1d 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -1305,8 +1305,6 @@ google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae h1:40UWCQ40A2NTDabsmbZNznFf9SUftDlaBASj7OCdKDY= -google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae/go.mod h1:qPsHQZhltTPryCUC0naykSpbIJDodlCLM/vNa607CrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git 
a/orca/call_metric_recorder_test.go b/orca/call_metric_recorder_test.go index f18d7259c249..25d4af371d08 100644 --- a/orca/call_metric_recorder_test.go +++ b/orca/call_metric_recorder_test.go @@ -36,8 +36,8 @@ import ( "google.golang.org/grpc/orca" v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type s struct { @@ -268,7 +268,7 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { } // Send one request to the server. - payload := &testpb.Payload{Type: testpb.PayloadType_RANDOM, Body: make([]byte, 32)} + payload := &testpb.Payload{Body: make([]byte, 32)} req := &testpb.StreamingOutputCallRequest{Payload: payload} if err := stream.Send(req); err != nil { t.Fatalf("stream.Send() failed: %v", err) diff --git a/orca/service_test.go b/orca/service_test.go index b9eff4365786..715d53241c71 100644 --- a/orca/service_test.go +++ b/orca/service_test.go @@ -38,8 +38,8 @@ import ( v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const requestsMetricKey = "test-service-requests" @@ -98,7 +98,7 @@ func (s) Test_E2E_CustomBackendMetrics_OutOfBand(t *testing.T) { } // Register the test service implementation on the same grpc server, and start serving. 
- testpb.RegisterTestServiceServer(s, &testServiceImpl{orcaSrv: orcaSrv}) + testgrpc.RegisterTestServiceServer(s, &testServiceImpl{orcaSrv: orcaSrv}) go s.Serve(lis) defer s.Stop() t.Logf("Started gRPC server at %s...", lis.Addr().String()) diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 3eb6a1c8fa77..0f1975ba9d4b 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -38,7 +38,9 @@ import ( "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type s struct { @@ -275,12 +277,12 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { defer view.UnregisterExporter(fe) ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{Payload: &grpc_testing.Payload{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{Payload: &testpb.Payload{ Body: make([]byte, 10000), }}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { _, err := stream.Recv() if err == io.EOF { @@ -297,7 +299,7 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { defer cancel() // Make two RPC's, a unary RPC and a streaming RPC. These should cause // certain metrics to be emitted. 
- if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{ + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{ Body: make([]byte, 10000), }}, grpc.UseCompressor(gzip.Name)); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) @@ -1037,7 +1039,7 @@ func (s) TestOpenCensusTags(t *testing.T) { // populated at the client side application layer if populated. tmCh := testutils.NewChannel() ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { // Do the sends of the tag maps for assertions in this main testing // goroutine. Do the receives and assertions in a forked goroutine. if tm := tag.FromContext(ctx); tm != nil { @@ -1045,7 +1047,7 @@ func (s) TestOpenCensusTags(t *testing.T) { } else { tmCh.Send(errors.New("no tag map received server side")) } - return &grpc_testing.SimpleResponse{}, nil + return &testpb.SimpleResponse{}, nil }, } if err := ss.Start([]grpc.ServerOption{ServerOption(TraceOptions{})}, DialOption(TraceOptions{})); err != nil { @@ -1120,7 +1122,7 @@ func (s) TestOpenCensusTags(t *testing.T) { // keyServerMethod. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } @@ -1150,7 +1152,7 @@ func (s) TestOpenCensusTags(t *testing.T) { // Make a unary RPC with a populated OpenCensus tag map. The server side // should receive an OpenCensus tag map containing this populated tag map // with the keyServerMethod tag appended to it. 
- if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { @@ -1352,10 +1354,10 @@ func (s) TestSpan(t *testing.T) { DisableTrace: false, } ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, in *grpc_testing.SimpleRequest) (*grpc_testing.SimpleResponse, error) { - return &grpc_testing.SimpleResponse{}, nil + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream grpc_testing.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { _, err := stream.Recv() if err == io.EOF { @@ -1376,7 +1378,7 @@ func (s) TestSpan(t *testing.T) { // both from the client and the server. Note that RPCs trigger exports of // corresponding span data synchronously, thus the Span Data is guaranteed // to have been read by exporter and is ready to make assertions on. - if _, err := ss.Client.UnaryCall(ctx, &grpc_testing.SimpleRequest{Payload: &grpc_testing.Payload{}}); err != nil { + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } @@ -1475,10 +1477,10 @@ func (s) TestSpan(t *testing.T) { } // Send two messages. This should be recorded in the emitted spans message // events, with message IDs which increase for each message. 
- if err := stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { t.Fatalf("stream.Send failed: %v", err) } - if err := stream.Send(&grpc_testing.StreamingOutputCallRequest{}); err != nil { + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { t.Fatalf("stream.Send failed: %v", err) } diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index 9bce90a995e6..26dda8eadbea 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/google/go-cmp v0.5.9 go.opencensus.io v0.24.0 - google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba + google.golang.org/grpc v1.53.0 ) require ( @@ -14,6 +14,8 @@ require ( golang.org/x/net v0.8.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect google.golang.org/protobuf v1.30.0 // indirect ) + +replace google.golang.org/grpc => ../.. 
diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index da89ea72944f..0d346feaf4df 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -1,99 +1,1268 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod 
h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod 
h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/area120 v0.5.0/go.mod 
h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod 
h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod 
h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= 
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod 
h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= 
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= 
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= 
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod 
h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= 
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= 
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkmanagement v1.4.0/go.mod 
h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig 
v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= 
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= 
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod 
h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol 
v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech 
v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod 
h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= 
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= 
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod 
h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/ghodss/yaml 
v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod 
h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 
v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= 
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= 
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod 
h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod 
h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto 
v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= 
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba h1:puuDphNHQZRngQpzUGvfXMBFBv6DuahfWMZaj0jVtjw= -google.golang.org/grpc v1.53.0-dev.0.20230315171901-a1e657ce53ba/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto 
v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -102,13 +1271,66 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= 
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= 
+modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/status/status_ext_test.go b/status/status_ext_test.go index 4c1efc56320f..33c8c71a0062 100644 --- a/status/status_ext_test.go +++ b/status/status_ext_test.go @@ -26,7 +26,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/status" - "google.golang.org/grpc/test/grpc_testing" + + testpb "google.golang.org/grpc/interop/grpc_testing" ) type s struct { @@ -49,7 +50,7 @@ func errWithDetails(t *testing.T, s *status.Status, details ...proto.Message) er func (s) TestErrorIs(t *testing.T) { // Test errors. 
testErr := status.Error(codes.Internal, "internal server error") - testErrWithDetails := errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}) + testErrWithDetails := errWithDetails(t, status.New(codes.Internal, "internal server error"), &testpb.Empty{}) // Test cases. testCases := []struct { @@ -62,8 +63,8 @@ func (s) TestErrorIs(t *testing.T) { {err1: testErr, err2: status.Error(codes.Unknown, "internal server error"), want: false}, {err1: testErr, err2: errors.New("non-grpc error"), want: false}, {err1: testErrWithDetails, err2: status.Error(codes.Internal, "internal server error"), want: false}, - {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}), want: true}, - {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}, &grpc_testing.Empty{}), want: false}, + {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &testpb.Empty{}), want: true}, + {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &testpb.Empty{}, &testpb.Empty{}), want: false}, } for _, tc := range testCases { diff --git a/test/authority_test.go b/test/authority_test.go index ef0f56656b24..44095a23a2fe 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -39,7 +39,9 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func authorityChecker(ctx context.Context, expectedAuthority string) (*testpb.Empty, error) { @@ -202,7 +204,7 @@ func (s) TestColonPortAuthority(t *testing.T) { defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 
defer cancel() - _, err = testpb.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}) + _, err = testgrpc.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}) if err != nil { t.Errorf("us.client.EmptyCall(_, _) = _, %v; want _, nil", err) } @@ -235,7 +237,7 @@ func (s) TestAuthorityReplacedWithResolverAddress(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err = testpb.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}); err != nil { + if _, err = testgrpc.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall() rpc failed: %v", err) } } diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go index 0337cd19014a..716625a63b3f 100644 --- a/test/balancer_switching_test.go +++ b/test/balancer_switching_test.go @@ -36,7 +36,8 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const ( @@ -137,7 +138,7 @@ func (s) TestBalancerSwitch_Basic(t *testing.T) { Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { t.Fatal(err) } @@ -173,7 +174,7 @@ func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) { r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[0:1]); err != nil { t.Fatal(err) } @@ -225,7 +226,7 @@ func (s) TestBalancerSwitch_pickFirstToGRPCLB(t 
*testing.T) { // to the grpclb server we created above. This will cause the channel to // switch to the "grpclb" balancer, which returns a single backend address. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } @@ -278,7 +279,7 @@ func (s) TestBalancerSwitch_RoundRobinToGRPCLB(t *testing.T) { r.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } @@ -344,7 +345,7 @@ func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) { Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } @@ -383,7 +384,7 @@ func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing r.UpdateState(resolver.State{ Addresses: append(addrs[1:], resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), }) - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } @@ -431,7 +432,7 @@ func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := 
testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { t.Fatal(err) } @@ -555,7 +556,7 @@ func (s) TestBalancerSwitch_Graceful(t *testing.T) { Addresses: addrs[1:], ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), }) - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { t.Fatal(err) } diff --git a/test/balancer_test.go b/test/balancer_test.go index bd782ffa6e4f..c9a769c9f5a4 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -46,8 +46,10 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const testBalancerName = "testbalancer" @@ -165,7 +167,7 @@ func (s) TestCredsBundleFromBalancer(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("Test failed. 
Reason: %v", err) } @@ -202,7 +204,7 @@ func testPickExtraMetadata(t *testing.T, e env) { r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = "xds" cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -242,7 +244,7 @@ func testDoneInfo(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -319,7 +321,7 @@ func testDoneLoads(t *testing.T) { } defer ss.Stop() - tc := testpb.NewTestServiceClient(ss.CC) + tc := testgrpc.NewTestServiceClient(ss.CC) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -509,7 +511,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) { } s := grpc.NewServer() - testpb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) go s.Serve(lis) defer s.Stop() t.Logf("Started gRPC server at %s...", lis.Addr().String()) @@ -525,7 +527,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) { t.Fatal(err) } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) t.Log("Created a ClientConn...") // The first RPC should fail because there's no address. 
@@ -642,7 +644,7 @@ func (s) TestServersSwap(t *testing.T) { return &testpb.SimpleResponse{Username: username}, nil }, } - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) go s.Serve(lis) return lis.Addr().String(), s.Stop } @@ -661,7 +663,7 @@ func (s) TestServersSwap(t *testing.T) { t.Fatalf("Error creating client: %v", err) } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) // Confirm we are connected to the first server if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one { @@ -699,7 +701,7 @@ func (s) TestWaitForReady(t *testing.T) { return &testpb.SimpleResponse{Username: one}, nil }, } - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) go s.Serve(lis) // Initialize client @@ -710,7 +712,7 @@ func (s) TestWaitForReady(t *testing.T) { t.Fatalf("Error creating client: %v", err) } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) // Report an error so non-WFR RPCs will give up early. 
r.CC.ReportError(errors.New("fake resolver error")) @@ -827,7 +829,7 @@ func (s) TestAuthorityInBuildOptions(t *testing.T) { } s := grpc.NewServer() - testpb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) go s.Serve(lis) defer s.Stop() t.Logf("Started gRPC server at %s...", lis.Addr().String()) @@ -845,7 +847,7 @@ func (s) TestAuthorityInBuildOptions(t *testing.T) { t.Fatal(err) } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) t.Log("Created a ClientConn...") ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -968,7 +970,7 @@ func (s) TestMetadataInPickResult(t *testing.T) { t.Fatalf("grpc.Dial(): %v", err) } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) t.Log("Making EmptyCall() RPC with custom metadata...") ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/test/channelz_linux_test.go b/test/channelz_linux_test.go index 0eef08df3c9f..e532fbb12c56 100644 --- a/test/channelz_linux_test.go +++ b/test/channelz_linux_test.go @@ -27,7 +27,8 @@ import ( "time" "google.golang.org/grpc/internal/channelz" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestCZSocketMetricsSocketOption(t *testing.T) { @@ -44,7 +45,7 @@ func testCZSocketMetricsSocketOption(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) doSuccessfulUnaryCall(tc, t) time.Sleep(10 * time.Millisecond) diff --git a/test/channelz_test.go b/test/channelz_test.go index 0633b0799fb2..0a6ff579773f 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -44,8 +44,10 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" 
"google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func czCleanupWrapper(cleanup func() error, t *testing.T) { @@ -518,7 +520,7 @@ func (s) TestCZChannelMetrics(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } @@ -598,7 +600,7 @@ func (s) TestCZServerMetrics(t *testing.T) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } @@ -646,7 +648,7 @@ func (s) TestCZServerMetrics(t *testing.T) { } type testServiceClientWrapper struct { - testpb.TestServiceClient + testgrpc.TestServiceClient mu sync.RWMutex streamsCreated int } @@ -671,35 +673,35 @@ func (t *testServiceClientWrapper) UnaryCall(ctx context.Context, in *testpb.Sim return t.TestServiceClient.UnaryCall(ctx, in, opts...) } -func (t *testServiceClientWrapper) StreamingOutputCall(ctx context.Context, in *testpb.StreamingOutputCallRequest, opts ...grpc.CallOption) (testpb.TestService_StreamingOutputCallClient, error) { +func (t *testServiceClientWrapper) StreamingOutputCall(ctx context.Context, in *testpb.StreamingOutputCallRequest, opts ...grpc.CallOption) (testgrpc.TestService_StreamingOutputCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.StreamingOutputCall(ctx, in, opts...) 
} -func (t *testServiceClientWrapper) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_StreamingInputCallClient, error) { +func (t *testServiceClientWrapper) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (testgrpc.TestService_StreamingInputCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.StreamingInputCall(ctx, opts...) } -func (t *testServiceClientWrapper) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_FullDuplexCallClient, error) { +func (t *testServiceClientWrapper) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testgrpc.TestService_FullDuplexCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.FullDuplexCall(ctx, opts...) } -func (t *testServiceClientWrapper) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_HalfDuplexCallClient, error) { +func (t *testServiceClientWrapper) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testgrpc.TestService_HalfDuplexCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.HalfDuplexCall(ctx, opts...) 
} -func doSuccessfulUnaryCall(tc testpb.TestServiceClient, t *testing.T) { +func doSuccessfulUnaryCall(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -707,7 +709,7 @@ func doSuccessfulUnaryCall(tc testpb.TestServiceClient, t *testing.T) { } } -func doStreamingInputCallWithLargePayload(tc testpb.TestServiceClient, t *testing.T) { +func doStreamingInputCallWithLargePayload(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() s, err := tc.StreamingInputCall(ctx) @@ -721,7 +723,7 @@ func doStreamingInputCallWithLargePayload(tc testpb.TestServiceClient, t *testin s.Send(&testpb.StreamingInputCallRequest{Payload: payload}) } -func doServerSideFailedUnaryCall(tc testpb.TestServiceClient, t *testing.T) { +func doServerSideFailedUnaryCall(tc testgrpc.TestServiceClient, t *testing.T) { const smallSize = 1 const largeSize = 2000 @@ -741,7 +743,7 @@ func doServerSideFailedUnaryCall(tc testpb.TestServiceClient, t *testing.T) { } } -func doClientSideInitiatedFailedStream(tc testpb.TestServiceClient, t *testing.T) { +func doClientSideInitiatedFailedStream(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) stream, err := tc.FullDuplexCall(ctx) if err != nil { @@ -774,7 +776,7 @@ func doClientSideInitiatedFailedStream(tc testpb.TestServiceClient, t *testing.T } // This func is to be used to test client side counting of failed streams. 
-func doServerSideInitiatedFailedStreamWithRSTStream(tc testpb.TestServiceClient, t *testing.T, l *listenerWrapper) { +func doServerSideInitiatedFailedStreamWithRSTStream(tc testgrpc.TestServiceClient, t *testing.T, l *listenerWrapper) { stream, err := tc.FullDuplexCall(context.Background()) if err != nil { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err) @@ -812,7 +814,7 @@ func doServerSideInitiatedFailedStreamWithRSTStream(tc testpb.TestServiceClient, } // this func is to be used to test client side counting of failed streams. -func doServerSideInitiatedFailedStreamWithGoAway(tc testpb.TestServiceClient, t *testing.T, l *listenerWrapper) { +func doServerSideInitiatedFailedStreamWithGoAway(tc testgrpc.TestServiceClient, t *testing.T, l *listenerWrapper) { // This call is just to keep the transport from shutting down (socket will be deleted // in this case, and we will not be able to get metrics). s, err := tc.FullDuplexCall(context.Background()) @@ -854,7 +856,7 @@ func doServerSideInitiatedFailedStreamWithGoAway(tc testpb.TestServiceClient, t } } -func doIdleCallToInvokeKeepAlive(tc testpb.TestServiceClient, t *testing.T) { +func doIdleCallToInvokeKeepAlive(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) _, err := tc.FullDuplexCall(ctx) if err != nil { @@ -875,7 +877,7 @@ func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) { rcw := te.startServerWithConnControl(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} doSuccessfulUnaryCall(tc, t) var scID, skID int64 @@ -974,7 +976,7 @@ func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *t // Avoid overflowing connection level flow control window, which will lead to // transport being closed. 
te.serverInitialConnWindowSize = 65536 * 2 - ts := &stubserver.StubServer{FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + ts := &stubserver.StubServer{FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { stream.Send(&testpb.StreamingOutputCallResponse{}) <-stream.Context().Done() return status.Errorf(codes.DeadlineExceeded, "deadline exceeded or cancelled") @@ -982,7 +984,7 @@ func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *t te.startServer(ts) defer te.tearDown() cc, dw := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) stream, err := tc.FullDuplexCall(ctx) @@ -1062,7 +1064,7 @@ func (s) TestCZClientAndServerSocketMetricsFlowControl(t *testing.T) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) for i := 0; i < 10; i++ { doSuccessfulUnaryCall(tc, t) @@ -1225,7 +1227,7 @@ func (s) TestCZServerSocketMetricsStreamsAndMessagesCount(t *testing.T) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc, _ := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} var svrID int64 if err := verifyResultWithDelay(func() (bool, error) { @@ -1296,7 +1298,7 @@ func (s) TestCZServerSocketMetricsKeepAlive(t *testing.T) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) start := time.Now() doIdleCallToInvokeKeepAlive(tc, t) @@ -1697,7 +1699,7 @@ func (s) 
TestCZSubChannelPickedNewAddress(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -1766,7 +1768,7 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -1865,7 +1867,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() diff --git a/test/clientconn_test.go b/test/clientconn_test.go index 58d0d54fe581..bdbe81d03040 100644 --- a/test/clientconn_test.go +++ b/test/clientconn_test.go @@ -29,10 +29,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/channelz" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" ) // TestClientConnClose_WithPendingRPC tests the scenario where the channel has diff --git a/test/compressor_test.go b/test/compressor_test.go index 9ca498bbba40..89daa765eeff 100644 --- a/test/compressor_test.go +++ b/test/compressor_test.go @@ -36,7 +36,9 @@ import ( 
"google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestCompressServerHasNoSupport(t *testing.T) { @@ -52,7 +54,7 @@ func testCompressServerHasNoSupport(t *testing.T, e env) { te.clientNopCompression = true te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const argSize = 271828 const respSize = 314159 @@ -93,7 +95,7 @@ func testCompressOK(t *testing.T, e env) { te.clientCompression = true te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) // Unary call const argSize = 271828 @@ -154,7 +156,7 @@ func testIdentityEncoding(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) // Unary call payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5) @@ -355,7 +357,7 @@ func testUnarySetSendCompressorSuccess(t *testing.T, resCompressor string, wantC func testStreamSetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { wc := setupGzipWrapCompressor(t) ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if _, err := stream.Recv(); err != nil { return err } @@ -448,7 +450,7 @@ func testUnarySetSendCompressorFailure(t *testing.T, resCompressor string, wantE func testStreamSetSendCompressorFailure(t *testing.T, resCompressor 
string, wantErr error) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if _, err := stream.Recv(); err != nil { return err } @@ -511,7 +513,7 @@ func (s) TestUnarySetSendCompressorAfterHeaderSendFailure(t *testing.T) { func (s) TestStreamSetSendCompressorAfterHeaderSendFailure(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { // Send headers early and then set send compressor. grpc.SendHeader(stream.Context(), metadata.MD{}) err := grpc.SetSendCompressor(stream.Context(), "gzip") @@ -612,7 +614,7 @@ func testCompressorRegister(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) // Unary call const argSize = 271828 diff --git a/test/config_selector_test.go b/test/config_selector_test.go index 677a24839a1d..422bf77c018e 100644 --- a/test/config_selector_test.go +++ b/test/config_selector_test.go @@ -31,11 +31,11 @@ import ( "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" + testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" ) type funcConfigSelector struct { diff --git a/test/context_canceled_test.go b/test/context_canceled_test.go index 96ee69d8d521..a4b3810e16dc 100644 --- a/test/context_canceled_test.go +++ b/test/context_canceled_test.go @@ -29,12 +29,14 @@ import ( "google.golang.org/grpc/internal/stubserver" 
"google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestContextCanceled(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { stream.SetTrailer(metadata.New(map[string]string{"a": "b"})) return status.Error(codes.PermissionDenied, "perm denied") }, @@ -123,7 +125,7 @@ func (s) TestContextCanceled(t *testing.T) { // will be inconsistent, and it causes internal error. func (s) TestCancelWhileRecvingWithCompression(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: nil, diff --git a/test/control_plane_status_test.go b/test/control_plane_status_test.go index be191f456b2f..087dd30dd670 100644 --- a/test/control_plane_status_test.go +++ b/test/control_plane_status_test.go @@ -32,10 +32,10 @@ import ( "google.golang.org/grpc/internal/balancer/stub" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/stubserver" + testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" ) func (s) TestConfigSelectorStatusCodes(t *testing.T) { diff --git a/test/creds_test.go b/test/creds_test.go index 5323affa7903..70af9945cc8e 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -37,8 +37,10 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" - testpb 
"google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const ( @@ -92,7 +94,7 @@ func (s) TestCredsBundleBoth(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -116,7 +118,7 @@ func (s) TestCredsBundleTransportCredentials(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -134,7 +136,7 @@ func (s) TestCredsBundlePerRPCCredentials(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -170,7 +172,7 @@ func (s) TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { defer te.tearDown() cc := te.clientConn(grpc.WithTransportCredentials(&clientTimeoutCreds{})) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() // This unary call should succeed, because ClientHandshake will succeed for the second time. 
@@ -196,7 +198,7 @@ func (s) TestGRPCMethodAccessibleToCredsViaContextRequestInfo(t *testing.T) { defer te.tearDown() cc := te.clientConn(grpc.WithPerRPCCredentials(&methodTestCreds{})) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -239,7 +241,7 @@ func (s) TestFailFastRPCErrorOnBadCertificates(t *testing.T) { } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) for i := 0; i < 1000; i++ { // This loop runs for at most 1 second. The first several RPCs will fail // with Unavailable because the connection hasn't started. When the @@ -267,7 +269,7 @@ func (s) TestWaitForReadyRPCErrorOnBadCertificates(t *testing.T) { } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { @@ -332,7 +334,7 @@ func testPerRPCCredentialsViaDialOptions(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -353,7 +355,7 @@ func testPerRPCCredentialsViaCallOptions(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{authdata: authdata})); err != nil { @@ -395,7 +397,7 @@ func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e 
env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{authdata: authdata})); err != nil { diff --git a/test/end2end_test.go b/test/end2end_test.go index 4009f0515017..b4e81cdf4cd9 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -41,20 +41,15 @@ import ( "time" "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding" - _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/health" - healthgrpc "google.golang.org/grpc/health/grpc_health_v1" - healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -71,8 +66,16 @@ import ( "google.golang.org/grpc/status" "google.golang.org/grpc/tap" "google.golang.org/grpc/test/bufconn" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + anypb "github.com/golang/protobuf/ptypes/any" + spb "google.golang.org/genproto/googleapis/rpc/status" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/encoding/gzip" ) const defaultHealthService = "grpc.health.v1.Health" @@ -129,7 +132,7 @@ var ( var raceMode bool // 
set by race.go in race mode type testServer struct { - testpb.UnimplementedTestServiceServer + testgrpc.UnimplementedTestServiceServer security string // indicate the authentication protocol used by this server. earlyFail bool // whether to error out the execution of a service handler prematurely. @@ -162,8 +165,6 @@ func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: - case testpb.PayloadType_UNCOMPRESSABLE: - return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported") default: return nil, fmt.Errorf("unsupported payload type: %d", t) } @@ -244,7 +245,7 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* }, nil } -func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { +func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error { if md, ok := metadata.FromIncomingContext(stream.Context()); ok { if _, exists := md[":authority"]; !exists { return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) @@ -275,7 +276,7 @@ func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest return nil } -func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { +func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error { var sum int for { in, err := stream.Recv() @@ -295,7 +296,7 @@ func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInput } } -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if ok { if s.setAndSendHeader { @@ -359,7 
+360,7 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ } } -func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { +func (s *testServer) HalfDuplexCall(stream testgrpc.TestService_HalfDuplexCallServer) error { var msgBuf []*testpb.StreamingOutputCallRequest for { in, err := stream.Recv() @@ -565,7 +566,7 @@ func newTest(t *testing.T, e env) *test { return te } -func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener { +func (te *test) listenAndServe(ts testgrpc.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener { te.t.Helper() te.t.Logf("Running test in %s environment...", te.e.name) sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)} @@ -625,7 +626,7 @@ func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, sopts = append(sopts, te.customServerOptions...) s := grpc.NewServer(sopts...) if ts != nil { - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) } // Create a new default health server if enableHealthServer is set, or use @@ -690,20 +691,20 @@ func (w wrapHS) Stop() { w.s.Close() } -func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper { +func (te *test) startServerWithConnControl(ts testgrpc.TestServiceServer) *listenerWrapper { l := te.listenAndServe(ts, listenWithConnControl) return l.(*listenerWrapper) } // startServer starts a gRPC server exposing the provided TestService // implementation. Callers should defer a call to te.tearDown to clean up -func (te *test) startServer(ts testpb.TestServiceServer) { +func (te *test) startServer(ts testgrpc.TestServiceServer) { te.t.Helper() te.listenAndServe(ts, net.Listen) } // startServers starts 'num' gRPC servers exposing the provided TestService. 
-func (te *test) startServers(ts testpb.TestServiceServer, num int) { +func (te *test) startServers(ts testgrpc.TestServiceServer, num int) { for i := 0; i < num; i++ { te.startServer(ts) te.srvs = append(te.srvs, te.srv.(*grpc.Server)) @@ -937,7 +938,7 @@ func (s) TestContextDeadlineNotIgnored(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) @@ -973,7 +974,7 @@ func testTimeoutOnDeadServer(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) @@ -1025,7 +1026,7 @@ func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { rpcStartedOnServer := make(chan struct{}) rpcDoneOnClient := make(chan struct{}) ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { close(rpcStartedOnServer) <-rpcDoneOnClient return status.Error(codes.Internal, "arbitrary status") @@ -1078,7 +1079,7 @@ func testFailFast(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -1168,7 +1169,7 @@ func (s) TestGetMethodConfig(t *testing.T) { ] }`)}) - tc := testpb.NewTestServiceClient(cc) + tc := 
testgrpc.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. for { @@ -1252,7 +1253,7 @@ func (s) TestServiceConfigWaitForReady(t *testing.T) { ] }`)}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. for { @@ -1341,7 +1342,7 @@ func (s) TestServiceConfigTimeout(t *testing.T) { ] }`)}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. for { @@ -1459,7 +1460,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { ] }`) r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc}) - tc := testpb.NewTestServiceClient(cc1) + tc := testgrpc.NewTestServiceClient(cc1) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -1530,7 +1531,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { defer te2.tearDown() cc2 := te2.clientConn(grpc.WithResolvers(r)) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc}) - tc = testpb.NewTestServiceClient(cc2) + tc = testgrpc.NewTestServiceClient(cc2) for { if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil { @@ -1590,7 +1591,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { cc3 := te3.clientConn(grpc.WithResolvers(r)) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc}) - tc = testpb.NewTestServiceClient(cc3) + tc = testgrpc.NewTestServiceClient(cc3) for { if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil { @@ -1675,7 +1676,7 @@ func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: 
te.srvAddr}}, @@ -1753,7 +1754,7 @@ func testPreloaderClientSend(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) // Test for streaming RPC recv. // Set context for send with proper RPC Information @@ -1811,7 +1812,7 @@ func testPreloaderClientSend(t *testing.T, e env) { func (s) TestPreloaderSenderSend(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for i := 0; i < 10; i++ { preparedMsg := &grpc.PreparedMsg{} err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{ @@ -1882,7 +1883,7 @@ func testMaxMsgSizeClientDefault(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const smallSize = 1 const largeSize = 4 * 1024 * 1024 @@ -1949,7 +1950,7 @@ func testMaxMsgSizeClientAPI(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const smallSize = 1 const largeSize = 1024 @@ -2037,7 +2038,7 @@ func testMaxMsgSizeServerAPI(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const smallSize = 1 const largeSize = 1024 @@ -2149,7 +2150,7 @@ func testTap(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := 
tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -2194,7 +2195,7 @@ func testEmptyUnaryWithUserAgent(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) var header metadata.MD ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -2225,7 +2226,7 @@ func testFailedEmptyUnary(t *testing.T, e env) { te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) wantErr := detailedError @@ -2244,7 +2245,7 @@ func testLargeUnary(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const argSize = 271828 const respSize = 314159 @@ -2286,7 +2287,7 @@ func testExceedMsgLimit(t *testing.T, e env) { te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) largeSize := int32(maxMsgSize + 1) const smallSize = 1 @@ -2368,7 +2369,7 @@ func testPeerClientSide(t *testing.T, e env) { te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) peer := new(peer.Peer) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -2410,7 +2411,7 @@ func testPeerNegative(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := 
testgrpc.NewTestServiceClient(cc) peer := new(peer.Peer) ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -2429,7 +2430,7 @@ func testPeerFailedRPC(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -2484,7 +2485,7 @@ func testMetadataUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const argSize = 2718 const respSize = 314 @@ -2530,7 +2531,7 @@ func testMetadataOrderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2") @@ -2568,7 +2569,7 @@ func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -2605,7 +2606,7 @@ func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) @@ -2640,7 +2641,7 @@ func 
testSetAndSendHeaderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setAndSendHeader: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -2685,7 +2686,7 @@ func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -2730,7 +2731,7 @@ func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -2774,7 +2775,7 @@ func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setAndSendHeader: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx) @@ -2815,7 +2816,7 @@ func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -2880,7 +2881,7 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 
1 @@ -2946,7 +2947,7 @@ func testMalformedHTTP2Metadata(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718) if err != nil { @@ -3020,7 +3021,7 @@ func (s) TestTransparentRetry(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) for i, tc := range testCases { stream, err := client.FullDuplexCall(ctx) @@ -3053,7 +3054,7 @@ func testCancel(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) const argSize = 2718 const respSize = 314 @@ -3090,7 +3091,7 @@ func testCancelNoIO(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // Start one blocked RPC for which we'll never send streaming // input. 
This will consume the 1 maximum concurrent streams, @@ -3156,7 +3157,7 @@ func testNoService(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true)) if err != nil { @@ -3177,7 +3178,7 @@ func testPingPong(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) stream, err := tc.FullDuplexCall(te.ctx) if err != nil { @@ -3236,7 +3237,7 @@ func testMetadataStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(te.ctx, testMetadata) stream, err := tc.FullDuplexCall(ctx) @@ -3315,7 +3316,7 @@ func testServerStreaming(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { @@ -3373,7 +3374,7 @@ func testFailedServerStreaming(t *testing.T, e env) { te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { @@ -3407,10 +3408,10 @@ func equalError(x, y error) bool { // // All other TestServiceServer methods crash if called. 
type concurrentSendServer struct { - testpb.TestServiceServer + testgrpc.TestServiceServer } -func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { +func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error { for i := 0; i < 10; i++ { stream.Send(&testpb.StreamingOutputCallResponse{ Payload: &testpb.Payload{ @@ -3434,7 +3435,7 @@ func testServerStreamingConcurrent(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) doStreamingCall := func() { req := &testpb.StreamingOutputCallRequest{} @@ -3515,7 +3516,7 @@ func testClientStreaming(t *testing.T, e env, sizes []int) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(te.ctx, time.Second*30) defer cancel() @@ -3561,7 +3562,7 @@ func testClientStreamingError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, earlyFail: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) stream, err := tc.StreamingInputCall(te.ctx) if err != nil { @@ -3608,7 +3609,7 @@ func testExceedMaxStreamsLimit(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) _, err := tc.StreamingInputCall(te.ctx) if err != nil { @@ -3648,7 +3649,7 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithCancel(context.Background()) 
defer cancel() if _, err := tc.StreamingInputCall(ctx); err != nil { @@ -3724,7 +3725,7 @@ func testUnaryClientInterceptor(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound { @@ -3752,7 +3753,7 @@ func testStreamClientInterceptor(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(1), @@ -3790,7 +3791,7 @@ func testUnaryServerInterceptor(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.PermissionDenied { @@ -3822,7 +3823,7 @@ func testStreamServerInterceptor(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(1), @@ -3863,21 +3864,21 @@ func testStreamServerInterceptor(t *testing.T, e env) { // Any unimplemented method will crash. Tests implement the method(s) // they need. 
type funcServer struct { - testpb.TestServiceServer + testgrpc.TestServiceServer unaryCall func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) - streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error - fullDuplexCall func(stream testpb.TestService_FullDuplexCallServer) error + streamingInputCall func(stream testgrpc.TestService_StreamingInputCallServer) error + fullDuplexCall func(stream testgrpc.TestService_FullDuplexCallServer) error } func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return s.unaryCall(ctx, in) } -func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { +func (s *funcServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error { return s.streamingInputCall(stream) } -func (s *funcServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *funcServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { return s.fullDuplexCall(stream) } @@ -3973,7 +3974,7 @@ func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) { func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) { te := newTest(t, e) recvErr := make(chan error, 1) - ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { _, err := stream.Recv() recvErr <- err return nil @@ -4015,7 +4016,7 @@ func testClientInitialHeaderEndStream(t *testing.T, e env) { // checking. 
handlerDone := make(chan struct{}) te := newTest(t, e) - ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { defer close(handlerDone) // Block on serverTester receiving RST_STREAM. This ensures server has closed // stream before stream.Recv(). @@ -4059,7 +4060,7 @@ func testClientSendDataAfterCloseSend(t *testing.T, e env) { // checking. handlerDone := make(chan struct{}) te := newTest(t, e) - ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { defer close(handlerDone) // Block on serverTester receiving RST_STREAM. This ensures server has closed // stream before stream.Recv(). @@ -4111,7 +4112,7 @@ func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) { func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) { te := newTest(t, e) recvErr := make(chan error, 1) - ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { + ts := &funcServer{fullDuplexCall: func(stream testgrpc.TestService_FullDuplexCallServer) error { defer close(recvErr) _, err := stream.Recv() if err != nil { @@ -4149,7 +4150,7 @@ func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) { // client side when server send a large message. te.maxClientReceiveMsgSize = newInt(10) cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -4203,7 +4204,7 @@ func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) { } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // This unary call should fail, but not timeout. 
ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -4237,7 +4238,7 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { defer lis.Close() s := grpc.NewServer() - testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{ + testgrpc.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{ itemCount: itemCount, itemSize: itemSize, }) @@ -4250,7 +4251,7 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } defer cc.Close() - cl := testpb.NewTestServiceClient(cc) + cl := testgrpc.NewTestServiceClient(cc) failures := 0 for i := 0; i < requestCount; i++ { @@ -4292,13 +4293,13 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { } type flowControlLogicalRaceServer struct { - testpb.TestServiceServer + testgrpc.TestServiceServer itemSize int itemCount int } -func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error { +func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testgrpc.TestService_StreamingOutputCallServer) error { for i := 0; i < s.itemCount; i++ { err := srv.Send(&testpb.StreamingOutputCallResponse{ Payload: &testpb.Payload{ @@ -4598,7 +4599,7 @@ func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { // doFDC performs a FullDuplexCall with client and returns the error from the // first stream.Recv call, or nil if that error is io.EOF. Calls t.Fatal if // the stream cannot be established. - doFDC := func(ctx context.Context, client testpb.TestServiceClient) error { + doFDC := func(ctx context.Context, client testgrpc.TestServiceClient) error { stream, err := client.FullDuplexCall(ctx) if err != nil { t.Fatalf("Unwanted error: %v", err) @@ -4611,7 +4612,7 @@ func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { // endpoint ensures mdkey is NOT in metadata and returns an error if it is. 
endpoint := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { ctx := stream.Context() if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil { return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) @@ -4627,7 +4628,7 @@ func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { // proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint // without explicitly copying the metadata. proxy := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { ctx := stream.Context() if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil { return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) @@ -4749,7 +4750,7 @@ func (s) TestTapTimeout(t *testing.T) { func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { return status.Errorf(codes.Internal, "") }, } @@ -4824,7 +4825,7 @@ func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() stream, err := tc.FullDuplexCall(ctx) @@ -4876,7 +4877,7 @@ func testWaitForReadyConnection(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() // Non-blocking dial. 
- tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() state := cc.GetState() @@ -4953,7 +4954,7 @@ func testEncodeDoesntPanic(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() te.customCodec = nil - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() // Failure case, should not panic. @@ -4988,7 +4989,7 @@ func testSvrWriteStatusEarlyWrite(t *testing.T, e env) { } te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(smallSize), @@ -5067,7 +5068,7 @@ func testGetMethodConfigTD(t *testing.T, e env) { ch <- sc cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. @@ -5118,7 +5119,7 @@ func testServiceConfigWaitForReadyTD(t *testing.T, e env) { ch <- sc cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. @@ -5182,7 +5183,7 @@ func testServiceConfigTimeoutTD(t *testing.T, e env) { ch <- sc cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. 
ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { @@ -5272,7 +5273,7 @@ func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { defer te1.tearDown() ch1 <- sc - tc := testpb.NewTestServiceClient(te1.clientConn()) + tc := testgrpc.NewTestServiceClient(te1.clientConn()) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -5334,7 +5335,7 @@ func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { te2.startServer(&testServer{security: e.security}) defer te2.tearDown() ch2 <- sc - tc = testpb.NewTestServiceClient(te2.clientConn()) + tc = testgrpc.NewTestServiceClient(te2.clientConn()) // Test for unary RPC recv. req.Payload = smallPayload @@ -5383,7 +5384,7 @@ func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { te3.startServer(&testServer{security: e.security}) defer te3.tearDown() ch3 <- sc - tc = testpb.NewTestServiceClient(te3.clientConn()) + tc = testgrpc.NewTestServiceClient(te3.clientConn()) // Test for unary RPC recv. 
req.Payload = smallPayload @@ -5544,7 +5545,7 @@ func (s) TestInterceptorCanAccessCallOptions(t *testing.T) { grpc.WaitForReady(true), grpc.MaxCallRecvMsgSize(1010), } - tc := testpb.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...))) + tc := testgrpc.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...))) var headers metadata.MD var trailers metadata.MD @@ -5601,7 +5602,7 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) { s := grpc.NewServer() defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -5619,7 +5620,7 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) { t.Fatalf("Failed to dial server: %v", err) } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -5720,7 +5721,7 @@ func testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T, e te.maxServerReceiveMsgSize = &smallSize te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576) if err != nil { t.Fatal(err) @@ -5759,7 +5760,7 @@ func testRPCTimeout(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) const argSize = 2718 const respSize = 314 @@ -5796,7 +5797,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) { } ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { in, err := stream.Recv() if err == io.EOF 
{ @@ -5820,7 +5821,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) { } s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0)) - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -5838,7 +5839,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) { t.Fatalf("Failed to dial server") } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true)) @@ -5880,7 +5881,7 @@ func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216))) @@ -5912,7 +5913,7 @@ func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var err error @@ -5943,7 +5944,7 @@ func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) defer te.tearDown() cc, dw := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := tc.FullDuplexCall(ctx) @@ -5984,7 +5985,7 @@ func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: 
true}) defer te.tearDown() cc, _ := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := tc.FullDuplexCall(ctx) @@ -6031,7 +6032,7 @@ func (s) TestNetPipeConn(t *testing.T) { ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil }} - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) go s.Serve(pl) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -6040,7 +6041,7 @@ func (s) TestNetPipeConn(t *testing.T) { t.Fatalf("Error creating client: %v", err) } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) } @@ -6059,7 +6060,7 @@ func testLargeTimeout(t *testing.T, e env) { ts := &funcServer{} te.startServer(ts) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) timeouts := []time.Duration{ time.Duration(math.MaxInt64), // will be (correctly) converted to @@ -6114,7 +6115,7 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel() @@ -6146,7 +6147,11 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { // We wait a second before providing a service config and resolving // addresses. 
So this will wait for that and then honor the // maxRequestMessageBytes it contains. - if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{ResponseType: testpb.PayloadType_UNCOMPRESSABLE}); status.Code(err) != codes.ResourceExhausted { + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1) + if err != nil { + t.Fatal(err) + } + if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{Payload: payload}); status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err) } if got := ctx.Err(); got != nil { @@ -6331,7 +6336,7 @@ func (s) TestClientCancellationPropagatesUnary(t *testing.T) { // all call options' after are executed. func (s) TestCanceledRPCCallOptionRace(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { err := stream.Send(&testpb.StreamingOutputCallResponse{}) if err != nil { return err @@ -6757,7 +6762,7 @@ func (s) TestUnexpectedEOF(t *testing.T) { // frame sent, simultaneously. func (s) TestRecvWhileReturningStatus(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { // The client never sends, so this Recv blocks until the server // returns and causes stream operations to return errors. 
go stream.Recv() @@ -6822,7 +6827,7 @@ func (s) TestGlobalBinaryLoggingOptions(t *testing.T) { UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil }, - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { _, err := stream.Recv() if err == io.EOF { diff --git a/test/goaway_test.go b/test/goaway_test.go index 9971487051f4..48ef197e74cc 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -40,7 +40,9 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // TestGracefulClientOnGoAway attempts to ensure that when the server sends a @@ -62,7 +64,7 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { s := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{MaxConnectionAge: maxConnAge})) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -75,7 +77,7 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { t.Fatalf("Failed to dial server: %v", err) } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) endTime := time.Now().Add(testTime) for time.Now().Before(endTime) { @@ -90,7 +92,7 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { func (s) TestDetailedGoAwayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) { rpcDoneOnClient := make(chan struct{}) ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { <-rpcDoneOnClient return 
status.Error(codes.Internal, "arbitrary status") }, @@ -130,7 +132,7 @@ func (s) TestDetailedGoAwayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) rpcDoneOnClient := make(chan struct{}) ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { <-rpcDoneOnClient return status.Error(codes.Internal, "arbitrary status") }, @@ -180,7 +182,7 @@ func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -218,7 +220,7 @@ func testServerGoAway(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // Finish an RPC to make sure the connection is good. 
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -270,7 +272,7 @@ func testServerGoAwayPendingRPC(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) if err != nil { @@ -344,7 +346,7 @@ func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithCancel(context.Background()) stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) if err != nil { @@ -433,7 +435,7 @@ func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { @@ -472,7 +474,7 @@ func testConcurrentServerStopAndGoAway(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) @@ -553,7 +555,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil }, - fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { + fullDuplexCall: func(stream testgrpc.TestService_FullDuplexCallServer) error { if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { t.Errorf("unexpected error 
from send: %v", err) return err @@ -566,7 +568,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { return err }, } - testpb.RegisterTestServiceServer(s1, ts) + testgrpc.RegisterTestServiceServer(s1, ts) go s1.Serve(lis1) conn2Established := grpcsync.NewEvent() @@ -576,7 +578,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { } s2 := grpc.NewServer() defer s2.Stop() - testpb.RegisterTestServiceServer(s2, ts) + testgrpc.RegisterTestServiceServer(s2, ts) r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{ @@ -589,7 +591,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) t.Log("Waiting for the ClientConn to enter READY state.") state := cc.GetState() @@ -692,7 +694,7 @@ func (s) TestGoAwayStreamIDSmallerThanCreatedStreams(t *testing.T) { } ct := val.(*clientTester) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) someStreamsCreated := grpcsync.NewEvent() goAwayWritten := grpcsync.NewEvent() go func() { diff --git a/test/gracefulstop_test.go b/test/gracefulstop_test.go index 15e0611a219b..51bb132e4944 100644 --- a/test/gracefulstop_test.go +++ b/test/gracefulstop_test.go @@ -32,7 +32,9 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type delayListener struct { @@ -111,7 +113,7 @@ func (s) TestGracefulStop(t *testing.T) { d := func(ctx context.Context, _ string) (net.Conn, error) { return dlis.Dial(ctx) } ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != nil { 
return err @@ -120,7 +122,7 @@ func (s) TestGracefulStop(t *testing.T) { }, } s := grpc.NewServer() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) // 1. Start Server wg := sync.WaitGroup{} @@ -152,7 +154,7 @@ func (s) TestGracefulStop(t *testing.T) { if err != nil { t.Fatalf("grpc.DialContext(_, %q, _) = %v", lis.Addr().String(), err) } - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) defer cc.Close() // 4. Send an RPC on the new connection. @@ -173,7 +175,7 @@ func (s) TestGracefulStopClosesConnAfterLastStream(t *testing.T) { handlerCalled := make(chan struct{}) gracefulStopCalled := make(chan struct{}) - ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { close(handlerCalled) // Initiate call to GracefulStop. <-gracefulStopCalled // Wait for GOAWAYs to be received by the client. return nil diff --git a/test/grpc_testing/test.pb.go b/test/grpc_testing/test.pb.go deleted file mode 100644 index 641640f012dc..000000000000 --- a/test/grpc_testing/test.pb.go +++ /dev/null @@ -1,924 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. 
- -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v4.22.0 -// source: test/grpc_testing/test.proto - -package grpc_testing - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The type of payload that should be returned. -type PayloadType int32 - -const ( - // Compressable text format. - PayloadType_COMPRESSABLE PayloadType = 0 - // Uncompressable binary format. - PayloadType_UNCOMPRESSABLE PayloadType = 1 - // Randomly chosen from all other formats defined in this enum. - PayloadType_RANDOM PayloadType = 2 -) - -// Enum value maps for PayloadType. -var ( - PayloadType_name = map[int32]string{ - 0: "COMPRESSABLE", - 1: "UNCOMPRESSABLE", - 2: "RANDOM", - } - PayloadType_value = map[string]int32{ - "COMPRESSABLE": 0, - "UNCOMPRESSABLE": 1, - "RANDOM": 2, - } -) - -func (x PayloadType) Enum() *PayloadType { - p := new(PayloadType) - *p = x - return p -} - -func (x PayloadType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (PayloadType) Descriptor() protoreflect.EnumDescriptor { - return file_test_grpc_testing_test_proto_enumTypes[0].Descriptor() -} - -func (PayloadType) Type() protoreflect.EnumType { - return &file_test_grpc_testing_test_proto_enumTypes[0] -} - -func (x PayloadType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use PayloadType.Descriptor instead. 
-func (PayloadType) EnumDescriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{0} -} - -type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Empty) Reset() { - *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Empty) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Empty) ProtoMessage() {} - -func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. -func (*Empty) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{0} -} - -// A block of data, to simply increase gRPC message size. -type Payload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of data in body. - Type PayloadType `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.testing.PayloadType" json:"type,omitempty"` - // Primary contents of payload. 
- Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` -} - -func (x *Payload) Reset() { - *x = Payload{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Payload) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Payload) ProtoMessage() {} - -func (x *Payload) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Payload.ProtoReflect.Descriptor instead. -func (*Payload) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{1} -} - -func (x *Payload) GetType() PayloadType { - if x != nil { - return x.Type - } - return PayloadType_COMPRESSABLE -} - -func (x *Payload) GetBody() []byte { - if x != nil { - return x.Body - } - return nil -} - -// Unary request. -type SimpleRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize,proto3" json:"response_size,omitempty"` - // Optional input payload sent along with the request. 
- Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` - // Whether SimpleResponse should include username. - FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername,proto3" json:"fill_username,omitempty"` - // Whether SimpleResponse should include OAuth scope. - FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope,proto3" json:"fill_oauth_scope,omitempty"` -} - -func (x *SimpleRequest) Reset() { - *x = SimpleRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SimpleRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SimpleRequest) ProtoMessage() {} - -func (x *SimpleRequest) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SimpleRequest.ProtoReflect.Descriptor instead. -func (*SimpleRequest) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{2} -} - -func (x *SimpleRequest) GetResponseType() PayloadType { - if x != nil { - return x.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (x *SimpleRequest) GetResponseSize() int32 { - if x != nil { - return x.ResponseSize - } - return 0 -} - -func (x *SimpleRequest) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -func (x *SimpleRequest) GetFillUsername() bool { - if x != nil { - return x.FillUsername - } - return false -} - -func (x *SimpleRequest) GetFillOauthScope() bool { - if x != nil { - return x.FillOauthScope - } - return false -} - -// Unary response, as configured by the request. 
-type SimpleResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Payload to increase message size. - Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` - // The user the request came from, for verifying authentication was - // successful when the client expected it. - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - // OAuth scope. - OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope,proto3" json:"oauth_scope,omitempty"` -} - -func (x *SimpleResponse) Reset() { - *x = SimpleResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SimpleResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SimpleResponse) ProtoMessage() {} - -func (x *SimpleResponse) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SimpleResponse.ProtoReflect.Descriptor instead. -func (*SimpleResponse) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{3} -} - -func (x *SimpleResponse) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -func (x *SimpleResponse) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *SimpleResponse) GetOauthScope() string { - if x != nil { - return x.OauthScope - } - return "" -} - -// Client-streaming request. 
-type StreamingInputCallRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *StreamingInputCallRequest) Reset() { - *x = StreamingInputCallRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingInputCallRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingInputCallRequest) ProtoMessage() {} - -func (x *StreamingInputCallRequest) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingInputCallRequest.ProtoReflect.Descriptor instead. -func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{4} -} - -func (x *StreamingInputCallRequest) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -// Client-streaming response. -type StreamingInputCallResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Aggregated size of payloads received from the client. 
- AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize,proto3" json:"aggregated_payload_size,omitempty"` -} - -func (x *StreamingInputCallResponse) Reset() { - *x = StreamingInputCallResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingInputCallResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingInputCallResponse) ProtoMessage() {} - -func (x *StreamingInputCallResponse) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingInputCallResponse.ProtoReflect.Descriptor instead. -func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{5} -} - -func (x *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { - if x != nil { - return x.AggregatedPayloadSize - } - return 0 -} - -// Configuration for a particular response. -type ResponseParameters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` - // Desired interval between consecutive responses in the response stream in - // microseconds. 
- IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs,proto3" json:"interval_us,omitempty"` -} - -func (x *ResponseParameters) Reset() { - *x = ResponseParameters{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseParameters) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseParameters) ProtoMessage() {} - -func (x *ResponseParameters) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseParameters.ProtoReflect.Descriptor instead. -func (*ResponseParameters) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{6} -} - -func (x *ResponseParameters) GetSize() int32 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *ResponseParameters) GetIntervalUs() int32 { - if x != nil { - return x.IntervalUs - } - return 0 -} - -// Server-streaming request. -type StreamingOutputCallRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Configuration for each expected response message. 
- ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters,proto3" json:"response_parameters,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *StreamingOutputCallRequest) Reset() { - *x = StreamingOutputCallRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingOutputCallRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingOutputCallRequest) ProtoMessage() {} - -func (x *StreamingOutputCallRequest) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingOutputCallRequest.ProtoReflect.Descriptor instead. -func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{7} -} - -func (x *StreamingOutputCallRequest) GetResponseType() PayloadType { - if x != nil { - return x.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (x *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { - if x != nil { - return x.ResponseParameters - } - return nil -} - -func (x *StreamingOutputCallRequest) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -// Server-streaming response, as configured by the request and parameters. -type StreamingOutputCallResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Payload to increase response size. 
- Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *StreamingOutputCallResponse) Reset() { - *x = StreamingOutputCallResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingOutputCallResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingOutputCallResponse) ProtoMessage() {} - -func (x *StreamingOutputCallResponse) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingOutputCallResponse.ProtoReflect.Descriptor instead. -func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{8} -} - -func (x *StreamingOutputCallResponse) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -var File_test_grpc_testing_test_proto protoreflect.FileDescriptor - -var file_test_grpc_testing_test_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x67, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x22, 0x07, 0x0a, 0x05, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4c, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x12, 0x2d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 
0x70, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, - 0x6f, 0x64, 0x79, 0x22, 0xf4, 0x01, 0x0a, 0x0d, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x66, - 0x69, 0x6c, 0x6c, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x6c, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x6c, - 0x4f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x22, 0x7e, 0x0a, 0x0e, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 
0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x22, 0x4c, 0x0a, 0x19, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x49, - 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x55, 0x73, 0x22, 0xe0, 0x01, 0x0a, 0x1a, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 
0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4e, 0x0a, 0x1b, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, - 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2a, 0x3f, 0x0a, 0x0b, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, - 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x12, 
0x0a, - 0x0e, 0x55, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x02, 0x32, 0xbb, 0x04, - 0x0a, 0x0b, 0x54, 0x65, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x35, 0x0a, - 0x09, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x09, 0x55, 0x6e, 0x61, 0x72, 0x79, 0x43, 0x61, 0x6c, - 0x6c, 0x12, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x13, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, - 0x61, 0x6c, 0x6c, 0x12, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x69, 0x0a, 0x12, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, - 0x12, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x53, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, - 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, - 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, 0x69, 0x0a, 0x0e, 0x46, 0x75, 0x6c, 0x6c, 0x44, 0x75, 0x70, - 0x6c, 0x65, 0x78, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, - 0x12, 0x69, 0x0a, 0x0e, 0x48, 0x61, 0x6c, 0x66, 0x44, 0x75, 0x70, 0x6c, 0x65, 0x78, 0x43, 0x61, - 0x6c, 0x6c, 0x12, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x2a, 0x5a, 0x28, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_test_grpc_testing_test_proto_rawDescOnce sync.Once - file_test_grpc_testing_test_proto_rawDescData = file_test_grpc_testing_test_proto_rawDesc -) - -func file_test_grpc_testing_test_proto_rawDescGZIP() []byte { - file_test_grpc_testing_test_proto_rawDescOnce.Do(func() { - file_test_grpc_testing_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_grpc_testing_test_proto_rawDescData) - }) - return file_test_grpc_testing_test_proto_rawDescData -} - -var file_test_grpc_testing_test_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_test_grpc_testing_test_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_test_grpc_testing_test_proto_goTypes = []interface{}{ - (PayloadType)(0), // 0: grpc.testing.PayloadType - (*Empty)(nil), // 1: grpc.testing.Empty - (*Payload)(nil), // 2: grpc.testing.Payload - (*SimpleRequest)(nil), // 3: grpc.testing.SimpleRequest - (*SimpleResponse)(nil), // 4: grpc.testing.SimpleResponse - (*StreamingInputCallRequest)(nil), // 5: grpc.testing.StreamingInputCallRequest - (*StreamingInputCallResponse)(nil), // 6: grpc.testing.StreamingInputCallResponse - (*ResponseParameters)(nil), // 7: grpc.testing.ResponseParameters - (*StreamingOutputCallRequest)(nil), // 8: grpc.testing.StreamingOutputCallRequest - (*StreamingOutputCallResponse)(nil), // 9: grpc.testing.StreamingOutputCallResponse -} -var file_test_grpc_testing_test_proto_depIdxs = []int32{ - 0, // 0: grpc.testing.Payload.type:type_name -> grpc.testing.PayloadType - 0, // 1: grpc.testing.SimpleRequest.response_type:type_name -> grpc.testing.PayloadType - 2, // 2: grpc.testing.SimpleRequest.payload:type_name -> grpc.testing.Payload - 2, // 3: grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload - 2, // 4: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload - 0, // 5: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType - 7, // 6: 
grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters - 2, // 7: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload - 2, // 8: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload - 1, // 9: grpc.testing.TestService.EmptyCall:input_type -> grpc.testing.Empty - 3, // 10: grpc.testing.TestService.UnaryCall:input_type -> grpc.testing.SimpleRequest - 8, // 11: grpc.testing.TestService.StreamingOutputCall:input_type -> grpc.testing.StreamingOutputCallRequest - 5, // 12: grpc.testing.TestService.StreamingInputCall:input_type -> grpc.testing.StreamingInputCallRequest - 8, // 13: grpc.testing.TestService.FullDuplexCall:input_type -> grpc.testing.StreamingOutputCallRequest - 8, // 14: grpc.testing.TestService.HalfDuplexCall:input_type -> grpc.testing.StreamingOutputCallRequest - 1, // 15: grpc.testing.TestService.EmptyCall:output_type -> grpc.testing.Empty - 4, // 16: grpc.testing.TestService.UnaryCall:output_type -> grpc.testing.SimpleResponse - 9, // 17: grpc.testing.TestService.StreamingOutputCall:output_type -> grpc.testing.StreamingOutputCallResponse - 6, // 18: grpc.testing.TestService.StreamingInputCall:output_type -> grpc.testing.StreamingInputCallResponse - 9, // 19: grpc.testing.TestService.FullDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse - 9, // 20: grpc.testing.TestService.HalfDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse - 15, // [15:21] is the sub-list for method output_type - 9, // [9:15] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name -} - -func init() { file_test_grpc_testing_test_proto_init() } -func file_test_grpc_testing_test_proto_init() { - if File_test_grpc_testing_test_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_test_grpc_testing_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Payload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SimpleRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SimpleResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingInputCallRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingInputCallResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseParameters); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*StreamingOutputCallRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingOutputCallResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_test_grpc_testing_test_proto_rawDesc, - NumEnums: 1, - NumMessages: 9, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_test_grpc_testing_test_proto_goTypes, - DependencyIndexes: file_test_grpc_testing_test_proto_depIdxs, - EnumInfos: file_test_grpc_testing_test_proto_enumTypes, - MessageInfos: file_test_grpc_testing_test_proto_msgTypes, - }.Build() - File_test_grpc_testing_test_proto = out.File - file_test_grpc_testing_test_proto_rawDesc = nil - file_test_grpc_testing_test_proto_goTypes = nil - file_test_grpc_testing_test_proto_depIdxs = nil -} diff --git a/test/grpc_testing/test.proto b/test/grpc_testing/test.proto deleted file mode 100644 index 0c6650401d59..000000000000 --- a/test/grpc_testing/test.proto +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. -syntax = "proto3"; - -option go_package = "google.golang.org/grpc/test/grpc_testing"; - -package grpc.testing; - -message Empty {} - -// The type of payload that should be returned. -enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; - - // Uncompressable binary format. - UNCOMPRESSABLE = 1; - - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; -} - -// A block of data, to simply increase gRPC message size. -message Payload { - // The type of data in body. - PayloadType type = 1; - // Primary contents of payload. - bytes body = 2; -} - -// Unary request. -message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - PayloadType response_type = 1; - - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - int32 response_size = 2; - - // Optional input payload sent along with the request. - Payload payload = 3; - - // Whether SimpleResponse should include username. - bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - bool fill_oauth_scope = 5; -} - -// Unary response, as configured by the request. -message SimpleResponse { - // Payload to increase message size. - Payload payload = 1; - - // The user the request came from, for verifying authentication was - // successful when the client expected it. - string username = 2; - - // OAuth scope. - string oauth_scope = 3; -} - -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. 
-message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - PayloadType response_type = 1; - - // Configuration for each expected response message. - repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - Payload payload = 3; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. - Payload payload = 1; -} - -// A simple service to test the various types of RPCs and experiment with -// performance with various types of payload. -service TestService { - // One empty request followed by one empty response. - rpc EmptyCall(Empty) returns (Empty); - - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - rpc StreamingOutputCall(StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by one response (streamed upload). 
- // The server returns the aggregated size of client payload as the result. - rpc StreamingInputCall(stream StreamingInputCallRequest) - returns (StreamingInputCallResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - rpc FullDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - rpc HalfDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); -} diff --git a/test/grpc_testing/test_grpc.pb.go b/test/grpc_testing/test_grpc.pb.go deleted file mode 100644 index 897e70dafffa..000000000000 --- a/test/grpc_testing/test_grpc.pb.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 -// source: test/grpc_testing/test.proto - -package grpc_testing - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - TestService_EmptyCall_FullMethodName = "/grpc.testing.TestService/EmptyCall" - TestService_UnaryCall_FullMethodName = "/grpc.testing.TestService/UnaryCall" - TestService_StreamingOutputCall_FullMethodName = "/grpc.testing.TestService/StreamingOutputCall" - TestService_StreamingInputCall_FullMethodName = "/grpc.testing.TestService/StreamingInputCall" - TestService_FullDuplexCall_FullMethodName = "/grpc.testing.TestService/FullDuplexCall" - TestService_HalfDuplexCall_FullMethodName = "/grpc.testing.TestService/HalfDuplexCall" -) - -// TestServiceClient is the client API for TestService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type TestServiceClient interface { - // One empty request followed by one empty response. - EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. 
- StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) -} - -type testServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { - return &testServiceClient{cc} -} - -func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, TestService_EmptyCall_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { - out := new(SimpleResponse) - err := c.cc.Invoke(ctx, TestService_UnaryCall_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], TestService_StreamingOutputCall_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &testServiceStreamingOutputCallClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type TestService_StreamingOutputCallClient interface { - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingOutputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], TestService_StreamingInputCall_FullMethodName, opts...) 
- if err != nil { - return nil, err - } - x := &testServiceStreamingInputCallClient{stream} - return x, nil -} - -type TestService_StreamingInputCallClient interface { - Send(*StreamingInputCallRequest) error - CloseAndRecv() (*StreamingInputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingInputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(StreamingInputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], TestService_FullDuplexCall_FullMethodName, opts...) 
- if err != nil { - return nil, err - } - x := &testServiceFullDuplexCallClient{stream} - return x, nil -} - -type TestService_FullDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], TestService_HalfDuplexCall_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &testServiceHalfDuplexCallClient{stream} - return x, nil -} - -type TestService_HalfDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceHalfDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TestServiceServer is the server API for TestService service. -// All implementations must embed UnimplementedTestServiceServer -// for forward compatibility -type TestServiceServer interface { - // One empty request followed by one empty response. 
- EmptyCall(context.Context, *Empty) (*Empty, error) - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(TestService_StreamingInputCallServer) error - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(TestService_FullDuplexCallServer) error - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(TestService_HalfDuplexCallServer) error - mustEmbedUnimplementedTestServiceServer() -} - -// UnimplementedTestServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedTestServiceServer struct { -} - -func (UnimplementedTestServiceServer) EmptyCall(context.Context, *Empty) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method EmptyCall not implemented") -} -func (UnimplementedTestServiceServer) UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") -} -func (UnimplementedTestServiceServer) StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error { - return status.Errorf(codes.Unimplemented, "method StreamingOutputCall not implemented") -} -func (UnimplementedTestServiceServer) StreamingInputCall(TestService_StreamingInputCallServer) error { - return status.Errorf(codes.Unimplemented, "method StreamingInputCall not implemented") -} -func (UnimplementedTestServiceServer) FullDuplexCall(TestService_FullDuplexCallServer) error { - return status.Errorf(codes.Unimplemented, "method FullDuplexCall not implemented") -} -func (UnimplementedTestServiceServer) HalfDuplexCall(TestService_HalfDuplexCallServer) error { - return status.Errorf(codes.Unimplemented, "method HalfDuplexCall not implemented") -} -func (UnimplementedTestServiceServer) mustEmbedUnimplementedTestServiceServer() {} - -// UnsafeTestServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to TestServiceServer will -// result in compilation errors. 
-type UnsafeTestServiceServer interface { - mustEmbedUnimplementedTestServiceServer() -} - -func RegisterTestServiceServer(s grpc.ServiceRegistrar, srv TestServiceServer) { - s.RegisterService(&TestService_ServiceDesc, srv) -} - -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).EmptyCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TestService_EmptyCall_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: TestService_UnaryCall_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StreamingOutputCallRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) -} - -type TestService_StreamingOutputCallServer interface { - Send(*StreamingOutputCallResponse) error - grpc.ServerStream -} - -type testServiceStreamingOutputCallServer struct { - grpc.ServerStream -} - 
-func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) -} - -type TestService_StreamingInputCallServer interface { - SendAndClose(*StreamingInputCallResponse) error - Recv() (*StreamingInputCallRequest, error) - grpc.ServerStream -} - -type testServiceStreamingInputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { - m := new(StreamingInputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) -} - -type TestService_HalfDuplexCallServer interface { - Send(*StreamingOutputCallResponse) 
error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceHalfDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TestService_ServiceDesc is the grpc.ServiceDesc for TestService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var TestService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.TestService", - HandlerType: (*TestServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EmptyCall", - Handler: _TestService_EmptyCall_Handler, - }, - { - MethodName: "UnaryCall", - Handler: _TestService_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingOutputCall", - Handler: _TestService_StreamingOutputCall_Handler, - ServerStreams: true, - }, - { - StreamName: "StreamingInputCall", - Handler: _TestService_StreamingInputCall_Handler, - ClientStreams: true, - }, - { - StreamName: "FullDuplexCall", - Handler: _TestService_FullDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "HalfDuplexCall", - Handler: _TestService_HalfDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "test/grpc_testing/test.proto", -} diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index f4daaaa77c6b..1fb4cf46e2be 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -32,15 +32,17 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/health" - healthgrpc "google.golang.org/grpc/health/grpc_health_v1" - 
healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) var testHealthCheckFunc = internal.HealthCheckFunc @@ -143,7 +145,7 @@ func setupServer(t *testing.T, watchFunc healthWatchFunc) (*grpc.Server, net.Lis } s := grpc.NewServer() healthgrpc.RegisterHealthServer(s, ts) - testpb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) go s.Serve(lis) t.Cleanup(func() { s.Stop() }) return s, lis, ts @@ -295,7 +297,7 @@ func (s) TestHealthCheckWithGoAway(t *testing.T) { hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseServiceConfig(t, r, `{ @@ -373,7 +375,7 @@ func (s) TestHealthCheckWithConnClose(t *testing.T) { hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseServiceConfig(t, r, `{ @@ -423,7 +425,7 @@ func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) { hcEnterChan, hcExitChan, 
testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) sc := parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" @@ -503,7 +505,7 @@ func (s) TestHealthCheckWithClientConnClose(t *testing.T) { hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseServiceConfig(t, r, `{ @@ -676,7 +678,7 @@ func testHealthCheckDisableWithDialOption(t *testing.T, addr string) { testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, extraDialOption: []grpc.DialOption{grpc.WithDisableHealthCheck()}, }) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, ServiceConfig: parseServiceConfig(t, r, `{ @@ -710,7 +712,7 @@ func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { cc, r := setupClient(t, &clientConfig{ testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, ServiceConfig: parseServiceConfig(t, r, `{ @@ -742,7 +744,7 @@ func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { func testHealthCheckDisableWithServiceConfig(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/test/http_header_end2end_test.go b/test/http_header_end2end_test.go index efdbd530afbc..77867133f95c 100644 --- a/test/http_header_end2end_test.go +++ b/test/http_header_end2end_test.go @@ -28,7 +28,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) { @@ -248,7 +249,7 @@ func doHTTPHeaderTest(lisAddr string, errCode codes.Code) error { defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) stream, err := client.FullDuplexCall(ctx) if err != nil { return fmt.Errorf("creating FullDuplex stream: %v", err) diff --git a/test/insecure_creds_test.go b/test/insecure_creds_test.go index 28e8b7318143..0647c81232ae 100644 --- a/test/insecure_creds_test.go +++ b/test/insecure_creds_test.go @@ -32,7 +32,8 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // testLegacyPerRPCCredentials is a PerRPCCredentials that has yet incorporated security level. @@ -111,7 +112,7 @@ func (s) TestInsecureCreds(t *testing.T) { s := grpc.NewServer(sOpts...) 
defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -131,7 +132,7 @@ func (s) TestInsecureCreds(t *testing.T) { } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err = c.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -150,7 +151,7 @@ func (s) TestInsecureCreds_WithPerRPCCredentials_AsCallOption(t *testing.T) { s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -171,7 +172,7 @@ func (s) TestInsecureCreds_WithPerRPCCredentials_AsCallOption(t *testing.T) { defer cc.Close() const wantErr = "transport: cannot send secure credentials on an insecure connection" - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) if _, err = c.EmptyCall(ctx, &testpb.Empty{}, copts...); err == nil || !strings.Contains(err.Error(), wantErr) { t.Fatalf("insecure credentials with per-RPC credentials requiring transport security returned error: %v; want %s", err, wantErr) } @@ -186,7 +187,7 @@ func (s) TestInsecureCreds_WithPerRPCCredentials_AsDialOption(t *testing.T) { s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { diff --git a/test/interceptor_test.go b/test/interceptor_test.go index 34a7cad5cc52..c2004bb85465 100644 --- a/test/interceptor_test.go +++ b/test/interceptor_test.go @@ -27,7 +27,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" - testpb "google.golang.org/grpc/test/grpc_testing" 
+ + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type parentCtxkey struct{} @@ -252,7 +254,7 @@ func (s) TestChainStreamClientInterceptor_ContextValuePropagation(t *testing.T) // Start a stub server and use the above chain of interceptors while creating // a ClientConn to it. ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if _, err := stream.Recv(); err != nil { return err } diff --git a/test/invoke_test.go b/test/invoke_test.go index 49ad9044ee38..e829df0a0603 100644 --- a/test/invoke_test.go +++ b/test/invoke_test.go @@ -27,8 +27,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/stubserver" + testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" ) // TestInvoke verifies a straightforward invocation of ClientConn.Invoke(). diff --git a/test/local_creds_test.go b/test/local_creds_test.go index 8d649ed5365f..b1cabdbb7e56 100644 --- a/test/local_creds_test.go +++ b/test/local_creds_test.go @@ -35,7 +35,8 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func testLocalCredsE2ESucceed(network, address string) error { @@ -73,7 +74,7 @@ func testLocalCredsE2ESucceed(network, address string) error { s := grpc.NewServer(sopts...) 
defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen(network, address) if err != nil { @@ -101,7 +102,7 @@ func testLocalCredsE2ESucceed(network, address string) error { } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -171,7 +172,7 @@ func testLocalCredsE2EFail(dopts []grpc.DialOption) error { s := grpc.NewServer(sopts...) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -196,7 +197,7 @@ func testLocalCredsE2EFail(dopts []grpc.DialOption) error { } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() diff --git a/test/metadata_test.go b/test/metadata_test.go index a15e5cb1c6e7..e05d0172eaad 100644 --- a/test/metadata_test.go +++ b/test/metadata_test.go @@ -32,7 +32,9 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestInvalidMetadata(t *testing.T) { @@ -96,7 +98,7 @@ func (s) TestInvalidMetadata(t *testing.T) { EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != nil { return err diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index 129693944475..15b6dcd84616 100644 --- a/test/pickfirst_test.go +++ 
b/test/pickfirst_test.go @@ -34,8 +34,8 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const pickFirstServiceConfig = `{"loadBalancingConfig": [{"pick_first":{}}]}` @@ -257,7 +257,7 @@ func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) { } doneCh := make(chan struct{}) - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) go func() { // The channel is currently in TransientFailure and this RPC will block // until the channel becomes Ready, which will only happen when we push a diff --git a/test/resolver_update_test.go b/test/resolver_update_test.go index 6b568a227aa4..ddf67267bac7 100644 --- a/test/resolver_update_test.go +++ b/test/resolver_update_test.go @@ -39,7 +39,9 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // TestResolverUpdateDuringBuild_ServiceConfigParseError makes the @@ -62,7 +64,7 @@ func (s) TestResolverUpdateDuringBuild_ServiceConfigParseError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) const wantMsg = "error parsing service config" const wantCode = codes.Unavailable if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { @@ -94,7 +96,7 @@ func (s) TestResolverUpdateDuringBuild_ServiceConfigInvalidTypeError(t *testing. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) const wantMsg = "illegal service config type" const wantCode = codes.Unavailable if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { @@ -120,7 +122,7 @@ func (s) TestResolverUpdate_InvalidServiceConfigAsFirstUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) const wantMsg = "error parsing service config" const wantCode = codes.Unavailable if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { @@ -232,7 +234,7 @@ func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { } // Ensure RPCs are successful. 
- client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall RPC failed: %v", err) } diff --git a/test/retry_test.go b/test/retry_test.go index 01b074e4015c..06e2479ff67b 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -39,7 +39,9 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestRetryUnary(t *testing.T) { @@ -174,12 +176,12 @@ func (s) TestRetryStreaming(t *testing.T) { largePayload, _ := newPayload(testpb.PayloadType_COMPRESSABLE, 500) - type serverOp func(stream testpb.TestService_FullDuplexCallServer) error - type clientOp func(stream testpb.TestService_FullDuplexCallClient) error + type serverOp func(stream testgrpc.TestService_FullDuplexCallServer) error + type clientOp func(stream testgrpc.TestService_FullDuplexCallClient) error // Server Operations sAttempts := func(n int) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { const key = "grpc-previous-rpc-attempts" md, ok := metadata.FromIncomingContext(stream.Context()) if !ok { @@ -192,7 +194,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sReq := func(b byte) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { want := req(b) if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, ", got, err, want) @@ -201,7 +203,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sReqPayload := func(p *testpb.Payload) serverOp { - return func(stream 
testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { want := &testpb.StreamingOutputCallRequest{Payload: p} if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, ", got, err, want) @@ -210,7 +212,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sRes := func(b byte) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { msg := res(b) if err := stream.Send(msg); err != nil { return status.Errorf(codes.Internal, "server: Send(%v) = %v; want ", msg, err) @@ -219,12 +221,12 @@ func (s) TestRetryStreaming(t *testing.T) { } } sErr := func(c codes.Code) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { return status.New(c, "").Err() } } sCloseSend := func() serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { if msg, err := stream.Recv(); msg != nil || err != io.EOF { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want , io.EOF", msg, err) } @@ -232,7 +234,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sPushback := func(s string) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { stream.SetTrailer(metadata.MD{"grpc-retry-pushback-ms": []string{s}}) return nil } @@ -240,7 +242,7 @@ func (s) TestRetryStreaming(t *testing.T) { // Client Operations cReq := func(b byte) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { msg := req(b) if err := stream.Send(msg); err != nil { return fmt.Errorf("client: 
Send(%v) = %v; want ", msg, err) @@ -249,7 +251,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } cReqPayload := func(p *testpb.Payload) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { msg := &testpb.StreamingOutputCallRequest{Payload: p} if err := stream.Send(msg); err != nil { return fmt.Errorf("client: Send(%v) = %v; want ", msg, err) @@ -258,7 +260,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } cRes := func(b byte) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { want := res(b) if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return fmt.Errorf("client: Recv() = %v, %v; want %v, ", got, err, want) @@ -267,7 +269,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } cErr := func(c codes.Code) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { want := status.New(c, "").Err() if c == codes.OK { want = io.EOF @@ -282,7 +284,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } cCloseSend := func() clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { if err := stream.CloseSend(); err != nil { return fmt.Errorf("client: CloseSend() = %v; want ", err) } @@ -291,13 +293,13 @@ func (s) TestRetryStreaming(t *testing.T) { } var curTime time.Time cGetTime := func() clientOp { - return func(_ testpb.TestService_FullDuplexCallClient) error { + return func(_ testgrpc.TestService_FullDuplexCallClient) error { curTime = time.Now() return nil } } cCheckElapsed := func(d time.Duration) clientOp { - return func(_ testpb.TestService_FullDuplexCallClient) error { + return func(_ testgrpc.TestService_FullDuplexCallClient) error { if elapsed := 
time.Since(curTime); elapsed < d { return fmt.Errorf("elapsed time: %v; want >= %v", elapsed, d) } @@ -305,13 +307,13 @@ func (s) TestRetryStreaming(t *testing.T) { } } cHdr := func() clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { _, err := stream.Header() return err } } cCtx := func() clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { stream.Context() return nil } @@ -400,7 +402,7 @@ func (s) TestRetryStreaming(t *testing.T) { var serverOpIter int var serverOps []serverOp ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for serverOpIter < len(serverOps) { op := serverOps[serverOpIter] serverOpIter++ @@ -533,7 +535,7 @@ func (s) TestRetryStats(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("unexpected EmptyCall error: %v", err) @@ -641,7 +643,7 @@ func (s) TestRetryTransparentWhenCommitted(t *testing.T) { first := grpcsync.NewEvent() ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { // signal? 
if !first.HasFired() { first.Fire() diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index 80f04dd25ab4..8069e32358fb 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -37,7 +37,8 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` @@ -79,7 +80,7 @@ func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOpt t.Fatalf("grpc.Dial() failed: %v", err) } t.Cleanup(func() { cc.Close() }) - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) // At this point, the resolver has not returned any addresses to the channel. // This RPC must block until the context expires. @@ -125,7 +126,7 @@ func (s) TestRoundRobin_AddressesRemoved(t *testing.T) { } const msgWant = "produced zero addresses" - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); !strings.Contains(status.Convert(err).Message(), msgWant) { t.Fatalf("EmptyCall() = %v, want Contains(Message(), %q)", err, msgWant) } @@ -150,7 +151,7 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { } } - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) doneCh := make(chan struct{}) go func() { // The channel is currently in TransientFailure and this RPC will block @@ -208,7 +209,7 @@ func (s) TestRoundRobin_OneServerDown(t *testing.T) { for i := 0; i < len(backends)-1; i++ { addrs[i] = resolver.Address{Addr: backends[i].Address} } - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { t.Fatalf("RPCs are not being round robined 
across remaining servers: %v", err) } @@ -236,7 +237,7 @@ func (s) TestRoundRobin_AllServersDown(t *testing.T) { } // Failfast RPCs should fail with Unavailable. - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.Unavailable { return } @@ -291,7 +292,7 @@ func (s) TestRoundRobin_UpdateAddressAttributes(t *testing.T) { r.UpdateState(resolver.State{Addresses: []resolver.Address{addr}}) // Make an RPC and ensure it does not contain the metadata we are looking for. - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { diff --git a/test/server_test.go b/test/server_test.go index 411e0aa3c23d..6d525f7954e1 100644 --- a/test/server_test.go +++ b/test/server_test.go @@ -27,7 +27,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type ctxKey string @@ -40,7 +42,7 @@ func (s) TestServerReturningContextError(t *testing.T) { EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return nil, context.DeadlineExceeded }, - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { return context.DeadlineExceeded }, } @@ -286,7 +288,7 @@ func (s) TestChainStreamServerInterceptor(t *testing.T) { } ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if 
callCounts[0] != 1 { return status.Errorf(codes.Internal, "callCounts[0] should be 1, but got=%d", callCounts[0]) } diff --git a/test/stream_cleanup_test.go b/test/stream_cleanup_test.go index 53298ea372fc..f1d7c536aa01 100644 --- a/test/stream_cleanup_test.go +++ b/test/stream_cleanup_test.go @@ -28,7 +28,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestStreamCleanup(t *testing.T) { @@ -68,7 +70,7 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) { serverReturnedStatus := make(chan struct{}) ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { defer func() { close(serverReturnedStatus) }() diff --git a/test/transport_test.go b/test/transport_test.go index c78abdc5693e..d58bdf8acd77 100644 --- a/test/transport_test.go +++ b/test/transport_test.go @@ -31,7 +31,9 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // connWrapperWithCloseCh wraps a net.Conn and fires an event when closed. 
@@ -85,7 +87,7 @@ func (s) TestClientTransportRestartsAfterStreamIDExhausted(t *testing.T) { }() ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if _, err := stream.Recv(); err != nil { return status.Errorf(codes.Internal, "unexpected error receiving: %v", err) } @@ -108,7 +110,7 @@ func (s) TestClientTransportRestartsAfterStreamIDExhausted(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - var streams []testpb.TestService_FullDuplexCallClient + var streams []testgrpc.TestService_FullDuplexCallClient const numStreams = 3 // expected number of conns when each stream is created i.e., 3rd stream is created diff --git a/test/xds/xds_client_ack_nack_test.go b/test/xds/xds_client_ack_nack_test.go index 3b2a4329e17d..793bdc2fa624 100644 --- a/test/xds/xds_client_ack_nack_test.go +++ b/test/xds/xds_client_ack_nack_test.go @@ -32,8 +32,8 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // We are interested in LDS, RDS, CDS and EDS resources as part of the regular diff --git a/test/xds/xds_client_affinity_test.go b/test/xds/xds_client_affinity_test.go index 69ae3fc147c2..159d295e1104 100644 --- a/test/xds/xds_client_affinity_test.go +++ b/test/xds/xds_client_affinity_test.go @@ -31,8 +31,8 @@ import ( v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3routepb 
"github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // hashRouteConfig returns a RouteConfig resource with hash policy set to diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index 0bd16779e1e1..d94b2f40fa26 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -39,8 +39,8 @@ import ( v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // TestClientSideFederation tests that federation is supported. 
@@ -187,7 +187,7 @@ func (s) TestFederation_UnknownAuthorityInDialTarget(t *testing.T) { defer cc.Close() t.Log("Created ClientConn to test service") - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall() RPC: %v", err) } @@ -260,7 +260,7 @@ func (s) TestFederation_UnknownAuthorityInReceivedResponse(t *testing.T) { defer cc.Close() t.Log("Created ClientConn to test service") - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) _, err = client.EmptyCall(ctx, &testpb.Empty{}) if err == nil { t.Fatal("EmptyCall RPC succeeded for target with unknown authority when expected to fail") diff --git a/test/xds/xds_client_ignore_resource_deletion_test.go b/test/xds/xds_client_ignore_resource_deletion_test.go index 2bb6959ae552..5d1684cdd76d 100644 --- a/test/xds/xds_client_ignore_resource_deletion_test.go +++ b/test/xds/xds_client_ignore_resource_deletion_test.go @@ -41,8 +41,8 @@ import ( endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const ( diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go index 6e0ae23df4bd..a431ab5f3334 100644 --- a/test/xds/xds_client_integration_test.go +++ b/test/xds/xds_client_integration_test.go @@ -32,8 +32,8 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils/xds/e2e" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb 
"google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type s struct { diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go index a2753c3ea8ca..d0afcca78ae6 100644 --- a/test/xds/xds_client_outlier_detection_test.go +++ b/test/xds/xds_client_outlier_detection_test.go @@ -34,10 +34,10 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils/xds/e2e" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/wrapperspb" ) @@ -121,7 +121,7 @@ func clusterWithOutlierDetection(clusterName, edsServiceName string, secLevel e2 // // Returns a non-nil error if context deadline expires before RPCs start to get // roundrobined across the given backends. 
-func checkRoundRobinRPCs(ctx context.Context, client testpb.TestServiceClient, addrs []resolver.Address) error { +func checkRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { wantAddrCount := make(map[string]int) for _, addr := range addrs { wantAddrCount[addr.Addr]++ diff --git a/test/xds/xds_client_retry_test.go b/test/xds/xds_client_retry_test.go index fa3f4b865f6e..6af0459af7c8 100644 --- a/test/xds/xds_client_retry_test.go +++ b/test/xds/xds_client_retry_test.go @@ -32,8 +32,8 @@ import ( "google.golang.org/protobuf/types/known/wrapperspb" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestClientSideRetry(t *testing.T) { diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index afc30bc82133..a94e3f2bcedf 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -37,8 +37,8 @@ import ( v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" _ "google.golang.org/grpc/balancer/rls" // Register the RLS Load Balancing policy. 
) diff --git a/test/xds/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go index 2c5f5aea611f..f2974d47c181 100644 --- a/test/xds/xds_security_config_nack_test.go +++ b/test/xds/xds_security_config_nack_test.go @@ -31,8 +31,8 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go index cf0acccec51e..829843b19c48 100644 --- a/test/xds/xds_server_integration_test.go +++ b/test/xds/xds_server_integration_test.go @@ -34,12 +34,12 @@ import ( "google.golang.org/grpc/status" "google.golang.org/grpc/xds" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type testService struct { - testpb.TestServiceServer + testgrpc.TestServiceServer } func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { diff --git a/test/xds/xds_server_rbac_test.go b/test/xds/xds_server_rbac_test.go index 3841d061c375..346dfe816c18 100644 --- a/test/xds/xds_server_rbac_test.go +++ b/test/xds/xds_server_rbac_test.go @@ -45,8 +45,8 @@ import ( v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - testgrpc 
"google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // TestServerSideXDS_RouteConfiguration is an e2e test which verifies routing @@ -830,7 +830,7 @@ func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } @@ -887,7 +887,7 @@ func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } diff --git a/test/xds/xds_server_serving_mode_test.go b/test/xds/xds_server_serving_mode_test.go index bf890133141a..2247a077e3bb 100644 --- a/test/xds/xds_server_serving_mode_test.go +++ b/test/xds/xds_server_serving_mode_test.go @@ -34,8 +34,8 @@ import ( "google.golang.org/grpc/xds" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // TestServerSideXDS_RedundantUpdateSuppression tests the scenario where the @@ -207,7 +207,7 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { // Initialize an xDS-enabled gRPC server and register the stubServer on it. 
server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) defer server.Stop() - testpb.RegisterTestServiceServer(server, &testService{}) + testgrpc.RegisterTestServiceServer(server, &testService{}) // Setup the management server to respond with server-side Listener // resources for both listeners. diff --git a/xds/internal/balancer/clusterimpl/tests/balancer_test.go b/xds/internal/balancer/clusterimpl/tests/balancer_test.go index 57fa21166796..02f2389c7e70 100644 --- a/xds/internal/balancer/clusterimpl/tests/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/tests/balancer_test.go @@ -37,8 +37,8 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" _ "google.golang.org/grpc/xds" ) diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go index a69799aa730a..26cc8986bc0c 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -44,8 +44,8 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" _ 
"google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the "cds_experimental" LB policy. ) diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index ce497c1706d7..75efb758b4fc 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -45,8 +45,8 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the "cluster_resolver_experimental" LB policy. ) @@ -225,7 +225,7 @@ func (s) TestEDS_OneLocality(t *testing.T) { defer cc.Close() // Ensure RPCs are being roundrobined across the single backend. - testClient := testpb.NewTestServiceClient(cc) + testClient := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[:1]); err != nil { t.Fatal(err) } @@ -333,7 +333,7 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { defer cc.Close() // Ensure RPCs are being weighted roundrobined across the two backends. - testClient := testpb.NewTestServiceClient(cc) + testClient := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, addrs[0:2]); err != nil { t.Fatal(err) } @@ -472,7 +472,7 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { // Ensure RPCs are being weighted roundrobined across healthy backends from // both localities. 
- testClient := testpb.NewTestServiceClient(cc) + testClient := testgrpc.NewTestServiceClient(cc) if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, append(addrs[0:2], addrs[6:8]...)); err != nil { t.Fatal(err) } @@ -537,7 +537,7 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { t.Fatalf("failed to dial local test server: %v", err) } defer cc.Close() - testClient := testpb.NewTestServiceClient(cc) + testClient := testgrpc.NewTestServiceClient(cc) if err := waitForAllPrioritiesRemovedError(ctx, t, testClient); err != nil { t.Fatal(err) } diff --git a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go index 0dfcc19d245e..c687dc576663 100644 --- a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go +++ b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go @@ -36,7 +36,10 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // To register helper functions which register/unregister Outlier Detection LB Policy. ) @@ -99,7 +102,7 @@ func setupBackends(t *testing.T) ([]string, func()) { // // Returns a non-nil error if context deadline expires before RPCs start to get // roundrobined across the given backends. 
-func checkRoundRobinRPCs(ctx context.Context, client testpb.TestServiceClient, addrs []resolver.Address) error { +func checkRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { wantAddrCount := make(map[string]int) for _, addr := range addrs { wantAddrCount[addr.Addr]++ @@ -224,7 +227,7 @@ func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - testServiceClient := testpb.NewTestServiceClient(cc) + testServiceClient := testgrpc.NewTestServiceClient(cc) // At first, due to no statistics on each of the backends, the 3 // upstreams should all be round robined across. @@ -301,7 +304,7 @@ func (s) TestNoopConfiguration(t *testing.T) { defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - testServiceClient := testpb.NewTestServiceClient(cc) + testServiceClient := testgrpc.NewTestServiceClient(cc) for i := 0; i < 2; i++ { // Since the Outlier Detection Balancer starts with a noop diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go index 19a7aafb73b5..00da6e88fb95 100644 --- a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -31,8 +31,8 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - testgrpc "google.golang.org/grpc/test/grpc_testing" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" _ "google.golang.org/grpc/xds/internal/balancer/ringhash" // Register the ring_hash_experimental LB policy. 
) @@ -51,7 +51,7 @@ const ( ) type testService struct { - testpb.TestServiceServer + testgrpc.TestServiceServer } func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index beeaf43321ad..20dd0a2c95cd 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -50,7 +50,8 @@ import ( fpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" tpb "github.com/envoyproxy/go-control-plane/envoy/type/v3" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. @@ -67,14 +68,14 @@ func Test(t *testing.T) { } type testService struct { - testpb.TestServiceServer + testgrpc.TestServiceServer } func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil } -func (*testService) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (*testService) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { // End RPC after client does a CloseSend. for { if _, err := stream.Recv(); err == io.EOF { @@ -116,7 +117,7 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { // Initialize a gRPC server and register the stubServer on it. server := grpc.NewServer() - testpb.RegisterTestServiceServer(server, &testService{}) + testgrpc.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). 
lis, err := testutils.LocalTCPListener() @@ -532,7 +533,7 @@ func (s) TestFaultInjection_Unary(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) count := 0 for _, want := range tc.want { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -607,9 +608,9 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) - streams := make(chan testpb.TestService_FullDuplexCallClient, 5) // startStream() is called 5 times + streams := make(chan testgrpc.TestService_FullDuplexCallClient, 5) // startStream() is called 5 times startStream := func() { str, err := client.FullDuplexCall(ctx) if err != nil { From 3489bb7d51741618f1b1e0c723ca6dc905118e3b Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Thu, 6 Apr 2023 13:29:59 -0700 Subject: [PATCH 864/998] xdsclient/test: deflake TestWatchResourceTimerCanRestartOnIgnoredADSRecvError (#6159) --- xds/internal/testutils/resource_watcher.go | 9 +++- xds/internal/xdsclient/authority_test.go | 59 +++++++++++++++++----- 2 files changed, 53 insertions(+), 15 deletions(-) diff --git a/xds/internal/testutils/resource_watcher.go b/xds/internal/testutils/resource_watcher.go index 01235b0ba66f..304f40de9d5a 100644 --- a/xds/internal/testutils/resource_watcher.go +++ b/xds/internal/testutils/resource_watcher.go @@ -31,6 +31,8 @@ type TestResourceWatcher struct { UpdateCh chan *xdsresource.ResourceData // ErrorCh is the channel on which errors from the xDS client are delivered. 
ErrorCh chan error + // ResourceDoesNotExistCh is the channel used to indicate calls to OnResourceDoesNotExist + ResourceDoesNotExistCh chan struct{} } // OnUpdate is invoked by the xDS client to report an update on the resource @@ -52,7 +54,12 @@ func (w *TestResourceWatcher) OnError(err error) { // OnResourceDoesNotExist is used by the xDS client to report that the resource // being watched no longer exists. -func (w *TestResourceWatcher) OnResourceDoesNotExist() {} +func (w *TestResourceWatcher) OnResourceDoesNotExist() { + select { + case w.ResourceDoesNotExistCh <- struct{}{}: + default: + } +} // NewTestResourceWatcher returns a TestResourceWatcher to watch for resources // via the xDS client. diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 47979f4fa573..96748b7e3ef0 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -26,8 +26,10 @@ import ( "github.com/google/uuid" "google.golang.org/grpc/internal/grpcsync" + util "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/testutils" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -176,21 +178,39 @@ func (s) TestTimerAndWatchStateOnErrorCallback(t *testing.T) { // This tests the case where the ADS stream breaks after successfully receiving // a message on the stream. The test performs the following: -// - configures the management server with resourceA. +// - configures the management server with the ability to dropRequests based on +// a boolean flag. +// - update the mgmt server with resourceA. // - registers a watch for resourceA and verifies that the watcher's update // callback is invoked. // - registers a watch for resourceB and verifies that the watcher's update // callback is not invoked. 
This is because the management server does not // contain resourceB. -// - stops the management server to verify that the error propagated to the -// watcher is a connection error. This happens when the authority attempts -// to create a new stream. +// - force mgmt server to drop requests. Verify that watcher for resourceB gets +// connection error. +// - resume mgmt server to accept requests. +// - update the mgmt server with resourceB and verifies that the watcher's +// update callback is invoked. func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() + // Create a restartable listener which can close existing connections. + l, err := util.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis := util.NewRestartableListener(l) + defer lis.Close() + streamRestarted := grpcsync.NewEvent() + serverOpt := e2e.ManagementServerOptions{ + Listener: lis, + OnStreamClosed: func(int64, *v3corepb.Node) { + streamRestarted.Fire() + }, + } - // Using a shorter expiry timeout to verify that the watch timeout was never fired. - a, ms, nodeID := setupTest(ctx, t, emptyServerOpts, defaultTestWatchExpiryTimeout) + a, ms, nodeID := setupTest(ctx, t, serverOpt, defaultTestTimeout) + defer ms.Stop() defer a.close() nameA := "xdsclient-test-lds-resourceA" @@ -211,6 +231,9 @@ func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { case <-watcherA.UpdateCh: } + cancelA() + lis.Stop() + nameB := "xdsclient-test-lds-resourceB" watcherB := testutils.NewTestResourceWatcher() cancelB := a.watchResource(listenerResourceType, nameB, watcherB) @@ -218,14 +241,15 @@ func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { // Blocking on resource B watcher's error channel. 
This error should be due to // connectivity issue when reconnecting because the mgmt server was already been - // stopped. ALl other errors or an update will fail the test. - cancelA() - ms.Stop() + // stopped. Also verifying that OnResourceDoesNotExist() method was not invoked + // on the watcher. select { case <-ctx.Done(): t.Fatal("Test timed out before mgmt server got the request.") case u := <-watcherB.UpdateCh: t.Fatalf("Watch got an unexpected resource update: %v.", u) + case <-watcherB.ResourceDoesNotExistCh: + t.Fatalf("Illegal invocation of OnResourceDoesNotExist() method on the watcher.") case gotErr := <-watcherB.ErrorCh: wantErr := xdsresource.ErrorTypeConnection if xdsresource.ErrType(gotErr) != wantErr { @@ -233,13 +257,20 @@ func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { } } - // Since there was already a response on the stream, the timer for resource B - // should not fire. If the timer did fire, watch state would be in `watchStateTimeout`. - <-time.After(defaultTestWatchExpiryTimeout) - if err := compareWatchState(a, nameB, watchStateStarted); err != nil { - t.Fatalf("Invalid watch state: %v.", err) + // Updating server with resource B and also re-enabling requests on the server. 
+ if err := updateResourceInServer(ctx, ms, nameB, nodeID); err != nil { + t.Fatalf("Failed to update server with resource: %q; err: %q", nameB, err) } + lis.Restart() + for { + select { + case <-ctx.Done(): + t.Fatal("Test timed out before watcher received the update.") + case <-watcherB.UpdateCh: + return + } + } } func compareWatchState(a *authority, rn string, wantState watchState) error { From 01f8b866af2de489738abace42941bfe1f462be3 Mon Sep 17 00:00:00 2001 From: ulas <45449532+ulascansenturk@users.noreply.github.com> Date: Fri, 7 Apr 2023 20:55:17 +0300 Subject: [PATCH 865/998] Add documentation on some anti-patterns (#6034) Co-authored-by: Doug Fawley --- Documentation/anti-patterns.md | 206 +++++++++++++++++++++++++++++++++ dialoptions.go | 9 ++ 2 files changed, 215 insertions(+) create mode 100644 Documentation/anti-patterns.md diff --git a/Documentation/anti-patterns.md b/Documentation/anti-patterns.md new file mode 100644 index 000000000000..08469fc179f7 --- /dev/null +++ b/Documentation/anti-patterns.md @@ -0,0 +1,206 @@ +## Anti-Patterns + +### Dialing in gRPC +[`grpc.Dial`](https://pkg.go.dev/google.golang.org/grpc#Dial) is a function in +the gRPC library that creates a virtual connection from the gRPC client to the +gRPC server. It takes a target URI (which can represent the name of a logical +backend service and could resolve to multiple actual addresses) and a list of +options, and returns a +[`ClientConn`](https://pkg.go.dev/google.golang.org/grpc#ClientConn) object that +represents the connection to the server. The `ClientConn` contains one or more +actual connections to real server backends and attempts to keep these +connections healthy by automatically reconnecting to them when they break. + +The `Dial` function can also be configured with various options to customize the +behavior of the client connection. 
For example, developers could use options +such a +[`WithTransportCredentials`](https://pkg.go.dev/google.golang.org/grpc#WithTransportCredentials) +to configure the transport credentials to use. + +While `Dial` is commonly referred to as a "dialing" function, it doesn't +actually perform the low-level network dialing operation like +[`net.Dial`](https://pkg.go.dev/net#Dial) would. Instead, it creates a virtual +connection from the gRPC client to the gRPC server. + +`Dial` does initiate the process of connecting to the server, but it uses the +ClientConn object to manage and maintain that connection over time. This is why +errors encountered during the initial connection are no different from those +that occur later on, and why it's important to handle errors from RPCs rather +than relying on options like +[`FailOnNonTempDialError`](https://pkg.go.dev/google.golang.org/grpc#FailOnNonTempDialError), +[`WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), and +[`WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). +In fact, `Dial` does not always establish a connection to servers by default. +The connection behavior is determined by the load balancing policy being used. +For instance, an "active" load balancing policy such as Round Robin attempts to +maintain a constant connection, while the default "pick first" policy delays +connection until an RPC is executed. Instead of using the WithBlock option, which +may not be recommended in some cases, you can call the +[`ClientConn.Connect`](https://pkg.go.dev/google.golang.org/grpc#ClientConn.Connect) +method to explicitly initiate a connection. + +### Using `FailOnNonTempDialError`, `WithBlock`, and `WithReturnConnectionError` + +The gRPC API provides several options that can be used to configure the behavior +of dialing and connecting to a gRPC server. 
Some of these options, such as +`FailOnNonTempDialError`, `WithBlock`, and `WithReturnConnectionError`, rely on +failures at dial time. However, we strongly discourage developers from using +these options, as they can introduce race conditions and result in unreliable +and difficult-to-debug code. + +One of the most important reasons for avoiding these options, which is often +overlooked, is that connections can fail at any point in time. This means that +you need to handle RPC failures caused by connection issues, regardless of +whether a connection was never established in the first place, or if it was +created and then immediately lost. Implementing proper error handling for RPCs +is crucial for maintaining the reliability and stability of your gRPC +communication. + +### Why we discourage using `FailOnNonTempDialError`, `WithBlock`, and `WithReturnConnectionError` + +When a client attempts to connect to a gRPC server, it can encounter a variety +of errors, including network connectivity issues, server-side errors, and +incorrect usage of the gRPC API. The options `FailOnNonTempDialError`, +`WithBlock`, and `WithReturnConnectionError` are designed to handle some of +these errors, but they do so by relying on failures at dial time. This means +that they may not provide reliable or accurate information about the status of +the connection. + +For example, if a client uses `WithBlock` to wait for a connection to be +established, it may end up waiting indefinitely if the server is not responding. +Similarly, if a client uses `WithReturnConnectionError` to return a connection +error if dialing fails, it may miss opportunities to recover from transient +network issues that are resolved shortly after the initial dial attempt. + +## Best practices for error handling in gRPC + +Instead of relying on failures at dial time, we strongly encourage developers to +rely on errors from RPCs. When a client makes an RPC, it can receive an error +response from the server. 
These errors can provide valuable information about +what went wrong, including information about network issues, server-side errors, +and incorrect usage of the gRPC API. + +By handling errors from RPCs correctly, developers can write more reliable and +robust gRPC applications. Here are some best practices for error handling in +gRPC: + +- Always check for error responses from RPCs and handle them appropriately. +- Use the `status` field of the error response to determine the type of error that + occurred. +- When retrying failed RPCs, consider using the built-in retry mechanism + provided by gRPC-Go, if available, instead of manually implementing retries. + Refer to the [gRPC-Go retry example + documentation](https://github.com/grpc/grpc-go/blob/master/examples/features/retry/README.md) + for more information. +- Avoid using `FailOnNonTempDialError`, `WithBlock`, and + `WithReturnConnectionError`, as these options can introduce race conditions and + result in unreliable and difficult-to-debug code. +- If making the outgoing RPC in order to handle an incoming RPC, be sure to + translate the status code before returning the error from your method handler. + For example, if the error is an `INVALID_ARGUMENT` error, that probably means + your service has a bug (otherwise it shouldn't have triggered this error), in + which case `INTERNAL` is more appropriate to return back to your users. + +### Example: Handling errors from an RPC + +The following code snippet demonstrates how to handle errors from an RPC in +gRPC: + +```go +ctx, cancel := context.WithTimeout(context.Background(), time.Second) +defer cancel() + +res, err := client.MyRPC(ctx, &MyRequest{}) +if err != nil { + // Handle the error appropriately, + // log it & return an error to the caller, etc. 
+ log.Printf("Error calling MyRPC: %v", err) + return nil, err +} + +// Use the response as appropriate +log.Printf("MyRPC response: %v", res) +``` + +To determine the type of error that occurred, you can use the status field of +the error response: + + +```go +resp, err := client.MakeRPC(context.Background(), request) +if err != nil { + status, ok := status.FromError(err) + if ok { + // Handle the error based on its status code + if status.Code() == codes.NotFound { + log.Println("Requested resource not found") + } else { + log.Printf("RPC error: %v", status.Message()) + } + } else { + //Handle non-RPC errors + log.Printf("Non-RPC error: %v", err) + } + return +} + +// Use the response as needed +log.Printf("Response received: %v", resp) +``` + +### Example: Using a backoff strategy + + +When retrying failed RPCs, use a backoff strategy to avoid overwhelming the +server or exacerbating network issues: + + +```go +var res *MyResponse +var err error + +// If the user doesn't have a context with a deadline, create one +ctx, cancel := context.WithTimeout(context.Background(), time.Second) +defer cancel() + +// Retry the RPC call a maximum number of times +for i := 0; i < maxRetries; i++ { + + // Make the RPC call + res, err = client.MyRPC(ctx, &MyRequest{}) + + // Check if the RPC call was successful + if err == nil { + // The RPC was successful, so break out of the loop + break + } + + // The RPC failed, so wait for a backoff period before retrying + backoff := time.Duration(i) * time.Second + log.Printf("Error calling MyRPC: %v; retrying in %v", err, backoff) + time.Sleep(backoff) +} + +// Check if the RPC call was successful after all retries +if err != nil { + // All retries failed, so handle the error appropriately + log.Printf("Error calling MyRPC: %v", err) + return nil, err +} + +// Use the response as appropriate +log.Printf("MyRPC response: %v", res) +``` + + +## Conclusion + +The 
+[`FailOnNonTempDialError`](https://pkg.go.dev/google.golang.org/grpc#FailOnNonTempDialError), +[`WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), and +[`WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError) +options are designed to handle errors at dial time, but they can introduce race +conditions and result in unreliable and difficult-to-debug code. Instead of +relying on these options, we strongly encourage developers to rely on errors +from RPCs for error handling. By following best practices for error handling in +gRPC, developers can write more reliable and robust gRPC applications. diff --git a/dialoptions.go b/dialoptions.go index e9d6852fd2c1..cdc8263bda65 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -295,6 +295,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -306,6 +309,9 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a @@ -448,6 +454,9 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // +// Use of this feature is not recommended. 
For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// // # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a From 17b693d78489f52f09187e8b9ddb27f8e3fe9fb8 Mon Sep 17 00:00:00 2001 From: Matthew Stevenson <52979934+matthewstevenson88@users.noreply.github.com> Date: Mon, 10 Apr 2023 08:59:12 -0700 Subject: [PATCH 866/998] alts: Perform full handshake in ALTS tests. (#6177) --- credentials/alts/alts_test.go | 106 ++++++++++++ .../internal/handshaker/service/service.go | 18 ++ .../alts/internal/testutil/testutil.go | 155 ++++++++++++++++++ 3 files changed, 279 insertions(+) diff --git a/credentials/alts/alts_test.go b/credentials/alts/alts_test.go index 22ad5a48b09e..7d0129d09cd3 100644 --- a/credentials/alts/alts_test.go +++ b/credentials/alts/alts_test.go @@ -22,12 +22,23 @@ package alts import ( + "context" "reflect" + "runtime" + "sync" "testing" + "time" "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/alts/internal/handshaker/service" + altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/credentials/alts/internal/testutil" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type s struct { @@ -287,6 +298,55 @@ func (s) TestCheckRPCVersions(t *testing.T) { } } +// TestFullHandshake performs a full ALTS handshake between a test client and +// server, where both client and server offload to a local, fake handshaker +// service. +func TestFullHandshake(t *testing.T) { + // If GOMAXPROCS is set to less than 2, do not run this test. 
This test + // requires at least 2 goroutines to succeed (one goroutine where a + // server listens, another goroutine where a client runs). + if runtime.GOMAXPROCS(0) < 2 { + return + } + + // The vmOnGCP global variable MUST be reset to true after the client + // or server credentials have been created, but before the ALTS + // handshake begins. If vmOnGCP is not reset and this test is run + // anywhere except for a GCP VM, then the ALTS handshake will + // immediately fail. + once.Do(func() { + vmOnGCP = true + }) + vmOnGCP = true + + // Start the fake handshaker service and the server. + var wait sync.WaitGroup + defer wait.Wait() + stopHandshaker, handshakerAddress := startFakeHandshakerService(t, &wait) + defer stopHandshaker() + stopServer, serverAddress := startServer(t, handshakerAddress, &wait) + defer stopServer() + + // Ping the server, authenticating with ALTS. + clientCreds := NewClientCreds(&ClientOptions{HandshakerServiceAddress: handshakerAddress}) + conn, err := grpc.Dial(serverAddress, grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", serverAddress, err) + } + defer conn.Close() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + c := testgrpc.NewTestServiceClient(conn) + if _, err = c.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.WaitForReady(true)); err != nil { + t.Errorf("c.UnaryCall() failed: %v", err) + } + + // Close open connections to the fake handshaker service. 
+ if err := service.CloseForTesting(); err != nil { + t.Errorf("service.CloseForTesting() failed: %v", err) + } +} + func version(major, minor uint32) *altspb.RpcProtocolVersions_Version { return &altspb.RpcProtocolVersions_Version{ Major: major, @@ -300,3 +360,49 @@ func versions(minMajor, minMinor, maxMajor, maxMinor uint32) *altspb.RpcProtocol MaxRpcVersion: version(maxMajor, maxMinor), } } + +func startFakeHandshakerService(t *testing.T, wait *sync.WaitGroup) (stop func(), address string) { + listener, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("LocalTCPListener() failed: %v", err) + } + s := grpc.NewServer() + altsgrpc.RegisterHandshakerServiceServer(s, &testutil.FakeHandshaker{}) + wait.Add(1) + go func() { + defer wait.Done() + if err := s.Serve(listener); err != nil { + t.Errorf("failed to serve: %v", err) + } + }() + return func() { s.Stop() }, listener.Addr().String() +} + +func startServer(t *testing.T, handshakerServiceAddress string, wait *sync.WaitGroup) (stop func(), address string) { + listener, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("LocalTCPListener() failed: %v", err) + } + serverOpts := &ServerOptions{HandshakerServiceAddress: handshakerServiceAddress} + creds := NewServerCreds(serverOpts) + s := grpc.NewServer(grpc.Creds(creds)) + testgrpc.RegisterTestServiceServer(s, &testServer{}) + wait.Add(1) + go func() { + defer wait.Done() + if err := s.Serve(listener); err != nil { + t.Errorf("s.Serve(%v) failed: %v", listener, err) + } + }() + return func() { s.Stop() }, listener.Addr().String() +} + +type testServer struct { + testgrpc.UnimplementedTestServiceServer +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{ + Payload: &testpb.Payload{}, + }, nil +} diff --git a/credentials/alts/internal/handshaker/service/service.go b/credentials/alts/internal/handshaker/service/service.go index 
2de2c4affdaa..e1cdafb980cd 100644 --- a/credentials/alts/internal/handshaker/service/service.go +++ b/credentials/alts/internal/handshaker/service/service.go @@ -58,3 +58,21 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { } return hsConn, nil } + +// CloseForTesting closes all open connections to the handshaker service. +// +// For testing purposes only. +func CloseForTesting() error { + for _, hsConn := range hsConnMap { + if hsConn == nil { + continue + } + if err := hsConn.Close(); err != nil { + return err + } + } + + // Reset the connection map. + hsConnMap = make(map[string]*grpc.ClientConn) + return nil +} diff --git a/credentials/alts/internal/testutil/testutil.go b/credentials/alts/internal/testutil/testutil.go index e114719d5a88..24a61202a3da 100644 --- a/credentials/alts/internal/testutil/testutil.go +++ b/credentials/alts/internal/testutil/testutil.go @@ -22,11 +22,15 @@ package testutil import ( "bytes" "encoding/binary" + "fmt" "io" "net" "sync" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/alts/internal/conn" + altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" ) // Stats is used to collect statistics about concurrent handshake calls. @@ -123,3 +127,154 @@ func MakeFrame(pl string) []byte { copy(f[conn.MsgLenFieldSize:], []byte(pl)) return f } + +// FakeHandshaker is a fake implementation of the ALTS handshaker service. +type FakeHandshaker struct { + altsgrpc.HandshakerServiceServer +} + +// DoHandshake performs a fake ALTS handshake. 
+func (h *FakeHandshaker) DoHandshake(stream altsgrpc.HandshakerService_DoHandshakeServer) error { + var isAssistingClient bool + for { + req, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("stream recv failure: %v", err) + } + var resp *altspb.HandshakerResp + switch req := req.ReqOneof.(type) { + case *altspb.HandshakerReq_ClientStart: + isAssistingClient = true + resp, err = h.processStartClient(req.ClientStart) + if err != nil { + return fmt.Errorf("processStartClient failure: %v", err) + } + case *altspb.HandshakerReq_ServerStart: + isAssistingClient = false + resp, err = h.processServerStart(req.ServerStart) + if err != nil { + return fmt.Errorf("processServerClient failure: %v", err) + } + case *altspb.HandshakerReq_Next: + resp, err = h.processNext(req.Next, isAssistingClient) + if err != nil { + return fmt.Errorf("processNext failure: %v", err) + } + default: + return fmt.Errorf("handshake request has unexpected type: %v", req) + } + + if err = stream.Send(resp); err != nil { + return fmt.Errorf("stream send failure: %v", err) + } + } +} + +func (h *FakeHandshaker) processStartClient(req *altspb.StartClientHandshakeReq) (*altspb.HandshakerResp, error) { + if req.HandshakeSecurityProtocol != altspb.HandshakeProtocol_ALTS { + return nil, fmt.Errorf("unexpected handshake security protocol: %v", req.HandshakeSecurityProtocol) + } + if len(req.ApplicationProtocols) != 1 || req.ApplicationProtocols[0] != "grpc" { + return nil, fmt.Errorf("unexpected application protocols: %v", req.ApplicationProtocols) + } + if len(req.RecordProtocols) != 1 || req.RecordProtocols[0] != "ALTSRP_GCM_AES128_REKEY" { + return nil, fmt.Errorf("unexpected record protocols: %v", req.RecordProtocols) + } + return &altspb.HandshakerResp{ + OutFrames: []byte("ClientInit"), + BytesConsumed: 0, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil +} + +func (h *FakeHandshaker) processServerStart(req 
*altspb.StartServerHandshakeReq) (*altspb.HandshakerResp, error) { + if len(req.ApplicationProtocols) != 1 || req.ApplicationProtocols[0] != "grpc" { + return nil, fmt.Errorf("unexpected application protocols: %v", req.ApplicationProtocols) + } + parameters, ok := req.GetHandshakeParameters()[int32(altspb.HandshakeProtocol_ALTS)] + if !ok { + return nil, fmt.Errorf("missing ALTS handshake parameters") + } + if len(parameters.RecordProtocols) != 1 || parameters.RecordProtocols[0] != "ALTSRP_GCM_AES128_REKEY" { + return nil, fmt.Errorf("unexpected record protocols: %v", parameters.RecordProtocols) + } + if string(req.InBytes) != "ClientInit" { + return nil, fmt.Errorf("unexpected in bytes: %v", req.InBytes) + } + return &altspb.HandshakerResp{ + OutFrames: []byte("ServerInitServerFinished"), + BytesConsumed: 10, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil +} + +func (h *FakeHandshaker) processNext(req *altspb.NextHandshakeMessageReq, isAssistingClient bool) (*altspb.HandshakerResp, error) { + if isAssistingClient { + if !bytes.Equal(req.InBytes, []byte("ServerInitServerFinished")) { + return nil, fmt.Errorf("unexpected in bytes: got: %v, want: %v", req.InBytes, []byte("ServerInitServerFinished")) + } + return &altspb.HandshakerResp{ + OutFrames: []byte("ClientFinished"), + BytesConsumed: 24, + Result: &altspb.HandshakerResult{ + ApplicationProtocol: "grpc", + RecordProtocol: "ALTSRP_GCM_AES128_REKEY", + KeyData: []byte("negotiated-key-data-for-altsrp-gcm-aes128-rekey"), + PeerIdentity: &altspb.Identity{ + IdentityOneof: &altspb.Identity_ServiceAccount{ + ServiceAccount: "server@bar.com", + }, + }, + PeerRpcVersions: &altspb.RpcProtocolVersions{ + MaxRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + MinRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + }, + }, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil + } + if !bytes.Equal(req.InBytes, 
[]byte("ClientFinished")) { + return nil, fmt.Errorf("unexpected in bytes: got: %v, want: %v", req.InBytes, []byte("ClientFinished")) + } + return &altspb.HandshakerResp{ + BytesConsumed: 14, + Result: &altspb.HandshakerResult{ + ApplicationProtocol: "grpc", + RecordProtocol: "ALTSRP_GCM_AES128_REKEY", + KeyData: []byte("negotiated-key-data-for-altsrp-gcm-aes128-rekey"), + PeerIdentity: &altspb.Identity{ + IdentityOneof: &altspb.Identity_ServiceAccount{ + ServiceAccount: "client@baz.com", + }, + }, + PeerRpcVersions: &altspb.RpcProtocolVersions{ + MaxRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + MinRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + }, + }, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil +} From 81b30924fc44b3b7ecc77a4da763fc462f94e98e Mon Sep 17 00:00:00 2001 From: Joel Jeske Date: Mon, 10 Apr 2023 14:27:04 -0500 Subject: [PATCH 867/998] security/advancedtls: add TlsVersionOption to select desired min/max TLS versions (#6007) Co-authored-by: ZhenLian --- security/advancedtls/advancedtls.go | 28 ++++ .../advancedtls_integration_test.go | 156 +++++++++++++++++- security/advancedtls/advancedtls_test.go | 30 ++++ 3 files changed, 206 insertions(+), 8 deletions(-) diff --git a/security/advancedtls/advancedtls.go b/security/advancedtls/advancedtls.go index 9a33bd583f6e..4b5d1f4825c9 100644 --- a/security/advancedtls/advancedtls.go +++ b/security/advancedtls/advancedtls.go @@ -184,6 +184,15 @@ type ClientOptions struct { // RevocationConfig is the configurations for certificate revocation checks. // It could be nil if such checks are not needed. RevocationConfig *RevocationConfig + // MinVersion contains the minimum TLS version that is acceptable. + // By default, TLS 1.2 is currently used as the minimum when acting as a + // client, and TLS 1.0 when acting as a server. TLS 1.0 is the minimum + // supported by this package, both as a client and as a server. 
+ MinVersion uint16 + // MaxVersion contains the maximum TLS version that is acceptable. + // By default, the maximum version supported by this package is used, + // which is currently TLS 1.3. + MaxVersion uint16 } // ServerOptions contains the fields needed to be filled by the server. @@ -205,6 +214,15 @@ type ServerOptions struct { // RevocationConfig is the configurations for certificate revocation checks. // It could be nil if such checks are not needed. RevocationConfig *RevocationConfig + // MinVersion contains the minimum TLS version that is acceptable. + // By default, TLS 1.2 is currently used as the minimum when acting as a + // client, and TLS 1.0 when acting as a server. TLS 1.0 is the minimum + // supported by this package, both as a client and as a server. + MinVersion uint16 + // MaxVersion contains the maximum TLS version that is acceptable. + // By default, the maximum version supported by this package is used, + // which is currently TLS 1.3. + MaxVersion uint16 } func (o *ClientOptions) config() (*tls.Config, error) { @@ -222,11 +240,16 @@ func (o *ClientOptions) config() (*tls.Config, error) { if o.IdentityOptions.GetIdentityCertificatesForServer != nil { return nil, fmt.Errorf("GetIdentityCertificatesForServer cannot be specified on the client side") } + if o.MinVersion > o.MaxVersion { + return nil, fmt.Errorf("the minimum TLS version is larger than the maximum TLS version") + } config := &tls.Config{ ServerName: o.ServerNameOverride, // We have to set InsecureSkipVerify to true to skip the default checks and // use the verification function we built from buildVerifyFunc. InsecureSkipVerify: true, + MinVersion: o.MinVersion, + MaxVersion: o.MaxVersion, } // Propagate root-certificate-related fields in tls.Config. 
switch { @@ -293,6 +316,9 @@ func (o *ServerOptions) config() (*tls.Config, error) { if o.IdentityOptions.GetIdentityCertificatesForClient != nil { return nil, fmt.Errorf("GetIdentityCertificatesForClient cannot be specified on the server side") } + if o.MinVersion > o.MaxVersion { + return nil, fmt.Errorf("the minimum TLS version is larger than the maximum TLS version") + } clientAuth := tls.NoClientCert if o.RequireClientCert { // We have to set clientAuth to RequireAnyClientCert to force underlying @@ -302,6 +328,8 @@ func (o *ServerOptions) config() (*tls.Config, error) { } config := &tls.Config{ ClientAuth: clientAuth, + MinVersion: o.MinVersion, + MaxVersion: o.MaxVersion, } // Propagate root-certificate-related fields in tls.Config. switch { diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index 2e9076759857..3659497fd5d2 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ b/security/advancedtls/advancedtls_integration_test.go @@ -731,13 +731,12 @@ func (s) TestDefaultHostNameCheck(t *testing.T) { t.Fatalf("cs.LoadCerts() failed, err: %v", err) } for _, test := range []struct { - desc string - clientRoot *x509.CertPool - clientVerifyFunc CustomVerificationFunc - clientVType VerificationType - serverCert []tls.Certificate - serverVType VerificationType - expectError bool + desc string + clientRoot *x509.CertPool + clientVType VerificationType + serverCert []tls.Certificate + serverVType VerificationType + expectError bool }{ // Client side sets vType to CertAndHostVerification, and will do // default hostname check. 
Server uses a cert without "localhost" or @@ -787,7 +786,6 @@ func (s) TestDefaultHostNameCheck(t *testing.T) { pb.RegisterGreeterServer(s, greeterServer{}) go s.Serve(lis) clientOptions := &ClientOptions{ - VerifyPeer: test.clientVerifyFunc, RootOptions: RootCertificateOptions{ RootCACerts: test.clientRoot, }, @@ -811,3 +809,145 @@ func (s) TestDefaultHostNameCheck(t *testing.T) { }) } } + +func (s) TestTLSVersions(t *testing.T) { + cs := &testutils.CertStore{} + if err := cs.LoadCerts(); err != nil { + t.Fatalf("cs.LoadCerts() failed, err: %v", err) + } + for _, test := range []struct { + desc string + expectError bool + clientMinVersion uint16 + clientMaxVersion uint16 + serverMinVersion uint16 + serverMaxVersion uint16 + }{ + // Client side sets TLS version that is higher than required from the server side. + { + desc: "Client TLS version higher than server", + clientMinVersion: tls.VersionTLS13, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS12, + expectError: true, + }, + // Server side sets TLS version that is higher than required from the client side. + { + desc: "Server TLS version higher than client", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS12, + serverMinVersion: tls.VersionTLS13, + serverMaxVersion: tls.VersionTLS13, + expectError: true, + }, + // Client and server set proper TLS versions. 
+ { + desc: "Good TLS version settings", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.2 - 1.3 and server 1.2", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS12, + expectError: false, + }, + { + desc: "Client 1.2 - 1.3 and server 1.1 - 1.2", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS11, + serverMaxVersion: tls.VersionTLS12, + expectError: false, + }, + { + desc: "Client 1.2 - 1.3 and server 1.3", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS13, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.2 - 1.2 and server 1.2 - 1.3", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS12, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.1 - 1.2 and server 1.2 - 1.3", + clientMinVersion: tls.VersionTLS11, + clientMaxVersion: tls.VersionTLS12, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.3 and server 1.2 - 1.3", + clientMinVersion: tls.VersionTLS13, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + } { + test := test + t.Run(test.desc, func(t *testing.T) { + // Start a server using ServerOptions in another goroutine. 
+ serverOptions := &ServerOptions{ + IdentityOptions: IdentityCertificateOptions{ + Certificates: []tls.Certificate{cs.ServerPeerLocalhost1}, + }, + RequireClientCert: false, + VType: CertAndHostVerification, + MinVersion: test.serverMinVersion, + MaxVersion: test.serverMaxVersion, + } + serverTLSCreds, err := NewServerCreds(serverOptions) + if err != nil { + t.Fatalf("failed to create server creds: %v", err) + } + s := grpc.NewServer(grpc.Creds(serverTLSCreds)) + defer s.Stop() + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to listen: %v", err) + } + defer lis.Close() + addr := fmt.Sprintf("localhost:%v", lis.Addr().(*net.TCPAddr).Port) + pb.RegisterGreeterServer(s, greeterServer{}) + go s.Serve(lis) + clientOptions := &ClientOptions{ + RootOptions: RootCertificateOptions{ + RootCACerts: cs.ClientTrust1, + }, + VType: CertAndHostVerification, + MinVersion: test.clientMinVersion, + MaxVersion: test.clientMaxVersion, + } + clientTLSCreds, err := NewClientCreds(clientOptions) + if err != nil { + t.Fatalf("clientTLSCreds failed to create: %v", err) + } + shouldFail := false + if test.expectError { + shouldFail = true + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + conn, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 1", clientTLSCreds, shouldFail) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + }) + } +} diff --git a/security/advancedtls/advancedtls_test.go b/security/advancedtls/advancedtls_test.go index 7092d46e60fa..afad25e7cb4b 100644 --- a/security/advancedtls/advancedtls_test.go +++ b/security/advancedtls/advancedtls_test.go @@ -91,6 +91,8 @@ func (s) TestClientOptionsConfigErrorCases(t *testing.T) { clientVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Skip default verification and provide no root credentials", @@ -122,6 +124,11 @@ func (s) 
TestClientOptionsConfigErrorCases(t *testing.T) { }, }, }, + { + desc: "Invalid min/max TLS versions", + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS12, + }, } for _, test := range tests { test := test @@ -130,6 +137,8 @@ func (s) TestClientOptionsConfigErrorCases(t *testing.T) { VType: test.clientVType, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: test.MinVersion, + MaxVersion: test.MaxVersion, } _, err := clientOptions.config() if err == nil { @@ -145,6 +154,8 @@ func (s) TestClientOptionsConfigSuccessCases(t *testing.T) { clientVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Use system default if no fields in RootCertificateOptions is specified", @@ -159,6 +170,8 @@ func (s) TestClientOptionsConfigSuccessCases(t *testing.T) { IdentityOptions: IdentityCertificateOptions{ IdentityProvider: fakeProvider{pt: provTypeIdentity}, }, + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS13, }, } for _, test := range tests { @@ -168,6 +181,8 @@ func (s) TestClientOptionsConfigSuccessCases(t *testing.T) { VType: test.clientVType, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: test.MinVersion, + MaxVersion: test.MaxVersion, } clientConfig, err := clientOptions.config() if err != nil { @@ -192,6 +207,8 @@ func (s) TestServerOptionsConfigErrorCases(t *testing.T) { serverVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Skip default verification and provide no root credentials", @@ -229,6 +246,11 @@ func (s) TestServerOptionsConfigErrorCases(t *testing.T) { }, }, }, + { + desc: "Invalid min/max TLS versions", + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS12, + }, } for _, test := range tests { test := test @@ -238,6 +260,8 @@ func (s) 
TestServerOptionsConfigErrorCases(t *testing.T) { RequireClientCert: test.requireClientCert, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: test.MinVersion, + MaxVersion: test.MaxVersion, } _, err := serverOptions.config() if err == nil { @@ -254,6 +278,8 @@ func (s) TestServerOptionsConfigSuccessCases(t *testing.T) { serverVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Use system default if no fields in RootCertificateOptions is specified", @@ -275,6 +301,8 @@ func (s) TestServerOptionsConfigSuccessCases(t *testing.T) { return nil, nil }, }, + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS13, }, } for _, test := range tests { @@ -285,6 +313,8 @@ func (s) TestServerOptionsConfigSuccessCases(t *testing.T) { RequireClientCert: test.requireClientCert, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: test.MinVersion, + MaxVersion: test.MaxVersion, } serverConfig, err := serverOptions.config() if err != nil { From efb2f45956fe079c62722dec2c20f503cad78e2d Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 10 Apr 2023 17:08:17 -0400 Subject: [PATCH 868/998] test/xds: Fix test_grpc import path (#6180) --- test/xds/xds_client_ignore_resource_deletion_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/xds/xds_client_ignore_resource_deletion_test.go b/test/xds/xds_client_ignore_resource_deletion_test.go index 5d1684cdd76d..e59a197fd9ca 100644 --- a/test/xds/xds_client_ignore_resource_deletion_test.go +++ b/test/xds/xds_client_ignore_resource_deletion_test.go @@ -307,7 +307,7 @@ func setupGRPCServerWithModeChangeChannelAndServe(t *testing.T, bootstrapContent }) server := xds.NewGRPCServer(grpc.Creds(insecure.NewCredentials()), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) 
t.Cleanup(server.Stop) - testpb.RegisterTestServiceServer(server, &testService{}) + testgrpc.RegisterTestServiceServer(server, &testService{}) // Serve. go func() { From 8374ff8fbdbb66b2ceca8cd878b9215eac2f18a5 Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Tue, 11 Apr 2023 09:51:09 -0700 Subject: [PATCH 869/998] Export the unwrapResource method, to allow callers outside of the package (#6181) --- xds/internal/xdsclient/xdsresource/type.go | 4 ++-- xds/internal/xdsclient/xdsresource/unmarshal_cds.go | 2 +- xds/internal/xdsclient/xdsresource/unmarshal_eds.go | 2 +- xds/internal/xdsclient/xdsresource/unmarshal_lds.go | 2 +- xds/internal/xdsclient/xdsresource/unmarshal_rds.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go index 1cb49cc1d1b1..0fb3f274ed46 100644 --- a/xds/internal/xdsclient/xdsresource/type.go +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -78,9 +78,9 @@ func IsEndpointsResource(url string) bool { return url == version.V3EndpointsURL } -// unwrapResource unwraps and returns the inner resource if it's in a resource +// UnwrapResource unwraps and returns the inner resource if it's in a resource // wrapper. The original resource is returned if it's not wrapped. -func unwrapResource(r *anypb.Any) (*anypb.Any, error) { +func UnwrapResource(r *anypb.Any) (*anypb.Any, error) { url := r.GetTypeUrl() if url != version.V3ResourceWrapperURL { // Not wrapped. 
diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index e6d7261e59b9..e0bc1589b562 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -40,7 +40,7 @@ import ( const transportSocketName = "envoy.transport_sockets.tls" func unmarshalClusterResource(r *anypb.Any) (string, ClusterUpdate, error) { - r, err := unwrapResource(r) + r, err := UnwrapResource(r) if err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index 89386f6e2680..a3202f8c8100 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -33,7 +33,7 @@ import ( ) func unmarshalEndpointsResource(r *anypb.Any) (string, EndpointsUpdate, error) { - r, err := unwrapResource(r) + r, err := UnwrapResource(r) if err != nil { return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index ffcf9477f514..1cc8a0179582 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -34,7 +34,7 @@ import ( ) func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { - r, err := unwrapResource(r) + r, err := UnwrapResource(r) if err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index be7f6624deda..a082d38c5aa5 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -34,7 +34,7 @@ import ( ) func 
unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, error) { - r, err := unwrapResource(r) + r, err := UnwrapResource(r) if err != nil { return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) } From 6eabd7e1834e47b20f55cbe9d473fc607c693358 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Tue, 11 Apr 2023 11:34:42 -0700 Subject: [PATCH 870/998] server: use least-requests loadbalancer for workers (#6004) --- server.go | 52 ++++++++++++++++++++++------------------------------ 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/server.go b/server.go index 087b9ad7c1f6..76d152a69c8f 100644 --- a/server.go +++ b/server.go @@ -43,7 +43,6 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" @@ -146,7 +145,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan *serverWorkerData } type serverOptions struct { @@ -561,40 +560,38 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. 
- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + data, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + s.handleSingleStream(data) } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +func (s *Server) handleSingleStream(data *serverWorkerData) { + defer data.wg.Done() + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) +} + +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + s.serverWorkerChannel = make(chan *serverWorkerData) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) + go s.serverWorker() } } func (s *Server) stopServerWorkers() { - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) - } + close(s.serverWorkerChannel) } // NewServer creates a gRPC server which has no service registered and has not @@ -946,26 +943,21 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) if s.opts.numServerWorkers > 0 { data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case 
s.serverWorkerChannel <- data: + return default: // If all stream workers are busy, fallback to the default code path. - go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() } + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx From 06de8f851e41a0d976ada664f0c5233a59b1f339 Mon Sep 17 00:00:00 2001 From: Matthew Stevenson <52979934+matthewstevenson88@users.noreply.github.com> Date: Tue, 11 Apr 2023 11:36:21 -0700 Subject: [PATCH 871/998] alts: Add retry loop when making RPC in ALTS's TestFullHandshake. (#6183) --- credentials/alts/alts_test.go | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/credentials/alts/alts_test.go b/credentials/alts/alts_test.go index 7d0129d09cd3..aef9642f844d 100644 --- a/credentials/alts/alts_test.go +++ b/credentials/alts/alts_test.go @@ -31,6 +31,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/alts/internal/handshaker/service" altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" @@ -39,6 +40,12 @@ import ( "google.golang.org/grpc/internal/testutils" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/status" +) + +const ( + defaultTestLongTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond ) type s struct { @@ -301,7 +308,7 @@ func (s) TestCheckRPCVersions(t *testing.T) { // TestFullHandshake performs a full ALTS handshake between a test client and // server, where both client and server offload to a local, fake 
handshaker // service. -func TestFullHandshake(t *testing.T) { +func (s) TestFullHandshake(t *testing.T) { // If GOMAXPROCS is set to less than 2, do not run this test. This test // requires at least 2 goroutines to succeed (one goroutine where a // server listens, another goroutine where a client runs). @@ -334,11 +341,19 @@ func TestFullHandshake(t *testing.T) { t.Fatalf("grpc.Dial(%v) failed: %v", serverAddress, err) } defer conn.Close() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestLongTimeout) defer cancel() c := testgrpc.NewTestServiceClient(conn) - if _, err = c.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.WaitForReady(true)); err != nil { - t.Errorf("c.UnaryCall() failed: %v", err) + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + _, err = c.UnaryCall(ctx, &testpb.SimpleRequest{}) + if err == nil { + break + } + if code := status.Code(err); code == codes.Unavailable { + // The server is not ready yet. Try again. + continue + } + t.Fatalf("c.UnaryCall() failed: %v", err) } // Close open connections to the fake handshaker service. 
From 6237dfe701c72a6acb03e76e4446fdee535818cb Mon Sep 17 00:00:00 2001 From: Mskxn <118117161+Mskxn@users.noreply.github.com> Date: Wed, 12 Apr 2023 03:10:38 +0800 Subject: [PATCH 872/998] internal/stubserver: Close Client Conn in error handling of Start (#6174) --- internal/stubserver/stubserver.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index 9234fc28370e..73c12aed8517 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -133,6 +133,7 @@ func (ss *StubServer) StartClient(dopts ...grpc.DialOption) error { ss.R.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address}}}) } if err := waitForReady(cc); err != nil { + cc.Close() return err } From 89ec9609a5bc758aad139e0ad18be798b3c294a4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 11 Apr 2023 14:51:15 -0700 Subject: [PATCH 873/998] grpc: read the service config channel once instead of twice (#6186) --- clientconn.go | 15 +- test/end2end_test.go | 424 ----------------------- test/service_config_deprecated_test.go | 456 +++++++++++++++++++++++++ 3 files changed, 457 insertions(+), 438 deletions(-) create mode 100644 test/service_config_deprecated_test.go diff --git a/clientconn.go b/clientconn.go index b9cc055075ed..3a76142424db 100644 --- a/clientconn.go +++ b/clientconn.go @@ -244,19 +244,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. 
- select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - scSet = true - } - default: - } - } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } @@ -272,7 +259,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - if cc.dopts.scChan != nil && !scSet { + if cc.dopts.scChan != nil { // Blocking wait for the initial service config. select { case sc, ok := <-cc.dopts.scChan: diff --git a/test/end2end_test.go b/test/end2end_test.go index b4e81cdf4cd9..42dbc1f73e65 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -5027,430 +5027,6 @@ func testSvrWriteStatusEarlyWrite(t *testing.T, e env) { } } -// The following functions with function name ending with TD indicates that they -// should be deleted after old service config API is deprecated and deleted. -func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) { - te := newTest(t, e) - // We write before read. 
- ch := make(chan grpc.ServiceConfig, 1) - te.sc = ch - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - "Failed to dial : context canceled; please retry.", - ) - return te, ch -} - -func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) { - for _, e := range listTestEnv() { - testGetMethodConfigTD(t, e) - } -} - -func testGetMethodConfigTD(t *testing.T, e env) { - te, ch := testServiceConfigSetupTD(t, e) - defer te.tearDown() - - mc1 := grpc.MethodConfig{ - WaitForReady: newBool(true), - Timeout: newDuration(time.Millisecond), - } - mc2 := grpc.MethodConfig{WaitForReady: newBool(false)} - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc1 - m["/grpc.testing.TestService/"] = mc2 - sc := grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - cc := te.clientConn() - tc := testgrpc.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - - m = make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/UnaryCall"] = mc1 - m["/grpc.testing.TestService/"] = mc2 - sc = grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - // Wait for the new service config to propagate. - for { - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - break - } - } - // The following RPCs are expected to become fail-fast. 
- if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) - } -} - -func (s) TestServiceConfigWaitForReadyTD(t *testing.T) { - for _, e := range listTestEnv() { - testServiceConfigWaitForReadyTD(t, e) - } -} - -func testServiceConfigWaitForReadyTD(t *testing.T, e env) { - te, ch := testServiceConfigSetupTD(t, e) - defer te.tearDown() - - // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. - mc := grpc.MethodConfig{ - WaitForReady: newBool(false), - Timeout: newDuration(time.Millisecond), - } - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - cc := te.clientConn() - tc := testgrpc.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } - - // Generate a service config update. - // Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. 
- mc.WaitForReady = newBool(true) - m = make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc = grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - // Wait for the new service config to take effect. - mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") - for { - if !*mc.WaitForReady { - time.Sleep(100 * time.Millisecond) - mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") - continue - } - break - } - // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } -} - -func (s) TestServiceConfigTimeoutTD(t *testing.T) { - for _, e := range listTestEnv() { - testServiceConfigTimeoutTD(t, e) - } -} - -func testServiceConfigTimeoutTD(t *testing.T, e env) { - te, ch := testServiceConfigSetupTD(t, e) - defer te.tearDown() - - // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. - mc := grpc.MethodConfig{ - Timeout: newDuration(time.Hour), - } - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - cc := te.clientConn() - tc := testgrpc.NewTestServiceClient(cc) - // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - cancel() - ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) - if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } - cancel() - - // Generate a service config update. - // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. - mc.Timeout = newDuration(time.Nanosecond) - m = make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc = grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - // Wait for the new service config to take effect. 
- mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") - for { - if *mc.Timeout != time.Nanosecond { - time.Sleep(100 * time.Millisecond) - mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") - continue - } - break - } - - ctx, cancel = context.WithTimeout(context.Background(), time.Hour) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - cancel() - - ctx, cancel = context.WithTimeout(context.Background(), time.Hour) - if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } - cancel() -} - -func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) { - for _, e := range listTestEnv() { - testServiceConfigMaxMsgSizeTD(t, e) - } -} - -func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { - // Setting up values and objects shared across all test cases. - const smallSize = 1 - const largeSize = 1024 - const extraLargeSize = 2048 - - smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) - if err != nil { - t.Fatal(err) - } - largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) - if err != nil { - t.Fatal(err) - } - extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) - if err != nil { - t.Fatal(err) - } - - mc := grpc.MethodConfig{ - MaxReqSize: newInt(extraLargeSize), - MaxRespSize: newInt(extraLargeSize), - } - - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/UnaryCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ - Methods: m, - } - // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
- te1, ch1 := testServiceConfigSetupTD(t, e) - te1.startServer(&testServer{security: e.security}) - defer te1.tearDown() - - ch1 <- sc - tc := testgrpc.NewTestServiceClient(te1.clientConn()) - - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: int32(extraLargeSize), - Payload: smallPayload, - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Test for unary RPC recv. - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for unary RPC send. - req.Payload = extraLargePayload - req.ResponseSize = int32(smallSize) - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for streaming RPC recv. - respParam := []*testpb.ResponseParameters{ - { - Size: int32(extraLargeSize), - }, - } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: smallPayload, - } - stream, err := tc.FullDuplexCall(te1.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) - } - - // Test for streaming RPC send. 
- respParam[0].Size = int32(smallSize) - sreq.Payload = extraLargePayload - stream, err = tc.FullDuplexCall(te1.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) - } - - // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). - te2, ch2 := testServiceConfigSetupTD(t, e) - te2.maxClientReceiveMsgSize = newInt(1024) - te2.maxClientSendMsgSize = newInt(1024) - te2.startServer(&testServer{security: e.security}) - defer te2.tearDown() - ch2 <- sc - tc = testgrpc.NewTestServiceClient(te2.clientConn()) - - // Test for unary RPC recv. - req.Payload = smallPayload - req.ResponseSize = int32(largeSize) - - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for unary RPC send. - req.Payload = largePayload - req.ResponseSize = int32(smallSize) - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for streaming RPC recv. - stream, err = tc.FullDuplexCall(te2.ctx) - respParam[0].Size = int32(largeSize) - sreq.Payload = smallPayload - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) - } - - // Test for streaming RPC send. 
- respParam[0].Size = int32(smallSize) - sreq.Payload = largePayload - stream, err = tc.FullDuplexCall(te2.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) - } - - // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). - te3, ch3 := testServiceConfigSetupTD(t, e) - te3.maxClientReceiveMsgSize = newInt(4096) - te3.maxClientSendMsgSize = newInt(4096) - te3.startServer(&testServer{security: e.security}) - defer te3.tearDown() - ch3 <- sc - tc = testgrpc.NewTestServiceClient(te3.clientConn()) - - // Test for unary RPC recv. - req.Payload = smallPayload - req.ResponseSize = int32(largeSize) - - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) - } - - req.ResponseSize = int32(extraLargeSize) - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for unary RPC send. - req.Payload = largePayload - req.ResponseSize = int32(smallSize) - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) - } - - req.Payload = extraLargePayload - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for streaming RPC recv. 
- stream, err = tc.FullDuplexCall(te3.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - respParam[0].Size = int32(largeSize) - sreq.Payload = smallPayload - - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = _, %v, want ", stream, err) - } - - respParam[0].Size = int32(extraLargeSize) - - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) - } - - // Test for streaming RPC send. - respParam[0].Size = int32(smallSize) - sreq.Payload = largePayload - stream, err = tc.FullDuplexCall(te3.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - sreq.Payload = extraLargePayload - if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) - } -} - // TestMalformedStreamMethod starts a test server and sends an RPC with a // malformed method name. The server should respond with an UNIMPLEMENTED status // code in this case. diff --git a/test/service_config_deprecated_test.go b/test/service_config_deprecated_test.go new file mode 100644 index 000000000000..035f11526f79 --- /dev/null +++ b/test/service_config_deprecated_test.go @@ -0,0 +1,456 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// The following functions with function name ending with TD indicates that they +// should be deleted after old service config API is deprecated and deleted. +func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) { + te := newTest(t, e) + // We write before read. 
+ ch := make(chan grpc.ServiceConfig, 1) + te.sc = ch + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + return te, ch +} + +func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) { + for _, e := range listTestEnv() { + testGetMethodConfigTD(t, e) + } +} + +func testGetMethodConfigTD(t *testing.T, e env) { + te, ch := testServiceConfigSetupTD(t, e) + defer te.tearDown() + + mc1 := grpc.MethodConfig{ + WaitForReady: newBool(true), + Timeout: newDuration(time.Millisecond), + } + mc2 := grpc.MethodConfig{WaitForReady: newBool(false)} + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + // Wait for the new service config to propagate. + for { + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + break + } + } + // The following RPCs are expected to become fail-fast. 
+ if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + } +} + +func (s) TestServiceConfigWaitForReadyTD(t *testing.T) { + for _, e := range listTestEnv() { + testServiceConfigWaitForReadyTD(t, e) + } +} + +func testServiceConfigWaitForReadyTD(t *testing.T, e env) { + te, ch := testServiceConfigSetupTD(t, e) + defer te.tearDown() + + // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. + mc := grpc.MethodConfig{ + WaitForReady: newBool(false), + Timeout: newDuration(time.Millisecond), + } + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + + // Generate a service config update. + // Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. 
+ mc.WaitForReady = newBool(true) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + // Wait for the new service config to take effect. + mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") + for { + if !*mc.WaitForReady { + time.Sleep(100 * time.Millisecond) + mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") + continue + } + break + } + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } +} + +func (s) TestServiceConfigTimeoutTD(t *testing.T) { + for _, e := range listTestEnv() { + testServiceConfigTimeoutTD(t, e) + } +} + +func testServiceConfigTimeoutTD(t *testing.T, e env) { + te, ch := testServiceConfigSetupTD(t, e) + defer te.tearDown() + + // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. + mc := grpc.MethodConfig{ + Timeout: newDuration(time.Hour), + } + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + cancel() + ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() + + // Generate a service config update. + // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. + mc.Timeout = newDuration(time.Nanosecond) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + // Wait for the new service config to take effect. 
+ mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + for { + if *mc.Timeout != time.Nanosecond { + time.Sleep(100 * time.Millisecond) + mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + continue + } + break + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() +} + +func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) { + for _, e := range listTestEnv() { + testServiceConfigMaxMsgSizeTD(t, e) + } +} + +func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { + // Setting up values and objects shared across all test cases. + const smallSize = 1 + const largeSize = 1024 + const extraLargeSize = 2048 + + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) + if err != nil { + t.Fatal(err) + } + + mc := grpc.MethodConfig{ + MaxReqSize: newInt(extraLargeSize), + MaxRespSize: newInt(extraLargeSize), + } + + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
+ te1, ch1 := testServiceConfigSetupTD(t, e) + te1.startServer(&testServer{security: e.security}) + defer te1.tearDown() + + ch1 <- sc + tc := testgrpc.NewTestServiceClient(te1.clientConn()) + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: int32(extraLargeSize), + Payload: smallPayload, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Test for unary RPC recv. + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = extraLargePayload + req.ResponseSize = int32(smallSize) + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + respParam := []*testpb.ResponseParameters{ + { + Size: int32(extraLargeSize), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: smallPayload, + } + stream, err := tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = int32(smallSize) + sreq.Payload = extraLargePayload + stream, err = tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te2, ch2 := testServiceConfigSetupTD(t, e) + te2.maxClientReceiveMsgSize = newInt(1024) + te2.maxClientSendMsgSize = newInt(1024) + te2.startServer(&testServer{security: e.security}) + defer te2.tearDown() + ch2 <- sc + tc = testgrpc.NewTestServiceClient(te2.clientConn()) + + // Test for unary RPC recv. + req.Payload = smallPayload + req.ResponseSize = int32(largeSize) + + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = int32(smallSize) + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + stream, err = tc.FullDuplexCall(te2.ctx) + respParam[0].Size = int32(largeSize) + sreq.Payload = smallPayload + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = int32(smallSize) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te2.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te3, ch3 := testServiceConfigSetupTD(t, e) + te3.maxClientReceiveMsgSize = newInt(4096) + te3.maxClientSendMsgSize = newInt(4096) + te3.startServer(&testServer{security: e.security}) + defer te3.tearDown() + ch3 <- sc + tc = testgrpc.NewTestServiceClient(te3.clientConn()) + + // Test for unary RPC recv. + req.Payload = smallPayload + req.ResponseSize = int32(largeSize) + + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.ResponseSize = int32(extraLargeSize) + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = int32(smallSize) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.Payload = extraLargePayload + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. 
+ stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam[0].Size = int32(largeSize) + sreq.Payload = smallPayload + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want ", stream, err) + } + + respParam[0].Size = int32(extraLargeSize) + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. + respParam[0].Size = int32(smallSize) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + sreq.Payload = extraLargePayload + if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } +} From 5a50b970ccaddcd811b4a0da8e56fe1a2492f7da Mon Sep 17 00:00:00 2001 From: Matthew Stevenson <52979934+matthewstevenson88@users.noreply.github.com> Date: Tue, 11 Apr 2023 14:56:13 -0700 Subject: [PATCH 874/998] =?UTF-8?q?Revert=20"Revert=20"credentials/alts:?= =?UTF-8?q?=20defer=20ALTS=20stream=20creation=20until=20handshake=20?= =?UTF-8?q?=E2=80=A6"=20(#6179)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../alts/internal/handshaker/handshaker.go | 54 ++++++++++----- .../internal/handshaker/handshaker_test.go | 66 +++++++++++++++++++ 2 files changed, 102 insertions(+), 18 deletions(-) diff --git 
a/credentials/alts/internal/handshaker/handshaker.go b/credentials/alts/internal/handshaker/handshaker.go index 7b953a520e5b..150ae5576769 100644 --- a/credentials/alts/internal/handshaker/handshaker.go +++ b/credentials/alts/internal/handshaker/handshaker.go @@ -138,7 +138,7 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { // and server options (server options struct does not exist now. When // caller can provide endpoints, it should be created. -// altsHandshaker is used to complete a ALTS handshaking between client and +// altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. type altsHandshaker struct { @@ -146,6 +146,8 @@ type altsHandshaker struct { stream altsgrpc.HandshakerService_DoHandshakeClient // the connection to the peer. conn net.Conn + // a virtual connection to the ALTS handshaker service. + clientConn *grpc.ClientConn // client handshake options. clientOpts *ClientHandshakerOptions // server handshake options. @@ -154,39 +156,33 @@ type altsHandshaker struct { side core.Side } -// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewClientHandshaker creates a core.Handshaker that performs a client-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. 
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, clientOpts: opts, side: core.ClientSide, }, nil } -// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewServerHandshaker creates a core.Handshaker that performs a server-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, serverOpts: opts, side: core.ServerSide, }, nil } -// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once +// ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -198,6 +194,16 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. 
+ if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + // Create target identities from service account list. targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) for _, account := range h.clientOpts.TargetServiceAccounts { @@ -229,7 +235,7 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return conn, authInfo, nil } -// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once +// ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { @@ -241,6 +247,16 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. + if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + p := make([]byte, frameLimit) n, err := h.conn.Read(p) if err != nil { @@ -371,5 +387,7 @@ func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []b // Close terminates the Handshaker. It should be called when the caller obtains // the secure connection. 
func (h *altsHandshaker) Close() { - h.stream.CloseSend() + if h.stream != nil { + h.stream.CloseSend() + } } diff --git a/credentials/alts/internal/handshaker/handshaker_test.go b/credentials/alts/internal/handshaker/handshaker_test.go index 14a0721054f2..49f07caf8deb 100644 --- a/credentials/alts/internal/handshaker/handshaker_test.go +++ b/credentials/alts/internal/handshaker/handshaker_test.go @@ -25,6 +25,8 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" grpc "google.golang.org/grpc" core "google.golang.org/grpc/credentials/alts/internal" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" @@ -283,3 +285,67 @@ func (s) TestPeerNotResponding(t *testing.T) { t.Errorf("ClientHandshake() = %v, want %v", got, want) } } + +func (s) TestNewClientHandshaker(t *testing.T) { + conn := testutil.NewTestConn(nil, nil) + clientConn := &grpc.ClientConn{} + opts := &ClientHandshakerOptions{} + hs, err := NewClientHandshaker(context.Background(), clientConn, conn, opts) + if err != nil { + t.Errorf("NewClientHandshaker returned unexpected error: %v", err) + } + expectedHs := &altsHandshaker{ + stream: nil, + conn: conn, + clientConn: clientConn, + clientOpts: opts, + serverOpts: nil, + side: core.ClientSide, + } + cmpOpts := []cmp.Option{ + cmp.AllowUnexported(altsHandshaker{}), + cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), + } + if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ + t.Errorf("NewClientHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) + } + if hs.(*altsHandshaker).stream != nil { + t.Errorf("NewClientHandshaker() returned handshaker with non-nil stream") + } + if hs.(*altsHandshaker).clientConn != clientConn { + t.Errorf("NewClientHandshaker() returned handshaker with unexpected clientConn") + } + hs.Close() +} + +func (s) TestNewServerHandshaker(t *testing.T) { + conn := testutil.NewTestConn(nil, nil) + clientConn := &grpc.ClientConn{} + opts := &ServerHandshakerOptions{} + hs, err := NewServerHandshaker(context.Background(), clientConn, conn, opts) + if err != nil { + t.Errorf("NewServerHandshaker returned unexpected error: %v", err) + } + expectedHs := &altsHandshaker{ + stream: nil, + conn: conn, + clientConn: clientConn, + clientOpts: nil, + serverOpts: opts, + side: core.ServerSide, + } + cmpOpts := []cmp.Option{ + cmp.AllowUnexported(altsHandshaker{}), + cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), + } + if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ + t.Errorf("NewServerHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) + } + if hs.(*altsHandshaker).stream != nil { + t.Errorf("NewServerHandshaker() returned handshaker with non-nil stream") + } + if hs.(*altsHandshaker).clientConn != clientConn { + t.Errorf("NewServerHandshaker() returned handshaker with unexpected clientConn") + } + hs.Close() +} From fe72db9589696a625af72117a600ba191227b7c5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 12 Apr 2023 09:30:30 -0700 Subject: [PATCH 875/998] testing: add helpers to start test service, and retrieve port (#6187) --- internal/stubserver/stubserver.go | 19 +++++++ internal/testutils/parse_port.go | 39 +++++++++++++ test/xds/xds_client_ack_nack_test.go | 7 ++- test/xds/xds_client_affinity_test.go | 8 ++- test/xds/xds_client_federation_test.go | 14 +++-- ...ds_client_ignore_resource_deletion_test.go | 13 +++-- test/xds/xds_client_integration_test.go | 37 ++----------- test/xds/xds_client_outlier_detection_test.go | 43 +++++++-------- test/xds/xds_client_retry_test.go | 18 +++--- .../xds_rls_clusterspecifier_plugin_test.go | 8 ++- test/xds/xds_security_config_nack_test.go | 7 ++- .../clusterimpl/tests/balancer_test.go | 25 ++------- .../clusterresolver/e2e_test/balancer_test.go | 55 ++++--------------- .../clusterresolver/e2e_test/eds_impl_test.go | 17 +----- 14 files changed, 140 insertions(+), 170 deletions(-) create mode 100644 internal/testutils/parse_port.go diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index 73c12aed8517..012021dc03e2 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -24,6 +24,7 @@ import ( "context" "fmt" "net" + "testing" "time" "google.golang.org/grpc" @@ -179,3 +180,21 @@ func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { } return g } + +// StartTestService spins up a stub server exposing the TestService on a local +// port. 
If the passed in server is nil, a stub server that implements only the +// EmptyCall and UnaryCall RPCs is started. +func StartTestService(t *testing.T, server *StubServer) *StubServer { + if server == nil { + server = &StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + UnaryCallF: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + } + server.StartServer() + + t.Logf("Started test service backend at %q", server.Address) + return server +} diff --git a/internal/testutils/parse_port.go b/internal/testutils/parse_port.go new file mode 100644 index 000000000000..c633af06a7db --- /dev/null +++ b/internal/testutils/parse_port.go @@ -0,0 +1,39 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package testutils + +import ( + "net" + "strconv" + "testing" +) + +// ParsePort returns the port from the given address string, as a uint32. 
+func ParsePort(t *testing.T, addr string) uint32 { + t.Helper() + + _, p, err := net.SplitHostPort(addr) + if err != nil { + t.Fatalf("Invalid serving address: %v", err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("Invalid serving port: %v", err) + } + return uint32(port) +} diff --git a/test/xds/xds_client_ack_nack_test.go b/test/xds/xds_client_ack_nack_test.go index 793bdc2fa624..87ff0077cd70 100644 --- a/test/xds/xds_client_ack_nack_test.go +++ b/test/xds/xds_client_ack_nack_test.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" @@ -123,15 +124,15 @@ func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) { }) defer cleanup1() - port, cleanup2 := startTestService(t, nil) - defer cleanup2() + server := stubserver.StartTestService(t, nil) + defer server.Stop() const serviceName = "my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/test/xds/xds_client_affinity_test.go b/test/xds/xds_client_affinity_test.go index 159d295e1104..7fff019fa526 100644 --- a/test/xds/xds_client_affinity_test.go +++ b/test/xds/xds_client_affinity_test.go @@ -26,6 +26,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" @@ 
-91,15 +93,15 @@ func (s) TestClientSideAffinitySanityCheck(t *testing.T) { managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() - port, cleanup2 := startTestService(t, nil) - defer cleanup2() + server := stubserver.StartTestService(t, nil) + defer server.Stop() const serviceName = "my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }) // Replace RDS and CDS resources with ringhash config, but keep the resource diff --git a/test/xds/xds_client_federation_test.go b/test/xds/xds_client_federation_test.go index d94b2f40fa26..1aebcd226104 100644 --- a/test/xds/xds_client_federation_test.go +++ b/test/xds/xds_client_federation_test.go @@ -30,6 +30,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/resolver" @@ -89,8 +91,8 @@ func (s) TestClientSideFederation(t *testing.T) { if err != nil { t.Fatalf("Failed to create xDS resolver for testing: %v", err) } - port, cleanup := startTestService(t, nil) - defer cleanup() + server := stubserver.StartTestService(t, nil) + defer server.Stop() const serviceName = "my-service-client-side-xds" // LDS is old style name. @@ -115,7 +117,7 @@ func (s) TestClientSideFederation(t *testing.T) { NodeID: nodeID, // This has only RDS and EDS. 
Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, cdsName)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, "localhost", []uint32{port})}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, SkipValidation: true, } @@ -161,15 +163,15 @@ func (s) TestFederation_UnknownAuthorityInDialTarget(t *testing.T) { managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() - port, cleanup2 := startTestService(t, nil) - defer cleanup2() + server := stubserver.StartTestService(t, nil) + defer server.Stop() const serviceName = "my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/test/xds/xds_client_ignore_resource_deletion_test.go b/test/xds/xds_client_ignore_resource_deletion_test.go index e59a197fd9ca..f5df318a943f 100644 --- a/test/xds/xds_client_ignore_resource_deletion_test.go +++ b/test/xds/xds_client_ignore_resource_deletion_test.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/internal/testutils/xds/e2e" @@ -89,11 +90,11 @@ var ( // // Resource deletion is only applicable to Listener and Cluster resources. 
func (s) TestIgnoreResourceDeletionOnClient(t *testing.T) { - port1, cleanup := startTestService(t, nil) - t.Cleanup(cleanup) + server1 := stubserver.StartTestService(t, nil) + t.Cleanup(server1.Stop) - port2, cleanup := startTestService(t, nil) - t.Cleanup(cleanup) + server2 := stubserver.StartTestService(t, nil) + t.Cleanup(server2.Stop) initialResourceOnServer := func(nodeID string) e2e.UpdateOptions { return e2e.UpdateOptions{ @@ -105,8 +106,8 @@ func (s) TestIgnoreResourceDeletionOnClient(t *testing.T) { e2e.DefaultCluster(cdsName2, edsName2, e2e.SecurityLevelNone), }, Endpoints: []*endpointpb.ClusterLoadAssignment{ - e2e.DefaultEndpoint(edsName1, "localhost", []uint32{port1}), - e2e.DefaultEndpoint(edsName2, "localhost", []uint32{port2}), + e2e.DefaultEndpoint(edsName1, "localhost", []uint32{testutils.ParsePort(t, server1.Address)}), + e2e.DefaultEndpoint(edsName2, "localhost", []uint32{testutils.ParsePort(t, server2.Address)}), }, SkipValidation: true, } diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go index a431ab5f3334..e03c937f816e 100644 --- a/test/xds/xds_client_integration_test.go +++ b/test/xds/xds_client_integration_test.go @@ -21,8 +21,6 @@ package xds_test import ( "context" "fmt" - "net" - "strconv" "testing" "time" @@ -30,6 +28,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" testgrpc "google.golang.org/grpc/interop/grpc_testing" @@ -49,47 +48,19 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. ) -// startTestService spins up a server exposing the TestService on a local port. 
-// -// Returns the following: -// - the port the server is listening on -// - cleanup function to be invoked by the tests when done -func startTestService(t *testing.T, server *stubserver.StubServer) (uint32, func()) { - if server == nil { - server = &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - UnaryCallF: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil - }, - } - } - server.StartServer() - - _, p, err := net.SplitHostPort(server.Address) - if err != nil { - t.Fatalf("invalid serving address for stub server: %v", err) - } - port, err := strconv.ParseUint(p, 10, 32) - if err != nil { - t.Fatalf("invalid serving port for stub server: %v", err) - } - t.Logf("Started test service backend at %q", server.Address) - return uint32(port), server.Stop -} - func (s) TestClientSideXDS(t *testing.T) { managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() - port, cleanup2 := startTestService(t, nil) - defer cleanup2() + server := stubserver.StartTestService(t, nil) + defer server.Stop() const serviceName = "my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go index d0afcca78ae6..fa08e9be9a3a 100644 --- a/test/xds/xds_client_outlier_detection_test.go +++ b/test/xds/xds_client_outlier_detection_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" + 
"google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" @@ -52,15 +53,19 @@ func (s) TestOutlierDetection_NoopConfig(t *testing.T) { managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() - port, cleanup2 := startTestService(t, nil) - defer cleanup2() + server := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + server.StartServer() + t.Logf("Started test service backend at %q", server.Address) + defer server.Stop() const serviceName = "my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -170,31 +175,21 @@ func (s) TestOutlierDetectionWithOutlier(t *testing.T) { defer cleanup() // Working backend 1. - backend1 := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil - }, - } - port1, cleanup1 := startTestService(t, backend1) - defer cleanup1() + backend1 := stubserver.StartTestService(t, nil) + port1 := testutils.ParsePort(t, backend1.Address) + defer backend1.Stop() // Working backend 2. - backend2 := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil - }, - } - port2, cleanup2 := startTestService(t, backend2) - defer cleanup2() + backend2 := stubserver.StartTestService(t, nil) + port2 := testutils.ParsePort(t, backend2.Address) + defer backend2.Stop() // Backend 3 that will always return an error and eventually ejected. 
- backend3 := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return nil, errors.New("some error") - }, - } - port3, cleanup3 := startTestService(t, backend3) - defer cleanup3() + backend3 := stubserver.StartTestService(t, &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return nil, errors.New("some error") }, + }) + port3 := testutils.ParsePort(t, backend3.Address) + defer backend3.Stop() const serviceName = "my-service-client-side-xds" resources := clientResourcesMultipleBackendsAndOD(e2e.ResourceParams{ diff --git a/test/xds/xds_client_retry_test.go b/test/xds/xds_client_retry_test.go index 6af0459af7c8..d7cb7b4bfb3c 100644 --- a/test/xds/xds_client_retry_test.go +++ b/test/xds/xds_client_retry_test.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/wrapperspb" @@ -39,7 +40,11 @@ import ( func (s) TestClientSideRetry(t *testing.T) { ctr := 0 errs := []codes.Code{codes.ResourceExhausted} - ss := &stubserver.StubServer{ + + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + server := stubserver.StartTestService(t, &stubserver.StubServer{ EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { defer func() { ctr++ }() if ctr < len(errs) { @@ -47,20 +52,15 @@ func (s) TestClientSideRetry(t *testing.T) { } return &testpb.Empty{}, nil }, - } - - managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) - defer cleanup1() - - port, cleanup2 := startTestService(t, ss) - defer cleanup2() + }) + defer server.Stop() const serviceName = 
"my-service-client-side-xds" resources := e2e.DefaultClientResources(e2e.ResourceParams{ DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go index a94e3f2bcedf..bca198081a7c 100644 --- a/test/xds/xds_rls_clusterspecifier_plugin_test.go +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/rls" "google.golang.org/grpc/internal/testutils/xds/e2e" @@ -112,8 +113,9 @@ func testRLSinxDS(t *testing.T, lbPolicy e2e.LoadBalancingPolicy) { // RLS Balancer that communicates to this set up fake RLS Server. 
managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() - port, cleanup2 := startTestService(t, nil) - defer cleanup2() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() lis := testutils.NewListenerWrapper(t, nil) rlsServer, rlsRequestCh := rls.SetupFakeRLSServer(t, lis) @@ -129,7 +131,7 @@ func testRLSinxDS(t *testing.T, lbPolicy e2e.LoadBalancingPolicy) { DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }, rlsProto) diff --git a/test/xds/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go index f2974d47c181..1dc3250935bf 100644 --- a/test/xds/xds_security_config_nack_test.go +++ b/test/xds/xds_security_config_nack_test.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" @@ -323,8 +324,8 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() - port, cleanup2 := startTestService(t, nil) - defer cleanup2() + server := stubserver.StartTestService(t, nil) + defer server.Stop() // This creates a `Cluster` resource with a security config which // refers to `e2e.ClientSideCertProviderInstance` for both root and @@ -333,7 +334,7 @@ func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: port, + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelMTLS, }) resources.Clusters[0].TransportSocket = test.securityConfig diff --git 
a/xds/internal/balancer/clusterimpl/tests/balancer_test.go b/xds/internal/balancer/clusterimpl/tests/balancer_test.go index 02f2389c7e70..cf0e7b0ce842 100644 --- a/xds/internal/balancer/clusterimpl/tests/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/tests/balancer_test.go @@ -21,8 +21,6 @@ package clusterimpl_test import ( "context" "fmt" - "net" - "strconv" "strings" "testing" "time" @@ -32,6 +30,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/status" @@ -68,22 +67,8 @@ func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { defer mgmtServerCleanup() // Start a server backend exposing the test service. - backend := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - } - backend.StartServer() - defer backend.Stop() - - // Extract the host and port where the server backend is running. - _, p, err := net.SplitHostPort(backend.Address) - if err != nil { - t.Fatalf("Invalid serving address for server backend: %v", err) - } - port, err := strconv.ParseUint(p, 10, 32) - if err != nil { - t.Fatalf("Invalid serving port for server backend: %v", err) - } - t.Logf("Started server backend at %q", backend.Address) + server := stubserver.StartTestService(t, nil) + defer server.Stop() // Configure the xDS management server with default resources. Override the // default cluster to include an LRS server config pointing to self. 
@@ -92,7 +77,7 @@ func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { DialTarget: serviceName, NodeID: nodeID, Host: "localhost", - Port: uint32(port), + Port: testutils.ParsePort(t, server.Address), SecLevel: e2e.SecurityLevelNone, }) resources.Clusters[0].LrsServer = &v3corepb.ConfigSource{ @@ -129,7 +114,7 @@ func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ ClusterName: "endpoints-" + serviceName, Host: "localhost", - Ports: []uint32{uint32(port)}, + Ports: []uint32{testutils.ParsePort(t, server.Address)}, DropPercents: map[string]int{"test-drop-everything": 100}, }), } diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go index 26cc8986bc0c..7eaf29e5e1fa 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -19,8 +19,6 @@ package e2e_test import ( "context" "fmt" - "net" - "strconv" "strings" "testing" "time" @@ -79,25 +77,14 @@ func (s) TestErrorFromParentLB_ConnectionError(t *testing.T) { defer cleanup() // Start a test backend and extract its host and port. - backend := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - } - backend.StartServer() - defer backend.Stop() - _, p, err := net.SplitHostPort(backend.Address) - if err != nil { - t.Fatalf("Failed to split test backend address %q: %v", backend.Address, err) - } - port, err := strconv.ParseUint(p, 10, 32) - if err != nil { - t.Fatalf("Failed to parse test backend port %q: %v", backend.Address, err) - } + server := stubserver.StartTestService(t, nil) + defer server.Stop() // Configure cluster and endpoints resources in the management server. 
resources := e2e.UpdateOptions{ NodeID: nodeID, Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, SkipValidation: true, } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -197,25 +184,14 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { defer cleanup() // Start a test backend and extract its host and port. - backend := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - } - backend.StartServer() - defer backend.Stop() - _, p, err := net.SplitHostPort(backend.Address) - if err != nil { - t.Fatalf("Failed to split test backend address %q: %v", backend.Address, err) - } - port, err := strconv.ParseUint(p, 10, 32) - if err != nil { - t.Fatalf("Failed to parse test backend port %q: %v", backend.Address, err) - } + server := stubserver.StartTestService(t, nil) + defer server.Stop() // Configure cluster and endpoints resources in the management server. 
resources := e2e.UpdateOptions{ NodeID: nodeID, Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, SkipValidation: true, } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -303,7 +279,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { resources = e2e.UpdateOptions{ NodeID: nodeID, Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, SkipValidation: true, } if err := managementServer.Update(ctx, resources); err != nil { @@ -371,25 +347,14 @@ func (s) TestEDSResourceRemoved(t *testing.T) { defer cleanup() // Start a test backend and extract its host and port. - backend := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - } - backend.StartServer() - defer backend.Stop() - _, p, err := net.SplitHostPort(backend.Address) - if err != nil { - t.Fatalf("Failed to split test backend address %q: %v", backend.Address, err) - } - port, err := strconv.ParseUint(p, 10, 32) - if err != nil { - t.Fatalf("Failed to parse test backend port %q: %v", backend.Address, err) - } + server := stubserver.StartTestService(t, nil) + defer server.Stop() // Configure cluster and endpoints resources in the management server. 
resources := e2e.UpdateOptions{ NodeID: nodeID, Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(port)})}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, SkipValidation: true, } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 75efb758b4fc..7aa951bfec50 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -20,8 +20,6 @@ import ( "context" "errors" "fmt" - "net" - "strconv" "strings" "testing" "time" @@ -33,6 +31,7 @@ import ( "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" rrutil "google.golang.org/grpc/internal/testutils/roundrobin" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/resolver" @@ -78,23 +77,11 @@ func backendAddressesAndPorts(t *testing.T, servers []*stubserver.StubServer) ([ ports := make([]uint32, len(servers)) for i := 0; i < len(servers); i++ { addrs[i] = resolver.Address{Addr: servers[i].Address} - ports[i] = extractPortFromAddress(t, servers[i].Address) + ports[i] = testutils.ParsePort(t, servers[i].Address) } return addrs, ports } -func extractPortFromAddress(t *testing.T, address string) uint32 { - _, p, err := net.SplitHostPort(address) - if err != nil { - t.Fatalf("invalid server address %q: %v", address, err) - } - port, err := strconv.ParseUint(p, 10, 32) - if err != nil { - t.Fatalf("invalid server address %q: %v", address, err) - } - 
return uint32(port) -} - func startTestServiceBackends(t *testing.T, numBackends int) ([]*stubserver.StubServer, func()) { servers := make([]*stubserver.StubServer, numBackends) for i := 0; i < numBackends; i++ { From d90621f9e920aa6fdb554252f7f476e0b7791c1d Mon Sep 17 00:00:00 2001 From: ethanvc Date: Fri, 14 Apr 2023 09:31:29 +0800 Subject: [PATCH 876/998] remove the unnecessary call to ResetTimer and StopTimer (#6185) --- benchmark/primitives/code_string_test.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/benchmark/primitives/code_string_test.go b/benchmark/primitives/code_string_test.go index 51b1ee48cf3c..095d0045c188 100644 --- a/benchmark/primitives/code_string_test.go +++ b/benchmark/primitives/code_string_test.go @@ -87,49 +87,39 @@ func (i codeBench) StringUsingMap() string { } func BenchmarkCodeStringStringer(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 17)) _ = c.String() } - b.StopTimer() } func BenchmarkCodeStringMap(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 17)) _ = c.StringUsingMap() } - b.StopTimer() } // codes.Code.String() does a switch. func BenchmarkCodeStringSwitch(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codes.Code(uint32(i % 17)) _ = c.String() } - b.StopTimer() } // Testing all codes (0<=c<=16) and also one overflow (17). func BenchmarkCodeStringStringerWithOverflow(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 18)) _ = c.String() } - b.StopTimer() } // Testing all codes (0<=c<=16) and also one overflow (17). 
func BenchmarkCodeStringSwitchWithOverflow(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codes.Code(uint32(i % 18)) _ = c.String() } - b.StopTimer() } From eab9e20d1bbecbdcdf442c5f6669ad731a160c0f Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Fri, 14 Apr 2023 18:26:20 -0400 Subject: [PATCH 877/998] test/kokoro: increase PSM Security test timeout to 4h (#6193) --- test/kokoro/psm-security.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/psm-security.cfg b/test/kokoro/psm-security.cfg index 7d86de68633d..040efe9d707e 100644 --- a/test/kokoro/psm-security.cfg +++ b/test/kokoro/psm-security.cfg @@ -2,7 +2,7 @@ # Location of the continuous shell script in repository. build_file: "grpc-go/test/kokoro/psm-security.sh" -timeout_mins: 180 +timeout_mins: 240 action { define_artifacts { From b91b8842e9437f711267bfc227c454de91e7cf15 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 17 Apr 2023 20:20:22 -0400 Subject: [PATCH 878/998] gcp/observability: Have o11y module point to grpc 1.54 and opencensus 1.0.0 (#6209) --- gcp/observability/go.mod | 6 ++---- gcp/observability/go.sum | 2 ++ interop/observability/go.mod | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 2dd34c605ffe..e52c78dc4122 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -10,8 +10,8 @@ require ( go.opencensus.io v0.24.0 golang.org/x/oauth2 v0.6.0 google.golang.org/api v0.110.0 - google.golang.org/grpc v1.53.0 - google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/stats/opencensus v1.0.0 ) require ( @@ -39,5 +39,3 @@ require ( ) replace google.golang.org/grpc => ../.. 
- -replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index f6008bb2ae1d..cf60c66c8682 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1305,6 +1305,8 @@ google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opencensus v1.0.0 h1:evSYcRZaSToQp+borzWE52+03joezZeXcKJvZDfkUJA= +google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/interop/observability/go.mod b/interop/observability/go.mod index 727f46f29db9..e37b2dff79e2 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -3,7 +3,7 @@ module google.golang.org/grpc/interop/observability go 1.17 require ( - google.golang.org/grpc v1.53.0 + google.golang.org/grpc v1.54.0 google.golang.org/grpc/gcp/observability v0.0.0-20230214181353-f4feddb37523 ) @@ -35,7 +35,7 @@ require ( google.golang.org/api v0.110.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae // indirect + google.golang.org/grpc/stats/opencensus v1.0.0 // indirect 
google.golang.org/protobuf v1.30.0 // indirect ) From aa8c137da98cbb59ddcf58e7c7e130cdd8f4fec7 Mon Sep 17 00:00:00 2001 From: Luwei Ge Date: Tue, 18 Apr 2023 10:27:51 -0700 Subject: [PATCH 879/998] authz: add audit logging APIs (#6158) --- authz/audit_logger.go | 126 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 authz/audit_logger.go diff --git a/authz/audit_logger.go b/authz/audit_logger.go new file mode 100644 index 000000000000..992d66054fbb --- /dev/null +++ b/authz/audit_logger.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package authz + +import ( + "encoding/json" + "sync" +) + +// loggerBuilderRegistry holds a map of audit logger builders and a mutex +// to facilitate thread-safe reading/writing operations. +type loggerBuilderRegistry struct { + mu sync.Mutex + builders map[string]AuditLoggerBuilder +} + +var ( + registry = loggerBuilderRegistry{ + builders: make(map[string]AuditLoggerBuilder), + } +) + +// RegisterAuditLoggerBuilder registers the builder in a global map +// using b.Name() as the key. +// +// This should only be called during initialization time (i.e. in an init() +// function). If multiple builders are registered with the same name, +// the one registered last will take effect. 
+func RegisterAuditLoggerBuilder(b AuditLoggerBuilder) { + registry.mu.Lock() + defer registry.mu.Unlock() + registry.builders[b.Name()] = b +} + +// GetAuditLoggerBuilder returns a builder with the given name. +// It returns nil if the builder is not found in the registry. +func GetAuditLoggerBuilder(name string) AuditLoggerBuilder { + registry.mu.Lock() + defer registry.mu.Unlock() + return registry.builders[name] +} + +// AuditEvent contains information passed to the audit logger as part of an +// audit logging event. +type AuditEvent struct { + // FullMethodName is the full method name of the audited RPC, in the format + // of "/pkg.Service/Method". For example, "/helloworld.Greeter/SayHello". + FullMethodName string + // Principal is the identity of the caller. Currently it will only be + // available in certificate-based TLS authentication. + Principal string + // PolicyName is the authorization policy name or the xDS RBAC filter name. + PolicyName string + // MatchedRule is the matched rule or policy name in the xDS RBAC filter. + // It will be empty if there is no match. + MatchedRule string + // Authorized indicates whether the audited RPC is authorized or not. + Authorized bool +} + +// AuditLoggerConfig represents an opaque data structure holding an audit +// logger configuration. Concrete types representing configuration of specific +// audit loggers must embed this interface to implement it. +type AuditLoggerConfig interface { + auditLoggerConfig() +} + +// AuditLogger is the interface to be implemented by audit loggers. +// +// An audit logger is a logger instance that can be configured via the +// authorization policy API or xDS HTTP RBAC filters. When the authorization +// decision meets the condition for audit, all the configured audit loggers' +// Log() method will be invoked to log that event. +// +// TODO(lwge): Change the link to the merged gRFC once it's ready. 
+// Please refer to https://github.com/grpc/proposal/pull/346 for more details +// about audit logging. +type AuditLogger interface { + // Log performs audit logging for the provided audit event. + // + // This method is invoked in the RPC path and therefore implementations + // must not block. + Log(*AuditEvent) +} + +// AuditLoggerBuilder is the interface to be implemented by audit logger +// builders that are used at runtime to configure and instantiate audit loggers. +// +// Users who want to implement their own audit logging logic should +// implement this interface, along with the AuditLogger interface, and register +// it by calling RegisterAuditLoggerBuilder() at init time. +// +// TODO(lwge): Change the link to the merged gRFC once it's ready. +// Please refer to https://github.com/grpc/proposal/pull/346 for more details +// about audit logging. +type AuditLoggerBuilder interface { + // ParseAuditLoggerConfig parses the given JSON bytes into a structured + // logger config this builder can use to build an audit logger. + ParseAuditLoggerConfig(config json.RawMessage) (AuditLoggerConfig, error) + // Build builds an audit logger with the given logger config. + // This will only be called with valid configs returned from + // ParseAuditLoggerConfig() and any runtime issues such as failing to + // create a file should be handled by the logger implementation instead of + // failing the logger instantiation. So implementers need to make sure it + // can return a logger without error at this stage. + Build(AuditLoggerConfig) AuditLogger + // Name returns the name of logger built by this builder. + // This is used to register and pick the builder. 
+ Name() string +} From 875c97a94dca8093bf01ff2fef490fbdd576373d Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 18 Apr 2023 14:13:24 -0400 Subject: [PATCH 880/998] examples/features/observability: use observability module v1.0.0 (#6210) --- examples/features/observability/go.mod | 16 ++++++------ examples/features/observability/go.sum | 34 +++++++++++++------------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/examples/features/observability/go.mod b/examples/features/observability/go.mod index e6d69f646bcb..657aca310d1e 100644 --- a/examples/features/observability/go.mod +++ b/examples/features/observability/go.mod @@ -5,15 +5,15 @@ go 1.17 require ( google.golang.org/grpc v1.54.0 google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc - google.golang.org/grpc/gcp/observability v0.0.0-20230331001051-113d75fb4561 + google.golang.org/grpc/gcp/observability v1.0.0 ) require ( - cloud.google.com/go v0.109.0 // indirect + cloud.google.com/go v0.110.0 // indirect cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/logging v1.6.1 // indirect - cloud.google.com/go/longrunning v0.4.0 // indirect + cloud.google.com/go/logging v1.7.0 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect cloud.google.com/go/monitoring v1.12.0 // indirect cloud.google.com/go/trace v1.8.0 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect @@ -29,13 +29,13 @@ require ( github.com/prometheus/prometheus v2.5.0+incompatible // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.5.0 // indirect + golang.org/x/oauth2 v0.6.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - google.golang.org/api v0.109.0 // indirect + google.golang.org/api v0.110.0 // 
indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa // indirect - google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/grpc/stats/opencensus v1.0.0 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/examples/features/observability/go.sum b/examples/features/observability/go.sum index d6afd6f4a69d..a9ca780544dd 100644 --- a/examples/features/observability/go.sum +++ b/examples/features/observability/go.sum @@ -25,8 +25,8 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= -cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -39,11 +39,11 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.8.0 
h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= -cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/longrunning v0.4.0 h1:v+X4EwhHl6xE+TG1XgXj4T1XpKKs7ZevcAJ3FOu0YmY= -cloud.google.com/go/longrunning v0.4.0/go.mod h1:eF3Qsw58iX/bkKtVjMTYpH0LRjQ2goDkjkNQTlzq/ZM= +cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= @@ -324,8 +324,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -498,8 +498,8 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= -google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -568,8 +568,8 @@ google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa h1:qQPhfbPO23fwm/9lQr91L1u62Zo6cm+zI+slZT+uf+o= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto 
v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -600,10 +600,10 @@ google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc h1:H58v4RmBwciuKpwU6NFUn3w2hPZNL78HedaJUitCdpI= google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc/go.mod h1:EXfxRt8PpWkTFBAXaWXB0Xgb1S/FFBXvFRry0nr2bHQ= -google.golang.org/grpc/gcp/observability v0.0.0-20230331001051-113d75fb4561 h1:8JM4LQVl9mzgGRprxdWVXGhSZ2QMzG/AcAAOp7vYruU= -google.golang.org/grpc/gcp/observability v0.0.0-20230331001051-113d75fb4561/go.mod h1:53Tjq5tV93OKCXBanbDNe1ZE112hGrkMi2jeC0ArL0A= -google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae h1:40UWCQ40A2NTDabsmbZNznFf9SUftDlaBASj7OCdKDY= -google.golang.org/grpc/stats/opencensus v0.0.0-20230330193705-4a12595692ae/go.mod h1:qPsHQZhltTPryCUC0naykSpbIJDodlCLM/vNa607CrE= +google.golang.org/grpc/gcp/observability v1.0.0 h1:YkGqlAFEFM69+GDI8MnuSV4RTvBWkx4AKealZ+yGizY= +google.golang.org/grpc/gcp/observability v1.0.0/go.mod h1:SmWxljYyQOJWPALwV6WhM3PdbH7sQsrCYIzlRy2PY00= +google.golang.org/grpc/stats/opencensus v1.0.0 h1:evSYcRZaSToQp+borzWE52+03joezZeXcKJvZDfkUJA= +google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 0ed709c4a71d08e5877b4bfb41c2747b0d1c3240 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 18 Apr 2023 14:38:44 -0700 Subject: [PATCH 881/998] Change version to 1.56.0-dev (#6213) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 99487240565d..310603bed415 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.55.0-dev" +const Version = "1.56.0-dev" From ebeda756bcdb3fca4053a7947c4e2f248f1225c0 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 18 Apr 2023 16:53:20 -0700 Subject: [PATCH 882/998] tests: defalke TestTimerAndWatchStateOnSendCallback (#6206) --- xds/internal/testutils/resource_watcher.go | 18 +++++++----- xds/internal/xdsclient/authority_test.go | 34 +++++++--------------- 2 files changed, 21 insertions(+), 31 deletions(-) diff --git a/xds/internal/testutils/resource_watcher.go b/xds/internal/testutils/resource_watcher.go index 304f40de9d5a..aac9c1464774 100644 --- a/xds/internal/testutils/resource_watcher.go +++ b/xds/internal/testutils/resource_watcher.go @@ -35,37 +35,41 @@ type TestResourceWatcher struct { ResourceDoesNotExistCh chan struct{} } -// OnUpdate is invoked by the xDS client to report an update on the resource +// OnUpdate is invoked by the xDS client to report the latest update on the resource // being watched. func (w *TestResourceWatcher) OnUpdate(data xdsresource.ResourceData) { select { - case w.UpdateCh <- &data: + case <-w.UpdateCh: default: } + w.UpdateCh <- &data } -// OnError is invoked by the xDS client to report errors. +// OnError is invoked by the xDS client to report the latest error. 
func (w *TestResourceWatcher) OnError(err error) { select { - case w.ErrorCh <- err: + case <-w.ErrorCh: default: } + w.ErrorCh <- err } // OnResourceDoesNotExist is used by the xDS client to report that the resource // being watched no longer exists. func (w *TestResourceWatcher) OnResourceDoesNotExist() { select { - case w.ResourceDoesNotExistCh <- struct{}{}: + case <-w.ResourceDoesNotExistCh: default: } + w.ResourceDoesNotExistCh <- struct{}{} } // NewTestResourceWatcher returns a TestResourceWatcher to watch for resources // via the xDS client. func NewTestResourceWatcher() *TestResourceWatcher { return &TestResourceWatcher{ - UpdateCh: make(chan *xdsresource.ResourceData), - ErrorCh: make(chan error), + UpdateCh: make(chan *xdsresource.ResourceData, 1), + ErrorCh: make(chan error, 1), + ResourceDoesNotExistCh: make(chan struct{}, 1), } } diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go index 96748b7e3ef0..09a81759a1f3 100644 --- a/xds/internal/xdsclient/authority_test.go +++ b/xds/internal/xdsclient/authority_test.go @@ -38,8 +38,6 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. ) @@ -89,18 +87,7 @@ func setupTest(ctx context.Context, t *testing.T, opts e2e.ManagementServerOptio func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // Setting up a mgmt server with a done channel when OnStreamRequest is invoked. 
- serverOnReqDoneCh := make(chan struct{}) - serverOpt := e2e.ManagementServerOptions{ - OnStreamRequest: func(int64, *v3discoverypb.DiscoveryRequest) error { - select { - case serverOnReqDoneCh <- struct{}{}: - default: - } - return nil - }, - } - a, ms, nodeID := setupTest(ctx, t, serverOpt, defaultTestTimeout) + a, ms, nodeID := setupTest(ctx, t, emptyServerOpts, defaultTestTimeout) defer ms.Stop() defer a.close() @@ -109,16 +96,16 @@ func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { cancelResource := a.watchResource(listenerResourceType, rn, w) defer cancelResource() - if err := compareWatchState(a, rn, watchStateStarted); err != nil { - t.Fatal(err) + // Looping until the underlying transport has successfully sent the request to + // the server, which would call the onSend callback and transition the watchState + // to `watchStateRequested`. + for ctx.Err() == nil { + if err := compareWatchState(a, rn, watchStateRequested); err == nil { + break + } } - - // This blocking read is to verify that the underlying transport has successfully - // sent the request to the server, hence the onSend callback was already invoked. - // onSend callback should transition the watchState to `watchStateRequested`. - <-serverOnReqDoneCh - if err := compareWatchState(a, rn, watchStateRequested); err != nil { - t.Fatal(err) + if ctx.Err() != nil { + t.Fatalf("Test timed out before state transiton to %q was verified.", watchStateRequested) } // Updating mgmt server with the same lds resource. 
Blocking on watcher's update @@ -139,7 +126,6 @@ func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { return } } - } // This tests the resource's watch state transition when the ADS stream is closed From 7dfd71831d2b327ec64c652bc2f22972304a5fb6 Mon Sep 17 00:00:00 2001 From: Ernest Nguyen Hung <58267404+erni27@users.noreply.github.com> Date: Wed, 19 Apr 2023 01:53:59 +0200 Subject: [PATCH 883/998] internal/buffer: add Close method to the Unbounded buffer type (#6161) --- balancer/rls/balancer.go | 7 ++++- balancer_conn_wrappers.go | 14 ++++++++-- internal/buffer/unbounded.go | 26 ++++++++++++++++--- internal/buffer/unbounded_test.go | 16 ++++++++++++ internal/grpcsync/callback_serializer.go | 6 ++++- internal/transport/http2_client.go | 2 +- .../balancer/cdsbalancer/cdsbalancer.go | 6 ++++- .../balancer/clusterimpl/clusterimpl.go | 6 ++++- .../clusterresolver/clusterresolver.go | 6 ++++- .../balancer/outlierdetection/balancer.go | 13 ++++++++-- xds/internal/balancer/priority/balancer.go | 6 ++++- xds/internal/xdsclient/transport/transport.go | 7 ++++- xds/server.go | 6 ++++- 13 files changed, 105 insertions(+), 16 deletions(-) diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go index b2f97a9509ed..076aae8c99f0 100644 --- a/balancer/rls/balancer.go +++ b/balancer/rls/balancer.go @@ -188,7 +188,10 @@ func (b *rlsBalancer) run() { for { select { - case u := <-b.updateCh.Get(): + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } b.updateCh.Load() switch update := u.(type) { case childPolicyIDAndState: @@ -450,6 +453,8 @@ func (b *rlsBalancer) Close() { b.dataCache.stop() b.cacheMu.Unlock() + b.updateCh.Close() + <-b.done.Done() } diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 0359956d36fa..eeaf5beb72a6 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -113,7 +113,10 @@ type subConnUpdate struct { func (ccb *ccBalancerWrapper) watcher() { for { select { - case u := 
<-ccb.updateCh.Get(): + case u, ok := <-ccb.updateCh.Get(): + if !ok { + break + } ccb.updateCh.Load() if ccb.closed.HasFired() { break @@ -155,8 +158,13 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) var res interface{} + var ok bool select { - case res = <-ccb.resultCh.Get(): + case res, ok = <-ccb.resultCh.Get(): + if !ok { + // The result channel is closed only when the balancer wrapper is closed. + return nil + } ccb.resultCh.Load() case <-ccb.closed.Done(): // Return early if the balancer wrapper is closed while we are waiting for @@ -296,6 +304,8 @@ func (ccb *ccBalancerWrapper) close() { func (ccb *ccBalancerWrapper) handleClose() { ccb.balancer.Close() + ccb.updateCh.Close() + ccb.resultCh.Close() ccb.done.Fire() } diff --git a/internal/buffer/unbounded.go b/internal/buffer/unbounded.go index 9f6a0c1200db..81c2f5fd761b 100644 --- a/internal/buffer/unbounded.go +++ b/internal/buffer/unbounded.go @@ -35,6 +35,7 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} + closed bool mu sync.Mutex backlog []interface{} } @@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. func (b *Unbounded) Put(t interface{}) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. 
func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/internal/buffer/unbounded_test.go b/internal/buffer/unbounded_test.go index 8cb800dd0f09..1708391e7f27 100644 --- a/internal/buffer/unbounded_test.go +++ b/internal/buffer/unbounded_test.go @@ -119,3 +119,19 @@ func (s) TestMultipleWriters(t *testing.T) { t.Errorf("reads: %#v, wantReads: %#v", reads, wantReads) } } + +// TestClose closes the buffer and makes sure that nothing is sent after the +// buffer is closed. 
+func (s) TestClose(t *testing.T) { + ub := NewUnbounded() + ub.Close() + if v, ok := <-ub.Get(); ok { + t.Errorf("Unbounded.Get() = %v, want closed channel", v) + } + ub.Put(1) + ub.Load() + if v, ok := <-ub.Get(); ok { + t.Errorf("Unbounded.Get() = %v, want closed channel", v) + } + ub.Close() +} diff --git a/internal/grpcsync/callback_serializer.go b/internal/grpcsync/callback_serializer.go index 79993d34375a..6df798c00eb1 100644 --- a/internal/grpcsync/callback_serializer.go +++ b/internal/grpcsync/callback_serializer.go @@ -56,8 +56,12 @@ func (t *CallbackSerializer) run(ctx context.Context) { for ctx.Err() == nil { select { case <-ctx.Done(): + t.callbacks.Close() return - case callback := <-t.callbacks.Get(): + case callback, ok := <-t.callbacks.Get(): + if !ok { + return + } t.callbacks.Load() callback.(func(ctx context.Context))(ctx) } diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 5216998a88ba..326bf0848000 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -1337,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. 
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 9b55ee0f10ab..1e3fb4d1286c 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -426,7 +426,10 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { func (b *cdsBalancer) run() { for { select { - case u := <-b.updateCh.Get(): + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } b.updateCh.Load() switch update := u.(type) { case *ccUpdate: @@ -466,6 +469,7 @@ func (b *cdsBalancer) run() { if b.cachedIdentity != nil { b.cachedIdentity.Close() } + b.updateCh.Close() b.logger.Infof("Shutdown") b.done.Fire() return diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 25090a5bb420..e1a18ae338d3 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -333,6 +333,7 @@ func (b *clusterImplBalancer) Close() { b.childLB = nil b.childState = balancer.State{} } + b.pickerUpdateCh.Close() <-b.done.Done() b.logger.Infof("Shutdown") } @@ -506,7 +507,10 @@ func (b *clusterImplBalancer) run() { defer b.done.Fire() for { select { - case update := <-b.pickerUpdateCh.Get(): + case update, ok := <-b.pickerUpdateCh.Get(): + if !ok { + return + } b.pickerUpdateCh.Load() b.mu.Lock() if b.closed.HasFired() { diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index e068d1014790..18dac2596d0a 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -265,7 +265,10 @@ func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bo func (b *clusterResolverBalancer) run() { for { select { - case u 
:= <-b.updateCh.Get(): + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } b.updateCh.Load() switch update := u.(type) { case *ccUpdate: @@ -303,6 +306,7 @@ func (b *clusterResolverBalancer) run() { b.child.Close() b.child = nil } + b.updateCh.Close() // This is the *ONLY* point of return from this function. b.logger.Infof("Shutdown") b.done.Fire() diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 062a8e5e48d2..8d87e8f9884a 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -362,6 +362,9 @@ func (b *outlierDetectionBalancer) Close() { b.child.Close() b.childMu.Unlock() + b.scUpdateCh.Close() + b.pickerUpdateCh.Close() + b.mu.Lock() defer b.mu.Unlock() if b.intervalTimer != nil { @@ -681,7 +684,10 @@ func (b *outlierDetectionBalancer) run() { defer b.done.Fire() for { select { - case update := <-b.scUpdateCh.Get(): + case update, ok := <-b.scUpdateCh.Get(): + if !ok { + return + } b.scUpdateCh.Load() if b.closed.HasFired() { // don't send SubConn updates to child after the balancer has been closed return @@ -692,7 +698,10 @@ func (b *outlierDetectionBalancer) run() { case *ejectionUpdate: b.handleEjectedUpdate(u) } - case update := <-b.pickerUpdateCh.Get(): + case update, ok := <-b.pickerUpdateCh.Get(): + if !ok { + return + } b.pickerUpdateCh.Load() if b.closed.HasFired() { // don't send picker updates to grpc after the balancer has been closed return diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 28062c51ee93..40c047d558b7 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -205,6 +205,7 @@ func (b *priorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balance func (b *priorityBalancer) Close() { b.bg.Close() + b.childBalancerStateUpdate.Close() b.mu.Lock() defer b.mu.Unlock() @@ -247,7 
+248,10 @@ type resumePickerUpdates struct { func (b *priorityBalancer) run() { for { select { - case u := <-b.childBalancerStateUpdate.Get(): + case u, ok := <-b.childBalancerStateUpdate.Get(): + if !ok { + return + } b.childBalancerStateUpdate.Load() // Needs to handle state update in a goroutine, because each state // update needs to start/close child policy, could result in diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index f02416f37c48..86803588a7cc 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -400,7 +400,11 @@ func (t *Transport) send(ctx context.Context) { continue } sendNodeProto = false - case u := <-t.adsRequestCh.Get(): + case u, ok := <-t.adsRequestCh.Get(): + if !ok { + // No requests will be sent after the adsRequestCh buffer is closed. + return + } t.adsRequestCh.Load() var ( @@ -621,6 +625,7 @@ func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) func (t *Transport) Close() { t.adsRunnerCancel() <-t.adsRunnerDoneCh + t.adsRequestCh.Close() t.cc.Close() } diff --git a/xds/server.go b/xds/server.go index 24abb65f253f..55b678bb78a0 100644 --- a/xds/server.go +++ b/xds/server.go @@ -272,6 +272,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // need to explicitly close the listener. Cancellation of the xDS watch // is handled by the listenerWrapper. 
lw.Close() + modeUpdateCh.Close() return nil case <-goodUpdateCh: } @@ -295,7 +296,10 @@ func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { select { case <-s.quit.Done(): return - case u := <-updateCh.Get(): + case u, ok := <-updateCh.Get(): + if !ok { + return + } updateCh.Load() args := u.(*modeChangeArgs) if args.mode == connectivity.ServingModeNotServing { From ca604628aa8558b77ce24b11d5255e6f828d9222 Mon Sep 17 00:00:00 2001 From: Mskxn <118117161+Mskxn@users.noreply.github.com> Date: Wed, 19 Apr 2023 07:56:40 +0800 Subject: [PATCH 884/998] stubserver: Stop server when StartClient failed (#6190) --- internal/stubserver/stubserver.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index 012021dc03e2..94ef56482e18 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -86,7 +86,11 @@ func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) if err := ss.StartServer(sopts...); err != nil { return err } - return ss.StartClient(dopts...) + if err := ss.StartClient(dopts...); err != nil { + ss.Stop() + return err + } + return nil } // StartServer only starts the server. It does not create a client to it. 
From 16651f60ddc5b1adb810fca8b1a732d57fecb270 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Tue, 18 Apr 2023 17:02:56 -0700 Subject: [PATCH 885/998] go.mod: update all dependencies (#6214) --- examples/go.mod | 16 ++-- examples/go.sum | 129 +++++++++++++++++++++++-- gcp/observability/go.mod | 20 ++-- gcp/observability/go.sum | 137 ++++++++++++++++++++++++--- go.mod | 14 +-- go.sum | 28 +++--- interop/observability/go.mod | 20 ++-- interop/observability/go.sum | 137 ++++++++++++++++++++++++--- security/advancedtls/examples/go.mod | 16 ++-- security/advancedtls/examples/go.sum | 20 ++-- security/advancedtls/go.mod | 12 +-- security/advancedtls/go.sum | 20 ++-- security/authorization/go.mod | 15 +-- security/authorization/go.sum | 30 +++--- stats/opencensus/go.mod | 10 +- stats/opencensus/go.sum | 123 +++++++++++++++++++++++- test/tools/go.mod | 4 +- test/tools/go.sum | 22 ++--- 18 files changed, 614 insertions(+), 159 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index 642f3e8f0117..7f79ab2b7e87 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -5,23 +5,23 @@ go 1.17 require ( github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 github.com/golang/protobuf v1.5.3 - golang.org/x/oauth2 v0.6.0 - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 - google.golang.org/grpc v1.53.0 + golang.org/x/oauth2 v0.7.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/grpc v1.54.0 google.golang.org/protobuf v1.30.0 ) require ( - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect github.com/envoyproxy/go-control-plane v0.11.0 // indirect - 
github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/examples/go.sum b/examples/go.sum index 3a9c20845207..edbb05e00946 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -41,13 +41,18 @@ cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wx cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= cloud.google.com/go/analytics v0.18.0/go.mod 
h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= @@ -56,25 +61,35 @@ cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= cloud.google.com/go/area120 v0.7.1/go.mod 
h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= @@ -95,6 +110,7 @@ cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oe cloud.google.com/go/beyondcorp v0.2.0/go.mod 
h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -104,12 +120,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= 
cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= @@ -121,9 +141,12 @@ cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5v cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= @@ -132,6 +155,7 @@ cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uX cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= 
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -143,8 +167,10 @@ cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -156,9 +182,12 @@ cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iW cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= 
cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= @@ -166,6 +195,7 @@ cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= @@ -173,6 +203,7 @@ cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KF cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= 
cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= @@ -182,6 +213,7 @@ cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxB cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= @@ -191,14 +223,17 @@ cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZW cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod 
h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= @@ -206,6 +241,7 @@ cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= @@ -214,12 +250,14 @@ cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= 
cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= @@ -227,15 +265,19 @@ cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aU cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= 
cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= @@ -250,6 +292,7 @@ cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+o cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= @@ -265,19 +308,26 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod 
h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -296,6 +346,7 @@ cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtq cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= 
cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= @@ -312,22 +363,26 @@ cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJP cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity 
v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= @@ -353,9 +408,11 @@ cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2om cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod 
h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -363,8 +420,10 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -372,6 +431,7 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7d cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= 
cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= @@ -388,6 +448,8 @@ cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0 cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= @@ -399,11 +461,13 @@ cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQk cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= cloud.google.com/go/scheduler v1.8.0/go.mod 
h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= @@ -414,35 +478,44 @@ cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3s cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= 
cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= cloud.google.com/go/speech v1.6.0/go.mod 
h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -453,9 +526,11 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= @@ -470,12 +545,18 @@ cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV6 
cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= @@ -487,11 +568,14 @@ cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiC cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vision/v2 v2.6.0/go.mod 
h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= @@ -521,6 +605,7 @@ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGW github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -541,6 +626,7 @@ github.com/cncf/xds/go 
v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -549,12 +635,14 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= 
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -665,6 +753,7 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -691,6 +780,7 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -709,6 +799,7 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -718,6 +809,7 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= 
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -747,6 +839,7 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -807,6 +900,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -847,6 +941,7 @@ golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -864,8 +959,9 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -891,8 +987,9 @@ golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= 
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -955,6 +1052,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -980,14 +1078,16 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1001,13 +1101,15 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1139,6 +1241,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1215,6 +1319,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -1268,8 +1373,15 @@ google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod 
h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1286,6 +1398,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index e52c78dc4122..9262d214dd96 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -8,33 +8,33 @@ require ( github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 go.opencensus.io v0.24.0 - golang.org/x/oauth2 v0.6.0 - google.golang.org/api v0.110.0 + golang.org/x/oauth2 v0.7.0 + google.golang.org/api v0.114.0 google.golang.org/grpc v1.54.0 google.golang.org/grpc/stats/opencensus v1.0.0 ) require ( cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + 
cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/longrunning v0.4.1 // indirect - cloud.google.com/go/monitoring v1.12.0 // indirect - cloud.google.com/go/trace v1.8.0 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect - golang.org/x/net v0.8.0 // indirect + golang.org/x/net v0.9.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index cf60c66c8682..a629a01ac913 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -42,13 +42,18 @@ cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wx cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod 
h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= @@ -57,25 +62,35 @@ cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry 
v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= cloud.google.com/go/asset v1.5.0/go.mod 
h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= @@ -96,6 +111,7 @@ cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oe cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -105,12 +121,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= 
cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= @@ -122,9 +142,12 @@ cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5v cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild 
v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= @@ -133,6 +156,7 @@ cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uX cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -144,8 +168,10 @@ cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= 
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -157,9 +183,12 @@ cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iW cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= @@ -167,6 +196,7 @@ cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= cloud.google.com/go/datacatalog v1.12.0/go.mod 
h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= @@ -174,6 +204,7 @@ cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KF cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= @@ -183,6 +214,7 @@ cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxB cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= @@ -192,14 +224,17 @@ cloud.google.com/go/dataqna v0.7.0/go.mod 
h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZW cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= @@ -207,6 +242,7 @@ cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= 
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= @@ -215,12 +251,14 @@ cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= @@ -228,15 +266,19 @@ cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aU cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= 
cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= @@ -251,6 +293,7 @@ cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+o cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= cloud.google.com/go/gkehub v0.11.0/go.mod 
h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= @@ -265,21 +308,28 @@ cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHD cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= 
cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -300,6 +350,7 @@ cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtq cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= @@ -316,24 +367,28 @@ cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJP cloud.google.com/go/monitoring v1.1.0/go.mod 
h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= 
cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= @@ -359,9 +414,11 @@ cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2om cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -369,8 +426,10 @@ cloud.google.com/go/pubsub v1.3.1/go.mod 
h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -378,6 +437,7 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7d cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= @@ -394,6 +454,8 @@ cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0 cloud.google.com/go/resourcemanager v1.3.0/go.mod 
h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= @@ -405,11 +467,13 @@ cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQk cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod 
h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= @@ -420,35 +484,44 @@ cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3s cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= 
cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech 
v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -459,9 +532,11 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= @@ -476,14 +551,20 @@ cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV6 cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= 
+cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= @@ -495,11 +576,14 @@ cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiC cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= cloud.google.com/go/vmmigration v1.2.0/go.mod 
h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= @@ -531,6 +615,7 @@ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGW github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= @@ -552,6 +637,7 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -560,10 +646,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod 
h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -676,8 +764,9 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -708,6 +797,7 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -727,6 +817,7 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= @@ -738,6 +829,7 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= 
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -768,6 +860,7 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -828,6 +921,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -868,6 +962,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -886,8 +981,9 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -914,8 +1010,9 @@ golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.5.0/go.mod 
h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -979,6 +1076,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1006,8 +1104,9 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1015,6 +1114,7 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1028,13 +1128,15 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1167,8 +1269,10 @@ google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4q google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1249,6 +1353,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto 
v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -1302,8 +1407,15 @@ google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto 
v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/stats/opencensus v1.0.0 h1:evSYcRZaSToQp+borzWE52+03joezZeXcKJvZDfkUJA= google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= @@ -1322,6 +1434,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/go.mod b/go.mod index aa79385c4bfe..5707e6e7ef71 100644 --- a/go.mod +++ b/go.mod @@ -11,18 +11,18 @@ require ( github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 - golang.org/x/net v0.8.0 - golang.org/x/oauth2 v0.6.0 - golang.org/x/sys v0.6.0 - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 + golang.org/x/net v0.9.0 + golang.org/x/oauth2 v0.7.0 + golang.org/x/sys v0.7.0 + 
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 google.golang.org/protobuf v1.30.0 ) require ( - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect - golang.org/x/text v0.8.0 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect ) diff --git a/go.sum b/go.sum index d882cea260b9..925f1485b1dd 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -20,8 +20,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.10.0 
h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= @@ -51,22 +51,22 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -79,8 +79,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto 
v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/interop/observability/go.mod b/interop/observability/go.mod index e37b2dff79e2..1d743a02318b 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -9,12 +9,12 @@ require ( require ( cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/logging v1.7.0 // indirect cloud.google.com/go/longrunning v0.4.1 // indirect - cloud.google.com/go/monitoring v1.12.0 // indirect - cloud.google.com/go/trace v1.8.0 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -23,18 +23,18 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect 
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - google.golang.org/api v0.110.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc/stats/opencensus v1.0.0 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/interop/observability/go.sum b/interop/observability/go.sum index f6008bb2ae1d..6c75fdb93aea 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -42,13 +42,18 @@ cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wx cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= 
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= @@ -57,25 +62,35 @@ cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= 
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset 
v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= @@ -96,6 +111,7 @@ cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oe cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -105,12 +121,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod 
h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= @@ -122,9 +142,12 @@ cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5v cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= @@ -133,6 +156,7 @@ cloud.google.com/go/cloudtasks v1.6.0/go.mod 
h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uX cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -144,8 +168,10 @@ cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -157,9 +183,12 @@ cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iW cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= 
cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= @@ -167,6 +196,7 @@ cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= @@ -174,6 +204,7 @@ cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KF cloud.google.com/go/dataform v0.4.0/go.mod 
h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= @@ -183,6 +214,7 @@ cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxB cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= @@ -192,14 +224,17 @@ cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZW cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod 
h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= @@ -207,6 +242,7 @@ cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= @@ -215,12 +251,14 @@ cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= 
cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= @@ -228,15 +266,19 @@ cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aU cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= 
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= @@ -251,6 +293,7 @@ cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+o cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= @@ -265,21 +308,28 @@ cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHD cloud.google.com/go/iam v0.7.0/go.mod 
h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= cloud.google.com/go/kms v1.9.0/go.mod 
h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -300,6 +350,7 @@ cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtq cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= @@ -316,24 +367,28 @@ cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJP cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod 
h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= cloud.google.com/go/optimization v1.1.0/go.mod 
h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= @@ -359,9 +414,11 @@ cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2om cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -369,8 +426,10 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite 
v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -378,6 +437,7 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7d cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= @@ -394,6 +454,8 @@ cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0 cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= 
cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= @@ -405,11 +467,13 @@ cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQk cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= @@ -420,35 +484,44 @@ cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3s cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= cloud.google.com/go/securitycenter 
v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod 
h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -459,9 +532,11 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage 
v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= @@ -476,14 +551,20 @@ cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV6 cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= 
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= @@ -495,11 +576,14 @@ cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiC cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= 
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= @@ -531,6 +615,7 @@ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGW github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= @@ -552,6 +637,7 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -560,10 +646,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -676,8 +764,9 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99 
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -708,6 +797,7 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -727,6 +817,7 @@ 
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= @@ -738,6 +829,7 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -768,6 +860,7 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod 
h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -828,6 +921,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -868,6 +962,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net 
v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -886,8 +981,9 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -914,8 +1010,9 @@ golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -979,6 +1076,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1006,8 +1104,9 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1015,6 +1114,7 @@ golang.org/x/term v0.2.0/go.mod 
h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1028,13 +1128,15 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1167,8 +1269,10 @@ google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4q google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1249,6 +1353,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -1302,8 +1407,15 @@ google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod 
h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1320,6 +1432,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index af92a29b0f01..073dd1302718 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -3,18 +3,18 @@ module google.golang.org/grpc/security/advancedtls/examples go 1.17 require ( - google.golang.org/grpc v1.53.0 - google.golang.org/grpc/examples v0.0.0-20230318005552-70c52915099a - google.golang.org/grpc/security/advancedtls v0.0.0-20230318005552-70c52915099a + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/examples v0.0.0-20230418213844-0ed709c4a71d + google.golang.org/grpc/security/advancedtls v0.0.0-20230418213844-0ed709c4a71d ) require ( github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/crypto v0.7.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // 
indirect + golang.org/x/crypto v0.8.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 02a09bda89bd..04465db1aee4 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -4,17 +4,17 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index e9888f020960..19b35bb2ee97 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -4,17 +4,17 @@ go 1.17 require ( github.com/hashicorp/golang-lru v0.5.4 - golang.org/x/crypto v0.7.0 - google.golang.org/grpc v1.53.0 + golang.org/x/crypto v0.8.0 + google.golang.org/grpc v1.54.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) require ( github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/security/advancedtls/go.sum 
b/security/advancedtls/go.sum index 3e6e64f444b5..e73f06fbc4ee 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -5,17 +5,17 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 
h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= diff --git a/security/authorization/go.mod b/security/authorization/go.mod index 737244a2696b..b908dad955c8 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -4,18 +4,19 @@ go 1.17 require ( github.com/envoyproxy/go-control-plane v0.11.0 - github.com/google/cel-go v0.13.0 + github.com/google/cel-go v0.14.0 github.com/google/go-cmp v0.5.9 - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 - google.golang.org/grpc v1.53.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/grpc v1.54.0 google.golang.org/protobuf v1.30.0 ) require ( - github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 // indirect github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect - github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/stoewer/go-strcase v1.2.1 // indirect - golang.org/x/text v0.8.0 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + 
golang.org/x/exp v0.0.0-20230418202329-0354be287a23 // indirect + golang.org/x/text v0.9.0 // indirect ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index dc3b5eaeb6eb..9cc3fb5a8cf2 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -1,7 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= -github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 h1:X8MJ0fnN5FPdcGF5Ij2/OW+HgiJrRg3AfHAx1PJtIzM= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= @@ -13,8 +13,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= -github.com/envoyproxy/protoc-gen-validate 
v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -22,8 +22,8 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/cel-go v0.13.0 h1:z+8OBOcmh7IeKyqwT/6IlnMvy621fYUqnTVPEdegGlU= -github.com/google/cel-go v0.13.0/go.mod h1:K2hpQgEjDp18J76a2DKFRlPBPpgRZgi6EbnpDgIhJ8s= +github.com/google/cel-go v0.14.0 h1:LFobwuUDslWUHdQ48SXVXvQgPH2X1XVhsgOGNioAEZ4= +github.com/google/cel-go v0.14.0/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -31,8 +31,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stoewer/go-strcase v1.2.1 h1:/1JWd+AcWPzkcGLEmjUCka99YqGOtTnp1H/wcP+uap4= -github.com/stoewer/go-strcase v1.2.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -42,6 +42,8 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -58,8 +60,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -69,13 +71,13 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index 26dda8eadbea..14828f4c2471 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -5,16 +5,16 @@ go 1.17 require ( github.com/google/go-cmp v0.5.9 go.opencensus.io v0.24.0 - google.golang.org/grpc v1.53.0 + google.golang.org/grpc v1.54.0 ) require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 0d346feaf4df..2dc115e4a27d 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -41,13 +41,18 @@ cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wx cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager 
v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= @@ -56,25 +61,35 @@ cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= 
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= 
cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= @@ -95,6 +110,7 @@ cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oe cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -104,12 +120,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= cloud.google.com/go/bigquery v1.43.0/go.mod 
h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= @@ -121,9 +141,12 @@ cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5v cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod 
h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= @@ -132,6 +155,7 @@ cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uX cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -144,6 +168,8 @@ cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.1.0/go.mod 
h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -154,9 +180,12 @@ cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iW cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= @@ -164,6 +193,7 @@ cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= 
cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= @@ -171,6 +201,7 @@ cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KF cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= @@ -180,6 +211,7 @@ cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxB cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= @@ -189,14 +221,17 @@ cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZW cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= @@ -204,6 +239,7 @@ cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= 
cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= @@ -212,12 +248,14 @@ cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= @@ -225,15 +263,19 @@ cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aU cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= 
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= @@ -248,6 +290,7 @@ cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+o cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= cloud.google.com/go/gkemulticloud v0.3.0/go.mod 
h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= @@ -263,19 +306,26 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= 
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -294,6 +344,7 @@ cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtq cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= @@ -310,22 +361,26 @@ cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJP cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= 
cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= cloud.google.com/go/optimization v1.3.1/go.mod 
h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= @@ -351,9 +406,11 @@ cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2om cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -361,8 +418,10 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= 
cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -370,6 +429,7 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7d cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= @@ -386,6 +446,8 @@ cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0 cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= cloud.google.com/go/resourcesettings v1.5.0/go.mod 
h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= @@ -397,11 +459,13 @@ cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQk cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= @@ -412,35 +476,44 @@ cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3s cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod 
h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= cloud.google.com/go/serviceusage v1.3.0/go.mod 
h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -451,9 +524,11 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage 
v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= @@ -468,12 +543,18 @@ cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV6 cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= 
cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= @@ -485,11 +566,14 @@ cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiC cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= @@ -519,6 +603,7 @@ 
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGW github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -536,6 +621,7 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -543,10 +629,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -658,6 +746,7 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod 
h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -684,6 +773,7 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -702,6 +792,7 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/remyoudompheng/bigfft 
v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -711,6 +802,7 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -741,6 +833,7 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -801,6 +894,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -841,6 +935,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -858,8 +953,9 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net 
v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -886,6 +982,7 @@ golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -948,6 +1045,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -973,14 +1071,16 @@ golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -994,13 +1094,15 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 
h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1132,6 +1234,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod 
h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1207,6 +1311,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -1260,8 +1365,15 @@ google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod 
h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1278,6 +1390,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/test/tools/go.mod b/test/tools/go.mod index 01937569f6db..4e91307e87c5 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -5,9 +5,9 @@ go 1.14 require ( github.com/client9/misspell v0.3.4 github.com/golang/protobuf v1.5.3 - 
golang.org/x/exp/typeparams v0.0.0-20230315142452-642cacee5cc0 // indirect + golang.org/x/exp/typeparams v0.0.0-20230418202329-0354be287a23 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/tools v0.7.0 + golang.org/x/tools v0.8.0 google.golang.org/protobuf v1.30.0 // indirect honnef.co/go/tools v0.4.3 ) diff --git a/test/tools/go.sum b/test/tools/go.sum index 807d022fa890..af54bb815338 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -12,23 +12,23 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20230315142452-642cacee5cc0 h1:cW6TvM2r3dR3P6P9tWdY51IZSMyVyh9ArUVnEmImTDM= -golang.org/x/exp/typeparams v0.0.0-20230315142452-642cacee5cc0/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230418202329-0354be287a23 h1:EQdGOd2o46bhZIT8VRldX3rWAaqCsxQWznzxhLZLiM8= +golang.org/x/exp/typeparams v0.0.0-20230418202329-0354be287a23/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 
h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= @@ -41,27 +41,27 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.1-0.20221208213631-3f74d914ae6d/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod 
h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= From 2cd95c7514a3d02aa2d98591c013885cb44fbdeb Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 19 Apr 2023 10:54:34 -0700 Subject: [PATCH 886/998] gcp/observability: remove redundant import (#6215) --- gcp/observability/logging.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go index bec80140275b..0ffbd93b3922 100644 --- a/gcp/observability/logging.go +++ b/gcp/observability/logging.go @@ -35,7 +35,6 @@ import ( binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/binarylog" iblog "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/stats/opencensus" @@ -438,7 +437,7 @@ func registerClientRPCEvents(config *config, exporter loggingExporter) { projectID: config.ProjectID, clientSide: true, } - internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl binarylog.Logger) grpc.DialOption)(clientSideLogger)) + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl iblog.Logger) grpc.DialOption)(clientSideLogger)) } func registerServerRPCEvents(config *config, exporter loggingExporter) { @@ -478,7 +477,7 @@ func registerServerRPCEvents(config *config, exporter loggingExporter) { projectID: config.ProjectID, clientSide: false, } - internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl binarylog.Logger) grpc.ServerOption)(serverSideLogger)) 
+ internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl iblog.Logger) grpc.ServerOption)(serverSideLogger)) } func startLogging(ctx context.Context, config *config) error { From 8c70261b5c3941bab070b422fa6d630acba509c2 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 20 Apr 2023 18:49:17 -0700 Subject: [PATCH 887/998] grpc: ClientConn cleanup in prep for channel idleness (#6189) --- clientconn.go | 160 ++++++++++++++++++++++++++------------- resolver_conn_wrapper.go | 81 ++++++++++++-------- 2 files changed, 160 insertions(+), 81 deletions(-) diff --git a/clientconn.go b/clientconn.go index 3a76142424db..e67a990b24fb 100644 --- a/clientconn.go +++ b/clientconn.go @@ -173,40 +173,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - pid := cc.dopts.channelzParentID - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) - cc.csMgr.channelzID = cc.channelzID + // Register ClientConn with channelz. 
+ cc.channelzRegistration(target) - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -249,15 +220,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { + if err := cc.parseTargetAndFindResolver(); err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) - if err != nil { + if err = cc.determineAuthority(); err != nil { return nil, err } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil { // Blocking wait for the initial service config. @@ -290,7 +258,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }) // Build the resolver. 
- rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + rWrapper, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) if err != nil { return nil, fmt.Errorf("failed to build resolver: %v", err) } @@ -328,6 +306,64 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * return cc, nil } +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. 
+func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing + } + } + } + return nil +} + +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID +} + // chainUnaryClientInterceptors chains all unary client interceptors into one. 
func chainUnaryClientInterceptors(cc *ClientConn) { interceptors := cc.dopts.chainUnaryInts @@ -471,6 +507,7 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. // The following provide their own synchronization, and therefore don't @@ -1552,7 +1589,14 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
+func (cc *ClientConn) parseTargetAndFindResolver() error { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1564,7 +1608,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } } @@ -1579,15 +1624,16 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return nil, err + return err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } // parseTarget uses RFC 3986 semantics to parse the given target into a @@ -1610,7 +1656,15 @@ func parseTarget(target string) (resolver.Target, error) { // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
+func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). @@ -1627,25 +1681,29 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } + endpoint := cc.parsedTarget.Endpoint() + target := cc.target switch { case authorityFromDialOption != "": - return authorityFromDialOption, nil + cc.authority = authorityFromDialOption case authorityFromCreds != "": - return authorityFromCreds, nil + cc.authority = authorityFromCreds case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. - return "localhost", nil + cc.authority = "localhost" case strings.HasPrefix(endpoint, ":"): - return "localhost" + endpoint, nil + cc.authority = "localhost" + endpoint default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. 
- return endpoint, nil + cc.authority = endpoint } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil } diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index 05a9d4e0bac0..854e90f69ae5 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -23,7 +23,6 @@ import ( "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,35 +30,46 @@ import ( "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + done *grpcsync.Event + channelzID *channelz.Identifier + ignoreServiceConfig bool + resolverMu sync.Mutex resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State incomingMu sync.Mutex // Synchronizes all the incoming calls. + curState resolver.State +} + +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. 
} // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, + cc: cc, + done: grpcsync.NewEvent(), + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, } var err error @@ -69,7 +79,7 @@ func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapp // accessing ccr.resolver which is being assigned here. ccr.resolverMu.Lock() defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + ccr.resolver, err = opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { return nil, err } @@ -91,13 +101,15 @@ func (ccr *ccResolverWrapper) close() { ccr.resolverMu.Unlock() } +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. 
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { ccr.incomingMu.Lock() defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return nil } - ccr.addChannelzTraceEvent(s) + ccr.addChannelzTraceEventLocked(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -105,24 +117,27 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { return nil } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { ccr.incomingMu.Lock() defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.cc.updateResolverState(resolver.State{}, err) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. 
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.incomingMu.Lock() defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.addChannelzTraceEventLocked(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -135,26 +150,32 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.addChannelzTraceEventLocked(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. 
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { +// addChannelzTraceEventLocked adds a channelz trace event containing the new +// state received from resolver implementations. +// +// Caller must hold cc.incomingMu. +func (ccr *ccResolverWrapper) addChannelzTraceEventLocked(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool @@ -172,5 +193,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } From 83c460b8de09bac04be4ac982750853fab7809ec Mon Sep 17 00:00:00 2001 From: Gregory Cooke Date: Fri, 21 Apr 2023 14:48:11 -0400 Subject: [PATCH 888/998] authz: Move audit package (#6218) * Move audit logger to it's own package * remove audit prefixes since its the package name now * Add package comment --- authz/{ => audit}/audit_logger.go | 47 ++++++++++++++++--------------- 1 file changed, 24 insertions(+), 23 deletions(-) rename authz/{ => audit}/audit_logger.go (74%) diff --git a/authz/audit_logger.go b/authz/audit/audit_logger.go similarity index 74% rename from authz/audit_logger.go rename to authz/audit/audit_logger.go index 992d66054fbb..b9b721970387 100644 --- a/authz/audit_logger.go +++ b/authz/audit/audit_logger.go @@ -16,7 +16,8 @@ * */ -package authz +// Package audit contains interfaces for audit logging during authorization. +package audit import ( "encoding/json" @@ -27,38 +28,38 @@ import ( // to facilitate thread-safe reading/writing operations. 
type loggerBuilderRegistry struct { mu sync.Mutex - builders map[string]AuditLoggerBuilder + builders map[string]LoggerBuilder } var ( registry = loggerBuilderRegistry{ - builders: make(map[string]AuditLoggerBuilder), + builders: make(map[string]LoggerBuilder), } ) -// RegisterAuditLoggerBuilder registers the builder in a global map +// RegisterLoggerBuilder registers the builder in a global map // using b.Name() as the key. // // This should only be called during initialization time (i.e. in an init() // function). If multiple builders are registered with the same name, // the one registered last will take effect. -func RegisterAuditLoggerBuilder(b AuditLoggerBuilder) { +func RegisterLoggerBuilder(b LoggerBuilder) { registry.mu.Lock() defer registry.mu.Unlock() registry.builders[b.Name()] = b } -// GetAuditLoggerBuilder returns a builder with the given name. +// GetLoggerBuilder returns a builder with the given name. // It returns nil if the builder is not found in the registry. -func GetAuditLoggerBuilder(name string) AuditLoggerBuilder { +func GetLoggerBuilder(name string) LoggerBuilder { registry.mu.Lock() defer registry.mu.Unlock() return registry.builders[name] } -// AuditEvent contains information passed to the audit logger as part of an +// Event contains information passed to the audit logger as part of an // audit logging event. -type AuditEvent struct { +type Event struct { // FullMethodName is the full method name of the audited RPC, in the format // of "/pkg.Service/Method". For example, "/helloworld.Greeter/SayHello". FullMethodName string @@ -74,14 +75,14 @@ type AuditEvent struct { Authorized bool } -// AuditLoggerConfig represents an opaque data structure holding an audit +// LoggerConfig represents an opaque data structure holding an audit // logger configuration. Concrete types representing configuration of specific // audit loggers must embed this interface to implement it. 
-type AuditLoggerConfig interface { - auditLoggerConfig() +type LoggerConfig interface { + loggerConfig() } -// AuditLogger is the interface to be implemented by audit loggers. +// Logger is the interface to be implemented by audit loggers. // // An audit logger is a logger instance that can be configured via the // authorization policy API or xDS HTTP RBAC filters. When the authorization @@ -91,35 +92,35 @@ type AuditLoggerConfig interface { // TODO(lwge): Change the link to the merged gRFC once it's ready. // Please refer to https://github.com/grpc/proposal/pull/346 for more details // about audit logging. -type AuditLogger interface { +type Logger interface { // Log performs audit logging for the provided audit event. // // This method is invoked in the RPC path and therefore implementations // must not block. - Log(*AuditEvent) + Log(*Event) } -// AuditLoggerBuilder is the interface to be implemented by audit logger +// LoggerBuilder is the interface to be implemented by audit logger // builders that are used at runtime to configure and instantiate audit loggers. // // Users who want to implement their own audit logging logic should -// implement this interface, along with the AuditLogger interface, and register -// it by calling RegisterAuditLoggerBuilder() at init time. +// implement this interface, along with the Logger interface, and register +// it by calling RegisterLoggerBuilder() at init time. // // TODO(lwge): Change the link to the merged gRFC once it's ready. // Please refer to https://github.com/grpc/proposal/pull/346 for more details // about audit logging. -type AuditLoggerBuilder interface { - // ParseAuditLoggerConfig parses the given JSON bytes into a structured +type LoggerBuilder interface { + // ParseLoggerConfig parses the given JSON bytes into a structured // logger config this builder can use to build an audit logger. 
- ParseAuditLoggerConfig(config json.RawMessage) (AuditLoggerConfig, error) + ParseLoggerConfig(config json.RawMessage) (LoggerConfig, error) // Build builds an audit logger with the given logger config. // This will only be called with valid configs returned from - // ParseAuditLoggerConfig() and any runtime issues such as failing to + // ParseLoggerConfig() and any runtime issues such as failing to // create a file should be handled by the logger implementation instead of // failing the logger instantiation. So implementers need to make sure it // can return a logger without error at this stage. - Build(AuditLoggerConfig) AuditLogger + Build(LoggerConfig) Logger // Name returns the name of logger built by this builder. // This is used to register and pick the builder. Name() string From 8628e075df225d916eaeaec1488af4dcc02805ec Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 25 Apr 2023 11:17:53 -0600 Subject: [PATCH 889/998] xds/internal/balancer/outlierdetection: Add Channelz Logger to Outlier Detection LB (#6145) --- .../balancer/outlierdetection/balancer.go | 46 ++++++++++++++----- .../outlierdetection/balancer_test.go | 3 +- .../outlierdetection/subconn_wrapper.go | 5 ++ 3 files changed, 41 insertions(+), 13 deletions(-) diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 8d87e8f9884a..749449c2123e 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -26,6 +26,7 @@ import ( "errors" "fmt" "math" + "strings" "sync" "sync/atomic" "time" @@ -35,6 +36,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" @@ -62,13 +64,14 @@ type bb struct{} func 
(bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &outlierDetectionBalancer{ - cc: cc, - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - addrs: make(map[string]*addressInfo), - scWrappers: make(map[balancer.SubConn]*subConnWrapper), - scUpdateCh: buffer.NewUnbounded(), - pickerUpdateCh: buffer.NewUnbounded(), + cc: cc, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + addrs: make(map[string]*addressInfo), + scWrappers: make(map[balancer.SubConn]*subConnWrapper), + scUpdateCh: buffer.NewUnbounded(), + pickerUpdateCh: buffer.NewUnbounded(), + channelzParentID: bOpts.ChannelzParentID, } b.logger = prefixLogger(b) b.logger.Infof("Created") @@ -159,10 +162,11 @@ type outlierDetectionBalancer struct { // to suppress redundant picker updates. recentPickerNoop bool - closed *grpcsync.Event - done *grpcsync.Event - cc balancer.ClientConn - logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event + cc balancer.ClientConn + logger *grpclog.PrefixLogger + channelzParentID *channelz.Identifier // childMu guards calls into child (to uphold the balancer.Balancer API // guarantee of synchronous calls). @@ -822,7 +826,10 @@ func (b *outlierDetectionBalancer) successRateAlgorithm() { return } successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures) - if successRate < (mean - stddev*(float64(ejectionCfg.StdevFactor)/1000)) { + requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) + if successRate < requiredSuccessRate { + channelz.Infof(logger, b.channelzParentID, "SuccessRate algorithm detected outlier: %s. 
Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", + addrInfo.string(), successRate, mean, stddev, requiredSuccessRate) if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } @@ -849,6 +856,8 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { } failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { + channelz.Infof(logger, b.channelzParentID, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", + addrInfo.string(), failurePercentage) if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } @@ -863,7 +872,9 @@ func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) { addrInfo.ejectionTimeMultiplier++ for _, sbw := range addrInfo.sws { sbw.eject() + channelz.Infof(logger, b.channelzParentID, "Subchannel ejected: %s", sbw.string()) } + } // Caller must hold b.mu. 
@@ -872,6 +883,7 @@ func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { addrInfo.latestEjectionTimestamp = time.Time{} for _, sbw := range addrInfo.sws { sbw.uneject() + channelz.Infof(logger, b.channelzParentID, "Subchannel unejected: %s", sbw.string()) } } @@ -896,6 +908,16 @@ type addressInfo struct { sws []*subConnWrapper } +func (a *addressInfo) string() string { + var res strings.Builder + res.WriteString("[") + for _, sw := range a.sws { + res.WriteString(sw.string()) + } + res.WriteString("]") + return res.String() +} + func newAddressInfo() *addressInfo { return &addressInfo{ callCounter: newCallCounter(), diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go index 8b86ebbb19f2..41447164c013 100644 --- a/xds/internal/balancer/outlierdetection/balancer_test.go +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" @@ -304,7 +305,7 @@ func setup(t *testing.T) (*outlierDetectionBalancer, *testutils.TestClientConn, t.Fatalf("balancer.Get(%q) returned nil", Name) } tcc := testutils.NewTestClientConn(t) - odB := builder.Build(tcc, balancer.BuildOptions{}) + odB := builder.Build(tcc, balancer.BuildOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, time.Now().Unix(), nil)}) return odB.(*outlierDetectionBalancer), tcc, odB.Close } diff --git a/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/xds/internal/balancer/outlierdetection/subconn_wrapper.go index 8e25eb788b1d..be631387b2f3 100644 --- a/xds/internal/balancer/outlierdetection/subconn_wrapper.go +++ 
b/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -18,6 +18,7 @@ package outlierdetection import ( + "fmt" "unsafe" "google.golang.org/grpc/balancer" @@ -66,3 +67,7 @@ func (scw *subConnWrapper) uneject() { isEjected: false, }) } + +func (scw *subConnWrapper) string() string { + return fmt.Sprintf("%+v", scw.addresses) +} From eff0942e95d93112921414aee758e619ec86f26f Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 25 Apr 2023 22:47:15 -0400 Subject: [PATCH 890/998] xds/internal/xdsclient: Custom LB xDS Client Changes (#6165) --- internal/envconfig/xds.go | 13 +- xds/internal/balancer/ringhash/config.go | 11 +- xds/internal/balancer/ringhash/config_test.go | 12 + xds/internal/balancer/wrrlocality/balancer.go | 68 ++ .../balancer/wrrlocality/balancer_test.go | 121 ++++ .../xdsclient/tests/cds_watchers_test.go | 2 +- .../xdsclient/tests/resource_update_test.go | 2 +- .../xdsclient/xdslbregistry/converter.go | 154 +++++ .../xdslbregistry/test/converter_test.go | 384 +++++++++++ .../xdsresource/test/unmarshal_cds_test.go | 604 ++++++++++++++++++ .../xdsclient/xdsresource/type_cds.go | 7 + .../xdsclient/xdsresource/unmarshal_cds.go | 39 +- .../xdsresource/unmarshal_cds_test.go | 296 ++------- 13 files changed, 1452 insertions(+), 261 deletions(-) create mode 100644 xds/internal/balancer/wrrlocality/balancer.go create mode 100644 xds/internal/balancer/wrrlocality/balancer_test.go create mode 100644 xds/internal/xdsclient/xdslbregistry/converter.go create mode 100644 xds/internal/xdsclient/xdslbregistry/test/converter_test.go create mode 100644 xds/internal/xdsclient/xdsresource/test/unmarshal_cds_test.go diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 3b17705ba097..1d9152e8eeb2 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -61,11 +61,10 @@ var ( // have a brand new API on the server-side and users explicitly need to use // the new API to get 
security integration on the server. XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster - // and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, @@ -89,4 +88,8 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "true". 
+ XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", false) ) diff --git a/xds/internal/balancer/ringhash/config.go b/xds/internal/balancer/ringhash/config.go index 4763120fa649..b4afcf100132 100644 --- a/xds/internal/balancer/ringhash/config.go +++ b/xds/internal/balancer/ringhash/config.go @@ -35,8 +35,9 @@ type LBConfig struct { } const ( - defaultMinSize = 1024 - defaultMaxSize = 4096 + defaultMinSize = 1024 + defaultMaxSize = 4096 + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M ) func parseConfig(c json.RawMessage) (*LBConfig, error) { @@ -44,6 +45,12 @@ func parseConfig(c json.RawMessage) (*LBConfig, error) { if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } + if cfg.MinRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("min_ring_size value of %d is greater than max supported value %d for this field", cfg.MinRingSize, ringHashSizeUpperBound) + } + if cfg.MaxRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("max_ring_size value of %d is greater than max supported value %d for this field", cfg.MaxRingSize, ringHashSizeUpperBound) + } if cfg.MinRingSize == 0 { cfg.MinRingSize = defaultMinSize } diff --git a/xds/internal/balancer/ringhash/config_test.go b/xds/internal/balancer/ringhash/config_test.go index d8f9ed30bb68..1077d3e7dafb 100644 --- a/xds/internal/balancer/ringhash/config_test.go +++ b/xds/internal/balancer/ringhash/config_test.go @@ -82,6 +82,18 @@ func (s) TestParseConfig(t *testing.T) { envConfigCap: 8000, want: &LBConfig{MinRingSize: 8000, MaxRingSize: 8000}, }, + { + name: "min greater than upper bound", + js: `{"minRingSize": 8388610, "maxRingSize": 10}`, + want: nil, + wantErr: true, + }, + { + name: "max greater than upper bound", + js: `{"minRingSize": 10, "maxRingSize": 8388610}`, + want: nil, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/xds/internal/balancer/wrrlocality/balancer.go 
b/xds/internal/balancer/wrrlocality/balancer.go new file mode 100644 index 000000000000..2ff6fccf89bd --- /dev/null +++ b/xds/internal/balancer/wrrlocality/balancer.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package wrrlocality provides an implementation of the wrr locality LB policy, +// as defined in [A52 - xDS Custom LB Policies]. +// +// [A52 - xDS Custom LB Policies]: https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md +package wrrlocality + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of wrr_locality balancer. +const Name = "xds_wrr_locality_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Name() string { + return Name +} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + return nil +} + +// LBConfig is the config for the wrr locality balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig + // ChildPolicy is the config for the child policy. 
+ ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg *LBConfig + if err := json.Unmarshal(s, &lbCfg); err != nil { + return nil, fmt.Errorf("xds: invalid LBConfig for wrrlocality: %s, error: %v", string(s), err) + } + if lbCfg == nil || lbCfg.ChildPolicy == nil { + return nil, errors.New("xds: invalidw LBConfig for wrrlocality: child policy field must be set") + } + return lbCfg, nil +} diff --git a/xds/internal/balancer/wrrlocality/balancer_test.go b/xds/internal/balancer/wrrlocality/balancer_test.go new file mode 100644 index 000000000000..9283b02f14b2 --- /dev/null +++ b/xds/internal/balancer/wrrlocality/balancer_test.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package wrrlocality + +import ( + "encoding/json" + "errors" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/grpctest" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestParseConfig(t *testing.T) { + const errParseConfigName = "errParseConfigBalancer" + stub.Register(errParseConfigName, stub.BalancerFuncs{ + ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return nil, errors.New("some error") + }, + }) + + parser := bb{} + tests := []struct { + name string + input string + wantCfg serviceconfig.LoadBalancingConfig + wantErr string + }{ + { + name: "happy-case-round robin-child", + input: `{"childPolicy": [{"round_robin": {}}]}`, + wantCfg: &LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }, + { + name: "invalid-json", + input: "{{invalidjson{{", + wantErr: "invalid character", + }, + + { + name: "child-policy-field-isn't-set", + input: `{}`, + wantErr: "child policy field must be set", + }, + { + name: "child-policy-type-is-empty", + input: `{"childPolicy": []}`, + wantErr: "invalid loadBalancingConfig: no supported policies found in []", + }, + { + name: "child-policy-empty-config", + input: `{"childPolicy": [{"": {}}]}`, + wantErr: "invalid loadBalancingConfig: no supported policies found in []", + }, + { + name: "child-policy-type-isn't-registered", + input: `{"childPolicy": [{"doesNotExistBalancer": {"cluster": "test_cluster"}}]}`, + wantErr: "invalid loadBalancingConfig: no supported policies found in [doesNotExistBalancer]", + }, + { + name: "child-policy-config-is-invalid", + input: `{"childPolicy": [{"errParseConfigBalancer": 
{"cluster": "test_cluster"}}]}`, + wantErr: "error parsing loadBalancingConfig for policy \"errParseConfigBalancer\"", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotCfg, gotErr := parser.ParseConfig(json.RawMessage(test.input)) + // Substring match makes this very tightly coupled to the + // internalserviceconfig.BalancerConfig error strings. However, it + // is important to distinguish the different types of error messages + // possible as the parser has a few defined buckets of ways it can + // error out. + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if test.wantErr != "" { + return + } + if diff := cmp.Diff(gotCfg, test.wantCfg); diff != "" { + t.Fatalf("ParseConfig(%v) got unexpected output, diff (-got +want): %v", test.input, diff) + } + }) + } +} diff --git a/xds/internal/xdsclient/tests/cds_watchers_test.go b/xds/internal/xdsclient/tests/cds_watchers_test.go index d6d02724fc91..3583fa929d96 100644 --- a/xds/internal/xdsclient/tests/cds_watchers_test.go +++ b/xds/internal/xdsclient/tests/cds_watchers_test.go @@ -70,7 +70,7 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) } } - cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw")} + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicyJSON")} if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { return fmt.Errorf("received unepected diff in the cluster resource update: (-want, got):\n%s", diff) } diff --git a/xds/internal/xdsclient/tests/resource_update_test.go 
b/xds/internal/xdsclient/tests/resource_update_test.go index dfb285de3a8d..ff6cf7c756a4 100644 --- a/xds/internal/xdsclient/tests/resource_update_test.go +++ b/xds/internal/xdsclient/tests/resource_update_test.go @@ -802,7 +802,7 @@ func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { } cmpOpts := []cmp.Option{ cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw"), + cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicyJSON"), } if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) diff --git a/xds/internal/xdsclient/xdslbregistry/converter.go b/xds/internal/xdsclient/xdslbregistry/converter.go new file mode 100644 index 000000000000..ef13802b0c12 --- /dev/null +++ b/xds/internal/xdsclient/xdslbregistry/converter.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdslbregistry provides utilities to convert proto load balancing +// configuration, defined by the xDS API spec, to JSON load balancing +// configuration. 
+package xdslbregistry + +import ( + "encoding/json" + "fmt" + "strings" + + v1udpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + "github.com/golang/protobuf/proto" + structpb "github.com/golang/protobuf/ptypes/struct" + + "google.golang.org/grpc/internal/envconfig" +) + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M +) + +// ConvertToServiceConfig converts a proto Load Balancing Policy configuration +// into a json string. Returns an error if: +// - no supported policy found +// - there is more than 16 layers of recursion in the configuration +// - a failure occurs when converting the policy +func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy) (json.RawMessage, error) { + return convertToServiceConfig(lbPolicy, 0) +} + +func convertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { + // "Configurations that require more than 16 levels of recursion are + // considered invalid and should result in a NACK response." - A51 + if depth > 15 { + return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) + } + + // "This function iterate over the list of policy messages in + // LoadBalancingPolicy, attempting to convert each one to gRPC form, + // stopping at the first supported policy." - A52 + for _, policy := range lbPolicy.GetPolicies() { + // The policy message contains a TypedExtensionConfig + // message with the configuration information. 
TypedExtensionConfig in turn + // uses an Any typed typed_config field to store policy configuration of any + // type. This typed_config field is used to determine both the name of a + // policy and the configuration for it, depending on its type: + switch policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl() { + case "type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash": + if !envconfig.XDSRingHash { + continue + } + rhProto := &v3ringhashpb.RingHash{} + if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), rhProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertRingHash(rhProto) + case "type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin": + return makeBalancerConfigJSON("round_robin", json.RawMessage("{}")), nil + case "type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality": + wrrlProto := &v3wrrlocalitypb.WrrLocality{} + if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), wrrlProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertWrrLocality(wrrlProto, depth) + case "type.googleapis.com/xds.type.v3.TypedStruct": + tsProto := &v3cncftypepb.TypedStruct{} + if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) + case "type.googleapis.com/udpa.type.v1.TypedStruct": + tsProto := &v1udpatypepb.TypedStruct{} + if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) + } + // Any entry not in the above 
list is unsupported and will be skipped. + // This includes Least Request as well, since grpc-go does not support + // the Least Request Load Balancing Policy. + } + return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) +} + +// convertRingHash converts a proto representation of the ring_hash LB policy's +// configuration to gRPC JSON format. +func convertRingHash(cfg *v3ringhashpb.RingHash) (json.RawMessage, error) { + if cfg.GetHashFunction() != v3ringhashpb.RingHash_XX_HASH { + return nil, fmt.Errorf("unsupported ring_hash hash function %v", cfg.GetHashFunction()) + } + + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := cfg.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := cfg.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + lbCfgJSON := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) + return makeBalancerConfigJSON("ring_hash_experimental", lbCfgJSON), nil +} + +func convertWrrLocality(cfg *v3wrrlocalitypb.WrrLocality, depth int) (json.RawMessage, error) { + epJSON, err := convertToServiceConfig(cfg.GetEndpointPickingPolicy(), depth+1) + if err != nil { + return nil, fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, cfg) + } + lbCfgJSON := []byte(fmt.Sprintf(`{"childPolicy": %s}`, epJSON)) + return makeBalancerConfigJSON("xds_wrr_locality_experimental", lbCfgJSON), nil +} + +func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. 
+ urls := strings.Split(typeURL, "/") + name := urls[len(urls)-1] + + rawJSON, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) + } + // The Struct contained in the TypedStruct will be returned as-is as the + // configuration JSON object. + return makeBalancerConfigJSON(name, rawJSON), nil +} + +func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { + return []byte(fmt.Sprintf(`[{%q: %s}]`, name, value)) +} diff --git a/xds/internal/xdsclient/xdslbregistry/test/converter_test.go b/xds/internal/xdsclient/xdslbregistry/test/converter_test.go new file mode 100644 index 000000000000..7f31d68f1f8d --- /dev/null +++ b/xds/internal/xdsclient/xdslbregistry/test/converter_test.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package test contains test cases for the xDS LB Policy Registry. 
+package test + +import ( + "encoding/json" + "strings" + "testing" + + v1udpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + "github.com/golang/protobuf/proto" + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/google/go-cmp/cmp" + + _ "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/pretty" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type customLBConfig struct { + serviceconfig.LoadBalancingConfig +} + +// We have these tests in a separate test package in order to not take a +// 
dependency on the internal xDS balancer packages within the xDS Client. +func (s) TestConvertToServiceConfigSuccess(t *testing.T) { + const customLBPolicyName = "myorg.MyCustomLeastRequestPolicy" + stub.Register(customLBPolicyName, stub.BalancerFuncs{ + ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return customLBConfig{}, nil + }, + }) + + tests := []struct { + name string + policy *v3clusterpb.LoadBalancingPolicy + wantConfig *internalserviceconfig.BalancerConfig + rhDisabled bool + }{ + { + name: "ring_hash", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 10, + MaxRingSize: 100, + }, + }, + }, + { + name: "round_robin", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + { + name: "round_robin_ring_hash_use_first_supported", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: 
wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + { + name: "ring_hash_disabled_rh_rr_use_first_supported", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + rhDisabled: true, + }, + { + name: "custom_lb_type_v3_struct", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3cncftypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: "myorg.MyCustomLeastRequestPolicy", + Config: customLBConfig{}, + }, + }, + { + name: "custom_lb_type_v1_struct", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v1udpatypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: "myorg.MyCustomLeastRequestPolicy", + Config: customLBConfig{}, + }, + }, + { + name: "wrr_locality_child_round_robin", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ 
+ { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "wrr_locality_child_custom_lb_type_v3_struct", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3cncftypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + wantConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "myorg.MyCustomLeastRequestPolicy", + Config: customLBConfig{}, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.rhDisabled { + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = false + defer func() { + envconfig.XDSRingHash = oldRingHashSupport + }() + } + rawJSON, err := xdslbregistry.ConvertToServiceConfig(test.policy) + if err != nil { + t.Fatalf("ConvertToServiceConfig(%s) failed: %v", pretty.ToJSON(test.policy), err) + } + bc := &internalserviceconfig.BalancerConfig{} + // The converter registry is not guaranteed to emit json that is + // valid. It's scope is to simply convert from a proto message to + // internal gRPC JSON format. Thus, the tests cause valid JSON to + // eventually be emitted from ConvertToServiceConfig(), but this + // leaves this test brittle over time in case balancer validations + // change over time and add more failure cases. 
The simplicity of + // using this type (to get rid of non determinism in JSON strings) + // outweighs this brittleness, and also there are plans on + // decoupling the unmarshalling and validation step both present in + // this function in the future. In the future if balancer + // validations change, any configurations in this test that become + // invalid will need to be fixed. (need to make sure emissions above + // are valid configuration). Also, once this Unmarshal call is + // partitioned into Unmarshal vs. Validation in separate operations, + // the brittleness of this test will go away. + if err := json.Unmarshal(rawJSON, bc); err != nil { + t.Fatalf("failed to unmarshal JSON: %v", err) + } + if diff := cmp.Diff(bc, test.wantConfig); diff != "" { + t.Fatalf("ConvertToServiceConfig() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} + +// TestConvertToServiceConfigFailure tests failure cases of the xDS LB registry +// of converting proto configuration to JSON configuration. +func (s) TestConvertToServiceConfigFailure(t *testing.T) { + tests := []struct { + name string + policy *v3clusterpb.LoadBalancingPolicy + wantErr string + }{ + { + name: "not xx_hash function", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_MURMUR_HASH_2, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + wantErr: "unsupported ring_hash hash function", + }, + { + name: "no-supported-policy", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + // Not supported by gRPC-Go. 
+ TypedConfig: testutils.MarshalAny(&v3leastrequestpb.LeastRequest{}), + }, + }, + }, + }, + wantErr: "no supported policy found in policy list", + }, + // TODO: test validity right on the boundary of recursion 16 layers + // total. + { + name: "too much recursion", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(&v3roundrobinpb.RoundRobin{}))))))))))))))))))))))), + }, + }, + }, + }, + wantErr: "exceeds max depth", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, gotErr := xdslbregistry.ConvertToServiceConfig(test.policy) + // Test the error substring to test the different root causes of + // errors. This is more brittle over time, but it's important to + // test the root cause of the errors emitted from the + // ConvertToServiceConfig function call. Also, this package owns the + // error strings so breakages won't come unexpectedly. + if gotErr == nil || !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("ConvertToServiceConfig() = %v, wantErr %v", gotErr, test.wantErr) + } + }) + } +} + +// wrrLocality is a helper that takes a proto message and returns a +// WrrLocalityProto with the proto message marshaled into a proto.Any as a +// child. 
+func wrrLocality(m proto.Message) *v3wrrlocalitypb.WrrLocality { + return &v3wrrlocalitypb.WrrLocality{ + EndpointPickingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + }, + } +} + +// wrrLocalityAny takes a proto message and returns a wrr locality proto +// marshaled as an any with an any child set to the marshaled proto message. +func wrrLocalityAny(m proto.Message) *anypb.Any { + return testutils.MarshalAny(wrrLocality(m)) +} diff --git a/xds/internal/xdsclient/xdsresource/test/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/test/unmarshal_cds_test.go new file mode 100644 index 000000000000..3f4c226d74d3 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/test/unmarshal_cds_test.go @@ -0,0 +1,604 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package test contains test cases for unmarshalling of CDS resources. +package test + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + _ "google.golang.org/grpc/balancer/roundrobin" // To register round_robin load balancer. 
+ "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpctest" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + "github.com/golang/protobuf/proto" + anypb "github.com/golang/protobuf/ptypes/any" + structpb "github.com/golang/protobuf/ptypes/struct" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + clusterName = "clusterName" + serviceName = "service" +) + +var emptyUpdate = xdsresource.ClusterUpdate{ClusterName: clusterName, LRSServerConfig: xdsresource.ClusterLRSOff} + +func wrrLocality(m proto.Message) *v3wrrlocalitypb.WrrLocality { + return &v3wrrlocalitypb.WrrLocality{ + EndpointPickingPolicy: 
&v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + }, + } +} + +func wrrLocalityAny(m proto.Message) *anypb.Any { + return testutils.MarshalAny(wrrLocality(m)) +} + +type customLBConfig struct { + serviceconfig.LoadBalancingConfig +} + +// We have this test in a separate test package in order to not take a +// dependency on the internal xDS balancer packages within the xDS Client. +func (s) TestValidateCluster_Success(t *testing.T) { + const customLBPolicyName = "myorg.MyCustomLeastRequestPolicy" + stub.Register(customLBPolicyName, stub.BalancerFuncs{ + ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return customLBConfig{}, nil + }, + }) + + origCustomLBSupport := envconfig.XDSCustomLBPolicy + envconfig.XDSCustomLBPolicy = true + defer func() { + envconfig.XDSCustomLBPolicy = origCustomLBSupport + }() + tests := []struct { + name string + cluster *v3clusterpb.Cluster + wantUpdate xdsresource.ClusterUpdate + wantLBConfig *internalserviceconfig.BalancerConfig + customLBDisabled bool + }{ + { + name: "happy-case-logical-dns", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ + LbEndpoints: []*v3endpointpb.LbEndpoint{{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "dns_host", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 8080, + }, + }, + }, + }, + }, + }, + }}, + }}, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, + 
ClusterType: xdsresource.ClusterTypeLogicalDNS, + DNSHostName: "dns_host:8080", + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happy-case-aggregate-v3", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: []string{"a", "b", "c"}, + }), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, LRSServerConfig: xdsresource.ClusterLRSOff, ClusterType: xdsresource.ClusterTypeAggregate, + PrioritizedClusterNames: []string{"a", "b", "c"}, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happy-case-no-service-name-no-lrs", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happy-case-no-lrs", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + 
EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSOff}, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf}, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case-with-circuitbreakers", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + 
}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + CircuitBreakers: &v3clusterpb.CircuitBreakers{ + Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ + { + Priority: v3corepb.RoutingPriority_DEFAULT, + MaxRequests: wrapperspb.UInt32(512), + }, + { + Priority: v3corepb.RoutingPriority_HIGH, + MaxRequests: nil, + }, + }, + }, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case-with-ring-hash-lb-policy-with-default-config", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 1024, + MaxRingSize: 4096, + }, + }, + }, + { + name: "happiest-case-with-ring-hash-lb-policy-with-none-default-config", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + 
ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }, + }, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, + LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 10, + MaxRingSize: 100, + }, + }, + }, + { + name: "happiest-case-with-ring-hash-lb-policy-configured-through-LoadBalancingPolicy", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + }, + 
wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 10, + MaxRingSize: 100, + }, + }, + }, + { + name: "happiest-case-with-wrrlocality-rr-child-configured-through-LoadBalancingPolicy", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case-with-custom-lb-configured-through-LoadBalancingPolicy", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3cncftypepb.TypedStruct{ + 
TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "myorg.MyCustomLeastRequestPolicy", + Config: customLBConfig{}, + }, + }, + }, + }, + { + name: "custom-lb-env-var-not-set-ignore-load-balancing-policy-use-lb-policy-and-enum", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(20), + MaximumRingSize: wrapperspb.UInt64(200), + }, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 20, + MaxRingSize: 200, + }, + }, + customLBDisabled: true, + }, + { + name: "load-balancing-policy-takes-precedence-over-lb-policy-and-enum", + cluster: 
&v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(20), + MaximumRingSize: wrapperspb.UInt64(200), + }, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &internalserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 10, + MaxRingSize: 100, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.customLBDisabled { + envconfig.XDSCustomLBPolicy = false + defer func() { + envconfig.XDSCustomLBPolicy = true + }() + } + update, err := xdsresource.ValidateClusterAndConstructClusterUpdateForTesting(test.cluster) + if err != nil { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err) + } + // Ignore the raw JSON string into the cluster update. JSON bytes + // are nondeterministic (whitespace etc.) so we cannot reliably + // compare JSON bytes in a test. Thus, marshal into a Balancer + // Config struct and compare on that. 
Only need to test this JSON + // emission here, as this covers the possible output space. + if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "LBPolicy", "LBPolicyJSON")); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) + } + bc := &internalserviceconfig.BalancerConfig{} + if err := json.Unmarshal(update.LBPolicyJSON, bc); err != nil { + t.Fatalf("failed to unmarshal JSON: %v", err) + } + if diff := cmp.Diff(bc, test.wantLBConfig); diff != "" { + t.Fatalf("update.LBConfig got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go index d459717acd23..cd49852d8fcc 100644 --- a/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/xds/internal/xdsclient/xdsresource/type_cds.go @@ -18,6 +18,7 @@ package xdsresource import ( + "encoding/json" "time" "google.golang.org/protobuf/types/known/anypb" @@ -156,6 +157,12 @@ type ClusterUpdate struct { // When we add more support policies, this can be made an interface, and // will be set to different types based on the policy type. LBPolicy *ClusterLBPolicyRingHash + // LBPolicyJSON represents the locality and endpoint picking policy in JSON, + // which will be the child policy of xds_cluster_impl. Once full support for + // this field across the system, the LBPolicy field will switch to this + // field. Right now we keep both to keep the system working even though + // downstream has not added support for this JSON field. + LBPolicyJSON json.RawMessage // OutlierDetection is the outlier detection configuration for this cluster. // If nil, it means this cluster does not use the outlier detection feature. 
diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index e0bc1589b562..966844881351 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -18,6 +18,7 @@ package xdsresource import ( + "encoding/json" "errors" "fmt" "net" @@ -30,11 +31,18 @@ import ( v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) +// ValidateClusterAndConstructClusterUpdateForTesting exports the +// validateClusterAndConstructClusterUpdate function for testing purposes. +var ValidateClusterAndConstructClusterUpdateForTesting = validateClusterAndConstructClusterUpdate + // TransportSocket proto message has a `name` field which is expected to be set // to this value by the management server. const transportSocketName = "envoy.transport_sockets.tls" @@ -70,9 +78,12 @@ const ( func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { var lbPolicy *ClusterLBPolicyRingHash + var lbCfgJSON json.RawMessage + var err error switch cluster.GetLbPolicy() { case v3clusterpb.Cluster_ROUND_ROBIN: lbPolicy = nil // The default is round_robin, and there's no config to set. 
+ lbCfgJSON = []byte(fmt.Sprintf(`[{%q: {"childPolicy": [{"round_robin": {}}]}}]`, "xds_wrr_locality_experimental")) case v3clusterpb.Cluster_RING_HASH: if !envconfig.XDSRingHash { return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) @@ -85,25 +96,18 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // defaults to 8M entries, and limited to 8M entries var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize if min := rhc.GetMinimumRingSize(); min != nil { - if min.GetValue() > ringHashSizeUpperBound { - return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash mininum ring size %v in response: %+v", min.GetValue(), cluster) - } minSize = min.GetValue() } if max := rhc.GetMaximumRingSize(); max != nil { - if max.GetValue() > ringHashSizeUpperBound { - return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maxinum ring size %v in response: %+v", max.GetValue(), cluster) - } maxSize = max.GetValue() } - if minSize > maxSize { - return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize) - } lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize} + + rhLBCfgJSON := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) + lbCfgJSON = []byte(fmt.Sprintf(`[{%q: %s}]`, "ring_hash_experimental", rhLBCfgJSON)) default: return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } - // Process security configuration received from the control plane iff the // corresponding environment variable is set. 
var sc *SecurityConfig @@ -124,11 +128,26 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } } + if cluster.GetLoadBalancingPolicy() != nil && envconfig.XDSCustomLBPolicy { + lbCfgJSON, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy()) + if err != nil { + return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err) + } + // "It will be the responsibility of the XdsClient to validate the + // converted configuration. It will do this by having the gRPC LB policy + // registry parse the configuration." - A52 + bc := &internalserviceconfig.BalancerConfig{} + if err := json.Unmarshal(lbCfgJSON, bc); err != nil { + return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbCfgJSON), err) + } + } + ret := ClusterUpdate{ ClusterName: cluster.GetName(), SecurityCfg: sc, MaxRequests: circuitBreakersFromCluster(cluster), LBPolicy: lbPolicy, + LBPolicyJSON: lbCfgJSON, OutlierDetection: od, } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 533fd85c3984..3b47ae697a99 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -23,7 +23,6 @@ import ( "testing" "time" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/envconfig" @@ -38,7 +37,10 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3aggregateclusterpb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" anypb "github.com/golang/protobuf/ptypes/any" ) @@ -51,6 +53,11 @@ const ( var emptyUpdate = ClusterUpdate{ClusterName: clusterName, LRSServerConfig: ClusterLRSOff} func (s) TestValidateCluster_Failure(t *testing.T) { + oldCustomLBSupport := envconfig.XDSCustomLBPolicy + envconfig.XDSCustomLBPolicy = true + defer func() { + envconfig.XDSCustomLBPolicy = oldCustomLBSupport + }() tests := []struct { name string cluster *v3clusterpb.Cluster @@ -155,13 +162,12 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantErr: true, }, { - name: "ring-hash-min-bound-greater-than-max", + name: "ring-hash-max-bound-greater-than-upper-bound", cluster: &v3clusterpb.Cluster{ LbPolicy: v3clusterpb.Cluster_RING_HASH, LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ - MinimumRingSize: wrapperspb.UInt64(100), - MaximumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), }, }, }, @@ -169,12 +175,29 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantErr: true, }, { - name: "ring-hash-min-bound-greater-than-upper-bound", + name: "ring-hash-max-bound-greater-than-upper-bound-load-balancing-policy", cluster: &v3clusterpb.Cluster{ - LbPolicy: v3clusterpb.Cluster_RING_HASH, - LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ - 
RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ - MinimumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), + }), + }, + }, }, }, }, @@ -182,12 +205,25 @@ func (s) TestValidateCluster_Failure(t *testing.T) { wantErr: true, }, { - name: "ring-hash-max-bound-greater-than-upper-bound", + name: "least-request-unsupported-in-converter", cluster: &v3clusterpb.Cluster{ - LbPolicy: v3clusterpb.Cluster_RING_HASH, - LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ - RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ - MaximumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3leastrequestpb.LeastRequest{}), + }, + }, }, }, }, @@ -243,230 +279,6 @@ func (s) TestValidateCluster_Failure(t *testing.T) { 
} } -func (s) TestValidateCluster_Success(t *testing.T) { - tests := []struct { - name string - cluster *v3clusterpb.Cluster - wantUpdate ClusterUpdate - }{ - { - name: "happy-case-logical-dns", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ - Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ - LbEndpoints: []*v3endpointpb.LbEndpoint{{ - HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ - Endpoint: &v3endpointpb.Endpoint{ - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "dns_host", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 8080, - }, - }, - }, - }, - }, - }, - }}, - }}, - }, - }, - wantUpdate: ClusterUpdate{ - ClusterName: clusterName, - ClusterType: ClusterTypeLogicalDNS, - DNSHostName: "dns_host:8080", - }, - }, - { - name: "happy-case-aggregate-v3", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ - ClusterType: &v3clusterpb.Cluster_CustomClusterType{ - Name: "envoy.clusters.aggregate", - TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ - Clusters: []string{"a", "b", "c"}, - }), - }, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - }, - wantUpdate: ClusterUpdate{ - ClusterName: clusterName, LRSServerConfig: ClusterLRSOff, ClusterType: ClusterTypeAggregate, - PrioritizedClusterNames: []string{"a", "b", "c"}, - }, - }, - { - name: "happy-case-no-service-name-no-lrs", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: 
&v3corepb.AggregatedConfigSource{}, - }, - }, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - }, - wantUpdate: emptyUpdate, - }, - { - name: "happy-case-no-lrs", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSOff}, - }, - { - name: "happiest-case", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf}, - }, - { - name: "happiest-case-with-circuitbreakers", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - CircuitBreakers: &v3clusterpb.CircuitBreakers{ - Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ - 
{ - Priority: v3corepb.RoutingPriority_DEFAULT, - MaxRequests: wrapperspb.UInt32(512), - }, - { - Priority: v3corepb.RoutingPriority_HIGH, - MaxRequests: nil, - }, - }, - }, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - }, - wantUpdate: ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, - }, - { - name: "happiest-case-with-ring-hash-lb-policy-with-default-config", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_RING_HASH, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - }, - wantUpdate: ClusterUpdate{ - ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf, - LBPolicy: &ClusterLBPolicyRingHash{MinimumRingSize: defaultRingHashMinSize, MaximumRingSize: defaultRingHashMaxSize}, - }, - }, - { - name: "happiest-case-with-ring-hash-lb-policy-with-none-default-config", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_RING_HASH, - LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ - RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ 
- MinimumRingSize: wrapperspb.UInt64(10), - MaximumRingSize: wrapperspb.UInt64(100), - }, - }, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - }, - wantUpdate: ClusterUpdate{ - ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: ClusterLRSServerSelf, - LBPolicy: &ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, - }, - }, - } - - oldAggregateAndDNSSupportEnv := envconfig.XDSAggregateAndDNS - envconfig.XDSAggregateAndDNS = true - defer func() { envconfig.XDSAggregateAndDNS = oldAggregateAndDNSSupportEnv }() - oldRingHashSupport := envconfig.XDSRingHash - envconfig.XDSRingHash = true - defer func() { envconfig.XDSRingHash = oldRingHashSupport }() - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - update, err := validateClusterAndConstructClusterUpdate(test.cluster) - if err != nil { - t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err) - } - if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty()); diff != "" { - t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) - } - }) - } -} - func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { // Turn off the env var protection for client-side security. 
origClientSideSecurityEnvVar := envconfig.XDSClientSideSecurity @@ -510,7 +322,7 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { if err != nil { t.Errorf("validateClusterAndConstructClusterUpdate() failed: %v", err) } - if diff := cmp.Diff(wantUpdate, gotUpdate); diff != "" { + if diff := cmp.Diff(wantUpdate, gotUpdate, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, got):\n%s", diff) } } @@ -1403,7 +1215,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { if (err != nil) != test.wantErr { t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) } - if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmp.AllowUnexported(regexp.Regexp{})); diff != "" { + if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmp.AllowUnexported(regexp.Regexp{}), cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) } }) @@ -1545,7 +1357,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { if name != test.wantName { t.Errorf("unmarshalClusterResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { t.Errorf("unmarshalClusterResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) } }) @@ -1695,7 +1507,7 @@ func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { if (err != nil) != test.wantErr { t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) } - if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty()); diff != 
"" { + if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) } }) From de11139ae6d0e235ee0cb8789725a46d7bf108e2 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 26 Apr 2023 09:50:03 -0700 Subject: [PATCH 891/998] clusterresolver: improve tests (#6188) --- resolver/manual/manual.go | 11 +- .../clusterresolver/e2e_test/balancer_test.go | 1103 +++++++++++++++-- .../clusterresolver/e2e_test/eds_impl_test.go | 6 +- .../clusterresolver/resource_resolver_test.go | 829 ------------- 4 files changed, 1019 insertions(+), 930 deletions(-) diff --git a/resolver/manual/manual.go b/resolver/manual/manual.go index f6e7b5ae3581..f27978e1281f 100644 --- a/resolver/manual/manual.go +++ b/resolver/manual/manual.go @@ -21,6 +21,8 @@ package manual import ( + "sync" + "google.golang.org/grpc/resolver" ) @@ -50,6 +52,7 @@ type Resolver struct { scheme string // Fields actually belong to the resolver. + mu sync.Mutex // Guards access to CC. CC resolver.ClientConn bootstrapState *resolver.State } @@ -62,8 +65,10 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) + r.mu.Lock() r.CC = cc + r.mu.Unlock() + r.BuildCallback(target, cc, opts) if r.bootstrapState != nil { r.UpdateState(*r.bootstrapState) } @@ -87,10 +92,14 @@ func (r *Resolver) Close() { // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() r.CC.UpdateState(s) + r.mu.Unlock() } // ReportError calls CC.ReportError. 
func (r *Resolver) ReportError(err error) { + r.mu.Lock() r.CC.ReportError(err) + r.mu.Unlock() } diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go index 7eaf29e5e1fa..3d0d08a3c983 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -19,10 +19,12 @@ package e2e_test import ( "context" "fmt" + "sort" "strings" "testing" "time" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -31,16 +33,19 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" @@ -48,6 +53,124 @@ import ( _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the "cds_experimental" LB policy. 
) +// makeAggregateClusterResource returns an aggregate cluster resource with the +// given name and list of child names. +func makeAggregateClusterResource(name string, childNames []string) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: name, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: childNames, + }), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + } +} + +// makeLogicalDNSClusterResource returns a LOGICAL_DNS cluster resource with the +// given name and given DNS host and port. +func makeLogicalDNSClusterResource(name, dnsHost string, dnsPort uint32) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: name, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ + LbEndpoints: []*v3endpointpb.LbEndpoint{{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: dnsHost, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: dnsPort, + }, + }, + }, + }, + }, + }, + }}, + }}, + }, + } +} + +// setupDNS unregisters the DNS resolver and registers a manual resolver for the +// same scheme. This allows the test to mock the DNS resolution by supplying the +// addresses of the test backends. 
+// +// Returns the following: +// - a channel onto which the DNS target being resolved is written to by the +// mock DNS resolver +// - a channel to notify close of the DNS resolver +// - a channel to notify re-resolution requests to the DNS resolver +// - a manual resolver which is used to mock the actual DNS resolution +// - a cleanup function which re-registers the original DNS resolver +func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOptions, *manual.Resolver, func()) { + targetCh := make(chan resolver.Target, 1) + closeCh := make(chan struct{}, 1) + resolveNowCh := make(chan resolver.ResolveNowOptions, 1) + + mr := manual.NewBuilderWithScheme("dns") + mr.BuildCallback = func(target resolver.Target, _ resolver.ClientConn, _ resolver.BuildOptions) { targetCh <- target } + mr.CloseCallback = func() { closeCh <- struct{}{} } + mr.ResolveNowCallback = func(opts resolver.ResolveNowOptions) { resolveNowCh <- opts } + + dnsResolverBuilder := resolver.Get("dns") + resolver.UnregisterForTesting("dns") + resolver.Register(mr) + + return targetCh, closeCh, resolveNowCh, mr, func() { resolver.Register(dnsResolverBuilder) } +} + +// setupAndDial performs common setup across all tests +// +// - creates an xDS client with the passed in bootstrap contents +// - creates a manual resolver that configures `cds_experimental` as the +// top-level LB policy. +// - creates a ClientConn to talk to the test backends +// +// Returns a function to close the ClientConn and the xDS client. +func setupAndDial(t *testing.T, bootstrapContents []byte) (*grpc.ClientConn, func()) { + t.Helper() + + // Create an xDS client for use by the cluster_resolver LB policy. 
+ xdsC, xdsClose, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + + // Create a manual resolver and push a service config specifying the use of + // the cds LB policy as the top-level LB policy, and a corresponding config + // with a single cluster. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cds_experimental":{ + "cluster": "%s" + } + }] + }`, clusterName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsC)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + xdsClose() + t.Fatalf("Failed to dial local test server: %v", err) + } + return cc, func() { + xdsClose() + cc.Close() + } +} + // TestErrorFromParentLB_ConnectionError tests the case where the parent of the // clusterresolver LB policy sends its a connection error. The parent policy, // CDS LB policy, sends a connection error when the ADS stream to the management @@ -76,7 +199,6 @@ func (s) TestErrorFromParentLB_ConnectionError(t *testing.T) { }) defer cleanup() - // Start a test backend and extract its host and port. server := stubserver.StartTestService(t, nil) defer server.Stop() @@ -93,37 +215,14 @@ func (s) TestErrorFromParentLB_ConnectionError(t *testing.T) { t.Fatal(err) } - // Create an xDS xdsClient for use by the cluster_resolver LB policy. 
- xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) - if err != nil { - t.Fatalf("Failed to create xDS client: %v", err) - } - defer close() - - // Create a manual resolver and push a service config specifying the use of - // the cds LB policy as the top-level LB policy, and a corresponding config - // with a single cluster. - r := manual.NewBuilderWithScheme("whatever") - jsonSC := fmt.Sprintf(`{ - "loadBalancingConfig":[{ - "cds_experimental":{ - "cluster": "%s" - } - }] - }`, clusterName) - scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) - r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) - - // Create a ClientConn and make a successful RPC. - cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) + t.Fatalf("EmptyCall() failed: %v", err) } // Close the listener and ensure that the ADS stream breaks. @@ -134,10 +233,10 @@ func (s) TestErrorFromParentLB_ConnectionError(t *testing.T) { default: } - // Ensure that RPCs continue to succeed for the next one second. + // Ensure that RPCs continue to succeed for the next second. 
for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) + t.Fatalf("EmptyCall() failed: %v", err) } } } @@ -183,7 +282,6 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { }) defer cleanup() - // Start a test backend and extract its host and port. server := stubserver.StartTestService(t, nil) defer server.Stop() @@ -200,33 +298,10 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { t.Fatal(err) } - // Create an xDS xdsClient for use by the cluster_resolver LB policy. - xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) - if err != nil { - t.Fatalf("Failed to create xDS client: %v", err) - } - defer close() - - // Create a manual resolver and push a service config specifying the use of - // the cds LB policy as the top-level LB policy, and a corresponding config - // with a single cluster. - r := manual.NewBuilderWithScheme("whatever") - jsonSC := fmt.Sprintf(`{ - "loadBalancingConfig":[{ - "cds_experimental":{ - "cluster": "%s" - } - }] - }`, clusterName) - scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) - r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) - - // Create a ClientConn that kick starts the xDS workflow. - cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() // Wait for the EDS resource to be requested. 
select { @@ -238,7 +313,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { // Ensure that a successful RPC can be made. client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) + t.Fatalf("EmptyCall() failed: %v", err) } // Delete the cluster resource from the mangement server. @@ -263,7 +338,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { break } if err != nil { - t.Logf("EmptyCall RPC failed: %v", err) + t.Logf("EmptyCall failed: %v", err) } } if ctx.Err() != nil { @@ -298,7 +373,7 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); err != nil { - t.Logf("EmptyCall RPC failed: %v", err) + t.Logf("EmptyCall failed: %v", err) continue } break @@ -308,11 +383,11 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { } } -// TestEDSResourceRemoved tests the case where the EDS resource requested by the -// clusterresolver LB policy is removed from the management server. The test +// TestEDS_ResourceRemoved tests the case where the EDS resource requested by +// the clusterresolver LB policy is removed from the management server. The test // verifies that the EDS watch is not canceled and that RPCs continue to succeed // with the previously received configuration. -func (s) TestEDSResourceRemoved(t *testing.T) { +func (s) TestEDS_ResourceRemoved(t *testing.T) { // Start an xDS management server that uses a couple of channels to // notify the test about the following events: // - an EDS requested with the expected resource name is requested @@ -346,7 +421,6 @@ func (s) TestEDSResourceRemoved(t *testing.T) { }) defer cleanup() - // Start a test backend and extract its host and port. 
server := stubserver.StartTestService(t, nil) defer server.Stop() @@ -363,37 +437,14 @@ func (s) TestEDSResourceRemoved(t *testing.T) { t.Fatal(err) } - // Create an xDS xdsClient for use by the cluster_resolver LB policy. - xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) - if err != nil { - t.Fatalf("Failed to create xDS client: %v", err) - } - defer close() - - // Create a manual resolver and push a service config specifying the use of - // the cds LB policy as the top-level LB policy, and a corresponding config - // with a single cluster. - r := manual.NewBuilderWithScheme("whatever") - jsonSC := fmt.Sprintf(`{ - "loadBalancingConfig":[{ - "cds_experimental":{ - "cluster": "%s" - } - }] - }`, clusterName) - scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) - r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) - - // Create a ClientConn and make a successful RPC. - cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() client := testgrpc.NewTestServiceClient(cc) if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) + t.Fatalf("EmptyCall() failed: %v", err) } // Delete the endpoints resource from the mangement server. @@ -402,10 +453,11 @@ func (s) TestEDSResourceRemoved(t *testing.T) { t.Fatal(err) } - // Ensure that RPCs continue to succeed for the next one second, and that the EDS watch is not canceled. + // Ensure that RPCs continue to succeed for the next second, and that the + // EDS watch is not canceled. 
for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) + t.Fatalf("EmptyCall() failed: %v", err) } select { case <-edsResourceCanceledCh: @@ -414,3 +466,862 @@ func (s) TestEDSResourceRemoved(t *testing.T) { } } } + +// TestEDS_ClusterResourceDoesNotContainEDSServiceName tests the case where the +// Cluster resource sent by the management server does not contain an EDS +// service name. The test verifies that the cluster_resolver LB policy uses the +// cluster name for the EDS resource. +func (s) TestEDS_ClusterResourceDoesNotContainEDSServiceName(t *testing.T) { + edsResourceCh := make(chan string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) > 0 { + select { + case edsResourceCh <- req.GetResourceNames()[0]: + default: + } + } + return nil + }, + }) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources with the same name in the management server. The cluster resource does not specify an EDS service name. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case name := <-edsResourceCh: + if name != clusterName { + t.Fatalf("Received EDS request with resource name %q, want %q", name, clusterName) + } + } +} + +// TestEDS_ClusterResourceUpdates verifies different scenarios with regards to +// cluster resource updates. +// +// - The first cluster resource contains an eds_service_name. The test verifies +// that an EDS request is sent for the received eds_service_name. It also +// verifies that a subsequent RPC gets routed to a backend belonging to that +// service name. +// - The next cluster resource update contains no eds_service_name. The test +// verifies that a subsequent EDS request is sent for the cluster_name and +// that the previously received eds_service_name is no longer requested. It +// also verifies that a subsequent RPC gets routed to a backend belonging to +// the service represented by the cluster_name. +// - The next cluster resource update changes the circuit breaking +// configuration, but does not change the service name. 
The test verifies +// that a subsequent RPC gets routed to the same backend as before. +func (s) TestEDS_ClusterResourceUpdates(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server that pushes the EDS resource names onto a + // channel. + edsResourceNameCh := make(chan []string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) == 0 { + // This is the case for ACKs. Do nothing here. + return nil + } + select { + case <-ctx.Done(): + case edsResourceNameCh <- req.GetResourceNames(): + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS resource identified by the eds_service_name, + // and the second backend is used for the EDS resource identified by the + // cluster_name. + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])}), + e2e.DefaultEndpoint(clusterName, "localhost", []uint32{uint32(ports[1])}), + }, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Ensure EDS watch is registered for eds_service_name. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case names := <-edsResourceNameCh: + if !cmp.Equal(names, []string{edsServiceName}) { + t.Fatalf("Received EDS request with resource names %v, want %v", names, []string{edsServiceName}) + } + } + + // Change the cluster resource to not contain an eds_service_name. + resources.Clusters = []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)} + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that an EDS watch for eds_service_name is canceled and new watch + // for cluster_name is registered. The actual order in which this happens is + // not deterministic, i.e the watch for old resource could be canceled + // before the new one is registered or vice-versa. In either case, + // eventually, we want to see a request to the management server for just + // the cluster_name. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + names := <-edsResourceNameCh + if cmp.Equal(names, []string{clusterName}) { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for old EDS watch %q to be canceled and new one %q to be registered", edsServiceName, clusterName) + } + + // Make a RPC, and ensure that it gets routed to second backend, + // corresponding to the cluster_name. 
+ for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + continue + } + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for EmptyCall() to be routed to correct backend %q", addrs[1].Addr) + } + + // Change cluster resource circuit breaking count. + resources.Clusters[0].CircuitBreakers = &v3clusterpb.CircuitBreakers{ + Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ + { + Priority: v3corepb.RoutingPriority_DEFAULT, + MaxRequests: wrapperspb.UInt32(512), + }, + }, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to get routed to the second backend for the + // next second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[1].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[1].Addr) + } + } +} + +// TestAggregateCluster_WithTwoEDSClusters tests the case where the top-level +// cluster resource is an aggregate cluster. It verifies that RPCs fail when the +// management server has not responded to all requested EDS resources, and also +// that RPCs are routed to the highest priority cluster once all requested EDS +// resources have been sent by the management server. +func (s) TestAggregateCluster_WithTwoEDSClusters(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server that pushes the EDS resource names onto a + // channel when requested. 
+ edsResourceNameCh := make(chan []string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) == 0 { + // This is the case for ACKs. Do nothing here. + return nil + } + select { + case edsResourceNameCh <- req.GetResourceNames(): + case <-ctx.Done(): + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend belongs to EDS cluster "cluster-1", while the second backend + // belongs to EDS cluster "cluster-2". + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster, two EDS clusters and only one endpoints + // resource (corresponding to the first EDS cluster) in the management + // server. + const clusterName1 = clusterName + "-cluster-1" + const clusterName2 = clusterName + "-cluster-2" + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Wait for both EDS resources to be requested. 
+ func() { + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + select { + case names := <-edsResourceNameCh: + // Copy and sort the sortedNames to avoid racing with an + // OnStreamRequest call. + sortedNames := make([]string, len(names)) + copy(sortedNames, names) + sort.Strings(sortedNames) + if cmp.Equal(sortedNames, []string{clusterName1, clusterName2}) { + return + } + default: + } + } + }() + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for all EDS resources %v to be requested", []string{clusterName1, clusterName2}) + } + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the management server has not responded with all EDS resources + // requested. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Update the management server with the second EDS resource. + resources.Endpoints = append(resources.Endpoints, e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])})) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Make an RPC and ensure that it gets routed to cluster-1, implicitly + // higher priority than cluster-2. + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange tests the case where +// the top-level cluster resource is an aggregate cluster. It verifies that RPCs +// are routed to the highest priority EDS cluster. 
+func (s) TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange(t *testing.T) { + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend belongs to EDS cluster "cluster-1", while the second backend + // belongs to EDS cluster "cluster-2". + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster, two EDS clusters and the corresponding + // endpoints resources in the management server. + const clusterName1 = clusterName + "cluster-1" + const clusterName2 = clusterName + "cluster-2" + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])}), + e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])}), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC and ensure that it gets routed to cluster-1, implicitly + // higher priority than cluster-2. 
+ client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Swap the priorities of the EDS clusters in the aggregate cluster. + resources.Clusters = []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName2, clusterName1}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for RPCs to get routed to cluster-2, which is now implicitly higher + // priority than cluster-1, after the priority switch above. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatal("Timeout waiting for RPCs to be routed to cluster-2 after priority switch") + } +} + +// TestAggregateCluster_WithOneDNSCluster tests the case where the top-level +// cluster resource is an aggregate cluster that resolves to a single +// LOGICAL_DNS cluster. The test verifies that RPCs can be made to backends that +// make up the LOGICAL_DNS cluster. +func (s) TestAggregateCluster_WithOneDNSCluster(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends. 
+ servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, _ := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to a single LOGICAL_DNS cluster. + const ( + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{dnsClusterName}), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // child policy for a LOGICAL_DNS cluster is pick_first by default. 
+ client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_WithEDSAndDNS tests the case where the top-level cluster +// resource is an aggregate cluster that resolves to an EDS and a LOGICAL_DNS +// cluster. The test verifies that RPCs fail until both clusters are resolved to +// endpoints, and RPCs are routed to the higher priority EDS cluster. +func (s) TestAggregateCluster_WithEDSAndDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server that pushes the name of the requested EDS + // resource onto a channel. + edsResourceCh := make(chan string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) > 0 { + select { + case edsResourceCh <- req.GetResourceNames()[0]: + default: + } + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. Also + // configure an endpoints resource for the EDS cluster. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that an EDS request is sent for the expected resource name. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case name := <-edsResourceCh: + if name != edsClusterName { + t.Fatalf("Received EDS request with resource name %q, want %q", name, edsClusterName) + } + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the DNS resolver has not responded with endpoint addresses. 
+ client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // EDS cluster is of higher priority than the LOGICAL_DNS cluster. + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_SwitchEDSAndDNS tests the case where the top-level +// cluster resource is an aggregate cluster. It initially resolves to a single +// EDS cluster. The test verifies that RPCs are routed to backends in the EDS +// cluster. Subsequently, the aggregate cluster resolves to a single DNS +// cluster. The test verifies that RPCs are successful, this time to backends in +// the DNS cluster. +func (s) TestAggregateCluster_SwitchEDSAndDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. 
+ servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to a single EDS cluster. Also, + // configure the underlying EDS cluster (and the corresponding endpoints + // resource) and DNS cluster (will be used later in the test). + const ( + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsServiceName}), + e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the RPC is routed to the appropriate backend. + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Update the aggregate cluster to point to a single DNS cluster. 
+ resources.Clusters = []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{dnsClusterName}), + e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Ensure that start getting routed to the backend corresponding to the + // LOGICAL_DNS cluster. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)) + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[1].Addr) + } +} + +// TestAggregateCluster_BadEDS_GoodToBadDNS tests the case where the top-level +// cluster is an aggregate cluster that resolves to an EDS and LOGICAL_DNS +// cluster. The test first asserts that no RPCs can be made after receiving an +// EDS response with zero endpoints because no update has been received from the +// DNS resolver yet. Once the DNS resolver pushes an update, the test verifies +// that we switch to the DNS cluster and can make a successful RPC. At this +// point when the DNS cluster returns an error, the test verifies that RPCs are +// still successful. This is the expected behavior because pick_first (the leaf +// policy) ignores resolver errors when it is not in TransientFailure. 
+func (s) TestAggregateCluster_BadEDS_GoodToBadDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, _ := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS + // cluster. Also configure an empty endpoints resource for the EDS cluster + // that contains no endpoints. + const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the EDS resource came back with no endpoints, and we are yet to + // push an update through the DNS resolver. 
+ client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that RPCs start getting routed to the first backend since the + // child policy for a LOGICAL_DNS cluster is pick_first by default. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Logf("EmptyCall() failed: %v", err) + continue + } + if peer.Addr.String() == addrs[0].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[0].Addr) + } + + // Push an error from the DNS resolver as well. + dnsErr := fmt.Errorf("DNS error") + dnsR.ReportError(dnsErr) + + // Ensure that RPCs continue to succeed for the next second. 
+ for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + } +} + +// TestAggregateCluster_BadEDS_BadDNS tests the case where the top-level cluster +// is an aggregate cluster that resolves to an EDS and LOGICAL_DNS cluster. When +// the EDS request returns a resource that contains no endpoints, the test +// verifies that we switch to the DNS cluster. When the DNS cluster returns an +// error, the test verifies that RPCs fail with the error returned by the DNS +// resolver, and thus, ensures that pick_first (the leaf policy) does not ignore +// resolver errors. +func (s) TestAggregateCluster_BadEDS_BadDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS + // cluster. Also configure an empty endpoints resource for the EDS cluster + // that contains no endpoints. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the EDS resource came back with no endpoints, and we are yet to + // push an update through the DNS resolver. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Push an error from the DNS resolver as well. 
+ dnsErr := fmt.Errorf("DNS error") + dnsR.ReportError(dnsErr) + + // Ensure that the error returned from the DNS resolver is reported to the + // caller of the RPC. + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code != codes.Unavailable { + t.Fatalf("EmptyCall() failed with code %s, want %s", code, codes.Unavailable) + } + if err == nil || !strings.Contains(err.Error(), dnsErr.Error()) { + t.Fatalf("EmptyCall() failed with error %v, want %v", err, dnsErr) + } +} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 7aa951bfec50..053b56f0dc86 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -83,11 +83,9 @@ func backendAddressesAndPorts(t *testing.T, servers []*stubserver.StubServer) ([ } func startTestServiceBackends(t *testing.T, numBackends int) ([]*stubserver.StubServer, func()) { - servers := make([]*stubserver.StubServer, numBackends) + var servers []*stubserver.StubServer for i := 0; i < numBackends; i++ { - servers[i] = &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, - } + servers = append(servers, stubserver.StartTestService(t, nil)) servers[i].StartServer() } diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go index 2252373e56e6..0ae151ee5215 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_test.go @@ -19,16 +19,9 @@ package clusterresolver import ( - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" xdstestutils 
"google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -49,86 +42,6 @@ func init() { testEDSUpdates = append(testEDSUpdates, parseEDSRespProtoForTesting(clab2.Build())) } -// Test the simple case with one EDS resource to watch. -func (s) TestResourceResolverOneEDSResource(t *testing.T) { - for _, test := range []struct { - name string - clusterName, edsName string - wantName string - edsUpdate xdsresource.EndpointsUpdate - want []priorityConfig - }{ - {name: "watch EDS", - clusterName: testClusterName, - edsName: testEDSService, - wantName: testEDSService, - edsUpdate: testEDSUpdates[0], - want: []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - EDSServiceName: testEDSService, - }, - edsResp: testEDSUpdates[0], - childNameGen: newNameGenerator(0), - }}, - }, - { - name: "watch EDS no EDS name", // Will watch for cluster name. 
- clusterName: testClusterName, - wantName: testClusterName, - edsUpdate: testEDSUpdates[1], - want: []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }, - edsResp: testEDSUpdates[1], - childNameGen: newNameGenerator(0), - }}, - }, - } { - t.Run(test.name, func(t *testing.T) { - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeEDS, - Cluster: test.clusterName, - EDSServiceName: test.edsName, - }}) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - gotEDSName, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName != test.wantName { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName, test.wantName) - } - - // Invoke callback, should get an update. - fakeClient.InvokeWatchEDSCallback("", test.edsUpdate, nil) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - // Close the resource resolver. Should stop EDS watch. 
- rr.stop() - edsNameCanceled, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled != test.wantName { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, testEDSService) - } - }) - } -} - func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOptions, *manual.Resolver, func()) { dnsTargetCh := make(chan resolver.Target, 1) dnsCloseCh := make(chan struct{}, 1) @@ -144,745 +57,3 @@ func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOp } return dnsTargetCh, dnsCloseCh, resolveNowCh, mr, func() { newDNS = oldNewDNS } } - -// Test the simple case of one DNS resolver. -func (s) TestResourceResolverOneDNSResource(t *testing.T) { - for _, test := range []struct { - name string - target string - wantTarget resolver.Target - addrs []resolver.Address - want []priorityConfig - }{ - { - name: "watch DNS", - target: testDNSTarget, - wantTarget: resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}, - addrs: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}, - want: []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, - childNameGen: newNameGenerator(0), - }}, - }, - } { - t.Run(test.name, func(t *testing.T) { - dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() - defer cleanup() - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: test.target, - }}) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - select { - case target := <-dnsTargetCh: - if diff := cmp.Diff(target, test.wantTarget); diff != "" { - t.Fatalf("got unexpected DNS 
target to watch, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for building DNS resolver") - } - - // Invoke callback, should get an update. - dnsR.UpdateState(resolver.State{Addresses: test.addrs}) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - // Close the resource resolver. Should close the underlying resolver. - rr.stop() - select { - case <-dnsCloseCh: - case <-ctx.Done(): - t.Fatal("Timed out waiting for closing DNS resolver") - } - }) - } -} - -// Test that changing EDS name would cause a cancel and a new watch. -// -// Also, changes that don't actually change EDS names (e.g. changing cluster -// name but not service name, or change circuit breaking count) doesn't do -// anything. -// -// - update DiscoveryMechanism -// - same EDS name to watch, but different MaxCurrentCount: no new watch -// - different cluster name, but same EDS name: no new watch -func (s) TestResourceResolverChangeEDSName(t *testing.T) { - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - EDSServiceName: testEDSService, - }}) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName1 != testEDSService { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testEDSService) - } - - // Invoke callback, should get an update. 
- fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - EDSServiceName: testEDSService, - }, - edsResp: testEDSUpdates[0], - childNameGen: newNameGenerator(0), - }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Change name to watch. - rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }}) - edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled1 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, testEDSService) - } - gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName2 != testClusterName { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName2, testClusterName) - } - // Shouldn't get any update, because the new resource hasn't received any - // update. - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - select { - case u := <-rr.updateChannel: - t.Fatalf("get unexpected update %+v", u) - case <-shortCtx.Done(): - } - - // Invoke callback, should get an update. 
- fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }, - edsResp: testEDSUpdates[1], - childNameGen: newNameGenerator(1), - }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Change circuit breaking count, should get an update with new circuit - // breaking count, but shouldn't trigger new watch. - rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - MaxConcurrentRequests: newUint32(123), - }}) - shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - if n, err := fakeClient.WaitForWatchEDS(shortCtx); err == nil { - t.Fatalf("unexpected watch started for EDS: %v", n) - } - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - MaxConcurrentRequests: newUint32(123), - }, - edsResp: testEDSUpdates[1], - childNameGen: newNameGenerator(1), - }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Close the resource resolver. Should stop EDS watch. 
- rr.stop() - edsNameCanceled, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled != gotEDSName2 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled, gotEDSName2) - } -} - -// Test the case that same resources with the same priority should not add new -// EDS watch, and also should not trigger an update. -func (s) TestResourceResolverNoChangeNoUpdate(t *testing.T) { - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{ - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[0], - }, - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[1], - MaxConcurrentRequests: newUint32(100), - }, - }) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName1 != testClusterNames[0] { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterNames[0]) - } - gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName2 != testClusterNames[1] { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName2, testClusterNames[1]) - } - - // Invoke callback, should get an update. - fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) - // Shouldn't send update, because only one resource received an update. 
- shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - select { - case u := <-rr.updateChannel: - t.Fatalf("get unexpected update %+v", u) - case <-shortCtx.Done(): - } - fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{ - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[0], - }, - edsResp: testEDSUpdates[0], - childNameGen: newNameGenerator(0), - }, - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[1], - MaxConcurrentRequests: newUint32(100), - }, - edsResp: testEDSUpdates[1], - childNameGen: newNameGenerator(1), - }, - }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Send the same resources with the same priorities, shouldn't any change. - rr.updateMechanisms([]DiscoveryMechanism{ - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[0], - }, - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[1], - MaxConcurrentRequests: newUint32(100), - }, - }) - shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - if n, err := fakeClient.WaitForWatchEDS(shortCtx); err == nil { - t.Fatalf("unexpected watch started for EDS: %v", n) - } - shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - select { - case u := <-rr.updateChannel: - t.Fatalf("unexpected update: %+v", u) - case <-shortCtx.Done(): - } - - // Close the resource resolver. Should stop EDS watch. 
- rr.stop() - edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled1 != gotEDSName1 && edsNameCanceled1 != gotEDSName2 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled1, gotEDSName1, gotEDSName2) - } - edsNameCanceled2, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled2 != gotEDSName2 && edsNameCanceled2 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled2, gotEDSName1, gotEDSName2) - } -} - -// Test the case that same resources are watched, but with different priority. -// Should not add new EDS watch, but should trigger an update with the new -// priorities. -func (s) TestResourceResolverChangePriority(t *testing.T) { - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{ - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[0], - }, - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[1], - }, - }) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName1 != testClusterNames[0] { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterNames[0]) - } - gotEDSName2, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName2 != testClusterNames[1] { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName2, testClusterNames[1]) - } - - // Invoke callback, should get an update. 
- fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) - // Shouldn't send update, because only one resource received an update. - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - select { - case u := <-rr.updateChannel: - t.Fatalf("get unexpected update %+v", u) - case <-shortCtx.Done(): - } - fakeClient.InvokeWatchEDSCallback(gotEDSName2, testEDSUpdates[1], nil) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{ - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[0], - }, - edsResp: testEDSUpdates[0], - childNameGen: newNameGenerator(0), - }, - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[1], - }, - edsResp: testEDSUpdates[1], - childNameGen: newNameGenerator(1), - }, - }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Send the same resources with different priorities, shouldn't trigger - // watch, but should trigger an update with the new priorities. 
- rr.updateMechanisms([]DiscoveryMechanism{ - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[1], - }, - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[0], - }, - }) - shortCtx, shortCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - if n, err := fakeClient.WaitForWatchEDS(shortCtx); err == nil { - t.Fatalf("unexpected watch started for EDS: %v", n) - } - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{ - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[1], - }, - edsResp: testEDSUpdates[1], - childNameGen: newNameGenerator(1), - }, - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterNames[0], - }, - edsResp: testEDSUpdates[0], - childNameGen: newNameGenerator(0), - }, - }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Close the resource resolver. Should stop EDS watch. - rr.stop() - edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled1 != gotEDSName1 && edsNameCanceled1 != gotEDSName2 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled1, gotEDSName1, gotEDSName2) - } - edsNameCanceled2, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled2 != gotEDSName2 && edsNameCanceled2 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v or %v", edsNameCanceled2, gotEDSName1, gotEDSName2) - } -} - -// Test the case that covers resource for both EDS and DNS. 
-func (s) TestResourceResolverEDSAndDNS(t *testing.T) { - dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() - defer cleanup() - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{ - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }, - { - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }, - }) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName1 != testClusterName { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterName) - } - select { - case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { - t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for building DNS resolver") - } - - fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) - // Shouldn't send update, because only one resource received an update. - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - select { - case u := <-rr.updateChannel: - t.Fatalf("get unexpected update %+v", u) - case <-shortCtx.Done(): - } - // Invoke DNS, should get an update. 
- dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{ - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }, - edsResp: testEDSUpdates[0], - childNameGen: newNameGenerator(0), - }, - { - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, - childNameGen: newNameGenerator(1), - }, - }, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Close the resource resolver. Should stop EDS watch. - rr.stop() - edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled1 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, gotEDSName1) - } - select { - case <-dnsCloseCh: - case <-ctx.Done(): - t.Fatal("Timed out waiting for closing DNS resolver") - } -} - -// Test the case that covers resource changing between EDS and DNS. 
-func (s) TestResourceResolverChangeFromEDSToDNS(t *testing.T) { - dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() - defer cleanup() - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }}) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName1 != testClusterName { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterName) - } - - // Invoke callback, should get an update. - fakeClient.InvokeWatchEDSCallback(gotEDSName1, testEDSUpdates[0], nil) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }, - edsResp: testEDSUpdates[0], - childNameGen: newNameGenerator(0), - }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Update to watch DNS instead. Should cancel EDS, and start DNS. 
- rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }}) - select { - case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { - t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for building DNS resolver") - } - edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled1 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, gotEDSName1) - } - - dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, - childNameGen: newNameGenerator(1), - }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Close the resource resolver. Should stop DNS. - rr.stop() - select { - case <-dnsCloseCh: - case <-ctx.Done(): - t.Fatal("Timed out waiting for closing DNS resolver") - } -} - -// Test the case that covers errors for both EDS and DNS. 
-func (s) TestResourceResolverError(t *testing.T) { - dnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS() - defer cleanup() - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{ - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }, - { - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }, - }) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - gotEDSName1, err := fakeClient.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) - } - if gotEDSName1 != testClusterName { - t.Fatalf("xdsClient.WatchEDS called for cluster: %v, want: %v", gotEDSName1, testClusterName) - } - select { - case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { - t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for building DNS resolver") - } - - // Invoke callback with an error, should get an update. - edsErr := fmt.Errorf("EDS error") - fakeClient.InvokeWatchEDSCallback(gotEDSName1, xdsresource.EndpointsUpdate{}, edsErr) - select { - case u := <-rr.updateChannel: - if u.err != edsErr { - t.Fatalf("got unexpected error from update, want %v, got %v", edsErr, u.err) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Invoke DNS with an error, should get an update. - dnsErr := fmt.Errorf("DNS error") - dnsR.ReportError(dnsErr) - select { - case u := <-rr.updateChannel: - if u.err != dnsErr { - t.Fatalf("got unexpected error from update, want %v, got %v", dnsErr, u.err) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - - // Close the resource resolver. Should stop EDS watch. 
- rr.stop() - edsNameCanceled1, err := fakeClient.WaitForCancelEDSWatch(ctx) - if err != nil { - t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) - } - if edsNameCanceled1 != gotEDSName1 { - t.Fatalf("xdsClient.CancelEDS called for %v, want: %v", edsNameCanceled1, gotEDSName1) - } - select { - case <-dnsCloseCh: - case <-ctx.Done(): - t.Fatal("Timed out waiting for closing DNS resolver") - } -} - -// Test re-resolve of the DNS resolver. -func (s) TestResourceResolverDNSResolveNow(t *testing.T) { - dnsTargetCh, dnsCloseCh, resolveNowCh, dnsR, cleanup := setupDNS() - defer cleanup() - fakeClient := fakeclient.NewClient() - rr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient}) - rr.updateMechanisms([]DiscoveryMechanism{{ - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }}) - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - select { - case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { - t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for building DNS resolver") - } - - // Invoke callback, should get an update. 
- dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "1.1.1.1"}, {Addr: "2.2.2.2"}}}) - select { - case u := <-rr.updateChannel: - if diff := cmp.Diff(u.priorities, []priorityConfig{{ - mechanism: DiscoveryMechanism{ - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }, - addresses: []string{"1.1.1.1", "2.2.2.2"}, - childNameGen: newNameGenerator(0), - }}, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != "" { - t.Fatalf("got unexpected resource update, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for update from update channel.") - } - rr.resolveNow() - select { - case <-resolveNowCh: - case <-ctx.Done(): - t.Fatal("Timed out waiting for re-resolve") - } - // Close the resource resolver. Should close the underlying resolver. - rr.stop() - select { - case <-dnsCloseCh: - case <-ctx.Done(): - t.Fatal("Timed out waiting for closing DNS resolver") - } -} From 497436cef13290bc6ea8d596ddc9b3a472d0aad3 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 26 Apr 2023 12:56:27 -0400 Subject: [PATCH 892/998] xds/internal/balancer/outlierdetection: Change string to String (#6222) --- xds/internal/balancer/outlierdetection/balancer.go | 14 ++++++-------- .../balancer/outlierdetection/subconn_wrapper.go | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 749449c2123e..97f5503f38d1 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -828,8 +828,7 @@ func (b *outlierDetectionBalancer) successRateAlgorithm() { successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures) requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) if successRate < requiredSuccessRate { - 
channelz.Infof(logger, b.channelzParentID, "SuccessRate algorithm detected outlier: %s. Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", - addrInfo.string(), successRate, mean, stddev, requiredSuccessRate) + channelz.Infof(logger, b.channelzParentID, "SuccessRate algorithm detected outlier: %s. Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } @@ -856,8 +855,7 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { } failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { - channelz.Infof(logger, b.channelzParentID, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", - addrInfo.string(), failurePercentage) + channelz.Infof(logger, b.channelzParentID, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } @@ -872,7 +870,7 @@ func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) { addrInfo.ejectionTimeMultiplier++ for _, sbw := range addrInfo.sws { sbw.eject() - channelz.Infof(logger, b.channelzParentID, "Subchannel ejected: %s", sbw.string()) + channelz.Infof(logger, b.channelzParentID, "Subchannel ejected: %s", sbw) } } @@ -883,7 +881,7 @@ func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { addrInfo.latestEjectionTimestamp = time.Time{} for _, sbw := range addrInfo.sws { sbw.uneject() - channelz.Infof(logger, b.channelzParentID, "Subchannel unejected: %s", sbw.string()) + channelz.Infof(logger, b.channelzParentID, "Subchannel unejected: %s", sbw) } } @@ -908,11 +906,11 @@ type addressInfo struct { sws []*subConnWrapper } -func 
(a *addressInfo) string() string { +func (a *addressInfo) String() string { var res strings.Builder res.WriteString("[") for _, sw := range a.sws { - res.WriteString(sw.string()) + res.WriteString(sw.String()) } res.WriteString("]") return res.String() diff --git a/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/xds/internal/balancer/outlierdetection/subconn_wrapper.go index be631387b2f3..71a996f29ae0 100644 --- a/xds/internal/balancer/outlierdetection/subconn_wrapper.go +++ b/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -68,6 +68,6 @@ func (scw *subConnWrapper) uneject() { }) } -func (scw *subConnWrapper) string() string { +func (scw *subConnWrapper) String() string { return fmt.Sprintf("%+v", scw.addresses) } From e853dbf004c343da4b8c6204524765ba6fbeef38 Mon Sep 17 00:00:00 2001 From: Gregory Cooke Date: Wed, 26 Apr 2023 15:05:18 -0400 Subject: [PATCH 893/998] authz: add conversion of json to RBAC Audit Logging config (#6192) Add conversion of json to RBAC Audit Logging config --- authz/rbac_translator.go | 94 ++++- authz/rbac_translator_test.go | 644 +++++++++++++++++++++++++++++++++- examples/go.mod | 2 +- examples/go.sum | 4 +- gcp/observability/go.sum | 2 +- go.mod | 2 +- go.sum | 4 +- interop/observability/go.sum | 2 +- stats/opencensus/go.sum | 2 +- 9 files changed, 740 insertions(+), 16 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index b01fc2fcdb1d..6e083cfba8ea 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -28,9 +28,12 @@ import ( "fmt" "strings" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + "google.golang.org/protobuf/types/known/anypb" + 
"google.golang.org/protobuf/types/known/structpb" ) type header struct { @@ -53,11 +56,23 @@ type rule struct { Request request } +type auditLogger struct { + Name string `json:"name"` + Config *structpb.Struct `json:"config"` + IsOptional bool `json:"is_optional"` +} + +type auditLoggingOptions struct { + AuditCondition string `json:"audit_condition"` + AuditLoggers []auditLogger `json:"audit_loggers"` +} + // Represents the SDK authorization policy provided by user. type authorizationPolicy struct { - Name string - DenyRules []rule `json:"deny_rules"` - AllowRules []rule `json:"allow_rules"` + Name string + DenyRules []rule `json:"deny_rules"` + AllowRules []rule `json:"allow_rules"` + AuditLoggingOptions auditLoggingOptions `json:"audit_logging_options"` } func principalOr(principals []*v3rbacpb.Principal) *v3rbacpb.Principal { @@ -266,6 +281,68 @@ func parseRules(rules []rule, prefixName string) (map[string]*v3rbacpb.Policy, e return policies, nil } +// Parse auditLoggingOptions to the associated RBAC protos. The single +// auditLoggingOptions results in two different parsed protos, one for the allow +// policy and one for the deny policy +func (options *auditLoggingOptions) toProtos() (allow *v3rbacpb.RBAC_AuditLoggingOptions, deny *v3rbacpb.RBAC_AuditLoggingOptions, err error) { + allow = &v3rbacpb.RBAC_AuditLoggingOptions{} + deny = &v3rbacpb.RBAC_AuditLoggingOptions{} + + if options.AuditCondition != "" { + rbacCondition, ok := v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition_value[options.AuditCondition] + if !ok { + return nil, nil, fmt.Errorf("failed to parse AuditCondition %v. 
Allowed values {NONE, ON_DENY, ON_ALLOW, ON_DENY_AND_ALLOW}", options.AuditCondition) + } + allow.AuditCondition = v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition(rbacCondition) + deny.AuditCondition = toDenyCondition(v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition(rbacCondition)) + } + + for i := range options.AuditLoggers { + config := &options.AuditLoggers[i] + if config.Config == nil { + return nil, nil, fmt.Errorf("AuditLogger Config field cannot be nil") + } + customConfig, err := anypb.New(config.Config) + if err != nil { + return nil, nil, fmt.Errorf("error parsing custom audit logger config: %v", err) + } + logger := &v3corepb.TypedExtensionConfig{Name: config.Name, TypedConfig: customConfig} + rbacConfig := v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + IsOptional: config.IsOptional, + AuditLogger: logger, + } + allow.LoggerConfigs = append(allow.LoggerConfigs, &rbacConfig) + deny.LoggerConfigs = append(deny.LoggerConfigs, &rbacConfig) + } + + return allow, deny, nil +} + +// Maps the AuditCondition coming from AuditLoggingOptions to the proper +// condition for the deny policy RBAC proto +func toDenyCondition(condition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition) v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition { + // Mapping the overall policy AuditCondition to what it must be for the Deny and Allow RBAC + // See gRPC A59 for details - https://github.com/grpc/proposal/pull/346/files + // |Authorization Policy |DENY RBAC |ALLOW RBAC | + // |----------------------|-------------------|---------------------| + // |NONE |NONE |NONE | + // |ON_DENY |ON_DENY |ON_DENY | + // |ON_ALLOW |NONE |ON_ALLOW | + // |ON_DENY_AND_ALLOW |ON_DENY |ON_DENY_AND_ALLOW | + switch condition { + case v3rbacpb.RBAC_AuditLoggingOptions_NONE: + return v3rbacpb.RBAC_AuditLoggingOptions_NONE + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY: + return v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY + case v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW: + return 
v3rbacpb.RBAC_AuditLoggingOptions_NONE + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW: + return v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY + default: + return v3rbacpb.RBAC_AuditLoggingOptions_NONE + } +} + // translatePolicy translates SDK authorization policy in JSON format to two // Envoy RBAC polices (deny followed by allow policy) or only one Envoy RBAC // allow policy. If the input policy cannot be parsed or is invalid, an error @@ -283,6 +360,10 @@ func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, error) { if len(policy.AllowRules) == 0 { return nil, fmt.Errorf(`"allow_rules" is not present`) } + allowLogger, denyLogger, err := policy.AuditLoggingOptions.toProtos() + if err != nil { + return nil, err + } rbacs := make([]*v3rbacpb.RBAC, 0, 2) if len(policy.DenyRules) > 0 { denyPolicies, err := parseRules(policy.DenyRules, policy.Name) @@ -290,8 +371,9 @@ func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, error) { return nil, fmt.Errorf(`"deny_rules" %v`, err) } denyRBAC := &v3rbacpb.RBAC{ - Action: v3rbacpb.RBAC_DENY, - Policies: denyPolicies, + Action: v3rbacpb.RBAC_DENY, + Policies: denyPolicies, + AuditLoggingOptions: denyLogger, } rbacs = append(rbacs, denyRBAC) } @@ -299,6 +381,6 @@ func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, error) { if err != nil { return nil, fmt.Errorf(`"allow_rules" %v`, err) } - allowRBAC := &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: allowPolicies} + allowRBAC := &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: allowPolicies, AuditLoggingOptions: allowLogger} return append(rbacs, allowRBAC), nil } diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index e8e2f76b5ed8..b1c125a5ce00 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -24,7 +24,10 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + 
"google.golang.org/protobuf/types/known/structpb" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" @@ -42,7 +45,7 @@ func TestTranslatePolicy(t *testing.T) { "deny_rules": [ { "name": "deny_policy_1", - "source": { + "source": { "principals":[ "spiffe://foo.abc", "spiffe://bar*", @@ -117,6 +120,7 @@ func TestTranslatePolicy(t *testing.T) { }, }, }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{}, }, { Action: v3rbacpb.RBAC_ALLOW, @@ -202,6 +206,7 @@ func TestTranslatePolicy(t *testing.T) { }, }, }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{}, }, }, }, @@ -242,6 +247,553 @@ func TestTranslatePolicy(t *testing.T) { }, }, }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{}, + }, + }, + }, + "audit_logging_ALLOW empty config": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "ON_ALLOW", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, 
+ }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_DENY_AND_ALLOW": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + 
"audit_logging_options": { + "audit_condition": "ON_DENY_AND_ALLOW", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + 
}, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_NONE": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: 
&v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_custom_config simple": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {"abc":123, "xyz":"123"}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: 
&v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"})}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"})}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_custom_config nested": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + 
"audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {"abc":123, "xyz":{"abc":123}}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": map[string]interface{}{"abc": 123}})}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "missing audit logger config": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "NONE" + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: 
[]*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{}, + }, + }, + }, + }, + "missing audit condition": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: 
&v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + IsOptional: false, + }, + }, + }, }, }, }, @@ -298,6 +850,83 @@ func TestTranslatePolicy(t *testing.T) { }`, wantErr: `"allow_rules" 0: "headers" 0: unsupported "key" :method`, }, + "bad audit condition": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "ABC", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantErr: `failed to parse AuditCondition ABC`, + }, + "bad audit logger config": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": "abc", + "is_optional": false + } + ] + } + }`, + wantErr: `failed to unmarshal policy`, + }, + "missing custom config audit logger": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "ON_DENY", + "audit_loggers": [ + { + "name": "stdout_logger", + "is_optional": false + } + ] + } + }`, + wantErr: "AuditLogger Config field cannot be nil", + }, } for name, test := range tests { t.Run(name, func(t *testing.T) { @@ -311,3 +940,16 @@ func TestTranslatePolicy(t *testing.T) { }) } } + +func anyPbHelper(t 
*testing.T, in map[string]interface{}) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + if err != nil { + t.Fatal(err) + } + ret, err := anypb.New(pb) + if err != nil { + t.Fatal(err) + } + return ret +} diff --git a/examples/go.mod b/examples/go.mod index 7f79ab2b7e87..7e49c3bce4c9 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -17,7 +17,7 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/envoyproxy/go-control-plane v0.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 // indirect github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 // indirect diff --git a/examples/go.sum b/examples/go.sum index edbb05e00946..8006bf69fef6 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -636,8 +636,8 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 h1:MDgbDqe1rWfGBa+yCcthuqDSHvXFyenZI1U7f1IbWI8= +github.com/envoyproxy/go-control-plane 
v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index a629a01ac913..bb5535fab90b 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -647,7 +647,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= diff --git a/go.mod b/go.mod index 5707e6e7ef71..75ea83d9309c 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe github.com/cncf/xds/go 
v0.0.0-20230310173818-32f1caf87195 - github.com/envoyproxy/go-control-plane v0.11.0 + github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 github.com/golang/glog v1.1.0 github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 diff --git a/go.sum b/go.sum index 925f1485b1dd..bd4e7e729e2d 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLW github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 h1:MDgbDqe1rWfGBa+yCcthuqDSHvXFyenZI1U7f1IbWI8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 6c75fdb93aea..4a52d183476d 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -647,7 +647,7 @@ github.com/docopt/docopt-go 
v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 2dc115e4a27d..43f540fb5667 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -630,7 +630,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= From da1a5eb25d2a3be62c5419c7004f9c0d669ba8bf Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Wed, 26 Apr 2023 16:58:00 -0700 Subject: [PATCH 894/998] tests: nix TestClientDoesntDeadlockWhileWritingErroneousLargeMessages (#6227) --- test/end2end_test.go | 43 ------------------------------------------- 1 file changed, 43 deletions(-) diff --git a/test/end2end_test.go b/test/end2end_test.go index 42dbc1f73e65..824d7c56c041 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -5281,49 +5281,6 @@ func (s) TestStatusInvalidUTF8Details(t *testing.T) { } } -func (s) TestClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T) { - for _, e := range listTestEnv() { - if e.httpHandler { - continue - } - testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t, e) - } -} - -func testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - smallSize := 1024 - te.maxServerReceiveMsgSize = &smallSize - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testgrpc.NewTestServiceClient(te.clientConn()) - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - Payload: payload, - } - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for j := 0; j < 100; j++ { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) - defer cancel() - if _, err := tc.UnaryCall(ctx, req); status.Code(err) != 
codes.ResourceExhausted { - t.Errorf("TestService/UnaryCall(_,_) = _. %v, want code: %s", err, codes.ResourceExhausted) - return - } - } - }() - } - wg.Wait() -} - func (s) TestRPCTimeout(t *testing.T) { for _, e := range listTestEnv() { testRPCTimeout(t, e) From df82147145ba8af646bdd9ba2fdd06db0b2d798c Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 28 Apr 2023 17:05:41 -0400 Subject: [PATCH 895/998] internal: Document gcp/observability 1.0 dependencies in /internal (#6229) --- internal/binarylog/binarylog.go | 3 +++ internal/binarylog/method_logger.go | 9 +++++++++ internal/envconfig/observability.go | 6 ++++++ internal/internal.go | 24 ++++++++++++++++++++++++ 4 files changed, 42 insertions(+) diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index af03a40d990b..755fdebc1b15 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -32,6 +32,9 @@ var grpclogLogger = grpclog.Component("binarylog") // Logger specifies MethodLoggers for method names with a Log call that // takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { GetMethodLogger(methodName string) MethodLogger } diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index 56fcf008d3de..6c3f632215fd 100644 --- a/internal/binarylog/method_logger.go +++ b/internal/binarylog/method_logger.go @@ -49,6 +49,9 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type MethodLogger interface { Log(context.Context, LogEntryConfig) } @@ -65,6 +68,9 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. 
+// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -145,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { toProto() *binlogpb.GrpcLogEntry } diff --git a/internal/envconfig/observability.go b/internal/envconfig/observability.go index 821dd0a7c198..dd314cfb18f4 100644 --- a/internal/envconfig/observability.go +++ b/internal/envconfig/observability.go @@ -28,9 +28,15 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/internal/internal.go b/internal/internal.go index 836b6a3b3e78..42ff39c84446 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -60,6 +60,9 @@ var ( GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An @@ -69,20 +72,35 @@ var ( // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalServerOptions interface{} // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalDialOptions interface{} // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. DisableGlobalDialOptions interface{} // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. 
@@ -93,9 +111,15 @@ var ( // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using From cf89a0b9310ad47c06e119238748edeb74c15f07 Mon Sep 17 00:00:00 2001 From: Gregory Cooke Date: Mon, 1 May 2023 14:37:26 -0400 Subject: [PATCH 896/998] authz: Swap to using the correct TypedConfig in audit logger parsing (#6235) Swap audit logger parsing to using the correct TypedConfig representation --- authz/rbac_translator.go | 12 +++++++++++- authz/rbac_translator_test.go | 31 ++++++++++++++++++------------- 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index 6e083cfba8ea..c4fa996e5995 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -28,6 +28,7 @@ import ( "fmt" "strings" + v1typepb "github.com/cncf/xds/go/udpa/type/v1" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -36,6 +37,10 @@ import ( "google.golang.org/protobuf/types/known/structpb" ) +// This is used when converting a custom config from raw JSON to a TypedStruct +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/" +const typedURLPrefix = "grpc.authz.audit_logging/" + type header struct { Key string Values []string @@ -302,10 +307,15 @@ 
func (options *auditLoggingOptions) toProtos() (allow *v3rbacpb.RBAC_AuditLoggin if config.Config == nil { return nil, nil, fmt.Errorf("AuditLogger Config field cannot be nil") } - customConfig, err := anypb.New(config.Config) + typedStruct := &v1typepb.TypedStruct{ + TypeUrl: typedURLPrefix + config.Name, + Value: config.Config, + } + customConfig, err := anypb.New(typedStruct) if err != nil { return nil, nil, fmt.Errorf("error parsing custom audit logger config: %v", err) } + logger := &v3corepb.TypedExtensionConfig{Name: config.Name, TypedConfig: customConfig} rbacConfig := v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ IsOptional: config.IsOptional, diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index b1c125a5ce00..fff492cff7ab 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + v1typepb "github.com/cncf/xds/go/udpa/type/v1" "github.com/google/go-cmp/cmp" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" @@ -305,7 +306,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, IsOptional: false, }, }, @@ -339,7 +340,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, 
map[string]interface{}{})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, IsOptional: false, }, }, @@ -401,7 +402,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, IsOptional: false, }, }, @@ -435,7 +436,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, IsOptional: false, }, }, @@ -497,7 +498,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, IsOptional: false, }, }, @@ -531,7 +532,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: 
v3rbacpb.RBAC_AuditLoggingOptions_NONE, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, IsOptional: false, }, }, @@ -593,7 +594,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "stdout_logger")}, IsOptional: false, }, }, @@ -627,7 +628,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "stdout_logger")}, IsOptional: false, }, }, @@ -685,7 +686,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": map[string]interface{}{"abc": 
123}})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": map[string]interface{}{"abc": 123}}, "stdout_logger")}, IsOptional: false, }, }, @@ -789,7 +790,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ - {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{})}, + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, IsOptional: false, }, }, @@ -941,15 +942,19 @@ func TestTranslatePolicy(t *testing.T) { } } -func anyPbHelper(t *testing.T, in map[string]interface{}) *anypb.Any { +func anyPbHelper(t *testing.T, in map[string]interface{}, name string) *anypb.Any { t.Helper() pb, err := structpb.NewStruct(in) + typedStruct := &v1typepb.TypedStruct{ + TypeUrl: typedURLPrefix + name, + Value: pb, + } if err != nil { t.Fatal(err) } - ret, err := anypb.New(pb) + customConfig, err := anypb.New(typedStruct) if err != nil { t.Fatal(err) } - return ret + return customConfig } From 019acf2e94863061694848dcc819a911399efac5 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 1 May 2023 14:11:23 -0700 Subject: [PATCH 897/998] stubserver: add option for allowing more services to be registered (#6240) --- internal/stubserver/stubserver.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index 94ef56482e18..3c89ff6823bd 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -93,6 +93,19 @@ func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) return nil } +type registerServiceServerOption struct { + 
grpc.EmptyServerOption + f func(*grpc.Server) +} + +// RegisterServiceServerOption returns a ServerOption that will run f() in +// Start or StartServer with the grpc.Server created before serving. This +// allows other services to be registered on the test server (e.g. ORCA, +// health, or reflection). +func RegisterServiceServerOption(f func(*grpc.Server)) grpc.ServerOption { + return ®isterServiceServerOption{f: f} +} + // StartServer only starts the server. It does not create a client to it. func (ss *StubServer) StartServer(sopts ...grpc.ServerOption) error { if ss.Network == "" { @@ -113,6 +126,13 @@ func (ss *StubServer) StartServer(sopts ...grpc.ServerOption) error { ss.cleanups = append(ss.cleanups, func() { lis.Close() }) s := grpc.NewServer(sopts...) + for _, so := range sopts { + switch x := so.(type) { + case *registerServiceServerOption: + x.f(s) + } + } + testgrpc.RegisterTestServiceServer(s, ss) go s.Serve(lis) ss.cleanups = append(ss.cleanups, s.Stop) From b15382715d129e480e435af6d78048b91aecc95a Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 1 May 2023 14:14:32 -0700 Subject: [PATCH 898/998] xds: make glaze happy for test packages (#6237) --- .../xdsclient/xdslbregistry/{test => tests}/converter_test.go | 4 ++-- .../xdsresource/{test => tests}/unmarshal_cds_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) rename xds/internal/xdsclient/xdslbregistry/{test => tests}/converter_test.go (99%) rename xds/internal/xdsclient/xdsresource/{test => tests}/unmarshal_cds_test.go (99%) diff --git a/xds/internal/xdsclient/xdslbregistry/test/converter_test.go b/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go similarity index 99% rename from xds/internal/xdsclient/xdslbregistry/test/converter_test.go rename to xds/internal/xdsclient/xdslbregistry/tests/converter_test.go index 7f31d68f1f8d..2ffbad845f8b 100644 --- a/xds/internal/xdsclient/xdslbregistry/test/converter_test.go +++ 
b/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go @@ -16,8 +16,8 @@ * */ -// Package test contains test cases for the xDS LB Policy Registry. -package test +// Package tests_test contains test cases for the xDS LB Policy Registry. +package tests_test import ( "encoding/json" diff --git a/xds/internal/xdsclient/xdsresource/test/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go similarity index 99% rename from xds/internal/xdsclient/xdsresource/test/unmarshal_cds_test.go rename to xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go index 3f4c226d74d3..dc4e8591f827 100644 --- a/xds/internal/xdsclient/xdsresource/test/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go @@ -16,8 +16,8 @@ * */ -// Package test contains test cases for unmarshalling of CDS resources. -package test +// Package tests_test contains test cases for unmarshalling of CDS resources. +package tests_test import ( "encoding/json" From 21a339ce4a7d5ff33da438ffc399aeb2ce14da4f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 1 May 2023 16:50:35 -0700 Subject: [PATCH 899/998] grpc: handle RemoveSubConn inline in balancerWrapper (#6228) --- balancer_conn_wrappers.go | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index eeaf5beb72a6..978ed69fdf75 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -102,10 +102,6 @@ type switchToUpdate struct { name string } -type subConnUpdate struct { - acbw *acBalancerWrapper -} - // watcher is a long-running goroutine which reads updates from a channel and // invokes corresponding methods on the underlying balancer. It ensures that // these methods are invoked in a synchronous fashion. 
It also ensures that @@ -132,8 +128,6 @@ func (ccb *ccBalancerWrapper) watcher() { ccb.handleResolverError(update.err) case *switchToUpdate: ccb.handleSwitchTo(update.name) - case *subConnUpdate: - ccb.handleRemoveSubConn(update.acbw) default: logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } @@ -289,14 +283,6 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { ccb.curBalancerName = builder.Name() } -// handleRemoveSucConn handles a request from the underlying balancer to remove -// a subConn. -// -// See comments in RemoveSubConn() for more details. -func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) -} - func (ccb *ccBalancerWrapper) close() { ccb.closed.Fire() <-ccb.done.Done() @@ -326,21 +312,11 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it - // was required to handle the RemoveSubConn() method asynchronously by pushing - // the update onto the update channel. This was done to avoid a deadlock as - // switchBalancer() was holding cc.mu when calling Close() on the old - // balancer, which would in turn call RemoveSubConn(). - // - // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this - // asynchronously is probably not required anymore since the switchTo() method - // handles the balancer switch by pushing the update onto the channel. - // TODO(easwars): Handle this inline. 
acbw, ok := sc.(*acBalancerWrapper) if !ok { return } - ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { From 713bd04130a0d4b796d28d0ee987071f182dd06d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 1 May 2023 17:03:11 -0700 Subject: [PATCH 900/998] orca: minor cleanups (#6239) --- orca/call_metric_recorder_test.go | 17 ++++++-------- orca/internal/internal.go | 39 ++++++++++++++++++++++++++++++- orca/orca.go | 35 +++++++-------------------- orca/orca_test.go | 34 +++++++++++++++------------ orca/service.go | 2 +- 5 files changed, 73 insertions(+), 54 deletions(-) diff --git a/orca/call_metric_recorder_test.go b/orca/call_metric_recorder_test.go index 25d4af371d08..43d0e45291e2 100644 --- a/orca/call_metric_recorder_test.go +++ b/orca/call_metric_recorder_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/orca" + "google.golang.org/grpc/orca/internal" v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" testgrpc "google.golang.org/grpc/interop/grpc_testing" @@ -58,7 +59,6 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { desc string injectMetrics bool wantProto *v3orcapb.OrcaLoadReport - wantErr error }{ { desc: "with custom backend metrics", @@ -73,7 +73,6 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { { desc: "with no custom backend metrics", injectMetrics: false, - wantErr: orca.ErrLoadReportMissing, }, } @@ -146,9 +145,9 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { t.Fatalf("EmptyCall failed: %v", err) } - gotProto, err := orca.ToLoadReport(trailer) - if test.wantErr != nil && !errors.Is(err, test.wantErr) { - t.Fatalf("When retrieving load report, got error: %v, want: %v", err, orca.ErrLoadReportMissing) + gotProto, err := internal.ToLoadReport(trailer) + if err != nil { + t.Fatalf("When 
retrieving load report, got error: %v, want: ", err) } if test.wantProto != nil && !cmp.Equal(gotProto, test.wantProto, cmp.Comparer(proto.Equal)) { t.Fatalf("Received load report in trailer: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(test.wantProto)) @@ -165,7 +164,6 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { desc string injectMetrics bool wantProto *v3orcapb.OrcaLoadReport - wantErr error }{ { desc: "with custom backend metrics", @@ -180,7 +178,6 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { { desc: "with no custom backend metrics", injectMetrics: false, - wantErr: orca.ErrLoadReportMissing, }, } @@ -288,9 +285,9 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { } } - gotProto, err := orca.ToLoadReport(stream.Trailer()) - if test.wantErr != nil && !errors.Is(err, test.wantErr) { - t.Fatalf("When retrieving load report, got error: %v, want: %v", err, orca.ErrLoadReportMissing) + gotProto, err := internal.ToLoadReport(stream.Trailer()) + if err != nil { + t.Fatalf("When retrieving load report, got error: %v, want: ", err) } if test.wantProto != nil && !cmp.Equal(gotProto, test.wantProto, cmp.Comparer(proto.Equal)) { t.Fatalf("Received load report in trailer: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(test.wantProto)) diff --git a/orca/internal/internal.go b/orca/internal/internal.go index 865d94d86945..35b899d9e877 100644 --- a/orca/internal/internal.go +++ b/orca/internal/internal.go @@ -20,7 +20,16 @@ // avoid polluting the godoc of the top-level orca package. package internal -import ibackoff "google.golang.org/grpc/internal/backoff" +import ( + "errors" + "fmt" + + ibackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) // AllowAnyMinReportingInterval prevents clamping of the MinReportingInterval // configured via ServiceOptions, to a minimum of 30s. 
@@ -32,3 +41,31 @@ var AllowAnyMinReportingInterval interface{} // func(*ServiceOptions) // // For testing purposes only. var DefaultBackoffFunc = ibackoff.DefaultExponential.Backoff + +// TrailerMetadataKey is the key in which the per-call backend metrics are +// transmitted. +const TrailerMetadataKey = "endpoint-load-metrics-bin" + +// ToLoadReport unmarshals a binary encoded [ORCA LoadReport] protobuf message +// from md and returns the corresponding struct. The load report is expected to +// be stored as the value for key "endpoint-load-metrics-bin". +// +// If no load report was found in the provided metadata, if multiple load +// reports are found, or if the load report found cannot be parsed, an error is +// returned. +// +// [ORCA LoadReport]: (https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15) +func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { + vs := md.Get(TrailerMetadataKey) + if len(vs) == 0 { + return nil, nil + } + if len(vs) != 1 { + return nil, errors.New("multiple orca load reports found in provided metadata") + } + ret := new(v3orcapb.OrcaLoadReport) + if err := proto.Unmarshal([]byte(vs[0]), ret); err != nil { + return nil, fmt.Errorf("failed to unmarshal load report found in metadata: %v", err) + } + return ret, nil +} diff --git a/orca/orca.go b/orca/orca.go index bacc4a89ab0b..2c958b6902e9 100644 --- a/orca/orca.go +++ b/orca/orca.go @@ -29,21 +29,19 @@ package orca import ( "context" "errors" - "fmt" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + igrpc "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" "google.golang.org/protobuf/proto" - - v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" ) var ( logger = grpclog.Component("orca-backend-metrics") - joinServerOptions = 
internal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + joinServerOptions = igrpc.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) ) const trailerMetadataKey = "endpoint-load-metrics-bin" @@ -144,26 +142,6 @@ func (w *wrappedStream) Context() context.Context { // ErrLoadReportMissing indicates no ORCA load report was found in trailers. var ErrLoadReportMissing = errors.New("orca load report missing in provided metadata") -// ToLoadReport unmarshals a binary encoded [ORCA LoadReport] protobuf message -// from md and returns the corresponding struct. The load report is expected to -// be stored as the value for key "endpoint-load-metrics-bin". -// -// If no load report was found in the provided metadata, ErrLoadReportMissing is -// returned. -// -// [ORCA LoadReport]: (https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15) -func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { - vs := md.Get(trailerMetadataKey) - if len(vs) == 0 { - return nil, ErrLoadReportMissing - } - ret := new(v3orcapb.OrcaLoadReport) - if err := proto.Unmarshal([]byte(vs[0]), ret); err != nil { - return nil, fmt.Errorf("failed to unmarshal load report found in metadata: %v", err) - } - return ret, nil -} - // loadParser implements the Parser interface defined in `internal/balancerload` // package. This interface is used by the client stream to parse load reports // sent by the server in trailer metadata. 
The parsed loads are then sent to @@ -174,9 +152,12 @@ func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { type loadParser struct{} func (loadParser) Parse(md metadata.MD) interface{} { - lr, err := ToLoadReport(md) + lr, err := internal.ToLoadReport(md) if err != nil { - logger.Errorf("Parse(%v) failed: %v", err) + logger.Infof("Parse failed: %v", err) + } + if lr == nil && logger.V(2) { + logger.Infof("Missing ORCA load report data") } return lr } diff --git a/orca/orca_test.go b/orca/orca_test.go index fd356cfba437..096b54907148 100644 --- a/orca/orca_test.go +++ b/orca/orca_test.go @@ -25,12 +25,18 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/metadata" - "google.golang.org/grpc/orca" + "google.golang.org/grpc/orca/internal" v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" ) func TestToLoadReport(t *testing.T) { + goodReport := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 50.0, + RequestCost: map[string]float64{"queryCost": 25.0}, + Utilization: map[string]float64{"queueSize": 75.0}, + } tests := []struct { name string md metadata.MD @@ -40,7 +46,7 @@ func TestToLoadReport(t *testing.T) { { name: "no load report in metadata", md: metadata.MD{}, - wantErr: true, + wantErr: false, }, { name: "badly marshaled load report", @@ -49,29 +55,27 @@ func TestToLoadReport(t *testing.T) { }(), wantErr: true, }, + { + name: "multiple load reports", + md: func() metadata.MD { + b, _ := proto.Marshal(goodReport) + return metadata.Pairs("endpoint-load-metrics-bin", string(b), "endpoint-load-metrics-bin", string(b)) + }(), + wantErr: true, + }, { name: "good load report", md: func() metadata.MD { - b, _ := proto.Marshal(&v3orcapb.OrcaLoadReport{ - CpuUtilization: 1.0, - MemUtilization: 50.0, - RequestCost: map[string]float64{"queryCost": 25.0}, - Utilization: map[string]float64{"queueSize": 75.0}, - }) + b, _ := proto.Marshal(goodReport) return 
metadata.Pairs("endpoint-load-metrics-bin", string(b)) }(), - want: &v3orcapb.OrcaLoadReport{ - CpuUtilization: 1.0, - MemUtilization: 50.0, - RequestCost: map[string]float64{"queryCost": 25.0}, - Utilization: map[string]float64{"queueSize": 75.0}, - }, + want: goodReport, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - got, err := orca.ToLoadReport(test.md) + got, err := internal.ToLoadReport(test.md) if (err != nil) != test.wantErr { t.Fatalf("orca.ToLoadReport(%v) = %v, wantErr: %v", test.md, err, test.wantErr) } diff --git a/orca/service.go b/orca/service.go index 9400ae0c7e64..ae011fd9a9d2 100644 --- a/orca/service.go +++ b/orca/service.go @@ -120,7 +120,7 @@ func (s *Service) determineReportingInterval(req *v3orcaservicepb.OrcaLoadReport } dur := req.GetReportInterval().AsDuration() if dur < s.minReportingInterval { - logger.Warningf("Received reporting interval %q is less than configured minimum: %v. Using default: %s", dur, s.minReportingInterval) + logger.Warningf("Received reporting interval %q is less than configured minimum: %v. 
Using minimum", dur, s.minReportingInterval) return s.minReportingInterval } return dur From b153b006cee37f7d99748ddb0bdc20ddd90bb425 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 1 May 2023 17:30:53 -0700 Subject: [PATCH 901/998] multiple: standardize import renaming for typed structs (#6238) --- authz/rbac_translator.go | 4 ++-- authz/rbac_translator_test.go | 4 ++-- xds/internal/xdsclient/xdslbregistry/converter.go | 8 ++++---- .../xdsclient/xdslbregistry/tests/converter_test.go | 10 +++++----- .../xdsresource/tests/unmarshal_cds_test.go | 4 ++-- xds/internal/xdsclient/xdsresource/unmarshal_lds.go | 12 ++++++------ .../xdsclient/xdsresource/unmarshal_lds_test.go | 8 ++++---- 7 files changed, 25 insertions(+), 25 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index c4fa996e5995..ce5c15cb976d 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -28,7 +28,7 @@ import ( "fmt" "strings" - v1typepb "github.com/cncf/xds/go/udpa/type/v1" + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -307,7 +307,7 @@ func (options *auditLoggingOptions) toProtos() (allow *v3rbacpb.RBAC_AuditLoggin if config.Config == nil { return nil, nil, fmt.Errorf("AuditLogger Config field cannot be nil") } - typedStruct := &v1typepb.TypedStruct{ + typedStruct := &v1xdsudpatypepb.TypedStruct{ TypeUrl: typedURLPrefix + config.Name, Value: config.Config, } diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index fff492cff7ab..fed0ef5c9d33 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - v1typepb "github.com/cncf/xds/go/udpa/type/v1" 
+ v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" "github.com/google/go-cmp/cmp" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/anypb" @@ -945,7 +945,7 @@ func TestTranslatePolicy(t *testing.T) { func anyPbHelper(t *testing.T, in map[string]interface{}, name string) *anypb.Any { t.Helper() pb, err := structpb.NewStruct(in) - typedStruct := &v1typepb.TypedStruct{ + typedStruct := &v1xdsudpatypepb.TypedStruct{ TypeUrl: typedURLPrefix + name, Value: pb, } diff --git a/xds/internal/xdsclient/xdslbregistry/converter.go b/xds/internal/xdsclient/xdslbregistry/converter.go index ef13802b0c12..158ad8b199d6 100644 --- a/xds/internal/xdsclient/xdslbregistry/converter.go +++ b/xds/internal/xdsclient/xdslbregistry/converter.go @@ -26,8 +26,8 @@ import ( "fmt" "strings" - v1udpatypepb "github.com/cncf/xds/go/udpa/type/v1" - v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" @@ -86,13 +86,13 @@ func convertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int } return convertWrrLocality(wrrlProto, depth) case "type.googleapis.com/xds.type.v3.TypedStruct": - tsProto := &v3cncftypepb.TypedStruct{} + tsProto := &v3xdsxdstypepb.TypedStruct{} if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) } return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) case 
"type.googleapis.com/udpa.type.v1.TypedStruct": - tsProto := &v1udpatypepb.TypedStruct{} + tsProto := &v1xdsudpatypepb.TypedStruct{} if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) } diff --git a/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go b/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go index 2ffbad845f8b..2607905dc903 100644 --- a/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go +++ b/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go @@ -24,8 +24,8 @@ import ( "strings" "testing" - v1udpatypepb "github.com/cncf/xds/go/udpa/type/v1" - v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" @@ -172,7 +172,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ { TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ - TypedConfig: testutils.MarshalAny(&v3cncftypepb.TypedStruct{ + TypedConfig: testutils.MarshalAny(&v3xdsxdstypepb.TypedStruct{ TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", Value: &structpb.Struct{}, }), @@ -191,7 +191,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ { TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ - TypedConfig: testutils.MarshalAny(&v1udpatypepb.TypedStruct{ + TypedConfig: testutils.MarshalAny(&v1xdsudpatypepb.TypedStruct{ TypeUrl: 
"type.googleapis.com/myorg.MyCustomLeastRequestPolicy", Value: &structpb.Struct{}, }), @@ -230,7 +230,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ { TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ - TypedConfig: wrrLocalityAny(&v3cncftypepb.TypedStruct{ + TypedConfig: wrrLocalityAny(&v3xdsxdstypepb.TypedStruct{ TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", Value: &structpb.Struct{}, }), diff --git a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go index dc4e8591f827..7d20b1ff61e4 100644 --- a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go @@ -37,7 +37,7 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/wrapperspb" - v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" @@ -457,7 +457,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ { TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ - TypedConfig: wrrLocalityAny(&v3cncftypepb.TypedStruct{ + TypedConfig: wrrLocalityAny(&v3xdsxdstypepb.TypedStruct{ TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", Value: &structpb.Struct{}, }), diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go index 1cc8a0179582..8f18b02e28a6 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go +++ 
b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -22,8 +22,8 @@ import ( "fmt" "strconv" - v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" - v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v1udpaudpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" @@ -121,16 +121,16 @@ func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { switch { - case ptypes.Is(config, &v3cncftypepb.TypedStruct{}): + case ptypes.Is(config, &v3xdsxdstypepb.TypedStruct{}): // The real type name is inside the new TypedStruct message. - s := new(v3cncftypepb.TypedStruct) + s := new(v3xdsxdstypepb.TypedStruct) if err := ptypes.UnmarshalAny(config, s); err != nil { return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) } return s, s.GetTypeUrl(), nil - case ptypes.Is(config, &v1udpatypepb.TypedStruct{}): + case ptypes.Is(config, &v1udpaudpatypepb.TypedStruct{}): // The real type name is inside the old TypedStruct message. 
- s := new(v1udpatypepb.TypedStruct) + s := new(v1udpaudpatypepb.TypedStruct) if err := ptypes.UnmarshalAny(config, s); err != nil { return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go index d2ce5ac34424..2dfeb5965b72 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -34,8 +34,8 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/durationpb" - v1udpatypepb "github.com/cncf/udpa/go/udpa/type/v1" - v3cncftypepb "github.com/cncf/xds/go/xds/type/v3" + v1udpaudpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" rpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" @@ -1830,7 +1830,7 @@ var clientOnlyCustomFilterConfig = &anypb.Any{ } // This custom filter uses the old TypedStruct message from the cncf/udpa repo. -var customFilterOldTypedStructConfig = &v1udpatypepb.TypedStruct{ +var customFilterOldTypedStructConfig = &v1udpaudpatypepb.TypedStruct{ TypeUrl: "custom.filter", Value: &spb.Struct{ Fields: map[string]*spb.Value{ @@ -1841,7 +1841,7 @@ var customFilterOldTypedStructConfig = &v1udpatypepb.TypedStruct{ var wrappedCustomFilterOldTypedStructConfig *anypb.Any // This custom filter uses the new TypedStruct message from the cncf/xds repo. 
-var customFilterNewTypedStructConfig = &v3cncftypepb.TypedStruct{ +var customFilterNewTypedStructConfig = &v3xdsxdstypepb.TypedStruct{ TypeUrl: "custom.filter", Value: &spb.Struct{ Fields: map[string]*spb.Value{ From ed3ceba60557551492e669049870572d3a4b05be Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 2 May 2023 10:09:23 -0700 Subject: [PATCH 902/998] balancer: make producer RPCs block until the SubConn is READY (#6236) --- balancer_conn_wrappers.go | 13 ++-- clientconn.go | 30 +++++++- orca/producer.go | 15 ++-- test/balancer_test.go | 146 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 187 insertions(+), 17 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 978ed69fdf75..d0383f04748c 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -25,14 +25,12 @@ import ( "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -405,14 +403,13 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { return acbw.ac } -var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") - // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, returns errSubConnNotReady. +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. 
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport := acbw.ac.getReadyTransport() - if transport == nil { - return nil, errSubConnNotReady + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } diff --git a/clientconn.go b/clientconn.go index e67a990b24fb..50d08a49a205 100644 --- a/clientconn.go +++ b/clientconn.go @@ -742,6 +742,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -1122,7 +1123,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1136,6 +1138,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s if lastErr == nil { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) @@ -1438,6 +1443,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. 
+func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct diff --git a/orca/producer.go b/orca/producer.go index 559033116667..956d5ddfb52d 100644 --- a/orca/producer.go +++ b/orca/producer.go @@ -79,8 +79,8 @@ func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOpt p := pr.(*producer) p.registerListener(l, opts.ReportInterval) - // TODO: When we can register for SubConn state updates, don't call run() - // until READY and automatically call stop() on SHUTDOWN. + // TODO: When we can register for SubConn state updates, automatically call + // stop() on SHUTDOWN. // If stop is called multiple times, prevent it from having any effect on // subsequent calls. @@ -175,12 +175,11 @@ func (p *producer) run(ctx context.Context) { logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") return case status.Code(err) == codes.Unavailable: - // The SubConn is not currently ready; backoff silently. - // - // TODO: don't attempt the stream until the state is READY to - // minimize the chances of this case and to avoid using the - // exponential backoff mechanism, as we should know it's safe to - // retry when the state is READY again. + // TODO: this code should ideally log an error, too, but for now we + // receive this code when shutting down the ClientConn. 
Once we + // can determine the state or ensure the producer is stopped before + // the stream ends, we can log an error when it's not a natural + // shutdown. default: // Log all other errors. logger.Error("Received unexpected stream error:", err) diff --git a/test/balancer_test.go b/test/balancer_test.go index c9a769c9f5a4..950d31d13ed5 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/stubserver" @@ -1004,3 +1005,148 @@ func (s) TestMetadataInPickResult(t *testing.T) { t.Fatalf("Mismatch in custom metadata received at test backend, got: %v, want %v", gotMDVal, wantMDVal) } } + +// producerTestBalancerBuilder and producerTestBalancer start a producer which +// makes an RPC before the subconn is READY, then connects the subconn, and +// pushes the resulting error (expected to be nil) to rpcErrChan. +type producerTestBalancerBuilder struct { + rpcErrChan chan error + ctxChan chan context.Context +} + +func (bb *producerTestBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &producerTestBalancer{cc: cc, rpcErrChan: bb.rpcErrChan, ctxChan: bb.ctxChan} +} + +const producerTestBalancerName = "producer_test_balancer" + +func (bb *producerTestBalancerBuilder) Name() string { return producerTestBalancerName } + +type producerTestBalancer struct { + cc balancer.ClientConn + rpcErrChan chan error + ctxChan chan context.Context +} + +func (b *producerTestBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + // Create the subconn, but don't connect it. 
+ sc, err := b.cc.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + return fmt.Errorf("error creating subconn: %v", err) + } + + // Create the producer. This will call the producer builder's Build + // method, which will try to start an RPC in a goroutine. + p := &testProducerBuilder{start: grpcsync.NewEvent(), rpcErrChan: b.rpcErrChan, ctxChan: b.ctxChan} + sc.GetOrBuildProducer(p) + + // Wait here until the producer is about to perform the RPC, which should + // block until connected. + <-p.start.Done() + + // Ensure the error chan doesn't get anything on it before we connect the + // subconn. + select { + case err := <-b.rpcErrChan: + go func() { b.rpcErrChan <- fmt.Errorf("Got unexpected data on rpcErrChan: %v", err) }() + default: + } + + // Now we can connect, which will unblock the RPC above. + sc.Connect() + + // The stub server requires a READY picker to be reported, to unblock its + // Start method. We won't make RPCs in our test, so a nil picker is okay. + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil}) + return nil +} + +func (b *producerTestBalancer) ResolverError(err error) { + panic(fmt.Sprintf("Unexpected resolver error: %v", err)) +} + +func (b *producerTestBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {} +func (b *producerTestBalancer) Close() {} + +type testProducerBuilder struct { + start *grpcsync.Event + rpcErrChan chan error + ctxChan chan context.Context +} + +func (b *testProducerBuilder) Build(cci interface{}) (balancer.Producer, func()) { + c := testgrpc.NewTestServiceClient(cci.(grpc.ClientConnInterface)) + // Perform the RPC in a goroutine instead of during build because the + // subchannel's mutex is held here. 
+ go func() { + ctx := <-b.ctxChan + b.start.Fire() + _, err := c.EmptyCall(ctx, &testpb.Empty{}) + b.rpcErrChan <- err + }() + return nil, func() {} +} + +// TestBalancerProducerBlockUntilReady tests that we get no RPC errors from +// producers when subchannels aren't ready. +func (s) TestBalancerProducerBlockUntilReady(t *testing.T) { + // rpcErrChan is given to the LB policy to report the status of the + // producer's one RPC. + ctxChan := make(chan context.Context, 1) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + ctxChan <- ctx + rpcErrChan := make(chan error) + balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan}) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + // Start the server & client with the test producer LB policy. + svcCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, producerTestBalancerName) + if err := ss.Start(nil, grpc.WithDefaultServiceConfig(svcCfg)); err != nil { + t.Fatalf("Error starting testing server: %v", err) + } + defer ss.Stop() + + // Receive the error from the producer's RPC, which should be nil. + if err := <-rpcErrChan; err != nil { + t.Fatalf("Received unexpected error from producer RPC: %v", err) + } +} + +// TestBalancerProducerHonorsContext tests that producers that perform RPC get +// context errors correctly. +func (s) TestBalancerProducerHonorsContext(t *testing.T) { + // rpcErrChan is given to the LB policy to report the status of the + // producer's one RPC. 
+ ctxChan := make(chan context.Context, 1) + ctx, cancel := context.WithCancel(context.Background()) + ctxChan <- ctx + + rpcErrChan := make(chan error) + balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan}) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + // Start the server & client with the test producer LB policy. + svcCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, producerTestBalancerName) + if err := ss.Start(nil, grpc.WithDefaultServiceConfig(svcCfg)); err != nil { + t.Fatalf("Error starting testing server: %v", err) + } + defer ss.Stop() + + cancel() + + // Receive the error from the producer's RPC, which should be canceled. + if err := <-rpcErrChan; status.Code(err) != codes.Canceled { + t.Fatalf("RPC error: %v; want status.Code(err)=%v", err, codes.Canceled) + } +} From 40d01479bb3abdfee035dd1843894b30b0528da3 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 2 May 2023 14:07:59 -0700 Subject: [PATCH 903/998] googledirectpatph: enable ignore_resource_deletion in bootstrap (#6243) --- xds/googledirectpath/googlec2p.go | 2 +- xds/googledirectpath/googlec2p_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index fa7b770d878b..20891c7a4cb8 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -120,7 +120,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts { "server_uri": "%s", "channel_creds": [{"type": "google_default"}], - "server_features": ["xds_v3"] + "server_features": ["xds_v3", "ignore_resource_deletion"] }`, balancerName))) if err != nil { return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err) diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 
961f6546bf41..44e1a68e2387 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -216,7 +216,7 @@ func TestBuildXDS(t *testing.T) { wantServerConfig, err := bootstrap.ServerConfigFromJSON([]byte(fmt.Sprintf(`{ "server_uri": "%s", "channel_creds": [{"type": "google_default"}], - "server_features": ["xds_v3"] + "server_features": ["xds_v3", "ignore_resource_deletion"] }`, tdURL))) if err != nil { t.Fatalf("Failed to build server bootstrap config: %v", err) From add90153d43b1143dd8d74fc9675bf1fe3f1e607 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 2 May 2023 15:04:33 -0700 Subject: [PATCH 904/998] orca: allow a ServerMetricsProvider to be passed to the ORCA service and ServerOption (#6223) --- examples/features/orca/server/main.go | 22 +- orca/call_metric_recorder.go | 130 --------- orca/call_metrics.go | 196 +++++++++++++ ..._recorder_test.go => call_metrics_test.go} | 30 +- orca/orca.go | 113 +------- orca/producer_test.go | 18 +- orca/server_metrics.go | 270 ++++++++++++++++++ orca/service.go | 85 ++---- orca/service_test.go | 26 +- 9 files changed, 546 insertions(+), 344 deletions(-) delete mode 100644 orca/call_metric_recorder.go create mode 100644 orca/call_metrics.go rename orca/{call_metric_recorder_test.go => call_metrics_test.go} (91%) create mode 100644 orca/server_metrics.go diff --git a/examples/features/orca/server/main.go b/examples/features/orca/server/main.go index 5d4bdb163a17..e52d5d06eebf 100644 --- a/examples/features/orca/server/main.go +++ b/examples/features/orca/server/main.go @@ -44,9 +44,9 @@ type server struct { func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { // Report a sample cost for this query. 
- cmr := orca.CallMetricRecorderFromContext(ctx) + cmr := orca.CallMetricsRecorderFromContext(ctx) if cmr == nil { - return nil, status.Errorf(codes.Internal, "unable to retrieve call metric recorder (missing ORCA ServerOption?)") + return nil, status.Errorf(codes.Internal, "unable to retrieve call metrics recorder (missing ORCA ServerOption?)") } cmr.SetRequestCost("db_queries", 10) @@ -63,27 +63,31 @@ func main() { fmt.Printf("Server listening at %v\n", lis.Addr()) // Create the gRPC server with the orca.CallMetricsServerOption() option, - // which will enable per-call metric recording. - s := grpc.NewServer(orca.CallMetricsServerOption()) + // which will enable per-call metric recording. No ServerMetricsProvider + // is given here because the out-of-band reporting is enabled separately. + s := grpc.NewServer(orca.CallMetricsServerOption(nil)) pb.RegisterEchoServer(s, &server{}) // Register the orca service for out-of-band metric reporting, and set the // minimum reporting interval to 3 seconds. Note that, by default, the // minimum interval must be at least 30 seconds, but 3 seconds is set via // an internal-only option for illustration purposes only. - opts := orca.ServiceOptions{MinReportingInterval: 3 * time.Second} + smr := orca.NewServerMetricsRecorder() + opts := orca.ServiceOptions{ + MinReportingInterval: 3 * time.Second, + ServerMetricsProvider: smr, + } internal.ORCAAllowAnyMinReportingInterval.(func(so *orca.ServiceOptions))(&opts) - orcaSvc, err := orca.Register(s, opts) - if err != nil { + if err := orca.Register(s, opts); err != nil { log.Fatalf("Failed to register ORCA service: %v", err) } // Simulate CPU utilization reporting. 
go func() { for { - orcaSvc.SetCPUUtilization(.5) + smr.SetCPUUtilization(.5) time.Sleep(2 * time.Second) - orcaSvc.SetCPUUtilization(.9) + smr.SetCPUUtilization(.9) time.Sleep(2 * time.Second) } }() diff --git a/orca/call_metric_recorder.go b/orca/call_metric_recorder.go deleted file mode 100644 index 62f2a1a6c220..000000000000 --- a/orca/call_metric_recorder.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package orca - -import ( - "context" - "sync" - "sync/atomic" - - v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" -) - -// CallMetricRecorder provides functionality to record per-RPC custom backend -// metrics. See CallMetricsServerOption() for more details. -// -// Safe for concurrent use. -type CallMetricRecorder struct { - cpu atomic.Value // float64 - memory atomic.Value // float64 - - mu sync.RWMutex - requestCost map[string]float64 - utilization map[string]float64 -} - -func newCallMetricRecorder() *CallMetricRecorder { - return &CallMetricRecorder{ - requestCost: make(map[string]float64), - utilization: make(map[string]float64), - } -} - -// SetCPUUtilization records a measurement for the CPU utilization metric. -func (c *CallMetricRecorder) SetCPUUtilization(val float64) { - c.cpu.Store(val) -} - -// SetMemoryUtilization records a measurement for the memory utilization metric. 
-func (c *CallMetricRecorder) SetMemoryUtilization(val float64) { - c.memory.Store(val) -} - -// SetRequestCost records a measurement for a request cost metric, -// uniquely identifiable by name. -func (c *CallMetricRecorder) SetRequestCost(name string, val float64) { - c.mu.Lock() - c.requestCost[name] = val - c.mu.Unlock() -} - -// SetUtilization records a measurement for a utilization metric uniquely -// identifiable by name. -func (c *CallMetricRecorder) SetUtilization(name string, val float64) { - c.mu.Lock() - c.utilization[name] = val - c.mu.Unlock() -} - -// toLoadReportProto dumps the recorded measurements as an OrcaLoadReport proto. -func (c *CallMetricRecorder) toLoadReportProto() *v3orcapb.OrcaLoadReport { - c.mu.RLock() - defer c.mu.RUnlock() - - cost := make(map[string]float64, len(c.requestCost)) - for k, v := range c.requestCost { - cost[k] = v - } - util := make(map[string]float64, len(c.utilization)) - for k, v := range c.utilization { - util[k] = v - } - cpu, _ := c.cpu.Load().(float64) - mem, _ := c.memory.Load().(float64) - return &v3orcapb.OrcaLoadReport{ - CpuUtilization: cpu, - MemUtilization: mem, - RequestCost: cost, - Utilization: util, - } -} - -type callMetricRecorderCtxKey struct{} - -// CallMetricRecorderFromContext returns the RPC specific custom metrics -// recorder [CallMetricRecorder] embedded in the provided RPC context. -// -// Returns nil if no custom metrics recorder is found in the provided context, -// which will be the case when custom metrics reporting is not enabled. 
-func CallMetricRecorderFromContext(ctx context.Context) *CallMetricRecorder { - rw, ok := ctx.Value(callMetricRecorderCtxKey{}).(*recorderWrapper) - if !ok { - return nil - } - return rw.recorder() -} - -func newContextWithRecorderWrapper(ctx context.Context, r *recorderWrapper) context.Context { - return context.WithValue(ctx, callMetricRecorderCtxKey{}, r) -} - -// recorderWrapper is a wrapper around a CallMetricRecorder to ensures that -// concurrent calls to CallMetricRecorderFromContext() results in only one -// allocation of the underlying metric recorder. -type recorderWrapper struct { - once sync.Once - r *CallMetricRecorder -} - -func (rw *recorderWrapper) recorder() *CallMetricRecorder { - rw.once.Do(func() { - rw.r = newCallMetricRecorder() - }) - return rw.r -} diff --git a/orca/call_metrics.go b/orca/call_metrics.go new file mode 100644 index 000000000000..558c7bce6a8e --- /dev/null +++ b/orca/call_metrics.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "context" + "sync" + + "google.golang.org/grpc" + grpcinternal "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" + "google.golang.org/protobuf/proto" +) + +// CallMetricsRecorder allows a service method handler to record per-RPC +// metrics. 
It contains all utilization-based metrics from +// ServerMetricsRecorder as well as additional request cost metrics. +type CallMetricsRecorder interface { + ServerMetricsRecorder + + // SetRequestCost sets the relevant server metric. + SetRequestCost(name string, val float64) + // DeleteRequestCost deletes the relevant server metric to prevent it + // from being sent. + DeleteRequestCost(name string) + + // SetNamedMetric sets the relevant server metric. + SetNamedMetric(name string, val float64) + // DeleteNamedMetric deletes the relevant server metric to prevent it + // from being sent. + DeleteNamedMetric(name string) +} + +type callMetricsRecorderCtxKey struct{} + +// CallMetricsRecorderFromContext returns the RPC-specific custom metrics +// recorder embedded in the provided RPC context. +// +// Returns nil if no custom metrics recorder is found in the provided context, +// which will be the case when custom metrics reporting is not enabled. +func CallMetricsRecorderFromContext(ctx context.Context) CallMetricsRecorder { + rw, ok := ctx.Value(callMetricsRecorderCtxKey{}).(*recorderWrapper) + if !ok { + return nil + } + return rw.recorder() +} + +// recorderWrapper is a wrapper around a CallMetricsRecorder to ensure that +// concurrent calls to CallMetricsRecorderFromContext() results in only one +// allocation of the underlying metrics recorder, while also allowing for lazy +// initialization of the recorder itself. +type recorderWrapper struct { + once sync.Once + r CallMetricsRecorder + smp ServerMetricsProvider +} + +func (rw *recorderWrapper) recorder() CallMetricsRecorder { + rw.once.Do(func() { + rw.r = newServerMetricsRecorder() + }) + return rw.r +} + +// setTrailerMetadata adds a trailer metadata entry with key being set to +// `internal.TrailerMetadataKey` and value being set to the binary-encoded +// orca.OrcaLoadReport protobuf message. +// +// This function is called from the unary and streaming interceptors defined +// above. 
Any errors encountered here are not propagated to the caller because +// they are ignored there. Hence we simply log any errors encountered here at +// warning level, and return nothing. +func (rw *recorderWrapper) setTrailerMetadata(ctx context.Context) { + var sm *ServerMetrics + if rw.smp != nil { + sm = rw.smp.ServerMetrics() + sm.merge(rw.r.ServerMetrics()) + } else { + sm = rw.r.ServerMetrics() + } + + b, err := proto.Marshal(sm.toLoadReportProto()) + if err != nil { + logger.Warningf("Failed to marshal load report: %v", err) + return + } + if err := grpc.SetTrailer(ctx, metadata.Pairs(internal.TrailerMetadataKey, string(b))); err != nil { + logger.Warningf("Failed to set trailer metadata: %v", err) + } +} + +var joinServerOptions = grpcinternal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + +// CallMetricsServerOption returns a server option which enables the reporting +// of per-RPC custom backend metrics for unary and streaming RPCs. +// +// Server applications interested in injecting custom backend metrics should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). +// +// Subsequently, server RPC handlers can retrieve a reference to the RPC +// specific custom metrics recorder [CallMetricsRecorder] to be used, via a call +// to CallMetricsRecorderFromContext(), and inject custom metrics at any time +// during the RPC lifecycle. +// +// The injected custom metrics will be sent as part of trailer metadata, as a +// binary-encoded [ORCA LoadReport] protobuf message, with the metadata key +// being set be "endpoint-load-metrics-bin". +// +// If a non-nil ServerMetricsProvider is provided, the gRPC server will +// transmit the metrics it provides, overwritten by any per-RPC metrics given +// to the CallMetricsRecorder. A ServerMetricsProvider is typically obtained +// by calling NewServerMetricsRecorder. 
+// +// [ORCA LoadReport]: https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15 +func CallMetricsServerOption(smp ServerMetricsProvider) grpc.ServerOption { + return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt(smp)), grpc.ChainStreamInterceptor(streamInt(smp))) +} + +func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). + rw := &recorderWrapper{smp: smp} + ctxWithRecorder := newContextWithRecorderWrapper(ctx, rw) + + resp, err := handler(ctxWithRecorder, req) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ctx) + } + return resp, err + } +} + +func streamInt(smp ServerMetricsProvider) func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). 
+ rw := &recorderWrapper{smp: smp} + ws := &wrappedStream{ + ServerStream: ss, + ctx: newContextWithRecorderWrapper(ss.Context(), rw), + } + + err := handler(srv, ws) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ss.Context()) + } + return err + } +} + +func newContextWithRecorderWrapper(ctx context.Context, r *recorderWrapper) context.Context { + return context.WithValue(ctx, callMetricsRecorderCtxKey{}, r) +} + +// wrappedStream wraps the grpc.ServerStream received by the streaming +// interceptor. Overrides only the Context() method to return a context which +// contains a reference to the CallMetricsRecorder corresponding to this +// stream. +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} diff --git a/orca/call_metric_recorder_test.go b/orca/call_metrics_test.go similarity index 91% rename from orca/call_metric_recorder_test.go rename to orca/call_metrics_test.go index 43d0e45291e2..4374b593b9f1 100644 --- a/orca/call_metric_recorder_test.go +++ b/orca/call_metrics_test.go @@ -78,23 +78,24 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - // A server option to enables reporting of per-call backend metrics. - callMetricsServerOption := orca.CallMetricsServerOption() + // A server option to enable reporting of per-call backend metrics. + smr := orca.NewServerMetricsRecorder() + callMetricsServerOption := orca.CallMetricsServerOption(smr) + smr.SetCPUUtilization(1.0) // An interceptor to injects custom backend metrics, added only when // the injectMetrics field in the test is set. 
injectingInterceptor := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - recorder := orca.CallMetricRecorderFromContext(ctx) + recorder := orca.CallMetricsRecorderFromContext(ctx) if recorder == nil { err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") t.Error(err) return nil, err } - recorder.SetCPUUtilization(1.0) recorder.SetMemoryUtilization(50.0) // This value will be overwritten by a write to the same metric // from the server handler. - recorder.SetUtilization("queueSize", 1.0) + recorder.SetNamedUtilization("queueSize", 1.0) return handler(ctx, req) } @@ -106,14 +107,14 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { if !test.injectMetrics { return &testpb.Empty{}, nil } - recorder := orca.CallMetricRecorderFromContext(ctx) + recorder := orca.CallMetricsRecorderFromContext(ctx) if recorder == nil { err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") t.Error(err) return nil, err } recorder.SetRequestCost("queryCost", 25.0) - recorder.SetUtilization("queueSize", 75.0) + recorder.SetNamedUtilization("queueSize", 75.0) return &testpb.Empty{}, nil }, } @@ -183,23 +184,24 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - // A server option to enables reporting of per-call backend metrics. - callMetricsServerOption := orca.CallMetricsServerOption() + // A server option to enable reporting of per-call backend metrics. + smr := orca.NewServerMetricsRecorder() + callMetricsServerOption := orca.CallMetricsServerOption(smr) + smr.SetCPUUtilization(1.0) // An interceptor which injects custom backend metrics, added only // when the injectMetrics field in the test is set. 
injectingInterceptor := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - recorder := orca.CallMetricRecorderFromContext(ss.Context()) + recorder := orca.CallMetricsRecorderFromContext(ss.Context()) if recorder == nil { err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") t.Error(err) return err } - recorder.SetCPUUtilization(1.0) recorder.SetMemoryUtilization(50.0) // This value will be overwritten by a write to the same metric // from the server handler. - recorder.SetUtilization("queueSize", 1.0) + recorder.SetNamedUtilization("queueSize", 1.0) return handler(srv, ss) } @@ -209,14 +211,14 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { srv := stubserver.StubServer{ FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { if test.injectMetrics { - recorder := orca.CallMetricRecorderFromContext(stream.Context()) + recorder := orca.CallMetricsRecorderFromContext(stream.Context()) if recorder == nil { err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") t.Error(err) return err } recorder.SetRequestCost("queryCost", 25.0) - recorder.SetUtilization("queueSize", 75.0) + recorder.SetNamedUtilization("queueSize", 75.0) } // Streaming implementation replies with a dummy response until the diff --git a/orca/orca.go b/orca/orca.go index 2c958b6902e9..771db36af1c9 100644 --- a/orca/orca.go +++ b/orca/orca.go @@ -27,128 +27,21 @@ package orca import ( - "context" - "errors" - - "google.golang.org/grpc" "google.golang.org/grpc/grpclog" - igrpc "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/metadata" "google.golang.org/grpc/orca/internal" - "google.golang.org/protobuf/proto" -) - -var ( - logger = grpclog.Component("orca-backend-metrics") - joinServerOptions = igrpc.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) ) -const 
trailerMetadataKey = "endpoint-load-metrics-bin" - -// CallMetricsServerOption returns a server option which enables the reporting -// of per-RPC custom backend metrics for unary and streaming RPCs. -// -// Server applications interested in injecting custom backend metrics should -// pass the server option returned from this function as the first argument to -// grpc.NewServer(). -// -// Subsequently, server RPC handlers can retrieve a reference to the RPC -// specific custom metrics recorder [CallMetricRecorder] to be used, via a call -// to CallMetricRecorderFromContext(), and inject custom metrics at any time -// during the RPC lifecycle. -// -// The injected custom metrics will be sent as part of trailer metadata, as a -// binary-encoded [ORCA LoadReport] protobuf message, with the metadata key -// being set be "endpoint-load-metrics-bin". -// -// [ORCA LoadReport]: https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15 -func CallMetricsServerOption() grpc.ServerOption { - return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt), grpc.ChainStreamInterceptor(streamInt)) -} - -func unaryInt(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - // We don't allocate the metric recorder here. It will be allocated the - // first time the user calls CallMetricRecorderFromContext(). - rw := &recorderWrapper{} - ctxWithRecorder := newContextWithRecorderWrapper(ctx, rw) - - resp, err := handler(ctxWithRecorder, req) - - // It is safe to access the underlying metric recorder inside the wrapper at - // this point, as the user's RPC handler is done executing, and therefore - // there will be no more calls to CallMetricRecorderFromContext(), which is - // where the metric recorder is lazy allocated. 
- if rw.r == nil { - return resp, err - } - setTrailerMetadata(ctx, rw.r) - return resp, err -} - -func streamInt(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - // We don't allocate the metric recorder here. It will be allocated the - // first time the user calls CallMetricRecorderFromContext(). - rw := &recorderWrapper{} - ws := &wrappedStream{ - ServerStream: ss, - ctx: newContextWithRecorderWrapper(ss.Context(), rw), - } - - err := handler(srv, ws) - - // It is safe to access the underlying metric recorder inside the wrapper at - // this point, as the user's RPC handler is done executing, and therefore - // there will be no more calls to CallMetricRecorderFromContext(), which is - // where the metric recorder is lazy allocated. - if rw.r == nil { - return err - } - setTrailerMetadata(ss.Context(), rw.r) - return err -} - -// setTrailerMetadata adds a trailer metadata entry with key being set to -// `trailerMetadataKey` and value being set to the binary-encoded -// orca.OrcaLoadReport protobuf message. -// -// This function is called from the unary and streaming interceptors defined -// above. Any errors encountered here are not propagated to the caller because -// they are ignored there. Hence we simply log any errors encountered here at -// warning level, and return nothing. -func setTrailerMetadata(ctx context.Context, r *CallMetricRecorder) { - b, err := proto.Marshal(r.toLoadReportProto()) - if err != nil { - logger.Warningf("failed to marshal load report: %v", err) - return - } - if err := grpc.SetTrailer(ctx, metadata.Pairs(trailerMetadataKey, string(b))); err != nil { - logger.Warningf("failed to set trailer metadata: %v", err) - } -} - -// wrappedStream wraps the grpc.ServerStream received by the streaming -// interceptor. Overrides only the Context() method to return a context which -// contains a reference to the CallMetricRecorder corresponding to this stream. 
-type wrappedStream struct { - grpc.ServerStream - ctx context.Context -} - -func (w *wrappedStream) Context() context.Context { - return w.ctx -} - -// ErrLoadReportMissing indicates no ORCA load report was found in trailers. -var ErrLoadReportMissing = errors.New("orca load report missing in provided metadata") +var logger = grpclog.Component("orca-backend-metrics") // loadParser implements the Parser interface defined in `internal/balancerload` // package. This interface is used by the client stream to parse load reports // sent by the server in trailer metadata. The parsed loads are then sent to // balancers via balancer.DoneInfo. // -// The grpc package cannot directly call orca.ToLoadReport() as that would cause -// an import cycle. Hence this roundabout method is used. +// The grpc package cannot directly call toLoadReport() as that would cause an +// import cycle. Hence this roundabout method is used. type loadParser struct{} func (loadParser) Parse(md metadata.MD) interface{} { diff --git a/orca/producer_test.go b/orca/producer_test.go index f15317995dec..be41424063fa 100644 --- a/orca/producer_test.go +++ b/orca/producer_test.go @@ -128,11 +128,11 @@ func (s) TestProducer(t *testing.T) { // Register the OpenRCAService with a very short metrics reporting interval. const shortReportingInterval = 50 * time.Millisecond - opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval} + smr := orca.NewServerMetricsRecorder() + opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval, ServerMetricsProvider: smr} internal.AllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&opts) s := grpc.NewServer() - orcaSrv, err := orca.Register(s, opts) - if err != nil { + if err := orca.Register(s, opts); err != nil { t.Fatalf("orca.Register failed: %v", err) } go s.Serve(lis) @@ -157,9 +157,9 @@ func (s) TestProducer(t *testing.T) { defer oobLis.Stop() // Set a few metrics and wait for them on the client side. 
- orcaSrv.SetCPUUtilization(10) - orcaSrv.SetMemoryUtilization(100) - orcaSrv.SetUtilization("bob", 555) + smr.SetCPUUtilization(10) + smr.SetMemoryUtilization(100) + smr.SetNamedUtilization("bob", 555) loadReportWant := &v3orcapb.OrcaLoadReport{ CpuUtilization: 10, MemUtilization: 100, @@ -181,9 +181,9 @@ testReport: } // Change and add metrics and wait for them on the client side. - orcaSrv.SetCPUUtilization(50) - orcaSrv.SetMemoryUtilization(200) - orcaSrv.SetUtilization("mary", 321) + smr.SetCPUUtilization(50) + smr.SetMemoryUtilization(200) + smr.SetNamedUtilization("mary", 321) loadReportWant = &v3orcapb.OrcaLoadReport{ CpuUtilization: 50, MemUtilization: 200, diff --git a/orca/server_metrics.go b/orca/server_metrics.go new file mode 100644 index 000000000000..6b63d3d252bf --- /dev/null +++ b/orca/server_metrics.go @@ -0,0 +1,270 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "sync" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// ServerMetrics is the data returned from a server to a client to describe the +// current state of the server and/or the cost of a request when used per-call. 
+type ServerMetrics struct { + CPUUtilization float64 // CPU utilization: [0, 1.0]; unset=-1 + MemUtilization float64 // Memory utilization: [0, 1.0]; unset=-1 + QPS float64 // queries per second: [0, inf); unset=-1 + EPS float64 // errors per second: [0, inf); unset=-1 + + // The following maps must never be nil. + + Utilization map[string]float64 // Custom fields: [0, 1.0] + RequestCost map[string]float64 // Custom fields: [0, inf); not sent OOB + NamedMetrics map[string]float64 // Custom fields: [0, inf); not sent OOB +} + +// toLoadReportProto dumps sm as an OrcaLoadReport proto. +func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport { + ret := &v3orcapb.OrcaLoadReport{ + Utilization: sm.Utilization, + RequestCost: sm.RequestCost, + NamedMetrics: sm.NamedMetrics, + } + if sm.CPUUtilization != -1 { + ret.CpuUtilization = sm.CPUUtilization + } + if sm.MemUtilization != -1 { + ret.MemUtilization = sm.MemUtilization + } + if sm.QPS != -1 { + ret.RpsFractional = sm.QPS + } + if sm.EPS != -1 { + ret.Eps = sm.EPS + } + return ret +} + +// merge merges o into sm, overwriting any values present in both. +func (sm *ServerMetrics) merge(o *ServerMetrics) { + if o.CPUUtilization != -1 { + sm.CPUUtilization = o.CPUUtilization + } + if o.MemUtilization != -1 { + sm.MemUtilization = o.MemUtilization + } + if o.QPS != -1 { + sm.QPS = o.QPS + } + if o.EPS != -1 { + sm.EPS = o.EPS + } + mergeMap(sm.Utilization, o.Utilization) + mergeMap(sm.RequestCost, o.RequestCost) + mergeMap(sm.NamedMetrics, o.NamedMetrics) +} + +func mergeMap(a, b map[string]float64) { + for k, v := range b { + a[k] = v + } +} + +// ServerMetricsRecorder allows for recording and providing out of band server +// metrics. +type ServerMetricsRecorder interface { + ServerMetricsProvider + + // SetCPUUtilization sets the relevant server metric. + SetCPUUtilization(float64) + // DeleteCPUUtilization deletes the relevant server metric to prevent it + // from being sent. 
+ DeleteCPUUtilization() + + // SetMemoryUtilization sets the relevant server metric. + SetMemoryUtilization(float64) + // DeleteMemoryUtilization deletes the relevant server metric to prevent it + // from being sent. + DeleteMemoryUtilization() + + // SetQPS sets the relevant server metric. + SetQPS(float64) + // DeleteQPS deletes the relevant server metric to prevent it from being + // sent. + DeleteQPS() + + // SetEPS sets the relevant server metric. + SetEPS(float64) + // DeleteEPS deletes the relevant server metric to prevent it from being + // sent. + DeleteEPS() + + // SetNamedUtilization sets the relevant server metric. + SetNamedUtilization(name string, val float64) + // DeleteNamedUtilization deletes the relevant server metric to prevent it + // from being sent. + DeleteNamedUtilization(name string) +} + +type serverMetricsRecorder struct { + mu sync.Mutex // protects state + state *ServerMetrics // the current metrics +} + +// NewServerMetricsRecorder returns an in-memory store for ServerMetrics and +// allows for safe setting and retrieving of ServerMetrics. Also implements +// ServerMetricsProvider for use with NewService. +func NewServerMetricsRecorder() ServerMetricsRecorder { + return newServerMetricsRecorder() +} + +func newServerMetricsRecorder() *serverMetricsRecorder { + return &serverMetricsRecorder{ + state: &ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: make(map[string]float64), + RequestCost: make(map[string]float64), + NamedMetrics: make(map[string]float64), + }, + } +} + +// ServerMetrics returns a copy of the current ServerMetrics. 
+func (s *serverMetricsRecorder) ServerMetrics() *ServerMetrics { + s.mu.Lock() + defer s.mu.Unlock() + return &ServerMetrics{ + CPUUtilization: s.state.CPUUtilization, + MemUtilization: s.state.MemUtilization, + QPS: s.state.QPS, + EPS: s.state.EPS, + Utilization: copyMap(s.state.Utilization), + RequestCost: copyMap(s.state.RequestCost), + NamedMetrics: copyMap(s.state.NamedMetrics), + } +} + +func copyMap(m map[string]float64) map[string]float64 { + ret := make(map[string]float64, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +// SetCPUUtilization records a measurement for the CPU utilization metric. +func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.CPUUtilization = val +} + +// DeleteCPUUtilization deletes the relevant server metric to prevent it from +// being sent. +func (s *serverMetricsRecorder) DeleteCPUUtilization() { + s.SetCPUUtilization(-1) +} + +// SetMemoryUtilization records a measurement for the memory utilization metric. +func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.MemUtilization = val +} + +// DeleteMemoryUtilization deletes the relevant server metric to prevent it +// from being sent. +func (s *serverMetricsRecorder) DeleteMemoryUtilization() { + s.SetMemoryUtilization(-1) +} + +// SetQPS records a measurement for the QPS metric. +func (s *serverMetricsRecorder) SetQPS(val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.QPS = val +} + +// DeleteQPS deletes the relevant server metric to prevent it from being sent. +func (s *serverMetricsRecorder) DeleteQPS() { + s.SetQPS(-1) +} + +// SetEPS records a measurement for the EPS metric. +func (s *serverMetricsRecorder) SetEPS(val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.EPS = val +} + +// DeleteEPS deletes the relevant server metric to prevent it from being sent. 
+func (s *serverMetricsRecorder) DeleteEPS() {
+	s.SetEPS(-1)
+}
+
+// SetNamedUtilization records a measurement for a utilization metric uniquely
+// identifiable by name.
+func (s *serverMetricsRecorder) SetNamedUtilization(name string, val float64) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.state.Utilization[name] = val
+}
+
+// DeleteNamedUtilization deletes any previously recorded measurement for a
+// utilization metric uniquely identifiable by name.
+func (s *serverMetricsRecorder) DeleteNamedUtilization(name string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	delete(s.state.Utilization, name)
+}
+
+// SetRequestCost records a measurement for a request cost metric uniquely
+// identifiable by name.
+func (s *serverMetricsRecorder) SetRequestCost(name string, val float64) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.state.RequestCost[name] = val
+}
+
+// DeleteRequestCost deletes any previously recorded measurement for a
+// request cost metric uniquely identifiable by name.
+func (s *serverMetricsRecorder) DeleteRequestCost(name string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	delete(s.state.RequestCost, name)
+}
+
+// SetNamedMetric records a measurement for a named metric uniquely
+// identifiable by name.
+func (s *serverMetricsRecorder) SetNamedMetric(name string, val float64) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.state.NamedMetrics[name] = val
+}
+
+// DeleteNamedMetric deletes any previously recorded measurement for a
+// named metric uniquely identifiable by name.
+func (s *serverMetricsRecorder) DeleteNamedMetric(name string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.state.NamedMetrics, name) +} diff --git a/orca/service.go b/orca/service.go index ae011fd9a9d2..7461a6b05a1a 100644 --- a/orca/service.go +++ b/orca/service.go @@ -19,7 +19,7 @@ package orca import ( - "sync" + "fmt" "time" "google.golang.org/grpc" @@ -28,7 +28,6 @@ import ( ointernal "google.golang.org/grpc/orca/internal" "google.golang.org/grpc/status" - v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" ) @@ -60,15 +59,16 @@ type Service struct { // Minimum reporting interval, as configured by the user, or the default. minReportingInterval time.Duration - // mu guards the custom metrics injected by the server application. - mu sync.RWMutex - cpu float64 - memory float64 - utilization map[string]float64 + smProvider ServerMetricsProvider } // ServiceOptions contains options to configure the ORCA service implementation. type ServiceOptions struct { + // ServerMetricsProvider is the provider to be used by the service for + // reporting OOB server metrics to clients. Typically obtained via + // NewServerMetricsRecorder. This field is required. + ServerMetricsProvider ServerMetricsProvider + // MinReportingInterval sets the lower bound for how often out-of-band // metrics are reported on the streaming RPC initiated by the client. If // unspecified, negative or less than the default value of 30s, the default @@ -81,11 +81,22 @@ type ServiceOptions struct { allowAnyMinReportingInterval bool } +// A ServerMetricsProvider provides ServerMetrics upon request. +type ServerMetricsProvider interface { + // ServerMetrics returns the current set of server metrics. It should + // return a read-only, immutable copy of the data that is active at the + // time of the call. 
+ ServerMetrics() *ServerMetrics +} + // NewService creates a new ORCA service implementation configured using the // provided options. func NewService(opts ServiceOptions) (*Service, error) { // The default minimum supported reporting interval value can be overridden // for testing purposes through the orca internal package. + if opts.ServerMetricsProvider == nil { + return nil, fmt.Errorf("ServerMetricsProvider not specified") + } if !opts.allowAnyMinReportingInterval { if opts.MinReportingInterval < 0 || opts.MinReportingInterval < minReportingInterval { opts.MinReportingInterval = minReportingInterval @@ -93,20 +104,22 @@ func NewService(opts ServiceOptions) (*Service, error) { } service := &Service{ minReportingInterval: opts.MinReportingInterval, - utilization: make(map[string]float64), + smProvider: opts.ServerMetricsProvider, } return service, nil } // Register creates a new ORCA service implementation configured using the -// provided options and registers the same on the provided service registrar. -func Register(s *grpc.Server, opts ServiceOptions) (*Service, error) { +// provided options and registers the same on the provided grpc Server. +func Register(s *grpc.Server, opts ServiceOptions) error { + // TODO(https://github.com/cncf/xds/issues/41): replace *grpc.Server with + // grpc.ServiceRegistrar when possible. 
service, err := NewService(opts) if err != nil { - return nil, err + return err } v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, service) - return service, nil + return nil } // determineReportingInterval determines the reporting interval for out-of-band @@ -127,7 +140,7 @@ func (s *Service) determineReportingInterval(req *v3orcaservicepb.OrcaLoadReport } func (s *Service) sendMetricsResponse(stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { - return stream.Send(s.toLoadReportProto()) + return stream.Send(s.smProvider.ServerMetrics().toLoadReportProto()) } // StreamCoreMetrics streams custom backend metrics injected by the server @@ -148,49 +161,3 @@ func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, } } } - -// SetCPUUtilization records a measurement for the CPU utilization metric. -func (s *Service) SetCPUUtilization(val float64) { - s.mu.Lock() - s.cpu = val - s.mu.Unlock() -} - -// SetMemoryUtilization records a measurement for the memory utilization metric. -func (s *Service) SetMemoryUtilization(val float64) { - s.mu.Lock() - s.memory = val - s.mu.Unlock() -} - -// SetUtilization records a measurement for a utilization metric uniquely -// identifiable by name. -func (s *Service) SetUtilization(name string, val float64) { - s.mu.Lock() - s.utilization[name] = val - s.mu.Unlock() -} - -// DeleteUtilization deletes any previously recorded measurement for a -// utilization metric uniquely identifiable by name. -func (s *Service) DeleteUtilization(name string) { - s.mu.Lock() - delete(s.utilization, name) - s.mu.Unlock() -} - -// toLoadReportProto dumps the recorded measurements as an OrcaLoadReport proto. 
-func (s *Service) toLoadReportProto() *v3orcapb.OrcaLoadReport { - s.mu.RLock() - defer s.mu.RUnlock() - - util := make(map[string]float64, len(s.utilization)) - for k, v := range s.utilization { - util[k] = v - } - return &v3orcapb.OrcaLoadReport{ - CpuUtilization: s.cpu, - MemUtilization: s.memory, - Utilization: util, - } -} diff --git a/orca/service_test.go b/orca/service_test.go index 715d53241c71..e5cf59fccb4e 100644 --- a/orca/service_test.go +++ b/orca/service_test.go @@ -52,7 +52,7 @@ type testServiceImpl struct { requests int64 testgrpc.TestServiceServer - orcaSrv *orca.Service + smr orca.ServerMetricsRecorder } func (t *testServiceImpl) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { @@ -60,26 +60,26 @@ func (t *testServiceImpl) UnaryCall(context.Context, *testpb.SimpleRequest) (*te t.requests++ t.mu.Unlock() - t.orcaSrv.SetUtilization(requestsMetricKey, float64(t.requests)) - t.orcaSrv.SetCPUUtilization(50.0) - t.orcaSrv.SetMemoryUtilization(99.0) + t.smr.SetNamedUtilization(requestsMetricKey, float64(t.requests)) + t.smr.SetCPUUtilization(50.0) + t.smr.SetMemoryUtilization(99.0) return &testpb.SimpleResponse{}, nil } func (t *testServiceImpl) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { - t.orcaSrv.DeleteUtilization(requestsMetricKey) - t.orcaSrv.SetCPUUtilization(0) - t.orcaSrv.SetMemoryUtilization(0) + t.smr.DeleteNamedUtilization(requestsMetricKey) + t.smr.SetCPUUtilization(0) + t.smr.SetMemoryUtilization(0) return &testpb.Empty{}, nil } -// Test_E2E_CustomBackendMetrics_OutOfBand tests the injection of out-of-band +// TestE2E_CustomBackendMetrics_OutOfBand tests the injection of out-of-band // custom backend metrics from the server application, and verifies that // expected load reports are received at the client. // // TODO: Change this test to use the client API, when ready, to read the // out-of-band metrics pushed by the server. 
-func (s) Test_E2E_CustomBackendMetrics_OutOfBand(t *testing.T) { +func (s) TestE2E_CustomBackendMetrics_OutOfBand(t *testing.T) { lis, err := testutils.LocalTCPListener() if err != nil { t.Fatal(err) @@ -87,18 +87,18 @@ func (s) Test_E2E_CustomBackendMetrics_OutOfBand(t *testing.T) { // Override the min reporting interval in the internal package. const shortReportingInterval = 100 * time.Millisecond - opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval} + smr := orca.NewServerMetricsRecorder() + opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval, ServerMetricsProvider: smr} internal.AllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&opts) // Register the OpenRCAService with a very short metrics reporting interval. s := grpc.NewServer() - orcaSrv, err := orca.Register(s, opts) - if err != nil { + if err := orca.Register(s, opts); err != nil { t.Fatalf("orca.EnableOutOfBandMetricsReportingForTesting() failed: %v", err) } // Register the test service implementation on the same grpc server, and start serving. 
- testgrpc.RegisterTestServiceServer(s, &testServiceImpl{orcaSrv: orcaSrv}) + testgrpc.RegisterTestServiceServer(s, &testServiceImpl{smr: smr}) go s.Serve(lis) defer s.Stop() t.Logf("Started gRPC server at %s...", lis.Addr().String()) From 56b33d5cd0bdee3c53f1bbd3e00b3613abe59323 Mon Sep 17 00:00:00 2001 From: Tobo Atchou Date: Wed, 3 May 2023 18:58:06 +0200 Subject: [PATCH 905/998] server/transport: send appropriate debug_data in GOAWAY frames (#6220) --- internal/transport/handler_server.go | 2 +- internal/transport/handler_server_test.go | 9 +++++++++ internal/transport/http2_server.go | 10 +++++----- internal/transport/keepalive_test.go | 13 +++++++++++-- internal/transport/transport.go | 2 +- server.go | 6 +++--- 6 files changed, 30 insertions(+), 12 deletions(-) diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index fbee581b8660..98f80e3fa00a 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -453,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } diff --git a/internal/transport/handler_server_test.go b/internal/transport/handler_server_test.go index a6eb20285787..99ca211b323c 100644 --- a/internal/transport/handler_server_test.go +++ b/internal/transport/handler_server_test.go @@ -502,6 +502,15 @@ func (s) TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) { checkHeaderAndTrailer(t, hst.rw, wantHeader, wantTrailer) } +// TestHandlerTransport_Drain verifies that Drain() is not implemented +// by `serverHandlerTransport`. 
+func (s) TestHandlerTransport_Drain(t *testing.T) { + defer func() { recover() }() + st := newHandleStreamTest(t) + st.ht.Drain("whatever") + t.Errorf("serverHandlerTransport.Drain() should have panicked") +} + // checkHeaderAndTrailer checks that the resulting header and trailer matches the expectation. func checkHeaderAndTrailer(t *testing.T, rw testHandlerResponseWriter, wantHeader, wantTrailer http.Header) { // For trailer-only responses, the trailer values might be reported as part of the Header. They will however diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 4b406b8cb011..79e86ba08836 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -1166,12 +1166,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1318,14 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() if t.drainEvent != nil { return } t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1367,7 +1367,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. 
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index a020fecdc65d..b9e6d74cb739 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -27,6 +27,7 @@ import ( "fmt" "io" "net" + "strings" "testing" "time" @@ -69,9 +70,13 @@ func (s) TestMaxConnectionIdle(t *testing.T) { case <-ctx.Done(): t.Fatalf("context expired before receiving GoAway from the server.") case <-client.GoAway(): - if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason { + reason, debugMsg := client.GetGoAwayReason() + if reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } + if !strings.Contains(debugMsg, "max_idle") { + t.Fatalf("GoAwayDebugMessage is %v, want %v", debugMsg, "max_idle") + } } } @@ -135,9 +140,13 @@ func (s) TestMaxConnectionAge(t *testing.T) { // for more than MaxConnectionIdle time. select { case <-client.GoAway(): - if reason, _ := client.GetGoAwayReason(); reason != GoAwayNoReason { + reason, debugMsg := client.GetGoAwayReason() + if reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } + if !strings.Contains(debugMsg, "max_age") { + t.Fatalf("GoAwayDebugMessage is %v, want %v", debugMsg, "max_age") + } case <-ctx.Done(): t.Fatalf("timed out before getting a GoAway from the server.") } diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 1b7d7fabc512..aa1c896595d9 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -726,7 +726,7 @@ type ServerTransport interface { RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. 
- Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() diff --git a/server.go b/server.go index 76d152a69c8f..81969e7c15a9 100644 --- a/server.go +++ b/server.go @@ -895,7 +895,7 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } @@ -1046,7 +1046,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1856,7 +1856,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true From 47b3c5545c4d9bef0d42eb1ced7afb313dd7aa92 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 3 May 2023 13:47:37 -0700 Subject: [PATCH 906/998] orca: fix race at producer startup (#6245) --- orca/producer.go | 77 +++++++++++++++++++++++++++---------------- orca/producer_test.go | 12 +++---- orca/service_test.go | 11 ++++--- 3 files changed, 59 insertions(+), 41 deletions(-) diff --git a/orca/producer.go b/orca/producer.go index 956d5ddfb52d..227baeb01ddf 100644 --- a/orca/producer.go +++ b/orca/producer.go @@ -38,19 +38,13 @@ type producerBuilder struct{} // Build constructs and returns a producer and its cleanup function func (*producerBuilder) Build(cci interface{}) (balancer.Producer, func()) { - ctx, cancel := context.WithCancel(context.Background()) p := &producer{ client: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)), - closed: grpcsync.NewEvent(), intervals: make(map[time.Duration]int), listeners: make(map[OOBListener]struct{}), backoff: internal.DefaultBackoffFunc, } - go p.run(ctx) - return p, func() { - cancel() - <-p.closed.Done() // Block until stream stopped. 
- } + return p, func() {} } var producerBuilderSingleton = &producerBuilder{} @@ -77,6 +71,7 @@ type OOBListenerOptions struct { func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) { pr, close := sc.GetOrBuildProducer(producerBuilderSingleton) p := pr.(*producer) + p.registerListener(l, opts.ReportInterval) // TODO: When we can register for SubConn state updates, automatically call @@ -93,16 +88,18 @@ func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOpt type producer struct { client v3orcaservicegrpc.OpenRcaServiceClient - closed *grpcsync.Event // fired when closure completes // backoff is called between stream attempts to determine how long to delay // to avoid overloading a server experiencing problems. The attempt count // is incremented when stream errors occur and is reset when the stream // reports a result. backoff func(int) time.Duration - mu sync.Mutex - intervals map[time.Duration]int // map from interval time to count of listeners requesting that time - listeners map[OOBListener]struct{} // set of registered listeners + mu sync.Mutex + intervals map[time.Duration]int // map from interval time to count of listeners requesting that time + listeners map[OOBListener]struct{} // set of registered listeners + minInterval time.Duration + stop func() // stops the current run goroutine + stopped chan struct{} // closed when the run goroutine exits } // registerListener adds the listener and its requested report interval to the @@ -110,8 +107,13 @@ type producer struct { func (p *producer) registerListener(l OOBListener, interval time.Duration) { p.mu.Lock() defer p.mu.Unlock() + p.listeners[l] = struct{}{} p.intervals[interval]++ + if len(p.listeners) == 1 || interval < p.minInterval { + p.minInterval = interval + p.updateRunLocked() + } } // registerListener removes the listener and its requested report interval to @@ -119,31 +121,53 @@ func (p *producer) registerListener(l OOBListener, 
interval time.Duration) { func (p *producer) unregisterListener(l OOBListener, interval time.Duration) { p.mu.Lock() defer p.mu.Unlock() + delete(p.listeners, l) p.intervals[interval]-- if p.intervals[interval] == 0 { delete(p.intervals, interval) + + if p.minInterval == interval { + p.recomputeMinInterval() + p.updateRunLocked() + } } } -// minInterval returns the smallest key in p.intervals. -func (p *producer) minInterval() time.Duration { - p.mu.Lock() - defer p.mu.Unlock() - var min time.Duration +// recomputeMinInterval sets p.minInterval to the minimum key's value in +// p.intervals. +func (p *producer) recomputeMinInterval() { first := true - for t := range p.intervals { - if t < min || first { - min = t + for interval := range p.intervals { + if first || interval < p.minInterval { + p.minInterval = interval first = false } } - return min +} + +// updateRunLocked is called whenever the run goroutine needs to be started / +// stopped / restarted due to: 1. the initial listener being registered, 2. the +// final listener being unregistered, or 3. the minimum registered interval +// changing. +func (p *producer) updateRunLocked() { + if p.stop != nil { + p.stop() + <-p.stopped + p.stop = nil + } + if len(p.listeners) > 0 { + var ctx context.Context + ctx, p.stop = context.WithCancel(context.Background()) + p.stopped = make(chan struct{}) + go p.run(ctx, p.minInterval) + } } // run manages the ORCA OOB stream on the subchannel. 
-func (p *producer) run(ctx context.Context) { - defer p.closed.Fire() +func (p *producer) run(ctx context.Context, interval time.Duration) { + defer close(p.stopped) + backoffAttempt := 0 backoffTimer := time.NewTimer(0) for ctx.Err() == nil { @@ -153,7 +177,7 @@ func (p *producer) run(ctx context.Context) { return } - resetBackoff, err := p.runStream(ctx) + resetBackoff, err := p.runStream(ctx, interval) if resetBackoff { backoffTimer.Reset(0) @@ -190,8 +214,7 @@ func (p *producer) run(ctx context.Context) { // runStream runs a single stream on the subchannel and returns the resulting // error, if any, and whether or not the run loop should reset the backoff // timer to zero or advance it. -func (p *producer) runStream(ctx context.Context) (resetBackoff bool, err error) { - interval := p.minInterval() +func (p *producer) runStream(ctx context.Context, interval time.Duration) (resetBackoff bool, err error) { streamCtx, cancel := context.WithCancel(ctx) defer cancel() stream, err := p.client.StreamCoreMetrics(streamCtx, &v3orcaservicepb.OrcaLoadReportRequest{ @@ -212,9 +235,5 @@ func (p *producer) runStream(ctx context.Context) (resetBackoff bool, err error) l.OnLoadReport(report) } p.mu.Unlock() - if interval != p.minInterval() { - // restart stream to use new interval - return true, nil - } } } diff --git a/orca/producer_test.go b/orca/producer_test.go index be41424063fa..ce376e7405e2 100644 --- a/orca/producer_test.go +++ b/orca/producer_test.go @@ -519,12 +519,11 @@ func (s) TestProducerMultipleListeners(t *testing.T) { checkReports(2, 1, 0) // Register listener 3 with a more frequent interval; stream is recreated - // with this interval after the next report is received. The first report - // will go to all three listeners. + // with this interval. The next report will go to all three listeners. 
oobLis3.cleanup = orca.RegisterOOBListener(li.sc, oobLis3, lisOpts3) + awaitRequest(reportInterval3) fake.respCh <- loadReportWant checkReports(3, 2, 1) - awaitRequest(reportInterval3) // Another report without a change in listeners should go to all three listeners. fake.respCh <- loadReportWant @@ -536,13 +535,12 @@ func (s) TestProducerMultipleListeners(t *testing.T) { fake.respCh <- loadReportWant checkReports(5, 3, 3) - // Stop listener 3. This makes the interval longer, with stream recreation - // delayed until the next report is received. Reports should only go to - // listener 1 now. + // Stop listener 3. This makes the interval longer. Reports should only + // go to listener 1 now. oobLis3.Stop() + awaitRequest(reportInterval1) fake.respCh <- loadReportWant checkReports(6, 3, 3) - awaitRequest(reportInterval1) // Another report without a change in listeners should go to the first listener. fake.respCh <- loadReportWant checkReports(7, 3, 3) diff --git a/orca/service_test.go b/orca/service_test.go index e5cf59fccb4e..73ad28430264 100644 --- a/orca/service_test.go +++ b/orca/service_test.go @@ -86,7 +86,7 @@ func (s) TestE2E_CustomBackendMetrics_OutOfBand(t *testing.T) { } // Override the min reporting interval in the internal package. - const shortReportingInterval = 100 * time.Millisecond + const shortReportingInterval = 10 * time.Millisecond smr := orca.NewServerMetricsRecorder() opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval, ServerMetricsProvider: smr} internal.AllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&opts) @@ -110,20 +110,21 @@ func (s) TestE2E_CustomBackendMetrics_OutOfBand(t *testing.T) { } defer cc.Close() - // Spawn a goroutine which sends 100 unary RPCs to the test server. This + // Spawn a goroutine which sends 20 unary RPCs to the test server. This // will trigger the injection of custom backend metrics from the // testServiceImpl. 
+ const numRequests = 20 ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() testStub := testgrpc.NewTestServiceClient(cc) errCh := make(chan error, 1) go func() { - for i := 0; i < 100; i++ { + for i := 0; i < numRequests; i++ { if _, err := testStub.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { errCh <- fmt.Errorf("UnaryCall failed: %v", err) return } - time.Sleep(10 * time.Millisecond) + time.Sleep(time.Millisecond) } errCh <- nil }() @@ -151,7 +152,7 @@ func (s) TestE2E_CustomBackendMetrics_OutOfBand(t *testing.T) { wantProto := &v3orcapb.OrcaLoadReport{ CpuUtilization: 50.0, MemUtilization: 99.0, - Utilization: map[string]float64{requestsMetricKey: 100.0}, + Utilization: map[string]float64{requestsMetricKey: numRequests}, } gotProto, err := stream.Recv() if err != nil { From ccad7b7570fd233e4558b034034672c0de89c87c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 4 May 2023 16:05:13 -0700 Subject: [PATCH 907/998] grpc: use CallbackSerializer in resolver_wrapper (#6234) --- internal/grpcsync/callback_serializer.go | 12 +- internal/grpcsync/callback_serializer_test.go | 13 +- resolver_conn_wrapper.go | 134 +++++++++--------- test/service_config_deprecated_test.go | 33 +++-- 4 files changed, 100 insertions(+), 92 deletions(-) diff --git a/internal/grpcsync/callback_serializer.go b/internal/grpcsync/callback_serializer.go index 6df798c00eb1..d91f92463542 100644 --- a/internal/grpcsync/callback_serializer.go +++ b/internal/grpcsync/callback_serializer.go @@ -31,6 +31,12 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { + // Done is closed once the serializer is shut down completely, i.e a + // scheduled callback, if any, that was running when the context passed to + // NewCallbackSerializer is cancelled, has completed and the serializer has + // deallocated all its resources. 
+ Done chan struct{} + callbacks *buffer.Unbounded } @@ -39,7 +45,10 @@ type CallbackSerializer struct { // provided context to shutdown the CallbackSerializer. It is guaranteed that no // callbacks will be executed once this context is canceled. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{callbacks: buffer.NewUnbounded()} + t := &CallbackSerializer{ + Done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } go t.run(ctx) return t } @@ -53,6 +62,7 @@ func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) { } func (t *CallbackSerializer) run(ctx context.Context) { + defer close(t.Done) for ctx.Err() == nil { select { case <-ctx.Done(): diff --git a/internal/grpcsync/callback_serializer_test.go b/internal/grpcsync/callback_serializer_test.go index 6cb1ee52d84a..8c465af66aea 100644 --- a/internal/grpcsync/callback_serializer_test.go +++ b/internal/grpcsync/callback_serializer_test.go @@ -144,19 +144,13 @@ func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) { cs := NewCallbackSerializer(ctx) // Schedule a callback which blocks until the context passed to it is - // canceled. It also closes a couple of channels to signal that it started - // and finished respectively. + // canceled. It also closes a channel to signal that it has started. firstCallbackStartedCh := make(chan struct{}) - firstCallbackFinishCh := make(chan struct{}) cs.Schedule(func(ctx context.Context) { close(firstCallbackStartedCh) <-ctx.Done() - close(firstCallbackFinishCh) }) - // Wait for the first callback to start before scheduling the others. - <-firstCallbackStartedCh - // Schedule a bunch of callbacks. These should not be exeuted since the first // one started earlier is blocked. const numCallbacks = 10 @@ -174,11 +168,14 @@ func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) { t.Fatal(err) } + // Wait for the first callback to start before closing the scheduler. 
+ <-firstCallbackStartedCh + // Cancel the context which will unblock the first callback. None of the // other callbacks (which have not started executing at this point) should // be executed after this. cancel() - <-firstCallbackFinishCh + <-cs.Done // Ensure that the newer callbacks are not executed. select { diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index 854e90f69ae5..ce12b52ecdc0 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -19,8 +19,8 @@ package grpc import ( + "context" "strings" - "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/channelz" @@ -42,15 +42,17 @@ type ccResolverWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. cc resolverStateUpdater - done *grpcsync.Event channelzID *channelz.Identifier ignoreServiceConfig bool - resolverMu sync.Mutex - resolver resolver.Resolver - - incomingMu sync.Mutex // Synchronizes all the incoming calls. - curState resolver.State + // Outgoing (gRPC --> resolver) and incoming (resolver --> gRPC) calls are + // guaranteed to execute in a mutually exclusive manner as they are + // scheduled on the CallbackSerializer. Fields accessed *only* in serializer + // callbacks, can therefore be accessed without a mutex. + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + resolver resolver.Resolver + curState resolver.State } // ccResolverWrapperOpts wraps the arguments to be passed when creating a new @@ -65,104 +67,100 @@ type ccResolverWrapperOpts struct { // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. 
func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ cc: cc, - done: grpcsync.NewEvent(), channelzID: opts.channelzID, ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = opts.builder.Build(opts.target, ccr, opts.bOpts) + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + ccr.resolver = r return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { + ccr.serializer.Schedule(func(_ context.Context) { ccr.resolver.ResolveNow(o) - } - ccr.resolverMu.Unlock() + }) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() + // Close the serializer to ensure that no more calls from the resolver are + // handled, before closing the resolver. + ccr.serializerCancel() + <-ccr.serializer.Done ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() } // UpdateState is called by resolver implementations to report new state to gRPC // which includes addresses and service config. 
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { + errCh := make(chan error, 1) + ccr.serializer.Schedule(func(_ context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + + // If the resolver wrapper is closed when waiting for this state update to + // be handled, the callback serializer will be closed as well, and we can + // rely on its Done channel to ensure that we don't block here forever. + select { + case err := <-errCh: + return err + case <-ccr.serializer.Done: return nil } - ccr.addChannelzTraceEventLocked(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil } // ReportError is called by resolver implementations to report errors // encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) + ccr.serializer.Schedule(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } // NewAddress is called by the resolver implementation to send addresses to // gRPC. 
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - ccr.addChannelzTraceEventLocked(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializer.Schedule(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEventLocked(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializer.Schedule(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, 
ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // ParseServiceConfig is called by resolver implementations to parse a JSON @@ -171,11 +169,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P return parseServiceConfig(scJSON) } -// addChannelzTraceEventLocked adds a channelz trace event containing the new +// addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. -// -// Caller must hold cc.incomingMu. -func (ccr *ccResolverWrapper) addChannelzTraceEventLocked(s resolver.State) { +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/test/service_config_deprecated_test.go b/test/service_config_deprecated_test.go index 035f11526f79..ecf43a5760fe 100644 --- a/test/service_config_deprecated_test.go +++ b/test/service_config_deprecated_test.go @@ -146,15 +146,18 @@ func testServiceConfigWaitForReadyTD(t *testing.T, e env) { ch <- sc // Wait for the new service config to take effect. - mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") - for { - if !*mc.WaitForReady { - time.Sleep(100 * time.Millisecond) - mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") - continue + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + if *mc.WaitForReady { + break } - break } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for service config to take effect") + } + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. 
if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) @@ -212,14 +215,16 @@ func testServiceConfigTimeoutTD(t *testing.T, e env) { ch <- sc // Wait for the new service config to take effect. - mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") - for { - if *mc.Timeout != time.Nanosecond { - time.Sleep(100 * time.Millisecond) - mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") - continue + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + if *mc.Timeout == time.Nanosecond { + break } - break + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for service config to take effect") } ctx, cancel = context.WithTimeout(context.Background(), time.Hour) From 1f3fe1c8bc2cdc97521468580df01904376a6ea7 Mon Sep 17 00:00:00 2001 From: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Date: Sat, 6 May 2023 01:38:20 +1000 Subject: [PATCH 908/998] Update ClientStream.SendMsg doc (#6247) --- stream.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stream.go b/stream.go index d1226a4120f8..f79e31c147ee 100644 --- a/stream.go +++ b/stream.go @@ -123,6 +123,9 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. 
On From 417cf846073b216abc00f5d15c291a3eba5fd00d Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 5 May 2023 11:08:42 -0700 Subject: [PATCH 909/998] test: deflake TestBalancerProducerHonorsContext (#6257) --- test/balancer_test.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/test/balancer_test.go b/test/balancer_test.go index 950d31d13ed5..8b88dc513b29 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -1012,10 +1012,11 @@ func (s) TestMetadataInPickResult(t *testing.T) { type producerTestBalancerBuilder struct { rpcErrChan chan error ctxChan chan context.Context + connect bool } func (bb *producerTestBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return &producerTestBalancer{cc: cc, rpcErrChan: bb.rpcErrChan, ctxChan: bb.ctxChan} + return &producerTestBalancer{cc: cc, rpcErrChan: bb.rpcErrChan, ctxChan: bb.ctxChan, connect: bb.connect} } const producerTestBalancerName = "producer_test_balancer" @@ -1026,6 +1027,7 @@ type producerTestBalancer struct { cc balancer.ClientConn rpcErrChan chan error ctxChan chan context.Context + connect bool } func (b *producerTestBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { @@ -1052,8 +1054,10 @@ func (b *producerTestBalancer) UpdateClientConnState(ccs balancer.ClientConnStat default: } - // Now we can connect, which will unblock the RPC above. - sc.Connect() + if b.connect { + // Now we can connect, which will unblock the RPC above. + sc.Connect() + } // The stub server requires a READY picker to be reported, to unblock its // Start method. We won't make RPCs in our test, so a nil picker is okay. 
@@ -1096,8 +1100,9 @@ func (s) TestBalancerProducerBlockUntilReady(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() ctxChan <- ctx + rpcErrChan := make(chan error) - balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan}) + balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan, connect: true}) ss := &stubserver.StubServer{ EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { @@ -1128,7 +1133,7 @@ func (s) TestBalancerProducerHonorsContext(t *testing.T) { ctxChan <- ctx rpcErrChan := make(chan error) - balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan}) + balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan, connect: false}) ss := &stubserver.StubServer{ EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { From f193ec01834d14f6ce00e47c9f2965ffc4de1bbf Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 5 May 2023 14:25:11 -0700 Subject: [PATCH 910/998] orca: fix race when calling listeners coincides with updating the run goroutine (#6258) --- orca/producer.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/orca/producer.go b/orca/producer.go index 227baeb01ddf..3b7ed8b67d8a 100644 --- a/orca/producer.go +++ b/orca/producer.go @@ -44,7 +44,9 @@ func (*producerBuilder) Build(cci interface{}) (balancer.Producer, func()) { listeners: make(map[OOBListener]struct{}), backoff: internal.DefaultBackoffFunc, } - return p, func() {} + return p, func() { + <-p.stopped + } } var producerBuilderSingleton = &producerBuilder{} @@ -153,20 +155,19 @@ func (p *producer) recomputeMinInterval() { func (p *producer) updateRunLocked() { if p.stop != nil { p.stop() - <-p.stopped p.stop = nil } if len(p.listeners) > 0 { var ctx context.Context ctx, p.stop = context.WithCancel(context.Background()) p.stopped 
= make(chan struct{}) - go p.run(ctx, p.minInterval) + go p.run(ctx, p.stopped, p.minInterval) } } // run manages the ORCA OOB stream on the subchannel. -func (p *producer) run(ctx context.Context, interval time.Duration) { - defer close(p.stopped) +func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) { + defer close(done) backoffAttempt := 0 backoffTimer := time.NewTimer(0) From c44f77e12db9c09d27504f972e3275d6e6c544ea Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 5 May 2023 16:07:27 -0700 Subject: [PATCH 911/998] grpc: use CallbackSerializer in balancer wrapper (#6254) --- balancer_conn_wrappers.go | 258 +++++++++++--------------------------- 1 file changed, 74 insertions(+), 184 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index d0383f04748c..1865a3f09c2b 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -27,7 +27,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" @@ -49,147 +48,60 @@ import ( type ccBalancerWrapper struct { cc *ClientConn - // Since these fields are accessed only from handleXxx() methods which are - // synchronized by the watcher goroutine, we do not need a mutex to protect - // these fields. - balancer *gracefulswitch.Balancer - curBalancerName string - - updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). - resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. - closed *grpcsync.Event // Indicates if close has been called. - done *grpcsync.Event // Indicates if close has completed its work. 
+ // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the + // CallbackSerializer. Fields accessed *only* in serializer callbacks, can + // therefore be accessed without a mutex. + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + balancer *gracefulswitch.Balancer + curBalancerName string } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - resultCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), + cc: cc, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// The following xxxUpdate structs wrap the arguments received as part of the -// corresponding update. The watcher goroutine uses the 'type' of the update to -// invoke the appropriate handler routine to handle the update. - -type ccStateUpdate struct { - ccs *balancer.ClientConnState -} - -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -type exitIdleUpdate struct{} - -type resolverErrorUpdate struct { - err error -} - -type switchToUpdate struct { - name string -} - -// watcher is a long-running goroutine which reads updates from a channel and -// invokes corresponding methods on the underlying balancer. It ensures that -// these methods are invoked in a synchronous fashion. It also ensures that -// these methods are invoked in the order in which the updates were received. 
-func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case u, ok := <-ccb.updateCh.Get(): - if !ok { - break - } - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch update := u.(type) { - case *ccStateUpdate: - ccb.handleClientConnStateChange(update.ccs) - case *scStateUpdate: - ccb.handleSubConnStateChange(update) - case *exitIdleUpdate: - ccb.handleExitIdle() - case *resolverErrorUpdate: - ccb.handleResolverError(update.err) - case *switchToUpdate: - ccb.handleSwitchTo(update.name) - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.handleClose() - return - } - } -} - // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. -// -// Unlike other methods invoked by grpc to push updates to the underlying -// balancer, this method cannot simply push the update onto the update channel -// and return. It needs to return the error returned by the underlying balancer -// back to grpc which propagates that to the resolver. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + errCh := make(chan error, 1) + ccb.serializer.Schedule(func(_ context.Context) { + // If the addresses specified in the update contain addresses of type + // "grpclb" and the selected LB policy is not "grpclb", these addresses + // will be filtered out and ccs will be modified with the updated + // address list. 
+ if ccb.curBalancerName != grpclbName { + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs + } + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) - var res interface{} - var ok bool + // If the balancer wrapper is closed when waiting for this state update to + // be handled, the callback serializer will be closed as well, and we can + // rely on its Done channel to ensure that we don't block here forever. select { - case res, ok = <-ccb.resultCh.Get(): - if !ok { - // The result channel is closed only when the balancer wrapper is closed. - return nil - } - ccb.resultCh.Load() - case <-ccb.closed.Done(): - // Return early if the balancer wrapper is closed while we are waiting for - // the underlying balancer to process a ClientConnState update. - return nil - } - // If the returned error is nil, attempting to type assert to error leads to - // panic. So, this needs to handled separately. - if res == nil { + case err := <-errCh: + return err + case <-ccb.serializer.Done: return nil } - return res.(error) -} - -// handleClientConnStateChange handles a ClientConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -// -// If the addresses specified in the update contain addresses of type "grpclb" -// and the selected LB policy is not "grpclb", these addresses will be filtered -// out and ccs will be modified with the updated address list. -func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { - if ccb.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. 
- var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs - } - ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } // updateSubConnState is invoked by grpc to push a subConn state update to the @@ -202,39 +114,27 @@ func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connecti // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and // this function will be called with (nil, Shutdown). We don't need to call // balancer method in this case. + // + // TODO: Suppress the above mentioned state change to Shutdown, so we don't + // have to handle it here. if sc == nil { return } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) } -// handleSubConnStateChange handles a SubConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. 
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { - ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) -} - func (ccb *ccBalancerWrapper) exitIdle() { - ccb.updateCh.Put(&exitIdleUpdate{}) -} - -func (ccb *ccBalancerWrapper) handleExitIdle() { - if ccb.cc.GetState() != connectivity.Idle { - return - } - ccb.balancer.ExitIdle() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.updateCh.Put(&resolverErrorUpdate{err: err}) -} - -func (ccb *ccBalancerWrapper) handleResolverError(err error) { - ccb.balancer.ResolverError(err) + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -248,49 +148,39 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.updateCh.Put(&switchToUpdate{name: name}) -} - -// handleSwitchTo handles a balancer switch update from the update channel. It -// calls the SwitchTo() method on the gracefulswitch.Balancer with a -// balancer.Builder corresponding to name. If no balancer.Builder is registered -// for the given name, it uses the default LB policy which is "pick_first". -func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { - // TODO: Other languages use case-insensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. 
See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } - // TODO: Ensure that name is a registered LB policy when we get here. - // We currently only validate the `loadBalancingConfig` field. We need to do - // the same for the `loadBalancingPolicy` field and reject the service config - // if the specified policy is not registered. - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } + // Use the default LB policy, pick_first, if no LB policy with name is + // found in the registry. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() + }) } func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() -} - -func (ccb *ccBalancerWrapper) handleClose() { + // Close the serializer to ensure that no more calls from gRPC are sent to + // the balancer. 
We don't have to worry about suppressing calls from a + // closed balancer because these are handled by the ClientConn (balancer + // wrapper is only ever closed when the ClientConn is closed). + ccb.serializerCancel() + <-ccb.serializer.Done ccb.balancer.Close() - ccb.updateCh.Close() - ccb.resultCh.Close() - ccb.done.Fire() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { From 5c4bee51c2ff3e713735d0c99547fc76bb739c2a Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 8 May 2023 10:01:08 -0700 Subject: [PATCH 912/998] balancer/weightedroundrobin: add load balancing policy (A58) (#6241) --- balancer/weightedroundrobin/balancer.go | 532 +++++++++++++ balancer/weightedroundrobin/balancer_test.go | 713 ++++++++++++++++++ balancer/weightedroundrobin/config.go | 60 ++ .../weightedroundrobin/internal/internal.go | 44 ++ balancer/weightedroundrobin/logging.go | 34 + balancer/weightedroundrobin/scheduler.go | 138 ++++ .../weightedroundrobin/weightedroundrobin.go | 23 +- internal/grpcrand/grpcrand.go | 7 + orca/producer.go | 13 +- xds/internal/balancer/clusterimpl/picker.go | 2 +- 10 files changed, 1545 insertions(+), 21 deletions(-) create mode 100644 balancer/weightedroundrobin/balancer.go create mode 100644 balancer/weightedroundrobin/balancer_test.go create mode 100644 balancer/weightedroundrobin/config.go create mode 100644 balancer/weightedroundrobin/internal/internal.go create mode 100644 balancer/weightedroundrobin/logging.go create mode 100644 balancer/weightedroundrobin/scheduler.go diff --git a/balancer/weightedroundrobin/balancer.go b/balancer/weightedroundrobin/balancer.go new file mode 100644 index 000000000000..e0d255222d52 --- /dev/null +++ b/balancer/weightedroundrobin/balancer.go @@ -0,0 +1,532 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// Name is the name of the weighted round robin balancer. +const Name = "weighted_round_robin_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &wrrBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + scMap: make(map[balancer.SubConn]*weightedSubConn), + connectivityState: connectivity.Connecting, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &lbConfig{ + // Default values as documented in A58. 
+ OOBReportingPeriod: 10 * time.Second, + BlackoutPeriod: 10 * time.Second, + WeightExpirationPeriod: 3 * time.Minute, + WeightUpdatePeriod: time.Second, + ErrorUtilizationPenalty: 1, + } + if err := json.Unmarshal(js, lbCfg); err != nil { + return nil, fmt.Errorf("wrr: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + + if lbCfg.ErrorUtilizationPenalty < 0 { + return nil, fmt.Errorf("wrr: errorUtilizationPenalty must be non-negative") + } + + // For easier comparisons later, ensure the OOB reporting period is unset + // (0s) when OOB reports are disabled. + if !lbCfg.EnableOOBLoadReport { + lbCfg.OOBReportingPeriod = 0 + } + + // Impose lower bound of 100ms on weightUpdatePeriod. + if !internal.AllowAnyWeightUpdatePeriod && lbCfg.WeightUpdatePeriod < 100*time.Millisecond { + lbCfg.WeightUpdatePeriod = 100 * time.Millisecond + } + + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// wrrBalancer implements the weighted round robin LB policy. +type wrrBalancer struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. 
+ cfg *lbConfig // active config + subConns *resolver.AddressMap // active weightedSubConns mapped by address + scMap map[balancer.SubConn]*weightedSubConn + connectivityState connectivity.State // aggregate state + csEvltr *balancer.ConnectivityStateEvaluator + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + stopPicker func() +} + +func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + b.logger.Infof("UpdateCCS: %v", ccs) + b.resolverErr = nil + cfg, ok := ccs.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig) + } + + b.cfg = cfg + b.updateAddresses(ccs.ResolverState.Addresses) + + if len(ccs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("resolver produced zero addresses")) // will call regeneratePicker + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + + return nil +} + +func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { + addrsSet := resolver.NewAddressMap() + + // Loop through new address list and create subconns for any new addresses. + for _, addr := range addrs { + if _, ok := addrsSet.Get(addr); ok { + // Redundant address; skip. + continue + } + addrsSet.Set(addr, nil) + + var wsc *weightedSubConn + wsci, ok := b.subConns.Get(addr) + if ok { + wsc = wsci.(*weightedSubConn) + } else { + // addr is a new address (not existing in b.subConns). 
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + if err != nil { + b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err) + continue + } + wsc = &weightedSubConn{ + SubConn: sc, + logger: b.logger, + connectivityState: connectivity.Idle, + // Initially, we set load reports to off, because they are not + // running upon initial weightedSubConn creation. + cfg: &lbConfig{EnableOOBLoadReport: false}, + } + b.subConns.Set(addr, wsc) + b.scMap[sc] = wsc + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + // Update config for existing weightedSubConn or send update for first + // time to new one. Ensures an OOB listener is running if needed + // (and stops the existing one if applicable). + wsc.updateConfig(b.cfg) + } + + // Loop through existing subconns and remove ones that are not in addrs. + for _, addr := range b.subConns.Keys() { + if _, ok := addrsSet.Get(addr); ok { + // Existing address also in new address list; skip. + continue + } + // addr was removed by resolver. Remove. + wsci, _ := b.subConns.Get(addr) + wsc := wsci.(*weightedSubConn) + b.cc.RemoveSubConn(wsc.SubConn) + b.subConns.Delete(addr) + } +} + +func (b *wrrBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.connectivityState = connectivity.TransientFailure + } + if b.connectivityState != connectivity.TransientFailure { + // No need to update the picker since no error is being returned. 
+ return + } + b.regeneratePicker() +} + +func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + wsc := b.scMap[sc] + if wsc == nil { + b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state) + return + } + if b.logger.V(2) { + logger.Infof("UpdateSubConnState(%+v, %+v)", sc, state) + } + + cs := state.ConnectivityState + + if cs == connectivity.TransientFailure { + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + if cs == connectivity.Shutdown { + delete(b.scMap, sc) + // The subconn was removed from b.subConns when the address was removed + // in updateAddresses. + } + + oldCS := wsc.updateConnectivityState(cs) + b.connectivityState = b.csEvltr.RecordTransition(oldCS, cs) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (cs == connectivity.Ready) != (oldCS == connectivity.Ready) || + b.connectivityState == connectivity.TransientFailure { + b.regeneratePicker() + } +} + +// Close stops the balancer. It cancels any ongoing scheduler updates and +// stops any ORCA listeners. +func (b *wrrBalancer) Close() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + for _, wsc := range b.scMap { + // Ensure any lingering OOB watchers are stopped. + wsc.updateConnectivityState(connectivity.Shutdown) + } +} + +// ExitIdle is ignored; we always connect to all backends. +func (b *wrrBalancer) ExitIdle() {} + +func (b *wrrBalancer) readySubConns() []*weightedSubConn { + var ret []*weightedSubConn + for _, v := range b.subConns.Values() { + wsc := v.(*weightedSubConn) + if wsc.connectivityState == connectivity.Ready { + ret = append(ret, wsc) + } + } + return ret +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. 
Must only be called if b.connectivityState is +// TransientFailure. +func (b *wrrBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *wrrBalancer) regeneratePicker() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + + switch b.connectivityState { + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(b.mergeErrors()), + }) + return + case connectivity.Connecting, connectivity.Idle: + // Idle could happen very briefly if all subconns are Idle and we've + // asked them to connect but they haven't reported Connecting yet. + // Report the same as Connecting since this is temporary. + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }) + return + case connectivity.Ready: + b.connErr = nil + } + + p := &picker{ + v: grpcrand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + } + var ctx context.Context + ctx, b.stopPicker = context.WithCancel(context.Background()) + p.start(ctx) + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.connectivityState, + Picker: p, + }) +} + +// picker is the WRR policy's picker. It uses live-updating backend weights to +// update the scheduler periodically and ensure picks are routed proportional +// to those weights. 
+type picker struct { + scheduler unsafe.Pointer // *scheduler; accessed atomically + v uint32 // incrementing value used by the scheduler; accessed atomically + cfg *lbConfig // active config when picker created + subConns []*weightedSubConn // all READY subconns +} + +// scWeights returns a slice containing the weights from p.subConns in the same +// order as p.subConns. +func (p *picker) scWeights() []float64 { + ws := make([]float64, len(p.subConns)) + now := internal.TimeNow() + for i, wsc := range p.subConns { + ws[i] = wsc.weight(now, p.cfg.WeightExpirationPeriod, p.cfg.BlackoutPeriod) + } + return ws +} + +func (p *picker) inc() uint32 { + return atomic.AddUint32(&p.v, 1) +} + +func (p *picker) regenerateScheduler() { + s := newScheduler(p.scWeights(), p.inc) + atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) +} + +func (p *picker) start(ctx context.Context) { + p.regenerateScheduler() + if len(p.subConns) == 1 { + // No need to regenerate weights with only one backend. + return + } + go func() { + ticker := time.NewTicker(p.cfg.WeightUpdatePeriod) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.regenerateScheduler() + } + } + }() +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + // Read the scheduler atomically. All scheduler operations are threadsafe, + // and if the scheduler is replaced during this usage, we want to use the + // scheduler that was live when the pick started. 
+ sched := *(*scheduler)(atomic.LoadPointer(&p.scheduler)) + + pickedSC := p.subConns[sched.nextIndex()] + pr := balancer.PickResult{SubConn: pickedSC.SubConn} + if !p.cfg.EnableOOBLoadReport { + pr.Done = func(info balancer.DoneInfo) { + if load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport); ok && load != nil { + pickedSC.OnLoadReport(load) + } + } + } + return pr, nil +} + +// weightedSubConn is the wrapper of a subconn that holds the subconn and its +// weight (and other parameters relevant to computing the effective weight). +// When needed, it also tracks connectivity state, listens for metrics updates +// by implementing the orca.OOBListener interface and manages that listener. +type weightedSubConn struct { + balancer.SubConn + logger *grpclog.PrefixLogger + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. + connectivityState connectivity.State + stopORCAListener func() + + // The following fields are accessed asynchronously and are protected by + // mu. Note that mu may not be held when calling into the stopORCAListener + // or when registering a new listener, as those calls require the ORCA + // producer mu which is held when calling the listener, and the listener + // holds mu. 
+ mu sync.Mutex + weightVal float64 + nonEmptySince time.Time + lastUpdated time.Time + cfg *lbConfig +} + +func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { + if w.logger.V(2) { + w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) + } + // Update weights of this subchannel according to the reported load + if load.CpuUtilization == 0 || load.RpsFractional == 0 { + if w.logger.V(2) { + w.logger.Infof("Ignoring empty load report for subchannel %v", w.SubConn) + } + return + } + + w.mu.Lock() + defer w.mu.Unlock() + + errorRate := load.Eps / load.RpsFractional + w.weightVal = load.RpsFractional / (load.CpuUtilization + errorRate*w.cfg.ErrorUtilizationPenalty) + if w.logger.V(2) { + w.logger.Infof("New weight for subchannel %v: %v", w.SubConn, w.weightVal) + } + + w.lastUpdated = internal.TimeNow() + if w.nonEmptySince == (time.Time{}) { + w.nonEmptySince = w.lastUpdated + } +} + +// updateConfig updates the parameters of the WRR policy and +// stops/starts/restarts the ORCA OOB listener. +func (w *weightedSubConn) updateConfig(cfg *lbConfig) { + w.mu.Lock() + oldCfg := w.cfg + w.cfg = cfg + w.mu.Unlock() + + newPeriod := cfg.OOBReportingPeriod + if cfg.EnableOOBLoadReport == oldCfg.EnableOOBLoadReport && + newPeriod == oldCfg.OOBReportingPeriod { + // Load reporting wasn't enabled before or after, or load reporting was + // enabled before and after, and had the same period. (Note that with + // load reporting disabled, OOBReportingPeriod is always 0.) + return + } + // (Optionally stop and) start the listener to use the new config's + // settings for OOB reporting. 
+ + if w.stopORCAListener != nil { + w.stopORCAListener() + } + if !cfg.EnableOOBLoadReport { + w.stopORCAListener = nil + return + } + if w.logger.V(2) { + w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, newPeriod) + } + opts := orca.OOBListenerOptions{ReportInterval: newPeriod} + w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts) +} + +func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connectivity.State { + switch cs { + case connectivity.Idle: + // Always reconnect when idle. + w.SubConn.Connect() + case connectivity.Ready: + // If we transition back to READY state, reset nonEmptySince so that we + // apply the blackout period after we start receiving load data. Note + // that we cannot guarantee that we will never receive lingering + // callbacks for backend metric reports from the previous connection + // after the new connection has been established, but they should be + // masked by new backend metric reports from the new connection by the + // time the blackout period ends. + w.mu.Lock() + w.nonEmptySince = time.Time{} + w.mu.Unlock() + case connectivity.Shutdown: + if w.stopORCAListener != nil { + w.stopORCAListener() + } + } + + oldCS := w.connectivityState + + if oldCS == connectivity.TransientFailure && + (cs == connectivity.Connecting || cs == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return oldCS + } + + w.connectivityState = cs + + return oldCS +} + +// weight returns the current effective weight of the subconn, taking into +// account the parameters. Returns 0 for blacked out or expired data, which +// will cause the backend weight to be treated as the mean of the weights of +// the other backends. 
+func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration) float64 { + w.mu.Lock() + defer w.mu.Unlock() + // If the most recent update was longer ago than the expiration period, + // reset nonEmptySince so that we apply the blackout period again if we + // start getting data again in the future, and return 0. + if now.Sub(w.lastUpdated) >= weightExpirationPeriod { + w.nonEmptySince = time.Time{} + return 0 + } + // If we don't have at least blackoutPeriod worth of data, return 0. + if blackoutPeriod != 0 && (w.nonEmptySince == (time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + return 0 + } + return w.weightVal +} diff --git a/balancer/weightedroundrobin/balancer_test.go b/balancer/weightedroundrobin/balancer_test.go new file mode 100644 index 000000000000..5dd62ebf872a --- /dev/null +++ b/balancer/weightedroundrobin/balancer_test.go @@ -0,0 +1,713 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package weightedroundrobin_test + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + + wrr "google.golang.org/grpc/balancer/weightedroundrobin" + iwrr "google.golang.org/grpc/balancer/weightedroundrobin/internal" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const defaultTestTimeout = 10 * time.Second +const weightUpdatePeriod = 50 * time.Millisecond +const oobReportingInterval = 10 * time.Millisecond + +func init() { + iwrr.AllowAnyWeightUpdatePeriod = true +} + +func boolp(b bool) *bool { return &b } +func float64p(f float64) *float64 { return &f } +func durationp(d time.Duration) *time.Duration { return &d } + +var ( + perCallConfig = iwrr.LBConfig{ + EnableOOBLoadReport: boolp(false), + OOBReportingPeriod: durationp(5 * time.Millisecond), + BlackoutPeriod: durationp(0), + WeightExpirationPeriod: durationp(time.Minute), + WeightUpdatePeriod: durationp(weightUpdatePeriod), + ErrorUtilizationPenalty: float64p(0), + } + oobConfig = iwrr.LBConfig{ + EnableOOBLoadReport: boolp(true), + OOBReportingPeriod: durationp(5 * time.Millisecond), + BlackoutPeriod: durationp(0), + WeightExpirationPeriod: durationp(time.Minute), + WeightUpdatePeriod: durationp(weightUpdatePeriod), + ErrorUtilizationPenalty: float64p(0), + } +) + +type testServer struct { + *stubserver.StubServer + + oobMetrics orca.ServerMetricsRecorder // Attached to the OOB stream. + callMetrics orca.CallMetricsRecorder // Attached to per-call metrics. 
+} + +type reportType int + +const ( + reportNone reportType = iota + reportOOB + reportCall + reportBoth +) + +func startServer(t *testing.T, r reportType) *testServer { + t.Helper() + + smr := orca.NewServerMetricsRecorder() + cmr := orca.NewServerMetricsRecorder().(orca.CallMetricsRecorder) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if r := orca.CallMetricsRecorderFromContext(ctx); r != nil { + // Copy metrics from what the test set in cmr into r. + sm := cmr.(orca.ServerMetricsProvider).ServerMetrics() + r.SetCPUUtilization(sm.CPUUtilization) + r.SetQPS(sm.QPS) + r.SetEPS(sm.EPS) + } + return &testpb.Empty{}, nil + }, + } + + var sopts []grpc.ServerOption + if r == reportCall || r == reportBoth { + sopts = append(sopts, orca.CallMetricsServerOption(nil)) + } + + if r == reportOOB || r == reportBoth { + oso := orca.ServiceOptions{ + ServerMetricsProvider: smr, + MinReportingInterval: 10 * time.Millisecond, + } + internal.ORCAAllowAnyMinReportingInterval.(func(so *orca.ServiceOptions))(&oso) + sopts = append(sopts, stubserver.RegisterServiceServerOption(func(s *grpc.Server) { + if err := orca.Register(s, oso); err != nil { + t.Fatalf("Failed to register orca service: %v", err) + } + })) + } + + if err := ss.StartServer(sopts...); err != nil { + t.Fatalf("Error starting server: %v", err) + } + t.Cleanup(ss.Stop) + + return &testServer{ + StubServer: ss, + oobMetrics: smr, + callMetrics: cmr, + } +} + +func svcConfig(t *testing.T, wrrCfg iwrr.LBConfig) string { + t.Helper() + m, err := json.Marshal(wrrCfg) + if err != nil { + t.Fatalf("Error marshaling JSON %v: %v", wrrCfg, err) + } + sc := fmt.Sprintf(`{"loadBalancingConfig": [ {%q:%v} ] }`, wrr.Name, string(m)) + t.Logf("Marshaled service config: %v", sc) + return sc +} + +// Tests basic functionality with one address. With only one address, load +// reporting doesn't affect routing at all. 
+func (s) TestBalancer_OneAddress(t *testing.T) { + testCases := []struct { + rt reportType + cfg iwrr.LBConfig + }{ + {rt: reportNone, cfg: perCallConfig}, + {rt: reportCall, cfg: perCallConfig}, + {rt: reportOOB, cfg: oobConfig}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("reportType:%v", tc.rt), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv := startServer(t, tc.rt) + + sc := svcConfig(t, tc.cfg) + if err := srv.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + + // Perform many RPCs to ensure the LB policy works with 1 address. + for i := 0; i < 100; i++ { + srv.callMetrics.SetQPS(float64(i)) + srv.oobMetrics.SetQPS(float64(i)) + if _, err := srv.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("Error from EmptyCall: %v", err) + } + time.Sleep(time.Millisecond) // Delay; test will run 100ms and should perform ~10 weight updates + } + }) + } +} + +// Tests two addresses with ORCA reporting disabled (should fall back to pure +// RR). +func (s) TestBalancer_TwoAddresses_ReportingDisabled(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportNone) + srv2 := startServer(t, reportNone) + + sc := svcConfig(t, perCallConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Perform many RPCs to ensure the LB policy works with 2 addresses. + for i := 0; i < 20; i++ { + roundrobin.CheckRoundRobinRPCs(ctx, srv1.Client, addrs) + } +} + +// Tests two addresses with per-call ORCA reporting enabled. Checks the +// backends are called in the appropriate ratios. 
+func (s) TestBalancer_TwoAddresses_ReportingEnabledPerCall(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportCall) + srv2 := startServer(t, reportCall) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.callMetrics.SetQPS(10.0) + srv1.callMetrics.SetCPUUtilization(1.0) + + srv2.callMetrics.SetQPS(10.0) + srv2.callMetrics.SetCPUUtilization(.1) + + sc := svcConfig(t, perCallConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) +} + +// Tests two addresses with OOB ORCA reporting enabled. Checks the backends +// are called in the appropriate ratios. +func (s) TestBalancer_TwoAddresses_ReportingEnabledOOB(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). 
+ srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(.1) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) +} + +// Tests two addresses with OOB ORCA reporting enabled, where the reports +// change over time. Checks the backends are called in the appropriate ratios +// before and after modifying the reports. +func (s) TestBalancer_TwoAddresses_UpdateLoads(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(.1) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. 
+ time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Update the loads so srv2 is loaded and srv1 is not; ensure RPCs are + // routed disproportionately to srv1. + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(.1) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(1.0) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod + oobReportingInterval) + checkWeights(ctx, t, srvWeight{srv1, 10}, srvWeight{srv2, 1}) +} + +// Tests two addresses with OOB ORCA reporting enabled, then with switching to +// per-call reporting. Checks the backends are called in the appropriate +// ratios before and after the change. +func (s) TestBalancer_TwoAddresses_OOBThenPerCall(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportBoth) + srv2 := startServer(t, reportBoth) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(.1) + + // For per-call metrics (not used initially), srv2 reports that it is + // loaded and srv1 reports low load. After confirming OOB works, switch to + // per-call and confirm the new routing weights are applied. + srv1.callMetrics.SetQPS(10.0) + srv1.callMetrics.SetCPUUtilization(.1) + + srv2.callMetrics.SetQPS(10.0) + srv2.callMetrics.SetCPUUtilization(1.0) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. 
+ ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Update to per-call weights. + c := svcConfig(t, perCallConfig) + parsedCfg := srv1.R.CC.ParseServiceConfig(c) + if parsedCfg.Err != nil { + panic(fmt.Sprintf("Error parsing config %q: %v", c, parsedCfg.Err)) + } + srv1.R.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parsedCfg}) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 10}, srvWeight{srv2, 1}) +} + +// Tests two addresses with OOB ORCA reporting enabled and a non-zero error +// penalty applied. +func (s) TestBalancer_TwoAddresses_ErrorPenalty(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). EPS values are set (but ignored + // initially due to ErrorUtilizationPenalty=0). Later EUP will be updated + // to 0.9 which will cause the weights to be equal and RPCs to be routed + // 50/50. 
+ srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(1.0) + srv1.oobMetrics.SetEPS(0) + // srv1 weight before: 10.0 / 1.0 = 10.0 + // srv1 weight after: 10.0 / 1.0 = 10.0 + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(.1) + srv2.oobMetrics.SetEPS(10.0) + // srv2 weight before: 10.0 / 0.1 = 100.0 + // srv2 weight after: 10.0 / 1.0 = 10.0 + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Update to include an error penalty in the weights. + newCfg := oobConfig + newCfg.ErrorUtilizationPenalty = float64p(0.9) + c := svcConfig(t, newCfg) + parsedCfg := srv1.R.CC.ParseServiceConfig(c) + if parsedCfg.Err != nil { + panic(fmt.Sprintf("Error parsing config %q: %v", c, parsedCfg.Err)) + } + srv1.R.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parsedCfg}) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod + oobReportingInterval) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) +} + +// Tests that the blackout period causes backends to use 0 as their weight +// (meaning to use the average weight) until the blackout period elapses. 
+func (s) TestBalancer_TwoAddresses_BlackoutPeriod(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var mu sync.Mutex + start := time.Now() + now := start + setNow := func(t time.Time) { + mu.Lock() + defer mu.Unlock() + now = t + } + iwrr.TimeNow = func() time.Time { + mu.Lock() + defer mu.Unlock() + return now + } + t.Cleanup(func() { iwrr.TimeNow = time.Now }) + + testCases := []struct { + blackoutPeriodCfg *time.Duration + blackoutPeriod time.Duration + }{{ + blackoutPeriodCfg: durationp(time.Second), + blackoutPeriod: time.Second, + }, { + blackoutPeriodCfg: nil, + blackoutPeriod: 10 * time.Second, // the default + }} + for _, tc := range testCases { + setNow(start) + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(.1) + + cfg := oobConfig + cfg.BlackoutPeriod = tc.blackoutPeriodCfg + sc := svcConfig(t, cfg) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + // During the blackout period (1s) we should route roughly 50/50. + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) + + // Advance time to right before the blackout period ends and the weights + // should still be zero. 
+ setNow(start.Add(tc.blackoutPeriod - time.Nanosecond)) + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) + + // Advance time to right after the blackout period ends and the weights + // should now activate. + setNow(start.Add(tc.blackoutPeriod)) + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + } +} + +// Tests that the weight expiration period causes backends to use 0 as their +// weight (meaning to use the average weight) once the expiration period +// elapses. +func (s) TestBalancer_TwoAddresses_WeightExpiration(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var mu sync.Mutex + start := time.Now() + now := start + setNow := func(t time.Time) { + mu.Lock() + defer mu.Unlock() + now = t + } + iwrr.TimeNow = func() time.Time { + mu.Lock() + defer mu.Unlock() + return now + } + t.Cleanup(func() { iwrr.TimeNow = time.Now }) + + srv1 := startServer(t, reportBoth) + srv2 := startServer(t, reportBoth) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). Because the OOB reporting interval + // is 1 minute but the weights expire in 1 second, routing will go to 50/50 + // after the weights expire. 
+ srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(.1) + + cfg := oobConfig + cfg.OOBReportingPeriod = durationp(time.Minute) + sc := svcConfig(t, cfg) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Advance what time.Now returns to the weight expiration time minus 1s to + // ensure all weights are still honored. + setNow(start.Add(*cfg.WeightExpirationPeriod - time.Second)) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Advance what time.Now returns to the weight expiration time plus 1s to + // ensure all weights expired and addresses are routed evenly. + setNow(start.Add(*cfg.WeightExpirationPeriod + time.Second)) + + // Wait for the weight expiration period so the weights have expired. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) +} + +// Tests logic surrounding subchannel management. 
+func (s) TestBalancer_AddressesChanging(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportBoth) + srv2 := startServer(t, reportBoth) + srv3 := startServer(t, reportBoth) + srv4 := startServer(t, reportBoth) + + // srv1: weight 10 + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetCPUUtilization(1.0) + // srv2: weight 100 + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetCPUUtilization(.1) + // srv3: weight 20 + srv3.oobMetrics.SetQPS(20.0) + srv3.oobMetrics.SetCPUUtilization(1.0) + // srv4: weight 200 + srv4.oobMetrics.SetQPS(20.0) + srv4.oobMetrics.SetCPUUtilization(.1) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + srv2.Client = srv1.Client + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}, {Addr: srv3.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 3) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}, srvWeight{srv3, 2}) + + // Add backend 4 + addrs = append(addrs, resolver.Address{Addr: srv4.Address}) + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}, srvWeight{srv3, 2}, srvWeight{srv4, 20}) + + // Shutdown backend 3. RPCs will no longer be routed to it. + srv3.Stop() + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}, srvWeight{srv4, 20}) + + // Remove addresses 2 and 3. RPCs will no longer be routed to 2 either. 
+ addrs = []resolver.Address{{Addr: srv1.Address}, {Addr: srv4.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv4, 20}) + + // Re-add 2 and remove the rest. + addrs = []resolver.Address{{Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv2, 10}) + + // Re-add 4. + addrs = append(addrs, resolver.Address{Addr: srv4.Address}) + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv2, 10}, srvWeight{srv4, 20}) +} + +func ensureReached(ctx context.Context, t *testing.T, c testgrpc.TestServiceClient, n int) { + t.Helper() + reached := make(map[string]struct{}) + for len(reached) != n { + var peer peer.Peer + if _, err := c.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + t.Fatalf("Error from EmptyCall: %v", err) + } + reached[peer.Addr.String()] = struct{}{} + } +} + +type srvWeight struct { + srv *testServer + w int +} + +const rrIterations = 100 + +// checkWeights does rrIterations RPCs and expects the different backends to be +// routed in a ratio as deterimined by the srvWeights passed in. Allows for +// some variance (+/- 2 RPCs per backend). +func checkWeights(ctx context.Context, t *testing.T, sws ...srvWeight) { + t.Helper() + + c := sws[0].srv.Client + + // Replace the weights with approximate counts of RPCs wanted given the + // iterations performed. 
+ weightSum := 0 + for _, sw := range sws { + weightSum += sw.w + } + for i := range sws { + sws[i].w = rrIterations * sws[i].w / weightSum + } + + for attempts := 0; attempts < 10; attempts++ { + serverCounts := make(map[string]int) + for i := 0; i < rrIterations; i++ { + var peer peer.Peer + if _, err := c.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + t.Fatalf("Error from EmptyCall: %v; timed out waiting for weighted RR behavior?", err) + } + serverCounts[peer.Addr.String()]++ + } + if len(serverCounts) != len(sws) { + continue + } + success := true + for _, sw := range sws { + c := serverCounts[sw.srv.Address] + if c < sw.w-2 || c > sw.w+2 { + success = false + break + } + } + if success { + t.Logf("Passed iteration %v; counts: %v", attempts, serverCounts) + return + } + t.Logf("Failed iteration %v; counts: %v; want %+v", attempts, serverCounts, sws) + time.Sleep(5 * time.Millisecond) + } + t.Fatalf("Failed to route RPCs with proper ratio") +} diff --git a/balancer/weightedroundrobin/config.go b/balancer/weightedroundrobin/config.go new file mode 100644 index 000000000000..caad18faa11d --- /dev/null +++ b/balancer/weightedroundrobin/config.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package weightedroundrobin + +import ( + "time" + + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // Whether to enable out-of-band utilization reporting collection from the + // endpoints. By default, per-request utilization reporting is used. + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OOBReportingPeriod time.Duration `json:"oobReportingPeriod,omitempty"` + + // A given endpoint must report load metrics continuously for at least this + // long before the endpoint weight will be used. This avoids churn when + // the set of endpoint addresses changes. Takes effect both immediately + // after we establish a connection to an endpoint and after + // weight_expiration_period has caused us to stop using the most recent + // load metrics. Default is 10 seconds. + BlackoutPeriod time.Duration `json:"blackoutPeriod,omitempty"` + + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. + WeightExpirationPeriod time.Duration `json:"weightExpirationPeriod,omitempty"` + + // How often endpoint weights are recalculated. Default is 1 second. + WeightUpdatePeriod time.Duration `json:"weightUpdatePeriod,omitempty"` + + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Default is 1.0. 
+ ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} diff --git a/balancer/weightedroundrobin/internal/internal.go b/balancer/weightedroundrobin/internal/internal.go new file mode 100644 index 000000000000..d39830261b21 --- /dev/null +++ b/balancer/weightedroundrobin/internal/internal.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal allows for easier testing of the weightedroundrobin +// package. +package internal + +import ( + "time" +) + +// AllowAnyWeightUpdatePeriod permits any setting of WeightUpdatePeriod for +// testing. Normally a minimum of 100ms is applied. +var AllowAnyWeightUpdatePeriod bool + +// LBConfig allows tests to produce a JSON form of the config from the struct +// instead of using a string. +type LBConfig struct { + EnableOOBLoadReport *bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod *time.Duration `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod *time.Duration `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod *time.Duration `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod *time.Duration `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty *float64 `json:"errorUtilizationPenalty,omitempty"` +} + +// TimeNow can be overridden by tests to return a different value for the +// current time. 
+var TimeNow = time.Now diff --git a/balancer/weightedroundrobin/logging.go b/balancer/weightedroundrobin/logging.go new file mode 100644 index 000000000000..43184ca9ab91 --- /dev/null +++ b/balancer/weightedroundrobin/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[%p] " + +var logger = grpclog.Component("weighted-round-robin") + +func prefixLogger(p *wrrBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/balancer/weightedroundrobin/scheduler.go b/balancer/weightedroundrobin/scheduler.go new file mode 100644 index 000000000000..e19428112e1e --- /dev/null +++ b/balancer/weightedroundrobin/scheduler.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "math" +) + +type scheduler interface { + nextIndex() int +} + +// newScheduler uses scWeights to create a new scheduler for selecting subconns +// in a picker. It will return a round robin implementation if at least +// len(scWeights)-1 are zero or there is only a single subconn, otherwise it +// will return an Earliest Deadline First (EDF) scheduler implementation that +// selects the subchannels according to their weights. +func newScheduler(scWeights []float64, inc func() uint32) scheduler { + n := len(scWeights) + if n == 0 { + return nil + } + if n == 1 { + return &rrScheduler{numSCs: 1, inc: inc} + } + sum := float64(0) + numZero := 0 + max := float64(0) + for _, w := range scWeights { + sum += w + if w > max { + max = w + } + if w == 0 { + numZero++ + } + } + if numZero >= n-1 { + return &rrScheduler{numSCs: uint32(n), inc: inc} + } + unscaledMean := sum / float64(n-numZero) + scalingFactor := maxWeight / max + mean := uint16(math.Round(scalingFactor * unscaledMean)) + + weights := make([]uint16, n) + allEqual := true + for i, w := range scWeights { + if w == 0 { + // Backends with weight = 0 use the mean. 
+ weights[i] = mean + } else { + scaledWeight := uint16(math.Round(scalingFactor * w)) + weights[i] = scaledWeight + if scaledWeight != mean { + allEqual = false + } + } + } + + if allEqual { + return &rrScheduler{numSCs: uint32(n), inc: inc} + } + + logger.Infof("using edf scheduler with weights: %v", weights) + return &edfScheduler{weights: weights, inc: inc} +} + +const maxWeight = math.MaxUint16 + +// edfScheduler implements EDF using the same algorithm as grpc-c++ here: +// +// https://github.com/grpc/grpc/blob/master/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc +type edfScheduler struct { + inc func() uint32 + weights []uint16 +} + +// Returns the index in s.weights for the picker to choose. +func (s *edfScheduler) nextIndex() int { + const offset = maxWeight / 2 + + for { + idx := uint64(s.inc()) + + // The sequence number (idx) is split in two: the lower %n gives the + // index of the backend, and the rest gives the number of times we've + // iterated through all backends. `generation` is used to + // deterministically decide whether we pick or skip the backend on this + // iteration, in proportion to the backend's weight. + + backendIndex := idx % uint64(len(s.weights)) + generation := idx / uint64(len(s.weights)) + weight := uint64(s.weights[backendIndex]) + + // We pick a backend `weight` times per `maxWeight` generations. The + // multiply and modulus ~evenly spread out the picks for a given + // backend between different generations. The offset by `backendIndex` + // helps to reduce the chance of multiple consecutive non-picks: if we + // have two consecutive backends with an equal, say, 80% weight of the + // max, with no offset we would see 1/5 generations that skipped both. + // TODO(b/190488683): add test for offset efficacy. 
+ mod := uint64(weight*generation+backendIndex*offset) % maxWeight + + if mod < maxWeight-weight { + continue + } + return int(backendIndex) + } +} + +// A simple RR scheduler to use for fallback when fewer than two backends have +// non-zero weights, or all backends have the the same weight, or when only one +// subconn exists. +type rrScheduler struct { + inc func() uint32 + numSCs uint32 +} + +func (s *rrScheduler) nextIndex() int { + idx := s.inc() + return int(idx % s.numSCs) +} diff --git a/balancer/weightedroundrobin/weightedroundrobin.go b/balancer/weightedroundrobin/weightedroundrobin.go index 6fc4d1910e67..bb029f07c36a 100644 --- a/balancer/weightedroundrobin/weightedroundrobin.go +++ b/balancer/weightedroundrobin/weightedroundrobin.go @@ -16,16 +16,21 @@ * */ -// Package weightedroundrobin defines a weighted roundrobin balancer. +// Package weightedroundrobin provides an implementation of the weighted round +// robin LB policy, as defined in [gRFC A58]. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +// +// [gRFC A58]: https://github.com/grpc/proposal/blob/master/A58-client-side-weighted-round-robin-lb-policy.md package weightedroundrobin import ( "google.golang.org/grpc/resolver" ) -// Name is the name of weighted_round_robin balancer. -const Name = "weighted_round_robin" - // attributeKey is the type used as the key to store AddrInfo in the // BalancerAttributes field of resolver.Address. type attributeKey struct{} @@ -44,11 +49,6 @@ func (a AddrInfo) Equal(o interface{}) bool { // SetAddrInfo returns a copy of addr in which the BalancerAttributes field is // updated with addrInfo. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) return addr @@ -56,11 +56,6 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { // GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of // addr. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func GetAddrInfo(addr resolver.Address) AddrInfo { v := addr.BalancerAttributes.Value(attributeKey{}) ai, _ := v.(AddrInfo) diff --git a/internal/grpcrand/grpcrand.go b/internal/grpcrand/grpcrand.go index 517ea70642a1..0b092cfbe15d 100644 --- a/internal/grpcrand/grpcrand.go +++ b/internal/grpcrand/grpcrand.go @@ -72,3 +72,10 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} diff --git a/orca/producer.go b/orca/producer.go index 3b7ed8b67d8a..ce108aad65ca 100644 --- a/orca/producer.go +++ b/orca/producer.go @@ -199,12 +199,13 @@ func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Du // Unimplemented; do not retry. logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") return - case status.Code(err) == codes.Unavailable: - // TODO: this code should ideally log an error, too, but for now we - // receive this code when shutting down the ClientConn. Once we - // can determine the state or ensure the producer is stopped before - // the stream ends, we can log an error when it's not a natural - // shutdown. 
+ case status.Code(err) == codes.Unavailable, status.Code(err) == codes.Canceled: + // TODO: these codes should ideally log an error, too, but for now + // we receive them when shutting down the ClientConn (Unavailable + // if the stream hasn't started yet, and Canceled if it happens + // mid-stream). Once we can determine the state or ensure the + // producer is stopped before the stream ends, we can log an error + // when it's not a natural shutdown. default: // Log all other errors. logger.Error("Received unexpected stream error:", err) diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index 360fc44c9e4d..3f354424f28e 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -160,7 +160,7 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { d.loadStore.CallFinished(lIDStr, info.Err) load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport) - if !ok { + if !ok || load == nil { return } d.loadStore.CallServerLoad(lIDStr, serverLoadCPUName, load.CpuUtilization) From 5e587344eef8aaa06ebf76ee1997013b3a8fbed0 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 8 May 2023 21:29:36 -0400 Subject: [PATCH 913/998] xds: Add support for Custom LB Policies (#6224) --- attributes/attributes.go | 29 ++ .../weightedroundrobin/weightedroundrobin.go | 6 + .../weightedaggregator/aggregator.go | 8 + balancer/weightedtarget/weightedtarget.go | 12 + .../weightedtarget/weightedtarget_test.go | 21 +- internal/testutils/xds/e2e/clientresources.go | 63 ++- resolver/resolver.go | 16 +- test/xds/xds_client_custom_lb_test.go | 231 ++++++++++ .../balancer/cdsbalancer/cdsbalancer.go | 24 +- .../cdsbalancer/cdsbalancer_security_test.go | 32 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 115 +++-- .../balancer/cdsbalancer/cluster_handler.go | 12 +- .../cdsbalancer/cluster_handler_test.go | 14 - 
.../clusterimpl/tests/balancer_test.go | 11 +- .../clusterresolver/clusterresolver_test.go | 14 - .../balancer/clusterresolver/config_test.go | 4 - .../balancer/clusterresolver/configbuilder.go | 147 +------ .../clusterresolver/configbuilder_test.go | 399 +----------------- .../clusterresolver/e2e_test/eds_impl_test.go | 17 - .../balancer/clusterresolver/priority_test.go | 18 +- xds/internal/balancer/wrrlocality/balancer.go | 145 ++++++- .../balancer/wrrlocality/balancer_test.go | 131 ++++++ xds/internal/balancer/wrrlocality/logging.go | 34 ++ .../xdsclient/tests/cds_watchers_test.go | 2 +- .../xdsclient/tests/eds_watchers_test.go | 100 +++-- .../tests/federation_watchers_test.go | 6 +- .../xdsclient/tests/resource_update_test.go | 2 +- .../xdsresource/tests/unmarshal_cds_test.go | 5 +- .../xdsclient/xdsresource/type_cds.go | 25 +- .../xdsclient/xdsresource/unmarshal_cds.go | 18 +- .../xdsresource/unmarshal_cds_test.go | 8 +- .../xdsclient/xdsresource/unmarshal_eds.go | 11 + 32 files changed, 955 insertions(+), 725 deletions(-) create mode 100644 test/xds/xds_client_custom_lb_test.go create mode 100644 xds/internal/balancer/wrrlocality/logging.go diff --git a/attributes/attributes.go b/attributes/attributes.go index 02f5dc531891..3efca4591493 100644 --- a/attributes/attributes.go +++ b/attributes/attributes.go @@ -25,6 +25,11 @@ // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an @@ -99,3 +104,27 @@ func (a *Attributes) Equal(o *Attributes) bool { } return true } + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. 
+func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + var key, val string + if str, ok := k.(interface{ String() string }); ok { + key = str.String() + } + if str, ok := v.(interface{ String() string }); ok { + val = str.String() + } + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + first = false + } + sb.WriteString("}") + return sb.String() +} diff --git a/balancer/weightedroundrobin/weightedroundrobin.go b/balancer/weightedroundrobin/weightedroundrobin.go index bb029f07c36a..7567462e023d 100644 --- a/balancer/weightedroundrobin/weightedroundrobin.go +++ b/balancer/weightedroundrobin/weightedroundrobin.go @@ -28,6 +28,8 @@ package weightedroundrobin import ( + "fmt" + "google.golang.org/grpc/resolver" ) @@ -61,3 +63,7 @@ func GetAddrInfo(addr resolver.Address) AddrInfo { ai, _ := v.(AddrInfo) return ai } + +func (a AddrInfo) String() string { + return fmt.Sprintf("Weight: %d", a.Weight) +} diff --git a/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go index 37fc41c16885..27279257ed13 100644 --- a/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -178,6 +178,14 @@ func (wbsa *Aggregator) ResumeStateUpdates() { } } +// NeedUpdateStateOnResume sets the UpdateStateOnResume bool to true, letting a +// picker update be sent once ResumeStateUpdates is called. +func (wbsa *Aggregator) NeedUpdateStateOnResume() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.needUpdateStateOnResume = true +} + // UpdateState is called to report a balancer state change from sub-balancer. // It's usually called by the balancer group. 
// diff --git a/balancer/weightedtarget/weightedtarget.go b/balancer/weightedtarget/weightedtarget.go index 83bb7d701f19..3d5acdab6afe 100644 --- a/balancer/weightedtarget/weightedtarget.go +++ b/balancer/weightedtarget/weightedtarget.go @@ -143,6 +143,18 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat b.targets = newConfig.Targets + // If the targets length is zero, it means we have removed all child + // policies from the balancer group and aggregator. + // At the start of this UpdateClientConnState() operation, a call to + // b.stateAggregator.ResumeStateUpdates() is deferred. Thus, setting the + // needUpdateStateOnResume bool to true here will ensure a new picker is + // built as part of that deferred function. Since there are now no child + // policies, the aggregated connectivity state reported form the Aggregator + // will be TRANSIENT_FAILURE. + if len(b.targets) == 0 { + b.stateAggregator.NeedUpdateStateOnResume() + } + return nil } diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index a20cb0dc1ce4..5658f302a49b 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -166,7 +166,8 @@ func init() { // TestWeightedTarget covers the cases that a sub-balancer is added and a // sub-balancer is removed. It verifies that the addresses and balancer configs // are forwarded to the right sub-balancer. This test is intended to test the -// glue code in weighted_target. +// glue code in weighted_target. It also tests an empty target config update, +// which should trigger a transient failure state update. 
func (s) TestWeightedTarget(t *testing.T) { cc := testutils.NewTestClientConn(t) wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) @@ -306,6 +307,24 @@ func (s) TestWeightedTarget(t *testing.T) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3) } } + // Update the Weighted Target Balancer with an empty address list and no + // targets. This should cause a Transient Failure State update to the Client + // Conn. + emptyConfig, err := wtbParser.ParseConfig([]byte(`{}`)) + if err != nil { + t.Fatalf("Failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{}, + BalancerConfig: emptyConfig, + }); err != nil { + t.Fatalf("Failed to update ClientConn state: %v", err) + } + + state := <-cc.NewStateCh + if state != connectivity.TransientFailure { + t.Fatalf("Empty target update should have triggered a TF state update, got: %v", state) + } } // TestWeightedTarget_OneSubBalancer_AddRemoveBackend tests the case where we diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index b38d27b24963..ff2a5d43398a 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -524,6 +524,14 @@ func ClusterResourceWithOptions(opts ClusterOptions) *v3clusterpb.Cluster { return cluster } +// LocalityOptions contains options to configure a Locality. +type LocalityOptions struct { + // Ports is a set of ports on "localhost" belonging to this locality. + Ports []uint32 + // Weight is the weight of the locality, used for load balancing. + Weight uint32 +} + // EndpointOptions contains options to configure an Endpoint (or // ClusterLoadAssignment) resource. type EndpointOptions struct { @@ -533,9 +541,8 @@ type EndpointOptions struct { // Host is the hostname of the endpoints. In our e2e tests, hostname must // always be "localhost". 
Host string - // Ports is a set of ports on "localhost" where the endpoints corresponding - // to this resource reside. - Ports []uint32 + // Localities is a set of localities belonging to this resource. + Localities []LocalityOptions // DropPercents is a map from drop category to a drop percentage. If unset, // no drops are configured. DropPercents map[string]int @@ -546,34 +553,50 @@ func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpoin return EndpointResourceWithOptions(EndpointOptions{ ClusterName: clusterName, Host: host, - Ports: ports, + Localities: []LocalityOptions{ + { + Ports: ports, + Weight: 1, + }, + }, }) } // EndpointResourceWithOptions returns an xds Endpoint resource configured with // the provided options. func EndpointResourceWithOptions(opts EndpointOptions) *v3endpointpb.ClusterLoadAssignment { - var lbEndpoints []*v3endpointpb.LbEndpoint - for _, port := range opts.Ports { - lbEndpoints = append(lbEndpoints, &v3endpointpb.LbEndpoint{ - HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ - Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Protocol: v3corepb.SocketAddress_TCP, - Address: opts.Host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, + var endpoints []*v3endpointpb.LocalityLbEndpoints + for i, locality := range opts.Localities { + var lbEndpoints []*v3endpointpb.LbEndpoint + for _, port := range locality.Ports { + lbEndpoints = append(lbEndpoints, &v3endpointpb.LbEndpoint{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: opts.Host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, + }}, }}, - }}, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + }) + } + + 
endpoints = append(endpoints, &v3endpointpb.LocalityLbEndpoints{ + Locality: &v3corepb.Locality{ + Region: fmt.Sprintf("region-%d", i+1), + Zone: fmt.Sprintf("zone-%d", i+1), + SubZone: fmt.Sprintf("subzone-%d", i+1), + }, + LbEndpoints: lbEndpoints, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: locality.Weight}, + Priority: 0, }) } + cla := &v3endpointpb.ClusterLoadAssignment{ ClusterName: opts.ClusterName, - Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ - Locality: &v3corepb.Locality{SubZone: "subzone"}, - LbEndpoints: lbEndpoints, - LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, - Priority: 0, - }}, + Endpoints: endpoints, } var drops []*v3endpointpb.ClusterLoadAssignment_Policy_DropOverload diff --git a/resolver/resolver.go b/resolver/resolver.go index 6215e5ef2b02..353c10b69a5b 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -22,13 +22,13 @@ package resolver import ( "context" + "fmt" "net" "net/url" "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -124,7 +124,7 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. BalancerAttributes *attributes.Attributes @@ -151,7 +151,17 @@ func (a Address) Equal(o Address) bool { // String returns JSON formatted string representation of the address. 
func (a Address) String() string { - return pretty.ToJSON(a) + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create diff --git a/test/xds/xds_client_custom_lb_test.go b/test/xds/xds_client_custom_lb_test.go new file mode 100644 index 000000000000..91ec874c64a7 --- /dev/null +++ b/test/xds/xds_client_custom_lb_test.go @@ -0,0 +1,231 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + "github.com/golang/protobuf/proto" + structpb "github.com/golang/protobuf/ptypes/struct" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" +) + +// wrrLocality is a helper that takes a proto message and returns a +// WrrLocalityProto with the proto message marshaled into a proto.Any as a +// child. 
+func wrrLocality(m proto.Message) *v3wrrlocalitypb.WrrLocality { + return &v3wrrlocalitypb.WrrLocality{ + EndpointPickingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + }, + } +} + +// clusterWithLBConfiguration returns a cluster resource with the proto message +// passed Marshaled to an any and specified through the load_balancing_policy +// field. +func clusterWithLBConfiguration(clusterName, edsServiceName string, secLevel e2e.SecurityLevel, m proto.Message) *v3clusterpb.Cluster { + cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel) + cluster.LoadBalancingPolicy = &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + } + return cluster +} + +// TestWRRLocality tests RPC distribution across a scenario with 5 backends, +// with 2 backends in a locality with weight 1, and 3 backends in a second +// locality with weight 2. Through xDS, the test configures a +// wrr_locality_balancer with either a round robin or custom (specifying pick +// first) child load balancing policy, and asserts the correct distribution +// based on the locality weights and the endpoint picking policy specified. 
+func (s) TestWrrLocality(t *testing.T) { + oldCustomLBSupport := envconfig.XDSCustomLBPolicy + envconfig.XDSCustomLBPolicy = true + defer func() { + envconfig.XDSCustomLBPolicy = oldCustomLBSupport + }() + + backend1 := stubserver.StartTestService(t, nil) + port1 := testutils.ParsePort(t, backend1.Address) + defer backend1.Stop() + backend2 := stubserver.StartTestService(t, nil) + port2 := testutils.ParsePort(t, backend2.Address) + defer backend2.Stop() + backend3 := stubserver.StartTestService(t, nil) + port3 := testutils.ParsePort(t, backend3.Address) + defer backend3.Stop() + backend4 := stubserver.StartTestService(t, nil) + port4 := testutils.ParsePort(t, backend4.Address) + defer backend4.Stop() + backend5 := stubserver.StartTestService(t, nil) + port5 := testutils.ParsePort(t, backend5.Address) + defer backend5.Stop() + const serviceName = "my-service-client-side-xds" + + tests := []struct { + name string + // Configuration will be specified through load_balancing_policy field. + wrrLocalityConfiguration *v3wrrlocalitypb.WrrLocality + addressDistributionWant []resolver.Address + }{ + { + name: "rr_child", + wrrLocalityConfiguration: wrrLocality(&v3roundrobinpb.RoundRobin{}), + // Each addresses expected probability is locality weight of + // locality / total locality weights multiplied by 1 / number of + // endpoints in each locality (due to round robin across endpoints + // in a locality). Thus, address 1 and address 2 have 1/3 * 1/2 + // probability, and addresses 3 4 5 have 2/3 * 1/3 probability of + // being routed to. 
+ addressDistributionWant: []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend1.Address}, + {Addr: backend1.Address}, + {Addr: backend1.Address}, + {Addr: backend1.Address}, + {Addr: backend1.Address}, + {Addr: backend2.Address}, + {Addr: backend2.Address}, + {Addr: backend2.Address}, + {Addr: backend2.Address}, + {Addr: backend2.Address}, + {Addr: backend2.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + {Addr: backend4.Address}, + {Addr: backend4.Address}, + {Addr: backend4.Address}, + {Addr: backend4.Address}, + {Addr: backend4.Address}, + {Addr: backend4.Address}, + {Addr: backend4.Address}, + {Addr: backend4.Address}, + {Addr: backend5.Address}, + {Addr: backend5.Address}, + {Addr: backend5.Address}, + {Addr: backend5.Address}, + {Addr: backend5.Address}, + {Addr: backend5.Address}, + {Addr: backend5.Address}, + {Addr: backend5.Address}, + }, + }, + // This configures custom lb as the child of wrr_locality, which points + // to our pick_first implementation. Thus, the expected distribution of + // addresses is locality weight of locality / total locality weights as + // the probability of picking the first backend within the locality + // (e.g. Address 1 for locality 1, and Address 3 for locality 2). 
+ { + name: "custom_lb_child_pick_first", + wrrLocalityConfiguration: wrrLocality(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/pick_first", + Value: &structpb.Struct{}, + }), + addressDistributionWant: []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend3.Address}, + {Addr: backend3.Address}, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + routeConfigName := "route-" + serviceName + clusterName := "cluster-" + serviceName + endpointsName := "endpoints-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(routeConfigName, serviceName, clusterName)}, + Clusters: []*v3clusterpb.Cluster{clusterWithLBConfiguration(clusterName, endpointsName, e2e.SecurityLevelNone, test.wrrLocalityConfiguration)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ + ClusterName: endpointsName, + Host: "localhost", + Localities: []e2e.LocalityOptions{ + { + Ports: []uint32{port1, port2}, + Weight: 1, + }, + { + Ports: []uint32{port3, port4, port5}, + Weight: 2, + }, + }, + })}, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if err := roundrobin.CheckWeightedRoundRobinRPCs(ctx, client, test.addressDistributionWant); err != nil { + t.Fatalf("Error in 
expected round robin: %v", err) + } + }) + } +} diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 1e3fb4d1286c..91d4a6aa8661 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -38,7 +38,6 @@ import ( "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" "google.golang.org/grpc/xds/internal/balancer/outlierdetection" - "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -394,23 +393,22 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { dms[i].OutlierDetection = outlierDetectionToConfig(cu.OutlierDetection) } } + lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: dms, } - // lbPolicy is set only when the policy is ringhash. The default (when it's - // not set) is roundrobin. And similarly, we only need to set XDSLBPolicy - // for ringhash (it also defaults to roundrobin). - if lbp := update.lbPolicy; lbp != nil { - lbCfg.XDSLBPolicy = &internalserviceconfig.BalancerConfig{ - Name: ringhash.Name, - Config: &ringhash.LBConfig{ - MinRingSize: lbp.MinimumRingSize, - MaxRingSize: lbp.MaximumRingSize, - }, - } + bc := &internalserviceconfig.BalancerConfig{} + if err := json.Unmarshal(update.lbPolicy, bc); err != nil { + // This will never occur, valid configuration is emitted from the xDS + // Client. Validity is already checked in the xDS Client, however, this + // double validation is present because Unmarshalling and Validating are + // coupled into one json.Unmarshal operation). We will switch this in + // the future to two separate operations. 
+ b.logger.Errorf("Emitted lbPolicy %s from xDS Client is invalid: %v", update.lbPolicy, err) + return } - + lbCfg.XDSLBPolicy = bc ccState := balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), BalancerConfig: lbCfg, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 8d7face5e0a3..eb687aa70f76 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -63,6 +63,7 @@ var ( IdentityInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, + LBPolicy: wrrLocalityLBConfigJSON, } cdsUpdateWithMissingSecurityCfg = xdsresource.ClusterUpdate{ ClusterName: serviceName, @@ -248,8 +249,11 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -304,8 +308,11 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. 
- cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -461,7 +468,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -495,7 +502,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -548,7 +555,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -564,7 +571,10 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // an update which contains bad security config. So, we expect the CDS // balancer to forward this error to the EDS balancer and eventually the // channel needs to be put in a bad state. - cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -598,7 +608,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -675,8 +685,9 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { RootInstanceName: "default1", SubjectAltNameMatchers: testSANMatchers, }, + LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -700,6 +711,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { RootInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, + LBPolicy: wrrLocalityLBConfigJSON, } if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 27b2f15b4652..d69465a96274 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clusterresolver" "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" @@ -60,6 +61,20 @@ var ( noopODLBCfg = outlierdetection.LBConfig{ Interval: 
1<<63 - 1, } + wrrLocalityLBConfig = &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + } + wrrLocalityLBConfigJSON, _ = json.Marshal(wrrLocalityLBConfig) + ringHashLBConfig = &internalserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{MinRingSize: 10, MaxRingSize: 100}, + } + ringHashLBConfigJSON, _ = json.Marshal(ringHashLBConfig) ) type s struct { @@ -381,20 +396,27 @@ func (s) TestHandleClusterUpdate(t *testing.T) { wantCCS balancer.ClientConnState }{ { - name: "happy-case-with-lrs", - cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf}, - wantCCS: edsCCS(serviceName, nil, true, nil, noopODLBCfg), + name: "happy-case-with-lrs", + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LRSServerConfig: xdsresource.ClusterLRSServerSelf, + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, true, wrrLocalityLBConfig, noopODLBCfg), }, { - name: "happy-case-without-lrs", - cdsUpdate: xdsresource.ClusterUpdate{ClusterName: serviceName}, - wantCCS: edsCCS(serviceName, nil, false, nil, noopODLBCfg), + name: "happy-case-without-lrs", + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg), }, { name: "happy-case-with-ring-hash-lb-policy", cdsUpdate: xdsresource.ClusterUpdate{ ClusterName: serviceName, - LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, + LBPolicy: ringHashLBConfigJSON, }, wantCCS: edsCCS(serviceName, nil, false, &internalserviceconfig.BalancerConfig{ Name: ringhash.Name, @@ -403,21 +425,25 @@ func (s) TestHandleClusterUpdate(t *testing.T) { }, { name: "happy-case-outlier-detection", - cdsUpdate: 
xdsresource.ClusterUpdate{ClusterName: serviceName, OutlierDetection: &xdsresource.OutlierDetection{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, - MaxEjectionPercent: 10, - SuccessRateStdevFactor: 1900, - EnforcingSuccessRate: 100, - SuccessRateMinimumHosts: 5, - SuccessRateRequestVolume: 100, - FailurePercentageThreshold: 85, - EnforcingFailurePercentage: 5, - FailurePercentageMinimumHosts: 5, - FailurePercentageRequestVolume: 50, - }}, - wantCCS: edsCCS(serviceName, nil, false, nil, outlierdetection.LBConfig{ + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + OutlierDetection: &xdsresource.OutlierDetection{ + Interval: 10 * time.Second, + BaseEjectionTime: 30 * time.Second, + MaxEjectionTime: 300 * time.Second, + MaxEjectionPercent: 10, + SuccessRateStdevFactor: 1900, + EnforcingSuccessRate: 100, + SuccessRateMinimumHosts: 5, + SuccessRateRequestVolume: 100, + FailurePercentageThreshold: 85, + EnforcingFailurePercentage: 5, + FailurePercentageMinimumHosts: 5, + FailurePercentageRequestVolume: 50, + }, + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfig, outlierdetection.LBConfig{ Interval: 10 * time.Second, BaseEjectionTime: 30 * time.Second, MaxEjectionTime: 300 * time.Second, @@ -501,8 +527,11 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -586,8 +615,11 @@ func (s) TestResolverError(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -635,8 +667,11 @@ func (s) TestUpdateSubConnState(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -665,13 +700,16 @@ func (s) TestCircuitBreaking(t *testing.T) { cancel() cdsB.Close() }() - // Here we invoke the watch callback registered on the fake xdsClient. This // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. var maxRequests uint32 = 1 - cdsUpdate := xdsresource.ClusterUpdate{ClusterName: clusterName, MaxRequests: &maxRequests} - wantCCS := edsCCS(clusterName, &maxRequests, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: clusterName, + MaxRequests: &maxRequests, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(clusterName, &maxRequests, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -699,14 +737,16 @@ func (s) TestClose(t *testing.T) { // provided xdsClient. xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) defer cancel() - // Here we invoke the watch callback registered on the fake xdsClient. This // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. 
- cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -776,8 +816,11 @@ func (s) TestExitIdle(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - cdsUpdate := xdsresource.ClusterUpdate{ClusterName: serviceName} - wantCCS := edsCCS(serviceName, nil, false, nil, noopODLBCfg) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go index 234511a45dcf..aa2d9674a790 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -17,6 +17,7 @@ package cdsbalancer import ( + "encoding/json" "errors" "sync" @@ -38,13 +39,9 @@ var ( type clusterHandlerUpdate struct { // securityCfg is the Security Config from the top (root) cluster. securityCfg *xdsresource.SecurityConfig - // lbPolicy is the lb policy from the top (root) cluster. 
- // - // Currently, we only support roundrobin or ringhash, and since roundrobin - // does need configs, this is only set to the ringhash config, if the policy - // is ringhash. In the future, if we support more policies, we can make this - // an interface, and set it to config of the other policies. - lbPolicy *xdsresource.ClusterLBPolicyRingHash + + // lbPolicy is the the child of the cluster_impl policy, for all priorities. + lbPolicy json.RawMessage // updates is a list of ClusterUpdates from all the leaf clusters. updates []xdsresource.ClusterUpdate @@ -123,6 +120,7 @@ func (ch *clusterHandler) constructClusterUpdate() { case <-ch.updateChannel: default: } + ch.updateChannel <- clusterHandlerUpdate{ securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go index caf10955014f..ee989ec3ef73 100644 --- a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -52,7 +52,6 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { name string clusterName string clusterUpdate xdsresource.ClusterUpdate - lbPolicy *xdsresource.ClusterLBPolicyRingHash }{ { name: "test-update-root-cluster-EDS-success", @@ -62,16 +61,6 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { ClusterName: edsService, }, }, - { - name: "test-update-root-cluster-EDS-with-ring-hash", - clusterName: logicalDNSService, - clusterUpdate: xdsresource.ClusterUpdate{ - ClusterType: xdsresource.ClusterTypeLogicalDNS, - ClusterName: logicalDNSService, - LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, - }, - lbPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, - }, { name: "test-update-root-cluster-Logical-DNS-success", clusterName: 
logicalDNSService, @@ -111,9 +100,6 @@ func (s) TestSuccessCaseLeafNode(t *testing.T) { if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.clusterUpdate}); diff != "" { t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) } - if diff := cmp.Diff(chu.lbPolicy, test.lbPolicy); diff != "" { - t.Fatalf("got unexpected lb policy in cluster update, diff (-got, +want): %v", diff) - } case <-ctx.Done(): t.Fatal("Timed out waiting for update from update channel.") } diff --git a/xds/internal/balancer/clusterimpl/tests/balancer_test.go b/xds/internal/balancer/clusterimpl/tests/balancer_test.go index cf0e7b0ce842..d335ecd7e844 100644 --- a/xds/internal/balancer/clusterimpl/tests/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/tests/balancer_test.go @@ -112,9 +112,14 @@ func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { // drops all RPCs, but with no change in the load reporting server config. resources.Endpoints = []*v3endpointpb.ClusterLoadAssignment{ e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ - ClusterName: "endpoints-" + serviceName, - Host: "localhost", - Ports: []uint32{testutils.ParsePort(t, server.Address)}, + ClusterName: "endpoints-" + serviceName, + Host: "localhost", + Localities: []e2e.LocalityOptions{ + { + Ports: []uint32{testutils.ParsePort(t, server.Address)}, + Weight: 1, + }, + }, DropPercents: map[string]int{"test-drop-everything": 100}, }), } diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index f327c8cf5fc5..65cb7a9bf981 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -27,8 +27,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - 
"google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" @@ -378,7 +376,6 @@ func (s) TestOutlierDetection(t *testing.T) { t.Fatal(err) } - localityID := xdsinternal.LocalityID{Zone: "zone"} // The priority configuration generated should have Outlier Detection as a // direct child due to Outlier Detection being turned on. pCfgWant := &priority.LBConfig{ @@ -393,17 +390,6 @@ func (s) TestOutlierDetection(t *testing.T) { Config: &clusterimpl.LBConfig{ Cluster: testClusterName, EDSServiceName: "test-eds-service-name", - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(localityID.ToString): { - Weight: 100, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - }, }, }, }, diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index 2455b88d8079..fd17f3ede6d1 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -286,10 +286,6 @@ func TestParseConfig(t *testing.T) { } } -func newString(s string) *string { - return &s -} - func newUint32(i uint32) *uint32 { return &i } diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index b76a40355cc8..06b0aec2f311 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -23,9 +23,7 @@ import ( "fmt" "sort" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" - "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/hierarchy" 
internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" @@ -34,7 +32,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -63,33 +61,6 @@ type priorityConfig struct { // // The built tree of balancers (see test for the output struct). // -// If xds lb policy is ROUND_ROBIN, the children will be weighted_target for -// locality picking, and round_robin for endpoint picking. -// -// ┌────────┐ -// │priority│ -// └┬──────┬┘ -// │ │ -// ┌───────────▼┐ ┌▼───────────┐ -// │cluster_impl│ │cluster_impl│ -// └─┬──────────┘ └──────────┬─┘ -// │ │ -// ┌──────────────▼─┐ ┌─▼──────────────┐ -// │locality_picking│ │locality_picking│ -// └┬──────────────┬┘ └┬──────────────┬┘ -// │ │ │ │ -// ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ ┌─▼─┐ -// │LRS│ │LRS│ │LRS│ │LRS│ -// └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘ -// │ │ │ │ -// ┌──────────▼─────┐ ┌─────▼──────────┐ ┌──────────▼─────┐ ┌─────▼──────────┐ -// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ -// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘ -// -// If xds lb policy is RING_HASH, the children will be just a ring_hash policy. -// The endpoints from all localities will be flattened to one addresses list, -// and the ring_hash policy will pick endpoints from it. -// // ┌────────┐ // │priority│ // └┬──────┬┘ @@ -99,13 +70,8 @@ type priorityConfig struct { // └──────┬─────┘ └─────┬──────┘ // │ │ // ┌──────▼─────┐ ┌─────▼──────┐ -// │ ring_hash │ │ ring_hash │ +// │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) // └────────────┘ └────────────┘ -// -// If endpointPickingPolicy is nil, roundrobin will be used. 
-// -// Custom locality picking policy isn't support, and weighted_target is always -// used. func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) if err != nil { @@ -284,55 +250,11 @@ func dedupSortedIntSlice(a []int) []int { return a[:i+1] } -// rrBalancerConfig is a const roundrobin config, used as child of -// weighted-roundrobin. To avoid allocating memory everytime. -var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Name} - // priorityLocalitiesToClusterImpl takes a list of localities (with the same // priority), and generates a cluster impl policy config, and a list of -// addresses. +// addresses with their path hierarchy set to [priority-name, locality-name], so +// priority and the xDS LB Policy know which child policy each address is for. func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { - clusterImplCfg := &clusterimpl.LBConfig{ - Cluster: mechanism.Cluster, - EDSServiceName: mechanism.EDSServiceName, - LoadReportingServer: mechanism.LoadReportingServer, - MaxConcurrentRequests: mechanism.MaxConcurrentRequests, - DropCategories: drops, - // ChildPolicy is not set. Will be set based on xdsLBPolicy - } - - if xdsLBPolicy == nil || xdsLBPolicy.Name == roundrobin.Name { - // If lb policy is ROUND_ROBIN: - // - locality-picking policy is weighted_target - // - endpoint-picking policy is round_robin - logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", roundrobin.Name) - // Child of weighted_target is hardcoded to round_robin. 
- wtConfig, addrs := localitiesToWeightedTarget(localities, priorityName, rrBalancerConfig) - clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig} - return clusterImplCfg, addrs, nil - } - - if xdsLBPolicy.Name == ringhash.Name { - // If lb policy is RIHG_HASH, will build one ring_hash policy as child. - // The endpoints from all localities will be flattened to one addresses - // list, and the ring_hash policy will pick endpoints from it. - logger.Infof("xds lb policy is %q, building config with ring_hash", ringhash.Name) - addrs := localitiesToRingHash(localities, priorityName) - // Set child to ring_hash, note that the ring_hash config is from - // xdsLBPolicy. - clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: ringhash.Name, Config: xdsLBPolicy.Config} - return clusterImplCfg, addrs, nil - } - - return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, roundrobin.Name, ringhash.Name) -} - -// localitiesToRingHash takes a list of localities (with the same priority), and -// generates a list of addresses. -// -// The addresses have path hierarchy set to [priority-name], so priority knows -// which child policy they are for. 
-func localitiesToRingHash(localities []xdsresource.Locality, priorityName string) []resolver.Address { var addrs []resolver.Address for _, locality := range localities { var lw uint32 = 1 @@ -350,54 +272,29 @@ func localitiesToRingHash(localities []xdsresource.Locality, priorityName string if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } - + addr := resolver.Address{Addr: endpoint.Address} + addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addr = internal.SetLocalityID(addr, locality.ID) + // "To provide the xds_wrr_locality load balancer information about + // locality weights received from EDS, the cluster resolver will + // populate a new locality weight attribute for each address The + // attribute will have the weight (as an integer) of the locality + // the address is part of." - A52 + addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: lw}) var ew uint32 = 1 if endpoint.Weight != 0 { ew = endpoint.Weight } - - // The weight of each endpoint is locality_weight * endpoint_weight. - ai := weightedroundrobin.AddrInfo{Weight: lw * ew} - addr := weightedroundrobin.SetAddrInfo(resolver.Address{Addr: endpoint.Address}, ai) - addr = hierarchy.Set(addr, []string{priorityName, localityStr}) - addr = internal.SetLocalityID(addr, locality.ID) - addrs = append(addrs, addr) - } - } - return addrs -} - -// localitiesToWeightedTarget takes a list of localities (with the same -// priority), and generates a weighted target config, and list of addresses. -// -// The addresses have path hierarchy set to [priority-name, locality-name], so -// priority and weighted target know which child policy they are for. 
-func localitiesToWeightedTarget(localities []xdsresource.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) { - weightedTargets := make(map[string]weightedtarget.Target) - var addrs []resolver.Address - for _, locality := range localities { - localityStr, err := locality.ID.ToString() - if err != nil { - localityStr = fmt.Sprintf("%+v", locality.ID) - } - weightedTargets[localityStr] = weightedtarget.Target{Weight: locality.Weight, ChildPolicy: childPolicy} - for _, endpoint := range locality.Endpoints { - // Filter out all "unhealthy" endpoints (unknown and healthy are - // both considered to be healthy: - // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). - if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { - continue - } - - addr := resolver.Address{Addr: endpoint.Address} - if childPolicy.Name == weightedroundrobin.Name && endpoint.Weight != 0 { - ai := weightedroundrobin.AddrInfo{Weight: endpoint.Weight} - addr = weightedroundrobin.SetAddrInfo(addr, ai) - } - addr = hierarchy.Set(addr, []string{priorityName, localityStr}) - addr = internal.SetLocalityID(addr, locality.ID) + addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: lw * ew}) addrs = append(addrs, addr) } } - return &weightedtarget.LBConfig{Targets: weightedTargets}, addrs + return &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + LoadReportingServer: mechanism.LoadReportingServer, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + DropCategories: drops, + ChildPolicy: xdsLBPolicy, + }, addrs, nil } diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 5fbb0b95e339..6c94cae9ed47 100644 --- 
a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" - "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" @@ -39,6 +38,7 @@ import ( "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -68,7 +68,8 @@ var ( return out[i].Addr < out[j].Addr }) return out - })} + }), + } noopODCfg = outlierdetection.LBConfig{ Interval: 1<<63 - 1, @@ -230,21 +231,6 @@ func TestBuildPriorityConfig(t *testing.T) { Cluster: testClusterName, EDSServiceName: testEDSServiceName, DropCategories: []clusterimpl.DropConfig{}, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(testLocalityIDs[0].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - assertString(testLocalityIDs[1].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - }, }, }, }, @@ -262,21 +248,6 @@ func TestBuildPriorityConfig(t *testing.T) { Cluster: testClusterName, EDSServiceName: testEDSServiceName, DropCategories: []clusterimpl.DropConfig{}, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(testLocalityIDs[2].ToString): { - Weight: 
20, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - assertString(testLocalityIDs[3].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - }, }, }, }, @@ -393,21 +364,6 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { RequestsPerMillion: testDropOverMillion, }, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(testLocalityIDs[0].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - assertString(testLocalityIDs[1].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - }, }, "priority-2-1": { Cluster: testClusterName, @@ -420,32 +376,17 @@ func TestBuildClusterImplConfigForEDS(t *testing.T) { RequestsPerMillion: testDropOverMillion, }, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(testLocalityIDs[2].ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - assertString(testLocalityIDs[3].ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - }, }, } wantAddrs := []resolver.Address{ - testAddrWithAttrs(testAddressStrs[0][0], nil, "priority-2-0", &testLocalityIDs[0]), - testAddrWithAttrs(testAddressStrs[0][1], nil, "priority-2-0", &testLocalityIDs[0]), - testAddrWithAttrs(testAddressStrs[1][0], nil, "priority-2-0", &testLocalityIDs[1]), - testAddrWithAttrs(testAddressStrs[1][1], nil, "priority-2-0", &testLocalityIDs[1]), - testAddrWithAttrs(testAddressStrs[2][0], nil, "priority-2-1", &testLocalityIDs[2]), - testAddrWithAttrs(testAddressStrs[2][1], nil, 
"priority-2-1", &testLocalityIDs[2]), - testAddrWithAttrs(testAddressStrs[3][0], nil, "priority-2-1", &testLocalityIDs[3]), - testAddrWithAttrs(testAddressStrs[3][1], nil, "priority-2-1", &testLocalityIDs[3]), + testAddrWithAttrs(testAddressStrs[0][0], 20, 1, "priority-2-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[0][1], 20, 1, "priority-2-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[1][0], 80, 1, "priority-2-0", &testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[1][1], 80, 1, "priority-2-0", &testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[2][0], 20, 1, "priority-2-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[2][1], 20, 1, "priority-2-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[3][0], 80, 1, "priority-2-1", &testLocalityIDs[3]), + testAddrWithAttrs(testAddressStrs[3][1], 80, 1, "priority-2-1", &testLocalityIDs[3]), } if diff := cmp.Diff(gotNames, wantNames); diff != "" { @@ -594,31 +535,13 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { wantConfig: &clusterimpl.LBConfig{ Cluster: testClusterName, EDSServiceName: testEDSService, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedtarget.Name, - Config: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - }, - }, - }, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - 
testAddrWithAttrs("addr-2-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-1-1", 20, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", 20, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", 80, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", 80, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), }, }, { @@ -651,26 +574,12 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, }, wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", newUint32(1800), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", newUint32(200), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-2-1", newUint32(7200), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", newUint32(800), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-1-1", 20, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", 20, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", 80, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", 80, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), }, }, - { - name: "unsupported child", - localities: []xdsresource.Locality{{ - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, - }, - ID: internal.LocalityID{Zone: "test-zone-1"}, - Weight: 20, - }}, - priorityName: "test-priority", - childPolicy: 
&internalserviceconfig.BalancerConfig{Name: "some-child"}, - wantErr: true, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -688,267 +597,6 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { } } -func TestLocalitiesToWeightedTarget(t *testing.T) { - tests := []struct { - name string - localities []xdsresource.Locality - priorityName string - childPolicy *internalserviceconfig.BalancerConfig - lrsServer *string - wantConfig *weightedtarget.LBConfig - wantAddrs []resolver.Address - }{ - { - name: "roundrobin as child, with LRS", - localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - }, - ID: internal.LocalityID{Zone: "test-zone-1"}, - Weight: 20, - }, - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - }, - ID: internal.LocalityID{Zone: "test-zone-2"}, - Weight: 80, - }, - }, - priorityName: "test-priority", - childPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - lrsServer: newString("test-lrs-server"), - wantConfig: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - }, - }, - }, - wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-2-1", nil, 
"test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - }, - }, - { - name: "roundrobin as child, no LRS", - localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - }, - ID: internal.LocalityID{Zone: "test-zone-1"}, - Weight: 20, - }, - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - }, - ID: internal.LocalityID{Zone: "test-zone-2"}, - Weight: 80, - }, - }, - priorityName: "test-priority", - childPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, - // lrsServer is nil, so LRS policy will not be used. - wantConfig: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - }, - }, - wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-2-1", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", nil, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - }, - }, - { - name: "weighted round robin as child, no LRS", - localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-1-1", 
HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, - }, - ID: internal.LocalityID{Zone: "test-zone-1"}, - Weight: 20, - }, - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, - }, - ID: internal.LocalityID{Zone: "test-zone-2"}, - Weight: 80, - }, - }, - priorityName: "test-priority", - childPolicy: &internalserviceconfig.BalancerConfig{Name: weightedroundrobin.Name}, - // lrsServer is nil, so LRS policy will not be used. - wantConfig: &weightedtarget.LBConfig{ - Targets: map[string]weightedtarget.Target{ - assertString(internal.LocalityID{Zone: "test-zone-1"}.ToString): { - Weight: 20, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedroundrobin.Name, - }, - }, - assertString(internal.LocalityID{Zone: "test-zone-2"}.ToString): { - Weight: 80, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: weightedroundrobin.Name, - }, - }, - }, - }, - wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-2-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1 := localitiesToWeightedTarget(tt.localities, tt.priorityName, tt.childPolicy) - if diff := cmp.Diff(got, tt.wantConfig); diff != "" { - t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) - } - if diff := cmp.Diff(got1, tt.wantAddrs, 
cmp.AllowUnexported(attributes.Attributes{})); diff != "" { - t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) - } - }) - } -} - -func TestLocalitiesToRingHash(t *testing.T) { - tests := []struct { - name string - localities []xdsresource.Locality - priorityName string - wantAddrs []resolver.Address - }{ - { - // Check that address weights are locality_weight * endpoint_weight. - name: "with locality and endpoint weight", - localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, - }, - ID: internal.LocalityID{Zone: "test-zone-1"}, - Weight: 20, - }, - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, - }, - ID: internal.LocalityID{Zone: "test-zone-2"}, - Weight: 80, - }, - }, - priorityName: "test-priority", - wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", newUint32(1800), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", newUint32(200), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-2-1", newUint32(7200), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", newUint32(800), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - }, - }, - { - // Check that endpoint_weight is 0, weight is the locality weight. 
- name: "locality weight only", - localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - }, - ID: internal.LocalityID{Zone: "test-zone-1"}, - Weight: 20, - }, - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy}, - }, - ID: internal.LocalityID{Zone: "test-zone-2"}, - Weight: 80, - }, - }, - priorityName: "test-priority", - wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", newUint32(20), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", newUint32(20), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-2-1", newUint32(80), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", newUint32(80), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - }, - }, - { - // Check that locality_weight is 0, weight is the endpoint weight. 
- name: "endpoint weight only", - localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, - }, - ID: internal.LocalityID{Zone: "test-zone-1"}, - }, - { - Endpoints: []xdsresource.Endpoint{ - {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, - {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, - }, - ID: internal.LocalityID{Zone: "test-zone-2"}, - }, - }, - priorityName: "test-priority", - wantAddrs: []resolver.Address{ - testAddrWithAttrs("addr-1-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-1-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), - testAddrWithAttrs("addr-2-1", newUint32(90), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - testAddrWithAttrs("addr-2-2", newUint32(10), "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := localitiesToRingHash(tt.localities, tt.priorityName) - if diff := cmp.Diff(got, tt.wantAddrs, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { - t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) - } - }) - } -} - func assertString(f func() (string, error)) string { s, err := f() if err != nil { @@ -957,17 +605,16 @@ func assertString(f func() (string, error)) string { return s } -func testAddrWithAttrs(addrStr string, weight *uint32, priority string, lID *internal.LocalityID) resolver.Address { +func testAddrWithAttrs(addrStr string, localityWeight, endpointWeight uint32, priority string, lID *internal.LocalityID) resolver.Address { addr := resolver.Address{Addr: addrStr} - if weight != nil { - addr = 
weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: *weight}) - } path := []string{priority} if lID != nil { path = append(path, assertString(lID.ToString)) addr = internal.SetLocalityID(addr, *lID) } addr = hierarchy.Set(addr, path) + addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: localityWeight}) + addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: localityWeight * endpointWeight}) return addr } diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 053b56f0dc86..c7c2ab9945f0 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -364,23 +364,6 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, wantAddrs); err != nil { t.Fatal(err) } - - // Change the weight of locality2 and ensure weighted roundrobin. Since - // locality2 has twice the weight of locality3, it will be picked twice as - // frequently as locality3 for RPCs. And since locality2 has a single - // backend and locality3 has two backends, the backend in locality2 will - // receive four times the traffic of each of locality3's backends. 
- resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ - {name: localityName2, weight: 2, ports: ports[1:2]}, - {name: localityName3, weight: 1, ports: ports[2:4]}, - }) - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - wantAddrs = []resolver.Address{addrs[1], addrs[1], addrs[1], addrs[1], addrs[2], addrs[3]} - if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, wantAddrs); err != nil { - t.Fatal(err) - } } // TestEDS_EndpointsHealth tests the cluster_resolver LB policy using an EDS diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index fdcef37f2d8e..68325a31c17e 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -26,6 +26,7 @@ import ( corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" @@ -35,15 +36,24 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/xdsclient" ) var ( - testClusterNames = []string{"test-cluster-1", "test-cluster-2"} - testSubZones = []string{"I", "II", "III", "IV"} - testEndpointAddrs []string + testClusterNames = []string{"test-cluster-1", "test-cluster-2"} + testSubZones = []string{"I", "II", "III", "IV"} + testEndpointAddrs []string + wrrLocalityLBConfig = &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: 
&internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + } ) const testBackendAddrsCount = 12 @@ -75,6 +85,7 @@ func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) Cluster: testClusterName, Type: DiscoveryMechanismTypeEDS, }}, + XDSLBPolicy: wrrLocalityLBConfig, }, }); err != nil { edsb.Close() @@ -844,6 +855,7 @@ func (s) TestFallbackToDNS(t *testing.T) { DNSHostname: testDNSTarget, }, }, + XDSLBPolicy: wrrLocalityLBConfig, }, }); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/wrrlocality/balancer.go b/xds/internal/balancer/wrrlocality/balancer.go index 2ff6fccf89bd..ac63e84e62fb 100644 --- a/xds/internal/balancer/wrrlocality/balancer.go +++ b/xds/internal/balancer/wrrlocality/balancer.go @@ -28,8 +28,12 @@ import ( "fmt" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/grpclog" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal" ) // Name is the name of wrr_locality balancer. @@ -45,10 +49,6 @@ func (bb) Name() string { return Name } -func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { - return nil -} - // LBConfig is the config for the wrr locality balancer. type LBConfig struct { serviceconfig.LoadBalancingConfig @@ -56,13 +56,146 @@ type LBConfig struct { ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } +// To plumb in a different child in tests. +var weightedTargetName = weightedtarget.Name + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(weightedTargetName) + if builder == nil { + // Shouldn't happen, registered through imported weighted target, + // defensive programming. 
+ return nil + } + + // Doesn't need to intercept any balancer.ClientConn operations; pass + // through by just giving cc to child balancer. + wtb := builder.Build(cc, bOpts) + if wtb == nil { + // shouldn't happen, defensive programming. + return nil + } + wtbCfgParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported weighted target builder has this method. + return nil + } + wrrL := &wrrLocalityBalancer{ + child: wtb, + childParser: wtbCfgParser, + } + + wrrL.logger = prefixLogger(wrrL) + wrrL.logger.Infof("Created") + return wrrL +} + func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var lbCfg *LBConfig if err := json.Unmarshal(s, &lbCfg); err != nil { - return nil, fmt.Errorf("xds: invalid LBConfig for wrrlocality: %s, error: %v", string(s), err) + return nil, fmt.Errorf("xds_wrr_locality: invalid LBConfig: %s, error: %v", string(s), err) } if lbCfg == nil || lbCfg.ChildPolicy == nil { - return nil, errors.New("xds: invalidw LBConfig for wrrlocality: child policy field must be set") + return nil, errors.New("xds_wrr_locality: invalid LBConfig: child policy field must be set") } return lbCfg, nil } + +type attributeKey struct{} + +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o interface{}) bool { + oa, ok := o.(AddrInfo) + return ok && oa.LocalityWeight == a.LocalityWeight +} + +// AddrInfo is the locality weight of the locality an address is a part of. +type AddrInfo struct { + LocalityWeight uint32 +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with AddrInfo. 
+func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) + return addr +} + +func (a AddrInfo) String() string { + return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) +} + +// getAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. Returns false if no AddrInfo found. +func getAddrInfo(addr resolver.Address) (AddrInfo, bool) { + v := addr.BalancerAttributes.Value(attributeKey{}) + ai, ok := v.(AddrInfo) + return ai, ok +} + +// wrrLocalityBalancer wraps a weighted target balancer, and builds +// configuration for the weighted target once it receives configuration +// specifying the weighted target child balancer and locality weight +// information. +type wrrLocalityBalancer struct { + // child will be a weighted target balancer, and will be built it at + // wrrLocalityBalancer build time. Other than preparing configuration, other + // balancer operations are simply pass through. + child balancer.Balancer + + childParser balancer.ConfigParser + + logger *grpclog.PrefixLogger +} + +func (b *wrrLocalityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("Received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + weightedTargets := make(map[string]weightedtarget.Target) + for _, addr := range s.ResolverState.Addresses { + // This get of LocalityID could potentially return a zero value. This + // shouldn't happen though (this attribute that is set actually gets + // used to build localities in the first place), and thus don't error + // out, and just build a weighted target with undefined behavior. + locality, err := internal.GetLocalityID(addr).ToString() + if err != nil { + // Should never happen. 
+ logger.Errorf("Failed to marshal LocalityID: %v, skipping this locality in weighted target") + } + ai, ok := getAddrInfo(addr) + if !ok { + return fmt.Errorf("xds_wrr_locality: missing locality weight information in address %q", addr) + } + weightedTargets[locality] = weightedtarget.Target{Weight: ai.LocalityWeight, ChildPolicy: lbCfg.ChildPolicy} + } + wtCfg := &weightedtarget.LBConfig{Targets: weightedTargets} + wtCfgJSON, err := json.Marshal(wtCfg) + if err != nil { + // Shouldn't happen. + return fmt.Errorf("xds_wrr_locality: error marshalling prepared config: %v", wtCfg) + } + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childParser.ParseConfig(wtCfgJSON); err != nil { + return fmt.Errorf("xds_wrr_locality: config generated %v is invalid: %v", wtCfgJSON, err) + } + + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: sc, + }) +} + +func (b *wrrLocalityBalancer) ResolverError(err error) { + b.child.ResolverError(err) +} + +func (b *wrrLocalityBalancer) UpdateSubConnState(sc balancer.SubConn, scState balancer.SubConnState) { + b.child.UpdateSubConnState(sc, scState) +} + +func (b *wrrLocalityBalancer) Close() { + b.child.Close() +} diff --git a/xds/internal/balancer/wrrlocality/balancer_test.go b/xds/internal/balancer/wrrlocality/balancer_test.go index 9283b02f14b2..f0da7413bdb8 100644 --- a/xds/internal/balancer/wrrlocality/balancer_test.go +++ b/xds/internal/balancer/wrrlocality/balancer_test.go @@ -19,17 +19,28 @@ package wrrlocality import ( + "context" "encoding/json" "errors" "strings" "testing" + "time" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + 
"google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal" +) + +const ( + defaultTestTimeout = 5 * time.Second ) type s struct { @@ -119,3 +130,123 @@ func (s) TestParseConfig(t *testing.T) { }) } } + +// TestUpdateClientConnState tests the UpdateClientConnState method of the +// wrr_locality_experimental balancer. This UpdateClientConn operation should +// take the localities and their weights in the addresses passed in, alongside +// the endpoint picking policy defined in the Balancer Config and construct a +// weighted target configuration corresponding to these inputs. +func (s) TestUpdateClientConnState(t *testing.T) { + // Configure the stub balancer defined below as the child policy of + // wrrLocalityBalancer. + cfgCh := testutils.NewChannel() + oldWeightedTargetName := weightedTargetName + defer func() { + weightedTargetName = oldWeightedTargetName + }() + weightedTargetName = "fake_weighted_target" + stub.Register("fake_weighted_target", stub.BalancerFuncs{ + ParseConfig: func(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg weightedtarget.LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + return &cfg, nil + }, + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + wtCfg, ok := ccs.BalancerConfig.(*weightedtarget.LBConfig) + if !ok { + return errors.New("child received config that was not a weighted target config") + } + defer cfgCh.Send(wtCfg) + return nil + }, + }) + + builder := balancer.Get(Name) + if builder == nil { + t.Fatalf("balancer.Get(%q) returned nil", Name) + } + tcc := testutils.NewTestClientConn(t) + bal := builder.Build(tcc, balancer.BuildOptions{}) + defer bal.Close() + wrrL := bal.(*wrrLocalityBalancer) + + // Create the addresses with two localities with certain locality weights. 
+ // This represents what addresses the wrr_locality balancer will receive in + // UpdateClientConnState. + addr1 := resolver.Address{ + Addr: "locality-1", + } + addr1 = internal.SetLocalityID(addr1, internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }) + addr1 = SetAddrInfo(addr1, AddrInfo{LocalityWeight: 2}) + + addr2 := resolver.Address{ + Addr: "locality-2", + } + addr2 = internal.SetLocalityID(addr2, internal.LocalityID{ + Region: "region-2", + Zone: "zone-2", + SubZone: "subzone-2", + }) + addr2 = SetAddrInfo(addr2, AddrInfo{LocalityWeight: 1}) + addrs := []resolver.Address{addr1, addr2} + + err := wrrL.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + ResolverState: resolver.State{ + Addresses: addrs, + }, + }) + if err != nil { + t.Fatalf("Unexpected error from UpdateClientConnState: %v", err) + } + + // Note that these inline strings declared as the key in Targets built from + // Locality ID are not exactly what is shown in the example in the gRFC. + // However, this is an implementation detail that does not affect + // correctness (confirmed with Java team). The important thing is to get + // those three pieces of information region, zone, and subzone down to the + // child layer. 
+ wantWtCfg := &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + "{\"region\":\"region-1\",\"zone\":\"zone-1\",\"subZone\":\"subzone-1\"}": { + Weight: 2, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + "{\"region\":\"region-2\",\"zone\":\"zone-2\",\"subZone\":\"subzone-2\"}": { + Weight: 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cfg, err := cfgCh.Receive(ctx) + if err != nil { + t.Fatalf("No signal received from UpdateClientConnState() on the child: %v", err) + } + + gotWtCfg, ok := cfg.(*weightedtarget.LBConfig) + if !ok { + // Shouldn't happen - only sends a config on this channel. + t.Fatalf("Unexpected config type: %T", gotWtCfg) + } + + if diff := cmp.Diff(gotWtCfg, wantWtCfg); diff != "" { + t.Fatalf("Child received unexpected config, diff (-got, +want): %v", diff) + } +} diff --git a/xds/internal/balancer/wrrlocality/logging.go b/xds/internal/balancer/wrrlocality/logging.go new file mode 100644 index 000000000000..42ccea0a92b2 --- /dev/null +++ b/xds/internal/balancer/wrrlocality/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package wrrlocality + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[wrrlocality-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *wrrLocalityBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/xds/internal/xdsclient/tests/cds_watchers_test.go b/xds/internal/xdsclient/tests/cds_watchers_test.go index 3583fa929d96..9670caaca0a6 100644 --- a/xds/internal/xdsclient/tests/cds_watchers_test.go +++ b/xds/internal/xdsclient/tests/cds_watchers_test.go @@ -70,7 +70,7 @@ func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantU return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) } } - cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicyJSON")} + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy")} if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { return fmt.Errorf("received unepected diff in the cluster resource update: (-want, got):\n%s", diff) } diff --git a/xds/internal/xdsclient/tests/eds_watchers_test.go b/xds/internal/xdsclient/tests/eds_watchers_test.go index 9b220fc59f2c..4cc365e70ead 100644 --- a/xds/internal/xdsclient/tests/eds_watchers_test.go +++ b/xds/internal/xdsclient/tests/eds_watchers_test.go @@ -134,9 +134,13 @@ func (s) TestEDSWatch(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -153,9 +157,13 @@ func (s) TestEDSWatch(t *testing.T) { 
Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -265,9 +273,13 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -277,9 +289,13 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -295,9 +311,13 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -307,9 +327,13 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + 
Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -460,9 +484,13 @@ func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -541,9 +569,13 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -669,9 +701,13 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, @@ -801,9 +837,13 @@ func (s) TestEDSWatch_PartialValid(t *testing.T) { Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, - ID: internal.LocalityID{SubZone: "subzone"}, - Priority: 0, - Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, }, }, }, diff --git a/xds/internal/xdsclient/tests/federation_watchers_test.go b/xds/internal/xdsclient/tests/federation_watchers_test.go index 974e6221aab9..4298ce6c0885 100644 --- 
a/xds/internal/xdsclient/tests/federation_watchers_test.go +++ b/xds/internal/xdsclient/tests/federation_watchers_test.go @@ -305,7 +305,11 @@ func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { { Endpoints: []xdsresource.Endpoint{{Address: "localhost:666", Weight: 1}}, Weight: 1, - ID: internal.LocalityID{SubZone: "subzone"}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, }, }, }, diff --git a/xds/internal/xdsclient/tests/resource_update_test.go b/xds/internal/xdsclient/tests/resource_update_test.go index ff6cf7c756a4..7dd368aa5e24 100644 --- a/xds/internal/xdsclient/tests/resource_update_test.go +++ b/xds/internal/xdsclient/tests/resource_update_test.go @@ -802,7 +802,7 @@ func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { } cmpOpts := []cmp.Option{ cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicyJSON"), + cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy"), } if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) diff --git a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go index 7d20b1ff61e4..96ad204ad4b3 100644 --- a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go @@ -357,7 +357,6 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, - LBPolicy: &xdsresource.ClusterLBPolicyRingHash{MinimumRingSize: 10, MaximumRingSize: 100}, }, wantLBConfig: &internalserviceconfig.BalancerConfig{ Name: "ring_hash_experimental", @@ -589,11 +588,11 @@ func (s) TestValidateCluster_Success(t *testing.T) { // compare JSON bytes in a test. 
Thus, marshal into a Balancer // Config struct and compare on that. Only need to test this JSON // emission here, as this covers the possible output space. - if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "LBPolicy", "LBPolicyJSON")); diff != "" { + if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "LBPolicy")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) } bc := &internalserviceconfig.BalancerConfig{} - if err := json.Unmarshal(update.LBPolicyJSON, bc); err != nil { + if err := json.Unmarshal(update.LBPolicy, bc); err != nil { t.Fatalf("failed to unmarshal JSON: %v", err) } if diff := cmp.Diff(bc, test.wantLBConfig); diff != "" { diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go index cd49852d8fcc..8ea9608dc9b7 100644 --- a/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/xds/internal/xdsclient/xdsresource/type_cds.go @@ -52,13 +52,6 @@ const ( ClusterLRSServerSelf ) -// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its -// config. -type ClusterLBPolicyRingHash struct { - MinimumRingSize uint64 - MaximumRingSize uint64 -} - // OutlierDetection is the outlier detection configuration for a cluster. type OutlierDetection struct { // Interval is the time interval between ejection analysis sweeps. This can @@ -148,21 +141,9 @@ type ClusterUpdate struct { // a prioritized list of cluster names. PrioritizedClusterNames []string - // LBPolicy is the lb policy for this cluster. - // - // This only support round_robin and ring_hash. - // - if it's nil, the lb policy is round_robin - // - if it's not nil, the lb policy is ring_hash, the this field has the config. 
- // - // When we add more support policies, this can be made an interface, and - // will be set to different types based on the policy type. - LBPolicy *ClusterLBPolicyRingHash - // LBPolicyJSON represents the locality and endpoint picking policy in JSON, - // which will be the child policy of xds_cluster_impl. Once full support for - // this field across the system, the LBPolicy field will switch to this - // field. Right now we keep both to keep the system working even though - // downstream has not added support for this JSON field. - LBPolicyJSON json.RawMessage + // LBPolicy represents the locality and endpoint picking policy in JSON, + // which will be the child policy of xds_cluster_impl. + LBPolicy json.RawMessage // OutlierDetection is the outlier detection configuration for this cluster. // If nil, it means this cluster does not use the outlier detection feature. diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 966844881351..c117ce6e7b52 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -77,13 +77,11 @@ const ( ) func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - var lbPolicy *ClusterLBPolicyRingHash - var lbCfgJSON json.RawMessage + var lbPolicy json.RawMessage var err error switch cluster.GetLbPolicy() { case v3clusterpb.Cluster_ROUND_ROBIN: - lbPolicy = nil // The default is round_robin, and there's no config to set. 
- lbCfgJSON = []byte(fmt.Sprintf(`[{%q: {"childPolicy": [{"round_robin": {}}]}}]`, "xds_wrr_locality_experimental")) + lbPolicy = []byte(`[{"xds_wrr_locality_experimental": {"childPolicy": [{"round_robin": {}}]}}]`) case v3clusterpb.Cluster_RING_HASH: if !envconfig.XDSRingHash { return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) @@ -101,10 +99,9 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu if max := rhc.GetMaximumRingSize(); max != nil { maxSize = max.GetValue() } - lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize} - rhLBCfgJSON := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) - lbCfgJSON = []byte(fmt.Sprintf(`[{%q: %s}]`, "ring_hash_experimental", rhLBCfgJSON)) + rhLBCfg := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) + lbPolicy = []byte(fmt.Sprintf(`[{"ring_hash_experimental": %s}]`, rhLBCfg)) default: return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } @@ -129,7 +126,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } if cluster.GetLoadBalancingPolicy() != nil && envconfig.XDSCustomLBPolicy { - lbCfgJSON, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy()) + lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy()) if err != nil { return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err) } @@ -137,8 +134,8 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // converted configuration. It will do this by having the gRPC LB policy // registry parse the configuration." 
- A52 bc := &internalserviceconfig.BalancerConfig{} - if err := json.Unmarshal(lbCfgJSON, bc); err != nil { - return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbCfgJSON), err) + if err := json.Unmarshal(lbPolicy, bc); err != nil { + return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbPolicy), err) } } @@ -147,7 +144,6 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu SecurityCfg: sc, MaxRequests: circuitBreakersFromCluster(cluster), LBPolicy: lbPolicy, - LBPolicyJSON: lbCfgJSON, OutlierDetection: od, } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 3b47ae697a99..0c69d27ad42d 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -322,7 +322,7 @@ func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { if err != nil { t.Errorf("validateClusterAndConstructClusterUpdate() failed: %v", err) } - if diff := cmp.Diff(wantUpdate, gotUpdate, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { + if diff := cmp.Diff(wantUpdate, gotUpdate, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, got):\n%s", diff) } } @@ -1215,7 +1215,7 @@ func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { if (err != nil) != test.wantErr { t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) } - if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmp.AllowUnexported(regexp.Regexp{}), cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { + if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmp.AllowUnexported(regexp.Regexp{}), 
cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) } }) @@ -1357,7 +1357,7 @@ func (s) TestUnmarshalCluster(t *testing.T) { if name != test.wantName { t.Errorf("unmarshalClusterResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { t.Errorf("unmarshalClusterResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) } }) @@ -1507,7 +1507,7 @@ func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { if (err != nil) != test.wantErr { t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) } - if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicyJSON")); diff != "" { + if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) } }) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index a3202f8c8100..95333aaf61d5 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -141,6 +141,17 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, SubZone: l.SubZone, } lidStr, _ := lid.ToString() + + // "Since an xDS configuration can place a given locality under multiple + // priorities, it is possible to see locality weight attributes with + // different values for the same locality." 
- A52 + // + // This is handled in the client by emitting the locality weight + // specified for the priority it is specified in. If the same locality + // has a different weight in two priorities, each priority will specify + // a locality with the locality weight specified for that priority, and + // thus the subsequent tree of balancers linked to that priority will + // use that locality weight as well. if localitiesWithPriority[lidStr] { return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) } From b3fbd87a9e45b1e3bef39d9b12b3609a89f4e4c9 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 10 May 2023 13:26:37 -0700 Subject: [PATCH 914/998] interop: add ORCA test cases and functionality (#6266) --- interop/client/client.go | 14 ++- interop/observability/go.mod | 2 + interop/observability/go.sum | 2 + interop/orcalb.go | 170 +++++++++++++++++++++++++++++++++++ interop/server/server.go | 18 +++- interop/test_utils.go | 156 +++++++++++++++++++++++++++++++- 6 files changed, 357 insertions(+), 5 deletions(-) create mode 100644 interop/orcalb.go diff --git a/interop/client/client.go b/interop/client/client.go index 980ed9942589..a4228190e12c 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -17,6 +17,10 @@ */ // Binary client is an interop client. +// +// See interop test case descriptions [here]. 
+// +// [here]: https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md package main import ( @@ -94,7 +98,9 @@ var ( custom_metadata: server will echo custom metadata; unimplemented_method: client attempts to call unimplemented method; unimplemented_service: client attempts to call unimplemented service; - pick_first_unary: all requests are sent to one server despite multiple servers are resolved.`) + pick_first_unary: all requests are sent to one server despite multiple servers are resolved; + orca_per_rpc: the client verifies ORCA per-RPC metrics are provided; + orca_oob: the client verifies ORCA out-of-band metrics are provided.`) logger = grpclog.Component("interop") ) @@ -308,6 +314,12 @@ func main() { case "channel_soak": interop.DoSoakTest(tc, serverAddr, opts, true /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Duration(*soakMinTimeMsBetweenRPCs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) logger.Infoln("ChannelSoak done") + case "orca_per_rpc": + interop.DoORCAPerRPCTest(tc) + logger.Infoln("ORCAPerRPC done") + case "orca_oob": + interop.DoORCAOOBTest(tc) + logger.Infoln("ORCAOOB done") default: logger.Fatal("Unsupported test case: ", *testCase) } diff --git a/interop/observability/go.mod b/interop/observability/go.mod index 1d743a02318b..784ea504d1a0 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -18,6 +18,8 @@ require ( contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 4a52d183476d..167fb14bc0ce 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -638,6 +638,7 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -651,6 +652,7 @@ github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go. 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= diff --git a/interop/orcalb.go b/interop/orcalb.go new file mode 100644 index 000000000000..28ea7524d7b7 --- /dev/null +++ b/interop/orcalb.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package interop + +import ( + "context" + "fmt" + "sync" + "time" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/orca" +) + +func init() { + balancer.Register(orcabb{}) +} + +type orcabb struct{} + +func (orcabb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &orcab{cc: cc} +} + +func (orcabb) Name() string { + return "test_backend_metrics_load_balancer" +} + +type orcab struct { + cc balancer.ClientConn + sc balancer.SubConn + cancelWatch func() + + reportMu sync.Mutex + report *v3orcapb.OrcaLoadReport +} + +func (o *orcab) UpdateClientConnState(s balancer.ClientConnState) error { + if o.sc != nil { + o.sc.UpdateAddresses(s.ResolverState.Addresses) + return nil + } + + if len(s.ResolverState.Addresses) == 0 { + o.ResolverError(fmt.Errorf("produced no addresses")) + return fmt.Errorf("resolver produced no addresses") + } + var err error + o.sc, err = o.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("error creating subconn: %v", err))}) + return nil + } + o.cancelWatch = orca.RegisterOOBListener(o.sc, o, orca.OOBListenerOptions{ReportInterval: time.Second}) + o.sc.Connect() + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) + return nil +} + +func (o *orcab) ResolverError(err error) { + if o.sc == nil { + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("resolver error: %v", err))}) + } +} + +func (o *orcab) UpdateSubConnState(sc balancer.SubConn, scState balancer.SubConnState) { + if o.sc != sc { + logger.Errorf("received subconn update 
for unknown subconn: %v vs %v", o.sc, sc) + return + } + switch scState.ConnectivityState { + case connectivity.Ready: + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &scPicker{sc: sc, o: o}}) + case connectivity.TransientFailure: + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("all subchannels in transient failure: %v", scState.ConnectionError))}) + case connectivity.Connecting: + // Ignore; picker already set to "connecting". + case connectivity.Idle: + sc.Connect() + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) + case connectivity.Shutdown: + // Ignore; we are closing but handle that in Close instead. + } +} + +func (o *orcab) Close() { + o.cancelWatch() +} + +func (o *orcab) OnLoadReport(r *v3orcapb.OrcaLoadReport) { + o.reportMu.Lock() + defer o.reportMu.Unlock() + logger.Infof("received OOB load report: %v", r) + o.report = r +} + +type scPicker struct { + sc balancer.SubConn + o *orcab +} + +func (p *scPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + doneCB := func(di balancer.DoneInfo) { + if lr, _ := di.ServerLoad.(*v3orcapb.OrcaLoadReport); lr != nil && + (lr.CpuUtilization != 0 || lr.MemUtilization != 0 || len(lr.Utilization) > 0 || len(lr.RequestCost) > 0) { + // Since all RPCs will respond with a load report due to the + // presence of the DialOption, we need to inspect every field and + // use the out-of-band report instead if all are unset/zero. 
+ setContextCMR(info.Ctx, lr) + } else { + p.o.reportMu.Lock() + defer p.o.reportMu.Unlock() + if lr := p.o.report; lr != nil { + setContextCMR(info.Ctx, lr) + } + } + } + return balancer.PickResult{SubConn: p.sc, Done: doneCB}, nil +} + +func setContextCMR(ctx context.Context, lr *v3orcapb.OrcaLoadReport) { + if r := orcaResultFromContext(ctx); r != nil { + *r = lr + } +} + +type orcaKey string + +var orcaCtxKey = orcaKey("orcaResult") + +// contextWithORCAResult sets a key in ctx with a pointer to an ORCA load +// report that is to be filled in by the "test_backend_metrics_load_balancer" +// LB policy's Picker's Done callback. +// +// If a per-call load report is provided from the server for the call, result +// will be filled with that, otherwise the most recent OOB load report is used. +// If no OOB report has been received, result is not modified. +func contextWithORCAResult(ctx context.Context, result **v3orcapb.OrcaLoadReport) context.Context { + return context.WithValue(ctx, orcaCtxKey, result) +} + +// orcaResultFromContext returns the ORCA load report stored in the context. +// The LB policy uses this to communicate the load report back to the interop +// client application. +func orcaResultFromContext(ctx context.Context) **v3orcapb.OrcaLoadReport { + v := ctx.Value(orcaCtxKey) + if v == nil { + return nil + } + return v.(**v3orcapb.OrcaLoadReport) +} diff --git a/interop/server/server.go b/interop/server/server.go index 0778dbf961f0..67fbc3119963 100644 --- a/interop/server/server.go +++ b/interop/server/server.go @@ -17,18 +17,25 @@ */ // Binary server is an interop server. +// +// See interop test case descriptions [here]. 
+// +// [here]: https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md package main import ( "flag" "net" "strconv" + "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/interop" + "google.golang.org/grpc/orca" "google.golang.org/grpc/testdata" testgrpc "google.golang.org/grpc/interop/grpc_testing" @@ -56,7 +63,7 @@ func main() { logger.Fatalf("failed to listen: %v", err) } logger.Infof("interop server listening on %v", lis.Addr()) - var opts []grpc.ServerOption + opts := []grpc.ServerOption{orca.CallMetricsServerOption(nil)} if *useTLS { if *certFile == "" { *certFile = testdata.Path("server1.pem") @@ -78,6 +85,13 @@ func main() { opts = append(opts, grpc.Creds(altsTC)) } server := grpc.NewServer(opts...) - testgrpc.RegisterTestServiceServer(server, interop.NewTestServer()) + metricsRecorder := orca.NewServerMetricsRecorder() + sopts := orca.ServiceOptions{ + MinReportingInterval: time.Second, + ServerMetricsProvider: metricsRecorder, + } + internal.ORCAAllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&sopts) + orca.Register(server, sopts) + testgrpc.RegisterTestServiceServer(server, interop.NewTestServer(interop.NewTestServerOptions{MetricsRecorder: metricsRecorder})) server.Serve(lis) } diff --git a/interop/test_utils.go b/interop/test_utils.go index 6f6cde7d846c..0057c071217a 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -17,6 +17,10 @@ */ // Package interop contains functions used by interop client/server. +// +// See interop test case descriptions [here]. 
+// +// [here]: https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md package interop import ( @@ -36,9 +40,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) @@ -772,10 +778,23 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D type testServer struct { testgrpc.UnimplementedTestServiceServer + + metricsRecorder orca.ServerMetricsRecorder +} + +// NewTestServerOptions contains options that control the behavior of the test +// server returned by NewTestServer. +type NewTestServerOptions struct { + MetricsRecorder orca.ServerMetricsRecorder } -// NewTestServer creates a test server for test service. -func NewTestServer() testgrpc.TestServiceServer { +// NewTestServer creates a test server for test service. opts carries optional +// settings and does not need to be provided. If multiple opts are provided, +// only the first one is used. +func NewTestServer(opts ...NewTestServerOptions) testgrpc.TestServiceServer { + if len(opts) > 0 { + return &testServer{metricsRecorder: opts[0].MetricsRecorder} + } return &testServer{} } @@ -818,11 +837,34 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* if err != nil { return nil, err } + if r, orcaData := orca.CallMetricsRecorderFromContext(ctx), in.GetOrcaPerQueryReport(); r != nil && orcaData != nil { + // Transfer the request's per-Call ORCA data to the call metrics + // recorder in the context, if present. 
+ setORCAMetrics(r, orcaData) + } + if r, orcaData := s.metricsRecorder, in.GetOrcaOobReport(); r != nil && orcaData != nil { + // Transfer the request's OOB ORCA data to the server metrics recorder + // in the server, if present. + setORCAMetrics(r, orcaData) + } return &testpb.SimpleResponse{ Payload: pl, }, nil } +func setORCAMetrics(r orca.ServerMetricsRecorder, orcaData *testpb.TestOrcaReport) { + r.SetCPUUtilization(orcaData.CpuUtilization) + r.SetMemoryUtilization(orcaData.MemoryUtilization) + if rq, ok := r.(orca.CallMetricsRecorder); ok { + for k, v := range orcaData.RequestCost { + rq.SetRequestCost(k, v) + } + } + for k, v := range orcaData.Utilization { + r.SetNamedUtilization(k, v) + } +} + func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error { cs := args.GetResponseParameters() for _, c := range cs { @@ -883,6 +925,13 @@ func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallSe if st != nil && st.Code != 0 { return status.Error(codes.Code(st.Code), st.Message) } + + if r, orcaData := s.metricsRecorder, in.GetOrcaOobReport(); r != nil && orcaData != nil { + // Transfer the request's OOB ORCA data to the server metrics recorder + // in the server, if present. + setORCAMetrics(r, orcaData) + } + cs := in.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { @@ -933,3 +982,106 @@ func (s *testServer) HalfDuplexCall(stream testgrpc.TestService_HalfDuplexCallSe } return nil } + +// DoORCAPerRPCTest performs a unary RPC that enables ORCA per-call reporting +// and verifies the load report sent back to the LB policy's Done callback. 
+func DoORCAPerRPCTest(tc testgrpc.TestServiceClient) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + orcaRes := &v3orcapb.OrcaLoadReport{} + _, err := tc.UnaryCall(contextWithORCAResult(ctx, &orcaRes), &testpb.SimpleRequest{ + OrcaPerQueryReport: &testpb.TestOrcaReport{ + CpuUtilization: 0.8210, + MemoryUtilization: 0.5847, + RequestCost: map[string]float64{"cost": 3456.32}, + Utilization: map[string]float64{"util": 0.30499}, + }, + }) + if err != nil { + logger.Fatalf("/TestService/UnaryCall RPC failed: ", err) + } + want := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 0.8210, + MemUtilization: 0.5847, + RequestCost: map[string]float64{"cost": 3456.32}, + Utilization: map[string]float64{"util": 0.30499}, + } + if !proto.Equal(orcaRes, want) { + logger.Fatalf("/TestService/UnaryCall RPC received ORCA load report %+v; want %+v", orcaRes, want) + } +} + +// DoORCAOOBTest performs a streaming RPC that enables ORCA OOB reporting and +// verifies the load report sent to the LB policy's OOB listener. 
+func DoORCAOOBTest(tc testgrpc.TestServiceClient) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + logger.Fatalf("/TestService/FullDuplexCall received error starting stream: %v", err) + } + err = stream.Send(&testpb.StreamingOutputCallRequest{ + OrcaOobReport: &testpb.TestOrcaReport{ + CpuUtilization: 0.8210, + MemoryUtilization: 0.5847, + Utilization: map[string]float64{"util": 0.30499}, + }, + ResponseParameters: []*testpb.ResponseParameters{{Size: 1}}, + }) + if err != nil { + logger.Fatalf("/TestService/FullDuplexCall received error sending: %v", err) + } + _, err = stream.Recv() + if err != nil { + logger.Fatalf("/TestService/FullDuplexCall received error receiving: %v", err) + } + + ctx2, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + want := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 0.8210, + MemUtilization: 0.5847, + Utilization: map[string]float64{"util": 0.30499}, + } + checkORCAMetrics(ctx2, tc, want) + + err = stream.Send(&testpb.StreamingOutputCallRequest{ + OrcaOobReport: &testpb.TestOrcaReport{ + CpuUtilization: 0.29309, + MemoryUtilization: 0.2, + Utilization: map[string]float64{"util": 0.2039}, + }, + ResponseParameters: []*testpb.ResponseParameters{{Size: 1}}, + }) + if err != nil { + logger.Fatalf("/TestService/FullDuplexCall received error sending: %v", err) + } + _, err = stream.Recv() + if err != nil { + logger.Fatalf("/TestService/FullDuplexCall received error receiving: %v", err) + } + + ctx3, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + want = &v3orcapb.OrcaLoadReport{ + CpuUtilization: 0.29309, + MemUtilization: 0.2, + Utilization: map[string]float64{"util": 0.2039}, + } + checkORCAMetrics(ctx3, tc, want) +} + +func checkORCAMetrics(ctx context.Context, tc testgrpc.TestServiceClient, want *v3orcapb.OrcaLoadReport) { + for ctx.Err() == nil { + orcaRes := &v3orcapb.OrcaLoadReport{} + if _, 
err := tc.UnaryCall(contextWithORCAResult(ctx, &orcaRes), &testpb.SimpleRequest{}); err != nil { + logger.Fatalf("/TestService/UnaryCall RPC failed: ", err) + } + if proto.Equal(orcaRes, want) { + return + } + logger.Infof("/TestService/UnaryCall RPC received ORCA load report %+v; want %+v", orcaRes, want) + time.Sleep(time.Second) + } + logger.Fatalf("timed out waiting for expected ORCA load report") +} From afcbdc9ace7b4af94d014620727ea331cc3047fe Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Wed, 10 May 2023 19:30:34 -0400 Subject: [PATCH 915/998] xds/internal/xdsclient/xdslbregistry: Continue in converter if type not found (#6268) --- .../xdsclient/xdslbregistry/converter.go | 33 ++++++++++++++++--- .../xdslbregistry/tests/converter_test.go | 19 +++++++++++ 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/xds/internal/xdsclient/xdslbregistry/converter.go b/xds/internal/xdsclient/xdslbregistry/converter.go index 158ad8b199d6..6a5546d90159 100644 --- a/xds/internal/xdsclient/xdslbregistry/converter.go +++ b/xds/internal/xdsclient/xdslbregistry/converter.go @@ -34,6 +34,7 @@ import ( "github.com/golang/protobuf/proto" structpb "github.com/golang/protobuf/ptypes/struct" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/envconfig" ) @@ -90,13 +91,24 @@ func convertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) } - return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) + json, cont, err := convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) + if cont { + continue + } + return json, err case "type.googleapis.com/udpa.type.v1.TypedStruct": tsProto := &v1xdsudpatypepb.TypedStruct{} if err := 
proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) } - return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) + if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + json, cont, err := convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) + if cont { + continue + } + return json, err } // Any entry not in the above list is unsupported and will be skipped. // This includes Least Request as well, since grpc-go does not support @@ -133,20 +145,31 @@ func convertWrrLocality(cfg *v3wrrlocalitypb.WrrLocality, depth int) (json.RawMe return makeBalancerConfigJSON("xds_wrr_locality_experimental", lbCfgJSON), nil } -func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, error) { +// convertCustomPolicy attempts to prepare json configuration for a custom lb +// proto, which specifies the gRPC balancer type and configuration. Returns the +// converted json, a bool representing whether the caller should continue to the +// next policy, which is true if the gRPC Balancer registry does not contain +// that balancer type, and an error which should cause caller to error if error +// converting. +func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, bool, error) { // The gRPC policy name will be the "type name" part of the value of the // type_url field in the TypedStruct. We get this by using the part after // the last / character. Can assume a valid type_url from the control plane. 
urls := strings.Split(typeURL, "/") name := urls[len(urls)-1] + if balancer.Get(name) == nil { + return nil, true, nil + } + rawJSON, err := json.Marshal(s) if err != nil { - return nil, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) + return nil, false, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) } + // The Struct contained in the TypedStruct will be returned as-is as the // configuration JSON object. - return makeBalancerConfigJSON(name, rawJSON), nil + return makeBalancerConfigJSON(name, rawJSON), false, nil } func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { diff --git a/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go b/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go index 2607905dc903..c6d947d6bfde 100644 --- a/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go +++ b/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go @@ -170,6 +170,16 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { name: "custom_lb_type_v3_struct", policy: &v3clusterpb.LoadBalancingPolicy{ Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + // The type not registered in gRPC Policy registry. + // Should fallback to next policy in list. + TypedConfig: testutils.MarshalAny(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.ThisTypeDoesNotExist", + Value: &structpb.Struct{}, + }), + }, + }, { TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ TypedConfig: testutils.MarshalAny(&v3xdsxdstypepb.TypedStruct{ @@ -318,6 +328,15 @@ func (s) TestConvertToServiceConfigFailure(t *testing.T) { name: "no-supported-policy", policy: &v3clusterpb.LoadBalancingPolicy{ Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + // The type not registered in gRPC Policy registry. 
+ TypedConfig: testutils.MarshalAny(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.ThisTypeDoesNotExist", + Value: &structpb.Struct{}, + }), + }, + }, { TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ // Not supported by gRPC-Go. From 7d6134424ab0fbeff84a0e6324bc82fdde2e29f3 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 11 May 2023 09:24:03 -0700 Subject: [PATCH 916/998] examples: fix authz example to receive streaming error properly (#6270) --- examples/features/authz/client/main.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/examples/features/authz/client/main.go b/examples/features/authz/client/main.go index 85d085d24580..2654314e5e11 100644 --- a/examples/features/authz/client/main.go +++ b/examples/features/authz/client/main.go @@ -55,7 +55,15 @@ func callBidiStreamingEcho(ctx context.Context, client ecpb.EchoClient, opts ... return status.Errorf(status.Code(err), "BidirectionalStreamingEcho RPC failed: %v", err) } for i := 0; i < 5; i++ { - if err := c.Send(&ecpb.EchoRequest{Message: fmt.Sprintf("Request %d", i+1)}); err != nil { + err := c.Send(&ecpb.EchoRequest{Message: fmt.Sprintf("Request %d", i+1)}) + if err == io.EOF { + // Bidi streaming RPC errors happen and make Send return io.EOF, + // not the RPC error itself. Call Recv to determine the error. + break + } + if err != nil { + // Some local errors are reported this way, e.g. errors serializing + // the request message. 
return status.Errorf(status.Code(err), "sending StreamingEcho message: %v", err) } } From 1536887cc692aa0f1cdca8e911bad4e483dedabb Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 11 May 2023 12:29:32 -0400 Subject: [PATCH 917/998] interop/xds: Add Custom LB needed for interop test (#6262) --- balancer/balancer.go | 2 +- balancer/rls/picker.go | 6 +- interop/xds/client/client.go | 1 + interop/xds/custom_lb.go | 140 ++++++++++++++++++++++++++++++++++ interop/xds/custom_lb_test.go | 135 ++++++++++++++++++++++++++++++++ stream.go | 4 +- test/balancer_test.go | 6 +- 7 files changed, 285 insertions(+), 9 deletions(-) create mode 100644 interop/xds/custom_lb.go create mode 100644 interop/xds/custom_lb_test.go diff --git a/balancer/balancer.go b/balancer/balancer.go index 09d61dd1b55b..8f00523c0e24 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -286,7 +286,7 @@ type PickResult struct { // // LB policies with child policies are responsible for propagating metadata // injected by their children to the ClientConn, as part of Pick(). - Metatada metadata.MD + Metadata metadata.MD } // TransientFailureError returns e. 
It exists for backward compatibility and diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go index 3305f4529fd9..c2d972739689 100644 --- a/balancer/rls/picker.go +++ b/balancer/rls/picker.go @@ -166,10 +166,10 @@ func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info bala if err != nil { return res, err } - if res.Metatada == nil { - res.Metatada = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) + if res.Metadata == nil { + res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) } else { - res.Metatada.Append(rlsDataHeaderName, dcEntry.headerData) + res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData) } return res, nil } diff --git a/interop/xds/client/client.go b/interop/xds/client/client.go index f5e8469e72cb..ff03428e1105 100644 --- a/interop/xds/client/client.go +++ b/interop/xds/client/client.go @@ -43,6 +43,7 @@ import ( testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" + _ "google.golang.org/grpc/interop/xds" // to register Custom LB. ) func init() { diff --git a/interop/xds/custom_lb.go b/interop/xds/custom_lb.go new file mode 100644 index 000000000000..a08d82554008 --- /dev/null +++ b/interop/xds/custom_lb.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xds contains various xds interop helpers for usage in interop tests. 
+package xds + +import ( + "encoding/json" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + balancer.Register(rpcBehaviorBB{}) +} + +const name = "test.RpcBehaviorLoadBalancer" + +type rpcBehaviorBB struct{} + +func (rpcBehaviorBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &rpcBehaviorLB{ + ClientConn: cc, + } + // round_robin child to complete balancer tree with a usable leaf policy and + // have RPCs actually work. + builder := balancer.Get(roundrobin.Name) + if builder == nil { + // Shouldn't happen, defensive programming. Registered from import of + // roundrobin package. + return nil + } + rr := builder.Build(b, bOpts) + if rr == nil { + // Shouldn't happen, defensive programming. + return nil + } + b.Balancer = rr + return b +} + +func (rpcBehaviorBB) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &lbConfig{} + if err := json.Unmarshal(s, lbCfg); err != nil { + return nil, fmt.Errorf("rpc-behavior-lb: unable to marshal lbConfig: %s, error: %v", string(s), err) + } + return lbCfg, nil + +} + +func (rpcBehaviorBB) Name() string { + return name +} + +type lbConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + RPCBehavior string `json:"rpcBehavior,omitempty"` +} + +// rpcBehaviorLB is a load balancer that wraps a round robin balancer and +// appends the rpc-behavior metadata field to any metadata in pick results based +// on what is specified in configuration. 
+type rpcBehaviorLB struct { + // embed a ClientConn to wrap only UpdateState() operation + balancer.ClientConn + // embed a Balancer to wrap only UpdateClientConnState() operation + balancer.Balancer + + mu sync.Mutex + cfg *lbConfig +} + +func (b *rpcBehaviorLB) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("test.RpcBehaviorLoadBalancer:received config with unexpected type %T: %s", s.BalancerConfig, pretty.ToJSON(s.BalancerConfig)) + } + b.mu.Lock() + b.cfg = lbCfg + b.mu.Unlock() + return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + }) +} + +func (b *rpcBehaviorLB) UpdateState(state balancer.State) { + b.mu.Lock() + rpcBehavior := b.cfg.RPCBehavior + b.mu.Unlock() + + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: newRPCBehaviorPicker(state.Picker, rpcBehavior), + }) +} + +// rpcBehaviorPicker wraps a picker and adds the rpc-behavior metadata field +// into the child pick result's metadata. +type rpcBehaviorPicker struct { + childPicker balancer.Picker + rpcBehavior string +} + +// Pick appends the rpc-behavior metadata entry to the pick result of the child. +func (p *rpcBehaviorPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + pr, err := p.childPicker.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + pr.Metadata = metadata.Join(pr.Metadata, metadata.Pairs("rpc-behavior", p.rpcBehavior)) + return pr, nil +} + +func newRPCBehaviorPicker(childPicker balancer.Picker, rpcBehavior string) *rpcBehaviorPicker { + return &rpcBehaviorPicker{ + childPicker: childPicker, + rpcBehavior: rpcBehavior, + } +} diff --git a/interop/xds/custom_lb_test.go b/interop/xds/custom_lb_test.go new file mode 100644 index 000000000000..fc3a7f71c5c9 --- /dev/null +++ b/interop/xds/custom_lb_test.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package xds
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/grpctest"
+	"google.golang.org/grpc/internal/stubserver"
+	"google.golang.org/grpc/internal/testutils"
+	testgrpc "google.golang.org/grpc/interop/grpc_testing"
+	testpb "google.golang.org/grpc/interop/grpc_testing"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/resolver/manual"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var defaultTestTimeout = 5 * time.Second
+
+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
+// TestCustomLB tests the Custom LB for the interop client. It configures the
+// custom lb as the top level Load Balancing policy of the channel, then asserts
+// it can successfully make an RPC and also that the rpc behavior the Custom LB
+// is configured with makes its way to the server in metadata.
+func (s) TestCustomLB(t *testing.T) {
+	errCh := testutils.NewChannel()
+	// Setup a backend which verifies the expected rpc-behavior metadata is
+	// present in the request.
+ backend := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + errCh.Send(errors.New("failed to receive metadata")) + return &testpb.SimpleResponse{}, nil + } + rpcBMD := md.Get("rpc-behavior") + if len(rpcBMD) != 1 { + errCh.Send(errors.New("only one value received for metadata key rpc-behavior")) + return &testpb.SimpleResponse{}, nil + } + wantVal := "error-code-0" + if rpcBMD[0] != wantVal { + errCh.Send(fmt.Errorf("metadata val for key \"rpc-behavior\": got val %v, want val %v", rpcBMD[0], wantVal)) + return &testpb.SimpleResponse{}, nil + } + // Success. + errCh.Send(nil) + return &testpb.SimpleResponse{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started good TestService backend at: %q", backend.Address) + defer backend.Stop() + + lbCfgJSON := `{ + "loadBalancingConfig": [ + { + "test.RpcBehaviorLoadBalancer": { + "rpcBehavior": "error-code-0" + } + } + ] + }` + + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(lbCfgJSON) + mr := manual.NewBuilderWithScheme("customlb-e2e") + defer mr.Close() + mr.InitialState(resolver.State{ + Addresses: []resolver.Address{ + {Addr: backend.Address}, + }, + ServiceConfig: sc, + }) + + cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testServiceClient := testgrpc.NewTestServiceClient(cc) + + // Make a Unary RPC. This RPC should be successful due to the round_robin + // leaf balancer. Also, the custom load balancer should inject the + // "rpc-behavior" string it is configured with into the metadata sent to + // server. 
+ if _, err := testServiceClient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("error receiving from errCh: %v", err) + } + + // Should receive nil on the error channel which implies backend verified it + // correctly received the correct "rpc-behavior" metadata. + if err, ok := val.(error); ok { + t.Fatalf("error in backend verifications on metadata received: %v", err) + } +} diff --git a/stream.go b/stream.go index f79e31c147ee..06ec22cd0a9d 100644 --- a/stream.go +++ b/stream.go @@ -472,7 +472,7 @@ func (a *csAttempt) newStream() error { // It is safe to overwrite the csAttempt's context here, since all state // maintained in it are local to the attempt. When the attempt has to be // retried, a new instance of csAttempt will be created. - if a.pickResult.Metatada != nil { + if a.pickResult.Metadata != nil { // We currently do not have a function it the metadata package which // merges given metadata with existing metadata in a context. Existing // function `AppendToOutgoingContext()` takes a variadic argument of key @@ -482,7 +482,7 @@ func (a *csAttempt) newStream() error { // in a form passable to AppendToOutgoingContext(), or create a version // of AppendToOutgoingContext() that accepts a metadata.MD. 
md, _ := metadata.FromOutgoingContext(a.ctx) - md = metadata.Join(md, a.pickResult.Metatada) + md = metadata.Join(md, a.pickResult.Metadata) a.ctx = metadata.NewOutgoingContext(a.ctx, md) } diff --git a/test/balancer_test.go b/test/balancer_test.go index 8b88dc513b29..4026c75b46e3 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -922,10 +922,10 @@ func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, erro return balancer.PickResult{}, err } - if res.Metatada == nil { - res.Metatada = metadata.Pairs(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) + if res.Metadata == nil { + res.Metadata = metadata.Pairs(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) } else { - res.Metatada.Append(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) + res.Metadata.Append(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) } return res, nil } From 523dcddf9aaba17e8b131cab225a2cfdacc79bdb Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 11 May 2023 09:37:17 -0700 Subject: [PATCH 918/998] weightedroundrobin: fix test race accessing timeNow (#6269) --- balancer/weightedroundrobin/balancer_test.go | 29 ++++++++++++++++---- 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/balancer/weightedroundrobin/balancer_test.go b/balancer/weightedroundrobin/balancer_test.go index 5dd62ebf872a..f0cf4dab2f4e 100644 --- a/balancer/weightedroundrobin/balancer_test.go +++ b/balancer/weightedroundrobin/balancer_test.go @@ -23,6 +23,7 @@ import ( "encoding/json" "fmt" "sync" + "sync/atomic" "testing" "time" @@ -448,12 +449,13 @@ func (s) TestBalancer_TwoAddresses_BlackoutPeriod(t *testing.T) { defer mu.Unlock() now = t } - iwrr.TimeNow = func() time.Time { + + setTimeNow(func() time.Time { mu.Lock() defer mu.Unlock() return now - } - t.Cleanup(func() { iwrr.TimeNow = time.Now }) + }) + t.Cleanup(func() { setTimeNow(time.Now) }) testCases := []struct { blackoutPeriodCfg *time.Duration 
@@ -526,12 +528,12 @@ func (s) TestBalancer_TwoAddresses_WeightExpiration(t *testing.T) { defer mu.Unlock() now = t } - iwrr.TimeNow = func() time.Time { + setTimeNow(func() time.Time { mu.Lock() defer mu.Unlock() return now - } - t.Cleanup(func() { iwrr.TimeNow = time.Now }) + }) + t.Cleanup(func() { setTimeNow(time.Now) }) srv1 := startServer(t, reportBoth) srv2 := startServer(t, reportBoth) @@ -711,3 +713,18 @@ func checkWeights(ctx context.Context, t *testing.T, sws ...srvWeight) { } t.Fatalf("Failed to route RPCs with proper ratio") } + +func init() { + setTimeNow(time.Now) + iwrr.TimeNow = timeNow +} + +var timeNowFunc atomic.Value // func() time.Time + +func timeNow() time.Time { + return timeNowFunc.Load().(func() time.Time)() +} + +func setTimeNow(f func() time.Time) { + timeNowFunc.Store(f) +} From 1db474c85cb3e56b9114f6d91ea6040625a6fea9 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 11 May 2023 11:56:53 -0700 Subject: [PATCH 919/998] weightedroundrobin: fix duration format in lb config (#6271) --- balancer/weightedroundrobin/balancer.go | 19 +-- balancer/weightedroundrobin/balancer_test.go | 33 ++--- balancer/weightedroundrobin/config.go | 11 +- .../weightedroundrobin/internal/internal.go | 14 +- internal/serviceconfig/duration.go | 130 ++++++++++++++++++ internal/serviceconfig/duration_test.go | 87 ++++++++++++ 6 files changed, 256 insertions(+), 38 deletions(-) create mode 100644 internal/serviceconfig/duration.go create mode 100644 internal/serviceconfig/duration_test.go diff --git a/balancer/weightedroundrobin/balancer.go b/balancer/weightedroundrobin/balancer.go index e0d255222d52..e957b91b1966 100644 --- a/balancer/weightedroundrobin/balancer.go +++ b/balancer/weightedroundrobin/balancer.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/orca" 
"google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -66,10 +67,10 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba func (bb) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { lbCfg := &lbConfig{ // Default values as documented in A58. - OOBReportingPeriod: 10 * time.Second, - BlackoutPeriod: 10 * time.Second, - WeightExpirationPeriod: 3 * time.Minute, - WeightUpdatePeriod: time.Second, + OOBReportingPeriod: iserviceconfig.Duration(10 * time.Second), + BlackoutPeriod: iserviceconfig.Duration(10 * time.Second), + WeightExpirationPeriod: iserviceconfig.Duration(3 * time.Minute), + WeightUpdatePeriod: iserviceconfig.Duration(time.Second), ErrorUtilizationPenalty: 1, } if err := json.Unmarshal(js, lbCfg); err != nil { @@ -87,8 +88,8 @@ func (bb) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, er } // Impose lower bound of 100ms on weightUpdatePeriod. - if !internal.AllowAnyWeightUpdatePeriod && lbCfg.WeightUpdatePeriod < 100*time.Millisecond { - lbCfg.WeightUpdatePeriod = 100 * time.Millisecond + if !internal.AllowAnyWeightUpdatePeriod && lbCfg.WeightUpdatePeriod < iserviceconfig.Duration(100*time.Millisecond) { + lbCfg.WeightUpdatePeriod = iserviceconfig.Duration(100 * time.Millisecond) } return lbCfg, nil @@ -337,7 +338,7 @@ func (p *picker) scWeights() []float64 { ws := make([]float64, len(p.subConns)) now := internal.TimeNow() for i, wsc := range p.subConns { - ws[i] = wsc.weight(now, p.cfg.WeightExpirationPeriod, p.cfg.BlackoutPeriod) + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod)) } return ws } @@ -358,7 +359,7 @@ func (p *picker) start(ctx context.Context) { return } go func() { - ticker := time.NewTicker(p.cfg.WeightUpdatePeriod) + ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) for { select { case <-ctx.Done(): @@ -469,7 +470,7 @@ func (w *weightedSubConn) updateConfig(cfg 
*lbConfig) { if w.logger.V(2) { w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, newPeriod) } - opts := orca.OOBListenerOptions{ReportInterval: newPeriod} + opts := orca.OOBListenerOptions{ReportInterval: time.Duration(newPeriod)} w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts) } diff --git a/balancer/weightedroundrobin/balancer_test.go b/balancer/weightedroundrobin/balancer_test.go index f0cf4dab2f4e..a0a84a7f057b 100644 --- a/balancer/weightedroundrobin/balancer_test.go +++ b/balancer/weightedroundrobin/balancer_test.go @@ -53,31 +53,32 @@ func Test(t *testing.T) { const defaultTestTimeout = 10 * time.Second const weightUpdatePeriod = 50 * time.Millisecond +const weightExpirationPeriod = time.Minute const oobReportingInterval = 10 * time.Millisecond func init() { iwrr.AllowAnyWeightUpdatePeriod = true } -func boolp(b bool) *bool { return &b } -func float64p(f float64) *float64 { return &f } -func durationp(d time.Duration) *time.Duration { return &d } +func boolp(b bool) *bool { return &b } +func float64p(f float64) *float64 { return &f } +func stringp(s string) *string { return &s } var ( perCallConfig = iwrr.LBConfig{ EnableOOBLoadReport: boolp(false), - OOBReportingPeriod: durationp(5 * time.Millisecond), - BlackoutPeriod: durationp(0), - WeightExpirationPeriod: durationp(time.Minute), - WeightUpdatePeriod: durationp(weightUpdatePeriod), + OOBReportingPeriod: stringp("0.005s"), + BlackoutPeriod: stringp("0s"), + WeightExpirationPeriod: stringp("60s"), + WeightUpdatePeriod: stringp(".050s"), ErrorUtilizationPenalty: float64p(0), } oobConfig = iwrr.LBConfig{ EnableOOBLoadReport: boolp(true), - OOBReportingPeriod: durationp(5 * time.Millisecond), - BlackoutPeriod: durationp(0), - WeightExpirationPeriod: durationp(time.Minute), - WeightUpdatePeriod: durationp(weightUpdatePeriod), + OOBReportingPeriod: stringp("0.005s"), + BlackoutPeriod: stringp("0s"), + WeightExpirationPeriod: stringp("60s"), + WeightUpdatePeriod: 
stringp(".050s"), ErrorUtilizationPenalty: float64p(0), } ) @@ -458,10 +459,10 @@ func (s) TestBalancer_TwoAddresses_BlackoutPeriod(t *testing.T) { t.Cleanup(func() { setTimeNow(time.Now) }) testCases := []struct { - blackoutPeriodCfg *time.Duration + blackoutPeriodCfg *string blackoutPeriod time.Duration }{{ - blackoutPeriodCfg: durationp(time.Second), + blackoutPeriodCfg: stringp("1s"), blackoutPeriod: time.Second, }, { blackoutPeriodCfg: nil, @@ -549,7 +550,7 @@ func (s) TestBalancer_TwoAddresses_WeightExpiration(t *testing.T) { srv2.oobMetrics.SetCPUUtilization(.1) cfg := oobConfig - cfg.OOBReportingPeriod = durationp(time.Minute) + cfg.OOBReportingPeriod = stringp("60s") sc := svcConfig(t, cfg) if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { t.Fatalf("Error starting client: %v", err) @@ -566,7 +567,7 @@ func (s) TestBalancer_TwoAddresses_WeightExpiration(t *testing.T) { // Advance what time.Now returns to the weight expiration time minus 1s to // ensure all weights are still honored. - setNow(start.Add(*cfg.WeightExpirationPeriod - time.Second)) + setNow(start.Add(weightExpirationPeriod - time.Second)) // Wait for the weight update period to allow the new weights to be processed. time.Sleep(weightUpdatePeriod) @@ -574,7 +575,7 @@ func (s) TestBalancer_TwoAddresses_WeightExpiration(t *testing.T) { // Advance what time.Now returns to the weight expiration time plus 1s to // ensure all weights expired and addresses are routed evenly. - setNow(start.Add(*cfg.WeightExpirationPeriod + time.Second)) + setNow(start.Add(weightExpirationPeriod + time.Second)) // Wait for the weight expiration period so the weights have expired. 
time.Sleep(weightUpdatePeriod) diff --git a/balancer/weightedroundrobin/config.go b/balancer/weightedroundrobin/config.go index caad18faa11d..38f89d32fb43 100644 --- a/balancer/weightedroundrobin/config.go +++ b/balancer/weightedroundrobin/config.go @@ -19,8 +19,7 @@ package weightedroundrobin import ( - "time" - + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -34,7 +33,7 @@ type lbConfig struct { // Load reporting interval to request from the server. Note that the // server may not provide reports as frequently as the client requests. // Used only when enable_oob_load_report is true. Default is 10 seconds. - OOBReportingPeriod time.Duration `json:"oobReportingPeriod,omitempty"` + OOBReportingPeriod iserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` // A given endpoint must report load metrics continuously for at least this // long before the endpoint weight will be used. This avoids churn when @@ -42,17 +41,17 @@ type lbConfig struct { // after we establish a connection to an endpoint and after // weight_expiration_period has caused us to stop using the most recent // load metrics. Default is 10 seconds. - BlackoutPeriod time.Duration `json:"blackoutPeriod,omitempty"` + BlackoutPeriod iserviceconfig.Duration `json:"blackoutPeriod,omitempty"` // If a given endpoint has not reported load metrics in this long, // then we stop using the reported weight. This ensures that we do // not continue to use very stale weights. Once we stop using a stale // value, if we later start seeing fresh reports again, the // blackout_period applies. Defaults to 3 minutes. - WeightExpirationPeriod time.Duration `json:"weightExpirationPeriod,omitempty"` + WeightExpirationPeriod iserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` // How often endpoint weights are recalculated. Default is 1 second. 
-	WeightUpdatePeriod time.Duration `json:"weightUpdatePeriod,omitempty"`
+	WeightUpdatePeriod iserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"`
 
 	// The multiplier used to adjust endpoint weights with the error rate
 	// calculated as eps/qps. Default is 1.0.
diff --git a/balancer/weightedroundrobin/internal/internal.go b/balancer/weightedroundrobin/internal/internal.go
index d39830261b21..7b64fbf4e574 100644
--- a/balancer/weightedroundrobin/internal/internal.go
+++ b/balancer/weightedroundrobin/internal/internal.go
@@ -31,14 +31,14 @@ var AllowAnyWeightUpdatePeriod bool
 // LBConfig allows tests to produce a JSON form of the config from the struct
 // instead of using a string.
 type LBConfig struct {
-	EnableOOBLoadReport     *bool          `json:"enableOobLoadReport,omitempty"`
-	OOBReportingPeriod      *time.Duration `json:"oobReportingPeriod,omitempty"`
-	BlackoutPeriod          *time.Duration `json:"blackoutPeriod,omitempty"`
-	WeightExpirationPeriod  *time.Duration `json:"weightExpirationPeriod,omitempty"`
-	WeightUpdatePeriod      *time.Duration `json:"weightUpdatePeriod,omitempty"`
-	ErrorUtilizationPenalty *float64       `json:"errorUtilizationPenalty,omitempty"`
+	EnableOOBLoadReport     *bool    `json:"enableOobLoadReport,omitempty"`
+	OOBReportingPeriod      *string  `json:"oobReportingPeriod,omitempty"`
+	BlackoutPeriod          *string  `json:"blackoutPeriod,omitempty"`
+	WeightExpirationPeriod  *string  `json:"weightExpirationPeriod,omitempty"`
+	WeightUpdatePeriod      *string  `json:"weightUpdatePeriod,omitempty"`
+	ErrorUtilizationPenalty *float64 `json:"errorUtilizationPenalty,omitempty"`
 }
 
 // TimeNow can be overridden by tests to return a different value for the
-// current time.
+// current time.
 var TimeNow = time.Now
diff --git a/internal/serviceconfig/duration.go b/internal/serviceconfig/duration.go
new file mode 100644
index 000000000000..11d82afcc7ec
--- /dev/null
+++ b/internal/serviceconfig/duration.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. 
+func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. 
+ const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/internal/serviceconfig/duration_test.go b/internal/serviceconfig/duration_test.go new file mode 100644 index 000000000000..5696541aa870 --- /dev/null +++ b/internal/serviceconfig/duration_test.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "fmt" + "math" + "strings" + "testing" + "time" + + "google.golang.org/grpc/internal/grpcrand" +) + +// Tests both marshalling and unmarshalling of Durations. +func TestDuration_MarshalUnmarshal(t *testing.T) { + testCases := []struct { + json string + td time.Duration + unmarshalErr error + noMarshal bool + }{ + // Basic values. 
+ {json: `"1s"`, td: time.Second}, + {json: `"-100.700s"`, td: -100*time.Second - 700*time.Millisecond}, + {json: `".050s"`, td: 50 * time.Millisecond, noMarshal: true}, + {json: `"-.001s"`, td: -1 * time.Millisecond, noMarshal: true}, + {json: `"-0.200s"`, td: -200 * time.Millisecond}, + // Positive near / out of bounds. + {json: `"9223372036s"`, td: 9223372036 * time.Second}, + {json: `"9223372037s"`, td: math.MaxInt64, noMarshal: true}, + {json: `"9223372036.854775807s"`, td: math.MaxInt64}, + {json: `"9223372036.854775808s"`, td: math.MaxInt64, noMarshal: true}, + {json: `"315576000000s"`, td: math.MaxInt64, noMarshal: true}, + {json: `"315576000001s"`, unmarshalErr: fmt.Errorf("out of range")}, + // Negative near / out of bounds. + {json: `"-9223372036s"`, td: -9223372036 * time.Second}, + {json: `"-9223372037s"`, td: math.MinInt64, noMarshal: true}, + {json: `"-9223372036.854775808s"`, td: math.MinInt64}, + {json: `"-9223372036.854775809s"`, td: math.MinInt64, noMarshal: true}, + {json: `"-315576000000s"`, td: math.MinInt64, noMarshal: true}, + {json: `"-315576000001s"`, unmarshalErr: fmt.Errorf("out of range")}, + // Parse errors. + {json: `123s`, unmarshalErr: fmt.Errorf("invalid character")}, + {json: `"5m"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"5.3.2s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"x.3s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"3.xs"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"3.1234567890s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `".s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + } + for _, tc := range testCases { + // Seed `got` with a random value to ensure we properly reset it in all + // non-error cases. 
+ got := Duration(grpcrand.Uint64()) + err := got.UnmarshalJSON([]byte(tc.json)) + if (err == nil && time.Duration(got) != tc.td) || + (err != nil) != (tc.unmarshalErr != nil) || !strings.Contains(fmt.Sprint(err), fmt.Sprint(tc.unmarshalErr)) { + t.Errorf("UnmarshalJSON of %v = %v, %v; want %v, %v", tc.json, time.Duration(got), err, tc.td, tc.unmarshalErr) + } + + if tc.unmarshalErr == nil && !tc.noMarshal { + d := Duration(tc.td) + got, err := d.MarshalJSON() + if string(got) != tc.json || err != nil { + t.Errorf("MarshalJSON of %v = %v, %v; want %v, nil", d, string(got), err, tc.json) + } + } + } +} From fd376a5cbdc9d05257936f683396093812b7ce22 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 12 May 2023 11:01:06 -0700 Subject: [PATCH 920/998] test: fix flaky TimeoutOnDeadServer test; some cleanups (#6276) --- test/channelz_test.go | 28 ++--------- test/clientconn_state_transition_test.go | 18 +++++++ test/creds_test.go | 36 ++++--------- test/end2end_test.go | 64 ++++-------------------- test/goaway_test.go | 13 +---- test/healthcheck_test.go | 33 ++++-------- test/pickfirst_test.go | 6 +-- test/roundrobin_test.go | 19 ++----- 8 files changed, 55 insertions(+), 162 deletions(-) diff --git a/test/channelz_test.go b/test/channelz_test.go index 0a6ff579773f..d43c155a15df 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -1531,21 +1531,11 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { t.Fatal(err) } - // Wait for ready ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - for src := te.cc.GetState(); src != connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.Ready) - } - } + awaitState(ctx, t, te.cc, connectivity.Ready) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) - // Wait for not-ready. 
- for src := te.cc.GetState(); src == connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. got %v; want !%v", src, connectivity.Ready) - } - } + awaitNotState(ctx, t, te.cc, connectivity.Ready) if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) @@ -2016,21 +2006,11 @@ func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { t.Fatal(err) } - // Wait for ready ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - for src := te.cc.GetState(); src != connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.Ready) - } - } + awaitState(ctx, t, te.cc, connectivity.Ready) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) - // Wait for not-ready. - for src := te.cc.GetState(); src == connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. got %v; want !%v", src, connectivity.Ready) - } - } + awaitNotState(ctx, t, te.cc, connectivity.Ready) // verify that the subchannel no longer exist due to trace referencing it got overwritten. if err := verifyResultWithDelay(func() (bool, error) { diff --git a/test/clientconn_state_transition_test.go b/test/clientconn_state_transition_test.go index 1f15c6905ad6..57f932d1eb5e 100644 --- a/test/clientconn_state_transition_test.go +++ b/test/clientconn_state_transition_test.go @@ -519,3 +519,21 @@ func stayConnected(ctx context.Context, cc *grpc.ClientConn) { } } } + +func awaitState(ctx context.Context, t *testing.T, cc *grpc.ClientConn, stateWant connectivity.State) { + t.Helper() + for state := cc.GetState(); state != stateWant; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timed out waiting for state change. 
got %v; want %v", state, stateWant) + } + } +} + +func awaitNotState(ctx context.Context, t *testing.T, cc *grpc.ClientConn, stateDoNotWant connectivity.State) { + t.Helper() + for state := cc.GetState(); state == stateDoNotWant; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timed out waiting for state change. got %v; want NOT %v", state, stateDoNotWant) + } + } +} diff --git a/test/creds_test.go b/test/creds_test.go index 70af9945cc8e..06c716a3ee92 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -200,7 +200,7 @@ func (s) TestGRPCMethodAccessibleToCredsViaContextRequestInfo(t *testing.T) { cc := te.clientConn(grpc.WithPerRPCCredentials(&methodTestCreds{})) tc := testgrpc.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMethod { t.Fatalf("ss.client.EmptyCall(_, _) = _, %v; want _, _.Message()=%q", err, wantMethod) @@ -233,7 +233,7 @@ func (s) TestFailFastRPCErrorOnBadCertificates(t *testing.T) { defer te.tearDown() opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() cc, err := grpc.DialContext(ctx, te.srvAddr, opts...) if err != nil { @@ -261,17 +261,15 @@ func (s) TestWaitForReadyRPCErrorOnBadCertificates(t *testing.T) { defer te.tearDown() opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})} - dctx, dcancel := context.WithTimeout(context.Background(), 10*time.Second) - defer dcancel() - cc, err := grpc.DialContext(dctx, te.srvAddr, opts...) 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, err := grpc.DialContext(ctx, te.srvAddr, opts...) if err != nil { t.Fatalf("Dial(_) = %v, want %v", err, nil) } defer cc.Close() tc := testgrpc.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { return } @@ -444,17 +442,9 @@ func (s) TestCredsHandshakeAuthority(t *testing.T) { defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } - if !cc.WaitForStateChange(ctx, s) { - t.Fatalf("ClientConn is not ready after 100 ms") - } - } + awaitState(ctx, t, cc, connectivity.Ready) if cred.got != testAuthority { t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority) @@ -484,17 +474,9 @@ func (s) TestCredsHandshakeServerNameAuthority(t *testing.T) { defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String(), ServerName: testServerName}}}) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } - if !cc.WaitForStateChange(ctx, s) { - t.Fatalf("ClientConn is not ready after 100 ms") - } - } + awaitState(ctx, t, cc, connectivity.Ready) if cred.got != testServerName { t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority) diff --git a/test/end2end_test.go b/test/end2end_test.go index 824d7c56c041..865285b35a2d 100644 
--- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -965,38 +965,25 @@ func (s) TestTimeoutOnDeadServer(t *testing.T) { func testTimeoutOnDeadServer(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } + // Wait for the client to report READY, stop the server, then wait for the + // client to notice the connection is gone. + awaitState(ctx, t, cc, connectivity.Ready) te.srv.Stop() - cancel() - - // Wait for the client to notice the connection is gone. - ctx, cancel = context.WithTimeout(context.Background(), 500*time.Millisecond) - state := cc.GetState() - for ; state == connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { - } - cancel() - if state == connectivity.Ready { - t.Fatalf("Timed out waiting for non-ready state") - } - ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond) + awaitNotState(ctx, t, cc, connectivity.Ready) + ctx, cancel = context.WithTimeout(ctx, 5*time.Millisecond) _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) cancel() - if e.balancer != "" && status.Code(err) != codes.DeadlineExceeded { - // If e.balancer == nil, the ac will stop reconnecting because the dialer returns non-temp error, - // the error will be an internal error. 
+ if status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded) } awaitNewConnLogOutput() @@ -1070,11 +1057,6 @@ func (s) TestFailFast(t *testing.T) { func testFailFast(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) te.startServer(&testServer{security: e.security}) defer te.tearDown() @@ -1114,9 +1096,6 @@ func testServiceConfigSetup(t *testing.T, e env) *test { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) return te @@ -1746,9 +1725,6 @@ func testPreloaderClientSend(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) @@ -1875,9 +1851,6 @@ func testMaxMsgSizeClientDefault(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: 
addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) @@ -1942,9 +1915,6 @@ func testMaxMsgSizeClientAPI(t *testing.T, e env) { te.maxClientReceiveMsgSize = newInt(1024) te.maxClientSendMsgSize = newInt(1024) te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) @@ -2030,9 +2000,6 @@ func testMaxMsgSizeServerAPI(t *testing.T, e env) { te.maxServerReceiveMsgSize = newInt(1024) te.maxServerSendMsgSize = newInt(1024) te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) @@ -2141,11 +2108,6 @@ func testTap(t *testing.T, e env) { te.userAgent = testAppUA ttap := &myTap{} te.tapHandle = ttap.handle - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) te.startServer(&testServer{security: e.security}) defer te.tearDown() @@ -4878,17 +4840,9 @@ func testWaitForReadyConnection(t *testing.T, e env) { cc := te.clientConn() // Non-blocking dial. 
tc := testgrpc.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - state := cc.GetState() - // Wait for connection to be Ready. - for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { - } - if state != connectivity.Ready { - t.Fatalf("Want connection state to be Ready, got %v", state) - } - ctx, cancel = context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) // Make a fail-fast RPC. if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err) diff --git a/test/goaway_test.go b/test/goaway_test.go index 48ef197e74cc..c44bb831b70b 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -594,12 +594,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { client := testgrpc.NewTestServiceClient(cc) t.Log("Waiting for the ClientConn to enter READY state.") - state := cc.GetState() - for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { - } - if ctx.Err() != nil { - t.Fatalf("timed out waiting for READY channel state; last state = %v", state) - } + awaitState(ctx, t, cc, connectivity.Ready) // We make a streaming RPC and do an one-message-round-trip to make sure // it's created on connection 1. 
@@ -622,11 +617,7 @@ func (s) TestGoAwayThenClose(t *testing.T) { go s1.GracefulStop() t.Log("Waiting for the ClientConn to enter IDLE state.") - for ; state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() { - } - if ctx.Err() != nil { - t.Fatalf("timed out waiting for IDLE channel state; last state = %v", state) - } + awaitState(ctx, t, cc, connectivity.Idle) t.Log("Performing another RPC to create a connection to server 2.") if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index 1fb4cf46e2be..a6865b803026 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -212,44 +212,33 @@ func (s) TestHealthCheckWatchStateChange(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok { - t.Fatal("ClientConn is still in IDLE state when the context times out.") - } - if ok := cc.WaitForStateChange(ctx, connectivity.Connecting); !ok { - t.Fatal("ClientConn is still in CONNECTING state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.Idle) + awaitNotState(ctx, t, cc, connectivity.Connecting) + awaitState(ctx, t, cc, connectivity.TransientFailure) if s := cc.GetState(); s != connectivity.TransientFailure { t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - if ok := cc.WaitForStateChange(ctx, connectivity.TransientFailure); !ok { - t.Fatal("ClientConn is still in TRANSIENT FAILURE state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.TransientFailure) if s := cc.GetState(); s != connectivity.Ready { t.Fatalf("ClientConn is in %v state, want READY", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - if ok := cc.WaitForStateChange(ctx, connectivity.Ready); !ok { - 
t.Fatal("ClientConn is still in READY state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.Ready) if s := cc.GetState(); s != connectivity.TransientFailure { t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - if ok := cc.WaitForStateChange(ctx, connectivity.TransientFailure); !ok { - t.Fatal("ClientConn is still in TRANSIENT FAILURE state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.TransientFailure) if s := cc.GetState(); s != connectivity.Ready { t.Fatalf("ClientConn is in %v state, want READY", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_UNKNOWN) - if ok := cc.WaitForStateChange(ctx, connectivity.Ready); !ok { - t.Fatal("ClientConn is still in READY state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.Ready) if s := cc.GetState(); s != connectivity.TransientFailure { t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s) } @@ -278,12 +267,8 @@ func (s) TestHealthCheckHealthServerNotRegistered(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok { - t.Fatal("ClientConn is still in IDLE state when the context times out.") - } - if ok := cc.WaitForStateChange(ctx, connectivity.Connecting); !ok { - t.Fatal("ClientConn is still in CONNECTING state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.Idle) + awaitNotState(ctx, t, cc, connectivity.Connecting) if s := cc.GetState(); s != connectivity.Ready { t.Fatalf("ClientConn is in %v state, want READY", s) } diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index 15b6dcd84616..800d2f4178c2 100644 --- a/test/pickfirst_test.go +++ b/test/pickfirst_test.go @@ -250,11 +250,7 @@ func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) { // Send a resolver update with no 
addresses. This should push the channel into // TransientFailure. r.UpdateState(resolver.State{}) - for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { - if !cc.WaitForStateChange(ctx, state) { - t.Fatalf("timeout waiting for state change. got %v; want %v", state, connectivity.TransientFailure) - } - } + awaitState(ctx, t, cc, connectivity.TransientFailure) doneCh := make(chan struct{}) client := testgrpc.NewTestServiceClient(cc) diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index 8069e32358fb..92fed10ffed0 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -119,11 +119,7 @@ func (s) TestRoundRobin_AddressesRemoved(t *testing.T) { // Send a resolver update with no addresses. This should push the channel into // TransientFailure. r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) - for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { - if !cc.WaitForStateChange(ctx, state) { - t.Fatalf("timeout waiting for state change. got %v; want %v", state, connectivity.TransientFailure) - } - } + awaitState(ctx, t, cc, connectivity.TransientFailure) const msgWant = "produced zero addresses" client := testgrpc.NewTestServiceClient(cc) @@ -145,11 +141,7 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { // Send a resolver update with no addresses. This should push the channel into // TransientFailure. r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) - for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { - if !cc.WaitForStateChange(ctx, state) { - t.Fatalf("timeout waiting for state change. 
got %v; want %v", state, connectivity.TransientFailure) - } - } + awaitState(ctx, t, cc, connectivity.TransientFailure) client := testgrpc.NewTestServiceClient(cc) doneCh := make(chan struct{}) @@ -229,12 +221,7 @@ func (s) TestRoundRobin_AllServersDown(t *testing.T) { b.Stop() } - // Wait for TransientFailure. - for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { - if !cc.WaitForStateChange(ctx, state) { - t.Fatalf("timeout waiting for state change. got %v; want %v", state, connectivity.TransientFailure) - } - } + awaitState(ctx, t, cc, connectivity.TransientFailure) // Failfast RPCs should fail with Unavailable. client := testgrpc.NewTestServiceClient(cc) From 68381e7bd2c31b3bd5ca5f31b25e338192f16049 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 12 May 2023 15:28:07 -0400 Subject: [PATCH 921/998] xds: WRR in xDS (#6272) --- test/xds/xds_client_custom_lb_test.go | 124 +++++---- .../xdsclient/xdslbregistry/converter.go | 177 ------------ .../xdsclient/xdsresource/unmarshal_cds.go | 3 +- xds/internal/xdslbregistry/converter.go | 260 ++++++++++++++++++ .../tests => xdslbregistry}/converter_test.go | 56 ++-- 5 files changed, 367 insertions(+), 253 deletions(-) delete mode 100644 xds/internal/xdsclient/xdslbregistry/converter.go create mode 100644 xds/internal/xdslbregistry/converter.go rename xds/internal/{xdsclient/xdslbregistry/tests => xdslbregistry}/converter_test.go (87%) diff --git a/test/xds/xds_client_custom_lb_test.go b/test/xds/xds_client_custom_lb_test.go index 91ec874c64a7..749eb7f9aa64 100644 --- a/test/xds/xds_client_custom_lb_test.go +++ b/test/xds/xds_client_custom_lb_test.go @@ -22,6 +22,17 @@ import ( "context" "fmt" "testing" + "time" + + "google.golang.org/grpc" + _ "google.golang.org/grpc/balancer/weightedroundrobin" // To register weighted_round_robin_experimental. 
+ "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" @@ -29,20 +40,14 @@ import ( v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" "github.com/golang/protobuf/proto" structpb "github.com/golang/protobuf/ptypes/struct" testgrpc "google.golang.org/grpc/interop/grpc_testing" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/stubserver" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/testutils/roundrobin" - "google.golang.org/grpc/internal/testutils/xds/e2e" - "google.golang.org/grpc/resolver" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" ) // wrrLocality is a helper that takes a proto message and returns a @@ -113,7 +118,10 @@ func (s) TestWrrLocality(t *testing.T) { name string 
// Configuration will be specified through load_balancing_policy field. wrrLocalityConfiguration *v3wrrlocalitypb.WrrLocality - addressDistributionWant []resolver.Address + addressDistributionWant []struct { + addr string + count int + } }{ { name: "rr_child", @@ -124,43 +132,15 @@ func (s) TestWrrLocality(t *testing.T) { // in a locality). Thus, address 1 and address 2 have 1/3 * 1/2 // probability, and addresses 3 4 5 have 2/3 * 1/3 probability of // being routed to. - addressDistributionWant: []resolver.Address{ - {Addr: backend1.Address}, - {Addr: backend1.Address}, - {Addr: backend1.Address}, - {Addr: backend1.Address}, - {Addr: backend1.Address}, - {Addr: backend1.Address}, - {Addr: backend2.Address}, - {Addr: backend2.Address}, - {Addr: backend2.Address}, - {Addr: backend2.Address}, - {Addr: backend2.Address}, - {Addr: backend2.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, - {Addr: backend4.Address}, - {Addr: backend4.Address}, - {Addr: backend4.Address}, - {Addr: backend4.Address}, - {Addr: backend4.Address}, - {Addr: backend4.Address}, - {Addr: backend4.Address}, - {Addr: backend4.Address}, - {Addr: backend5.Address}, - {Addr: backend5.Address}, - {Addr: backend5.Address}, - {Addr: backend5.Address}, - {Addr: backend5.Address}, - {Addr: backend5.Address}, - {Addr: backend5.Address}, - {Addr: backend5.Address}, + addressDistributionWant: []struct { + addr string + count int + }{ + {addr: backend1.Address, count: 6}, + {addr: backend2.Address, count: 6}, + {addr: backend3.Address, count: 8}, + {addr: backend4.Address, count: 8}, + {addr: backend5.Address, count: 8}, }, }, // This configures custom lb as the child of wrr_locality, which points @@ -174,10 +154,44 @@ func (s) TestWrrLocality(t *testing.T) { TypeUrl: "type.googleapis.com/pick_first", Value: &structpb.Struct{}, 
}), - addressDistributionWant: []resolver.Address{ - {Addr: backend1.Address}, - {Addr: backend3.Address}, - {Addr: backend3.Address}, + addressDistributionWant: []struct { + addr string + count int + }{ + {addr: backend1.Address, count: 1}, + {addr: backend3.Address, count: 2}, + }, + }, + // Sanity check for weighted round robin. Don't need to test super + // specific behaviors, as that is covered in unit tests. Set up weighted + // round robin as the endpoint picking policy with per RPC load reports + // enabled. Due the server not sending trailers with load reports, the + // weighted round robin policy should essentially function as round + // robin, and thus should have the same distribution as round robin + // above. + { + name: "custom_lb_child_wrr/", + wrrLocalityConfiguration: wrrLocality(&v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{ + EnableOobLoadReport: &wrapperspb.BoolValue{ + Value: false, + }, + // BlackoutPeriod long enough to cause load report weights to + // trigger in the scope of test case, but no load reports + // configured anyway. 
+ BlackoutPeriod: durationpb.New(10 * time.Second), + WeightExpirationPeriod: durationpb.New(10 * time.Second), + WeightUpdatePeriod: durationpb.New(time.Second), + ErrorUtilizationPenalty: &wrapperspb.FloatValue{Value: 1}, + }), + addressDistributionWant: []struct { + addr string + count int + }{ + {addr: backend1.Address, count: 6}, + {addr: backend2.Address, count: 6}, + {addr: backend3.Address, count: 8}, + {addr: backend4.Address, count: 8}, + {addr: backend5.Address, count: 8}, }, }, } @@ -223,7 +237,13 @@ func (s) TestWrrLocality(t *testing.T) { defer cc.Close() client := testgrpc.NewTestServiceClient(cc) - if err := roundrobin.CheckWeightedRoundRobinRPCs(ctx, client, test.addressDistributionWant); err != nil { + var addrDistWant []resolver.Address + for _, addrAndCount := range test.addressDistributionWant { + for i := 0; i < addrAndCount.count; i++ { + addrDistWant = append(addrDistWant, resolver.Address{Addr: addrAndCount.addr}) + } + } + if err := roundrobin.CheckWeightedRoundRobinRPCs(ctx, client, addrDistWant); err != nil { t.Fatalf("Error in expected round robin: %v", err) } }) diff --git a/xds/internal/xdsclient/xdslbregistry/converter.go b/xds/internal/xdsclient/xdslbregistry/converter.go deleted file mode 100644 index 6a5546d90159..000000000000 --- a/xds/internal/xdsclient/xdslbregistry/converter.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package xdslbregistry provides utilities to convert proto load balancing -// configuration, defined by the xDS API spec, to JSON load balancing -// configuration. -package xdslbregistry - -import ( - "encoding/json" - "fmt" - "strings" - - v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" - v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" - v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" - "github.com/golang/protobuf/proto" - structpb "github.com/golang/protobuf/ptypes/struct" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/envconfig" -) - -const ( - defaultRingHashMinSize = 1024 - defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M -) - -// ConvertToServiceConfig converts a proto Load Balancing Policy configuration -// into a json string. Returns an error if: -// - no supported policy found -// - there is more than 16 layers of recursion in the configuration -// - a failure occurs when converting the policy -func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy) (json.RawMessage, error) { - return convertToServiceConfig(lbPolicy, 0) -} - -func convertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { - // "Configurations that require more than 16 levels of recursion are - // considered invalid and should result in a NACK response." 
- A51 - if depth > 15 { - return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) - } - - // "This function iterate over the list of policy messages in - // LoadBalancingPolicy, attempting to convert each one to gRPC form, - // stopping at the first supported policy." - A52 - for _, policy := range lbPolicy.GetPolicies() { - // The policy message contains a TypedExtensionConfig - // message with the configuration information. TypedExtensionConfig in turn - // uses an Any typed typed_config field to store policy configuration of any - // type. This typed_config field is used to determine both the name of a - // policy and the configuration for it, depending on its type: - switch policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl() { - case "type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash": - if !envconfig.XDSRingHash { - continue - } - rhProto := &v3ringhashpb.RingHash{} - if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), rhProto); err != nil { - return nil, fmt.Errorf("failed to unmarshal resource: %v", err) - } - return convertRingHash(rhProto) - case "type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin": - return makeBalancerConfigJSON("round_robin", json.RawMessage("{}")), nil - case "type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality": - wrrlProto := &v3wrrlocalitypb.WrrLocality{} - if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), wrrlProto); err != nil { - return nil, fmt.Errorf("failed to unmarshal resource: %v", err) - } - return convertWrrLocality(wrrlProto, depth) - case "type.googleapis.com/xds.type.v3.TypedStruct": - tsProto := &v3xdsxdstypepb.TypedStruct{} - if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { - return nil, fmt.Errorf("failed to unmarshal resource: %v", 
err) - } - json, cont, err := convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) - if cont { - continue - } - return json, err - case "type.googleapis.com/udpa.type.v1.TypedStruct": - tsProto := &v1xdsudpatypepb.TypedStruct{} - if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { - return nil, fmt.Errorf("failed to unmarshal resource: %v", err) - } - if err := proto.Unmarshal(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), tsProto); err != nil { - return nil, fmt.Errorf("failed to unmarshal resource: %v", err) - } - json, cont, err := convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) - if cont { - continue - } - return json, err - } - // Any entry not in the above list is unsupported and will be skipped. - // This includes Least Request as well, since grpc-go does not support - // the Least Request Load Balancing Policy. - } - return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) -} - -// convertRingHash converts a proto representation of the ring_hash LB policy's -// configuration to gRPC JSON format. 
-func convertRingHash(cfg *v3ringhashpb.RingHash) (json.RawMessage, error) { - if cfg.GetHashFunction() != v3ringhashpb.RingHash_XX_HASH { - return nil, fmt.Errorf("unsupported ring_hash hash function %v", cfg.GetHashFunction()) - } - - var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize - if min := cfg.GetMinimumRingSize(); min != nil { - minSize = min.GetValue() - } - if max := cfg.GetMaximumRingSize(); max != nil { - maxSize = max.GetValue() - } - - lbCfgJSON := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) - return makeBalancerConfigJSON("ring_hash_experimental", lbCfgJSON), nil -} - -func convertWrrLocality(cfg *v3wrrlocalitypb.WrrLocality, depth int) (json.RawMessage, error) { - epJSON, err := convertToServiceConfig(cfg.GetEndpointPickingPolicy(), depth+1) - if err != nil { - return nil, fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, cfg) - } - lbCfgJSON := []byte(fmt.Sprintf(`{"childPolicy": %s}`, epJSON)) - return makeBalancerConfigJSON("xds_wrr_locality_experimental", lbCfgJSON), nil -} - -// convertCustomPolicy attempts to prepare json configuration for a custom lb -// proto, which specifies the gRPC balancer type and configuration. Returns the -// converted json, a bool representing whether the caller should continue to the -// next policy, which is true if the gRPC Balancer registry does not contain -// that balancer type, and an error which should cause caller to error if error -// converting. -func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, bool, error) { - // The gRPC policy name will be the "type name" part of the value of the - // type_url field in the TypedStruct. We get this by using the part after - // the last / character. Can assume a valid type_url from the control plane. 
- urls := strings.Split(typeURL, "/") - name := urls[len(urls)-1] - - if balancer.Get(name) == nil { - return nil, true, nil - } - - rawJSON, err := json.Marshal(s) - if err != nil { - return nil, false, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) - } - - // The Struct contained in the TypedStruct will be returned as-is as the - // configuration JSON object. - return makeBalancerConfigJSON(name, rawJSON), false, nil -} - -func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { - return []byte(fmt.Sprintf(`[{%q: %s}]`, name, value)) -} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index c117ce6e7b52..8ec1dfda3fe3 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -30,12 +30,13 @@ import ( v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/xds/matcher" - "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/grpc/xds/internal/xdslbregistry" "google.golang.org/protobuf/types/known/anypb" ) diff --git a/xds/internal/xdslbregistry/converter.go b/xds/internal/xdslbregistry/converter.go new file mode 100644 index 000000000000..c154518731bb --- /dev/null +++ b/xds/internal/xdslbregistry/converter.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdslbregistry provides utilities to convert proto load balancing +// configuration, defined by the xDS API spec, to JSON load balancing +// configuration. These converters are registered by proto type in a registry, +// which gets pulled from based off proto type passed in. +package xdslbregistry + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/envconfig" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + structpb 
"github.com/golang/protobuf/ptypes/struct" +) + +var ( + // m is a map from proto type to converter. + m = make(map[string]converter) +) + +func init() { + // Construct map here to avoid an initialization cycle. + m = map[string]converter{ + "type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash": convertRingHashProtoToServiceConfig, + "type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin": convertRoundRobinProtoToServiceConfig, + "type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality": convertWRRLocalityProtoToServiceConfig, + "type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin": convertWeightedRoundRobinProtoToServiceConfig, + "type.googleapis.com/xds.type.v3.TypedStruct": convertV3TypedStructToServiceConfig, + "type.googleapis.com/udpa.type.v1.TypedStruct": convertV1TypedStructToServiceConfig, + } +} + +// converter converts raw proto bytes into the internal Go JSON representation +// of the proto passed. Returns the json message, and an error. If both +// returned are nil, it represents continuing to the next proto. +type converter func([]byte, int) (json.RawMessage, error) + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M +) + +// ConvertToServiceConfig converts a proto Load Balancing Policy configuration +// into a json string. 
Returns an error if: +// - no supported policy found +// - there is more than 16 layers of recursion in the configuration +// - a failure occurs when converting the policy +func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy) (json.RawMessage, error) { + return convertToServiceConfig(lbPolicy, 0) +} + +func convertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { + // "Configurations that require more than 16 levels of recursion are + // considered invalid and should result in a NACK response." - A51 + if depth > 15 { + return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) + } + + // "This function iterate over the list of policy messages in + // LoadBalancingPolicy, attempting to convert each one to gRPC form, + // stopping at the first supported policy." - A52 + for _, policy := range lbPolicy.GetPolicies() { + policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl() + converter := m[policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl()] + // "Any entry not in the above list is unsupported and will be skipped." + // - A52 + // This includes Least Request as well, since grpc-go does not support + // the Least Request Load Balancing Policy. 
+ if converter == nil { + continue + } + json, err := converter(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), depth) + if json == nil && err == nil { + continue + } + return json, err + } + return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) +} + +func convertRingHashProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + if !envconfig.XDSRingHash { + return nil, nil + } + rhProto := &v3ringhashpb.RingHash{} + if err := proto.Unmarshal(rawProto, rhProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if rhProto.GetHashFunction() != v3ringhashpb.RingHash_XX_HASH { + return nil, fmt.Errorf("unsupported ring_hash hash function %v", rhProto.GetHashFunction()) + } + + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhProto.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := rhProto.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhCfg := &ringhash.LBConfig{ + MinRingSize: minSize, + MaxRingSize: maxSize, + } + + rhCfgJSON, err := json.Marshal(rhCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", rhCfg, err) + } + return makeBalancerConfigJSON(ringhash.Name, rhCfgJSON), nil +} + +func convertRoundRobinProtoToServiceConfig([]byte, int) (json.RawMessage, error) { + return makeBalancerConfigJSON("round_robin", json.RawMessage("{}")), nil +} + +type wrrLocalityLBConfig struct { + ChildPolicy json.RawMessage `json:"childPolicy,omitempty"` +} + +func convertWRRLocalityProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + wrrlProto := &v3wrrlocalitypb.WrrLocality{} + if err := proto.Unmarshal(rawProto, wrrlProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + epJSON, err := convertToServiceConfig(wrrlProto.GetEndpointPickingPolicy(), depth+1) + if err != nil { + return nil, 
fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, wrrlProto) + } + wrrLCfg := wrrLocalityLBConfig{ + ChildPolicy: epJSON, + } + + lbCfgJSON, err := json.Marshal(wrrLCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLCfg, err) + } + return makeBalancerConfigJSON(wrrlocality.Name, lbCfgJSON), nil +} + +func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + cswrrProto := &v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{} + if err := proto.Unmarshal(rawProto, cswrrProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + wrrLBCfg := &wrrLBConfig{} + // Only set fields if specified in proto. If not set, ParseConfig of the WRR + // will populate the config with defaults. + if enableOOBLoadReportCfg := cswrrProto.GetEnableOobLoadReport(); enableOOBLoadReportCfg != nil { + wrrLBCfg.EnableOOBLoadReport = enableOOBLoadReportCfg.GetValue() + } + if oobReportingPeriodCfg := cswrrProto.GetOobReportingPeriod(); oobReportingPeriodCfg != nil { + wrrLBCfg.OOBReportingPeriod = internalserviceconfig.Duration(oobReportingPeriodCfg.AsDuration()) + } + if blackoutPeriodCfg := cswrrProto.GetBlackoutPeriod(); blackoutPeriodCfg != nil { + wrrLBCfg.BlackoutPeriod = internalserviceconfig.Duration(blackoutPeriodCfg.AsDuration()) + } + if weightExpirationPeriodCfg := cswrrProto.GetWeightExpirationPeriod(); weightExpirationPeriodCfg != nil { + wrrLBCfg.WeightExpirationPeriod = internalserviceconfig.Duration(weightExpirationPeriodCfg.AsDuration()) + } + if weightUpdatePeriodCfg := cswrrProto.GetWeightUpdatePeriod(); weightUpdatePeriodCfg != nil { + wrrLBCfg.WeightUpdatePeriod = internalserviceconfig.Duration(weightUpdatePeriodCfg.AsDuration()) + } + if errorUtilizationPenaltyCfg := cswrrProto.GetErrorUtilizationPenalty(); errorUtilizationPenaltyCfg != nil { + wrrLBCfg.ErrorUtilizationPenalty = float64(errorUtilizationPenaltyCfg.GetValue())
+ } + + lbCfgJSON, err := json.Marshal(wrrLBCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLBCfg, err) + } + return makeBalancerConfigJSON(weightedroundrobin.Name, lbCfgJSON), nil +} + +func convertV1TypedStructToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + tsProto := &v1xdsudpatypepb.TypedStruct{} + if err := proto.Unmarshal(rawProto, tsProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) +} + +func convertV3TypedStructToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + tsProto := &v3xdsxdstypepb.TypedStruct{} + if err := proto.Unmarshal(rawProto, tsProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) +} + +// convertCustomPolicy attempts to prepare json configuration for a custom lb +// proto, which specifies the gRPC balancer type and configuration. Returns the +// converted json and an error which should cause caller to error if error +// converting. If both json and error returned are nil, it means the gRPC +// Balancer registry does not contain that balancer type, and the caller should +// continue to the next policy. +func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. 
+ pos := strings.LastIndex(typeURL, "/") + name := typeURL[pos+1:] + + if balancer.Get(name) == nil { + return nil, nil + } + + rawJSON, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) + } + + // The Struct contained in the TypedStruct will be returned as-is as the + // configuration JSON object. + return makeBalancerConfigJSON(name, rawJSON), nil +} + +type wrrLBConfig struct { + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod internalserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod internalserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod internalserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod internalserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} + +func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { + return []byte(fmt.Sprintf(`[{%q: %s}]`, name, value)) +} diff --git a/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go b/xds/internal/xdslbregistry/converter_test.go similarity index 87% rename from xds/internal/xdsclient/xdslbregistry/tests/converter_test.go rename to xds/internal/xdslbregistry/converter_test.go index c6d947d6bfde..9d418d9fe45a 100644 --- a/xds/internal/xdsclient/xdslbregistry/tests/converter_test.go +++ b/xds/internal/xdslbregistry/converter_test.go @@ -16,8 +16,8 @@ * */ -// Package tests_test contains test cases for the xDS LB Policy Registry. -package tests_test +// Package xdslbregistry_test contains test cases for the xDS LB Policy Registry. 
+package xdslbregistry_test import ( "encoding/json" @@ -46,7 +46,7 @@ import ( "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/wrrlocality" - "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/grpc/xds/internal/xdslbregistry" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/wrapperspb" ) @@ -63,8 +63,15 @@ type customLBConfig struct { serviceconfig.LoadBalancingConfig } -// We have these tests in a separate test package in order to not take a -// dependency on the internal xDS balancer packages within the xDS Client. +func wrrLocalityBalancerConfig(childPolicy *internalserviceconfig.BalancerConfig) *internalserviceconfig.BalancerConfig { + return &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: childPolicy, + }, + } +} + func (s) TestConvertToServiceConfigSuccess(t *testing.T) { const customLBPolicyName = "myorg.MyCustomLeastRequestPolicy" stub.Register(customLBPolicyName, stub.BalancerFuncs{ @@ -225,14 +232,9 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: wrrlocality.Name, - Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: "round_robin", - }, - }, - }, + wantConfig: wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }), }, { name: "wrr_locality_child_custom_lb_type_v3_struct", @@ -248,15 +250,25 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: wrrlocality.Name, - Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: "myorg.MyCustomLeastRequestPolicy", - Config: customLBConfig{}, + wantConfig: wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ 
+ Name: "myorg.MyCustomLeastRequestPolicy", + Config: customLBConfig{}, + }), + }, + { + name: "on-the-boundary-of-recursive-limit", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(&v3roundrobinpb.RoundRobin{}))))))))))))))), + }, }, }, }, + wantConfig: wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }))))))))))))))), }, } @@ -347,15 +359,13 @@ func (s) TestConvertToServiceConfigFailure(t *testing.T) { }, wantErr: "no supported policy found in policy list", }, - // TODO: test validity right on the boundary of recursion 16 layers - // total. 
{ - name: "too much recursion", + name: "exceeds-boundary-of-recursive-limit-by-1", policy: &v3clusterpb.LoadBalancingPolicy{ Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ { TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ - TypedConfig: wrrLocalityAny(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(&v3roundrobinpb.RoundRobin{}))))))))))))))))))))))), + TypedConfig: wrrLocalityAny(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(&v3roundrobinpb.RoundRobin{})))))))))))))))), }, }, }, From 5dcfb37c0b43586965ed1ffb86bc63d706bc2c4e Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 12 May 2023 14:09:59 -0700 Subject: [PATCH 922/998] interop: hold lock on server for OOB metrics updates; share 30s timeout (#6277) --- interop/interop_test.sh | 23 ++++++++++++++++++++++- interop/test_utils.go | 21 ++++++++++----------- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/interop/interop_test.sh b/interop/interop_test.sh index 99e12c3c3829..65e12a16b6cd 100755 --- a/interop/interop_test.sh +++ b/interop/interop_test.sh @@ -45,6 +45,20 @@ pass () { echo "$(tput setaf 2) $(date): $1 $(tput sgr 0)" } +withTimeout () { + timer=$1 + shift + + # Run command in the background. + cmd=$(printf '%q ' "$@") + eval "$cmd" & + wpid=$! + # Kill after 20 seconds. + sleep $timer && kill $wpid & + # Wait for the background thread. 
+ wait $wpid +} + # Don't run some tests that need a special environment: # "google_default_credentials" # "compute_engine_channel_credentials" @@ -70,6 +84,8 @@ CASES=( "custom_metadata" "unimplemented_method" "unimplemented_service" + "orca_per_rpc" + "orca_oob" ) # Build server @@ -96,7 +112,12 @@ for case in ${CASES[@]}; do echo "$(tput setaf 4) $(date): testing: ${case} $(tput sgr 0)" CLIENT_LOG="$(mktemp)" - if ! GRPC_GO_LOG_SEVERITY_LEVEL=info timeout 20 go run ./interop/client --use_tls --server_host_override=foo.test.google.fr --use_test_ca --test_case="${case}" &> $CLIENT_LOG; then + if ! GRPC_GO_LOG_SEVERITY_LEVEL=info withTimeout 20 go run ./interop/client \ + --use_tls \ + --server_host_override=foo.test.google.fr \ + --use_test_ca --test_case="${case}" \ + --service_config_json='{ "loadBalancingConfig": [{ "test_backend_metrics_load_balancer": {} }]}' \ + &> $CLIENT_LOG; then fail "FAIL: test case ${case} got server log: $(cat $SERVER_LOG) diff --git a/interop/test_utils.go b/interop/test_utils.go index 0057c071217a..29916876eeb4 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -30,6 +30,7 @@ import ( "io" "os" "strings" + "sync" "time" "github.com/golang/protobuf/proto" @@ -779,6 +780,7 @@ func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.D type testServer struct { testgrpc.UnimplementedTestServiceServer + orcaMu sync.Mutex metricsRecorder orca.ServerMetricsRecorder } @@ -842,11 +844,6 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* // recorder in the context, if present. setORCAMetrics(r, orcaData) } - if r, orcaData := s.metricsRecorder, in.GetOrcaOobReport(); r != nil && orcaData != nil { - // Transfer the request's OOB ORCA data to the server metrics recorder - // in the server, if present. 
- setORCAMetrics(r, orcaData) - } return &testpb.SimpleResponse{ Payload: pl, }, nil @@ -912,6 +909,7 @@ func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallSe stream.SetTrailer(trailer) } } + hasORCALock := false for { in, err := stream.Recv() if err == io.EOF { @@ -929,6 +927,11 @@ func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallSe if r, orcaData := s.metricsRecorder, in.GetOrcaOobReport(); r != nil && orcaData != nil { // Transfer the request's OOB ORCA data to the server metrics recorder // in the server, if present. + if !hasORCALock { + s.orcaMu.Lock() + defer s.orcaMu.Unlock() + hasORCALock = true + } setORCAMetrics(r, orcaData) } @@ -1036,14 +1039,12 @@ func DoORCAOOBTest(tc testgrpc.TestServiceClient) { logger.Fatalf("/TestService/FullDuplexCall received error receiving: %v", err) } - ctx2, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() want := &v3orcapb.OrcaLoadReport{ CpuUtilization: 0.8210, MemUtilization: 0.5847, Utilization: map[string]float64{"util": 0.30499}, } - checkORCAMetrics(ctx2, tc, want) + checkORCAMetrics(ctx, tc, want) err = stream.Send(&testpb.StreamingOutputCallRequest{ OrcaOobReport: &testpb.TestOrcaReport{ @@ -1061,14 +1062,12 @@ func DoORCAOOBTest(tc testgrpc.TestServiceClient) { logger.Fatalf("/TestService/FullDuplexCall received error receiving: %v", err) } - ctx3, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() want = &v3orcapb.OrcaLoadReport{ CpuUtilization: 0.29309, MemUtilization: 0.2, Utilization: map[string]float64{"util": 0.2039}, } - checkORCAMetrics(ctx3, tc, want) + checkORCAMetrics(ctx, tc, want) } func checkORCAMetrics(ctx context.Context, tc testgrpc.TestServiceClient, want *v3orcapb.OrcaLoadReport) { From 0bdae480582d74b0b7851e7ebc6dbe411bf7e5c3 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 15 May 2023 14:40:35 -0700 Subject: [PATCH 923/998] interop: fix interop_test.sh shutdown (#6279) --- interop/interop_test.sh | 8 
+++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/interop/interop_test.sh b/interop/interop_test.sh index 65e12a16b6cd..7fc290a12c6b 100755 --- a/interop/interop_test.sh +++ b/interop/interop_test.sh @@ -53,10 +53,16 @@ withTimeout () { cmd=$(printf '%q ' "$@") eval "$cmd" & wpid=$! - # Kill after 20 seconds. + # Kill after $timer seconds. sleep $timer && kill $wpid & + kpid=$! # Wait for the background thread. wait $wpid + res=$? + # Kill the killer pid in case it's still running. + kill $kpid || true + wait $kpid || true + return $res } # Don't run some tests that need a special environment: From 1230f0e43c314798e5ce007526737160b447504f Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 15 May 2023 18:19:18 -0400 Subject: [PATCH 924/998] xds/internal/xdsclient: Split registry up and two separate packages (#6278) --- .../xdslbregistry/converter}/converter.go | 71 +++------------- .../xdsclient/xdslbregistry/xdslbregistry.go | 85 +++++++++++++++++++ .../xdslbregistry/xdslbregistry_test.go} | 27 +++--- .../xdsresource/tests/unmarshal_cds_test.go | 1 + .../xdsclient/xdsresource/unmarshal_cds.go | 4 +- xds/xds.go | 15 ++-- 6 files changed, 120 insertions(+), 83 deletions(-) rename xds/internal/{xdslbregistry => xdsclient/xdslbregistry/converter}/converter.go (70%) create mode 100644 xds/internal/xdsclient/xdslbregistry/xdslbregistry.go rename xds/internal/{xdslbregistry/converter_test.go => xdsclient/xdslbregistry/xdslbregistry_test.go} (98%) diff --git a/xds/internal/xdslbregistry/converter.go b/xds/internal/xdsclient/xdslbregistry/converter/converter.go similarity index 70% rename from xds/internal/xdslbregistry/converter.go rename to xds/internal/xdsclient/xdslbregistry/converter/converter.go index c154518731bb..27dc6533087b 100644 --- a/xds/internal/xdslbregistry/converter.go +++ b/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -16,11 +16,11 @@ * */ -// Package 
xdslbregistry provides utilities to convert proto load balancing +// Package converter provides converters to convert proto load balancing // configuration, defined by the xDS API spec, to JSON load balancing // configuration. These converters are registered by proto type in a registry, // which gets pulled from based off proto type passed in. -package xdslbregistry +package converter import ( "encoding/json" @@ -34,81 +34,30 @@ import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" structpb "github.com/golang/protobuf/ptypes/struct" ) -var ( - // m is a map from proto type to converter. - m = make(map[string]converter) -) - func init() { - // Construct map here to avoid an initialization cycle. 
- m = map[string]converter{ - "type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash": convertRingHashProtoToServiceConfig, - "type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin": convertRoundRobinProtoToServiceConfig, - "type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality": convertWRRLocalityProtoToServiceConfig, - "type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin": convertWeightedRoundRobinProtoToServiceConfig, - "type.googleapis.com/xds.type.v3.TypedStruct": convertV3TypedStructToServiceConfig, - "type.googleapis.com/udpa.type.v1.TypedStruct": convertV1TypedStructToServiceConfig, - } + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash", convertRingHashProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", convertRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", convertWRRLocalityProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", convertWeightedRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) + xdslbregistry.Register("type.googleapis.com/udpa.type.v1.TypedStruct", convertV1TypedStructToServiceConfig) } -// converter converts raw proto bytes into the internal Go JSON representation -// of the proto passed. Returns the json message, and an error. If both -// returned are nil, it represents continuing to the next proto. 
-type converter func([]byte, int) (json.RawMessage, error) - const ( defaultRingHashMinSize = 1024 defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M ) -// ConvertToServiceConfig converts a proto Load Balancing Policy configuration -// into a json string. Returns an error if: -// - no supported policy found -// - there is more than 16 layers of recursion in the configuration -// - a failure occurs when converting the policy -func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy) (json.RawMessage, error) { - return convertToServiceConfig(lbPolicy, 0) -} - -func convertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { - // "Configurations that require more than 16 levels of recursion are - // considered invalid and should result in a NACK response." - A51 - if depth > 15 { - return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) - } - - // "This function iterate over the list of policy messages in - // LoadBalancingPolicy, attempting to convert each one to gRPC form, - // stopping at the first supported policy." - A52 - for _, policy := range lbPolicy.GetPolicies() { - policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl() - converter := m[policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl()] - // "Any entry not in the above list is unsupported and will be skipped." - // - A52 - // This includes Least Request as well, since grpc-go does not support - // the Least Request Load Balancing Policy. 
- if converter == nil { - continue - } - json, err := converter(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), depth) - if json == nil && err == nil { - continue - } - return json, err - } - return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) -} - func convertRingHashProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { if !envconfig.XDSRingHash { return nil, nil @@ -154,7 +103,7 @@ func convertWRRLocalityProtoToServiceConfig(rawProto []byte, depth int) (json.Ra if err := proto.Unmarshal(rawProto, wrrlProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) } - epJSON, err := convertToServiceConfig(wrrlProto.GetEndpointPickingPolicy(), depth+1) + epJSON, err := xdslbregistry.ConvertToServiceConfig(wrrlProto.GetEndpointPickingPolicy(), depth+1) if err != nil { return nil, fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, wrrlProto) } diff --git a/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go b/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go new file mode 100644 index 000000000000..0f3d1df4db20 --- /dev/null +++ b/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package xdslbregistry provides a registry of converters that convert proto +// from load balancing configuration, defined by the xDS API spec, to JSON load +// balancing configuration. +package xdslbregistry + +import ( + "encoding/json" + "fmt" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" +) + +var ( + // m is a map from proto type to Converter. + m = make(map[string]Converter) +) + +// Register registers the converter to the map keyed on a proto type. Must be +// called at init time. Not thread safe. +func Register(protoType string, c Converter) { + m[protoType] = c +} + +// SetRegistry sets the xDS LB registry. Must be called at init time. Not thread +// safe. +func SetRegistry(registry map[string]Converter) { + m = registry +} + +// Converter converts raw proto bytes into the internal Go JSON representation +// of the proto passed. Returns the json message, and an error. If both +// returned are nil, it represents continuing to the next proto. +type Converter func([]byte, int) (json.RawMessage, error) + +// ConvertToServiceConfig converts a proto Load Balancing Policy configuration +// into a json string. Returns an error if: +// - no supported policy found +// - there is more than 16 layers of recursion in the configuration +// - a failure occurs when converting the policy +func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { + // "Configurations that require more than 16 levels of recursion are + // considered invalid and should result in a NACK response." - A51 + if depth > 15 { + return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) + } + + // "This function iterate over the list of policy messages in + // LoadBalancingPolicy, attempting to convert each one to gRPC form, + // stopping at the first supported policy." 
- A52 + for _, policy := range lbPolicy.GetPolicies() { + policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl() + converter := m[policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl()] + // "Any entry not in the above list is unsupported and will be skipped." + // - A52 + // This includes Least Request as well, since grpc-go does not support + // the Least Request Load Balancing Policy. + if converter == nil { + continue + } + json, err := converter(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), depth) + if json == nil && err == nil { + continue + } + return json, err + } + return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) +} diff --git a/xds/internal/xdslbregistry/converter_test.go b/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go similarity index 98% rename from xds/internal/xdslbregistry/converter_test.go rename to xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go index 9d418d9fe45a..b3f19c2e5953 100644 --- a/xds/internal/xdslbregistry/converter_test.go +++ b/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go @@ -24,18 +24,8 @@ import ( "strings" "testing" - v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" - v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" - v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" - v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" - v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" 
"github.com/golang/protobuf/proto" - structpb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" - _ "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/envconfig" @@ -44,11 +34,22 @@ import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/serviceconfig" + _ "google.golang.org/grpc/xds" // Register the xDS LB Registry Converters. "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/wrrlocality" - "google.golang.org/grpc/xds/internal/xdslbregistry" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/wrapperspb" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + structpb "github.com/golang/protobuf/ptypes/struct" ) type s struct { @@ -281,7 +282,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { envconfig.XDSRingHash = oldRingHashSupport }() } - rawJSON, err := 
xdslbregistry.ConvertToServiceConfig(test.policy) + rawJSON, err := xdslbregistry.ConvertToServiceConfig(test.policy, 0) if err != nil { t.Fatalf("ConvertToServiceConfig(%s) failed: %v", pretty.ToJSON(test.policy), err) } @@ -376,7 +377,7 @@ func (s) TestConvertToServiceConfigFailure(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, gotErr := xdslbregistry.ConvertToServiceConfig(test.policy) + _, gotErr := xdslbregistry.ConvertToServiceConfig(test.policy, 0) // Test the error substring to test the different root causes of // errors. This is more brittle over time, but it's important to // test the root cause of the errors emitted from the diff --git a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go index 96ad204ad4b3..afa418815a0b 100644 --- a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go @@ -32,6 +32,7 @@ import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/serviceconfig" + _ "google.golang.org/grpc/xds" // Register the xDS LB Registry Converters. 
"google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 8ec1dfda3fe3..d07ad2ea1aee 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -35,8 +35,8 @@ import ( "google.golang.org/grpc/internal/pretty" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/grpc/xds/internal/xdslbregistry" "google.golang.org/protobuf/types/known/anypb" ) @@ -127,7 +127,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } if cluster.GetLoadBalancingPolicy() != nil && envconfig.XDSCustomLBPolicy { - lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy()) + lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy(), 0) if err != nil { return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err) } diff --git a/xds/xds.go b/xds/xds.go index 8e6def6014a7..bd6ed9c90f13 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -36,13 +36,14 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" - _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. 
- _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. - _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. - _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter" // Register the xDS LB Registry Converters. 
v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) From 4eb88d7d67c84177572ca435ed58aef878ae2d50 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 15 May 2023 15:48:02 -0700 Subject: [PATCH 925/998] cleanup: use new Duration type in base ServiceConfig (#6284) --- service_config.go | 75 +++++------------------------------------- service_config_test.go | 51 ---------------------------- 2 files changed, 8 insertions(+), 118 deletions(-) diff --git a/service_config.go b/service_config.go index f22acace4253..0df11fc09882 100644 --- a/service_config.go +++ b/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -106,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -129,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -252,15 +206,10 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) @@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || 
jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/service_config_test.go b/service_config_test.go index b3c6988e8d97..90ed40a68021 100644 --- a/service_config_test.go +++ b/service_config_test.go @@ -20,8 +20,6 @@ package grpc import ( "encoding/json" - "fmt" - "math" "reflect" "testing" "time" @@ -449,55 +447,6 @@ func (s) TestParseMethodConfigDuplicatedName(t *testing.T) { }) } -func (s) TestParseDuration(t *testing.T) { - testCases := []struct { - s *string - want *time.Duration - err bool - }{ - {s: nil, want: nil}, - {s: newString("1s"), want: newDuration(time.Second)}, - {s: newString("-1s"), want: newDuration(-time.Second)}, - {s: newString("1.1s"), want: newDuration(1100 * time.Millisecond)}, - {s: newString("1.s"), want: newDuration(time.Second)}, - {s: newString("1.0s"), want: newDuration(time.Second)}, - {s: newString(".002s"), want: newDuration(2 * time.Millisecond)}, - {s: newString(".002000s"), want: newDuration(2 * time.Millisecond)}, - {s: newString("0.003s"), want: newDuration(3 * time.Millisecond)}, - {s: newString("0.000004s"), want: newDuration(4 * time.Microsecond)}, - {s: newString("5000.000000009s"), want: newDuration(5000*time.Second + 9*time.Nanosecond)}, - {s: newString("4999.999999999s"), want: newDuration(5000*time.Second - time.Nanosecond)}, - {s: newString("1"), err: true}, - {s: newString("s"), err: true}, - {s: newString(".s"), err: true}, - {s: newString("1 s"), err: true}, - {s: newString(" 1s"), err: true}, - {s: 
newString("1ms"), err: true}, - {s: newString("1.1.1s"), err: true}, - {s: newString("Xs"), err: true}, - {s: newString("as"), err: true}, - {s: newString(".0000000001s"), err: true}, - {s: newString(fmt.Sprint(math.MaxInt32) + "s"), want: newDuration(math.MaxInt32 * time.Second)}, - {s: newString(fmt.Sprint(int64(math.MaxInt32)+1) + "s"), err: true}, - } - for _, tc := range testCases { - got, err := parseDuration(tc.s) - if tc.err != (err != nil) || - (got == nil) != (tc.want == nil) || - (got != nil && *got != *tc.want) { - wantErr := "" - if tc.err { - wantErr = "" - } - s := "" - if tc.s != nil { - s = `&"` + *tc.s + `"` - } - t.Errorf("parseDuration(%v) = %v, %v; want %v, %v", s, got, err, tc.want, wantErr) - } - } -} - func newBool(b bool) *bool { return &b } From 24fd25216321976ff8428880d837e3d8fcaac8d2 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 15 May 2023 15:49:07 -0700 Subject: [PATCH 926/998] proto: update generated code to match grpc-proto changes (#6283) --- interop/grpc_testing/messages.pb.go | 579 ++++++++++++++-------------- 1 file changed, 282 insertions(+), 297 deletions(-) diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index 21d7854de481..ccc27a936697 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -384,8 +384,6 @@ type SimpleRequest struct { FillGrpclbRouteType bool `protobuf:"varint,10,opt,name=fill_grpclb_route_type,json=fillGrpclbRouteType,proto3" json:"fill_grpclb_route_type,omitempty"` // If set the server should record this metrics report data for the current RPC. OrcaPerQueryReport *TestOrcaReport `protobuf:"bytes,11,opt,name=orca_per_query_report,json=orcaPerQueryReport,proto3" json:"orca_per_query_report,omitempty"` - // If set the server should update this metrics report data at the OOB server. 
- OrcaOobReport *TestOrcaReport `protobuf:"bytes,12,opt,name=orca_oob_report,json=orcaOobReport,proto3" json:"orca_oob_report,omitempty"` } func (x *SimpleRequest) Reset() { @@ -497,13 +495,6 @@ func (x *SimpleRequest) GetOrcaPerQueryReport() *TestOrcaReport { return nil } -func (x *SimpleRequest) GetOrcaOobReport() *TestOrcaReport { - if x != nil { - return x.OrcaOobReport - } - return nil -} - // Unary response, as configured by the request. type SimpleResponse struct { state protoimpl.MessageState @@ -1642,7 +1633,7 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x45, 0x63, 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xb9, 0x05, 0x0a, 0x0d, 0x53, 0x69, 0x6d, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf3, 0x04, 0x0a, 0x0d, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, @@ -1681,268 +1672,263 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x12, 0x6f, 0x72, 0x63, 0x61, - 0x50, 0x65, 0x72, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x44, - 0x0a, 0x0f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0d, 0x6f, 0x72, 0x63, 0x61, 0x4f, 0x6f, 0x62, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x22, 0x82, 0x02, 0x0a, 0x0e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x61, 0x75, 0x74, 0x68, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x67, 0x72, - 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, - 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x65, 0x78, - 0x70, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x54, - 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, - 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, - 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x69, 0x7a, 0x65, 0x22, 0x82, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x55, 0x73, - 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0xe9, 0x02, 0x0a, 0x1a, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x41, 0x0a, 0x0f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x63, 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x44, 0x0a, 0x0f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 
0x6f, - 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0d, 0x6f, 0x72, 0x63, 0x61, 0x4f, 0x6f, 0x62, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, - 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, - 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, - 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x52, - 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, - 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, - 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x09, - 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x4c, 0x6f, 0x61, + 0x50, 0x65, 0x72, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x82, + 0x02, 0x0a, 0x0e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2f, 0x0a, 0x07, 
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, + 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 
0x74, 0x5f, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x43, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x82, + 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x55, 0x73, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x22, 0xe9, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, + 0x79, 
0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x41, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, + 0x63, 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x0f, 0x6f, 0x72, 0x63, + 0x61, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x52, 0x0d, 0x6f, 0x72, 0x63, 0x61, 0x4f, 0x6f, 0x62, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x22, + 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, + 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 
0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, + 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, + 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, + 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xb1, 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, + 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, + 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, - 0x73, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, - 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, - 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, - 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, - 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, - 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, + 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, + 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 
0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, + 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xb1, - 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x64, 0x0a, - 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, - 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, - 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, - 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, - 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 
- 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, - 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, - 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 
0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, - 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, - 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, - 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, - 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, - 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 
0x63, 0x73, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, - 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, + 0x79, 0x50, 
0x65, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, + 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, + 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, + 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, - 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, + 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, + 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 
0x2e, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, + 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, + 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, - 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 
0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, - 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, + 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, + 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, + 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 
0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, + 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, + 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, - 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 
0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, - 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, - 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x03, - 0x0a, 0x0e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, - 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, - 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, - 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2d, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, - 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, - 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, - 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, - 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x1f, 0x0a, 0x0b, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4f, - 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x2a, 0x6f, 0x0a, 0x0f, - 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1e, - 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, - 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 
0x10, 0x02, 0x42, 0x1d, 0x0a, - 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x0e, 0x54, 0x65, 0x73, 0x74, + 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x70, + 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x74, + 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, + 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, + 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, + 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, + 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, + 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, + 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, + 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, + 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2004,38 +1990,37 @@ var file_grpc_testing_messages_proto_depIdxs = []int32{ 5, // 4: 
grpc.testing.SimpleRequest.response_status:type_name -> grpc.testing.EchoStatus 3, // 5: grpc.testing.SimpleRequest.expect_compressed:type_name -> grpc.testing.BoolValue 21, // 6: grpc.testing.SimpleRequest.orca_per_query_report:type_name -> grpc.testing.TestOrcaReport - 21, // 7: grpc.testing.SimpleRequest.orca_oob_report:type_name -> grpc.testing.TestOrcaReport - 4, // 8: grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload - 1, // 9: grpc.testing.SimpleResponse.grpclb_route_type:type_name -> grpc.testing.GrpclbRouteType - 4, // 10: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload - 3, // 11: grpc.testing.StreamingInputCallRequest.expect_compressed:type_name -> grpc.testing.BoolValue - 3, // 12: grpc.testing.ResponseParameters.compressed:type_name -> grpc.testing.BoolValue - 0, // 13: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType - 10, // 14: grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters - 4, // 15: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload - 5, // 16: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus - 21, // 17: grpc.testing.StreamingOutputCallRequest.orca_oob_report:type_name -> grpc.testing.TestOrcaReport - 4, // 18: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload - 23, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - 24, // 20: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - 26, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - 27, // 22: 
grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - 28, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - 30, // 24: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - 2, // 25: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 32, // 26: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata - 33, // 27: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry - 34, // 28: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry - 25, // 29: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - 22, // 30: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - 31, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - 29, // 32: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - 2, // 33: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 34, // [34:34] is the sub-list for method output_type - 34, // [34:34] is the sub-list for method input_type - 34, // [34:34] is the sub-list for extension type_name - 34, // [34:34] is the sub-list for extension extendee - 0, // [0:34] is the sub-list for field type_name + 4, // 7: 
grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload + 1, // 8: grpc.testing.SimpleResponse.grpclb_route_type:type_name -> grpc.testing.GrpclbRouteType + 4, // 9: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload + 3, // 10: grpc.testing.StreamingInputCallRequest.expect_compressed:type_name -> grpc.testing.BoolValue + 3, // 11: grpc.testing.ResponseParameters.compressed:type_name -> grpc.testing.BoolValue + 0, // 12: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType + 10, // 13: grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters + 4, // 14: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload + 5, // 15: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus + 21, // 16: grpc.testing.StreamingOutputCallRequest.orca_oob_report:type_name -> grpc.testing.TestOrcaReport + 4, // 17: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload + 23, // 18: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + 24, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + 26, // 20: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + 27, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + 28, // 22: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + 30, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> 
grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + 2, // 24: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 32, // 25: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata + 33, // 26: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry + 34, // 27: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry + 25, // 28: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + 22, // 29: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + 31, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + 29, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + 2, // 32: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name } func init() { file_grpc_testing_messages_proto_init() } From 8eba9c2de14ab211f21633104165fb082d272bfe Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 15 May 2023 15:49:19 -0700 Subject: [PATCH 927/998] github: upgrade to v3 of checkout & setup-go (#6280) --- .github/workflows/testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 
f7f0fbec6e45..afb830852597 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -24,11 +24,11 @@ jobs: steps: # Setup the environment. - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: '1.20' - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 # Run the vet checks. - name: vet @@ -89,12 +89,12 @@ jobs: run: echo "${{ matrix.grpcenv }}" >> $GITHUB_ENV - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: ${{ matrix.goversion }} - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 # Only run vet for 'vet' runs. - name: Run vet.sh From 756119c7de49e91b6f3b9d693b9850e1598938eb Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 16 May 2023 15:46:31 -0400 Subject: [PATCH 928/998] xds/outlierdetection: forward metadata from child picker (#6287) --- xds/internal/balancer/outlierdetection/balancer.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 97f5503f38d1..1b35f518b48b 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -412,13 +412,15 @@ func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, erro // programming. 
logger.Errorf("Picked SubConn from child picker is not a SubConnWrapper") return balancer.PickResult{ - SubConn: pr.SubConn, - Done: done, + SubConn: pr.SubConn, + Done: done, + Metadata: pr.Metadata, }, nil } return balancer.PickResult{ - SubConn: scw.SubConn, - Done: done, + SubConn: scw.SubConn, + Done: done, + Metadata: pr.Metadata, }, nil } From 92e65c890c9abdc571d88d7dd885ff6c4ae9dd7b Mon Sep 17 00:00:00 2001 From: Sergii Tkachenko Date: Tue, 16 May 2023 18:20:55 -0400 Subject: [PATCH 929/998] test/kokoro: Add custom_lb_test to the xds_k8s_lb job (#6290) --- test/kokoro/xds_k8s_lb.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index f1f01794a056..d50f0f5484fb 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -158,7 +158,16 @@ main() { # Run tests cd "${TEST_DRIVER_FULL_DIR}" local failed_tests=0 - test_suites=("api_listener_test" "change_backend_service_test" "failover_test" "remove_neg_test" "round_robin_test" "affinity_test" "outlier_detection_test") + test_suites=( + "affinity_test" + "api_listener_test" + "change_backend_service_test" + "custom_lb_test" + "failover_test" + "outlier_detection_test" + "remove_neg_test" + "round_robin_test" + ) for test in "${test_suites[@]}"; do run_test $test || (( ++failed_tests )) done From 52fef6da12c6bebdb10abdf7466bc0249bf18123 Mon Sep 17 00:00:00 2001 From: erm-g <110920239+erm-g@users.noreply.github.com> Date: Wed, 17 May 2023 14:03:37 +0000 Subject: [PATCH 930/998] authz: Stdout logger (#6230) * Draft of StdoutLogger * Fitting StdoutLogger to lb patterns * conversion from proto to json for laudit loggers * Tests for multiple loggers and empty Options * Added LoggerConfig impl * Switched to grpcLogger and added a unit test comparing log with os.StdOut * Minor fix in exception handling wording * Added timestamp for logging statement * Changed format to json and added custom marshalling * 
Migration to log.go and additional test for a full event * Migration of stdout logger to a separate package * migration to grpcLogger, unit test fix * Delete xds parsing functionality. Will be done in a separate PR * Delete xds parsing functionality. Will be done in a separate PR * Address PR comments (embedding interface, table test, pointer optimizations) * vet.sh fixes * Address PR comments * Commit for go tidy changes * vet.sh fix for buf usage * Address PR comments * Address PR comments * Address PR comments (easwars) * Address PR comments (luwei) * Migrate printing to standard out from log package level func to a Logger struct func. Add timestamp testing logic. Add registry presense test. * Changed event Timestamp format back to RFC3339 * Address PR comments * Address PR comments * Address PR comments * Address PR comments --- authz/audit/stdout/stdout_logger.go | 107 +++++++++++++++++ authz/audit/stdout/stdout_logger_test.go | 140 +++++++++++++++++++++++ 2 files changed, 247 insertions(+) create mode 100644 authz/audit/stdout/stdout_logger.go create mode 100644 authz/audit/stdout/stdout_logger_test.go diff --git a/authz/audit/stdout/stdout_logger.go b/authz/audit/stdout/stdout_logger.go new file mode 100644 index 000000000000..ee095527ccec --- /dev/null +++ b/authz/audit/stdout/stdout_logger.go @@ -0,0 +1,107 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package stdout defines an stdout audit logger. +package stdout + +import ( + "encoding/json" + "log" + "os" + "time" + + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/grpclog" +) + +var grpcLogger = grpclog.Component("authz-audit") + +func init() { + audit.RegisterLoggerBuilder(&loggerBuilder{ + goLogger: log.New(os.Stdout, "", 0), + }) +} + +type event struct { + FullMethodName string `json:"rpc_method"` + Principal string `json:"principal"` + PolicyName string `json:"policy_name"` + MatchedRule string `json:"matched_rule"` + Authorized bool `json:"authorized"` + Timestamp string `json:"timestamp"` // Time when the audit event is logged via Log method +} + +// logger implements the audit.Logger interface by logging to standard output. +type logger struct { + goLogger *log.Logger +} + +// Log marshals the audit.Event to json and prints it to standard output. +func (l *logger) Log(event *audit.Event) { + jsonContainer := map[string]interface{}{ + "grpc_audit_log": convertEvent(event), + } + jsonBytes, err := json.Marshal(jsonContainer) + if err != nil { + grpcLogger.Errorf("failed to marshal AuditEvent data to JSON: %v", err) + return + } + l.goLogger.Println(string(jsonBytes)) +} + +// loggerConfig represents the configuration for the stdout logger. +// It is currently empty and implements the audit.Logger interface by embedding it. +type loggerConfig struct { + audit.LoggerConfig +} + +type loggerBuilder struct { + goLogger *log.Logger +} + +func (loggerBuilder) Name() string { + return "stdout_logger" +} + +// Build returns a new instance of the stdout logger. +// Passed in configuration is ignored as the stdout logger does not +// expect any configuration to be provided. +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &logger{ + goLogger: lb.goLogger, + } +} + +// ParseLoggerConfig is a no-op since the stdout logger does not accept any configuration. 
+func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + if len(config) != 0 && string(config) != "{}" { + grpcLogger.Warningf("Stdout logger doesn't support custom configs. Ignoring:\n%s", string(config)) + } + return &loggerConfig{}, nil +} + +func convertEvent(auditEvent *audit.Event) *event { + return &event{ + FullMethodName: auditEvent.FullMethodName, + Principal: auditEvent.Principal, + PolicyName: auditEvent.PolicyName, + MatchedRule: auditEvent.MatchedRule, + Authorized: auditEvent.Authorized, + Timestamp: time.Now().Format(time.RFC3339Nano), + } +} diff --git a/authz/audit/stdout/stdout_logger_test.go b/authz/audit/stdout/stdout_logger_test.go new file mode 100644 index 000000000000..a389b942e2c7 --- /dev/null +++ b/authz/audit/stdout/stdout_logger_test.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package stdout + +import ( + "bytes" + "encoding/json" + "log" + "os" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestStdoutLogger_Log(t *testing.T) { + tests := map[string]struct { + event *audit.Event + wantMessage string + wantErr string + }{ + "few fields": { + event: &audit.Event{PolicyName: "test policy", Principal: "test principal"}, + wantMessage: `{"fullMethodName":"","principal":"test principal","policyName":"test policy","matchedRule":"","authorized":false`, + }, + "all fields": { + event: &audit.Event{ + FullMethodName: "/helloworld.Greeter/SayHello", + Principal: "spiffe://example.org/ns/default/sa/default/backend", + PolicyName: "example-policy", + MatchedRule: "dev-access", + Authorized: true, + }, + wantMessage: `{"fullMethodName":"/helloworld.Greeter/SayHello",` + + `"principal":"spiffe://example.org/ns/default/sa/default/backend","policyName":"example-policy",` + + `"matchedRule":"dev-access","authorized":true`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + before := time.Now().Unix() + var buf bytes.Buffer + builder := &loggerBuilder{goLogger: log.New(&buf, "", 0)} + auditLogger := builder.Build(nil) + + auditLogger.Log(test.event) + + var container map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &container); err != nil { + t.Fatalf("Failed to unmarshal audit log event: %v", err) + } + innerEvent := extractEvent(container["grpc_audit_log"].(map[string]interface{})) + if innerEvent.Timestamp == "" { + t.Fatalf("Resulted event has no timestamp: %v", innerEvent) + } + after := time.Now().Unix() + innerEventUnixTime, err := time.Parse(time.RFC3339Nano, innerEvent.Timestamp) + if err != nil { + t.Fatalf("Failed to convert event timestamp into Unix time format: %v", 
err) + } + if before > innerEventUnixTime.Unix() || after < innerEventUnixTime.Unix() { + t.Errorf("The audit event timestamp is outside of the test interval: test start %v, event timestamp %v, test end %v", before, innerEventUnixTime.Unix(), after) + } + if diff := cmp.Diff(trimEvent(innerEvent), test.event); diff != "" { + t.Fatalf("Unexpected message\ndiff (-got +want):\n%s", diff) + } + }) + } +} + +func (s) TestStdoutLoggerBuilder_NilConfig(t *testing.T) { + builder := &loggerBuilder{ + goLogger: log.New(os.Stdout, "", log.LstdFlags), + } + config, err := builder.ParseLoggerConfig(nil) + if err != nil { + t.Fatalf("Failed to parse stdout logger configuration: %v", err) + } + if l := builder.Build(config); l == nil { + t.Fatal("Failed to build stdout audit logger") + } +} + +func (s) TestStdoutLoggerBuilder_Registration(t *testing.T) { + if audit.GetLoggerBuilder("stdout_logger") == nil { + t.Fatal("stdout logger is not registered") + } +} + +// extractEvent extracts an stdout.event from a map +// unmarshalled from a logged json message. +func extractEvent(container map[string]interface{}) event { + return event{ + FullMethodName: container["rpc_method"].(string), + Principal: container["principal"].(string), + PolicyName: container["policy_name"].(string), + MatchedRule: container["matched_rule"].(string), + Authorized: container["authorized"].(bool), + Timestamp: container["timestamp"].(string), + } +} + +// trimEvent converts a logged stdout.event into an audit.Event +// by removing Timestamp field. It is used for comparing events during testing. 
+func trimEvent(testEvent event) *audit.Event { + return &audit.Event{ + FullMethodName: testEvent.FullMethodName, + Principal: testEvent.Principal, + PolicyName: testEvent.PolicyName, + MatchedRule: testEvent.MatchedRule, + Authorized: testEvent.Authorized, + } +} From 390c392f8422e46121b85fba4d0c1c9faf37317d Mon Sep 17 00:00:00 2001 From: Gregory Cooke Date: Wed, 17 May 2023 10:21:06 -0400 Subject: [PATCH 931/998] authz: Rbac engine audit logging (#6225) add the functionality to actually do audit logging in rbac_engine.go and associated tests for that functionality. --- authz/grpc_authz_server_interceptors.go | 4 +- authz/rbac_translator.go | 43 +- authz/rbac_translator_test.go | 128 ++- internal/xds/rbac/converter.go | 98 ++ internal/xds/rbac/converter_test.go | 114 +++ internal/xds/rbac/rbac_engine.go | 97 +- internal/xds/rbac/rbac_engine_test.go | 1104 ++++++++++++++++++++--- xds/internal/httpfilter/rbac/rbac.go | 5 +- 8 files changed, 1407 insertions(+), 186 deletions(-) create mode 100644 internal/xds/rbac/converter.go create mode 100644 internal/xds/rbac/converter_test.go diff --git a/authz/grpc_authz_server_interceptors.go b/authz/grpc_authz_server_interceptors.go index ab93af13f37e..3e5f598a97d1 100644 --- a/authz/grpc_authz_server_interceptors.go +++ b/authz/grpc_authz_server_interceptors.go @@ -44,11 +44,11 @@ type StaticInterceptor struct { // NewStatic returns a new StaticInterceptor from a static authorization policy // JSON string. 
func NewStatic(authzPolicy string) (*StaticInterceptor, error) { - rbacs, err := translatePolicy(authzPolicy) + rbacs, policyName, err := translatePolicy(authzPolicy) if err != nil { return nil, err } - chainEngine, err := rbac.NewChainEngine(rbacs) + chainEngine, err := rbac.NewChainEngine(rbacs, policyName) if err != nil { return nil, err } diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index ce5c15cb976d..d88797d49907 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -39,7 +39,7 @@ import ( // This is used when converting a custom config from raw JSON to a TypedStruct // The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/" -const typedURLPrefix = "grpc.authz.audit_logging/" +const typeURLPrefix = "grpc.authz.audit_logging/" type header struct { Key string @@ -62,14 +62,14 @@ type rule struct { } type auditLogger struct { - Name string `json:"name"` - Config *structpb.Struct `json:"config"` - IsOptional bool `json:"is_optional"` + Name string `json:"name"` + Config structpb.Struct `json:"config"` + IsOptional bool `json:"is_optional"` } type auditLoggingOptions struct { - AuditCondition string `json:"audit_condition"` - AuditLoggers []auditLogger `json:"audit_loggers"` + AuditCondition string `json:"audit_condition"` + AuditLoggers []*auditLogger `json:"audit_loggers"` } // Represents the SDK authorization policy provided by user. 
@@ -302,14 +302,13 @@ func (options *auditLoggingOptions) toProtos() (allow *v3rbacpb.RBAC_AuditLoggin deny.AuditCondition = toDenyCondition(v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition(rbacCondition)) } - for i := range options.AuditLoggers { - config := &options.AuditLoggers[i] - if config.Config == nil { - return nil, nil, fmt.Errorf("AuditLogger Config field cannot be nil") + for i, config := range options.AuditLoggers { + if config.Name == "" { + return nil, nil, fmt.Errorf("missing required field: name in audit_logging_options.audit_loggers[%v]", i) } typedStruct := &v1xdsudpatypepb.TypedStruct{ - TypeUrl: typedURLPrefix + config.Name, - Value: config.Config, + TypeUrl: typeURLPrefix + config.Name, + Value: &config.Config, } customConfig, err := anypb.New(typedStruct) if err != nil { @@ -355,30 +354,30 @@ func toDenyCondition(condition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition) // translatePolicy translates SDK authorization policy in JSON format to two // Envoy RBAC polices (deny followed by allow policy) or only one Envoy RBAC -// allow policy. If the input policy cannot be parsed or is invalid, an error -// will be returned. -func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, error) { +// allow policy. Also returns the overall policy name. If the input policy +// cannot be parsed or is invalid, an error will be returned. 
+func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, string, error) { policy := &authorizationPolicy{} d := json.NewDecoder(bytes.NewReader([]byte(policyStr))) d.DisallowUnknownFields() if err := d.Decode(policy); err != nil { - return nil, fmt.Errorf("failed to unmarshal policy: %v", err) + return nil, "", fmt.Errorf("failed to unmarshal policy: %v", err) } if policy.Name == "" { - return nil, fmt.Errorf(`"name" is not present`) + return nil, "", fmt.Errorf(`"name" is not present`) } if len(policy.AllowRules) == 0 { - return nil, fmt.Errorf(`"allow_rules" is not present`) + return nil, "", fmt.Errorf(`"allow_rules" is not present`) } allowLogger, denyLogger, err := policy.AuditLoggingOptions.toProtos() if err != nil { - return nil, err + return nil, "", err } rbacs := make([]*v3rbacpb.RBAC, 0, 2) if len(policy.DenyRules) > 0 { denyPolicies, err := parseRules(policy.DenyRules, policy.Name) if err != nil { - return nil, fmt.Errorf(`"deny_rules" %v`, err) + return nil, "", fmt.Errorf(`"deny_rules" %v`, err) } denyRBAC := &v3rbacpb.RBAC{ Action: v3rbacpb.RBAC_DENY, @@ -389,8 +388,8 @@ func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, error) { } allowPolicies, err := parseRules(policy.AllowRules, policy.Name) if err != nil { - return nil, fmt.Errorf(`"allow_rules" %v`, err) + return nil, "", fmt.Errorf(`"allow_rules" %v`, err) } allowRBAC := &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: allowPolicies, AuditLoggingOptions: allowLogger} - return append(rbacs, allowRBAC), nil + return append(rbacs, allowRBAC), policy.Name, nil } diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go index fed0ef5c9d33..23b6fb669e9c 100644 --- a/authz/rbac_translator_test.go +++ b/authz/rbac_translator_test.go @@ -36,9 +36,10 @@ import ( func TestTranslatePolicy(t *testing.T) { tests := map[string]struct { - authzPolicy string - wantErr string - wantPolicies []*v3rbacpb.RBAC + authzPolicy string + wantErr string + wantPolicies []*v3rbacpb.RBAC + 
wantPolicyName string }{ "valid policy": { authzPolicy: `{ @@ -210,6 +211,7 @@ func TestTranslatePolicy(t *testing.T) { AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{}, }, }, + wantPolicyName: "authz", }, "allow authenticated": { authzPolicy: `{ @@ -798,6 +800,101 @@ func TestTranslatePolicy(t *testing.T) { }, }, }, + "missing custom config audit logger": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "ON_DENY", + "audit_loggers": [ + { + "name": "stdout_logger", + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: 
&v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, "unknown field": { authzPolicy: `{"random": 123}`, wantErr: "failed to unmarshal policy", @@ -897,7 +994,7 @@ func TestTranslatePolicy(t *testing.T) { }`, wantErr: `failed to unmarshal policy`, }, - "missing custom config audit logger": { + "missing audit logger name": { authzPolicy: `{ "name": "authz", "allow_rules": [ @@ -907,37 +1004,32 @@ func TestTranslatePolicy(t *testing.T) { "principals":["*", ""] } }], - "deny_rules": [ - { - "name": "deny_policy_1", - "source": { - "principals":[ - "spiffe://foo.abc" - ] - } - }], "audit_logging_options": { - "audit_condition": "ON_DENY", + "audit_condition": "NONE", "audit_loggers": [ { - "name": "stdout_logger", + "name": "", + "config": {}, "is_optional": false } ] } }`, - wantErr: "AuditLogger Config field cannot be nil", + wantErr: `missing required field: name`, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - gotPolicies, gotErr := translatePolicy(test.authzPolicy) + 
gotPolicies, gotPolicyName, gotErr := translatePolicy(test.authzPolicy) if gotErr != nil && !strings.HasPrefix(gotErr.Error(), test.wantErr) { t.Fatalf("unexpected error\nwant:%v\ngot:%v", test.wantErr, gotErr) } if diff := cmp.Diff(gotPolicies, test.wantPolicies, protocmp.Transform()); diff != "" { t.Fatalf("unexpected policy\ndiff (-want +got):\n%s", diff) } + if test.wantPolicyName != "" && gotPolicyName != test.wantPolicyName { + t.Fatalf("unexpected policy name\nwant:%v\ngot:%v", test.wantPolicyName, gotPolicyName) + } }) } } @@ -946,7 +1038,7 @@ func anyPbHelper(t *testing.T, in map[string]interface{}, name string) *anypb.An t.Helper() pb, err := structpb.NewStruct(in) typedStruct := &v1xdsudpatypepb.TypedStruct{ - TypeUrl: typedURLPrefix + name, + TypeUrl: typeURLPrefix + name, Value: pb, } if err != nil { diff --git a/internal/xds/rbac/converter.go b/internal/xds/rbac/converter.go new file mode 100644 index 000000000000..db22fd5a9e08 --- /dev/null +++ b/internal/xds/rbac/converter.go @@ -0,0 +1,98 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package rbac + +import ( + "encoding/json" + "fmt" + "strings" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + "google.golang.org/grpc/authz/audit" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +const udpaTypedStuctType = "type.googleapis.com/udpa.type.v1.TypedStruct" +const xdsTypedStuctType = "type.googleapis.com/xds.type.v3.TypedStruct" + +func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig) (audit.Logger, error) { + if loggerConfig.GetAuditLogger().GetTypedConfig() == nil { + return nil, fmt.Errorf("missing required field: TypedConfig") + } + customConfig, loggerName, err := getCustomConfig(loggerConfig.AuditLogger.TypedConfig) + if err != nil { + return nil, err + } + if loggerName == "" { + return nil, fmt.Errorf("field TypedConfig.TypeURL cannot be an empty string") + } + factory := audit.GetLoggerBuilder(loggerName) + if factory == nil { + if loggerConfig.IsOptional { + return nil, nil + } + return nil, fmt.Errorf("no builder registered for %v", loggerName) + } + auditLoggerConfig, err := factory.ParseLoggerConfig(customConfig) + if err != nil { + return nil, fmt.Errorf("custom config could not be parsed by registered factory. 
error: %v", err) + } + auditLogger := factory.Build(auditLoggerConfig) + return auditLogger, nil +} + +func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { + switch config.GetTypeUrl() { + case udpaTypedStuctType: + typedStruct := &v1xdsudpatypepb.TypedStruct{} + if err := config.UnmarshalTo(typedStruct); err != nil { + return nil, "", fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertCustomConfig(typedStruct.TypeUrl, typedStruct.Value) + case xdsTypedStuctType: + typedStruct := &v3xdsxdstypepb.TypedStruct{} + if err := config.UnmarshalTo(typedStruct); err != nil { + return nil, "", fmt.Errorf("failed to unmarshal resource: %v", err) + } + return convertCustomConfig(typedStruct.TypeUrl, typedStruct.Value) + } + return nil, "", fmt.Errorf("custom config not implemented for type [%v]", config.GetTypeUrl()) +} + +func convertCustomConfig(typeURL string, s *structpb.Struct) (json.RawMessage, string, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. + urls := strings.Split(typeURL, "/") + if len(urls) == 0 { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: typeURL must have a url-like format with the typeName being the value after the last /", typeURL, s) + } + name := urls[len(urls)-1] + + rawJSON := []byte("{}") + var err error + if s != nil { + rawJSON, err = json.Marshal(s) + if err != nil { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: %v", typeURL, s, err) + } + } + return rawJSON, name, nil +} diff --git a/internal/xds/rbac/converter_test.go b/internal/xds/rbac/converter_test.go new file mode 100644 index 000000000000..253b9db2d50d --- /dev/null +++ b/internal/xds/rbac/converter_test.go @@ -0,0 +1,114 @@ +/* + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "strings" + "testing" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + "google.golang.org/grpc/authz/audit" + "google.golang.org/protobuf/types/known/anypb" +) + +func (s) TestBuildLoggerErrors(t *testing.T) { + tests := []struct { + name string + loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig + expectedLogger audit.Logger + expectedError string + }{ + { + name: "nil typed config", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + TypedConfig: nil, + }, + }, + expectedError: "missing required field: TypedConfig", + }, + { + name: "Unsupported Type", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: &anypb.Any{}, + }, + }, + expectedError: "custom config not implemented for type ", + }, + { + name: "Empty name", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, ""), + }, + }, + expectedError: "field TypedConfig.TypeURL cannot be an empty string", + }, + { + name: "No registered logger", + 
loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnregisteredLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "UnregisteredLogger"), + }, + IsOptional: false, + }, + expectedError: "no builder registered for UnregisteredLogger", + }, + { + name: "fail to parse custom config", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": "BADVALUE", "xyz": "123"}, "fail to parse custom config_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + expectedError: "custom config could not be parsed", + }, + { + name: "no registered logger but optional passes", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnregisteredLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "no registered logger but optional passes_UnregisteredLogger"), + }, + IsOptional: true, + }, + expectedLogger: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b := TestAuditLoggerCustomConfigBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b) + logger, err := buildLogger(test.loggerConfig) + if err != nil && !strings.HasPrefix(err.Error(), test.expectedError) { + t.Fatalf("expected error: %v. got error: %v", test.expectedError, err) + } else { + if logger != test.expectedLogger { + t.Fatalf("expected logger: %v. 
got logger: %v", test.expectedLogger, logger) + } + } + + }) + } + +} diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go index a212579c63e2..63237affe23f 100644 --- a/internal/xds/rbac/rbac_engine.go +++ b/internal/xds/rbac/rbac_engine.go @@ -30,6 +30,7 @@ import ( v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -51,10 +52,10 @@ type ChainEngine struct { // NewChainEngine returns a chain of RBAC engines, used to make authorization // decisions on incoming RPCs. Returns a non-nil error for invalid policies. -func NewChainEngine(policies []*v3rbacpb.RBAC) (*ChainEngine, error) { +func NewChainEngine(policies []*v3rbacpb.RBAC, policyName string) (*ChainEngine, error) { engines := make([]*engine, 0, len(policies)) for _, policy := range policies { - engine, err := newEngine(policy) + engine, err := newEngine(policy, policyName) if err != nil { return nil, err } @@ -94,13 +95,16 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { switch { case engine.action == v3rbacpb.RBAC_ALLOW && !ok: cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) return status.Errorf(codes.PermissionDenied, "incoming RPC did not match an allow policy") case engine.action == v3rbacpb.RBAC_DENY && ok: cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) return status.Errorf(codes.PermissionDenied, "incoming RPC matched a deny policy %q", matchingPolicyName) } // Every policy in the engine list must be queried. Thus, iterate to the // next policy. + engine.doAuditLogging(rpcData, matchingPolicyName, true) } // If the incoming RPC gets through all of the engines successfully (i.e. 
// doesn't not match an allow or match a deny engine), the RPC is authorized @@ -110,14 +114,18 @@ func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { // engine is used for matching incoming RPCs to policies. type engine struct { - policies map[string]*policyMatcher + // TODO(gtcooke94) - differentiate between `policyName`, `policies`, and `rules` + policyName string + policies map[string]*policyMatcher // action must be ALLOW or DENY. - action v3rbacpb.RBAC_Action + action v3rbacpb.RBAC_Action + auditLoggers []audit.Logger + auditCondition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition } -// newEngine creates an RBAC Engine based on the contents of policy. Returns a +// newEngine creates an RBAC Engine based on the contents of a policy. Returns a // non-nil error if the policy is invalid. -func newEngine(config *v3rbacpb.RBAC) (*engine, error) { +func newEngine(config *v3rbacpb.RBAC, policyName string) (*engine, error) { a := config.GetAction() if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY { return nil, fmt.Errorf("unsupported action %s", config.Action) @@ -131,18 +139,47 @@ func newEngine(config *v3rbacpb.RBAC) (*engine, error) { } policies[name] = matcher } + + auditLoggers, auditCondition, err := parseAuditOptions(config.GetAuditLoggingOptions()) + if err != nil { + return nil, err + } return &engine{ - policies: policies, - action: a, + policyName: policyName, + policies: policies, + action: a, + auditLoggers: auditLoggers, + auditCondition: auditCondition, }, nil } +func parseAuditOptions(opts *v3rbacpb.RBAC_AuditLoggingOptions) ([]audit.Logger, v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition, error) { + if opts == nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, nil + } + var auditLoggers []audit.Logger + for _, logger := range opts.LoggerConfigs { + auditLogger, err := buildLogger(logger) + if err != nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, err + } + if auditLogger == nil { + // This occurs when the 
audit logger is not registered but also + // marked optional. + continue + } + auditLoggers = append(auditLoggers, auditLogger) + } + return auditLoggers, opts.GetAuditCondition(), nil + +} + // findMatchingPolicy determines if an incoming RPC matches a policy. On a // successful match, it returns the name of the matching policy and a true bool // to specify that there was a matching policy found. It returns false in // the case of not finding a matching policy. -func (r *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { - for policy, matcher := range r.policies { +func (e *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { + for policy, matcher := range e.policies { if matcher.match(rpcData) { return policy, true } @@ -238,3 +275,43 @@ type rpcData struct { // handshake. certs []*x509.Certificate } + +func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool) { + // In the RBAC world, we need to have a SPIFFE ID as the principal for this + // to be meaningful + principal := "" + if rpcData.peerInfo != nil && rpcData.peerInfo.AuthInfo != nil && rpcData.peerInfo.AuthInfo.AuthType() == "tls" { + // If AuthType = tls, then we can cast AuthInfo to TLSInfo. 
+ tlsInfo := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo) + if tlsInfo.SPIFFEID != nil { + principal = tlsInfo.SPIFFEID.String() + } + } + + //TODO(gtcooke94) check if we need to log before creating the event + event := &audit.Event{ + FullMethodName: rpcData.fullMethod, + Principal: principal, + PolicyName: e.policyName, + MatchedRule: rule, + Authorized: authorized, + } + for _, logger := range e.auditLoggers { + switch e.auditCondition { + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY: + if !authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW: + if authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW: + logger.Log(event) + } + } +} + +// This is used when converting a custom config from raw JSON to a TypedStruct. +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/". +const typeURLPrefix = "grpc.authz.audit_logging/" diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go index 19bc4e8ca891..32c357f4953f 100644 --- a/internal/xds/rbac/rbac_engine_test.go +++ b/internal/xds/rbac/rbac_engine_test.go @@ -21,10 +21,15 @@ import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "encoding/json" + "fmt" "net" "net/url" + "reflect" "testing" + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -32,12 +37,15 @@ import ( v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" 
"google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" ) type s struct { @@ -62,9 +70,10 @@ func (a *addr) String() string { return a.ipAddress } // raise errors. func (s) TestNewChainEngine(t *testing.T) { tests := []struct { - name string - policies []*v3rbacpb.RBAC - wantErr bool + name string + policies []*v3rbacpb.RBAC + wantErr bool + policyName string }{ { name: "SuccessCaseAnyMatchSingular", @@ -424,16 +433,256 @@ func (s) TestNewChainEngine(t *testing.T) { }, }, }, + { + name: "SimpleAuditLogger", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "SimpleAuditLogger_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + { + name: "AuditLoggerCustomConfig", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + 
{AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "AuditLoggerCustomConfig_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + }, + }, + }, + }, + policyName: "test_policy", + }, + { + name: "AuditLoggerCustomConfigXdsTypedStruct", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createXDSTypedStruct(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "AuditLoggerCustomConfigXdsTypedStruct_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + }, + }, + }, + }, + policyName: "test_policy", + }, + { + name: "Missing Optional AuditLogger doesn't fail", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnsupportedLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "Missing Optional AuditLogger doesn't fail_UnsupportedLogger")}, + IsOptional: true, + }, 
+ }, + }, + }, + }, + }, + { + name: "Missing Non-Optional AuditLogger fails", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnsupportedLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "Missing Non-Optional AuditLogger fails_UnsupportedLogger")}, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Cannot_parse_missing_CustomConfig", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + }, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Cannot_parse_bad_CustomConfig", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + 
AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": "BADVALUE", "xyz": "123"}, "Cannot_parse_bad_CustomConfig_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Cannot_parse_missing_typedConfig_name", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "")}, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if _, err := NewChainEngine(test.policies); (err != nil) != test.wantErr { + b := TestAuditLoggerBufferBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b) + b2 := TestAuditLoggerCustomConfigBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b2) + if _, err := NewChainEngine(test.policies, test.policyName); (err != nil) != test.wantErr { t.Fatalf("NewChainEngine(%+v) returned err: %v, wantErr: %v", test.policies, err, test.wantErr) } }) } } +type rbacQuery struct { + rpcData *rpcData + wantStatusCode codes.Code + wantAuditEvents []*audit.Event +} + // TestChainEngine tests the chain of RBAC Engines by configuring the chain of // engines in 
a certain way in different scenarios. After configuring the chain // of engines in a certain way, this test pings the chain of engines with @@ -446,10 +695,8 @@ func (s) TestChainEngine(t *testing.T) { tests := []struct { name string rbacConfigs []*v3rbacpb.RBAC - rbacQueries []struct { - rpcData *rpcData - wantStatusCode codes.Code - } + rbacQueries []rbacQuery + policyName string }{ // SuccessCaseAnyMatch tests a single RBAC Engine instantiated with // a config with a policy with any rules for both permissions and @@ -471,10 +718,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ { rpcData: &rpcData{ fullMethod: "some method", @@ -505,10 +749,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This RPC should match with the local host fan policy. Thus, // this RPC should be allowed to proceed. { @@ -571,10 +812,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This incoming RPC Call should match with the service admin // policy. { @@ -659,10 +897,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This incoming RPC Call should match with the not-secret-content policy. { rpcData: &rpcData{ @@ -701,10 +936,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This incoming RPC Call should match with the certain-direct-remote-ip policy. 
{ rpcData: &rpcData{ @@ -745,10 +977,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This incoming RPC Call should match with the certain-remote-ip policy. { rpcData: &rpcData{ @@ -785,10 +1014,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This incoming RPC Call shouldn't match with the // certain-destination-ip policy, as the test listens on local // host. @@ -836,10 +1062,7 @@ func (s) TestChainEngine(t *testing.T) { Action: v3rbacpb.RBAC_DENY, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This RPC should match with the allow policy, and shouldn't // match with the deny and thus should be allowed to proceed. { @@ -903,10 +1126,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ // This incoming RPC Call should match with the service admin // policy. No authentication info is provided, so the // authenticated matcher should match to the string matcher on @@ -956,10 +1176,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ { rpcData: &rpcData{ fullMethod: "some method", @@ -992,10 +1209,7 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - rbacQueries: []struct { - rpcData *rpcData - wantStatusCode codes.Code - }{ + rbacQueries: []rbacQuery{ { rpcData: &rpcData{ fullMethod: "some method", @@ -1007,85 +1221,709 @@ func (s) TestChainEngine(t *testing.T) { }, }, }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - // Instantiate the chainedRBACEngine with different configurations that are - // interesting to test and to query. 
- cre, err := NewChainEngine(test.rbacConfigs) - if err != nil { - t.Fatalf("Error constructing RBAC Engine: %v", err) - } - // Query the created chain of RBAC Engines with different args to see - // if the chain of RBAC Engines configured as such works as intended. - for _, data := range test.rbacQueries { - func() { - // Construct the context with three data points that have enough - // information to represent incoming RPC's. This will be how a - // user uses this API. A user will have to put MD, PeerInfo, and - // the connection the RPC is sent on in the context. - ctx := metadata.NewIncomingContext(context.Background(), data.rpcData.md) - - // Make a TCP connection with a certain destination port. The - // address/port of this connection will be used to populate the - // destination ip/port in RPCData struct. This represents what - // the user of ChainEngine will have to place into - // context, as this is only way to get destination ip and port. - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error listening: %v", err) - } - defer lis.Close() - connCh := make(chan net.Conn, 1) - go func() { - conn, err := lis.Accept() - if err != nil { - t.Errorf("Error accepting connection: %v", err) - return - } - connCh <- conn - }() - _, err = net.Dial("tcp", lis.Addr().String()) - if err != nil { - t.Fatalf("Error dialing: %v", err) - } - conn := <-connCh - defer conn.Close() - getConnection = func(context.Context) net.Conn { - return conn - } - ctx = peer.NewContext(ctx, data.rpcData.peerInfo) - stream := &ServerTransportStreamWithMethod{ - method: data.rpcData.fullMethod, - } - - ctx = grpc.NewContextWithServerTransportStream(ctx, stream) - err = cre.IsAuthorized(ctx) - if gotCode := status.Code(err); gotCode != data.wantStatusCode { - t.Fatalf("IsAuthorized(%+v, %+v) returned (%+v), want(%+v)", ctx, data.rpcData.fullMethod, gotCode, data.wantStatusCode) - } - }() - } - }) - } -} - -type ServerTransportStreamWithMethod struct { - method 
string -} - -func (sts *ServerTransportStreamWithMethod) Method() string { - return sts.method -} - -func (sts *ServerTransportStreamWithMethod) SetHeader(md metadata.MD) error { - return nil -} - -func (sts *ServerTransportStreamWithMethod) SendHeader(md metadata.MD) error { - return nil -} - -func (sts *ServerTransportStreamWithMethod) SetTrailer(md metadata.MD) error { - return nil + // AllowAndDenyPolicy tests a policy with an allow (on path) and + // deny (on port) policy chained together. This represents how a user + // configured interceptor would use this, and also is a potential + // configuration for a dynamic xds interceptor. Further, it tests that + // the audit logger works properly in each scenario. + { + name: "AuditLoggingAllowAndDenyPolicy_ON_ALLOW", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: 
&v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Scheme: "spiffe", + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + SPIFFEID: &url.URL{ + Scheme: "spiffe", + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + wantStatusCode: codes.OK, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + Principal: "spiffe://cluster.local/ns/default/sa/admin", + PolicyName: "test_policy", + MatchedRule: "certain-source-ip", + Authorized: true, + }, + }, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. 
+ { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + { + name: "AuditLoggingAllowAndDenyPolicy_ON_DENY", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_DENY_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: 
v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_DENY_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + // Audit logging matches with nothing. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + // Audit logging matches with deny and short circuits. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + }, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + // Audit logging matches with the allow policy. 
+ { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + PolicyName: "test_policy", + MatchedRule: "", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. + // Audit logging will have the deny logged. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + }, + }, + { + name: "AuditLoggingAllowAndDenyPolicy_NONE", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_NONE_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: 
[]*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_NONE_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + // Audit logging is NONE. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + // Audit logging is NONE. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + // Audit logging is NONE. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. + // Audit logging is NONE. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + { + name: "AuditLoggingAllowAndDenyPolicy_ON_DENY_AND_ALLOW", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_DENY_AND_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, 
map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_DENY_AND_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + // Audit logging matches with nothing. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + PolicyName: "test_policy", + MatchedRule: "certain-source-ip", + Authorized: true, + }, + }, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + // Audit logging matches with deny and short circuits. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + // Audit logging matches with the allow policy. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + PolicyName: "test_policy", + MatchedRule: "", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. + // Audit logging will have the deny logged. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b := TestAuditLoggerBufferBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b) + b2 := TestAuditLoggerCustomConfigBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b2) + + // Instantiate the chainedRBACEngine with different configurations that are + // interesting to test and to query. + cre, err := NewChainEngine(test.rbacConfigs, test.policyName) + if err != nil { + t.Fatalf("Error constructing RBAC Engine: %v", err) + } + // Query the created chain of RBAC Engines with different args to see + // if the chain of RBAC Engines configured as such works as intended. + for _, data := range test.rbacQueries { + func() { + // Construct the context with three data points that have enough + // information to represent incoming RPC's. This will be how a + // user uses this API. A user will have to put MD, PeerInfo, and + // the connection the RPC is sent on in the context. + ctx := metadata.NewIncomingContext(context.Background(), data.rpcData.md) + + // Make a TCP connection with a certain destination port. The + // address/port of this connection will be used to populate the + // destination ip/port in RPCData struct. This represents what + // the user of ChainEngine will have to place into context, + // as this is only way to get destination ip and port. 
+ lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error listening: %v", err) + } + defer lis.Close() + connCh := make(chan net.Conn, 1) + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("Error accepting connection: %v", err) + return + } + connCh <- conn + }() + _, err = net.Dial("tcp", lis.Addr().String()) + if err != nil { + t.Fatalf("Error dialing: %v", err) + } + conn := <-connCh + defer conn.Close() + getConnection = func(context.Context) net.Conn { + return conn + } + ctx = peer.NewContext(ctx, data.rpcData.peerInfo) + stream := &ServerTransportStreamWithMethod{ + method: data.rpcData.fullMethod, + } + + ctx = grpc.NewContextWithServerTransportStream(ctx, stream) + err = cre.IsAuthorized(ctx) + if gotCode := status.Code(err); gotCode != data.wantStatusCode { + t.Fatalf("IsAuthorized(%+v, %+v) returned (%+v), want(%+v)", ctx, data.rpcData.fullMethod, gotCode, data.wantStatusCode) + } + if !reflect.DeepEqual(b.auditEvents, data.wantAuditEvents) { + t.Fatalf("Unexpected audit event for query:%v", data) + } + + // This builder's auditEvents can be shared for several queries, make sure it's empty. + b.auditEvents = nil + }() + } + }) + } +} + +type ServerTransportStreamWithMethod struct { + method string +} + +func (sts *ServerTransportStreamWithMethod) Method() string { + return sts.method +} + +func (sts *ServerTransportStreamWithMethod) SetHeader(md metadata.MD) error { + return nil +} + +func (sts *ServerTransportStreamWithMethod) SendHeader(md metadata.MD) error { + return nil +} + +func (sts *ServerTransportStreamWithMethod) SetTrailer(md metadata.MD) error { + return nil +} + +// An audit logger that will log to the auditEvents slice. +type TestAuditLoggerBuffer struct { + auditEvents *[]*audit.Event +} + +func (logger *TestAuditLoggerBuffer) Log(e *audit.Event) { + *(logger.auditEvents) = append(*(logger.auditEvents), e) +} + +// Builds TestAuditLoggerBuffer. 
+type TestAuditLoggerBufferBuilder struct { + auditEvents []*audit.Event + testName string +} + +// The required config for TestAuditLoggerBuffer. +type TestAuditLoggerBufferConfig struct { + audit.LoggerConfig +} + +func (b *TestAuditLoggerBufferBuilder) ParseLoggerConfig(configJSON json.RawMessage) (config audit.LoggerConfig, err error) { + return TestAuditLoggerBufferConfig{}, nil +} + +func (b *TestAuditLoggerBufferBuilder) Build(config audit.LoggerConfig) audit.Logger { + return &TestAuditLoggerBuffer{auditEvents: &b.auditEvents} +} + +func (b *TestAuditLoggerBufferBuilder) Name() string { + return b.testName + "_TestAuditLoggerBuffer" +} + +// An audit logger to test using a custom config. +type TestAuditLoggerCustomConfig struct{} + +func (logger *TestAuditLoggerCustomConfig) Log(*audit.Event) {} + +// Build TestAuditLoggerCustomConfig. This builds a TestAuditLoggerCustomConfig +// logger that uses a custom config. +type TestAuditLoggerCustomConfigBuilder struct { + testName string +} + +// The custom config for the TestAuditLoggerCustomConfig logger. +type TestAuditLoggerCustomConfigConfig struct { + audit.LoggerConfig + Abc int + Xyz string +} + +// Parses TestAuditLoggerCustomConfigConfig. Hard-coded to match with it's test +// case above. +func (b TestAuditLoggerCustomConfigBuilder) ParseLoggerConfig(configJSON json.RawMessage) (audit.LoggerConfig, error) { + c := TestAuditLoggerCustomConfigConfig{} + err := json.Unmarshal(configJSON, &c) + if err != nil { + return nil, fmt.Errorf("could not parse custom config: %v", err) + } + return c, nil +} + +func (b *TestAuditLoggerCustomConfigBuilder) Build(config audit.LoggerConfig) audit.Logger { + return &TestAuditLoggerCustomConfig{} +} + +func (b *TestAuditLoggerCustomConfigBuilder) Name() string { + return b.testName + "_TestAuditLoggerCustomConfig" +} + +// Builds custom configs for audit logger RBAC protos. 
+func createUDPATypedStruct(t *testing.T, in map[string]interface{}, name string) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + if err != nil { + t.Fatalf("createUDPATypedStructFailed during structpb.NewStruct: %v", err) + } + typedURL := "" + if name != "" { + typedURL = typeURLPrefix + name + } + typedStruct := &v1xdsudpatypepb.TypedStruct{ + TypeUrl: typedURL, + Value: pb, + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + t.Fatalf("createUDPATypedStructFailed during anypb.New: %v", err) + } + return customConfig +} + +// Builds custom configs for audit logger RBAC protos. +func createXDSTypedStruct(t *testing.T, in map[string]interface{}, name string) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + if err != nil { + t.Fatalf("createXDSTypedStructFailed during structpb.NewStruct: %v", err) + } + typedStruct := &v3xdsxdstypepb.TypedStruct{ + TypeUrl: typeURLPrefix + name, + Value: pb, + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + t.Fatalf("createXDSTypedStructFailed during anypb.New: %v", err) + } + return customConfig } diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go index 209283c3bf59..277fcfc5927a 100644 --- a/xds/internal/httpfilter/rbac/rbac.go +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -126,7 +126,10 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { return config{}, nil } - ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}) + // TODO(gregorycooke) - change the call chain to here so we have the filter + // name to input here instead of an empty string. 
It will come from here: + // https://github.com/grpc/grpc-go/blob/eff0942e95d93112921414aee758e619ec86f26f/xds/internal/xdsclient/xdsresource/unmarshal_lds.go#L199 + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") if err != nil { // "At this time, if the RBAC.action is Action.LOG then the policy will be // completely ignored, as if RBAC was not configurated." - A41 From 417d4b6895679bd9378cb37c2afecf6a292eb267 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 17 May 2023 14:57:56 -0700 Subject: [PATCH 932/998] examples: add error_handling example; move errors to error_details (#6293) --- examples/examples_test.sh | 9 ++- .../{errors => error_details}/README.md | 0 .../{errors => error_details}/client/main.go | 0 .../{errors => error_details}/server/main.go | 0 examples/features/error_handling/README.md | 22 ++++++ .../features/error_handling/client/main.go | 70 +++++++++++++++++++ .../features/error_handling/server/main.go | 65 +++++++++++++++++ 7 files changed, 163 insertions(+), 3 deletions(-) rename examples/features/{errors => error_details}/README.md (100%) rename examples/features/{errors => error_details}/client/main.go (100%) rename examples/features/{errors => error_details}/server/main.go (100%) create mode 100644 examples/features/error_handling/README.md create mode 100644 examples/features/error_handling/client/main.go create mode 100644 examples/features/error_handling/server/main.go diff --git a/examples/examples_test.sh b/examples/examples_test.sh index 9ae49d37c5e9..bead4d0dcbe1 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -57,7 +57,8 @@ EXAMPLES=( "features/compression" "features/deadline" "features/encryption/TLS" - "features/errors" + "features/error_details" + "features/error_handling" "features/interceptor" "features/load_balancing" "features/metadata" @@ -109,7 +110,8 @@ declare -A EXPECTED_SERVER_OUTPUT=( ["features/compression"]="UnaryEcho called with message 
\"compress\"" ["features/deadline"]="" ["features/encryption/TLS"]="" - ["features/errors"]="" + ["features/error_details"]="" + ["features/error_handling"]="" ["features/interceptor"]="unary echoing message \"hello world\"" ["features/load_balancing"]="serving on :50051" ["features/metadata"]="message:\"this is examples/metadata\", sending echo" @@ -130,7 +132,8 @@ declare -A EXPECTED_CLIENT_OUTPUT=( ["features/compression"]="UnaryEcho call returned \"compress\", " ["features/deadline"]="wanted = DeadlineExceeded, got = DeadlineExceeded" ["features/encryption/TLS"]="UnaryEcho: hello world" - ["features/errors"]="Greeting: Hello world" + ["features/error_details"]="Greeting: Hello world" + ["features/error_handling"]="Received error" ["features/interceptor"]="UnaryEcho: hello world" ["features/load_balancing"]="calling helloworld.Greeter/SayHello with pick_first" ["features/metadata"]="this is examples/metadata" diff --git a/examples/features/errors/README.md b/examples/features/error_details/README.md similarity index 100% rename from examples/features/errors/README.md rename to examples/features/error_details/README.md diff --git a/examples/features/errors/client/main.go b/examples/features/error_details/client/main.go similarity index 100% rename from examples/features/errors/client/main.go rename to examples/features/error_details/client/main.go diff --git a/examples/features/errors/server/main.go b/examples/features/error_details/server/main.go similarity index 100% rename from examples/features/errors/server/main.go rename to examples/features/error_details/server/main.go diff --git a/examples/features/error_handling/README.md b/examples/features/error_handling/README.md new file mode 100644 index 000000000000..c6c4ba2c2e2d --- /dev/null +++ b/examples/features/error_handling/README.md @@ -0,0 +1,22 @@ +# Description + +This example demonstrates basic RPC error handling in gRPC. 
+ +# Run the sample code + +Run the server, which returns an error if the RPC request's `Name` field is +empty. + +```sh +$ go run ./server/main.go +``` + +Then run the client in another terminal, which does two requests: one with an +empty Name field and one with it populated with the current username provided by +os/user. + +```sh +$ go run ./client/main.go +``` + +It should print the status codes it received from the server. diff --git a/examples/features/error_handling/client/main.go b/examples/features/error_handling/client/main.go new file mode 100644 index 000000000000..bd4ec0a1d33f --- /dev/null +++ b/examples/features/error_handling/client/main.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client. +package main + +import ( + "context" + "flag" + "log" + "os/user" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "google.golang.org/grpc/status" +) + +var addr = flag.String("addr", "localhost:50052", "the address to connect to") + +func main() { + flag.Parse() + + name := "unknown" + if u, err := user.Current(); err == nil && u.Username != "" { + name = u.Username + } + + // Set up a connection to the server. 
+ conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("Failed to connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + for _, reqName := range []string{"", name} { + log.Printf("Calling SayHello with Name:%q", reqName) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: reqName}) + if err != nil { + if status.Code(err) != codes.InvalidArgument { + log.Printf("Received unexpected error: %v", err) + continue + } + log.Printf("Received error: %v", err) + continue + } + log.Printf("Received response: %s", r.Message) + } +} diff --git a/examples/features/error_handling/server/main.go b/examples/features/error_handling/server/main.go new file mode 100644 index 000000000000..4471c560add9 --- /dev/null +++ b/examples/features/error_handling/server/main.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + pb "google.golang.org/grpc/examples/helloworld/helloworld" +) + +var port = flag.Int("port", 50052, "port number") + +// server is used to implement helloworld.GreeterServer. 
+type server struct { + pb.UnimplementedGreeterServer +} + +// SayHello implements helloworld.GreeterServer. +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + if in.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "request missing required field: Name") + } + return &pb.HelloReply{Message: "Hello " + in.Name}, nil +} + +func main() { + flag.Parse() + + address := fmt.Sprintf(":%v", *port) + lis, err := net.Listen("tcp", address) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} From 098b2d00c5bcbc5c696dcab9a2a7f7b442f8142e Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 18 May 2023 14:28:53 -0400 Subject: [PATCH 933/998] xds/internal/balancer/outlierdetection: Switch Outlier Detection to use new duration field (#6286) --- .../balancer/cdsbalancer/cdsbalancer.go | 6 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 24 ++--- .../balancer/outlierdetection/balancer.go | 18 ++-- .../outlierdetection/balancer_test.go | 93 +++++++++---------- .../balancer/outlierdetection/config.go | 12 +-- .../e2e_test/outlierdetection_test.go | 24 ++--- 6 files changed, 85 insertions(+), 92 deletions(-) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 91d4a6aa8661..c9a1611c169b 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -308,9 +308,9 @@ func outlierDetectionToConfig(od *xdsresource.OutlierDetection) outlierdetection } return outlierdetection.LBConfig{ - Interval: od.Interval, - BaseEjectionTime: od.BaseEjectionTime, - MaxEjectionTime: od.MaxEjectionTime, + Interval: internalserviceconfig.Duration(od.Interval), + BaseEjectionTime: 
internalserviceconfig.Duration(od.BaseEjectionTime), + MaxEjectionTime: internalserviceconfig.Duration(od.MaxEjectionTime), MaxEjectionPercent: od.MaxEjectionPercent, SuccessRateEjection: sre, FailurePercentageEjection: fpe, diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index d69465a96274..35923bc8624a 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -444,9 +444,9 @@ func (s) TestHandleClusterUpdate(t *testing.T) { LBPolicy: wrrLocalityLBConfigJSON, }, wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfig, outlierdetection.LBConfig{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + Interval: internalserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, SuccessRateEjection: &outlierdetection.SuccessRateEjection{ StdevFactor: 1900, @@ -918,9 +918,9 @@ func (s) TestOutlierDetectionToConfig(t *testing.T) { FailurePercentageRequestVolume: 50, }, odLBCfgWant: outlierdetection.LBConfig{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + Interval: internalserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, SuccessRateEjection: nil, FailurePercentageEjection: &outlierdetection.FailurePercentageEjection{ @@ -951,9 +951,9 @@ func (s) TestOutlierDetectionToConfig(t *testing.T) { FailurePercentageRequestVolume: 50, }, odLBCfgWant: outlierdetection.LBConfig{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + Interval: 
internalserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, SuccessRateEjection: &outlierdetection.SuccessRateEjection{ StdevFactor: 1900, @@ -981,9 +981,9 @@ func (s) TestOutlierDetectionToConfig(t *testing.T) { FailurePercentageRequestVolume: 50, }, odLBCfgWant: outlierdetection.LBConfig{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + Interval: internalserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, SuccessRateEjection: &outlierdetection.SuccessRateEjection{ StdevFactor: 1900, diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 1b35f518b48b..548514f6d05d 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -225,9 +225,9 @@ func (b *outlierDetectionBalancer) onIntervalConfig() { for _, addrInfo := range b.addrs { addrInfo.callCounter.clear() } - interval = b.cfg.Interval + interval = time.Duration(b.cfg.Interval) } else { - interval = b.cfg.Interval - now().Sub(b.timerStartTime) + interval = time.Duration(b.cfg.Interval) - now().Sub(b.timerStartTime) if interval < 0 { interval = 0 } @@ -589,14 +589,14 @@ func (b *outlierDetectionBalancer) Target() string { return b.cc.Target() } -func max(x, y int64) int64 { +func max(x, y time.Duration) time.Duration { if x < y { return y } return x } -func min(x, y int64) int64 { +func min(x, y time.Duration) time.Duration { if x < y { return x } @@ -754,10 +754,10 @@ func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { // to uneject the address below. 
continue } - et := b.cfg.BaseEjectionTime.Nanoseconds() * addrInfo.ejectionTimeMultiplier - met := max(b.cfg.BaseEjectionTime.Nanoseconds(), b.cfg.MaxEjectionTime.Nanoseconds()) - curTimeAfterEt := now().After(addrInfo.latestEjectionTimestamp.Add(time.Duration(min(et, met)))) - if curTimeAfterEt { + et := time.Duration(b.cfg.BaseEjectionTime) * time.Duration(addrInfo.ejectionTimeMultiplier) + met := max(time.Duration(b.cfg.BaseEjectionTime), time.Duration(b.cfg.MaxEjectionTime)) + uet := addrInfo.latestEjectionTimestamp.Add(min(et, met)) + if now().After(uet) { b.unejectAddress(addrInfo) } } @@ -767,7 +767,7 @@ func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { if b.intervalTimer != nil { b.intervalTimer.Stop() } - b.intervalTimer = afterFunc(b.cfg.Interval, b.intervalTimerAlgorithm) + b.intervalTimer = afterFunc(time.Duration(b.cfg.Interval), b.intervalTimerAlgorithm) } // addrsWithAtLeastRequestVolume returns a slice of address information of all diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go index 41447164c013..4f542d61e572 100644 --- a/xds/internal/balancer/outlierdetection/balancer_test.go +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -37,7 +37,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -78,7 +78,6 @@ func (s) TestParseConfig(t *testing.T) { { name: "noop-lb-config", input: `{ - "interval": 9223372036854775807, "childPolicy": [ { "xds_cluster_impl_experimental": { @@ -88,8 +87,7 @@ func (s) TestParseConfig(t *testing.T) { ] }`, wantCfg: &LBConfig{ - Interval: math.MaxInt64, - ChildPolicy: 
&internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "xds_cluster_impl_experimental", Config: &clusterimpl.LBConfig{ Cluster: "test_cluster", @@ -100,9 +98,9 @@ func (s) TestParseConfig(t *testing.T) { { name: "good-lb-config", input: `{ - "interval": 10000000000, - "baseEjectionTime": 30000000000, - "maxEjectionTime": 300000000000, + "interval": "10s", + "baseEjectionTime": "30s", + "maxEjectionTime": "300s", "maxEjectionPercent": 10, "successRateEjection": { "stdevFactor": 1900, @@ -125,9 +123,9 @@ func (s) TestParseConfig(t *testing.T) { ] }`, wantCfg: &LBConfig{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, SuccessRateEjection: &SuccessRateEjection{ StdevFactor: 1900, @@ -141,7 +139,7 @@ func (s) TestParseConfig(t *testing.T) { MinimumHosts: 5, RequestVolume: 50, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "xds_cluster_impl_experimental", Config: &clusterimpl.LBConfig{ Cluster: "test_cluster", @@ -151,18 +149,18 @@ func (s) TestParseConfig(t *testing.T) { }, { name: "interval-is-negative", - input: `{"interval": -10}`, - wantErr: "OutlierDetectionLoadBalancingConfig.interval = -10ns; must be >= 0", + input: `{"interval": "-10s"}`, + wantErr: "OutlierDetectionLoadBalancingConfig.interval = -10s; must be >= 0", }, { name: "base-ejection-time-is-negative", - input: `{"baseEjectionTime": -10}`, - wantErr: "OutlierDetectionLoadBalancingConfig.base_ejection_time = -10ns; must be >= 0", + input: `{"baseEjectionTime": "-10s"}`, + wantErr: "OutlierDetectionLoadBalancingConfig.base_ejection_time = -10s; must be >= 0", }, { name: "max-ejection-time-is-negative", - input: `{"maxEjectionTime": -10}`, - wantErr: 
"OutlierDetectionLoadBalancingConfig.max_ejection_time = -10ns; must be >= 0", + input: `{"maxEjectionTime": "-10s"}`, + wantErr: "OutlierDetectionLoadBalancingConfig.max_ejection_time = -10s; must be >= 0", }, { name: "max-ejection-percent-is-greater-than-100", @@ -199,9 +197,9 @@ func (s) TestParseConfig(t *testing.T) { { name: "child-policy-not-present", input: `{ - "interval": 10000000000, - "baseEjectionTime": 30000000000, - "maxEjectionTime": 300000000000, + "interval": "10s", + "baseEjectionTime": "30s", + "maxEjectionTime": "300s", "maxEjectionPercent": 10, "successRateEjection": { "stdevFactor": 1900, @@ -221,7 +219,6 @@ func (s) TestParseConfig(t *testing.T) { { name: "child-policy-present-but-parse-error", input: `{ - "interval": 9223372036854775807, "childPolicy": [ { "errParseConfigBalancer": { @@ -235,7 +232,6 @@ func (s) TestParseConfig(t *testing.T) { { name: "no-supported-child-policy", input: `{ - "interval": 9223372036854775807, "childPolicy": [ { "doesNotExistBalancer": { @@ -258,7 +254,7 @@ func (s) TestParseConfig(t *testing.T) { ] }`, wantCfg: &LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "xds_cluster_impl_experimental", Config: &clusterimpl.LBConfig{ Cluster: "test_cluster", @@ -362,8 +358,7 @@ func (s) TestChildBasicOperations(t *testing.T) { // it's first update. 
od.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - Interval: math.MaxInt64, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name() + "child1", Config: bc, }, @@ -386,7 +381,7 @@ func (s) TestChildBasicOperations(t *testing.T) { od.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ Interval: math.MaxInt64, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name() + "child2", Config: emptyChildConfig{}, }, @@ -475,9 +470,9 @@ func (s) TestUpdateAddresses(t *testing.T) { }, }, BalancerConfig: &LBConfig{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, FailurePercentageEjection: &FailurePercentageEjection{ Threshold: 50, @@ -485,7 +480,7 @@ func (s) TestUpdateAddresses(t *testing.T) { MinimumHosts: 2, RequestVolume: 3, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -651,14 +646,14 @@ func (s) TestDurationOfInterval(t *testing.T) { od.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - Interval: 8 * time.Second, + Interval: iserviceconfig.Duration(8 * time.Second), SuccessRateEjection: &SuccessRateEjection{ StdevFactor: 1900, EnforcementPercentage: 100, MinimumHosts: 5, RequestVolume: 100, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -691,14 +686,14 @@ func (s) TestDurationOfInterval(t *testing.T) { // interval timer of ~4 seconds. 
od.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - Interval: 9 * time.Second, + Interval: iserviceconfig.Duration(9 * time.Second), SuccessRateEjection: &SuccessRateEjection{ StdevFactor: 1900, EnforcementPercentage: 100, MinimumHosts: 5, RequestVolume: 100, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -718,8 +713,8 @@ func (s) TestDurationOfInterval(t *testing.T) { // interval timer at all due to it being a no-op. od.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &LBConfig{ - Interval: 10 * time.Second, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -793,8 +788,8 @@ func (s) TestEjectUnejectSuccessRate(t *testing.T) { }, BalancerConfig: &LBConfig{ Interval: math.MaxInt64, // so the interval will never run unless called manually in test. - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, FailurePercentageEjection: &FailurePercentageEjection{ Threshold: 50, @@ -802,7 +797,7 @@ func (s) TestEjectUnejectSuccessRate(t *testing.T) { MinimumHosts: 3, RequestVolume: 3, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -997,8 +992,8 @@ func (s) TestEjectFailureRate(t *testing.T) { }, BalancerConfig: &LBConfig{ Interval: math.MaxInt64, // so the interval will never run unless called manually in test. 
- BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, SuccessRateEjection: &SuccessRateEjection{ StdevFactor: 500, @@ -1006,7 +1001,7 @@ func (s) TestEjectFailureRate(t *testing.T) { MinimumHosts: 3, RequestVolume: 3, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -1103,10 +1098,10 @@ func (s) TestEjectFailureRate(t *testing.T) { }, BalancerConfig: &LBConfig{ Interval: math.MaxInt64, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -1173,8 +1168,8 @@ func (s) TestConcurrentOperations(t *testing.T) { }, BalancerConfig: &LBConfig{ Interval: math.MaxInt64, // so the interval will never run unless called manually in test. 
- BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), MaxEjectionPercent: 10, SuccessRateEjection: &SuccessRateEjection{ // Have both Success Rate and Failure Percentage to step through all the interval timer code StdevFactor: 500, @@ -1188,7 +1183,7 @@ func (s) TestConcurrentOperations(t *testing.T) { MinimumHosts: 3, RequestVolume: 3, }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, @@ -1311,7 +1306,7 @@ func (s) TestConcurrentOperations(t *testing.T) { }, BalancerConfig: &LBConfig{ Interval: math.MaxInt64, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: t.Name(), Config: emptyChildConfig{}, }, diff --git a/xds/internal/balancer/outlierdetection/config.go b/xds/internal/balancer/outlierdetection/config.go index c931674ae409..9c4383cf6ece 100644 --- a/xds/internal/balancer/outlierdetection/config.go +++ b/xds/internal/balancer/outlierdetection/config.go @@ -18,9 +18,7 @@ package outlierdetection import ( - "time" - - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -128,15 +126,15 @@ type LBConfig struct { // Interval is the time interval between ejection analysis sweeps. This can // result in both new ejections as well as addresses being returned to // service. Defaults to 10s. - Interval time.Duration `json:"interval,omitempty"` + Interval iserviceconfig.Duration `json:"interval,omitempty"` // BaseEjectionTime is the base time that a host is ejected for. The real // time is equal to the base time multiplied by the number of times the host // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. 
- BaseEjectionTime time.Duration `json:"baseEjectionTime,omitempty"` + BaseEjectionTime iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` // MaxEjectionTime is the maximum time that an address is ejected for. If // not specified, the default value (300s) or the BaseEjectionTime value is // applied, whichever is larger. - MaxEjectionTime time.Duration `json:"maxEjectionTime,omitempty"` + MaxEjectionTime iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` // MaxEjectionPercent is the maximum % of an upstream cluster that can be // ejected due to outlier detection. Defaults to 10% but will eject at least // one host regardless of the value. @@ -148,7 +146,7 @@ type LBConfig struct { // algorithm. If set, failure rate ejections will be performed. FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` // ChildPolicy is the config for the child policy. - ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` + ChildPolicy *iserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } // EqualIgnoringChildPolicy returns whether the LBConfig is same with the diff --git a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go index c687dc576663..e08ddc98ea79 100644 --- a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go +++ b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go @@ -159,9 +159,9 @@ func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { "loadBalancingConfig": [ { "outlier_detection_experimental": { - "interval": 50000000, - "baseEjectionTime": 100000000, - "maxEjectionTime": 300000000000, + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", "maxEjectionPercent": 33, "successRateEjection": { "stdevFactor": 50, @@ -182,9 +182,9 @@ func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { 
"loadBalancingConfig": [ { "outlier_detection_experimental": { - "interval": 50000000, - "baseEjectionTime": 100000000, - "maxEjectionTime": 300000000000, + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", "maxEjectionPercent": 33, "failurePercentageEjection": { "threshold": 50, @@ -277,9 +277,9 @@ func (s) TestNoopConfiguration(t *testing.T) { "loadBalancingConfig": [ { "outlier_detection_experimental": { - "interval": 50000000, - "baseEjectionTime": 100000000, - "maxEjectionTime": 300000000000, + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", "maxEjectionPercent": 33, "childPolicy": [{"round_robin": {}}] } @@ -325,9 +325,9 @@ func (s) TestNoopConfiguration(t *testing.T) { "loadBalancingConfig": [ { "outlier_detection_experimental": { - "interval": 50000000, - "baseEjectionTime": 100000000, - "maxEjectionTime": 300000000000, + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", "maxEjectionPercent": 33, "failurePercentageEjection": { "threshold": 50, From 9b7a947cdcb2cf5664eec045362b5222f7ac7dcc Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 22 May 2023 12:42:45 -0700 Subject: [PATCH 934/998] grpc: support channel idleness (#6263) --- balancer_conn_wrappers.go | 243 ++++++++-- call.go | 5 + clientconn.go | 328 ++++++++++---- clientconn_test.go | 2 +- dialoptions.go | 22 + idle.go | 287 ++++++++++++ idle_test.go | 360 +++++++++++++++ internal/grpcsync/callback_serializer.go | 56 ++- internal/grpcsync/callback_serializer_test.go | 50 ++- picker_wrapper.go | 26 +- resolver_conn_wrapper.go | 98 ++-- stream.go | 5 + test/clientconn_state_transition_test.go | 7 + test/idleness_test.go | 423 ++++++++++++++++++ 14 files changed, 1735 insertions(+), 177 deletions(-) create mode 100644 idle.go create mode 100644 idle_test.go create mode 100644 test/idleness_test.go diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 
1865a3f09c2b..4f9944697dde 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -32,6 +32,15 @@ import ( "google.golang.org/grpc/resolver" ) +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle +) + // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -46,16 +55,25 @@ import ( // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the - // CallbackSerializer. Fields accessed *only* in serializer callbacks, can - // therefore be accessed without a mutex. - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - balancer *gracefulswitch.Balancer - curBalancerName string + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks, can therefore be accessed + // without a mutex. + balancer *gracefulswitch.Balancer + curBalancerName string + + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. + serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } // newCCBalancerWrapper creates a new balancer wrapper. 
The underlying balancer @@ -64,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalanc ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ cc: cc, + opts: bopts, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } @@ -74,8 +93,12 @@ func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalanc // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.mu.Lock() errCh := make(chan error, 1) - ccb.serializer.Schedule(func(_ context.Context) { + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { // If the addresses specified in the update contain addresses of type // "grpclb" and the selected LB policy is not "grpclb", these addresses // will be filtered out and ccs will be modified with the updated @@ -92,16 +115,19 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat } errCh <- ccb.balancer.UpdateClientConnState(*ccs) }) - - // If the balancer wrapper is closed when waiting for this state update to - // be handled, the callback serializer will be closed as well, and we can - // rely on its Done channel to ensure that we don't block here forever. - select { - case err := <-errCh: - return err - case <-ccb.serializer.Done: - return nil + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. 
+ ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") } + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + return <-errCh } // updateSubConnState is invoked by grpc to push a subConn state update to the @@ -120,21 +146,19 @@ func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connecti if sc == nil { return } + ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) -} - -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.ExitIdle() - }) + ccb.mu.Unlock() } func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { ccb.balancer.ResolverError(err) }) + ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -148,42 +172,149 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { return } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() +} - // Use the default LB policy, pick_first, if no LB policy with name is - // found in the registry. 
- builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +func (ccb *ccBalancerWrapper) close() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) +} + +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. 
+func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() + return + } + + ccb.mode = m + done := ccb.serializer.Done + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" + }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-done + // Spawn a goroutine to close the balancer (since it may block trying to + // cleanup all allocated resources) and return early. + go b.Close() +} + +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. 
+ ccb.mu.Unlock() + return + } - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees that mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() return } - ccb.curBalancerName = builder.Name() + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + }) + ccb.mu.Unlock() + + <-done } -func (ccb *ccBalancerWrapper) close() { - // Close the serializer to ensure that no more calls from gRPC are sent to - // the balancer. We don't have to worry about suppressing calls from a - // closed balancer because these are handled by the ClientConn (balancer - // wrapper is only ever closed when the ClientConn is closed). 
- ccb.serializerCancel() - <-ccb.serializer.Done - ccb.balancer.Close() +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } + if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } @@ -200,6 +331,18 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -208,6 +351,10 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -216,6 +363,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + if ccb.isIdleOrClosed() { + return + } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. 
If the picker is @@ -226,6 +377,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } diff --git a/call.go b/call.go index 9e20e4d385f9..e6a1dc5d75ed 100644 --- a/call.go +++ b/call.go @@ -27,6 +27,11 @@ import ( // // All errors returned by Invoke are compatible with the status package. func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/clientconn.go b/clientconn.go index 50d08a49a205..1def61e5a23d 100644 --- a/clientconn.go +++ b/clientconn.go @@ -69,6 +69,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -134,17 +137,29 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. + cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) disableGlobalOpts := false for _, opt := range opts { @@ -243,67 +258,175 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() + // This creates the name resolver, load balancer, blocking picker etc. 
+ if err := cc.exitIdleMode(); err != nil { + return nil, err } - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil } - cc.mu.Lock() - cc.resolverWrapper = rWrapper - cc.mu.Unlock() // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { + for { + s := cc.GetState() + if s == connectivity.Idle { cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. 
- if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { return nil, err } - return nil, ctx.Err() } } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } } +} - return cc, nil +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + logger.Error("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. 
If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper() + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() + + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err + } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") + } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + logger.Error("ClientConn asked to enter idle mode when not active") + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. 
So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten everytime we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle + cc.mu.Unlock() + + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil } // validateTransportCredentials performs a series of checks on the configured @@ -350,17 +473,7 @@ func (cc *ClientConn) validateTransportCredentials() error { // Doesn't grab cc.mu as this method is expected to be called only at Dial time. func (cc *ClientConn) channelzRegistration(target string) { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.addTraceEvent("created") cc.csMgr.channelzID = cc.channelzID } @@ -509,6 +622,7 @@ type ClientConn struct { channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). 
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idlenessManager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -529,11 +643,31 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -573,7 +707,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. 
func (cc *ClientConn) Connect() { - cc.balancerWrapper.exitIdle() + cc.balancerWrapper.exitIdleMode() } func (cc *ClientConn) scWatcher() { @@ -1061,39 +1195,40 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - cc.blockingpicker.close() + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - ted := &channelz.TraceEventDesc{ - Desc: "Channel deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. @@ -1735,3 +1870,32 @@ func (cc *ClientConn) determineAuthority() error { channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. 
+func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil +} diff --git a/clientconn_test.go b/clientconn_test.go index 9004f3177fdd..3cd04a743444 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -370,7 +370,7 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { }() bc := backoff.Config{ BaseDelay: 200 * time.Millisecond, - Multiplier: 1.1, + Multiplier: 2.0, Jitter: 0, MaxDelay: 120 * time.Second, } diff --git a/dialoptions.go b/dialoptions.go index cdc8263bda65..51c8997d5d18 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -77,6 +77,7 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration } // DialOption configures how we set up the connection. 
@@ -627,6 +628,7 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + idleTimeout: 30 * time.Minute, } } @@ -655,3 +657,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// A default timeout of 30 min will be used if this dial option is not set at +// dial time and idleness can be disabled by passing a timeout of zero. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} diff --git a/idle.go b/idle.go new file mode 100644 index 000000000000..dc3dc72f6b09 --- /dev/null +++ b/idle.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. 
+var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type idlenessEnforcer interface { + exitIdleMode() error + enterIdleMode() error +} + +// idlenessManager defines the functionality required to track RPC activity on a +// channel. +type idlenessManager interface { + onCallBegin() error + onCallEnd() + close() +} + +type noopIdlenessManager struct{} + +func (noopIdlenessManager) onCallBegin() error { return nil } +func (noopIdlenessManager) onCallEnd() {} +func (noopIdlenessManager) close() {} + +// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +// operations to synchronize access to shared state and a mutex to guarantee +// mutual exclusion in a critical section. +type idlenessManagerImpl struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and onCallBegin() + // is trying to prevent the channel from going idle. 
+ // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// newIdlenessManager creates a new idleness manager implementation for the +// given idle timeout. +func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { + if idleTimeout == 0 { + return noopIdlenessManager{} + } + + i := &idlenessManagerImpl{ + enforcer: enforcer, + timeout: int64(idleTimeout), + } + i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) + return i +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if i.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + i.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (i *idlenessManagerImpl) handleIdleTimeout() { + if i.isClosed() { + return + } + + if atomic.LoadInt32(&i.activeCallsCount) > 0 { + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. 
+ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) + i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if i.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.resetIdleTimer(time.Duration(i.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. 
+func (i *idlenessManagerImpl) tryEnterIdleMode() bool { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + return false + } + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + return false + } + + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. + if err := i.enforcer.enterIdleMode(); err != nil { + logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. + i.actuallyIdle = true + return true +} + +// onCallBegin is invoked at the start of every RPC. +func (i *idlenessManagerImpl) onCallBegin() error { + if i.isClosed() { + return nil + } + + if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := i.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&i.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil +} + +// exitIdleMode instructs the channel to exit idle mode. +// +// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. 
+func (i *idlenessManagerImpl) exitIdleMode() error { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if !i.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and onCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in onCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + + if err := i.enforcer.exitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + return nil +} + +// onCallEnd is invoked at the end of every RPC. +func (i *idlenessManagerImpl) onCallEnd() { + if i.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. 
+ atomic.AddInt32(&i.activeCallsCount, -1) +} + +func (i *idlenessManagerImpl) isClosed() bool { + return atomic.LoadInt32(&i.closed) == 1 +} + +func (i *idlenessManagerImpl) close() { + atomic.StoreInt32(&i.closed, 1) + + i.idleMu.Lock() + i.timer.Stop() + i.timer = nil + i.idleMu.Unlock() +} diff --git a/idle_test.go b/idle_test.go new file mode 100644 index 000000000000..a20b4e09947b --- /dev/null +++ b/idle_test.go @@ -0,0 +1,360 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" +) + +const ( + defaultTestIdleTimeout = 500 * time.Millisecond // A short idle_timeout for tests. + defaultTestShortTimeout = 10 * time.Millisecond // A small deadline to wait for events expected to not happen. 
+) + +type testIdlenessEnforcer struct { + exitIdleCh chan struct{} + enterIdleCh chan struct{} +} + +func (ti *testIdlenessEnforcer) exitIdleMode() error { + ti.exitIdleCh <- struct{}{} + return nil + +} + +func (ti *testIdlenessEnforcer) enterIdleMode() error { + ti.enterIdleCh <- struct{}{} + return nil + +} + +func newTestIdlenessEnforcer() *testIdlenessEnforcer { + return &testIdlenessEnforcer{ + exitIdleCh: make(chan struct{}, 1), + enterIdleCh: make(chan struct{}, 1), + } +} + +// overrideNewTimer overrides the new timer creation function by ensuring that a +// message is pushed on the returned channel everytime the timer fires. +func overrideNewTimer(t *testing.T) <-chan struct{} { + t.Helper() + + ch := make(chan struct{}, 1) + origTimeAfterFunc := timeAfterFunc + timeAfterFunc = func(d time.Duration, callback func()) *time.Timer { + return time.AfterFunc(d, func() { + select { + case ch <- struct{}{}: + default: + } + callback() + }) + } + t.Cleanup(func() { timeAfterFunc = origTimeAfterFunc }) + return ch +} + +// TestIdlenessManager_Disabled tests the case where the idleness manager is +// disabled by passing an idle_timeout of 0. Verifies the following things: +// - timer callback does not fire +// - an RPC does not trigger a call to exitIdleMode on the ClientConn +// - more calls to RPC termination (as compared to RPC initiation) does not +// result in an error log +func (s) TestIdlenessManager_Disabled(t *testing.T) { + callbackCh := overrideNewTimer(t) + + // Create an idleness manager that is disabled because of idleTimeout being + // set to `0`. + enforcer := newTestIdlenessEnforcer() + mgr := newIdlenessManager(enforcer, time.Duration(0)) + + // Ensure that the timer callback does not fire within a short deadline. 
+ select { + case <-callbackCh: + t.Fatal("Idle timer callback fired when manager is disabled") + case <-time.After(defaultTestShortTimeout): + } + + // The first invocation of onCallBegin() would lead to a call to + // exitIdleMode() on the enforcer, unless the idleness manager is disabled. + mgr.onCallBegin() + select { + case <-enforcer.exitIdleCh: + t.Fatalf("exitIdleMode() called on enforcer when manager is disabled") + case <-time.After(defaultTestShortTimeout): + } + + // If the number of calls to onCallEnd() exceeds the number of calls to + // onCallBegin(), the idleness manager is expected to throw an error log + // (which will cause our TestLogger to fail the test). But since the manager + // is disabled, this should not happen. + mgr.onCallEnd() + mgr.onCallEnd() + + // The idleness manager is explicitly not closed here. But since the manager + // is disabled, it will not start the run goroutine, and hence we expect the + // leakchecker to not find any leaked goroutines. +} + +// TestIdlenessManager_Enabled_TimerFires tests the case where the idle manager +// is enabled. Ensures that when there are no RPCs, the timer callback is +// invoked and the enterIdleMode() method is invoked on the enforcer. +func (s) TestIdlenessManager_Enabled_TimerFires(t *testing.T) { + callbackCh := overrideNewTimer(t) + + enforcer := newTestIdlenessEnforcer() + mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout)) + defer mgr.close() + + // Ensure that the timer callback fires within a appropriate amount of time. + select { + case <-callbackCh: + case <-time.After(2 * defaultTestIdleTimeout): + t.Fatal("Timeout waiting for idle timer callback to fire") + } + + // Ensure that the channel moves to idle mode eventually. 
+ select { + case <-enforcer.enterIdleCh: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout waiting for channel to move to idle") + } +} + +// TestIdlenessManager_Enabled_OngoingCall tests the case where the idle manager +// is enabled. Ensures that when there is an ongoing RPC, the channel does not +// enter idle mode. +func (s) TestIdlenessManager_Enabled_OngoingCall(t *testing.T) { + callbackCh := overrideNewTimer(t) + + enforcer := newTestIdlenessEnforcer() + mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout)) + defer mgr.close() + + // Fire up a goroutine that simulates an ongoing RPC that is terminated + // after the timer callback fires for the first time. + timerFired := make(chan struct{}) + go func() { + mgr.onCallBegin() + <-timerFired + mgr.onCallEnd() + }() + + // Ensure that the timer callback fires and unblock the above goroutine. + select { + case <-callbackCh: + close(timerFired) + case <-time.After(2 * defaultTestIdleTimeout): + t.Fatal("Timeout waiting for idle timer callback to fire") + } + + // The invocation of the timer callback should not put the channel in idle + // mode since we had an ongoing RPC. + select { + case <-enforcer.enterIdleCh: + t.Fatalf("enterIdleMode() called on enforcer when active RPC exists") + case <-time.After(defaultTestShortTimeout): + } + + // Since we terminated the ongoing RPC and we have no other active RPCs, the + // channel must move to idle eventually. + select { + case <-enforcer.enterIdleCh: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout waiting for channel to move to idle") + } +} + +// TestIdlenessManager_Enabled_ActiveSinceLastCheck tests the case where the +// idle manager is enabled. Ensures that when there are active RPCs in the last +// period (even though there is no active call when the timer fires), the +// channel does not enter idle mode. 
+func (s) TestIdlenessManager_Enabled_ActiveSinceLastCheck(t *testing.T) {
+ callbackCh := overrideNewTimer(t)
+
+ enforcer := newTestIdlenessEnforcer()
+ mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout))
+ defer mgr.close()
+
+ // Fire up a goroutine that simulates unary RPCs until the timer callback
+ // fires.
+ timerFired := make(chan struct{})
+ go func() {
+ for ; ; <-time.After(defaultTestShortTimeout) {
+ mgr.onCallBegin()
+ mgr.onCallEnd()
+
+ select {
+ case <-timerFired:
+ return
+ default:
+ }
+ }
+ }()
+
+ // Ensure that the timer callback fires, and that we don't enter idle as
+ // part of this invocation of the timer callback, since we had some RPCs in
+ // this period.
+ select {
+ case <-callbackCh:
+ close(timerFired)
+ case <-time.After(2 * defaultTestIdleTimeout):
+ t.Fatal("Timeout waiting for idle timer callback to fire")
+ }
+ select {
+ case <-enforcer.enterIdleCh:
+ t.Fatalf("enterIdleMode() called on enforcer when one RPC completed in the last period")
+ case <-time.After(defaultTestShortTimeout):
+ }
+
+ // Since the unary RPC terminated and we have no other active RPCs, the
+ // channel must move to idle eventually.
+ select {
+ case <-enforcer.enterIdleCh:
+ case <-time.After(defaultTestTimeout):
+ t.Fatal("Timeout waiting for channel to move to idle")
+ }
+}
+
+// TestIdlenessManager_Enabled_ExitIdleOnRPC tests the case where the idle
+// manager is enabled. Ensures that the channel moves out of idle when an RPC is
+// initiated.
+func (s) TestIdlenessManager_Enabled_ExitIdleOnRPC(t *testing.T) {
+ overrideNewTimer(t)
+
+ enforcer := newTestIdlenessEnforcer()
+ mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout))
+ defer mgr.close()
+
+ // Ensure that the channel moves to idle since there are no RPCs.
+ select {
+ case <-enforcer.enterIdleCh:
+ case <-time.After(2 * defaultTestIdleTimeout):
+ t.Fatal("Timeout waiting for channel to move to idle mode")
+ }
+
+ for i := 0; i < 100; i++ {
+ // A call to onCallBegin and onCallEnd simulates an RPC.
+ go func() {
+ if err := mgr.onCallBegin(); err != nil {
+ t.Errorf("onCallBegin() failed: %v", err)
+ }
+ mgr.onCallEnd()
+ }()
+ }
+
+ // Ensure that the channel moves out of idle as a result of the above RPC.
+ select {
+ case <-enforcer.exitIdleCh:
+ case <-time.After(2 * defaultTestIdleTimeout):
+ t.Fatal("Timeout waiting for channel to move out of idle mode")
+ }
+
+ // Ensure that only one call to exit idle mode is made to the CC.
+ sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
+ defer sCancel()
+ select {
+ case <-enforcer.exitIdleCh:
+ t.Fatal("More than one call to exit idle mode on the ClientConn; only one expected")
+ case <-sCtx.Done():
+ }
+}
+
+type racyIdlenessState int32
+
+const (
+ stateInital racyIdlenessState = iota
+ stateEnteredIdle
+ stateExitedIdle
+ stateActiveRPCs
+)
+
+// racyIdlenessEnforcer is a test idleness enforcer used specifically to test the
+// race between idle timeout and incoming RPCs.
+type racyIdlenessEnforcer struct {
+ state *racyIdlenessState // Accessed atomically.
+}
+
+// exitIdleMode sets the internal state to stateExitedIdle. We should only ever
+// exit idle when we are currently in idle.
+func (ri *racyIdlenessEnforcer) exitIdleMode() error {
+ if !atomic.CompareAndSwapInt32((*int32)(ri.state), int32(stateEnteredIdle), int32(stateExitedIdle)) {
+ return fmt.Errorf("idleness enforcer asked to exit idle when it did not enter idle earlier")
+ }
+ return nil
+}
+
+// enterIdleMode attempts to set the internal state to stateEnteredIdle. We should only ever enter idle before RPCs start.
+func (ri *racyIdlenessEnforcer) enterIdleMode() error {
+ if !atomic.CompareAndSwapInt32((*int32)(ri.state), int32(stateInital), int32(stateEnteredIdle)) {
+ return fmt.Errorf("idleness enforcer asked to enter idle after rpcs started")
+ }
+ return nil
+}
+
+// TestIdlenessManager_IdleTimeoutRacesWithOnCallBegin tests the case where
+// firing of the idle timeout races with an incoming RPC. The test verifies that
+// if the timer callback wins the race and puts the channel in idle, the RPCs can
+// kick it out of idle. And if the RPCs win the race and keep the channel
+// active, then the timer callback should not attempt to put the channel in idle
+// mode.
+func (s) TestIdlenessManager_IdleTimeoutRacesWithOnCallBegin(t *testing.T) {
+ // Run multiple iterations to simulate different possibilities.
+ for i := 0; i < 10; i++ {
+ t.Run(fmt.Sprintf("iteration=%d", i), func(t *testing.T) {
+ var idlenessState racyIdlenessState
+ enforcer := &racyIdlenessEnforcer{state: &idlenessState}
+
+ // Configure a large idle timeout so that we can control the
+ // race between the timer callback and RPCs.
+ mgr := newIdlenessManager(enforcer, time.Duration(10*time.Minute))
+ defer mgr.close()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ m := mgr.(interface{ handleIdleTimeout() })
+ <-time.After(defaultTestIdleTimeout)
+ m.handleIdleTimeout()
+ }()
+ for j := 0; j < 100; j++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // Wait for the configured idle timeout and simulate an RPC to
+ // race with the idle timeout timer callback.
+ <-time.After(defaultTestIdleTimeout) + if err := mgr.onCallBegin(); err != nil { + t.Errorf("onCallBegin() failed: %v", err) + } + atomic.StoreInt32((*int32)(&idlenessState), int32(stateActiveRPCs)) + mgr.onCallEnd() + }() + } + wg.Wait() + }) + } +} diff --git a/internal/grpcsync/callback_serializer.go b/internal/grpcsync/callback_serializer.go index d91f92463542..37b8d4117e77 100644 --- a/internal/grpcsync/callback_serializer.go +++ b/internal/grpcsync/callback_serializer.go @@ -20,6 +20,7 @@ package grpcsync import ( "context" + "sync" "google.golang.org/grpc/internal/buffer" ) @@ -31,19 +32,21 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e a - // scheduled callback, if any, that was running when the context passed to - // NewCallbackSerializer is cancelled, has completed and the serializer has - // deallocated all its resources. + // Done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. Done chan struct{} callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided // context will be passed to the scheduled callbacks. Users should cancel the // provided context to shutdown the CallbackSerializer. It is guaranteed that no -// callbacks will be executed once this context is canceled. +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. 
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { t := &CallbackSerializer{ Done: make(chan struct{}), @@ -57,17 +60,30 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) { +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + t.closedMu.Lock() + defer t.closedMu.Unlock() + + if t.closed { + return false + } t.callbacks.Put(f) + return true } func (t *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + defer close(t.Done) for ctx.Err() == nil { select { case <-ctx.Done(): - t.callbacks.Close() - return + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. case callback, ok := <-t.callbacks.Get(): if !ok { return @@ -76,4 +92,28 @@ func (t *CallbackSerializer) run(ctx context.Context) { callback.(func(ctx context.Context))(ctx) } } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing t.Done. 
+ t.closedMu.Lock()
+ t.closed = true
+ backlog = t.fetchPendingCallbacks()
+ t.callbacks.Close()
+ t.closedMu.Unlock()
+ for _, b := range backlog {
+ b(ctx)
+ }
+}
+
+func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
+ var backlog []func(context.Context)
+ for {
+ select {
+ case b := <-t.callbacks.Get():
+ backlog = append(backlog, b.(func(context.Context)))
+ t.callbacks.Load()
+ default:
+ return backlog
+ }
+ }
 }
diff --git a/internal/grpcsync/callback_serializer_test.go b/internal/grpcsync/callback_serializer_test.go
index 8c465af66aea..cdbd446f8101 100644
--- a/internal/grpcsync/callback_serializer_test.go
+++ b/internal/grpcsync/callback_serializer_test.go
@@ -20,7 +20,6 @@ package grpcsync
 
 import (
 	"context"
-	"fmt"
 	"sync"
 	"testing"
 	"time"
@@ -141,7 +140,10 @@ func (s) TestCallbackSerializer_Schedule_Concurrent(t *testing.T) {
 // are not executed once Close() returns.
 func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
-	cs := NewCallbackSerializer(ctx)
+	defer cancel()
+
+	serializerCtx, serializerCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	cs := NewCallbackSerializer(serializerCtx)
 
 	// Schedule a callback which blocks until the context passed to it is
 	// canceled. It also closes a channel to signal that it has started.
@@ -151,36 +153,54 @@ func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) {
 		<-ctx.Done()
 	})
 
-	// Schedule a bunch of callbacks. These should not be exeuted since the first
-	// one started earlier is blocked.
+	// Schedule a bunch of callbacks. These should be executed since they are
+	// scheduled before the serializer is closed.
 const numCallbacks = 10
-	errCh := make(chan error, numCallbacks)
+	callbackCh := make(chan int, numCallbacks)
 	for i := 0; i < numCallbacks; i++ {
-		cs.Schedule(func(_ context.Context) {
-			errCh <- fmt.Errorf("callback %d executed when not expected to", i)
-		})
+		num := i
+		if !cs.Schedule(func(context.Context) { callbackCh <- num }) {
+			t.Fatal("Schedule failed to accept a callback when the serializer is yet to be closed")
+		}
 	}
 
 	// Ensure that none of the newer callbacks are executed at this point.
 	select {
 	case <-time.After(defaultTestShortTimeout):
-	case err := <-errCh:
-		t.Fatal(err)
+	case <-callbackCh:
+		t.Fatal("Newer callback executed when older one is still executing")
 	}
 
 	// Wait for the first callback to start before closing the scheduler.
 	<-firstCallbackStartedCh
 
-	// Cancel the context which will unblock the first callback. None of the
+	// Cancel the context which will unblock the first callback. All of the
 	// other callbacks (which have not started executing at this point) should
 	// be executed after this.
-	cancel()
+	serializerCancel()
+
+	// Ensure that the newer callbacks are executed.
+	for i := 0; i < numCallbacks; i++ {
+		select {
+		case <-ctx.Done():
+			t.Fatal("Timeout when waiting for callback scheduled before close to be executed")
+		case num := <-callbackCh:
+			if num != i {
+				t.Fatalf("Executing callback %d, want %d", num, i)
+			}
+		}
+	}
 	<-cs.Done
 
-	// Ensure that the newer callbacks are not executed.
+	done := make(chan struct{})
+	if cs.Schedule(func(context.Context) { close(done) }) {
+		t.Fatal("Scheduled a callback after closing the serializer")
+	}
+
+	// Ensure that the latest callback is not executed at this point.
 select {
 	case <-time.After(defaultTestShortTimeout):
-	case err := <-errCh:
-		t.Fatal(err)
+	case <-done:
+		t.Fatal("Newer callback executed when scheduled after closing serializer")
 	}
 }
diff --git a/picker_wrapper.go b/picker_wrapper.go
index c525dc070fc6..8e24d864986d 100644
--- a/picker_wrapper.go
+++ b/picker_wrapper.go
@@ -36,6 +36,7 @@ import (
 type pickerWrapper struct {
 	mu sync.Mutex
 	done bool
+	idle bool
 	blockingCh chan struct{}
 	picker balancer.Picker
 }
@@ -47,7 +48,11 @@ func newPickerWrapper() *pickerWrapper {
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 	pw.mu.Lock()
-	if pw.done {
+	if pw.done || pw.idle {
+		// There is a small window where a picker update from the LB policy can
+		// race with the channel going to idle mode. If the picker is idle here,
+		// it is because the channel asked it to do so, and therefore it is safe
+		// to ignore the update from the LB policy.
 		pw.mu.Unlock()
 		return
 	}
@@ -187,6 +192,25 @@ func (pw *pickerWrapper) close() {
 	close(pw.blockingCh)
 }
 
+func (pw *pickerWrapper) enterIdleMode() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.done {
+		return
+	}
+	pw.idle = true
+}
+
+func (pw *pickerWrapper) exitIdleMode() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.done {
+		return
+	}
+	pw.blockingCh = make(chan struct{})
+	pw.idle = false
+}
+
 // dropError is a wrapper error that indicates the LB policy wishes to drop the
 // RPC and not retry it.
type dropError struct { diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index ce12b52ecdc0..b408b3688f2e 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -21,6 +21,7 @@ package grpc import ( "context" "strings" + "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/channelz" @@ -44,15 +45,20 @@ type ccResolverWrapper struct { cc resolverStateUpdater channelzID *channelz.Identifier ignoreServiceConfig bool - - // Outgoing (gRPC --> resolver) and incoming (resolver --> gRPC) calls are - // guaranteed to execute in a mutually exclusive manner as they are - // scheduled on the CallbackSerializer. Fields accessed *only* in serializer - // callbacks, can therefore be accessed without a mutex. - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - resolver resolver.Resolver - curState resolver.State + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. 
} // ccResolverWrapperOpts wraps the arguments to be passed when creating a new @@ -72,38 +78,81 @@ func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) ( cc: cc, channelzID: opts.channelzID, ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(_ context.Context) { - ccr.resolver.ResolveNow(o) - }) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + // Close the serializer to ensure that no more calls from the resolver are - // handled, before closing the resolver. + // handled, before actually closing the resolver. ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. 
 <-ccr.serializer.Done
-	ccr.resolver.Close()
+
+	// Spawn a goroutine to close the resolver (since it may block trying to
+	// cleanup all allocated resources) and return early.
+	go r.Close()
+}
+
+// serializerScheduleLocked is a convenience method to schedule a function to be
+// run on the serializer while holding ccr.mu.
+func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
+	ccr.mu.Lock()
+	ccr.serializer.Schedule(f)
+	ccr.mu.Unlock()
+}
 
 // UpdateState is called by resolver implementations to report new state to gRPC
 // which includes addresses and service config.
 func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
 	errCh := make(chan error, 1)
-	ccr.serializer.Schedule(func(_ context.Context) {
+	ok := ccr.serializer.Schedule(func(context.Context) {
 		ccr.addChannelzTraceEvent(s)
 		ccr.curState = s
 		if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
@@ -112,22 +161,19 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
 		}
 		errCh <- nil
 	})
-
-	// If the resolver wrapper is closed when waiting for this state update to
-	// be handled, the callback serializer will be closed as well, and we can
-	// rely on its Done channel to ensure that we don't block here forever.
-	select {
-	case err := <-errCh:
-		return err
-	case <-ccr.serializer.Done:
+	if !ok {
+		// The only time when Schedule() fails to add the callback to the
+		// serializer is when the serializer is closed, and this happens only
+		// when the resolver wrapper is closed.
 		return nil
 	}
+	return <-errCh
 }
 
 // ReportError is called by resolver implementations to report errors
 // encountered during name resolution to gRPC.
func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializer.Schedule(func(_ context.Context) { + ccr.serializerScheduleLocked(func(_ context.Context) { channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.cc.updateResolverState(resolver.State{}, err) }) @@ -136,7 +182,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { // NewAddress is called by the resolver implementation to send addresses to // gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializer.Schedule(func(_ context.Context) { + ccr.serializerScheduleLocked(func(_ context.Context) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) @@ -146,7 +192,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializer.Schedule(func(_ context.Context) { + ccr.serializerScheduleLocked(func(_ context.Context) { channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.ignoreServiceConfig { channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") diff --git a/stream.go b/stream.go index 06ec22cd0a9d..75ab86268ba1 100644 --- a/stream.go +++ b/stream.go @@ -155,6 +155,11 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. 
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return nil, err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/test/clientconn_state_transition_test.go b/test/clientconn_state_transition_test.go index 57f932d1eb5e..a14ff4588a0f 100644 --- a/test/clientconn_state_transition_test.go +++ b/test/clientconn_state_transition_test.go @@ -537,3 +537,10 @@ func awaitNotState(ctx context.Context, t *testing.T, cc *grpc.ClientConn, state } } } + +func awaitNoStateChange(ctx context.Context, t *testing.T, cc *grpc.ClientConn, currState connectivity.State) { + t.Helper() + if cc.WaitForStateChange(ctx, currState) { + t.Fatalf("State changed from %q to %q when no state change was expected", currState, cc.GetState()) + } +} diff --git a/test/idleness_test.go b/test/idleness_test.go new file mode 100644 index 000000000000..88366ed3ae12 --- /dev/null +++ b/test/idleness_test.go @@ -0,0 +1,423 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +const defaultTestShortIdleTimeout = 500 * time.Millisecond + +// channelzTraceEventFound looks up the top-channels in channelz (expects a +// single one), and checks if there is a trace event on the channel matching the +// provided description string. +func channelzTraceEventFound(ctx context.Context, wantDesc string) error { + for ctx.Err() == nil { + tcs, _ := channelz.GetTopChannels(0, 0) + if l := len(tcs); l != 1 { + return fmt.Errorf("when looking for channelz trace event with description %q, found %d top-level channels, want 1", wantDesc, l) + } + if tcs[0].Trace == nil { + return fmt.Errorf("when looking for channelz trace event with description %q, no trace events found for top-level channel", wantDesc) + } + + for _, e := range tcs[0].Trace.Events { + if strings.Contains(e.Desc, wantDesc) { + return nil + } + } + } + return fmt.Errorf("when looking for channelz trace event with description %q, %w", wantDesc, ctx.Err()) +} + +// channelzTraceEventNotFound looks up the top-channels in channelz (expects a +// single one), and verifies that there is no trace event on the channel +// matching the provided description string. 
+func channelzTraceEventNotFound(ctx context.Context, wantDesc string) error {
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
+ defer sCancel()
+
+ err := channelzTraceEventFound(sCtx, wantDesc)
+ if err == nil {
+ return fmt.Errorf("found channelz trace event with description %q, when expected not to", wantDesc)
+ }
+ if !errors.Is(err, context.DeadlineExceeded) {
+ return err
+ }
+ return nil
+}
+
+// Tests the case where channel idleness is disabled by passing an idle_timeout
+// of 0. Verifies that a READY channel with no RPCs does not move to IDLE.
+func (s) TestChannelIdleness_Disabled_NoActivity(t *testing.T) {
+ // Setup channelz for testing.
+ czCleanup := channelz.NewChannelzStorageForTesting()
+ t.Cleanup(func() { czCleanupWrapper(czCleanup, t) })
+
+ // Create a ClientConn with idle_timeout set to 0.
+ r := manual.NewBuilderWithScheme("whatever")
+ dopts := []grpc.DialOption{
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ grpc.WithResolvers(r),
+ grpc.WithIdleTimeout(0), // Disable idleness.
+ grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
+ }
+ cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...)
+ if err != nil {
+ t.Fatalf("grpc.Dial() failed: %v", err)
+ }
+ t.Cleanup(func() { cc.Close() })
+
+ // Start a test backend and push an address update via the resolver.
+ backend := stubserver.StartTestService(t, nil)
+ t.Cleanup(backend.Stop)
+ r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}})
+
+ // Verify that the ClientConn moves to READY.
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+ defer cancel()
+ awaitState(ctx, t, cc, connectivity.Ready)
+
+ // Verify that the ClientConn stays in READY.
+ sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout)
+ defer sCancel()
+ awaitNoStateChange(sCtx, t, cc, connectivity.Ready)
+
+ // Verify that there are no idleness related channelz events.
+ if err := channelzTraceEventNotFound(ctx, "entering idle mode"); err != nil { + t.Fatal(err) + } + if err := channelzTraceEventNotFound(ctx, "exiting idle mode"); err != nil { + t.Fatal(err) + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that a READY channel with no RPCs moves to IDLE. +func (s) TestChannelIdleness_Enabled_NoActivity(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Create a ClientConn with a short idle_timeout. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Start a test backend and push an address update via the resolver. + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Veirfy that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // Veirfy that the ClientConn moves to IDLE as there is no activity. + awaitState(ctx, t, cc, connectivity.Idle) + + // Verify idleness related channelz events. + if err := channelzTraceEventFound(ctx, "entering idle mode"); err != nil { + t.Fatal(err) + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that a READY channel with an ongoing RPC stays READY. 
+func (s) TestChannelIdleness_Enabled_OngoingCall(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Create a ClientConn with a short idle_timeout. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Start a test backend which keeps a unary RPC call active by blocking on a + // channel that is closed by the test later on. Also push an address update + // via the resolver. + blockCh := make(chan struct{}) + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + <-blockCh + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Cleanup(backend.Stop) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Veirfy that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // Spawn a goroutine which checks expected state transitions and idleness + // channelz trace events. It eventually closes `blockCh`, thereby unblocking + // the server RPC handler and the unary call below. + errCh := make(chan error, 1) + go func() { + // Veirfy that the ClientConn stay in READY. 
+ sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout) + defer sCancel() + awaitNoStateChange(sCtx, t, cc, connectivity.Ready) + + // Verify that there are no idleness related channelz events. + if err := channelzTraceEventNotFound(ctx, "entering idle mode"); err != nil { + errCh <- err + return + } + if err := channelzTraceEventNotFound(ctx, "exiting idle mode"); err != nil { + errCh <- err + return + } + + // Unblock the unary RPC on the server. + close(blockCh) + errCh <- nil + }() + + // Make a unary RPC that blocks on the server, thereby ensuring that the + // count of active RPCs on the client is non-zero. + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Errorf("EmptyCall RPC failed: %v", err) + } + + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + case <-ctx.Done(): + t.Fatalf("Timeout when trying to verify that an active RPC keeps channel from moving to IDLE") + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that activity on a READY channel (frequent and short +// RPCs) keeps it from moving to IDLE. +func (s) TestChannelIdleness_Enabled_ActiveSinceLastCheck(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Create a ClientConn with a short idle_timeout. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) 
+ if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Start a test backend and push an address update via the resolver. + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Veirfy that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // For a duration of three times the configured idle timeout, making RPCs + // every now and then and ensure that the channel does not move out of + // READY. + sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout) + defer sCancel() + go func() { + for ; sCtx.Err() == nil; <-time.After(defaultTestShortIdleTimeout / 4) { + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); err != nil { + // While iterating through this for loop, at some point in time, + // the context deadline will expire. It is safe to ignore that + // error code. + if status.Code(err) != codes.DeadlineExceeded { + t.Errorf("EmptyCall RPC failed: %v", err) + return + } + } + } + }() + + // Veirfy that the ClientConn stay in READY. + awaitNoStateChange(sCtx, t, cc, connectivity.Ready) + + // Verify that there are no idleness related channelz events. + if err := channelzTraceEventNotFound(ctx, "entering idle mode"); err != nil { + t.Fatal(err) + } + if err := channelzTraceEventNotFound(ctx, "exiting idle mode"); err != nil { + t.Fatal(err) + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that a READY channel with no RPCs moves to IDLE. Also +// verifies that a subsequent RPC on the IDLE channel kicks it out of IDLE. +func (s) TestChannelIdleness_Enabled_ExitIdleOnRPC(t *testing.T) { + // Setup channelz for testing. 
+	czCleanup := channelz.NewChannelzStorageForTesting()
+	t.Cleanup(func() { czCleanupWrapper(czCleanup, t) })
+
+	// Start a test backend and set the bootstrap state of the resolver to
+	// include this address. This will ensure that when the resolver is
+	// restarted when exiting idle, it will push the same address to grpc again.
+	r := manual.NewBuilderWithScheme("whatever")
+	backend := stubserver.StartTestService(t, nil)
+	t.Cleanup(backend.Stop)
+	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}})
+
+	// Create a ClientConn with a short idle_timeout.
+	dopts := []grpc.DialOption{
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithResolvers(r),
+		grpc.WithIdleTimeout(defaultTestShortIdleTimeout),
+		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
+	}
+	cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...)
+	if err != nil {
+		t.Fatalf("grpc.Dial() failed: %v", err)
+	}
+	t.Cleanup(func() { cc.Close() })
+
+	// Verify that the ClientConn moves to READY.
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	awaitState(ctx, t, cc, connectivity.Ready)
+
+	// Verify that the ClientConn moves to IDLE as there is no activity.
+	awaitState(ctx, t, cc, connectivity.Idle)
+
+	// Verify idleness related channelz events.
+	if err := channelzTraceEventFound(ctx, "entering idle mode"); err != nil {
+		t.Fatal(err)
+	}
+
+	// Make an RPC and ensure that it succeeds and moves the channel back to
+	// READY.
+	client := testgrpc.NewTestServiceClient(cc)
+	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
+		t.Fatalf("EmptyCall RPC failed: %v", err)
+	}
+	awaitState(ctx, t, cc, connectivity.Ready)
+	if err := channelzTraceEventFound(ctx, "exiting idle mode"); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Tests the case where channel idleness is enabled by passing a small value for
+// idle_timeout.
Simulates a race between the idle timer firing and RPCs being
+// initiated, after a period of inactivity on the channel.
+//
+// After a period of inactivity (for the configured idle timeout duration), when
+// RPCs are started, there are two possibilities:
+//   - the idle timer wins the race and puts the channel in idle. The RPCs then
+//     kick it out of idle.
+//   - the RPCs win the race, and therefore the channel never moves to idle.
+//
+// In either of these cases, all RPCs must succeed.
+func (s) TestChannelIdleness_Enabled_IdleTimeoutRacesWithRPCs(t *testing.T) {
+	// Setup channelz for testing.
+	czCleanup := channelz.NewChannelzStorageForTesting()
+	t.Cleanup(func() { czCleanupWrapper(czCleanup, t) })
+
+	// Start a test backend and set the bootstrap state of the resolver to
+	// include this address. This will ensure that when the resolver is
+	// restarted when exiting idle, it will push the same address to grpc again.
+	r := manual.NewBuilderWithScheme("whatever")
+	backend := stubserver.StartTestService(t, nil)
+	t.Cleanup(backend.Stop)
+	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}})
+
+	// Create a ClientConn with a short idle_timeout.
+	dopts := []grpc.DialOption{
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithResolvers(r),
+		grpc.WithIdleTimeout(defaultTestShortTimeout),
+		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
+	}
+	cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...)
+	if err != nil {
+		t.Fatalf("grpc.Dial() failed: %v", err)
+	}
+	t.Cleanup(func() { cc.Close() })
+
+	// Verify that the ClientConn moves to READY.
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	awaitState(ctx, t, cc, connectivity.Ready)
+
+	// Make an RPC every defaultTestShortTimeout duration so as to race with the
+	// idle timeout. Whether the idle timeout wins the race or the RPC wins the
+	// race, RPCs must succeed.
+ client := testgrpc.NewTestServiceClient(cc) + for i := 0; i < 20; i++ { + <-time.After(defaultTestShortTimeout) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Errorf("EmptyCall RPC failed: %v", err) + } + } +} From 511a96359f5525920014c2d1df31aac07f5bbeaf Mon Sep 17 00:00:00 2001 From: apolcyn Date: Mon, 22 May 2023 15:32:29 -0700 Subject: [PATCH 935/998] interop: let the interop client send additional metadata, controlled by a flag (#6295) --- interop/client/client.go | 43 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/interop/client/client.go b/interop/client/client.go index a4228190e12c..8238e0a106d7 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -24,12 +24,14 @@ package main import ( + "context" "crypto/tls" "crypto/x509" "flag" "net" "os" "strconv" + "strings" "time" "golang.org/x/oauth2" @@ -41,6 +43,7 @@ import ( "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" @@ -75,6 +78,7 @@ var ( soakOverallTimeoutSeconds = flag.Int("soak_overall_timeout_seconds", 10, "The overall number of seconds after which a soak test should stop and fail, if the desired number of iterations have not yet completed.") soakMinTimeMsBetweenRPCs = flag.Int("soak_min_time_ms_between_rpcs", 0, "The minimum time in milliseconds between consecutive RPCs in a soak test (rpc_soak or channel_soak), useful for limiting QPS") tlsServerName = flag.String("server_host_override", "", "The server name used to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") + additionalMetadata = flag.String("additional_metadata", "", "Additional metadata to send in each request, as a semicolon-separated list of key:value pairs.") testCase = flag.String("test_case", "large_unary", `Configure different test cases. 
Valid options are: empty_unary : empty (zero bytes) request and response; @@ -115,6 +119,34 @@ const ( credsComputeEngineCreds ) +// Parses the --additional_metadata flag and returns metadata to send on each RPC, +// formatted as per https://pkg.go.dev/google.golang.org/grpc/metadata#Pairs. +// Allow any character but semicolons in values. If the flag is empty, return a nil map. +func parseAdditionalMetadataFlag() []string { + if len(*additionalMetadata) == 0 { + return nil + } + r := *additionalMetadata + addMd := make([]string, 0) + for len(r) > 0 { + i := strings.Index(r, ":") + if i < 0 { + logger.Fatalf("Error parsing --additional_metadata flag: missing colon separator") + } + addMd = append(addMd, r[:i]) // append key + r = r[i+1:] + i = strings.Index(r, ";") + // append value + if i < 0 { + addMd = append(addMd, r) + break + } + addMd = append(addMd, r[:i]) + r = r[i+1:] + } + return addMd +} + func main() { flag.Parse() logger.Infof("Client running with test case %q", *testCase) @@ -214,6 +246,17 @@ func main() { if len(*serviceConfigJSON) > 0 { opts = append(opts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(*serviceConfigJSON)) } + if addMd := parseAdditionalMetadataFlag(); addMd != nil { + unaryAddMd := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ctx = metadata.AppendToOutgoingContext(ctx, addMd...) + return invoker(ctx, method, req, reply, cc, opts...) + } + streamingAddMd := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ctx = metadata.AppendToOutgoingContext(ctx, addMd...) + return streamer(ctx, desc, cc, method, opts...) + } + opts = append(opts, grpc.WithUnaryInterceptor(unaryAddMd), grpc.WithStreamInterceptor(streamingAddMd)) + } conn, err := grpc.Dial(serverAddr, opts...) 
if err != nil { logger.Fatalf("Fail to dial: %v", err) From 2a266e78a0307e22d3adfdb5c20f9c4bd12a00ac Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Mon, 22 May 2023 15:39:17 -0700 Subject: [PATCH 936/998] authz: use pointer to to structpb.Struct instead of value (#6307) --- authz/rbac_translator.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/authz/rbac_translator.go b/authz/rbac_translator.go index d88797d49907..730ec9dc426a 100644 --- a/authz/rbac_translator.go +++ b/authz/rbac_translator.go @@ -62,9 +62,9 @@ type rule struct { } type auditLogger struct { - Name string `json:"name"` - Config structpb.Struct `json:"config"` - IsOptional bool `json:"is_optional"` + Name string `json:"name"` + Config *structpb.Struct `json:"config"` + IsOptional bool `json:"is_optional"` } type auditLoggingOptions struct { @@ -306,9 +306,12 @@ func (options *auditLoggingOptions) toProtos() (allow *v3rbacpb.RBAC_AuditLoggin if config.Name == "" { return nil, nil, fmt.Errorf("missing required field: name in audit_logging_options.audit_loggers[%v]", i) } + if config.Config == nil { + config.Config = &structpb.Struct{} + } typedStruct := &v1xdsudpatypepb.TypedStruct{ TypeUrl: typeURLPrefix + config.Name, - Value: &config.Config, + Value: config.Config, } customConfig, err := anypb.New(typedStruct) if err != nil { From e9799e79dbdb75fd9dfb2e864fa8b0a1b36990f8 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 23 May 2023 09:48:08 -0700 Subject: [PATCH 937/998] client: support a 1:1 mapping with acbws and addrConns (#6302) --- balancer_conn_wrappers.go | 72 +++------------------ clientconn.go | 131 +++++++++++++++++++------------------- picker_wrapper.go | 12 ++-- stream.go | 17 +++-- 4 files changed, 91 insertions(+), 141 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 4f9944697dde..4a4dea189d0e 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -133,19 +133,6 @@ func (ccb 
*ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - // - // TODO: Suppress the above mentioned state change to Shutdown, so we don't - // have to handle it here. - if sc == nil { - return - } ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) @@ -315,7 +302,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") } - if len(addrs) <= 0 { + if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) @@ -324,9 +311,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return nil, err } acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - acbw.ac.mu.Lock() ac.acbw = acbw - acbw.ac.mu.Unlock() return acbw, nil } @@ -347,7 +332,7 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { if !ok { return } - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc 
balancer.SubConn, addrs []resolver.Address) { @@ -391,63 +376,24 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + ac *addrConn // read-only + mu sync.Mutex - ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) } func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() go acbw.ac.connect() } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} - // NewStream begins a streaming RPC on the addrConn. If the addrConn is not // ready, blocks until it is or ctx expires. 
Returns an error when the context // expires or the addrConn is shut down. diff --git a/clientconn.go b/clientconn.go index 1def61e5a23d..5e45f01f91cf 100644 --- a/clientconn.go +++ b/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -970,9 +969,6 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -991,58 +987,60 @@ func equalAddresses(a, b []resolver.Address) bool { return true } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If the addresses is the same as the old list, it does nothing and returns -// true. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. 
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) - if ac.state == connectivity.Shutdown || - ac.state == connectivity.TransientFailure || - ac.state == connectivity.Idle { - ac.addrs = addrs - return true - } + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if equalAddresses(ac.addrs, addrs) { - return true + ac.mu.Unlock() + return } - if ac.state == connectivity.Connecting { - return false + ac.addrs = addrs + + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } } } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. + + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => Close => onClose, which + // requires locking ac.mu. 
+ defer ac.transport.GracefulClose() + ac.transport = nil + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -1301,7 +1299,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { ac.mu.Lock() - if ac.state == connectivity.Shutdown { + acCtx := ac.ctx + if acCtx.Err() != nil { ac.mu.Unlock() return } @@ -1329,15 +1328,14 @@ func (ac *addrConn) resetTransport() { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) // After exhausting all addresses, the addrConn enters // TRANSIENT_FAILURE. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if acCtx.Err() != nil { return } + ac.mu.Lock() ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1352,13 +1350,13 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() case <-b: timer.Stop() - case <-ac.ctx.Done(): + case <-acCtx.Done(): timer.Stop() return } ac.mu.Lock() - if ac.state != connectivity.Shutdown { + if acCtx.Err() == nil { ac.updateConnectivityState(connectivity.Idle, err) } ac.mu.Unlock() @@ -1373,14 +1371,13 @@ func (ac *addrConn) resetTransport() { // tryAllAddrs tries to creates a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. 
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if ctx.Err() != nil { return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1394,7 +1391,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { return nil } @@ -1411,19 +1408,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // createTransport creates a connection to addr. It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ac.ctx) + hctx, hcancel := context.WithCancel(ctx) onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() // adjust params based on GoAwayReason ac.adjustParams(r) - if ac.state == connectivity.Shutdown { - // Already shut down. tearDown() already cleared the transport and - // canceled hctx via ac.ctx, and we expected this connection to be - // closed, so do nothing here. + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. 
tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. return } hcancel() @@ -1442,7 +1440,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID @@ -1459,7 +1457,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { + if ctx.Err() != nil { // This can happen if the subConn was removed while in `Connecting` // state. tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1471,6 +1469,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. go newTr.Close(transport.ErrConnClosing) return nil } diff --git a/picker_wrapper.go b/picker_wrapper.go index 8e24d864986d..02f975951242 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -68,10 +68,8 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { // - wraps the done function in the passed in result to increment the calls // failed or calls succeeded channelz counter before invoking the actual // done function. 
-func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() done := result.Done result.Done = func(b balancer.DoneInfo) { @@ -157,14 +155,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - doneChannelzWrapper(acw, &pickResult) + doneChannelzWrapper(acbw, &pickResult) return t, pickResult, nil } return t, pickResult, nil diff --git a/stream.go b/stream.go index 75ab86268ba1..10092685b228 100644 --- a/stream.go +++ b/stream.go @@ -1273,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. 
In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) From 2ae10b2883064d4d776675b4c03439ba14bf513f Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Tue, 23 May 2023 12:50:47 -0700 Subject: [PATCH 938/998] xdsclient: remove interface check related to ResourceData (#6308) --- xds/internal/xdsclient/xdsresource/cluster_resource_type.go | 3 +-- xds/internal/xdsclient/xdsresource/endpoints_resource_type.go | 3 +-- xds/internal/xdsclient/xdsresource/listener_resource_type.go | 3 +-- .../xdsclient/xdsresource/route_config_resource_type.go | 3 +-- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index c51d38d3f06d..183801c1c68c 100644 --- a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -32,8 +32,7 @@ const ( var ( // Compile time interface checks. - _ Type = clusterResourceType{} - _ ResourceData = &ClusterResourceData{} + _ Type = clusterResourceType{} // Singleton instantiation of the resource type implementation. clusterType = clusterResourceType{ diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index 5a2dbbd20319..775a8aa19423 100644 --- a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -32,8 +32,7 @@ const ( var ( // Compile time interface checks. 
- _ Type = endpointsResourceType{} - _ ResourceData = &EndpointsResourceData{} + _ Type = endpointsResourceType{} // Singleton instantiation of the resource type implementation. endpointsType = endpointsResourceType{ diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 33ebd3efbf10..0aff941389ec 100644 --- a/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -35,8 +35,7 @@ const ( var ( // Compile time interface checks. - _ Type = listenerResourceType{} - _ ResourceData = &ListenerResourceData{} + _ Type = listenerResourceType{} // Singleton instantiation of the resource type implementation. listenerType = listenerResourceType{ diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index d06af4ae1aff..8ce5cb28596e 100644 --- a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -32,8 +32,7 @@ const ( var ( // Compile time interface checks. - _ Type = routeConfigResourceType{} - _ ResourceData = &RouteConfigResourceData{} + _ Type = routeConfigResourceType{} // Singleton instantiation of the resource type implementation. 
routeConfigType = routeConfigResourceType{ From a6e1acfc4420debbf347b0730e69d25b0a9c69ac Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 23 May 2023 13:39:38 -0700 Subject: [PATCH 939/998] grpc: support sticky TF in pick_first LB policy (#6306) --- pickfirst.go | 12 +++++- test/pickfirst_test.go | 86 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 1 deletion(-) diff --git a/pickfirst.go b/pickfirst.go index fc91b4d266de..89e54196e1e3 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -119,7 +119,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b } return } - b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -132,11 +131,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. 
+			b.subConn.Connect()
+			return
+		}
 		b.cc.UpdateState(balancer.State{
 			ConnectivityState: state.ConnectivityState,
 			Picker:            &idlePicker{subConn: subConn},
@@ -147,6 +156,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
 			Picker: &picker{err: state.ConnectionError},
 		})
 	}
+	b.state = state.ConnectivityState
 }
 
 func (b *pickfirstBalancer) Close() {
diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go
index 800d2f4178c2..75cb2a659ed5 100644
--- a/test/pickfirst_test.go
+++ b/test/pickfirst_test.go
@@ -20,15 +20,18 @@ package test
 import (
 	"context"
+	"sync"
 	"testing"
 	"time"
 
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/stubserver"
+	"google.golang.org/grpc/internal/testutils"
 	"google.golang.org/grpc/internal/testutils/pickfirst"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/resolver/manual"
@@ -293,3 +296,86 @@ func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) {
 	case <-doneCh:
 	}
 }
+
+func (s) TestPickFirst_StickyTransientFailure(t *testing.T) {
+	// Spin up a local server which closes the connection as soon as it receives
+	// one. It also sends a signal on a channel whenever it receives a connection.
+	lis, err := testutils.LocalTCPListener()
+	if err != nil {
+		t.Fatalf("Failed to create listener: %v", err)
+	}
+	t.Cleanup(func() { lis.Close() })
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	connCh := make(chan struct{}, 1)
+	go func() {
+		for {
+			conn, err := lis.Accept()
+			if err != nil {
+				return
+			}
+			select {
+			case connCh <- struct{}{}:
+				conn.Close()
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	// Dial the above server with a ConnectParams that does a constant backoff
+	// of defaultTestShortTimeout duration.
+ dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(pickFirstServiceConfig), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: defaultTestShortTimeout, + Multiplier: float64(0), + Jitter: float64(0), + MaxDelay: defaultTestShortTimeout, + }, + }), + } + cc, err := grpc.Dial(lis.Addr().String(), dopts...) + if err != nil { + t.Fatalf("Failed to dial server at %q: %v", lis.Addr(), err) + } + t.Cleanup(func() { cc.Close() }) + + var wg sync.WaitGroup + wg.Add(2) + // Spin up a goroutine that waits for the channel to move to + // TransientFailure. After that it checks that the channel stays in + // TransientFailure, until Shutdown. + go func() { + defer wg.Done() + for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Errorf("Timeout when waiting for state to change to TransientFailure. Current state is %s", state) + return + } + } + + // TODO(easwars): this waits for 10s. Need shorter deadline here. Basically once the second goroutine exits, we should exit from here too. + if cc.WaitForStateChange(ctx, connectivity.TransientFailure) { + if state := cc.GetState(); state != connectivity.Shutdown { + t.Errorf("Unexpected state change from TransientFailure to %s", cc.GetState()) + } + } + }() + // Spin up a goroutine which ensures that the pick_first LB policy is + // constantly trying to reconnect. 
+ go func() { + defer wg.Done() + for i := 0; i < 10; i++ { + select { + case <-connCh: + case <-time.After(2 * defaultTestShortTimeout): + t.Error("Timeout when waiting for pick_first to reconnect") + } + } + }() + wg.Wait() +} From 59134c303c31ba2ce65da8962c050918098f2a25 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 24 May 2023 10:37:54 -0700 Subject: [PATCH 940/998] client: add support for pickfirst address shuffling from gRFC A62 (#6311) --- balancer_conn_wrappers.go | 6 +++- internal/grpcrand/grpcrand.go | 7 +++++ pickfirst.go | 39 ++++++++++++++++++++++++-- test/pickfirst_test.go | 52 +++++++++++++++++++++++++++++++++++ test/resolver_update_test.go | 1 + 5 files changed, 101 insertions(+), 4 deletions(-) diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 4a4dea189d0e..04b9ad411691 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -127,7 +127,11 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat // We get here only if the above call to Schedule succeeds, in which case it // is guaranteed that the scheduled function will run. Therefore it is safe // to block on this channel. - return <-errCh + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err } // updateSubConnState is invoked by grpc to push a subConn state update to the diff --git a/internal/grpcrand/grpcrand.go b/internal/grpcrand/grpcrand.go index 0b092cfbe15d..d08e3e907666 100644 --- a/internal/grpcrand/grpcrand.go +++ b/internal/grpcrand/grpcrand.go @@ -79,3 +79,10 @@ func Uint32() uint32 { defer mu.Unlock() return r.Uint32() } + +// Shuffle implements rand.Shuffle on the grpcrand global source. 
+var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/pickfirst.go b/pickfirst.go index 89e54196e1e3..611bef7995cd 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -19,11 +19,14 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/serviceconfig" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -43,10 +46,28 @@ func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &pfConfig{} + if err := json.Unmarshal(js, cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { state connectivity.State cc balancer.ClientConn subConn balancer.SubConn + cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { @@ -69,7 +90,8 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Addresses) == 0 { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. 
if b.subConn != nil { @@ -82,12 +104,23 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return balancer.ErrBadResolverState } + if state.BalancerConfig != nil { + cfg, ok := state.BalancerConfig.(*pfConfig) + if !ok { + return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + b.cfg = cfg + } + + if b.cfg != nil && b.cfg.ShuffleAddressList { + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) if err != nil { if logger.V(2) { logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index 75cb2a659ed5..62310d4d330e 100644 --- a/test/pickfirst_test.go +++ b/test/pickfirst_test.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/pickfirst" @@ -379,3 +380,54 @@ func (s) TestPickFirst_StickyTransientFailure(t *testing.T) { }() wg.Wait() } + +func (s) TestPickFirst_ShuffleAddressList(t *testing.T) { + const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}` + + // Install a shuffler that always reverses two entries. 
+ origShuf := grpcrand.Shuffle + defer func() { grpcrand.Shuffle = origShuf }() + grpcrand.Shuffle = func(n int, f func(int, int)) { + if n != 2 { + t.Errorf("Shuffle called with n=%v; want 2", n) + } + f(0, 1) // reverse the two addresses + } + + // Set up our backends. + cc, r, backends := setupPickFirst(t, 2) + addrs := stubBackendsToResolverAddrs(backends) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Push an update with both addresses and shuffling disabled. We should + // connect to backend 0. + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Send a config with shuffling enabled. This will reverse the addresses, + // but the channel should still be connected to backend 0. + shufState := resolver.State{ + ServiceConfig: parseServiceConfig(t, r, serviceConfig), + Addresses: []resolver.Address{addrs[0], addrs[1]}, + } + r.UpdateState(shufState) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Send a resolver update with no addresses. This should push the channel + // into TransientFailure. + r.UpdateState(resolver.State{}) + awaitState(ctx, t, cc, connectivity.TransientFailure) + + // Send the same config as last time with shuffling enabled. Since we are + // not connected to backend 0, we should connect to backend 1. 
+ r.UpdateState(shufState) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } +} diff --git a/test/resolver_update_test.go b/test/resolver_update_test.go index ddf67267bac7..416f7175c53a 100644 --- a/test/resolver_update_test.go +++ b/test/resolver_update_test.go @@ -174,6 +174,7 @@ func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { } bal := bd.Data.(balancer.Balancer) ccUpdateCh.Send(ccs) + ccs.BalancerConfig = nil return bal.UpdateClientConnState(ccs) }, UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { From f19266cca454a52234ceffce443b6c68d395a68b Mon Sep 17 00:00:00 2001 From: Gregory Cooke Date: Thu, 25 May 2023 13:24:45 -0400 Subject: [PATCH 941/998] xds: support built-in Stdout audit logger type (#6298) This PR adds the functionality to parse and build the known StdoutLogger that we include as an implemented AuditLogger. --- authz/audit/stdout/stdout_logger.go | 7 ++- examples/go.mod | 2 +- examples/go.sum | 4 +- gcp/observability/go.sum | 2 +- go.mod | 2 +- go.sum | 4 +- internal/xds/rbac/converter.go | 35 ++++++++------- internal/xds/rbac/converter_test.go | 66 ++++++++++++++++++++++++++--- interop/observability/go.sum | 2 +- stats/opencensus/go.sum | 2 +- 10 files changed, 94 insertions(+), 32 deletions(-) diff --git a/authz/audit/stdout/stdout_logger.go b/authz/audit/stdout/stdout_logger.go index ee095527ccec..c4ba21fa4682 100644 --- a/authz/audit/stdout/stdout_logger.go +++ b/authz/audit/stdout/stdout_logger.go @@ -31,6 +31,9 @@ import ( var grpcLogger = grpclog.Component("authz-audit") +// Name is the string to identify this logger type in the registry +const Name = "stdout_logger" + func init() { audit.RegisterLoggerBuilder(&loggerBuilder{ goLogger: log.New(os.Stdout, "", 0), @@ -46,7 +49,7 @@ type event struct { Timestamp string `json:"timestamp"` // Time when the audit event is logged via Log method } -// logger implements the 
audit.Logger interface by logging to standard output. +// logger implements the audit.logger interface by logging to standard output. type logger struct { goLogger *log.Logger } @@ -75,7 +78,7 @@ type loggerBuilder struct { } func (loggerBuilder) Name() string { - return "stdout_logger" + return Name } // Build returns a new instance of the stdout logger. diff --git a/examples/go.mod b/examples/go.mod index 7e49c3bce4c9..0c75c14493b0 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -17,7 +17,7 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 // indirect + github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 // indirect github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 // indirect diff --git a/examples/go.sum b/examples/go.sum index 8006bf69fef6..7bc1a3576a60 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -636,8 +636,8 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 h1:MDgbDqe1rWfGBa+yCcthuqDSHvXFyenZI1U7f1IbWI8= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod 
h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 h1:TNyxMch3whemmD2xddvlcYav9UR0hUvFeWnMUMSdhHA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index bb5535fab90b..2b1c8b61771f 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -647,7 +647,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= diff --git 
a/go.mod b/go.mod index 75ea83d9309c..ecff5ff74e01 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 - github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 + github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 github.com/golang/glog v1.1.0 github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 diff --git a/go.sum b/go.sum index bd4e7e729e2d..80188b3fda93 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLW github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596 h1:MDgbDqe1rWfGBa+yCcthuqDSHvXFyenZI1U7f1IbWI8= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 h1:TNyxMch3whemmD2xddvlcYav9UR0hUvFeWnMUMSdhHA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= 
github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= diff --git a/internal/xds/rbac/converter.go b/internal/xds/rbac/converter.go index db22fd5a9e08..713e39cf31cb 100644 --- a/internal/xds/rbac/converter.go +++ b/internal/xds/rbac/converter.go @@ -24,14 +24,14 @@ import ( v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3auditloggersstreampb "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3" "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/authz/audit/stdout" + "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/structpb" ) -const udpaTypedStuctType = "type.googleapis.com/udpa.type.v1.TypedStruct" -const xdsTypedStuctType = "type.googleapis.com/xds.type.v3.TypedStruct" - func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig) (audit.Logger, error) { if loggerConfig.GetAuditLogger().GetTypedConfig() == nil { return nil, fmt.Errorf("missing required field: TypedConfig") @@ -59,23 +59,26 @@ func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConf } func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { - switch config.GetTypeUrl() { - case udpaTypedStuctType: - typedStruct := &v1xdsudpatypepb.TypedStruct{} - if err := config.UnmarshalTo(typedStruct); err != nil { - return nil, "", fmt.Errorf("failed to unmarshal resource: %v", err) - } - return convertCustomConfig(typedStruct.TypeUrl, typedStruct.Value) - case xdsTypedStuctType: - typedStruct := &v3xdsxdstypepb.TypedStruct{} - if err := config.UnmarshalTo(typedStruct); err != nil { - return nil, "", fmt.Errorf("failed to unmarshal resource: %v", err) - } - return 
convertCustomConfig(typedStruct.TypeUrl, typedStruct.Value) + any, err := config.UnmarshalNew() + if err != nil { + return nil, "", err + } + switch m := any.(type) { + case *v1xdsudpatypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3xdsxdstypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3auditloggersstreampb.StdoutAuditLog: + return convertStdoutConfig(m) } return nil, "", fmt.Errorf("custom config not implemented for type [%v]", config.GetTypeUrl()) } +func convertStdoutConfig(config *v3auditloggersstreampb.StdoutAuditLog) (json.RawMessage, string, error) { + json, err := protojson.Marshal(config) + return json, stdout.Name, err +} + func convertCustomConfig(typeURL string, s *structpb.Struct) (json.RawMessage, string, error) { // The gRPC policy name will be the "type name" part of the value of the // type_url field in the TypedStruct. We get this by using the part after diff --git a/internal/xds/rbac/converter_test.go b/internal/xds/rbac/converter_test.go index 253b9db2d50d..9b8004f7bd5c 100644 --- a/internal/xds/rbac/converter_test.go +++ b/internal/xds/rbac/converter_test.go @@ -17,12 +17,16 @@ package rbac import ( + "reflect" "strings" "testing" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3auditloggersstreampb "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3" "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/authz/audit/stdout" + "google.golang.org/grpc/internal/testutils" "google.golang.org/protobuf/types/known/anypb" ) @@ -47,7 +51,7 @@ func (s) TestBuildLoggerErrors(t *testing.T) { loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ AuditLogger: &v3corepb.TypedExtensionConfig{ Name: "TestAuditLoggerBuffer", - TypedConfig: &anypb.Any{}, + TypedConfig: 
testutils.MarshalAny(&v3rbacpb.RBAC_AuditLoggingOptions{}), }, }, expectedError: "custom config not implemented for type ", @@ -102,13 +106,65 @@ func (s) TestBuildLoggerErrors(t *testing.T) { logger, err := buildLogger(test.loggerConfig) if err != nil && !strings.HasPrefix(err.Error(), test.expectedError) { t.Fatalf("expected error: %v. got error: %v", test.expectedError, err) - } else { - if logger != test.expectedLogger { - t.Fatalf("expected logger: %v. got logger: %v", test.expectedLogger, logger) - } + } + if logger != test.expectedLogger { + t.Fatalf("expected logger: %v. got logger: %v", test.expectedLogger, logger) } }) } +} +func (s) TestBuildLoggerKnownTypes(t *testing.T) { + tests := []struct { + name string + loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig + expectedType reflect.Type + }{ + { + name: "stdout logger", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: stdout.Name, + TypedConfig: createStdoutPb(t), + }, + IsOptional: false, + }, + expectedType: reflect.TypeOf(audit.GetLoggerBuilder(stdout.Name).Build(nil)), + }, + { + name: "stdout logger with generic TypedConfig", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: stdout.Name, + TypedConfig: createXDSTypedStruct(t, map[string]interface{}{}, stdout.Name), + }, + IsOptional: false, + }, + expectedType: reflect.TypeOf(audit.GetLoggerBuilder(stdout.Name).Build(nil)), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + logger, err := buildLogger(test.loggerConfig) + if err != nil { + t.Fatalf("expected success. got error: %v", err) + } + loggerType := reflect.TypeOf(logger) + if test.expectedType != loggerType { + t.Fatalf("logger not of expected type. want: %v got: %v", test.expectedType, loggerType) + } + }) + } +} + +// Builds stdout config for audit logger proto. 
+func createStdoutPb(t *testing.T) *anypb.Any { + t.Helper() + pb := &v3auditloggersstreampb.StdoutAuditLog{} + customConfig, err := anypb.New(pb) + if err != nil { + t.Fatalf("createStdoutPb failed during anypb.New: %v", err) + } + return customConfig } diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 167fb14bc0ce..8c008e984f8c 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -648,7 +648,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 43f540fb5667..7bdc3927073a 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -630,7 +630,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230406144219-ba92d50b6596/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= From 157db1907efe7bfce70a1cbd4901d4c400d35ec7 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 25 May 2023 17:13:37 -0400 Subject: [PATCH 942/998] stats/opencensus: Fix flaky test span (#6296) --- stats/opencensus/e2e_test.go | 137 +++++++++++++++++++++-------------- stats/opencensus/trace.go | 5 +- 2 files changed, 87 insertions(+), 55 deletions(-) diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 0f1975ba9d4b..d70d9f87024d 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -1338,6 +1338,25 @@ func (fe *fakeExporter) ExportSpan(sd *trace.SpanData) { fe.seenSpans = append(fe.seenSpans, gotSI) } +// waitForServerSpan waits until a server span appears somewhere in the span +// list in an exporter. Returns an error if no server span found within the +// passed context's timeout. 
+func waitForServerSpan(ctx context.Context, fe *fakeExporter) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + fe.mu.Lock() + for _, seenSpan := range fe.seenSpans { + if seenSpan.spanKind == trace.SpanKindServer { + fe.mu.Unlock() + return nil + } + } + fe.mu.Unlock() + } + return fmt.Errorf("timeout when waiting for server span to be present in exporter") +} + // TestSpan tests emitted spans from gRPC. It configures a system with a gRPC // Client and gRPC server with the OpenCensus Dial and Server Option configured, // and makes a Unary RPC and a Streaming RPC. This should cause spans with @@ -1375,18 +1394,30 @@ func (s) TestSpan(t *testing.T) { // Make a Unary RPC. This should cause a span with message events // corresponding to the request message and response message to be emitted - // both from the client and the server. Note that RPCs trigger exports of - // corresponding span data synchronously, thus the Span Data is guaranteed - // to have been read by exporter and is ready to make assertions on. + // both from the client and the server. if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{}}); err != nil { t.Fatalf("Unexpected error from UnaryCall: %v", err) } - - // The spans received are server first, then client. This is due to the RPC - // finishing on the server first. The ordering of message events for a Unary - // Call is as follows: (client send, server recv), (server send (server span - // end), client recv (client span end)). 
wantSI := []spanInformation{ + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + name: "Attempt.grpc.testing.TestService.UnaryCall", + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 (see comment above) + UncompressedByteSize: 2, + CompressedByteSize: 2, + }, + { + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 (see comment above) + }, + }, + hasRemoteParent: false, + }, { // Sampling rate of 100 percent, so this should populate every span // with the information that this span is being sampled. Here and @@ -1424,25 +1455,6 @@ func (s) TestSpan(t *testing.T) { // instrumentation code, so I'm iffy on it but fine. hasRemoteParent: true, }, - { - sc: trace.SpanContext{ - TraceOptions: 1, - }, - name: "Attempt.grpc.testing.TestService.UnaryCall", - messageEvents: []trace.MessageEvent{ - { - EventType: trace.MessageEventTypeSent, - MessageID: 1, // First msg send so 1 (see comment above) - UncompressedByteSize: 2, - CompressedByteSize: 2, - }, - { - EventType: trace.MessageEventTypeRecv, - MessageID: 1, // First msg recv so 1 (see comment above) - }, - }, - hasRemoteParent: false, - }, { sc: trace.SpanContext{ TraceOptions: 1, @@ -1453,19 +1465,32 @@ func (s) TestSpan(t *testing.T) { childSpanCount: 1, }, } + if err := waitForServerSpan(ctx, fe); err != nil { + t.Fatal(err) + } + var spanInfoSort = func(i, j int) bool { + // This will order into attempt span (which has an unset span kind to + // not prepend Sent. to span names in backends), then call span, then + // server span. + return fe.seenSpans[i].spanKind < fe.seenSpans[j].spanKind + } + fe.mu.Lock() + // Sort the underlying seen Spans for cmp.Diff assertions and ID + // relationship assertions. 
+ sort.Slice(fe.seenSpans, spanInfoSort) if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { + fe.mu.Unlock() t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) } - fe.mu.Lock() if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { fe.mu.Unlock() t.Fatalf("Error in runtime data assertions: %v", err) } - if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[1].sc.SpanID) { - t.Fatalf("server span should point to the client attempt span as its parent. parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[1].sc.SpanID) + if !cmp.Equal(fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) { + t.Fatalf("server span should point to the client attempt span as its parent. parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) } - if !cmp.Equal(fe.seenSpans[1].parentSpanID, fe.seenSpans[2].sc.SpanID) { - t.Fatalf("client attempt span should point to the client call span as its parent. parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[1].parentSpanID, fe.seenSpans[2].sc.SpanID) + if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) { + t.Fatalf("client attempt span should point to the client call span as its parent. 
parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) } fe.seenSpans = nil @@ -1490,6 +1515,23 @@ func (s) TestSpan(t *testing.T) { } wantSI = []spanInformation{ + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + name: "Attempt.grpc.testing.TestService.FullDuplexCall", + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 + }, + { + EventType: trace.MessageEventTypeSent, + MessageID: 2, // Second msg send so 2 + }, + }, + hasRemoteParent: false, + }, { sc: trace.SpanContext{ TraceOptions: 1, @@ -1522,36 +1564,25 @@ func (s) TestSpan(t *testing.T) { hasRemoteParent: false, childSpanCount: 1, }, - { - sc: trace.SpanContext{ - TraceOptions: 1, - }, - name: "Attempt.grpc.testing.TestService.FullDuplexCall", - messageEvents: []trace.MessageEvent{ - { - EventType: trace.MessageEventTypeSent, - MessageID: 1, // First msg send so 1 - }, - { - EventType: trace.MessageEventTypeSent, - MessageID: 2, // Second msg send so 2 - }, - }, - hasRemoteParent: false, - }, + } + if err := waitForServerSpan(ctx, fe); err != nil { + t.Fatal(err) } fe.mu.Lock() defer fe.mu.Unlock() + // Sort the underlying seen Spans for cmp.Diff assertions and ID + // relationship assertions. + sort.Slice(fe.seenSpans, spanInfoSort) if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) } if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { t.Fatalf("Error in runtime data assertions: %v", err) } - if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) { - t.Fatalf("server span should point to the client attempt span as its parent. parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) + if !cmp.Equal(fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) { + t.Fatalf("server span should point to the client attempt span as its parent. 
parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) } - if !cmp.Equal(fe.seenSpans[2].parentSpanID, fe.seenSpans[1].sc.SpanID) { - t.Fatalf("client attempt span should point to the client call span as its parent. parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[2].parentSpanID, fe.seenSpans[1].sc.SpanID) + if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) { + t.Fatalf("client attempt span should point to the client call span as its parent. parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) } } diff --git a/stats/opencensus/trace.go b/stats/opencensus/trace.go index a7cafb30f4d0..f41cb838adc9 100644 --- a/stats/opencensus/trace.go +++ b/stats/opencensus/trace.go @@ -40,8 +40,9 @@ type traceInfo struct { func (csh *clientStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) (context.Context, *traceInfo) { // TODO: get consensus on whether this method name of "s.m" is correct. mn := "Attempt." + strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) - // Returned context is ignored because will populate context with data - // that wraps the span instead. + // Returned context is ignored because will populate context with data that + // wraps the span instead. Don't set span kind client on this attempt span + // to prevent backend from prepending span name with "Sent.". 
_, span := trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS)) tcBin := propagation.Binary(span.SpanContext()) From 4d3f221d1d16276c02dafec47828d16d1337f9ac Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 25 May 2023 18:05:14 -0400 Subject: [PATCH 943/998] xds/internal/xdsclient: Add support for String Matcher Header Matcher in RDS (#6313) --- internal/xds/matcher/matcher_header.go | 31 +++++++ internal/xds/matcher/matcher_header_test.go | 80 +++++++++++++++++++ xds/internal/xdsclient/xdsresource/matcher.go | 2 + .../xdsclient/xdsresource/type_rds.go | 1 + .../xdsclient/xdsresource/unmarshal_rds.go | 12 ++- .../xdsresource/unmarshal_rds_test.go | 47 +++++++++++ 6 files changed, 171 insertions(+), 2 deletions(-) diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go index fd4833d3fff8..01433f4122a2 100644 --- a/internal/xds/matcher/matcher_header.go +++ b/internal/xds/matcher/matcher_header.go @@ -241,3 +241,34 @@ func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { func (hcm *HeaderContainsMatcher) String() string { return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains) } + +// HeaderStringMatcher matches on whether the header value matches against the +// StringMatcher specified. +type HeaderStringMatcher struct { + key string + stringMatcher StringMatcher + invert bool +} + +// NewHeaderStringMatcher returns a new HeaderStringMatcher. +func NewHeaderStringMatcher(key string, sm StringMatcher, invert bool) *HeaderStringMatcher { + return &HeaderStringMatcher{ + key: key, + stringMatcher: sm, + invert: invert, + } +} + +// Match returns whether the passed in HTTP Headers match according to the +// specified StringMatcher. 
+func (hsm *HeaderStringMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return hsm.stringMatcher.Match(v) != hsm.invert +} + +func (hsm *HeaderStringMatcher) String() string { + return fmt.Sprintf("headerString:%v:%v", hsm.key, hsm.stringMatcher) +} diff --git a/internal/xds/matcher/matcher_header_test.go b/internal/xds/matcher/matcher_header_test.go index f567f3198242..9a20cf12b0f9 100644 --- a/internal/xds/matcher/matcher_header_test.go +++ b/internal/xds/matcher/matcher_header_test.go @@ -467,3 +467,83 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { }) } } + +func TestHeaderStringMatch(t *testing.T) { + tests := []struct { + name string + key string + sm StringMatcher + invert bool + md metadata.MD + want bool + }{ + { + name: "should-match", + key: "th", + sm: StringMatcher{ + exactMatch: newStringP("tv"), + }, + invert: false, + md: metadata.Pairs("th", "tv"), + want: true, + }, + { + name: "not match", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: false, + md: metadata.Pairs("th", "not-match"), + want: false, + }, + { + name: "invert string match", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: true, + md: metadata.Pairs("th", "not-match"), + want: true, + }, + { + name: "header missing", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: false, + md: metadata.Pairs("not-specified-key", "not-match"), + want: false, + }, + { + name: "header missing invert true", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: true, + md: metadata.Pairs("not-specified-key", "not-match"), + want: false, + }, + { + name: "header empty string invert", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: true, + md: metadata.Pairs("th", ""), + want: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + 
hsm := NewHeaderStringMatcher(test.key, test.sm, test.invert) + if got := hsm.Match(test.md); got != test.want { + t.Errorf("match() = %v, want %v", got, test.want) + } + }) + } +} diff --git a/xds/internal/xdsclient/xdsresource/matcher.go b/xds/internal/xdsclient/xdsresource/matcher.go index 6a056235f3bd..77aa85b68e58 100644 --- a/xds/internal/xdsclient/xdsresource/matcher.go +++ b/xds/internal/xdsclient/xdsresource/matcher.go @@ -59,6 +59,8 @@ func RouteToMatcher(r *Route) (*CompositeMatcher, error) { matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End, invert) case h.PresentMatch != nil: matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch, invert) + case h.StringMatch != nil: + matcherT = matcher.NewHeaderStringMatcher(h.Name, *h.StringMatch, invert) default: return nil, fmt.Errorf("illegal route: missing header_match_specifier") } diff --git a/xds/internal/xdsclient/xdsresource/type_rds.go b/xds/internal/xdsclient/xdsresource/type_rds.go index 0504346c399f..ad59209163e7 100644 --- a/xds/internal/xdsclient/xdsresource/type_rds.go +++ b/xds/internal/xdsclient/xdsresource/type_rds.go @@ -171,6 +171,7 @@ type HeaderMatcher struct { SuffixMatch *string RangeMatch *Int64Range PresentMatch *bool + StringMatch *matcher.StringMatcher } // Int64Range is a range for header range match. 
diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go index a082d38c5aa5..c51a0c24b508 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -24,13 +24,15 @@ import ( "strings" "time" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/protobuf/types/known/anypb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" ) func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, error) { @@ -273,6 +275,12 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif header.PrefixMatch = &ht.PrefixMatch case *v3routepb.HeaderMatcher_SuffixMatch: header.SuffixMatch = &ht.SuffixMatch + case *v3routepb.HeaderMatcher_StringMatch: + sm, err := matcher.StringMatcherFromProto(ht.StringMatch) + if err != nil { + return nil, nil, fmt.Errorf("route %+v has an invalid string matcher: %v", err, ht.StringMatch) + } + header.StringMatch = &sm default: return nil, nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 5dd4e042d72d..5e0d1e4523b6 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" 
"google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/clusterspecifier" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -923,6 +924,7 @@ func (s) TestUnmarshalRouteConfig(t *testing.T) { } func (s) TestRoutesProtoToSlice(t *testing.T) { + sm, _ := matcher.StringMatcherFromProto(&v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "tv"}}) var ( goodRouteWithFilterConfigs = func(cfgs map[string]*anypb.Any) []*v3routepb.Route { // Sets per-filter config in cluster "B" and in the route. @@ -1085,6 +1087,51 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { }}, wantErr: false, }, + { + name: "good with string matcher", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: "/a/"}}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_StringMatch{StringMatch: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "tv"}}}, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Regex: func() *regexp.Regexp { return regexp.MustCompile("/a/") }(), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(false), + StringMatch: &sm, + }, + }, + Fraction: newUInt32P(10000), + 
WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, { name: "query is ignored", routes: []*v3routepb.Route{ From e325737cace3fa71bd1f51e5cbf1546f43e58a2f Mon Sep 17 00:00:00 2001 From: Matthew Stevenson <52979934+matthewstevenson88@users.noreply.github.com> Date: Thu, 25 May 2023 15:05:50 -0700 Subject: [PATCH 944/998] alts: Fix flaky ALTS TestFullHandshake test. (#6300) * Fix flaky ALTS FullHandshake test. * Fix one other flake possibility. * fix typo in comment * Wait for full handshake frames to arrive from peer. * Remove runtime.GOMAXPROCS from the test. * Only set vmOnGCP once. --- credentials/alts/alts_test.go | 12 +---- .../alts/internal/testutil/testutil.go | 52 ++++++++++++++----- 2 files changed, 39 insertions(+), 25 deletions(-) diff --git a/credentials/alts/alts_test.go b/credentials/alts/alts_test.go index aef9642f844d..9a95d462806b 100644 --- a/credentials/alts/alts_test.go +++ b/credentials/alts/alts_test.go @@ -24,7 +24,6 @@ package alts import ( "context" "reflect" - "runtime" "sync" "testing" "time" @@ -309,21 +308,12 @@ func (s) TestCheckRPCVersions(t *testing.T) { // server, where both client and server offload to a local, fake handshaker // service. func (s) TestFullHandshake(t *testing.T) { - // If GOMAXPROCS is set to less than 2, do not run this test. This test - // requires at least 2 goroutines to succeed (one goroutine where a - // server listens, another goroutine where a client runs). - if runtime.GOMAXPROCS(0) < 2 { - return - } - // The vmOnGCP global variable MUST be reset to true after the client // or server credentials have been created, but before the ALTS // handshake begins. If vmOnGCP is not reset and this test is run // anywhere except for a GCP VM, then the ALTS handshake will // immediately fail. 
- once.Do(func() { - vmOnGCP = true - }) + once.Do(func() {}) vmOnGCP = true // Start the fake handshaker service and the server. diff --git a/credentials/alts/internal/testutil/testutil.go b/credentials/alts/internal/testutil/testutil.go index 24a61202a3da..cdc88c8f9da0 100644 --- a/credentials/alts/internal/testutil/testutil.go +++ b/credentials/alts/internal/testutil/testutil.go @@ -136,6 +136,7 @@ type FakeHandshaker struct { // DoHandshake performs a fake ALTS handshake. func (h *FakeHandshaker) DoHandshake(stream altsgrpc.HandshakerService_DoHandshakeServer) error { var isAssistingClient bool + var handshakeFramesReceivedSoFar []byte for { req, err := stream.Recv() if err != nil { @@ -153,15 +154,38 @@ func (h *FakeHandshaker) DoHandshake(stream altsgrpc.HandshakerService_DoHandsha return fmt.Errorf("processStartClient failure: %v", err) } case *altspb.HandshakerReq_ServerStart: + // If we have received the full ClientInit, send the ServerInit and + // ServerFinished. Otherwise, wait for more bytes to arrive from the client. isAssistingClient = false - resp, err = h.processServerStart(req.ServerStart) + handshakeFramesReceivedSoFar = append(handshakeFramesReceivedSoFar, req.ServerStart.InBytes...) + sendHandshakeFrame := bytes.Equal(handshakeFramesReceivedSoFar, []byte("ClientInit")) + resp, err = h.processServerStart(req.ServerStart, sendHandshakeFrame) if err != nil { - return fmt.Errorf("processServerClient failure: %v", err) + return fmt.Errorf("processServerStart failure: %v", err) } case *altspb.HandshakerReq_Next: - resp, err = h.processNext(req.Next, isAssistingClient) + // If we have received all handshake frames, send the handshake result. + // Otherwise, wait for more bytes to arrive from the peer. + oldHandshakesBytes := len(handshakeFramesReceivedSoFar) + handshakeFramesReceivedSoFar = append(handshakeFramesReceivedSoFar, req.Next.InBytes...) 
+ isHandshakeComplete := false + if isAssistingClient { + isHandshakeComplete = bytes.HasPrefix(handshakeFramesReceivedSoFar, []byte("ServerInitServerFinished")) + } else { + isHandshakeComplete = bytes.HasPrefix(handshakeFramesReceivedSoFar, []byte("ClientInitClientFinished")) + } + if !isHandshakeComplete { + resp = &altspb.HandshakerResp{ + BytesConsumed: uint32(len(handshakeFramesReceivedSoFar) - oldHandshakesBytes), + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + } + break + } + resp, err = h.getHandshakeResult(isAssistingClient) if err != nil { - return fmt.Errorf("processNext failure: %v", err) + return fmt.Errorf("getHandshakeResult failure: %v", err) } default: return fmt.Errorf("handshake request has unexpected type: %v", req) @@ -192,7 +216,7 @@ func (h *FakeHandshaker) processStartClient(req *altspb.StartClientHandshakeReq) }, nil } -func (h *FakeHandshaker) processServerStart(req *altspb.StartServerHandshakeReq) (*altspb.HandshakerResp, error) { +func (h *FakeHandshaker) processServerStart(req *altspb.StartServerHandshakeReq, sendHandshakeFrame bool) (*altspb.HandshakerResp, error) { if len(req.ApplicationProtocols) != 1 || req.ApplicationProtocols[0] != "grpc" { return nil, fmt.Errorf("unexpected application protocols: %v", req.ApplicationProtocols) } @@ -203,8 +227,14 @@ func (h *FakeHandshaker) processServerStart(req *altspb.StartServerHandshakeReq) if len(parameters.RecordProtocols) != 1 || parameters.RecordProtocols[0] != "ALTSRP_GCM_AES128_REKEY" { return nil, fmt.Errorf("unexpected record protocols: %v", parameters.RecordProtocols) } - if string(req.InBytes) != "ClientInit" { - return nil, fmt.Errorf("unexpected in bytes: %v", req.InBytes) + if sendHandshakeFrame { + return &altspb.HandshakerResp{ + OutFrames: []byte("ServerInitServerFinished"), + BytesConsumed: uint32(len(req.InBytes)), + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil } return &altspb.HandshakerResp{ OutFrames: 
[]byte("ServerInitServerFinished"), @@ -215,11 +245,8 @@ func (h *FakeHandshaker) processServerStart(req *altspb.StartServerHandshakeReq) }, nil } -func (h *FakeHandshaker) processNext(req *altspb.NextHandshakeMessageReq, isAssistingClient bool) (*altspb.HandshakerResp, error) { +func (h *FakeHandshaker) getHandshakeResult(isAssistingClient bool) (*altspb.HandshakerResp, error) { if isAssistingClient { - if !bytes.Equal(req.InBytes, []byte("ServerInitServerFinished")) { - return nil, fmt.Errorf("unexpected in bytes: got: %v, want: %v", req.InBytes, []byte("ServerInitServerFinished")) - } return &altspb.HandshakerResp{ OutFrames: []byte("ClientFinished"), BytesConsumed: 24, @@ -248,9 +275,6 @@ func (h *FakeHandshaker) processNext(req *altspb.NextHandshakeMessageReq, isAssi }, }, nil } - if !bytes.Equal(req.InBytes, []byte("ClientFinished")) { - return nil, fmt.Errorf("unexpected in bytes: got: %v, want: %v", req.InBytes, []byte("ClientFinished")) - } return &altspb.HandshakerResp{ BytesConsumed: 14, Result: &altspb.HandshakerResult{ From 9b9b364f6983ec56273447640563b677d3f7e152 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 25 May 2023 19:54:17 -0400 Subject: [PATCH 945/998] internal/envconfig: Set Custom LB Env Var to true by default (#6317) --- internal/envconfig/xds.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 1d9152e8eeb2..8b3418785450 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -89,7 +89,7 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "true". 
- XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", false) + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) From 6c2529bca81cb4eb8fd032ac3af370dd145d65b6 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 30 May 2023 09:52:23 -0700 Subject: [PATCH 946/998] xds: support pick_first custom load balancing policy (A62) (#6314) --- examples/go.mod | 2 +- examples/go.sum | 5 +- gcp/observability/go.sum | 3 +- go.mod | 2 +- go.sum | 6 +- internal/envconfig/envconfig.go | 4 + interop/observability/go.sum | 3 +- pickfirst.go | 3 +- stats/opencensus/go.sum | 3 +- test/pickfirst_test.go | 44 +++++ xds/internal/balancer/wrrlocality/balancer.go | 2 +- .../xdslbregistry/converter/converter.go | 39 ++++- .../xdslbregistry/xdslbregistry_test.go | 151 ++++++++++-------- 13 files changed, 182 insertions(+), 85 deletions(-) diff --git a/examples/go.mod b/examples/go.mod index 0c75c14493b0..c631aae7da39 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -17,7 +17,7 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 // indirect + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 // indirect diff --git a/examples/go.sum b/examples/go.sum index 7bc1a3576a60..d257912f6717 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -636,8 +636,8 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 h1:TNyxMch3whemmD2xddvlcYav9UR0hUvFeWnMUMSdhHA= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -821,6 +821,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 2b1c8b61771f..4e70d82da940 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -647,7 +647,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -841,6 +841,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/go.mod b/go.mod index ecff5ff74e01..088c703575da 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 - github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f github.com/golang/glog v1.1.0 github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 diff --git a/go.sum b/go.sum index 80188b3fda93..4e7adc822040 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLW github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41 h1:TNyxMch3whemmD2xddvlcYav9UR0hUvFeWnMUMSdhHA= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane 
v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= @@ -40,7 +40,7 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index 5ba9d94d49c2..80fd5c7d2a4f 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -36,6 +36,10 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). 
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy, which can be enabled by setting the environment + // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 8c008e984f8c..b21857e14a68 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -648,7 +648,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -843,6 +843,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify 
v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/pickfirst.go b/pickfirst.go index 611bef7995cd..abe266b021d2 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/serviceconfig" ) @@ -112,7 +113,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.cfg = cfg } - if b.cfg != nil && b.cfg.ShuffleAddressList { + if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } if b.subConn != nil { diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 7bdc3927073a..48faad4b66dd 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -630,7 +630,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane 
v0.11.1-0.20230517004634-d1c5e72e4c41/go.mod h1:84cjSkVxFD9Pi/gvI5AOq5NPhGsmS8oPsJLtCON6eK8= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -814,6 +814,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index 62310d4d330e..55659b928a57 100644 --- a/test/pickfirst_test.go +++ b/test/pickfirst_test.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" @@ -381,7 +382,10 @@ func (s) TestPickFirst_StickyTransientFailure(t 
*testing.T) { wg.Wait() } +// Tests the PF LB policy with shuffling enabled. func (s) TestPickFirst_ShuffleAddressList(t *testing.T) { + defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig) + envconfig.PickFirstLBConfig = true const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}` // Install a shuffler that always reverses two entries. @@ -390,6 +394,7 @@ func (s) TestPickFirst_ShuffleAddressList(t *testing.T) { grpcrand.Shuffle = func(n int, f func(int, int)) { if n != 2 { t.Errorf("Shuffle called with n=%v; want 2", n) + return } f(0, 1) // reverse the two addresses } @@ -431,3 +436,42 @@ func (s) TestPickFirst_ShuffleAddressList(t *testing.T) { t.Fatal(err) } } + +// Tests the PF LB policy with the environment variable support of address list +// shuffling disabled. +func (s) TestPickFirst_ShuffleAddressListDisabled(t *testing.T) { + defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig) + envconfig.PickFirstLBConfig = false + const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}` + + // Install a shuffler that always reverses two entries. + origShuf := grpcrand.Shuffle + defer func() { grpcrand.Shuffle = origShuf }() + grpcrand.Shuffle = func(n int, f func(int, int)) { + if n != 2 { + t.Errorf("Shuffle called with n=%v; want 2", n) + return + } + f(0, 1) // reverse the two addresses + } + + // Set up our backends. + cc, r, backends := setupPickFirst(t, 2) + addrs := stubBackendsToResolverAddrs(backends) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Send a config with shuffling enabled. This will reverse the addresses, + // so we should connect to backend 1 if shuffling is supported. However + // with it disabled at the start of the test, we will connect to backend 0 + // instead. 
+ shufState := resolver.State{ + ServiceConfig: parseServiceConfig(t, r, serviceConfig), + Addresses: []resolver.Address{addrs[0], addrs[1]}, + } + r.UpdateState(shufState) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/balancer/wrrlocality/balancer.go b/xds/internal/balancer/wrrlocality/balancer.go index ac63e84e62fb..4df2e4ed0086 100644 --- a/xds/internal/balancer/wrrlocality/balancer.go +++ b/xds/internal/balancer/wrrlocality/balancer.go @@ -51,7 +51,7 @@ func (bb) Name() string { // LBConfig is the config for the wrr locality balancer. type LBConfig struct { - serviceconfig.LoadBalancingConfig + serviceconfig.LoadBalancingConfig `json:"-"` // ChildPolicy is the config for the child policy. ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } diff --git a/xds/internal/xdsclient/xdslbregistry/converter/converter.go b/xds/internal/xdsclient/xdslbregistry/converter/converter.go index 27dc6533087b..c5d5afe4ebdc 100644 --- a/xds/internal/xdsclient/xdslbregistry/converter/converter.go +++ b/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -28,7 +28,9 @@ import ( "strings" "github.com/golang/protobuf/proto" + "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/internal/envconfig" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" @@ -39,18 +41,20 @@ import ( v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" v3ringhashpb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" structpb "github.com/golang/protobuf/ptypes/struct" ) func init() { + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", convertWeightedRoundRobinProtoToServiceConfig) xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash", convertRingHashProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst", convertPickFirstProtoToServiceConfig) xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", convertRoundRobinProtoToServiceConfig) xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", convertWRRLocalityProtoToServiceConfig) - xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", convertWeightedRoundRobinProtoToServiceConfig) - xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) xdslbregistry.Register("type.googleapis.com/udpa.type.v1.TypedStruct", convertV1TypedStructToServiceConfig) + xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) } const ( @@ -58,7 +62,7 @@ const ( defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M ) -func convertRingHashProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { +func convertRingHashProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { if !envconfig.XDSRingHash { return nil, nil } @@ -90,8 +94,29 @@ func 
convertRingHashProtoToServiceConfig(rawProto []byte, depth int) (json.RawMe return makeBalancerConfigJSON(ringhash.Name, rhCfgJSON), nil } +type pfConfig struct { + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func convertPickFirstProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + if !envconfig.PickFirstLBConfig { + return nil, nil + } + pfProto := &v3pickfirstpb.PickFirst{} + if err := proto.Unmarshal(rawProto, pfProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + pfCfg := &pfConfig{ShuffleAddressList: pfProto.GetShuffleAddressList()} + js, err := json.Marshal(pfCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", pfCfg, err) + } + return makeBalancerConfigJSON(grpc.PickFirstBalancerName, js), nil +} + func convertRoundRobinProtoToServiceConfig([]byte, int) (json.RawMessage, error) { - return makeBalancerConfigJSON("round_robin", json.RawMessage("{}")), nil + return makeBalancerConfigJSON(roundrobin.Name, json.RawMessage("{}")), nil } type wrrLocalityLBConfig struct { @@ -118,7 +143,7 @@ func convertWRRLocalityProtoToServiceConfig(rawProto []byte, depth int) (json.Ra return makeBalancerConfigJSON(wrrlocality.Name, lbCfgJSON), nil } -func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { +func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { cswrrProto := &v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{} if err := proto.Unmarshal(rawProto, cswrrProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) @@ -152,7 +177,7 @@ func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, depth int) ( return makeBalancerConfigJSON(weightedroundrobin.Name, lbCfgJSON), nil } -func convertV1TypedStructToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { +func 
convertV1TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { tsProto := &v1xdsudpatypepb.TypedStruct{} if err := proto.Unmarshal(rawProto, tsProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) @@ -160,7 +185,7 @@ func convertV1TypedStructToServiceConfig(rawProto []byte, depth int) (json.RawMe return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) } -func convertV3TypedStructToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { +func convertV3TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { tsProto := &v3xdsxdstypepb.TypedStruct{} if err := proto.Unmarshal(rawProto, tsProto); err != nil { return nil, fmt.Errorf("failed to unmarshal resource: %v", err) diff --git a/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go b/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go index b3f19c2e5953..f1ce5496b794 100644 --- a/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go +++ b/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/internal/pretty" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/serviceconfig" _ "google.golang.org/grpc/xds" // Register the xDS LB Registry Converters. 
- "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" "google.golang.org/protobuf/types/known/anypb" @@ -46,6 +44,7 @@ import ( v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" @@ -60,10 +59,6 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -type customLBConfig struct { - serviceconfig.LoadBalancingConfig -} - func wrrLocalityBalancerConfig(childPolicy *internalserviceconfig.BalancerConfig) *internalserviceconfig.BalancerConfig { return &internalserviceconfig.BalancerConfig{ Name: wrrlocality.Name, @@ -75,17 +70,14 @@ func wrrLocalityBalancerConfig(childPolicy *internalserviceconfig.BalancerConfig func (s) TestConvertToServiceConfigSuccess(t *testing.T) { const customLBPolicyName = "myorg.MyCustomLeastRequestPolicy" - stub.Register(customLBPolicyName, stub.BalancerFuncs{ - ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - return customLBConfig{}, nil - }, - }) + stub.Register(customLBPolicyName, stub.BalancerFuncs{}) tests := []struct { name string policy *v3clusterpb.LoadBalancingPolicy - wantConfig *internalserviceconfig.BalancerConfig + wantConfig 
string // JSON config rhDisabled bool + pfDisabled bool }{ { name: "ring_hash", @@ -102,13 +94,35 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: "ring_hash_experimental", - Config: &ringhash.LBConfig{ - MinRingSize: 10, - MaxRingSize: 100, + wantConfig: `[{"ring_hash_experimental": { "minRingSize": 10, "maxRingSize": 100 }}]`, + }, + { + name: "pick_first_shuffle", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3pickfirstpb.PickFirst{ + ShuffleAddressList: true, + }), + }, + }, + }, + }, + wantConfig: `[{"pick_first": { "shuffleAddressList": true }}]`, + }, + { + name: "pick_first", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3pickfirstpb.PickFirst{}), + }, + }, }, }, + wantConfig: `[{"pick_first": { "shuffleAddressList": false }}]`, }, { name: "round_robin", @@ -121,9 +135,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: "round_robin", - }, + wantConfig: `[{"round_robin": {}}]`, }, { name: "round_robin_ring_hash_use_first_supported", @@ -145,9 +157,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: "round_robin", - }, + wantConfig: `[{"round_robin": {}}]`, }, { name: "ring_hash_disabled_rh_rr_use_first_supported", @@ -169,11 +179,30 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: "round_robin", - }, + wantConfig: `[{"round_robin": {}}]`, rhDisabled: true, }, + { + name: "pick_first_disabled_pf_rr_use_first_supported", + policy: 
&v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3pickfirstpb.PickFirst{ + ShuffleAddressList: true, + }), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: `[{"round_robin": {}}]`, + pfDisabled: true, + }, { name: "custom_lb_type_v3_struct", policy: &v3clusterpb.LoadBalancingPolicy{ @@ -198,10 +227,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: "myorg.MyCustomLeastRequestPolicy", - Config: customLBConfig{}, - }, + wantConfig: `[{"myorg.MyCustomLeastRequestPolicy": {}}]`, }, { name: "custom_lb_type_v1_struct", @@ -217,10 +243,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: &internalserviceconfig.BalancerConfig{ - Name: "myorg.MyCustomLeastRequestPolicy", - Config: customLBConfig{}, - }, + wantConfig: `[{"myorg.MyCustomLeastRequestPolicy": {}}]`, }, { name: "wrr_locality_child_round_robin", @@ -233,9 +256,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ - Name: "round_robin", - }), + wantConfig: `[{"xds_wrr_locality_experimental": { "childPolicy": [{"round_robin": {}}] }}]`, }, { name: "wrr_locality_child_custom_lb_type_v3_struct", @@ -251,10 +272,7 @@ func (s) TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ - Name: "myorg.MyCustomLeastRequestPolicy", - Config: customLBConfig{}, - }), + wantConfig: `[{"xds_wrr_locality_experimental": { "childPolicy": [{"myorg.MyCustomLeastRequestPolicy": {}}] }}]`, }, { name: "on-the-boundary-of-recursive-limit", @@ -267,51 +285,52 @@ func (s) 
TestConvertToServiceConfigSuccess(t *testing.T) { }, }, }, - wantConfig: wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ + wantConfig: jsonMarshal(t, wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ Name: "round_robin", - }))))))))))))))), + })))))))))))))))), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { if test.rhDisabled { - oldRingHashSupport := envconfig.XDSRingHash + defer func(old bool) { envconfig.XDSRingHash = old }(envconfig.XDSRingHash) envconfig.XDSRingHash = false - defer func() { - envconfig.XDSRingHash = oldRingHashSupport - }() + } + if !test.pfDisabled { + defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig) + envconfig.PickFirstLBConfig = true } rawJSON, err := xdslbregistry.ConvertToServiceConfig(test.policy, 0) if err != nil { t.Fatalf("ConvertToServiceConfig(%s) failed: %v", pretty.ToJSON(test.policy), err) } - bc := &internalserviceconfig.BalancerConfig{} - // The converter registry is not guaranteed to emit json that is - // valid. It's scope is to simply convert from a proto message to - // internal gRPC JSON format. 
Thus, the tests cause valid JSON to - // eventually be emitted from ConvertToServiceConfig(), but this - // leaves this test brittle over time in case balancer validations - // change over time and add more failure cases. The simplicity of - // using this type (to get rid of non determinism in JSON strings) - // outweighs this brittleness, and also there are plans on - // decoupling the unmarshalling and validation step both present in - // this function in the future. In the future if balancer - // validations change, any configurations in this test that become - // invalid will need to be fixed. (need to make sure emissions above - // are valid configuration). Also, once this Unmarshal call is - // partitioned into Unmarshal vs. Validation in separate operations, - // the brittleness of this test will go away. - if err := json.Unmarshal(rawJSON, bc); err != nil { - t.Fatalf("failed to unmarshal JSON: %v", err) + // got and want must be unmarshalled since JSON strings shouldn't + // generally be directly compared. + var got []map[string]interface{} + if err := json.Unmarshal(rawJSON, &got); err != nil { + t.Fatalf("Error unmarshalling rawJSON (%q): %v", rawJSON, err) + } + var want []map[string]interface{} + if err := json.Unmarshal(json.RawMessage(test.wantConfig), &want); err != nil { + t.Fatalf("Error unmarshalling wantConfig (%q): %v", test.wantConfig, err) } - if diff := cmp.Diff(bc, test.wantConfig); diff != "" { + if diff := cmp.Diff(got, want); diff != "" { t.Fatalf("ConvertToServiceConfig() got unexpected output, diff (-got +want): %v", diff) } }) } } +func jsonMarshal(t *testing.T, x interface{}) string { + t.Helper() + js, err := json.Marshal(x) + if err != nil { + t.Fatalf("Error marshalling to JSON (%+v): %v", x, err) + } + return string(js) +} + // TestConvertToServiceConfigFailure tests failure cases of the xDS LB registry // of converting proto configuration to JSON configuration. 
func (s) TestConvertToServiceConfigFailure(t *testing.T) { From 3ea58ce4326b39885a397a4c0965240501b035e8 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 30 May 2023 16:33:59 -0700 Subject: [PATCH 947/998] client: disable channel idleness by default (#6328) --- dialoptions.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dialoptions.go b/dialoptions.go index 51c8997d5d18..15a3d5102a9a 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -628,7 +628,6 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, - idleTimeout: 30 * time.Minute, } } @@ -665,8 +664,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// A default timeout of 30 min will be used if this dial option is not set at -// dial time and idleness can be disabled by passing a timeout of zero. +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. 
// // # Experimental // From 1f23f6c2e0a13ccd0557c7e61e206207fb3c82fd Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 31 May 2023 10:23:01 -0700 Subject: [PATCH 948/998] client: fix Connect to handle channel idleness properly (#6331) --- clientconn.go | 6 ++++- test/idleness_test.go | 58 +++++++++++++++++++++++++++++++++++-------- 2 files changed, 52 insertions(+), 12 deletions(-) diff --git a/clientconn.go b/clientconn.go index 5e45f01f91cf..314addcaa1cb 100644 --- a/clientconn.go +++ b/clientconn.go @@ -325,7 +325,8 @@ func (cc *ClientConn) exitIdleMode() error { return errConnClosing } if cc.idlenessState != ccIdlenessStateIdle { - logger.Error("ClientConn asked to exit idle mode when not in idle mode") + cc.mu.Unlock() + logger.Info("ClientConn asked to exit idle mode when not in idle mode") return nil } @@ -706,6 +707,9 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. cc.balancerWrapper.exitIdleMode() } diff --git a/test/idleness_test.go b/test/idleness_test.go index 88366ed3ae12..b7e7ea6cd7cf 100644 --- a/test/idleness_test.go +++ b/test/idleness_test.go @@ -107,12 +107,12 @@ func (s) TestChannelIdleness_Disabled_NoActivity(t *testing.T) { t.Cleanup(backend.Stop) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) - // Veirfy that the ClientConn moves to READY. + // Verify that the ClientConn moves to READY. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() awaitState(ctx, t, cc, connectivity.Ready) - // Veirfy that the ClientConn stay in READY. + // Verify that the ClientConn stay in READY. 
sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout) defer sCancel() awaitNoStateChange(sCtx, t, cc, connectivity.Ready) @@ -152,12 +152,12 @@ func (s) TestChannelIdleness_Enabled_NoActivity(t *testing.T) { t.Cleanup(backend.Stop) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) - // Veirfy that the ClientConn moves to READY. + // Verify that the ClientConn moves to READY. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() awaitState(ctx, t, cc, connectivity.Ready) - // Veirfy that the ClientConn moves to IDLE as there is no activity. + // Verify that the ClientConn moves to IDLE as there is no activity. awaitState(ctx, t, cc, connectivity.Idle) // Verify idleness related channelz events. @@ -203,7 +203,7 @@ func (s) TestChannelIdleness_Enabled_OngoingCall(t *testing.T) { t.Cleanup(backend.Stop) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) - // Veirfy that the ClientConn moves to READY. + // Verify that the ClientConn moves to READY. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() awaitState(ctx, t, cc, connectivity.Ready) @@ -213,7 +213,7 @@ func (s) TestChannelIdleness_Enabled_OngoingCall(t *testing.T) { // the server RPC handler and the unary call below. errCh := make(chan error, 1) go func() { - // Veirfy that the ClientConn stay in READY. + // Verify that the ClientConn stay in READY. sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout) defer sCancel() awaitNoStateChange(sCtx, t, cc, connectivity.Ready) @@ -277,7 +277,7 @@ func (s) TestChannelIdleness_Enabled_ActiveSinceLastCheck(t *testing.T) { t.Cleanup(backend.Stop) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) - // Veirfy that the ClientConn moves to READY. + // Verify that the ClientConn moves to READY. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() awaitState(ctx, t, cc, connectivity.Ready) @@ -302,7 +302,7 @@ func (s) TestChannelIdleness_Enabled_ActiveSinceLastCheck(t *testing.T) { } }() - // Veirfy that the ClientConn stay in READY. + // Verify that the ClientConn stay in READY. awaitNoStateChange(sCtx, t, cc, connectivity.Ready) // Verify that there are no idleness related channelz events. @@ -343,12 +343,12 @@ func (s) TestChannelIdleness_Enabled_ExitIdleOnRPC(t *testing.T) { } t.Cleanup(func() { cc.Close() }) - // Veirfy that the ClientConn moves to READY. + // Verify that the ClientConn moves to READY. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() awaitState(ctx, t, cc, connectivity.Ready) - // Veirfy that the ClientConn moves to IDLE as there is no activity. + // Verify that the ClientConn moves to IDLE as there is no activity. awaitState(ctx, t, cc, connectivity.Idle) // Verify idleness related channelz events. @@ -405,7 +405,7 @@ func (s) TestChannelIdleness_Enabled_IdleTimeoutRacesWithRPCs(t *testing.T) { } t.Cleanup(func() { cc.Close() }) - // Veirfy that the ClientConn moves to READY. + // Verify that the ClientConn moves to READY. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() awaitState(ctx, t, cc, connectivity.Ready) @@ -421,3 +421,39 @@ func (s) TestChannelIdleness_Enabled_IdleTimeoutRacesWithRPCs(t *testing.T) { } } } + +// Tests the case where the channel is IDLE and we call cc.Connect. +func (s) TestChannelIdleness_Connect(t *testing.T) { + // Start a test backend and set the bootstrap state of the resolver to + // include this address. This will ensure that when the resolver is + // restarted when exiting idle, it will push the same address to grpc again. 
+ r := manual.NewBuilderWithScheme("whatever") + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Create a ClientConn with a short idle_timeout. + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Verify that the ClientConn moves to IDLE. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + awaitState(ctx, t, cc, connectivity.Idle) + + // Connect should exit channel idleness. + cc.Connect() + + // Verify that the ClientConn moves back to READY. + awaitState(ctx, t, cc, connectivity.Ready) +} From 47f8ed81726e93655356391bb7e5836489196507 Mon Sep 17 00:00:00 2001 From: Xuan Wang Date: Wed, 31 May 2023 17:46:03 -0700 Subject: [PATCH 949/998] interop: Don't fail target if sub-target already failed (#6332) --- test/kokoro/psm-security.sh | 3 --- test/kokoro/xds_k8s_lb.sh | 3 --- 2 files changed, 6 deletions(-) diff --git a/test/kokoro/psm-security.sh b/test/kokoro/psm-security.sh index f99cb9a87883..46e3709d2e82 100755 --- a/test/kokoro/psm-security.sh +++ b/test/kokoro/psm-security.sh @@ -161,9 +161,6 @@ main() { run_test $test || (( ++failed_tests )) done echo "Failed test suites: ${failed_tests}" - if (( failed_tests > 0 )); then - exit 1 - fi } main "$@" diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh index d50f0f5484fb..5876e924fad6 100755 --- a/test/kokoro/xds_k8s_lb.sh +++ b/test/kokoro/xds_k8s_lb.sh @@ -172,9 +172,6 @@ main() { run_test $test || (( ++failed_tests )) done echo "Failed test suites: ${failed_tests}" - 
if (( failed_tests > 0 )); then - exit 1 - fi } main "$@" From 2b1d70be0280e4b4b3f4a57869a2d362f5029fa8 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Thu, 1 Jun 2023 15:31:27 -0700 Subject: [PATCH 950/998] xds: enable RLS in xDS by default (#6343) --- internal/envconfig/xds.go | 6 +++--- xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go index 8b3418785450..02b4b6a1c109 100644 --- a/internal/envconfig/xds.go +++ b/internal/envconfig/xds.go @@ -81,10 +81,10 @@ var ( XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be enabled by + // support for the RLS CLuster Specifier is enabled, which can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "true". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go index 5e0d1e4523b6..fa10d2aa2694 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -1299,7 +1299,7 @@ func (s) TestRoutesProtoToSlice(t *testing.T) { ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{}}}, }, }, - wantRoutes: []*Route{}, + wantErr: true, }, { name: "default totalWeight is 100 in weighted clusters action", From 8edfa1a17b3ff88cdb8cc9ec39fce22b812cf842 Mon Sep 17 00:00:00 2001 From: erm-g <110920239+erm-g@users.noreply.github.com> Date: Thu, 1 Jun 2023 23:32:33 +0000 Subject: [PATCH 951/998] authz: End2End test for AuditLogger (#6304) * Draft of e2e test * No Audit, Audit on Allow and Deny * Audit on Allow, Audit on Deny * fix typo * SPIFFE related testing * SPIFFE Id validation and certs creation script * Address PR comments * Wrap tests using grpctest.Tester * Address PR comments * Change package name to authz_test to fit other end2end tests * Add licence header, remove SPIFFE slice * Licence year change * Address PR comments part 1 * Address PR comments part 2 * Address PR comments part 3 * Address PR comments final part * Drop newline for a brace * Address PR comments, fix outdated function comment * Address PR comments * Fix typo * Remove unused var * Address PR comment, change most test error handling to Errorf * Address PR comments --- authz/audit/audit_logging_test.go | 377 +++++++++++++++++++ testdata/x509/client_with_spiffe_cert.pem | 33 ++ testdata/x509/client_with_spiffe_key.pem | 52 +++ testdata/x509/client_with_spiffe_openssl.cnf | 17 + testdata/x509/create.sh | 19 + 5 files changed, 498 insertions(+) create mode 100644 authz/audit/audit_logging_test.go create mode 100644 
testdata/x509/client_with_spiffe_cert.pem create mode 100644 testdata/x509/client_with_spiffe_key.pem create mode 100644 testdata/x509/client_with_spiffe_openssl.cnf diff --git a/authz/audit/audit_logging_test.go b/authz/audit/audit_logging_test.go new file mode 100644 index 000000000000..e3a4ef25b021 --- /dev/null +++ b/authz/audit/audit_logging_test.go @@ -0,0 +1,377 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package audit_test + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "io" + "net" + "os" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/authz" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/status" + "google.golang.org/grpc/testdata" + + _ "google.golang.org/grpc/authz/audit/stdout" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type statAuditLogger struct { + authzDecisionStat map[bool]int // Map to hold the counts of authorization decisions + lastEvent *audit.Event // Field to store last received event +} + +func (s *statAuditLogger) 
Log(event *audit.Event) { + s.authzDecisionStat[event.Authorized]++ + *s.lastEvent = *event +} + +type loggerBuilder struct { + authzDecisionStat map[bool]int + lastEvent *audit.Event +} + +func (loggerBuilder) Name() string { + return "stat_logger" +} + +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &statAuditLogger{ + authzDecisionStat: lb.authzDecisionStat, + lastEvent: lb.lastEvent, + } +} + +func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + return nil, nil +} + +// TestAuditLogger examines audit logging invocations using four different +// authorization policies. It covers scenarios including a disabled audit, +// auditing both 'allow' and 'deny' outcomes, and separately auditing 'allow' +// and 'deny' outcomes. Additionally, it checks if SPIFFE ID from a certificate +// is propagated correctly. +func (s) TestAuditLogger(t *testing.T) { + // Each test data entry contains an authz policy for a grpc server, + // how many 'allow' and 'deny' outcomes we expect (each test case makes 2 + // unary calls and one client-streaming call), and a structure to check if + // the audit.Event fields are properly populated. Additionally, we specify + // directly which authz outcome we expect from each type of call. 
+ tests := []struct { + name string + authzPolicy string + wantAuthzOutcomes map[bool]int + eventContent *audit.Event + wantUnaryCallCode codes.Code + wantStreamingCallCode codes.Code + }{ + { + name: "No audit", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryCall", + "request": { + "paths": [ + "/grpc.testing.TestService/UnaryCall" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantAuthzOutcomes: map[bool]int{true: 0, false: 0}, + wantUnaryCallCode: codes.OK, + wantStreamingCallCode: codes.PermissionDenied, + }, + { + name: "Allow All Deny Streaming - Audit All", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_all", + "request": { + "paths": [ + "*" + ] + } + } + ], + "deny_rules": [ + { + "name": "deny_all", + "request": { + "paths": [ + "/grpc.testing.TestService/StreamingInputCall" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "ON_DENY_AND_ALLOW", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + }, + { + "name": "stdout_logger", + "is_optional": false + } + ] + } + }`, + wantAuthzOutcomes: map[bool]int{true: 2, false: 1}, + eventContent: &audit.Event{ + FullMethodName: "/grpc.testing.TestService/StreamingInputCall", + Principal: "spiffe://foo.bar.com/client/workload/1", + PolicyName: "authz", + MatchedRule: "authz_deny_all", + Authorized: false, + }, + wantUnaryCallCode: codes.OK, + wantStreamingCallCode: codes.PermissionDenied, + }, + { + name: "Allow Unary - Audit Allow", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryCall", + "request": { + "paths": [ + "/grpc.testing.TestService/UnaryCall" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "ON_ALLOW", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + } + ] + 
} + }`, + wantAuthzOutcomes: map[bool]int{true: 2, false: 0}, + wantUnaryCallCode: codes.OK, + wantStreamingCallCode: codes.PermissionDenied, + }, + { + name: "Allow Typo - Audit Deny", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryCall", + "request": { + "paths": [ + "/grpc.testing.TestService/UnaryCall_Z" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "ON_DENY", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantAuthzOutcomes: map[bool]int{true: 0, false: 3}, + wantUnaryCallCode: codes.PermissionDenied, + wantStreamingCallCode: codes.PermissionDenied, + }, + } + // Construct the credentials for the tests and the stub server + serverCreds := loadServerCreds(t) + clientCreds := loadClientCreds(t) + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Setup test statAuditLogger, gRPC test server with authzPolicy, unary + // and stream interceptors. + lb := &loggerBuilder{ + authzDecisionStat: map[bool]int{true: 0, false: 0}, + lastEvent: &audit.Event{}, + } + audit.RegisterLoggerBuilder(lb) + i, _ := authz.NewStatic(test.authzPolicy) + + s := grpc.NewServer( + grpc.Creds(serverCreds), + grpc.ChainUnaryInterceptor(i.UnaryInterceptor), + grpc.ChainStreamInterceptor(i.StreamInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, ss) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error listening: %v", err) + } + go s.Serve(lis) + + // Setup gRPC test client with certificates containing a SPIFFE Id. 
+ clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != test.wantUnaryCallCode { + t.Errorf("Unexpected UnaryCall fail: got %v want %v", err, test.wantUnaryCallCode) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != test.wantUnaryCallCode { + t.Errorf("Unexpected UnaryCall fail: got %v want %v", err, test.wantUnaryCallCode) + } + stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("StreamingInputCall failed:%v", err) + } + req := &testpb.StreamingInputCallRequest{ + Payload: &testpb.Payload{ + Body: []byte("hi"), + }, + } + if err := stream.Send(req); err != nil && err != io.EOF { + t.Fatalf("stream.Send failed:%v", err) + } + if _, err := stream.CloseAndRecv(); status.Code(err) != test.wantStreamingCallCode { + t.Errorf("Unexpected stream.CloseAndRecv fail: got %v want %v", err, test.wantStreamingCallCode) + } + + // Compare expected number of allows/denies with content of the internal + // map of statAuditLogger. + if diff := cmp.Diff(lb.authzDecisionStat, test.wantAuthzOutcomes); diff != "" { + t.Errorf("Authorization decisions do not match\ndiff (-got +want):\n%s", diff) + } + // Compare last event received by statAuditLogger with expected event. 
+ if test.eventContent != nil { + if diff := cmp.Diff(lb.lastEvent, test.eventContent); diff != "" { + t.Errorf("Unexpected message\ndiff (-got +want):\n%s", diff) + } + } + }) + } +} + +// loadServerCreds constructs TLS containing server certs and CA +func loadServerCreds(t *testing.T) credentials.TransportCredentials { + t.Helper() + cert := loadKeys(t, "x509/server1_cert.pem", "x509/server1_key.pem") + certPool := loadCACerts(t, "x509/client_ca_cert.pem") + return credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: []tls.Certificate{cert}, + ClientCAs: certPool, + }) +} + +// loadClientCreds constructs TLS containing client certs and CA +func loadClientCreds(t *testing.T) credentials.TransportCredentials { + t.Helper() + cert := loadKeys(t, "x509/client_with_spiffe_cert.pem", "x509/client_with_spiffe_key.pem") + roots := loadCACerts(t, "x509/server_ca_cert.pem") + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + ServerName: "x.test.example.com", + }) + +} + +// loadKeys loads X509 key pair from the provided file paths. 
+// It is used for loading both client and server certificates for the test +func loadKeys(t *testing.T, certPath, key string) tls.Certificate { + t.Helper() + cert, err := tls.LoadX509KeyPair(testdata.Path(certPath), testdata.Path(key)) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(%q, %q) failed: %v", certPath, key, err) + } + return cert +} + +// loadCACerts loads CA certificates and constructs x509.CertPool +// It is used for loading both client and server CAs for the test +func loadCACerts(t *testing.T, certPath string) *x509.CertPool { + t.Helper() + ca, err := os.ReadFile(testdata.Path(certPath)) + if err != nil { + t.Fatalf("os.ReadFile(%q) failed: %v", certPath, err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(ca) { + t.Fatal("Failed to append certificates") + } + return roots +} diff --git a/testdata/x509/client_with_spiffe_cert.pem b/testdata/x509/client_with_spiffe_cert.pem new file mode 100644 index 000000000000..b982fcbe554f --- /dev/null +++ b/testdata/x509/client_with_spiffe_cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFxzCCA6+gAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIzMDUyMjA1MDA1NVoXDTMzMDUxOTA1MDA1 +NVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANXyLXGYzQFwLGwjzkeuo/y41voDH1Y9J+ee4qJU +OFuMKKXx5ai7n7dik4//J12OqJbbr416cFkKmcojwwbAdncXMV58EF82Bt8QRov0 +Vtoio/wxlyRlxDlVYwr56W+0EVP9Q+kzA/dTnMgOQYIeSix96CUQRy8XDu1YX3rk +fiUkND9xxuQw8OXi3LXguv/lilLVC/lXiXwa0RWEgMZZU2S1/lAElAG3aZuuWULG +K+PpKPuqkcptbUPCvNN1eUs9/D82aoFuqRCmpTC+7bUO+SJSggpUHcgTbXT9i6OO +9eR0ijcaQjtb0Y6ro+Cv60YOnlGC8It3KoY2SxioyqdceRUohqs4T4hjBEckzz11 +AC0Pj0Gp4NJPcOY68EjhD5rvncn76RRr3z2XZpd+2Nz+Fldxk/aaejfdgqs9lo1g +C+aP+nk9oqSpFAc+rpHsblLZehUur/FHhenn1pYWqkSJsAG0sFW4sDHATRIfva3c 
+HNHB5kBzruGymywBGO0xOw7+s5XzPiNnbXT5FBY1rKG7RwlqdtDh6LWJRHmEblWV +tPHNiY+rrStv0rN7Hk/YKcSXd5JiTjk3GXjO1YJJVEraEWHlxzdGy+xu/m0iJLed +pxZwuxxdZ/Q2+Ht+X9pO2DsW8BQFbddCwbooxKygwSlmHCN1gRSWqWMZY5nzsxxY +tic9AgMBAAGjgawwgakwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUyiXne0d3g9zN +gjhCdl2d9ykxIfgwDgYDVR0PAQH/BAQDAgXgMBYGA1UdJQEB/wQMMAoGCCsGAQUF +BwMCMDEGA1UdEQQqMCiGJnNwaWZmZTovL2Zvby5iYXIuY29tL2NsaWVudC93b3Jr +bG9hZC8xMB8GA1UdIwQYMBaAFOr3a0MblN9W9Opu7VsDn3crpoDCMA0GCSqGSIb3 +DQEBCwUAA4ICAQB3pt3mLXDDcReko9eTFahkNyU2zGP7CSi1RcgfP1aJDLBTjePb +JUhoY14tSpOGSliXWscXbNveW+Yk+tB411r8SJuXIkaYZM2BJQDWFzL7aLfAQSx5 +rf8tHVyKO89uBoQtgEnxZp9BFhBp/b2y5DLrZWjM6W9s21C9S9UIFjD8UwrKicNH +HGxIC6AZ6yc0x2Nsq/KW1IZ6HDueZRB4tud75wwkPVpS5fb+XqIJEBP7lgYrJjGp +aLLxV2vn1kX2/qbH31hhWVpNyPkpFsT+IbkPFLDyQoZKHbewD6M56+KBRTTENETQ +hFLgJB0HiICJ2I6cqw1UbDJMJFkcnThsuI8Wg9dxZ+OffYeZ5bnFCVIg0WUi9oMK +JDXZAqYDwBaQHyNszaYzZ5VE2Gd/K8PEDevW4RblI+vAOamIM5w1DjQHWf7n1byt +nGwnxt4IQ5vwlrdX3FDcEkhacHdcniX/FTpYrfOistPh+QpBAvA92DG1CbAf2nKY +yXLx+Ho7tUEBGioU4XvRHccwumfatf5z+JO/EvIi2yWd1tanl5J3o/sSs9ixJfx4 +aSuM+zAwf8EM+YGqYMCZ896+T6/r7NAg+YIDYu1K5b5QqYyPanqNqUf9VTR4oQ4v ++jdb5PkujXbjENvkAhNbUyUbQJ+IU0KHm3/sdhRPN5tuc9C+BTSQvlmKkw== +-----END CERTIFICATE----- diff --git a/testdata/x509/client_with_spiffe_key.pem b/testdata/x509/client_with_spiffe_key.pem new file mode 100644 index 000000000000..6adcdc3122c3 --- /dev/null +++ b/testdata/x509/client_with_spiffe_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDV8i1xmM0BcCxs +I85HrqP8uNb6Ax9WPSfnnuKiVDhbjCil8eWou5+3YpOP/yddjqiW26+NenBZCpnK +I8MGwHZ3FzFefBBfNgbfEEaL9FbaIqP8MZckZcQ5VWMK+elvtBFT/UPpMwP3U5zI +DkGCHkosfeglEEcvFw7tWF965H4lJDQ/ccbkMPDl4ty14Lr/5YpS1Qv5V4l8GtEV +hIDGWVNktf5QBJQBt2mbrllCxivj6Sj7qpHKbW1DwrzTdXlLPfw/NmqBbqkQpqUw +vu21DvkiUoIKVB3IE210/YujjvXkdIo3GkI7W9GOq6Pgr+tGDp5RgvCLdyqGNksY +qMqnXHkVKIarOE+IYwRHJM89dQAtD49BqeDST3DmOvBI4Q+a753J++kUa989l2aX +ftjc/hZXcZP2mno33YKrPZaNYAvmj/p5PaKkqRQHPq6R7G5S2XoVLq/xR4Xp59aW 
+FqpEibABtLBVuLAxwE0SH72t3BzRweZAc67hspssARjtMTsO/rOV8z4jZ210+RQW +Nayhu0cJanbQ4ei1iUR5hG5VlbTxzYmPq60rb9Kzex5P2CnEl3eSYk45Nxl4ztWC +SVRK2hFh5cc3Rsvsbv5tIiS3nacWcLscXWf0Nvh7fl/aTtg7FvAUBW3XQsG6KMSs +oMEpZhwjdYEUlqljGWOZ87McWLYnPQIDAQABAoICAAY5tM7QZm67R9+hrxfw4f6x +ljfSLXBB+U5JFkko8DbhvjEN9+PQCda5PJf9EbUsOIWjQNl6DZjZsR3rqnog0ZGn +kB0yuPs8RDjrbVIXOwu/5EurWb2KZIpSjL4+BWflsndiMD6x6FSjDzXXDFrv7LKc +u0uQzLF3F00avDSEP5NvGUIbWnE7Z1cZIdj9ABQAJuVAI8gOnwaIdTsODv02jjGp +BgxoBbKDFsSb7yb9QzuvhizEitd8FajaGsqAaZYh6JwiRjkb8jl0z+u6MoqJNACm +q/gG+JLg1deIpS6OM2OBbKAr2G+HvXJMVklsdQkl1b+DcuJsBkW/gLHn/3WdQDyq +t9sB8XrUx3S5dy6oroj9tcrwwlpUPbx3/37BX7OEn/NlIWZojI62hGexoFaggu3O +Jg0JJYH8THlQqs9G/oXmRTQKngse2FLEIPePie9vIIvokiQtG4T6miOK+6QmxYZq +H+KPT8AQG8j7AEexo4is1mEayapmTxftIYANOLFaT82BhsUOZksa698Sz7k1Cf/o +MSFn6CocGLflMEzdBqEq0wbBkdeuKUKlXG3ztXlqElN1xFdbzkaP/Tzl1ooq3kLR +0cLBCJNrHxTo1tuW4qTn+s4GLHpM4PE4YZgMehVWtyRFBb7mrSXsESw03POvulBx +65vA86DtCPm/jVuC5lQBAoIBAQD8IWDHYtQnvn/za6etc0fc7eTyq1jmoS/gh33y +eHaY6IccUN2LXCxgTJYwmfy57De58Im5AcOnkgGvw2Hw2i6+A5i4tKkyCa9awW4A +M20QOnyQpF+9uiIqGzI72skpfH20SvgTstTFtgGr3UBOqTfcApL+1X4guvGnY+Cx +uHUAPzbis9G3CNOWb4iiLhUcBnTDZyB3MPM4S1U8E5JLFo86+va6gbqdBP4ac+KH +08XDk/z6ohp9p796o6IiBQyZEsVaYLCrzjSOXeFfE5Fyj2z53oGlws+/PdhXKo02 +3++zRESiLVuGbCmAN17nKwDbZu9kFfGNP2WdwhJt9Yey91I9AoIBAQDZOsXWNkyP +zoDcSrvJznMPFcwQCbMNyU7A+axXpAsxDqn16AQj5/t1PWqufjRSdC7gVUWFcQ2K +ldUHkNyGtqHVCcNpqsMZJT51NlgTOl1A3GLnmm+tAiMCcr7aySeNnlj08fW278Ek +fnxpgUqGtXjTFpArULSFdZulXNPAP85ZDBburJtdhMfiT3GyQ1iRZcXkzsUVzNU1 +nGGk0jtCodlzQKiz3/aHO63G0GAjtdPuXpzGm7nBJSgLD0GabkCdkHDFULOaraYy +t1zsCsg7tQWa4KGRDNkcJKzoz3zf1sI4g87UJceGoXdB+mfluyKtnFhqjFalFW8Y +14Yb8YYdYHkBAoIBAC1pZaED7+poqWsSjNT02pC0WHRM4GpJxfHO9aRihhnsZ8l1 +1zFunJ+Lq9F9KsPiA/d9l5C2/KKF7b/WlSFoatrWkv9RqtfUXr0d8c4fdRljL2Rt +9sCZceXbmCSnt2u9fHaouh3yK9ige5SU+Swx1lnOLOOxWFJU2Ymot6PK8Wfl+uDC +OpeZA2MpG5b6bdrqXsWDIZnWOzh8eRGlBMh5e7rH0QCutQnrCEmDbd3BCvG7Cemq +oNLZD+fq6Rzvg+FePCWXHLsVHOo3how1XhEgPCSVKwzMFdcAMKMiiuTDWM0VEreT 
+K9T+TktFrdY9LJ5X3+5K9YLXVFohxmf/vT1CxpECggEBAIfegeVU+xgrYl/nAoPb +9A1oZcVWO78QvYhn4YrDmRhrApVDNGu86oPPEU3otBMqhjNcQmqPZpfa1W6xBa3g +x2H3hFkwLG0q5WDsx7PnGnK6JcaUyurcXkdmu8ceb/XdJ+i0+ioc1aJc1rYq3xFY +qiTlhPECvpaHE/4fDHa/sfHyZNmN7nNU3KzJYeTMyLXQgTF2vsC+6FBq6ovrzpMD +pn224I35NDorcqrapHdRgCgk10xGFK4g7mXUegT8lr+2m0JfEqdZm403MRCWQd1O +gR35CDUwYw9+RQQs2v8qVTqB/riklKK5lV0YISoInU0XcBncg0koGd/g1gneTDNN +pwECggEBAM4sDCCPplzbyd0yXLGo9P3RYIsNFnKnIm0YGRPrevBaiux3Qhr7Whpi +eV04BJ7Q58Z2WFzPFMhdXU45y4c6jIbmikdplEW1TASgXxOTvTfhg8P8ljdLPx+R +3CvQi4BPkJ3ZtYrHLKXKF/9aseyHLlSzuNUAJ6H0YxVi0tmzCFG82SWcFOzhR2Ec +cWDptGTRt9YY+Eo5rhPYbX/s8fCcW2u9QGnRnX35F8vJOp8Q7eCONIaN6faV4Yos +1wk6WXjZfDgEdjxmrnqXrgxdup82uD4Q1agmkxAjPl/9frLtHMW87Y0OixJb/Sve +eSCMKThlBQ57WubHTi2TbFBVKph/rP0= +-----END PRIVATE KEY----- diff --git a/testdata/x509/client_with_spiffe_openssl.cnf b/testdata/x509/client_with_spiffe_openssl.cnf new file mode 100644 index 000000000000..cf96f271d4a5 --- /dev/null +++ b/testdata/x509/client_with_spiffe_openssl.cnf @@ -0,0 +1,17 @@ +[req] +distinguished_name = req_distinguished_name +attributes = req_attributes + +[req_distinguished_name] + +[req_attributes] + +[test_client] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,nonRepudiation,digitalSignature,keyEncipherment +extendedKeyUsage = critical,clientAuth +subjectAltName = @alt_names + +[alt_names] +URI = spiffe://foo.bar.com/client/workload/1 \ No newline at end of file diff --git a/testdata/x509/create.sh b/testdata/x509/create.sh index 2cbc971fb8d7..378bd10cf24f 100755 --- a/testdata/x509/create.sh +++ b/testdata/x509/create.sh @@ -128,5 +128,24 @@ openssl req -x509 \ -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1, URI:https://bar.baz.com/client" \ -sha256 +# Generate a cert with SPIFFE ID using client_with_spiffe_openssl.cnf +openssl req -new \ + -key client_with_spiffe_key.pem \ + -out client_with_spiffe_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ 
+ -config ./client_with_spiffe_openssl.cnf \ + -reqexts test_client +openssl x509 -req \ + -in client_with_spiffe_csr.pem \ + -CAkey client_ca_key.pem \ + -CA client_ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out client_with_spiffe_cert.pem \ + -extfile ./client_with_spiffe_openssl.cnf \ + -extensions test_client \ + -sha256 +openssl verify -verbose -CAfile client_with_spiffe_cert.pem + # Cleanup the CSRs. rm *_csr.pem From 02188e64374d15d04449ecee04fa054b2d8a81af Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 2 Jun 2023 10:25:54 -0700 Subject: [PATCH 952/998] Change version to 1.57.0-dev (#6346) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 310603bed415..387ebf5970f8 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.56.0-dev" +const Version = "1.57.0-dev" From c9d3ea5673252d212c69f3d3c10ce1d7b287a86b Mon Sep 17 00:00:00 2001 From: Chris Smith Date: Fri, 2 Jun 2023 11:38:02 -0600 Subject: [PATCH 953/998] deps: google.golang.org/genproto to latest in all modules (#6319) --- examples/features/observability/go.mod | 14 ++++++------ examples/features/observability/go.sum | 30 +++++++++++++++----------- examples/go.mod | 4 +++- examples/go.sum | 11 ++++++++-- gcp/observability/go.mod | 4 +++- gcp/observability/go.sum | 11 ++++++++-- go.mod | 4 +++- go.sum | 8 +++++-- interop/observability/go.mod | 4 +++- interop/observability/go.sum | 11 ++++++++-- security/advancedtls/examples/go.mod | 2 +- security/advancedtls/examples/go.sum | 4 ++-- security/advancedtls/go.mod | 2 +- security/advancedtls/go.sum | 4 ++-- security/authorization/go.mod | 3 ++- security/authorization/go.sum | 6 ++++-- stats/opencensus/go.mod | 2 +- stats/opencensus/go.sum | 9 ++++++-- 18 files changed, 90 insertions(+), 43 deletions(-) diff --git a/examples/features/observability/go.mod b/examples/features/observability/go.mod index 
657aca310d1e..a7b3e2f8dc38 100644 --- a/examples/features/observability/go.mod +++ b/examples/features/observability/go.mod @@ -10,12 +10,12 @@ require ( require ( cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/logging v1.7.0 // indirect cloud.google.com/go/longrunning v0.4.1 // indirect - cloud.google.com/go/monitoring v1.12.0 // indirect - cloud.google.com/go/trace v1.8.0 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -24,7 +24,7 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect go.opencensus.io v0.24.0 // indirect @@ -33,9 +33,11 @@ require ( golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect - google.golang.org/api v0.110.0 // indirect + google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230525234030-28d5490b6b19 // indirect google.golang.org/grpc/stats/opencensus v1.0.0 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/examples/features/observability/go.sum b/examples/features/observability/go.sum index a9ca780544dd..d28df85cd149 100644 --- a/examples/features/observability/go.sum +++ b/examples/features/observability/go.sum @@ -33,20 +33,20 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= 
cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= -cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -57,8 +57,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= -cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -176,8 +176,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -498,8 +498,8 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -568,8 +568,12 @@ google.golang.org/genproto 
v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/examples/go.mod b/examples/go.mod index c631aae7da39..b46a2e93828b 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -6,7 +6,7 @@ require ( github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 github.com/golang/protobuf v1.5.3 golang.org/x/oauth2 v0.7.0 - google.golang.org/genproto 
v0.0.0-20230410155749-daa745c078e1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 google.golang.org/grpc v1.54.0 google.golang.org/protobuf v1.30.0 ) @@ -23,6 +23,8 @@ require ( golang.org/x/sys v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect ) replace google.golang.org/grpc => ../ diff --git a/examples/go.sum b/examples/go.sum index d257912f6717..1d44586616cd 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1381,8 +1381,15 @@ google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVix google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= 
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 9262d214dd96..6f186e92ef59 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -34,7 +34,9 @@ require ( golang.org/x/sys v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 4e70d82da940..eb97cabf6707 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1415,8 +1415,15 @@ google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVix google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto 
v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/stats/opencensus v1.0.0 h1:evSYcRZaSToQp+borzWE52+03joezZeXcKJvZDfkUJA= google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= diff --git a/go.mod b/go.mod index 088c703575da..4852f39474bc 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( 
golang.org/x/net v0.9.0 golang.org/x/oauth2 v0.7.0 golang.org/x/sys v0.7.0 - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 google.golang.org/protobuf v1.30.0 ) @@ -25,4 +25,6 @@ require ( github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect ) diff --git a/go.sum b/go.sum index 4e7adc822040..59a6385afade 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,12 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/interop/observability/go.mod b/interop/observability/go.mod index 784ea504d1a0..6b53ad2bf320 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -36,7 +36,9 @@ require ( golang.org/x/text v0.9.0 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect google.golang.org/grpc/stats/opencensus v1.0.0 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/interop/observability/go.sum b/interop/observability/go.sum index b21857e14a68..5b811d50e3f7 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -1417,8 +1417,15 @@ google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVix google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod 
h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index 073dd1302718..f59f6f900251 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -14,7 +14,7 @@ require ( golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230525234030-28d5490b6b19 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 04465db1aee4..91c31e699877 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -13,8 +13,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 19b35bb2ee97..928f86fff832 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -14,7 +14,7 @@ require ( golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git 
a/security/advancedtls/go.sum b/security/advancedtls/go.sum index e73f06fbc4ee..2766a3d8223a 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -14,8 +14,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= diff --git a/security/authorization/go.mod b/security/authorization/go.mod index b908dad955c8..68745b91c164 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -6,7 +6,7 @@ require ( github.com/envoyproxy/go-control-plane v0.11.0 github.com/google/cel-go v0.14.0 github.com/google/go-cmp v0.5.9 - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 google.golang.org/grpc v1.54.0 google.golang.org/protobuf v1.30.0 ) @@ -19,4 +19,5 @@ require ( github.com/stoewer/go-strcase v1.3.0 // indirect golang.org/x/exp 
v0.0.0-20230418202329-0354be287a23 // indirect golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index 9cc3fb5a8cf2..4147bd7420a4 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -71,8 +71,10 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index 14828f4c2471..45e95ac7d1c7 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -14,7 +14,7 @@ require ( 
golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect google.golang.org/protobuf v1.30.0 // indirect ) diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 48faad4b66dd..4d2b719ef23d 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -1373,8 +1373,13 @@ google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVix google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= From 68576b3c42bbecacfeba3bee89f509f1bb7b0072 Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Tue, 6 Jun 2023 08:36:01 -0700 Subject: [PATCH 954/998] client: encode the authority by default (#6318) --- clientconn.go | 7 ++++++- test/authority_test.go | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/clientconn.go b/clientconn.go index 314addcaa1cb..91a3b605c931 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1870,7 +1870,12 @@ func (cc *ClientConn) determineAuthority() error { // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - cc.authority = endpoint + + // Path escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. 
+ cc.authority = url.PathEscape(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil diff --git a/test/authority_test.go b/test/authority_test.go index 44095a23a2fe..a4d481f24f92 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -126,7 +126,7 @@ var authorityTests = []authorityTest{ name: "UnixPassthrough", address: "/tmp/sock.sock", target: "passthrough:///unix:///tmp/sock.sock", - authority: "unix:///tmp/sock.sock", + authority: "unix:%2F%2F%2Ftmp%2Fsock.sock", dialTargetWant: "unix:///tmp/sock.sock", }, { From 81c513a49ce51a1681c311363d065b5431354a21 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 6 Jun 2023 10:37:24 -0700 Subject: [PATCH 955/998] opencensus: stop overwriting ctx parameter in tests (#6350) --- stats/opencensus/e2e_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index d70d9f87024d..1f68675f0c1d 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -1342,8 +1342,6 @@ func (fe *fakeExporter) ExportSpan(sd *trace.SpanData) { // list in an exporter. Returns an error if no server span found within the // passed context's timeout. 
func waitForServerSpan(ctx context.Context, fe *fakeExporter) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() for ; ctx.Err() == nil; <-time.After(time.Millisecond) { fe.mu.Lock() for _, seenSpan := range fe.seenSpans { From 1b6666374d69729c8820b3e4e1e4efb971a00d08 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 6 Jun 2023 15:09:16 -0600 Subject: [PATCH 956/998] benchmark: Add sleepBetweenRPCs and connections parameters (#6299) --- benchmark/benchmain/main.go | 217 ++++++++++++++++++++++-------------- benchmark/benchmark.go | 21 +++- benchmark/stats/stats.go | 12 +- 3 files changed, 164 insertions(+), 86 deletions(-) diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 3d054f358037..971a2d453c9a 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -47,6 +47,7 @@ import ( "fmt" "io" "log" + "math/rand" "net" "os" "reflect" @@ -109,6 +110,8 @@ var ( clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a a comma-separated list") serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a a comma-separated list") serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a a comma-separated list") + sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a a comma-separated list") + connections = flag.Int("connections", 1, "The number of connections. 
Each connection will handle maxConcurrentCalls RPC streams") logger = grpclog.Component("benchmark") ) @@ -194,9 +197,9 @@ func runModesFromWorkloads(workload string) runModes { type startFunc func(mode string, bf stats.Features) type stopFunc func(count uint64) type ucStopFunc func(req uint64, resp uint64) -type rpcCallFunc func(pos int) -type rpcSendFunc func(pos int) -type rpcRecvFunc func(pos int) +type rpcCallFunc func(cn, pos int) +type rpcSendFunc func(cn, pos int) +type rpcRecvFunc func(cn, pos int) type rpcCleanupFunc func() func unaryBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats) { @@ -233,40 +236,46 @@ func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Fea bmEnd := time.Now().Add(bf.BenchTime + warmuptime) var wg sync.WaitGroup - wg.Add(2 * bf.MaxConcurrentCalls) - for i := 0; i < bf.MaxConcurrentCalls; i++ { - go func(pos int) { - defer wg.Done() - for { - t := time.Now() - if t.After(bmEnd) { - return + wg.Add(2 * bf.Connections * bf.MaxConcurrentCalls) + maxSleep := int(bf.SleepBetweenRPCs) + for cn := 0; cn < bf.Connections; cn++ { + for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { + go func(cn, pos int) { + defer wg.Done() + for { + if maxSleep > 0 { + time.Sleep(time.Duration(rand.Intn(maxSleep))) + } + t := time.Now() + if t.After(bmEnd) { + return + } + sender(cn, pos) + atomic.AddUint64(&req, 1) } - sender(pos) - atomic.AddUint64(&req, 1) - } - }(i) - go func(pos int) { - defer wg.Done() - for { - t := time.Now() - if t.After(bmEnd) { - return + }(cn, pos) + go func(cn, pos int) { + defer wg.Done() + for { + t := time.Now() + if t.After(bmEnd) { + return + } + recver(cn, pos) + atomic.AddUint64(&resp, 1) } - recver(pos) - atomic.AddUint64(&resp, 1) - } - }(i) + }(cn, pos) + } } wg.Wait() stop(req, resp) } -// makeClient returns a gRPC client for the grpc.testing.BenchmarkService +// makeClients returns a gRPC client (or multiple clients) for the grpc.testing.BenchmarkService // service. 
The client is configured using the different options in the passed // 'bf'. Also returns a cleanup function to close the client and release // resources. -func makeClient(bf stats.Features) (testgrpc.BenchmarkServiceClient, func()) { +func makeClients(bf stats.Features) ([]testpb.BenchmarkServiceClient, func()) { nw := &latency.Network{Kbps: bf.Kbps, Latency: bf.Latency, MTU: bf.MTU} opts := []grpc.DialOption{} sopts := []grpc.ServerOption{} @@ -346,16 +355,24 @@ func makeClient(bf stats.Features) (testgrpc.BenchmarkServiceClient, func()) { } lis = nw.Listener(lis) stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...) - conn := bm.NewClientConn("" /* target not used */, opts...) - return testgrpc.NewBenchmarkServiceClient(conn), func() { - conn.Close() + conns := make([]*grpc.ClientConn, bf.Connections) + clients := make([]testpb.BenchmarkServiceClient, bf.Connections) + for cn := 0; cn < bf.Connections; cn++ { + conns[cn] = bm.NewClientConn("" /* target not used */, opts...) 
+ clients[cn] = testgrpc.NewBenchmarkServiceClient(conns[cn]) + } + + return clients, func() { + for _, conn := range conns { + conn.Close() + } stopper() } } func makeFuncUnary(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { - tc, cleanup := makeClient(bf) - return func(int) { + clients, cleanup := makeClients(bf) + return func(cn, pos int) { reqSizeBytes := bf.ReqSizeBytes respSizeBytes := bf.RespSizeBytes if bf.ReqPayloadCurve != nil { @@ -364,23 +381,28 @@ func makeFuncUnary(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { if bf.RespPayloadCurve != nil { respSizeBytes = bf.RespPayloadCurve.ChooseRandom() } - unaryCaller(tc, reqSizeBytes, respSizeBytes) + unaryCaller(clients[cn], reqSizeBytes, respSizeBytes) }, cleanup } func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { - tc, cleanup := makeClient(bf) + clients, cleanup := makeClients(bf) - streams := make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) - for i := 0; i < bf.MaxConcurrentCalls; i++ { - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + streams := make([][]testgrpc.BenchmarkService_StreamingCallClient, bf.Connections) + for cn := 0; cn < bf.Connections; cn++ { + tc := clients[cn] + streams[cn] = make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) + for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { + + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + streams[cn][pos] = stream } - streams[i] = stream } - return func(pos int) { + return func(cn, pos int) { reqSizeBytes := bf.ReqSizeBytes respSizeBytes := bf.RespSizeBytes if bf.ReqPayloadCurve != nil { @@ -389,51 +411,59 @@ func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { if bf.RespPayloadCurve != nil { respSizeBytes = bf.RespPayloadCurve.ChooseRandom() } - streamCaller(streams[pos], 
reqSizeBytes, respSizeBytes) + streamCaller(streams[cn][pos], reqSizeBytes, respSizeBytes) }, cleanup } func makeFuncUnconstrainedStreamPreloaded(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { streams, req, cleanup := setupUnconstrainedStream(bf) - preparedMsg := make([]*grpc.PreparedMsg, len(streams)) - for i, stream := range streams { - preparedMsg[i] = &grpc.PreparedMsg{} - err := preparedMsg[i].Encode(stream, req) - if err != nil { - logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[i], req, stream, err) + preparedMsg := make([][]*grpc.PreparedMsg, len(streams)) + for cn, connStreams := range streams { + preparedMsg[cn] = make([]*grpc.PreparedMsg, len(connStreams)) + for pos, stream := range connStreams { + preparedMsg[cn][pos] = &grpc.PreparedMsg{} + err := preparedMsg[cn][pos].Encode(stream, req) + if err != nil { + logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[cn][pos], req, stream, err) + } } } - return func(pos int) { - streams[pos].SendMsg(preparedMsg[pos]) - }, func(pos int) { - streams[pos].Recv() + return func(cn, pos int) { + streams[cn][pos].SendMsg(preparedMsg[cn][pos]) + }, func(cn, pos int) { + streams[cn][pos].Recv() }, cleanup } func makeFuncUnconstrainedStream(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { streams, req, cleanup := setupUnconstrainedStream(bf) - return func(pos int) { - streams[pos].Send(req) - }, func(pos int) { - streams[pos].Recv() + return func(cn, pos int) { + streams[cn][pos].Send(req) + }, func(cn, pos int) { + streams[cn][pos].Recv() }, cleanup } -func setupUnconstrainedStream(bf stats.Features) ([]testgrpc.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) { - tc, cleanup := makeClient(bf) +func setupUnconstrainedStream(bf stats.Features) ([][]testgrpc.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) { + clients, cleanup := makeClients(bf) - streams := make([]testgrpc.BenchmarkService_StreamingCallClient, 
bf.MaxConcurrentCalls) - md := metadata.Pairs(benchmark.UnconstrainedStreamingHeader, "1") + streams := make([][]testgrpc.BenchmarkService_StreamingCallClient, bf.Connections) + md := metadata.Pairs(benchmark.UnconstrainedStreamingHeader, "1", + benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String()) ctx := metadata.NewOutgoingContext(context.Background(), md) - for i := 0; i < bf.MaxConcurrentCalls; i++ { - stream, err := tc.StreamingCall(ctx) - if err != nil { - logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + for cn := 0; cn < bf.Connections; cn++ { + tc := clients[cn] + streams[cn] = make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) + for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { + stream, err := tc.StreamingCall(ctx) + if err != nil { + logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + streams[cn][pos] = stream } - streams[i] = stream } pl := bm.NewPayload(testpb.PayloadType_COMPRESSABLE, bf.ReqSizeBytes) @@ -461,32 +491,45 @@ func streamCaller(stream testgrpc.BenchmarkService_StreamingCallClient, reqSize, } func runBenchmark(caller rpcCallFunc, start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats, mode string) { - // Warm up connection. - for i := 0; i < warmupCallCount; i++ { - caller(0) + // if SleepBetweenRPCs > 0 we skip the warmup because otherwise + // we are going to send a set of simultaneous requests on every connection, + // which is something we are trying to avoid when using SleepBetweenRPCs. + if bf.SleepBetweenRPCs == 0 { + // Warm up connections. + for i := 0; i < warmupCallCount; i++ { + for cn := 0; cn < bf.Connections; cn++ { + caller(cn, 0) + } + } } // Run benchmark. 
start(mode, bf) var wg sync.WaitGroup - wg.Add(bf.MaxConcurrentCalls) + wg.Add(bf.Connections * bf.MaxConcurrentCalls) bmEnd := time.Now().Add(bf.BenchTime) + maxSleep := int(bf.SleepBetweenRPCs) var count uint64 - for i := 0; i < bf.MaxConcurrentCalls; i++ { - go func(pos int) { - defer wg.Done() - for { - t := time.Now() - if t.After(bmEnd) { - return + for cn := 0; cn < bf.Connections; cn++ { + for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { + go func(cn, pos int) { + defer wg.Done() + for { + if maxSleep > 0 { + time.Sleep(time.Duration(rand.Intn(maxSleep))) + } + t := time.Now() + if t.After(bmEnd) { + return + } + start := time.Now() + caller(cn, pos) + elapse := time.Since(start) + atomic.AddUint64(&count, 1) + s.AddDuration(elapse) } - start := time.Now() - caller(pos) - elapse := time.Since(start) - atomic.AddUint64(&count, 1) - s.AddDuration(elapse) - } - }(i) + }(cn, pos) + } } wg.Wait() stop(count) @@ -504,6 +547,7 @@ type benchOpts struct { benchmarkResultFile string useBufconn bool enableKeepalive bool + connections int features *featureOpts } @@ -528,6 +572,7 @@ type featureOpts struct { clientWriteBufferSize []int serverReadBufferSize []int serverWriteBufferSize []int + sleepBetweenRPCs []time.Duration } // makeFeaturesNum returns a slice of ints of size 'maxFeatureIndex' where each @@ -572,6 +617,8 @@ func makeFeaturesNum(b *benchOpts) []int { featuresNum[i] = len(b.features.serverReadBufferSize) case stats.ServerWriteBufferSize: featuresNum[i] = len(b.features.serverWriteBufferSize) + case stats.SleepBetweenRPCs: + featuresNum[i] = len(b.features.sleepBetweenRPCs) default: log.Fatalf("Unknown feature index %v in generateFeatures. 
maxFeatureIndex is %v", i, stats.MaxFeatureIndex) } @@ -625,6 +672,7 @@ func (b *benchOpts) generateFeatures(featuresNum []int) []stats.Features { UseBufConn: b.useBufconn, EnableKeepalive: b.enableKeepalive, BenchTime: b.benchTime, + Connections: b.connections, // These features can potentially change for each iteration. EnableTrace: b.features.enableTrace[curPos[stats.EnableTraceIndex]], Latency: b.features.readLatencies[curPos[stats.ReadLatenciesIndex]], @@ -638,6 +686,7 @@ func (b *benchOpts) generateFeatures(featuresNum []int) []stats.Features { ClientWriteBufferSize: b.features.clientWriteBufferSize[curPos[stats.ClientWriteBufferSize]], ServerReadBufferSize: b.features.serverReadBufferSize[curPos[stats.ServerReadBufferSize]], ServerWriteBufferSize: b.features.serverWriteBufferSize[curPos[stats.ServerWriteBufferSize]], + SleepBetweenRPCs: b.features.sleepBetweenRPCs[curPos[stats.SleepBetweenRPCs]], } if len(b.features.reqPayloadCurves) == 0 { f.ReqSizeBytes = b.features.reqSizeBytes[curPos[stats.ReqSizeBytesIndex]] @@ -693,6 +742,7 @@ func processFlags() *benchOpts { benchmarkResultFile: *benchmarkResultFile, useBufconn: *useBufconn, enableKeepalive: *enableKeepalive, + connections: *connections, features: &featureOpts{ enableTrace: setToggleMode(*traceMode), readLatencies: append([]time.Duration(nil), *readLatency...), @@ -708,6 +758,7 @@ func processFlags() *benchOpts { clientWriteBufferSize: append([]int(nil), *clientWriteBufferSize...), serverReadBufferSize: append([]int(nil), *serverReadBufferSize...), serverWriteBufferSize: append([]int(nil), *serverWriteBufferSize...), + sleepBetweenRPCs: append([]time.Duration(nil), *sleepBetweenRPCs...), }, } diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index b2c3356abea9..2e11167004db 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -26,7 +26,9 @@ import ( "fmt" "io" "log" + "math/rand" "net" + "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -77,6 +79,10 @@ func (s 
*testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* // of ping-pong. const UnconstrainedStreamingHeader = "unconstrained-streaming" +// UnconstrainedStreamingDelayHeader is used to pass the maximum amount of time +// the server should sleep between consecutive RPC responses. +const UnconstrainedStreamingDelayHeader = "unconstrained-streaming-delay" + func (s *testServer) StreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer) error { if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[UnconstrainedStreamingHeader]) != 0 { return s.UnconstrainedStreamingCall(stream) @@ -103,6 +109,16 @@ func (s *testServer) StreamingCall(stream testgrpc.BenchmarkService_StreamingCal } func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer) error { + maxSleep := 0 + if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[UnconstrainedStreamingDelayHeader]) != 0 { + val := md[UnconstrainedStreamingDelayHeader][0] + d, err := time.ParseDuration(val) + if err != nil { + return fmt.Errorf("can't parse %q header: %s", UnconstrainedStreamingDelayHeader, err) + } + maxSleep = int(d) + } + in := new(testpb.SimpleRequest) // Receive a message to learn response type and size. 
err := stream.RecvMsg(in) @@ -135,9 +151,12 @@ func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService go func() { for { + if maxSleep > 0 { + time.Sleep(time.Duration(rand.Intn(maxSleep))) + } err := stream.Send(response) switch status.Code(err) { - case codes.Unavailable: + case codes.Unavailable, codes.Canceled: return case codes.OK: default: diff --git a/benchmark/stats/stats.go b/benchmark/stats/stats.go index f5d8666648df..74070fd76c07 100644 --- a/benchmark/stats/stats.go +++ b/benchmark/stats/stats.go @@ -56,6 +56,7 @@ const ( ClientWriteBufferSize ServerReadBufferSize ServerWriteBufferSize + SleepBetweenRPCs // MaxFeatureIndex is a place holder to indicate the total number of feature // indices we have. Any new feature indices should be added above this. @@ -78,6 +79,8 @@ type Features struct { EnableKeepalive bool // BenchTime indicates the duration of the benchmark run. BenchTime time.Duration + // Connections configures the number of grpc connections between client and server. + Connections int // Features defined above are usually the same for all benchmark runs in a // particular invocation, while the features defined below could vary from @@ -121,6 +124,8 @@ type Features struct { ServerReadBufferSize int // ServerWriteBufferSize is the size of the server write buffer in bytes. If negative, use the default buffer size. ServerWriteBufferSize int + // SleepBetweenRPCs configures optional delay between RPCs. + SleepBetweenRPCs time.Duration } // String returns all the feature values as a string. 
@@ -139,12 +144,13 @@ func (f Features) String() string { return fmt.Sprintf("networkMode_%v-bufConn_%v-keepalive_%v-benchTime_%v-"+ "trace_%v-latency_%v-kbps_%v-MTU_%v-maxConcurrentCalls_%v-%s-%s-"+ "compressor_%v-channelz_%v-preloader_%v-clientReadBufferSize_%v-"+ - "clientWriteBufferSize_%v-serverReadBufferSize_%v-serverWriteBufferSize_%v-", + "clientWriteBufferSize_%v-serverReadBufferSize_%v-serverWriteBufferSize_%v-"+ + "sleepBetweenRPCs_%v-connections_%v-", f.NetworkMode, f.UseBufConn, f.EnableKeepalive, f.BenchTime, f.EnableTrace, f.Latency, f.Kbps, f.MTU, f.MaxConcurrentCalls, reqPayloadString, respPayloadString, f.ModeCompressor, f.EnableChannelz, f.EnablePreloader, f.ClientReadBufferSize, f.ClientWriteBufferSize, f.ServerReadBufferSize, - f.ServerWriteBufferSize) + f.ServerWriteBufferSize, f.SleepBetweenRPCs, f.Connections) } // SharedFeatures returns the shared features as a pretty printable string. @@ -216,6 +222,8 @@ func (f Features) partialString(b *bytes.Buffer, wantFeatures []bool, sep, delim b.WriteString(fmt.Sprintf("ServerReadBufferSize%v%v%v", sep, f.ServerReadBufferSize, delim)) case ServerWriteBufferSize: b.WriteString(fmt.Sprintf("ServerWriteBufferSize%v%v%v", sep, f.ServerWriteBufferSize, delim)) + case SleepBetweenRPCs: + b.WriteString(fmt.Sprintf("SleepBetweenRPCs%v%v%v", sep, f.SleepBetweenRPCs, delim)) default: log.Fatalf("Unknown feature index %v. 
maxFeatureIndex is %v", i, MaxFeatureIndex) } From 761c084e5ac8669104533efa7c614aa1f881c96a Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 6 Jun 2023 17:09:22 -0700 Subject: [PATCH 957/998] xds/ringhash: cache connectivity state of subchannels inside picker (#6351) --- .../ringhash/e2e/ringhash_balancer_test.go | 5 +--- xds/internal/balancer/ringhash/picker.go | 15 ++++++++---- xds/internal/balancer/ringhash/picker_test.go | 10 ++++---- xds/internal/balancer/ringhash/ringhash.go | 24 ++++--------------- 4 files changed, 22 insertions(+), 32 deletions(-) diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go index 00da6e88fb95..4105e3550b7c 100644 --- a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -119,10 +119,7 @@ func (s) TestRingHash_ReconnectToMoveOutOfTransientFailure(t *testing.T) { // Make an RPC to get the ring_hash LB policy to reconnect and thereby move // to TRANSIENT_FAILURE upon connection failure. 
client.EmptyCall(ctx, &testpb.Empty{}) - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - if cc.GetState() == connectivity.TransientFailure { - break - } + for state := cc.GetState(); state != connectivity.TransientFailure && cc.WaitForStateChange(ctx, state); state = cc.GetState() { } if err := ctx.Err(); err != nil { t.Fatalf("Timeout waiting for channel to reach %q after server shutdown: %v", connectivity.TransientFailure, err) diff --git a/xds/internal/balancer/ringhash/picker.go b/xds/internal/balancer/ringhash/picker.go index ec3b5605690d..b450716fa0f0 100644 --- a/xds/internal/balancer/ringhash/picker.go +++ b/xds/internal/balancer/ringhash/picker.go @@ -29,12 +29,17 @@ import ( ) type picker struct { - ring *ring - logger *grpclog.PrefixLogger + ring *ring + logger *grpclog.PrefixLogger + subConnStates map[*subConn]connectivity.State } func newPicker(ring *ring, logger *grpclog.PrefixLogger) *picker { - return &picker{ring: ring, logger: logger} + states := make(map[*subConn]connectivity.State) + for _, e := range ring.items { + states[e.sc] = e.sc.effectiveState() + } + return &picker{ring: ring, logger: logger, subConnStates: states} } // handleRICSResult is the return type of handleRICS. It's needed to wrap the @@ -54,7 +59,7 @@ type handleRICSResult struct { // or Shutdown. If it's true, the PickResult and error should be returned from // Pick() as is. func (p *picker) handleRICS(e *ringEntry) (handleRICSResult, bool) { - switch state := e.sc.effectiveState(); state { + switch state := p.subConnStates[e.sc]; state { case connectivity.Ready: return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true case connectivity.Idle: @@ -118,7 +123,7 @@ func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, erro // but don't not trigger Connect() on the other SubConns. 
var firstNonFailedFound bool for ee := nextSkippingDuplicates(p.ring, e2); ee != e; ee = nextSkippingDuplicates(p.ring, ee) { - scState := ee.sc.effectiveState() + scState := p.subConnStates[ee.sc] if scState == connectivity.Ready { return balancer.PickResult{SubConn: ee.sc.sc}, nil } diff --git a/xds/internal/balancer/ringhash/picker_test.go b/xds/internal/balancer/ringhash/picker_test.go index 5963110b0ff1..7accb1b4c00f 100644 --- a/xds/internal/balancer/ringhash/picker_test.go +++ b/xds/internal/balancer/ringhash/picker_test.go @@ -26,6 +26,8 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + igrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/testutils" ) @@ -96,7 +98,7 @@ func (s) TestPickerPickFirstTwo(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - p := &picker{ring: tt.ring} + p := newPicker(tt.ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) got, err := p.Pick(balancer.PickInfo{ Ctx: SetRequestHash(context.Background(), tt.hash), }) @@ -126,7 +128,7 @@ func (s) TestPickerPickTriggerTFConnect(t *testing.T) { connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, }) - p := &picker{ring: ring} + p := newPicker(ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) _, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) if err == nil { t.Fatalf("Pick() error = %v, want non-nil", err) @@ -156,7 +158,7 @@ func (s) TestPickerPickTriggerTFReturnReady(t *testing.T) { ring := newTestRing([]connectivity.State{ connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Ready, }) - p := &picker{ring: ring} + 
p := newPicker(ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) pr, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) if err != nil { t.Fatalf("Pick() error = %v, want nil", err) @@ -182,7 +184,7 @@ func (s) TestPickerPickTriggerTFWithIdle(t *testing.T) { ring := newTestRing([]connectivity.State{ connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, }) - p := &picker{ring: ring} + p := newPicker(ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) _, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) if err == balancer.ErrNoSubConnAvailable { t.Fatalf("Pick() error = %v, want %v", err, balancer.ErrNoSubConnAvailable) diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go index b9caefa63a2d..005efd1c581c 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -347,37 +347,23 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance newSCState := scs.effectiveState() b.logger.Infof("SubConn's effective old state was: %v, new state is %v", oldSCState, newSCState) - var sendUpdate bool - oldBalancerState := b.state b.state = b.csEvltr.recordTransition(oldSCState, newSCState) - if oldBalancerState != b.state { - sendUpdate = true - } switch s { - case connectivity.Idle: - // No need to send an update. No queued RPC can be unblocked. If the - // overall state changed because of this, sendUpdate is already true. - case connectivity.Connecting: - // No need to send an update. No queued RPC can be unblocked. If the - // overall state changed because of this, sendUpdate is already true. 
- case connectivity.Ready: - // We need to regenerate the picker even if the ring has not changed - // because we could be moving from TRANSIENT_FAILURE to READY, in which - // case, we need to update the error picker returned earlier. - b.regeneratePicker() - sendUpdate = true case connectivity.TransientFailure: // Save error to be reported via picker. b.connErr = state.ConnectionError - b.regeneratePicker() case connectivity.Shutdown: // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) } - if sendUpdate { + if oldSCState != newSCState { + // Because the picker caches the state of the subconns, we always + // regenerate and update the picker when the effective SubConn state + // changes. + b.regeneratePicker() b.logger.Infof("Pushing new state %v and picker %p", b.state, b.picker) b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } From 6578ef72240d14f83140602042a3ed9b4faa9b2f Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 7 Jun 2023 08:37:11 -0700 Subject: [PATCH 958/998] client: handle empty address lists correctly in addrConn.updateAddrs (#6354) --- clientconn.go | 6 ++- test/subconn_test.go | 125 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 2 deletions(-) create mode 100644 test/subconn_test.go diff --git a/clientconn.go b/clientconn.go index 91a3b605c931..a27c0573dc1d 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1033,8 +1033,10 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // We have to defer here because GracefulClose => Close => onClose, which // requires locking ac.mu. 
- defer ac.transport.GracefulClose() - ac.transport = nil + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil + } if len(addrs) == 0 { ac.updateConnectivityState(connectivity.Idle, nil) diff --git a/test/subconn_test.go b/test/subconn_test.go new file mode 100644 index 000000000000..524acf9f70e3 --- /dev/null +++ b/test/subconn_test.go @@ -0,0 +1,125 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/stubserver" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/resolver" +) + +type tsccPicker struct { + sc balancer.SubConn +} + +func (p *tsccPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{SubConn: p.sc}, nil +} + +// TestSubConnEmpty tests that removing all addresses from a SubConn and then +// re-adding them does not cause a panic and properly reconnects. +func (s) TestSubConnEmpty(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // sc is the one SubConn used throughout the test. Created on demand and + // re-used on every update. 
+ var sc balancer.SubConn + + // Simple custom balancer that sets the address list to empty if the + // resolver produces no addresses. Pickfirst, by default, will remove the + // SubConn in this case instead. + bal := stub.BalancerFuncs{ + UpdateClientConnState: func(d *stub.BalancerData, ccs balancer.ClientConnState) error { + if sc == nil { + var err error + sc, err = d.ClientConn.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error creating initial subconn: %v", err) + } + } else { + d.ClientConn.UpdateAddresses(sc, ccs.ResolverState.Addresses) + } + sc.Connect() + + if len(ccs.ResolverState.Addresses) == 0 { + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(errors.New("no addresses")), + }) + } else { + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &tsccPicker{sc: sc}, + }) + } + return nil + }, + UpdateSubConnState: func(d *stub.BalancerData, sc balancer.SubConn, scs balancer.SubConnState) { + switch scs.ConnectivityState { + case connectivity.Ready: + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &tsccPicker{sc: sc}, + }) + case connectivity.TransientFailure: + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(fmt.Errorf("error connecting: %v", scs.ConnectionError)), + }) + } + }, + } + stub.Register("tscc", bal) + + // Start the stub server with our stub balancer. 
+ ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil, grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"tscc":{}}]}`)); err != nil { + t.Fatalf("Error starting server: %v", err) + } + defer ss.Stop() + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } + + t.Log("Removing addresses from resolver and SubConn") + ss.R.UpdateState(resolver.State{Addresses: []resolver.Address{}}) + awaitState(ctx, t, ss.CC, connectivity.TransientFailure) + + t.Log("Re-adding addresses to resolver and SubConn") + ss.R.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address}}}) + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } +} From 7aeea8f496e0c8cb7c356e467919b74a13820746 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 7 Jun 2023 13:31:08 -0700 Subject: [PATCH 959/998] orca: add application utilization and range checking (#6357) --- examples/go.mod | 2 +- examples/go.sum | 4 +- gcp/observability/go.sum | 2 +- go.mod | 2 +- go.sum | 4 +- interop/observability/go.mod | 2 +- interop/observability/go.sum | 4 +- orca/call_metrics_test.go | 32 ++----- orca/orca_test.go | 14 ++- orca/producer_test.go | 28 +++--- orca/server_metrics.go | 127 ++++++++++++++++++++----- orca/server_metrics_test.go | 175 +++++++++++++++++++++++++++++++++++ orca/service_test.go | 13 ++- stats/opencensus/go.sum | 2 +- 14 files changed, 335 insertions(+), 76 deletions(-) create mode 100644 orca/server_metrics_test.go diff --git a/examples/go.mod b/examples/go.mod index b46a2e93828b..e6aa00e7c62a 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -3,7 +3,7 @@ module google.golang.org/grpc/examples go 1.17 require ( - github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 + github.com/cncf/xds/go 
v0.0.0-20230607035331-e9ce68804cb4 github.com/golang/protobuf v1.5.3 golang.org/x/oauth2 v0.7.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 diff --git a/examples/go.sum b/examples/go.sum index 1d44586616cd..15496ae7e61c 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -627,8 +627,8 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index eb97cabf6707..b06bf212fd0b 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -638,7 +638,7 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/go.mod b/go.mod index 4852f39474bc..d78084a3ae0d 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/cespare/xxhash/v2 v2.2.0 github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe - github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f github.com/golang/glog v1.1.0 github.com/golang/protobuf v1.5.3 diff --git a/go.sum b/go.sum index 59a6385afade..1907f1aa632b 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= diff --git a/interop/observability/go.mod b/interop/observability/go.mod index 6b53ad2bf320..99213d553185 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -18,7 +18,7 @@ require ( contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect github.com/aws/aws-sdk-go v1.44.162 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 5b811d50e3f7..823435f2f3fd 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -638,8 +638,8 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod 
h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/orca/call_metrics_test.go b/orca/call_metrics_test.go index 4374b593b9f1..b0e6af646c91 100644 --- a/orca/call_metrics_test.go +++ b/orca/call_metrics_test.go @@ -23,13 +23,11 @@ import ( "errors" "io" "testing" - "time" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" @@ -41,16 +39,6 @@ import ( testpb "google.golang.org/grpc/interop/grpc_testing" ) -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -const defaultTestTimeout = 5 * time.Second - // 
TestE2ECallMetricsUnary tests the injection of custom backend metrics from // the server application for a unary RPC, and verifies that expected load // reports are received at the client. @@ -65,9 +53,9 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { injectMetrics: true, wantProto: &v3orcapb.OrcaLoadReport{ CpuUtilization: 1.0, - MemUtilization: 50.0, + MemUtilization: 0.9, RequestCost: map[string]float64{"queryCost": 25.0}, - Utilization: map[string]float64{"queueSize": 75.0}, + Utilization: map[string]float64{"queueSize": 0.75}, }, }, { @@ -92,7 +80,7 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { t.Error(err) return nil, err } - recorder.SetMemoryUtilization(50.0) + recorder.SetMemoryUtilization(0.9) // This value will be overwritten by a write to the same metric // from the server handler. recorder.SetNamedUtilization("queueSize", 1.0) @@ -114,7 +102,7 @@ func (s) TestE2ECallMetricsUnary(t *testing.T) { return nil, err } recorder.SetRequestCost("queryCost", 25.0) - recorder.SetNamedUtilization("queueSize", 75.0) + recorder.SetNamedUtilization("queueSize", 0.75) return &testpb.Empty{}, nil }, } @@ -171,9 +159,9 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { injectMetrics: true, wantProto: &v3orcapb.OrcaLoadReport{ CpuUtilization: 1.0, - MemUtilization: 50.0, - RequestCost: map[string]float64{"queryCost": 25.0}, - Utilization: map[string]float64{"queueSize": 75.0}, + MemUtilization: 0.5, + RequestCost: map[string]float64{"queryCost": 0.25}, + Utilization: map[string]float64{"queueSize": 0.75}, }, }, { @@ -198,7 +186,7 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { t.Error(err) return err } - recorder.SetMemoryUtilization(50.0) + recorder.SetMemoryUtilization(0.5) // This value will be overwritten by a write to the same metric // from the server handler. 
recorder.SetNamedUtilization("queueSize", 1.0) @@ -217,8 +205,8 @@ func (s) TestE2ECallMetricsStreaming(t *testing.T) { t.Error(err) return err } - recorder.SetRequestCost("queryCost", 25.0) - recorder.SetNamedUtilization("queueSize", 75.0) + recorder.SetRequestCost("queryCost", 0.25) + recorder.SetNamedUtilization("queueSize", 0.75) } // Streaming implementation replies with a dummy response until the diff --git a/orca/orca_test.go b/orca/orca_test.go index 096b54907148..4f85e7b01592 100644 --- a/orca/orca_test.go +++ b/orca/orca_test.go @@ -20,9 +20,11 @@ package orca_test import ( "testing" + "time" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/metadata" "google.golang.org/grpc/orca/internal" @@ -30,7 +32,17 @@ import ( v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" ) -func TestToLoadReport(t *testing.T) { +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const defaultTestTimeout = 5 * time.Second + +func (s) TestToLoadReport(t *testing.T) { goodReport := &v3orcapb.OrcaLoadReport{ CpuUtilization: 1.0, MemUtilization: 50.0, diff --git a/orca/producer_test.go b/orca/producer_test.go index ce376e7405e2..212cf2500f6b 100644 --- a/orca/producer_test.go +++ b/orca/producer_test.go @@ -158,12 +158,12 @@ func (s) TestProducer(t *testing.T) { // Set a few metrics and wait for them on the client side. 
smr.SetCPUUtilization(10) - smr.SetMemoryUtilization(100) - smr.SetNamedUtilization("bob", 555) + smr.SetMemoryUtilization(0.1) + smr.SetNamedUtilization("bob", 0.555) loadReportWant := &v3orcapb.OrcaLoadReport{ CpuUtilization: 10, - MemUtilization: 100, - Utilization: map[string]float64{"bob": 555}, + MemUtilization: 0.1, + Utilization: map[string]float64{"bob": 0.555}, } testReport: @@ -181,13 +181,13 @@ testReport: } // Change and add metrics and wait for them on the client side. - smr.SetCPUUtilization(50) - smr.SetMemoryUtilization(200) - smr.SetNamedUtilization("mary", 321) + smr.SetCPUUtilization(0.5) + smr.SetMemoryUtilization(0.2) + smr.SetNamedUtilization("mary", 0.321) loadReportWant = &v3orcapb.OrcaLoadReport{ - CpuUtilization: 50, - MemUtilization: 200, - Utilization: map[string]float64{"bob": 555, "mary": 321}, + CpuUtilization: 0.5, + MemUtilization: 0.2, + Utilization: map[string]float64{"bob": 0.555, "mary": 0.321}, } for { @@ -322,8 +322,8 @@ func (s) TestProducerBackoff(t *testing.T) { // Define a load report to send and expect the client to see. loadReportWant := &v3orcapb.OrcaLoadReport{ CpuUtilization: 10, - MemUtilization: 100, - Utilization: map[string]float64{"bob": 555}, + MemUtilization: 0.1, + Utilization: map[string]float64{"bob": 0.555}, } // Unblock the fake. @@ -444,8 +444,8 @@ func (s) TestProducerMultipleListeners(t *testing.T) { // Define a load report to send and expect the client to see. loadReportWant := &v3orcapb.OrcaLoadReport{ CpuUtilization: 10, - MemUtilization: 100, - Utilization: map[string]float64{"bob": 555}, + MemUtilization: 0.1, + Utilization: map[string]float64{"bob": 0.555}, } // Receive reports and update counts for the three listeners. 
diff --git a/orca/server_metrics.go b/orca/server_metrics.go index 6b63d3d252bf..f2cdb9b0b26f 100644 --- a/orca/server_metrics.go +++ b/orca/server_metrics.go @@ -27,8 +27,9 @@ import ( // ServerMetrics is the data returned from a server to a client to describe the // current state of the server and/or the cost of a request when used per-call. type ServerMetrics struct { - CPUUtilization float64 // CPU utilization: [0, 1.0]; unset=-1 + CPUUtilization float64 // CPU utilization: [0, inf); unset=-1 MemUtilization float64 // Memory utilization: [0, 1.0]; unset=-1 + AppUtilization float64 // Application utilization: [0, inf); unset=-1 QPS float64 // queries per second: [0, inf); unset=-1 EPS float64 // errors per second: [0, inf); unset=-1 @@ -52,6 +53,9 @@ func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport { if sm.MemUtilization != -1 { ret.MemUtilization = sm.MemUtilization } + if sm.AppUtilization != -1 { + ret.ApplicationUtilization = sm.AppUtilization + } if sm.QPS != -1 { ret.RpsFractional = sm.QPS } @@ -63,21 +67,24 @@ func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport { // merge merges o into sm, overwriting any values present in both. func (sm *ServerMetrics) merge(o *ServerMetrics) { + mergeMap(sm.Utilization, o.Utilization) + mergeMap(sm.RequestCost, o.RequestCost) + mergeMap(sm.NamedMetrics, o.NamedMetrics) if o.CPUUtilization != -1 { sm.CPUUtilization = o.CPUUtilization } if o.MemUtilization != -1 { sm.MemUtilization = o.MemUtilization } + if o.AppUtilization != -1 { + sm.AppUtilization = o.AppUtilization + } if o.QPS != -1 { sm.QPS = o.QPS } if o.EPS != -1 { sm.EPS = o.EPS } - mergeMap(sm.Utilization, o.Utilization) - mergeMap(sm.RequestCost, o.RequestCost) - mergeMap(sm.NamedMetrics, o.NamedMetrics) } func mergeMap(a, b map[string]float64) { @@ -91,34 +98,46 @@ func mergeMap(a, b map[string]float64) { type ServerMetricsRecorder interface { ServerMetricsProvider - // SetCPUUtilization sets the relevant server metric. 
+ // SetCPUUtilization sets the CPU utilization server metric. Must be + // greater than zero. SetCPUUtilization(float64) - // DeleteCPUUtilization deletes the relevant server metric to prevent it - // from being sent. + // DeleteCPUUtilization deletes the CPU utilization server metric to + // prevent it from being sent. DeleteCPUUtilization() - // SetMemoryUtilization sets the relevant server metric. + // SetMemoryUtilization sets the memory utilization server metric. Must be + // in the range [0, 1]. SetMemoryUtilization(float64) - // DeleteMemoryUtilization deletes the relevant server metric to prevent it - // from being sent. + // DeleteMemoryUtilization deletes the memory utiliztion server metric to + // prevent it from being sent. DeleteMemoryUtilization() - // SetQPS sets the relevant server metric. + // SetApplicationUtilization sets the application utilization server + // metric. Must be greater than zero. + SetApplicationUtilization(float64) + // DeleteApplicationUtilization deletes the application utilization server + // metric to prevent it from being sent. + DeleteApplicationUtilization() + + // SetQPS sets the Queries Per Second server metric. Must be greater than + // zero. SetQPS(float64) - // DeleteQPS deletes the relevant server metric to prevent it from being - // sent. + // DeleteQPS deletes the Queries Per Second server metric to prevent it + // from being sent. DeleteQPS() - // SetEPS sets the relevant server metric. + // SetEPS sets the Errors Per Second server metric. Must be greater than + // zero. SetEPS(float64) - // DeleteEPS deletes the relevant server metric to prevent it from being - // sent. + // DeleteEPS deletes the Errors Per Second server metric to prevent it from + // being sent. DeleteEPS() - // SetNamedUtilization sets the relevant server metric. + // SetNamedUtilization sets the named utilization server metric for the + // name provided. val must be in the range [0, 1]. 
SetNamedUtilization(name string, val float64) - // DeleteNamedUtilization deletes the relevant server metric to prevent it - // from being sent. + // DeleteNamedUtilization deletes the named utilization server metric for + // the name provided to prevent it from being sent. DeleteNamedUtilization(name string) } @@ -139,6 +158,7 @@ func newServerMetricsRecorder() *serverMetricsRecorder { state: &ServerMetrics{ CPUUtilization: -1, MemUtilization: -1, + AppUtilization: -1, QPS: -1, EPS: -1, Utilization: make(map[string]float64), @@ -155,6 +175,7 @@ func (s *serverMetricsRecorder) ServerMetrics() *ServerMetrics { return &ServerMetrics{ CPUUtilization: s.state.CPUUtilization, MemUtilization: s.state.MemUtilization, + AppUtilization: s.state.AppUtilization, QPS: s.state.QPS, EPS: s.state.EPS, Utilization: copyMap(s.state.Utilization), @@ -173,6 +194,12 @@ func copyMap(m map[string]float64) map[string]float64 { // SetCPUUtilization records a measurement for the CPU utilization metric. func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring CPU Utilization value out of range: %v", val) + } + return + } s.mu.Lock() defer s.mu.Unlock() s.state.CPUUtilization = val @@ -181,11 +208,19 @@ func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { // DeleteCPUUtilization deletes the relevant server metric to prevent it from // being sent. func (s *serverMetricsRecorder) DeleteCPUUtilization() { - s.SetCPUUtilization(-1) + s.mu.Lock() + defer s.mu.Unlock() + s.state.CPUUtilization = -1 } // SetMemoryUtilization records a measurement for the memory utilization metric. 
func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Memory Utilization value out of range: %v", val) + } + return + } s.mu.Lock() defer s.mu.Unlock() s.state.MemUtilization = val @@ -194,11 +229,41 @@ func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { // DeleteMemoryUtilization deletes the relevant server metric to prevent it // from being sent. func (s *serverMetricsRecorder) DeleteMemoryUtilization() { - s.SetMemoryUtilization(-1) + s.mu.Lock() + defer s.mu.Unlock() + s.state.MemUtilization = -1 +} + +// SetApplicationUtilization records a measurement for a generic utilization +// metric. +func (s *serverMetricsRecorder) SetApplicationUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring Application Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.AppUtilization = val +} + +// DeleteApplicationUtilization deletes the relevant server metric to prevent +// it from being sent. +func (s *serverMetricsRecorder) DeleteApplicationUtilization() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.AppUtilization = -1 } // SetQPS records a measurement for the QPS metric. func (s *serverMetricsRecorder) SetQPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring QPS value out of range: %v", val) + } + return + } s.mu.Lock() defer s.mu.Unlock() s.state.QPS = val @@ -206,11 +271,19 @@ func (s *serverMetricsRecorder) SetQPS(val float64) { // DeleteQPS deletes the relevant server metric to prevent it from being sent. func (s *serverMetricsRecorder) DeleteQPS() { - s.SetQPS(-1) + s.mu.Lock() + defer s.mu.Unlock() + s.state.QPS = -1 } // SetEPS records a measurement for the EPS metric. 
func (s *serverMetricsRecorder) SetEPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring EPS value out of range: %v", val) + } + return + } s.mu.Lock() defer s.mu.Unlock() s.state.EPS = val @@ -218,12 +291,20 @@ func (s *serverMetricsRecorder) SetEPS(val float64) { // DeleteEPS deletes the relevant server metric to prevent it from being sent. func (s *serverMetricsRecorder) DeleteEPS() { - s.SetEPS(-1) + s.mu.Lock() + defer s.mu.Unlock() + s.state.EPS = -1 } // SetNamedUtilization records a measurement for a utilization metric uniquely // identifiable by name. func (s *serverMetricsRecorder) SetNamedUtilization(name string, val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Named Utilization value out of range: %v", val) + } + return + } s.mu.Lock() defer s.mu.Unlock() s.state.Utilization[name] = val diff --git a/orca/server_metrics_test.go b/orca/server_metrics_test.go new file mode 100644 index 000000000000..ecc80d0e584b --- /dev/null +++ b/orca/server_metrics_test.go @@ -0,0 +1,175 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestServerMetrics_Setters(t *testing.T) { + smr := NewServerMetricsRecorder() + + smr.SetCPUUtilization(0.1) + smr.SetMemoryUtilization(0.2) + smr.SetApplicationUtilization(0.3) + smr.SetQPS(0.4) + smr.SetEPS(0.5) + smr.SetNamedUtilization("x", 0.6) + + want := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0.2, + AppUtilization: 0.3, + QPS: 0.4, + EPS: 0.5, + Utilization: map[string]float64{"x": 0.6}, + NamedMetrics: map[string]float64{}, + RequestCost: map[string]float64{}, + } + + got := smr.ServerMetrics() + if d := cmp.Diff(got, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} + +func (s) TestServerMetrics_Deleters(t *testing.T) { + smr := NewServerMetricsRecorder() + + smr.SetCPUUtilization(0.1) + smr.SetMemoryUtilization(0.2) + smr.SetApplicationUtilization(0.3) + smr.SetQPS(0.4) + smr.SetEPS(0.5) + smr.SetNamedUtilization("x", 0.6) + smr.SetNamedUtilization("y", 0.7) + + // Now delete everything except named_utilization "y". 
+ smr.DeleteCPUUtilization() + smr.DeleteMemoryUtilization() + smr.DeleteApplicationUtilization() + smr.DeleteQPS() + smr.DeleteEPS() + smr.DeleteNamedUtilization("x") + + want := &ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + AppUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: map[string]float64{"y": 0.7}, + NamedMetrics: map[string]float64{}, + RequestCost: map[string]float64{}, + } + + got := smr.ServerMetrics() + if d := cmp.Diff(got, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} + +func (s) TestServerMetrics_Setters_Range(t *testing.T) { + smr := NewServerMetricsRecorder() + + smr.SetCPUUtilization(0.1) + smr.SetMemoryUtilization(0.2) + smr.SetApplicationUtilization(0.3) + smr.SetQPS(0.4) + smr.SetEPS(0.5) + smr.SetNamedUtilization("x", 0.6) + + // Negatives for all these fields should be ignored. + smr.SetCPUUtilization(-2) + smr.SetMemoryUtilization(-3) + smr.SetApplicationUtilization(-4) + smr.SetQPS(-0.1) + smr.SetEPS(-0.6) + smr.SetNamedUtilization("x", -2) + + // Memory and named utilizations over 1 are ignored. 
+ smr.SetMemoryUtilization(1.1) + smr.SetNamedUtilization("x", 1.1) + + want := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0.2, + AppUtilization: 0.3, + QPS: 0.4, + EPS: 0.5, + Utilization: map[string]float64{"x": 0.6}, + NamedMetrics: map[string]float64{}, + RequestCost: map[string]float64{}, + } + + got := smr.ServerMetrics() + if d := cmp.Diff(got, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} + +func (s) TestServerMetrics_Merge(t *testing.T) { + sm1 := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0.2, + AppUtilization: 0.3, + QPS: -1, + EPS: 0, + Utilization: map[string]float64{"x": 0.6}, + NamedMetrics: map[string]float64{"y": 0.2}, + RequestCost: map[string]float64{"a": 0.1}, + } + + sm2 := &ServerMetrics{ + CPUUtilization: -1, + AppUtilization: 0, + QPS: 0.9, + EPS: 20, + Utilization: map[string]float64{"x": 0.5, "y": 0.4}, + NamedMetrics: map[string]float64{"x": 0.1}, + RequestCost: map[string]float64{"a": 0.2}, + } + + want := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0, + AppUtilization: 0, + QPS: 0.9, + EPS: 20, + Utilization: map[string]float64{"x": 0.5, "y": 0.4}, + NamedMetrics: map[string]float64{"x": 0.1, "y": 0.2}, + RequestCost: map[string]float64{"a": 0.2}, + } + + sm1.merge(sm2) + if d := cmp.Diff(sm1, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} diff --git a/orca/service_test.go b/orca/service_test.go index 73ad28430264..9c4defbe266b 100644 --- a/orca/service_test.go +++ b/orca/service_test.go @@ -60,9 +60,10 @@ func (t *testServiceImpl) UnaryCall(context.Context, *testpb.SimpleRequest) (*te t.requests++ t.mu.Unlock() - t.smr.SetNamedUtilization(requestsMetricKey, float64(t.requests)) + t.smr.SetNamedUtilization(requestsMetricKey, float64(t.requests)*0.01) t.smr.SetCPUUtilization(50.0) - t.smr.SetMemoryUtilization(99.0) + t.smr.SetMemoryUtilization(0.9) + t.smr.SetApplicationUtilization(1.2) return &testpb.SimpleResponse{}, nil } 
@@ -70,6 +71,7 @@ func (t *testServiceImpl) EmptyCall(context.Context, *testpb.Empty) (*testpb.Emp t.smr.DeleteNamedUtilization(requestsMetricKey) t.smr.SetCPUUtilization(0) t.smr.SetMemoryUtilization(0) + t.smr.DeleteApplicationUtilization() return &testpb.Empty{}, nil } @@ -150,9 +152,10 @@ func (s) TestE2E_CustomBackendMetrics_OutOfBand(t *testing.T) { } wantProto := &v3orcapb.OrcaLoadReport{ - CpuUtilization: 50.0, - MemUtilization: 99.0, - Utilization: map[string]float64{requestsMetricKey: numRequests}, + CpuUtilization: 50.0, + MemUtilization: 0.9, + ApplicationUtilization: 1.2, + Utilization: map[string]float64{requestsMetricKey: numRequests * 0.01}, } gotProto, err := stream.Recv() if err != nil { diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index 4d2b719ef23d..f3aff2587e11 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -622,7 +622,7 @@ github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 2ac1aaedb828b6a73d21bb630a55d6276d2ca303 Mon Sep 17 00:00:00 
2001 From: Doug Fawley Date: Wed, 7 Jun 2023 13:49:09 -0700 Subject: [PATCH 960/998] weightedroundrobin: prefer application_utilization to cpu_utilization (#6358) --- balancer/weightedroundrobin/balancer.go | 8 +- balancer/weightedroundrobin/balancer_test.go | 109 ++++++++++++------- 2 files changed, 73 insertions(+), 44 deletions(-) diff --git a/balancer/weightedroundrobin/balancer.go b/balancer/weightedroundrobin/balancer.go index e957b91b1966..a164d1bedd7e 100644 --- a/balancer/weightedroundrobin/balancer.go +++ b/balancer/weightedroundrobin/balancer.go @@ -419,7 +419,11 @@ func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) } // Update weights of this subchannel according to the reported load - if load.CpuUtilization == 0 || load.RpsFractional == 0 { + utilization := load.ApplicationUtilization + if utilization == 0 { + utilization = load.CpuUtilization + } + if utilization == 0 || load.RpsFractional == 0 { if w.logger.V(2) { w.logger.Infof("Ignoring empty load report for subchannel %v", w.SubConn) } @@ -430,7 +434,7 @@ func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { defer w.mu.Unlock() errorRate := load.Eps / load.RpsFractional - w.weightVal = load.RpsFractional / (load.CpuUtilization + errorRate*w.cfg.ErrorUtilizationPenalty) + w.weightVal = load.RpsFractional / (utilization + errorRate*w.cfg.ErrorUtilizationPenalty) if w.logger.V(2) { w.logger.Infof("New weight for subchannel %v: %v", w.SubConn, w.weightVal) } diff --git a/balancer/weightedroundrobin/balancer_test.go b/balancer/weightedroundrobin/balancer_test.go index a0a84a7f057b..1d67bcf1f008 100644 --- a/balancer/weightedroundrobin/balancer_test.go +++ b/balancer/weightedroundrobin/balancer_test.go @@ -110,7 +110,7 @@ func startServer(t *testing.T, r reportType) *testServer { if r := orca.CallMetricsRecorderFromContext(ctx); r != nil { // Copy metrics from what the test set in cmr 
into r. sm := cmr.(orca.ServerMetricsProvider).ServerMetrics() - r.SetCPUUtilization(sm.CPUUtilization) + r.SetApplicationUtilization(sm.AppUtilization) r.SetQPS(sm.QPS) r.SetEPS(sm.EPS) } @@ -230,10 +230,10 @@ func (s) TestBalancer_TwoAddresses_ReportingEnabledPerCall(t *testing.T) { // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed // disproportionately to srv2 (10:1). srv1.callMetrics.SetQPS(10.0) - srv1.callMetrics.SetCPUUtilization(1.0) + srv1.callMetrics.SetApplicationUtilization(1.0) srv2.callMetrics.SetQPS(10.0) - srv2.callMetrics.SetCPUUtilization(.1) + srv2.callMetrics.SetApplicationUtilization(.1) sc := svcConfig(t, perCallConfig) if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { @@ -253,33 +253,58 @@ func (s) TestBalancer_TwoAddresses_ReportingEnabledPerCall(t *testing.T) { // Tests two addresses with OOB ORCA reporting enabled. Checks the backends // are called in the appropriate ratios. func (s) TestBalancer_TwoAddresses_ReportingEnabledOOB(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() + testCases := []struct { + name string + utilSetter func(orca.ServerMetricsRecorder, float64) + }{{ + name: "application_utilization", + utilSetter: func(smr orca.ServerMetricsRecorder, val float64) { + smr.SetApplicationUtilization(val) + }, + }, { + name: "cpu_utilization", + utilSetter: func(smr orca.ServerMetricsRecorder, val float64) { + smr.SetCPUUtilization(val) + }, + }, { + name: "application over cpu", + utilSetter: func(smr orca.ServerMetricsRecorder, val float64) { + smr.SetApplicationUtilization(val) + smr.SetCPUUtilization(2.0) // ignored because ApplicationUtilization is set + }, + }} - srv1 := startServer(t, reportOOB) - srv2 := startServer(t, reportOOB) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() - // srv1 starts 
loaded and srv2 starts without load; ensure RPCs are routed - // disproportionately to srv2 (10:1). - srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(1.0) + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) - srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(.1) + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.oobMetrics.SetQPS(10.0) + tc.utilSetter(srv1.oobMetrics, 1.0) - sc := svcConfig(t, oobConfig) - if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { - t.Fatalf("Error starting client: %v", err) - } - addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} - srv1.R.UpdateState(resolver.State{Addresses: addrs}) + srv2.oobMetrics.SetQPS(10.0) + tc.utilSetter(srv2.oobMetrics, 0.1) - // Call each backend once to ensure the weights have been received. - ensureReached(ctx, t, srv1.Client, 2) + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) - // Wait for the weight update period to allow the new weights to be processed. - time.Sleep(weightUpdatePeriod) - checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. 
+ time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + }) + } } // Tests two addresses with OOB ORCA reporting enabled, where the reports @@ -295,10 +320,10 @@ func (s) TestBalancer_TwoAddresses_UpdateLoads(t *testing.T) { // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed // disproportionately to srv2 (10:1). srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(1.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(.1) + srv2.oobMetrics.SetApplicationUtilization(.1) sc := svcConfig(t, oobConfig) if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { @@ -317,10 +342,10 @@ func (s) TestBalancer_TwoAddresses_UpdateLoads(t *testing.T) { // Update the loads so srv2 is loaded and srv1 is not; ensure RPCs are // routed disproportionately to srv1. srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(.1) + srv1.oobMetrics.SetApplicationUtilization(.1) srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(1.0) + srv2.oobMetrics.SetApplicationUtilization(1.0) // Wait for the weight update period to allow the new weights to be processed. time.Sleep(weightUpdatePeriod + oobReportingInterval) @@ -340,19 +365,19 @@ func (s) TestBalancer_TwoAddresses_OOBThenPerCall(t *testing.T) { // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed // disproportionately to srv2 (10:1). srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(1.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(.1) + srv2.oobMetrics.SetApplicationUtilization(.1) // For per-call metrics (not used initially), srv2 reports that it is // loaded and srv1 reports low load. After confirming OOB works, switch to // per-call and confirm the new routing weights are applied. 
srv1.callMetrics.SetQPS(10.0) - srv1.callMetrics.SetCPUUtilization(.1) + srv1.callMetrics.SetApplicationUtilization(.1) srv2.callMetrics.SetQPS(10.0) - srv2.callMetrics.SetCPUUtilization(1.0) + srv2.callMetrics.SetApplicationUtilization(1.0) sc := svcConfig(t, oobConfig) if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { @@ -396,13 +421,13 @@ func (s) TestBalancer_TwoAddresses_ErrorPenalty(t *testing.T) { // to 0.9 which will cause the weights to be equal and RPCs to be routed // 50/50. srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(1.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) srv1.oobMetrics.SetEPS(0) // srv1 weight before: 10.0 / 1.0 = 10.0 // srv1 weight after: 10.0 / 1.0 = 10.0 srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(.1) + srv2.oobMetrics.SetApplicationUtilization(.1) srv2.oobMetrics.SetEPS(10.0) // srv2 weight before: 10.0 / 0.1 = 100.0 // srv2 weight after: 10.0 / 1.0 = 10.0 @@ -476,10 +501,10 @@ func (s) TestBalancer_TwoAddresses_BlackoutPeriod(t *testing.T) { // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed // disproportionately to srv2 (10:1). srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(1.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(.1) + srv2.oobMetrics.SetApplicationUtilization(.1) cfg := oobConfig cfg.BlackoutPeriod = tc.blackoutPeriodCfg @@ -544,10 +569,10 @@ func (s) TestBalancer_TwoAddresses_WeightExpiration(t *testing.T) { // is 1 minute but the weights expire in 1 second, routing will go to 50/50 // after the weights expire. 
srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(1.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(.1) + srv2.oobMetrics.SetApplicationUtilization(.1) cfg := oobConfig cfg.OOBReportingPeriod = stringp("60s") @@ -594,16 +619,16 @@ func (s) TestBalancer_AddressesChanging(t *testing.T) { // srv1: weight 10 srv1.oobMetrics.SetQPS(10.0) - srv1.oobMetrics.SetCPUUtilization(1.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) // srv2: weight 100 srv2.oobMetrics.SetQPS(10.0) - srv2.oobMetrics.SetCPUUtilization(.1) + srv2.oobMetrics.SetApplicationUtilization(.1) // srv3: weight 20 srv3.oobMetrics.SetQPS(20.0) - srv3.oobMetrics.SetCPUUtilization(1.0) + srv3.oobMetrics.SetApplicationUtilization(1.0) // srv4: weight 200 srv4.oobMetrics.SetQPS(20.0) - srv4.oobMetrics.SetCPUUtilization(.1) + srv4.oobMetrics.SetApplicationUtilization(.1) sc := svcConfig(t, oobConfig) if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { From 907bdaa1eb138ac31922a41fe7477476f583bddb Mon Sep 17 00:00:00 2001 From: Matthew Stevenson <52979934+matthewstevenson88@users.noreply.github.com> Date: Wed, 7 Jun 2023 18:54:06 -0700 Subject: [PATCH 961/998] alts: Read max number of concurrent ALTS handshakes from environment variable. (#6267) * Read max number of concurrent ALTS handshakes from environment variable. * Refactor to use new envconfig file. * Remove impossible if condition in acquire(). * Use weighted semaphore. * Add e2e test for concurrent ALTS handshakes. * Separate into client and server semaphores. * Use TryAcquire instead of Acquire. * Attempt to fix go.sum error. * Run go mod tidy compat=1.17. * Update go.mod for examples subdirectory. * Run go mod tidy -compat=1.17 on examples subdirectory. * Update go.mod in subdirectories. * Update go.mod in security/advancedtls/examples. * Missed another go.mod update. * Do not upgrade glog because it requires Golang 1.19. 
* Fix glog version in examples/go.sum. * More glog cleanup. * Fix glog issue in gcp/observability/go.sum. * Move ALTS env var into envconfig.go. * Fix go.mod files. * Revert go.sum files. * Revert interop/observability/go.mod change. * Run go mod tidy -compat=1.17 on examples/. * Run gofmt. * Add comment describing test init function. --- credentials/alts/alts_test.go | 91 +++++++++++++------ .../alts/internal/handshaker/handshaker.go | 57 ++++-------- .../internal/handshaker/handshaker_test.go | 13 +-- examples/go.mod | 1 + examples/go.sum | 1 + go.mod | 1 + go.sum | 1 + internal/envconfig/envconfig.go | 3 + 8 files changed, 95 insertions(+), 73 deletions(-) diff --git a/credentials/alts/alts_test.go b/credentials/alts/alts_test.go index 9a95d462806b..20062fe77539 100644 --- a/credentials/alts/alts_test.go +++ b/credentials/alts/alts_test.go @@ -31,6 +31,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/alts/internal/handshaker" "google.golang.org/grpc/credentials/alts/internal/handshaker/service" altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" @@ -51,6 +52,14 @@ type s struct { grpctest.Tester } +func init() { + // The vmOnGCP global variable MUST be forced to true. Otherwise, if + // this test is run anywhere except on a GCP VM, then an ALTS handshake + // will immediately fail. + once.Do(func() {}) + vmOnGCP = true +} + func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } @@ -308,14 +317,6 @@ func (s) TestCheckRPCVersions(t *testing.T) { // server, where both client and server offload to a local, fake handshaker // service. func (s) TestFullHandshake(t *testing.T) { - // The vmOnGCP global variable MUST be reset to true after the client - // or server credentials have been created, but before the ALTS - // handshake begins. 
If vmOnGCP is not reset and this test is run - // anywhere except for a GCP VM, then the ALTS handshake will - // immediately fail. - once.Do(func() {}) - vmOnGCP = true - // Start the fake handshaker service and the server. var wait sync.WaitGroup defer wait.Wait() @@ -325,26 +326,41 @@ func (s) TestFullHandshake(t *testing.T) { defer stopServer() // Ping the server, authenticating with ALTS. - clientCreds := NewClientCreds(&ClientOptions{HandshakerServiceAddress: handshakerAddress}) - conn, err := grpc.Dial(serverAddress, grpc.WithTransportCredentials(clientCreds)) - if err != nil { - t.Fatalf("grpc.Dial(%v) failed: %v", serverAddress, err) + establishAltsConnection(t, handshakerAddress, serverAddress) + + // Close open connections to the fake handshaker service. + if err := service.CloseForTesting(); err != nil { + t.Errorf("service.CloseForTesting() failed: %v", err) } - defer conn.Close() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestLongTimeout) - defer cancel() - c := testgrpc.NewTestServiceClient(conn) - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - _, err = c.UnaryCall(ctx, &testpb.SimpleRequest{}) - if err == nil { - break - } - if code := status.Code(err); code == codes.Unavailable { - // The server is not ready yet. Try again. - continue - } - t.Fatalf("c.UnaryCall() failed: %v", err) +} + +// TestConcurrentHandshakes performs several concurrent ALTS handshakes +// between a test client and server, where both client and server offload to a +// local, fake handshaker service. +func (s) TestConcurrentHandshakes(t *testing.T) { + // Set the max number of concurrent handshakes to 3, so that we can + // test the handshaker behavior when handshakes are queued by + // performing more than 3 concurrent handshakes (specifically, 10). + handshaker.ResetConcurrentHandshakeSemaphoreForTesting(3) + + // Start the fake handshaker service and the server. 
+ var wait sync.WaitGroup + defer wait.Wait() + stopHandshaker, handshakerAddress := startFakeHandshakerService(t, &wait) + defer stopHandshaker() + stopServer, serverAddress := startServer(t, handshakerAddress, &wait) + defer stopServer() + + // Ping the server, authenticating with ALTS. + var waitForConnections sync.WaitGroup + for i := 0; i < 10; i++ { + waitForConnections.Add(1) + go func() { + establishAltsConnection(t, handshakerAddress, serverAddress) + waitForConnections.Done() + }() } + waitForConnections.Wait() // Close open connections to the fake handshaker service. if err := service.CloseForTesting(); err != nil { @@ -366,6 +382,29 @@ func versions(minMajor, minMinor, maxMajor, maxMinor uint32) *altspb.RpcProtocol } } +func establishAltsConnection(t *testing.T, handshakerAddress, serverAddress string) { + clientCreds := NewClientCreds(&ClientOptions{HandshakerServiceAddress: handshakerAddress}) + conn, err := grpc.Dial(serverAddress, grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", serverAddress, err) + } + defer conn.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestLongTimeout) + defer cancel() + c := testgrpc.NewTestServiceClient(conn) + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + _, err = c.UnaryCall(ctx, &testpb.SimpleRequest{}) + if err == nil { + break + } + if code := status.Code(err); code == codes.Unavailable { + // The server is not ready yet. Try again. 
+ continue + } + t.Fatalf("c.UnaryCall() failed: %v", err) + } +} + func startFakeHandshakerService(t *testing.T, wait *sync.WaitGroup) (stop func(), address string) { listener, err := testutils.LocalTCPListener() if err != nil { diff --git a/credentials/alts/internal/handshaker/handshaker.go b/credentials/alts/internal/handshaker/handshaker.go index 150ae5576769..0854e7af6518 100644 --- a/credentials/alts/internal/handshaker/handshaker.go +++ b/credentials/alts/internal/handshaker/handshaker.go @@ -25,8 +25,8 @@ import ( "fmt" "io" "net" - "sync" + "golang.org/x/sync/semaphore" grpc "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -35,15 +35,13 @@ import ( "google.golang.org/grpc/credentials/alts/internal/conn" altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/internal/envconfig" ) const ( // The maximum byte size of receive frames. frameLimit = 64 * 1024 // 64 KB rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" - // maxPendingHandshakes represents the maximum number of concurrent - // handshakes. - maxPendingHandshakes = 100 ) var ( @@ -59,9 +57,9 @@ var ( return conn.NewAES128GCMRekey(s, keyData) }, } - // control number of concurrent created (but not closed) handshakers. - mu sync.Mutex - concurrentHandshakes = int64(0) + // control number of concurrent created (but not closed) handshakes. + clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) + serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) // errDropped occurs when maxPendingHandshakes is reached. 
errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") // errOutOfBound occurs when the handshake service returns a consumed @@ -77,30 +75,6 @@ func init() { } } -func acquire() bool { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - success := maxPendingHandshakes-concurrentHandshakes >= n - if success { - concurrentHandshakes += n - } - mu.Unlock() - return success -} - -func release() { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - concurrentHandshakes -= n - if concurrentHandshakes < 0 { - mu.Unlock() - panic("bad release") - } - mu.Unlock() -} - // ClientHandshakerOptions contains the client handshaker options that can // provided by the caller. type ClientHandshakerOptions struct { @@ -134,10 +108,6 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { return &ServerHandshakerOptions{} } -// TODO: add support for future local and remote endpoint in both client options -// and server options (server options struct does not exist now. When -// caller can provide endpoints, it should be created. - // altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. @@ -185,10 +155,10 @@ func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. 
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !clientHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer clientHandshakes.Release(1) if h.side != core.ClientSide { return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") @@ -238,10 +208,10 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent // ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !serverHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer serverHandshakes.Release(1) if h.side != core.ServerSide { return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") @@ -264,8 +234,6 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent } // Prepare server parameters. - // TODO: currently only ALTS parameters are provided. Might need to use - // more options in the future. params := make(map[int32]*altspb.ServerHandshakeParameters) params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ RecordProtocols: recordProtocols, @@ -391,3 +359,10 @@ func (h *altsHandshaker) Close() { h.stream.CloseSend() } } + +// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores +// to allow numberOfAllowedHandshakes concurrent handshakes each. 
+func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) { + clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) + serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) +} diff --git a/credentials/alts/internal/handshaker/handshaker_test.go b/credentials/alts/internal/handshaker/handshaker_test.go index 49f07caf8deb..40d66161c7b6 100644 --- a/credentials/alts/internal/handshaker/handshaker_test.go +++ b/credentials/alts/internal/handshaker/handshaker_test.go @@ -31,6 +31,7 @@ import ( core "google.golang.org/grpc/credentials/alts/internal" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" "google.golang.org/grpc/credentials/alts/internal/testutil" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" ) @@ -134,7 +135,7 @@ func (s) TestClientHandshake(t *testing.T) { numberOfHandshakes int }{ {0 * time.Millisecond, 1}, - {100 * time.Millisecond, 10 * maxPendingHandshakes}, + {100 * time.Millisecond, 10 * int(envconfig.ALTSMaxConcurrentHandshakes)}, } { errc := make(chan error) stat.Reset() @@ -182,8 +183,8 @@ func (s) TestClientHandshake(t *testing.T) { } // Ensure that there are no concurrent calls more than the limit. 
- if stat.MaxConcurrentCalls > maxPendingHandshakes { - t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, maxPendingHandshakes) + if stat.MaxConcurrentCalls > int(envconfig.ALTSMaxConcurrentHandshakes) { + t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, envconfig.ALTSMaxConcurrentHandshakes) } } } @@ -194,7 +195,7 @@ func (s) TestServerHandshake(t *testing.T) { numberOfHandshakes int }{ {0 * time.Millisecond, 1}, - {100 * time.Millisecond, 10 * maxPendingHandshakes}, + {100 * time.Millisecond, 10 * int(envconfig.ALTSMaxConcurrentHandshakes)}, } { errc := make(chan error) stat.Reset() @@ -239,8 +240,8 @@ func (s) TestServerHandshake(t *testing.T) { } // Ensure that there are no concurrent calls more than the limit. - if stat.MaxConcurrentCalls > maxPendingHandshakes { - t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, maxPendingHandshakes) + if stat.MaxConcurrentCalls > int(envconfig.ALTSMaxConcurrentHandshakes) { + t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, envconfig.ALTSMaxConcurrentHandshakes) } } } diff --git a/examples/go.mod b/examples/go.mod index e6aa00e7c62a..0bd97db78757 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -20,6 +20,7 @@ require ( github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect golang.org/x/net v0.9.0 // indirect + golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/examples/go.sum b/examples/go.sum index 15496ae7e61c..6511b1b756ac 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1005,6 +1005,7 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/go.mod b/go.mod index d78084a3ae0d..acd6f919f793 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/google/uuid v1.3.0 golang.org/x/net v0.9.0 golang.org/x/oauth2 v0.7.0 + golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sys v0.7.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 google.golang.org/protobuf v1.30.0 diff --git a/go.sum b/go.sum index 1907f1aa632b..98a106b2a17c 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,7 @@ golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/internal/envconfig/envconfig.go 
b/internal/envconfig/envconfig.go index 80fd5c7d2a4f..77c2c0b89f6c 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -40,6 +40,9 @@ var ( // pick_first LB policy, which can be enabled by setting the environment // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { From 89790ea90c7558600c40f5ccc1dae341f13befa1 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Thu, 8 Jun 2023 13:02:21 +0900 Subject: [PATCH 962/998] grpclb: fix typo (#6356) --- balancer/grpclb/grpclb_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 1df63a9366e5..9dbfd3466401 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -973,7 +973,7 @@ func (s) TestGRPCLB_FallBackWithNoServerAddress(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer cancel() if err := resolveNowCh.SendContext(ctx, nil); err != nil { - t.Error("timeout when attemping to send on resolverNowCh") + t.Error("timeout when attempting to send on resolverNowCh") } } From 7a7caf363d9b33bfa5ddac83e7dab744f695fb5b Mon Sep 17 00:00:00 2001 From: Keita Shinyama Date: Fri, 9 Jun 2023 00:54:16 +0900 Subject: [PATCH 963/998] protoc-gen-go-grpc: Update README.md file (#6349) --- cmd/protoc-gen-go-grpc/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/protoc-gen-go-grpc/README.md b/cmd/protoc-gen-go-grpc/README.md index 4758125de0d2..a2d4d010212a 100644 --- a/cmd/protoc-gen-go-grpc/README.md +++ b/cmd/protoc-gen-go-grpc/README.md @@ -14,7 +14,7 @@ To 
restore this behavior, set the option `require_unimplemented_servers=false`. E.g.: ``` - protoc --go-grpc_out=require_unimplemented_servers=false[,other options...]:. \ + protoc --go-grpc_out=. --go-grpc_opt=require_unimplemented_servers=false[,other options...] \ ``` Note that this is not recommended, and the option is only provided to restore From 1c0572a5ec7979560c41b7e540ddcdecb25c0512 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 8 Jun 2023 15:42:28 -0400 Subject: [PATCH 964/998] benchmark: fix package used to reference service to use grpc suffix instead of pb (#6362) --- benchmark/benchmain/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 971a2d453c9a..78ca59363841 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -275,7 +275,7 @@ func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Fea // service. The client is configured using the different options in the passed // 'bf'. Also returns a cleanup function to close the client and release // resources. -func makeClients(bf stats.Features) ([]testpb.BenchmarkServiceClient, func()) { +func makeClients(bf stats.Features) ([]testgrpc.BenchmarkServiceClient, func()) { nw := &latency.Network{Kbps: bf.Kbps, Latency: bf.Latency, MTU: bf.MTU} opts := []grpc.DialOption{} sopts := []grpc.ServerOption{} @@ -356,7 +356,7 @@ func makeClients(bf stats.Features) ([]testpb.BenchmarkServiceClient, func()) { lis = nw.Listener(lis) stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...) conns := make([]*grpc.ClientConn, bf.Connections) - clients := make([]testpb.BenchmarkServiceClient, bf.Connections) + clients := make([]testgrpc.BenchmarkServiceClient, bf.Connections) for cn := 0; cn < bf.Connections; cn++ { conns[cn] = bm.NewClientConn("" /* target not used */, opts...) 
clients[cn] = testgrpc.NewBenchmarkServiceClient(conns[cn]) From 3e8eca8088c7eb7122261ec7dbaf340e70758123 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 9 Jun 2023 19:06:18 -0400 Subject: [PATCH 965/998] Revert "client: encode the authority by default (#6318)" (#6365) This reverts commit 68576b3c42bbecacfeba3bee89f509f1bb7b0072. --- clientconn.go | 7 +------ test/authority_test.go | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/clientconn.go b/clientconn.go index a27c0573dc1d..95a7459b02f6 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1872,12 +1872,7 @@ func (cc *ClientConn) determineAuthority() error { // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - - // Path escape the endpoint to handle use cases where the endpoint - // might not be a valid authority by default. - // For example an endpoint which has multiple paths like - // 'a/b/c', which is not a valid authority by default. 
- cc.authority = url.PathEscape(endpoint) + cc.authority = endpoint } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil diff --git a/test/authority_test.go b/test/authority_test.go index a4d481f24f92..44095a23a2fe 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -126,7 +126,7 @@ var authorityTests = []authorityTest{ name: "UnixPassthrough", address: "/tmp/sock.sock", target: "passthrough:///unix:///tmp/sock.sock", - authority: "unix:%2F%2F%2Ftmp%2Fsock.sock", + authority: "unix:///tmp/sock.sock", dialTargetWant: "unix:///tmp/sock.sock", }, { From 3c6084b7d4b5198de0eee1f82bf15ce02759c3d3 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 9 Jun 2023 19:32:27 -0400 Subject: [PATCH 966/998] xds/outlierdetection: fix config handling (#6361) --- internal/balancer/nop/nop.go | 62 ++++ test/xds/xds_client_outlier_detection_test.go | 119 +++++- .../balancer/cdsbalancer/cdsbalancer.go | 101 +++--- .../cdsbalancer/cdsbalancer_security_test.go | 14 +- .../balancer/cdsbalancer/cdsbalancer_test.go | 277 +++++--------- .../clusterresolver/clusterresolver.go | 59 ++- .../clusterresolver/clusterresolver_test.go | 21 +- .../balancer/clusterresolver/config.go | 20 +- .../balancer/clusterresolver/config_test.go | 110 +++++- .../balancer/clusterresolver/configbuilder.go | 4 +- .../clusterresolver/configbuilder_test.go | 59 +-- .../clusterresolver/e2e_test/eds_impl_test.go | 12 +- .../balancer/clusterresolver/priority_test.go | 4 +- .../balancer/outlierdetection/balancer.go | 29 +- .../outlierdetection/balancer_test.go | 339 +++++++++++++++--- .../balancer/outlierdetection/config.go | 62 ++++ .../xdsresource/tests/unmarshal_cds_test.go | 48 +-- .../xdsclient/xdsresource/type_cds.go | 68 +--- .../xdsclient/xdsresource/unmarshal_cds.go | 191 ++++++---- .../xdsresource/unmarshal_cds_test.go | 125 ++++--- 20 files changed, 1119 insertions(+), 605 deletions(-) create mode 
100644 internal/balancer/nop/nop.go diff --git a/internal/balancer/nop/nop.go b/internal/balancer/nop/nop.go new file mode 100644 index 000000000000..0c96f1b81186 --- /dev/null +++ b/internal/balancer/nop/nop.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package nop implements a balancer with all of its balancer operations as +// no-ops, other than returning a Transient Failure Picker on a Client Conn +// update. +package nop + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" +) + +// bal is a balancer with all of its balancer operations as no-ops, other than +// returning a Transient Failure Picker on a Client Conn update. +type bal struct { + cc balancer.ClientConn + err error +} + +// NewBalancer returns a no-op balancer. +func NewBalancer(cc balancer.ClientConn, err error) balancer.Balancer { + return &bal{ + cc: cc, + err: err, + } +} + +// UpdateClientConnState updates the bal's Client Conn with an Error Picker +// and a Connectivity State of TRANSIENT_FAILURE. +func (b *bal) UpdateClientConnState(_ balancer.ClientConnState) error { + b.cc.UpdateState(balancer.State{ + Picker: base.NewErrPicker(b.err), + ConnectivityState: connectivity.TransientFailure, + }) + return nil +} + +// ResolverError is a no-op. +func (b *bal) ResolverError(_ error) {} + +// UpdateSubConnState is a no-op. 
+func (b *bal) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) {} + +// Close is a no-op. +func (b *bal) Close() {} diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go index fa08e9be9a3a..d91b35a883aa 100644 --- a/test/xds/xds_client_outlier_detection_test.go +++ b/test/xds/xds_client_outlier_detection_test.go @@ -90,9 +90,9 @@ func (s) TestOutlierDetection_NoopConfig(t *testing.T) { // clientResourcesMultipleBackendsAndOD returns xDS resources which correspond // to multiple upstreams, corresponding different backends listening on // different localhost:port combinations. The resources also configure an -// Outlier Detection Balancer set up with Failure Percentage Algorithm, which -// ejects endpoints based on failure rate. -func clientResourcesMultipleBackendsAndOD(params e2e.ResourceParams, ports []uint32) e2e.UpdateOptions { +// Outlier Detection Balancer configured through the passed in Outlier Detection +// proto. 
+func clientResourcesMultipleBackendsAndOD(params e2e.ResourceParams, ports []uint32, od *v3clusterpb.OutlierDetection) e2e.UpdateOptions { routeConfigName := "route-" + params.DialTarget clusterName := "cluster-" + params.DialTarget endpointsName := "endpoints-" + params.DialTarget @@ -100,23 +100,14 @@ func clientResourcesMultipleBackendsAndOD(params e2e.ResourceParams, ports []uin NodeID: params.NodeID, Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(routeConfigName, params.DialTarget, clusterName)}, - Clusters: []*v3clusterpb.Cluster{clusterWithOutlierDetection(clusterName, endpointsName, params.SecLevel)}, + Clusters: []*v3clusterpb.Cluster{clusterWithOutlierDetection(clusterName, endpointsName, params.SecLevel, od)}, Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, ports)}, } } -func clusterWithOutlierDetection(clusterName, edsServiceName string, secLevel e2e.SecurityLevel) *v3clusterpb.Cluster { +func clusterWithOutlierDetection(clusterName, edsServiceName string, secLevel e2e.SecurityLevel, od *v3clusterpb.OutlierDetection) *v3clusterpb.Cluster { cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel) - cluster.OutlierDetection = &v3clusterpb.OutlierDetection{ - Interval: &durationpb.Duration{Nanos: 50000000}, // .5 seconds - BaseEjectionTime: &durationpb.Duration{Seconds: 30}, - MaxEjectionTime: &durationpb.Duration{Seconds: 300}, - MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 1}, - FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 50}, - EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 100}, - FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 8}, - FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 3}, - } + cluster.OutlierDetection = od return cluster } @@ -197,7 +188,103 @@ func (s) TestOutlierDetectionWithOutlier(t *testing.T) { 
NodeID: nodeID, Host: "localhost", SecLevel: e2e.SecurityLevelNone, - }, []uint32{port1, port2, port3}) + }, []uint32{port1, port2, port3}, &v3clusterpb.OutlierDetection{ + Interval: &durationpb.Duration{Nanos: 50000000}, // .5 seconds + BaseEjectionTime: &durationpb.Duration{Seconds: 30}, + MaxEjectionTime: &durationpb.Duration{Seconds: 300}, + MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 1}, + FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 50}, + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 100}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 8}, + FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 3}, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + + fullAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, + {Addr: backend3.Address}, + } + // At first, due to no statistics on each of the backends, the 3 + // upstreams should all be round robined across. + if err = checkRoundRobinRPCs(ctx, client, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, + } + // After calling the three upstreams, one of them constantly error + // and should eventually be ejected for a period of time. This + // period of time should cause the RPC's to be round robined only + // across the two that are healthy. 
+ if err = checkRoundRobinRPCs(ctx, client, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } +} + +// TestOutlierDetectionXDSDefaultOn tests that Outlier Detection is by default +// configured on in the xDS Flow. If the Outlier Detection proto message is +// present with SuccessRateEjection unset, then Outlier Detection should be +// turned on. The test setups and xDS system with xDS resources with Outlier +// Detection present in the CDS update, but with SuccessRateEjection unset, and +// asserts that Outlier Detection is turned on and ejects upstreams. +func (s) TestOutlierDetectionXDSDefaultOn(t *testing.T) { + managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Working backend 1. + backend1 := stubserver.StartTestService(t, nil) + port1 := testutils.ParsePort(t, backend1.Address) + defer backend1.Stop() + + // Working backend 2. + backend2 := stubserver.StartTestService(t, nil) + port2 := testutils.ParsePort(t, backend2.Address) + defer backend2.Stop() + + // Backend 3 that will always return an error and eventually ejected. + backend3 := stubserver.StartTestService(t, &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return nil, errors.New("some error") }, + }) + port3 := testutils.ParsePort(t, backend3.Address) + defer backend3.Stop() + + // Configure CDS resources with Outlier Detection set but + // EnforcingSuccessRate unset. This should cause Outlier Detection to be + // configured with SuccessRateEjection present in configuration, which will + // eventually be populated with its default values along with the knobs set + // as SuccessRate fields in the proto, and thus Outlier Detection should be + // on and actively eject upstreams. 
+ const serviceName = "my-service-client-side-xds" + resources := clientResourcesMultipleBackendsAndOD(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + SecLevel: e2e.SecurityLevelNone, + }, []uint32{port1, port2, port3}, &v3clusterpb.OutlierDetection{ + // Need to set knobs to trigger ejection within the test time frame. + Interval: &durationpb.Duration{Nanos: 50000000}, + // EnforcingSuccessRateSet to nil, causes success rate algorithm to be + // turned on. + SuccessRateMinimumHosts: &wrapperspb.UInt32Value{Value: 1}, + SuccessRateRequestVolume: &wrapperspb.UInt32Value{Value: 8}, + SuccessRateStdevFactor: &wrapperspb.UInt32Value{Value: 1}, + }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if err := managementServer.Update(ctx, resources); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index c9a1611c169b..bcdeaf681ab5 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -27,17 +27,16 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal/balancer/nop" "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" - "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -75,11 +74,25 @@ 
type bb struct{} // Build creates a new CDS balancer with the ClientConn. func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + // Shouldn't happen, registered through imported Cluster Resolver, + // defensive programming. + logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) + } + crParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Cluster Resolver builder has this method. + logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) + } b := &cdsBalancer{ bOpts: opts, updateCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), + crParser: crParser, xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), } b.logger = prefixLogger((b)) @@ -160,6 +173,7 @@ type cdsBalancer struct { logger *grpclog.PrefixLogger closed *grpcsync.Event done *grpcsync.Event + crParser balancer.ConfigParser // The certificate providers are cached here to that they can be closed when // a new provider is to be created. @@ -271,52 +285,6 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } -func outlierDetectionToConfig(od *xdsresource.OutlierDetection) outlierdetection.LBConfig { // Already validated - no need to return error - if od == nil { - // "If the outlier_detection field is not set in the Cluster message, a - // "no-op" outlier_detection config will be generated, with interval set - // to the maximum possible value and all other fields unset." 
- A50 - return outlierdetection.LBConfig{ - Interval: 1<<63 - 1, - } - } - - // "if the enforcing_success_rate field is set to 0, the config - // success_rate_ejection field will be null and all success_rate_* fields - // will be ignored." - A50 - var sre *outlierdetection.SuccessRateEjection - if od.EnforcingSuccessRate != 0 { - sre = &outlierdetection.SuccessRateEjection{ - StdevFactor: od.SuccessRateStdevFactor, - EnforcementPercentage: od.EnforcingSuccessRate, - MinimumHosts: od.SuccessRateMinimumHosts, - RequestVolume: od.SuccessRateRequestVolume, - } - } - - // "If the enforcing_failure_percent field is set to 0 or null, the config - // failure_percent_ejection field will be null and all failure_percent_* - // fields will be ignored." - A50 - var fpe *outlierdetection.FailurePercentageEjection - if od.EnforcingFailurePercentage != 0 { - fpe = &outlierdetection.FailurePercentageEjection{ - Threshold: od.FailurePercentageThreshold, - EnforcementPercentage: od.EnforcingFailurePercentage, - MinimumHosts: od.FailurePercentageMinimumHosts, - RequestVolume: od.FailurePercentageRequestVolume, - } - } - - return outlierdetection.LBConfig{ - Interval: internalserviceconfig.Duration(od.Interval), - BaseEjectionTime: internalserviceconfig.Duration(od.BaseEjectionTime), - MaxEjectionTime: internalserviceconfig.Duration(od.MaxEjectionTime), - MaxEjectionPercent: od.MaxEjectionPercent, - SuccessRateEjection: sre, - FailurePercentageEjection: fpe, - } -} - // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying cluster_resolver balancer. 
func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { @@ -390,28 +358,43 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { b.logger.Infof("Unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) } if envconfig.XDSOutlierDetection { - dms[i].OutlierDetection = outlierDetectionToConfig(cu.OutlierDetection) + odJSON := cu.OutlierDetection + // "In the cds LB policy, if the outlier_detection field is not set in + // the Cluster resource, a "no-op" outlier_detection config will be + // generated in the corresponding DiscoveryMechanism config, with all + // fields unset." - A50 + if odJSON == nil { + // This will pick up top level defaults in Cluster Resolver + // ParseConfig, but sre and fpe will be nil still so still a + // "no-op" config. + odJSON = json.RawMessage(`{}`) + } + dms[i].OutlierDetection = odJSON } } + // Prepare Cluster Resolver config, marshal into JSON, and then Parse it to + // get configuration to send downward to Cluster Resolver. lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: dms, + XDSLBPolicy: update.lbPolicy, + } + crLBCfgJSON, err := json.Marshal(lbCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. + b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg) + return } - bc := &internalserviceconfig.BalancerConfig{} - if err := json.Unmarshal(update.lbPolicy, bc); err != nil { - // This will never occur, valid configuration is emitted from the xDS - // Client. Validity is already checked in the xDS Client, however, this - // double validation is present because Unmarshalling and Validating are - // coupled into one json.Unmarshal operation). We will switch this in - // the future to two separate operations. 
- b.logger.Errorf("Emitted lbPolicy %s from xDS Client is invalid: %v", update.lbPolicy, err) + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.crParser.ParseConfig(crLBCfgJSON); err != nil { + b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", crLBCfgJSON, err) return } - lbCfg.XDSLBPolicy = bc + ccState := balancer.ClientConnState{ ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), - BalancerConfig: lbCfg, + BalancerConfig: sc, } if err := b.childLB.UpdateClientConnState(ccState); err != nil { b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index eb687aa70f76..fcd2e26960c0 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -253,7 +253,7 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -312,7 +312,7 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -468,7 
+468,7 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -502,7 +502,7 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -555,7 +555,7 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -608,7 +608,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // create a new EDS balancer. 
The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the // newChildBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -687,7 +687,7 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { }, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 35923bc8624a..19e937536917 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -58,9 +58,8 @@ var ( Type: "insecure", }, } - noopODLBCfg = outlierdetection.LBConfig{ - Interval: 1<<63 - 1, - } + noopODLBCfg = outlierdetection.LBConfig{} + noopODLBCfgJSON, _ = json.Marshal(noopODLBCfg) wrrLocalityLBConfig = &internalserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ @@ -166,7 +165,11 @@ func (tb *testEDSBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS if xdsclient.FromResolverState(gotCCS.ResolverState) == nil { return fmt.Errorf("want resolver state with XDSClient attached, got one without") } - if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes")); diff != "" { + + // 
Calls into Cluster Resolver LB Config Equal(), which ignores JSON + // configuration but compares the Parsed Configuration of the JSON fields + // emitted from ParseConfig() on the cluster resolver. + if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes"), cmp.AllowUnexported(clusterresolver.LBConfig{})); diff != "" { return fmt.Errorf("received unexpected ClientConnState, diff (-got +want): %v", diff) } return nil @@ -229,9 +232,26 @@ func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { } } -// edsCCS is a helper function to construct a good update passed from the -// cdsBalancer to the edsBalancer. -func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *internalserviceconfig.BalancerConfig, odConfig outlierdetection.LBConfig) balancer.ClientConnState { +// edsCCS is a helper function to construct a Client Conn update which +// represents what the CDS Balancer passes to the Cluster Resolver. It calls +// into Cluster Resolver's ParseConfig to get the service config to fill out the +// Client Conn State. This is to fill out unexported parts of the Cluster +// Resolver config struct. Returns an empty Client Conn State if it encounters +// an error building out the Client Conn State. +func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy json.RawMessage, odConfig json.RawMessage) balancer.ClientConnState { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + // Shouldn't happen, registered through imported Cluster Resolver, + // defensive programming. + logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) + return balancer.ClientConnState{} // will fail the calling test eventually through error in diff. + } + crParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Cluster Resolver builder has this method. 
+ logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) + return balancer.ClientConnState{} + } discoveryMechanism := clusterresolver.DiscoveryMechanism{ Type: clusterresolver.DiscoveryMechanismTypeEDS, Cluster: service, @@ -246,8 +266,21 @@ func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy *inter XDSLBPolicy: xdslbpolicy, } + crLBCfgJSON, err := json.Marshal(lbCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. + logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg) + return balancer.ClientConnState{} + } + + var sc serviceconfig.LoadBalancingConfig + if sc, err = crParser.ParseConfig(crLBCfgJSON); err != nil { + logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", crLBCfgJSON, err) + return balancer.ClientConnState{} + } + return balancer.ClientConnState{ - BalancerConfig: lbCfg, + BalancerConfig: sc, } } @@ -402,7 +435,7 @@ func (s) TestHandleClusterUpdate(t *testing.T) { LRSServerConfig: xdsresource.ClusterLRSServerSelf, LBPolicy: wrrLocalityLBConfigJSON, }, - wantCCS: edsCCS(serviceName, nil, true, wrrLocalityLBConfig, noopODLBCfg), + wantCCS: edsCCS(serviceName, nil, true, wrrLocalityLBConfigJSON, noopODLBCfgJSON), }, { name: "happy-case-without-lrs", @@ -410,7 +443,7 @@ func (s) TestHandleClusterUpdate(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, }, - wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg), + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON), }, { name: "happy-case-with-ring-hash-lb-policy", @@ -418,49 +451,64 @@ func (s) TestHandleClusterUpdate(t *testing.T) { ClusterName: serviceName, LBPolicy: ringHashLBConfigJSON, }, - wantCCS: edsCCS(serviceName, nil, false, &internalserviceconfig.BalancerConfig{ - Name: ringhash.Name, - Config: &ringhash.LBConfig{MinRingSize: 10, MaxRingSize: 100}, - }, noopODLBCfg), + wantCCS: 
edsCCS(serviceName, nil, false, ringHashLBConfigJSON, noopODLBCfgJSON), }, { - name: "happy-case-outlier-detection", + name: "happy-case-outlier-detection-xds-defaults", + // i.e. od proto set but no proto fields set cdsUpdate: xdsresource.ClusterUpdate{ ClusterName: serviceName, - OutlierDetection: &xdsresource.OutlierDetection{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, - MaxEjectionPercent: 10, - SuccessRateStdevFactor: 1900, - EnforcingSuccessRate: 100, - SuccessRateMinimumHosts: 5, - SuccessRateRequestVolume: 100, - FailurePercentageThreshold: 85, - EnforcingFailurePercentage: 5, - FailurePercentageMinimumHosts: 5, - FailurePercentageRequestVolume: 50, - }, + OutlierDetection: json.RawMessage(`{ + "successRateEjection": {} + }`), LBPolicy: wrrLocalityLBConfigJSON, }, - wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfig, outlierdetection.LBConfig{ - Interval: internalserviceconfig.Duration(10 * time.Second), - BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), - MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), - MaxEjectionPercent: 10, - SuccessRateEjection: &outlierdetection.SuccessRateEjection{ - StdevFactor: 1900, - EnforcementPercentage: 100, - MinimumHosts: 5, - RequestVolume: 100, + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, json.RawMessage(`{ + "successRateEjection": {} + }`)), + }, + { + name: "happy-case-outlier-detection-all-fields-set", + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + OutlierDetection: json.RawMessage(`{ + "interval": "10s", + "baseEjectionTime": "30s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 10, + "successRateEjection": { + "stdevFactor": 1900, + "enforcementPercentage": 100, + "minimumHosts": 5, + "requestVolume": 100 }, - FailurePercentageEjection: &outlierdetection.FailurePercentageEjection{ - Threshold: 85, - EnforcementPercentage: 5, - MinimumHosts: 5, - 
RequestVolume: 50, + "failurePercentageEjection": { + "threshold": 85, + "enforcementPercentage": 5, + "minimumHosts": 5, + "requestVolume": 50 + } + }`), + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, json.RawMessage(`{ + "interval": "10s", + "baseEjectionTime": "30s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 10, + "successRateEjection": { + "stdevFactor": 1900, + "enforcementPercentage": 100, + "minimumHosts": 5, + "requestVolume": 100 }, - }), + "failurePercentageEjection": { + "threshold": 85, + "enforcementPercentage": 5, + "minimumHosts": 5, + "requestVolume": 50 + } + }`)), }, } @@ -531,7 +579,7 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -619,7 +667,7 @@ func (s) TestResolverError(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -671,7 +719,7 @@ func (s) TestUpdateSubConnState(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ 
-709,7 +757,7 @@ func (s) TestCircuitBreaking(t *testing.T) { MaxRequests: &maxRequests, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(clusterName, &maxRequests, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(clusterName, &maxRequests, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -746,7 +794,7 @@ func (s) TestClose(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -820,7 +868,7 @@ func (s) TestExitIdle(t *testing.T) { ClusterName: serviceName, LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfig, noopODLBCfg) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -882,130 +930,3 @@ func (s) TestParseConfig(t *testing.T) { }) } } - -func (s) TestOutlierDetectionToConfig(t *testing.T) { - tests := []struct { - name string - od *xdsresource.OutlierDetection - odLBCfgWant outlierdetection.LBConfig - }{ - // "if the outlier_detection field is not set in the Cluster resource, - // a "no-op" outlier_detection config will be generated in the - // corresponding DiscoveryMechanism config, with interval set to the - // maximum possible value and all other fields unset." 
- A50 - { - name: "no-op-outlier-detection-config", - od: nil, - odLBCfgWant: noopODLBCfg, - }, - // "if the enforcing_success_rate field is set to 0, the config - // success_rate_ejection field will be null and all success_rate_* - // fields will be ignored." - A50 - { - name: "enforcing-success-rate-zero", - od: &xdsresource.OutlierDetection{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, - MaxEjectionPercent: 10, - SuccessRateStdevFactor: 1900, - EnforcingSuccessRate: 0, - SuccessRateMinimumHosts: 5, - SuccessRateRequestVolume: 100, - FailurePercentageThreshold: 85, - EnforcingFailurePercentage: 5, - FailurePercentageMinimumHosts: 5, - FailurePercentageRequestVolume: 50, - }, - odLBCfgWant: outlierdetection.LBConfig{ - Interval: internalserviceconfig.Duration(10 * time.Second), - BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), - MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), - MaxEjectionPercent: 10, - SuccessRateEjection: nil, - FailurePercentageEjection: &outlierdetection.FailurePercentageEjection{ - Threshold: 85, - EnforcementPercentage: 5, - MinimumHosts: 5, - RequestVolume: 50, - }, - }, - }, - // "If the enforcing_failure_percent field is set to 0 or null, the - // config failure_percent_ejection field will be null and all - // failure_percent_* fields will be ignored." 
- A50 - { - name: "enforcing-failure-percentage-zero", - od: &xdsresource.OutlierDetection{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, - MaxEjectionPercent: 10, - SuccessRateStdevFactor: 1900, - EnforcingSuccessRate: 100, - SuccessRateMinimumHosts: 5, - SuccessRateRequestVolume: 100, - FailurePercentageThreshold: 85, - EnforcingFailurePercentage: 0, - FailurePercentageMinimumHosts: 5, - FailurePercentageRequestVolume: 50, - }, - odLBCfgWant: outlierdetection.LBConfig{ - Interval: internalserviceconfig.Duration(10 * time.Second), - BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), - MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), - MaxEjectionPercent: 10, - SuccessRateEjection: &outlierdetection.SuccessRateEjection{ - StdevFactor: 1900, - EnforcementPercentage: 100, - MinimumHosts: 5, - RequestVolume: 100, - }, - FailurePercentageEjection: nil, - }, - }, - { - name: "normal-conversion", - od: &xdsresource.OutlierDetection{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, - MaxEjectionPercent: 10, - SuccessRateStdevFactor: 1900, - EnforcingSuccessRate: 100, - SuccessRateMinimumHosts: 5, - SuccessRateRequestVolume: 100, - FailurePercentageThreshold: 85, - EnforcingFailurePercentage: 5, - FailurePercentageMinimumHosts: 5, - FailurePercentageRequestVolume: 50, - }, - odLBCfgWant: outlierdetection.LBConfig{ - Interval: internalserviceconfig.Duration(10 * time.Second), - BaseEjectionTime: internalserviceconfig.Duration(30 * time.Second), - MaxEjectionTime: internalserviceconfig.Duration(300 * time.Second), - MaxEjectionPercent: 10, - SuccessRateEjection: &outlierdetection.SuccessRateEjection{ - StdevFactor: 1900, - EnforcementPercentage: 100, - MinimumHosts: 5, - RequestVolume: 100, - }, - FailurePercentageEjection: &outlierdetection.FailurePercentageEjection{ - Threshold: 85, - EnforcementPercentage: 5, - 
MinimumHosts: 5, - RequestVolume: 50, - }, - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - odLBCfgGot := outlierDetectionToConfig(test.od) - if diff := cmp.Diff(odLBCfgGot, test.odLBCfgWant); diff != "" { - t.Fatalf("outlierDetectionToConfig(%v) (-want, +got):\n%s", test.od, diff) - } - }) - } -} diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index 18dac2596d0a..5eadd1ac1d0e 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -25,21 +25,21 @@ import ( "encoding/json" "errors" "fmt" - "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/nop" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -65,12 +65,12 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal priorityBuilder := balancer.Get(priority.Name) if priorityBuilder == nil { logger.Errorf("%q LB policy is needed but not registered", priority.Name) - return nil + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", priority.Name)) } priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) if !ok { logger.Errorf("%q LB policy does 
not implement a config parser", priority.Name) - return nil + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", priority.Name)) } b := &clusterResolverBalancer{ @@ -99,15 +99,48 @@ func (bb) Name() string { return Name } -func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg LBConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(c), err) +func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + odBuilder := balancer.Get(outlierdetection.Name) + if odBuilder == nil { + // Shouldn't happen, registered through imported Outlier Detection, + // defensive programming. + return nil, fmt.Errorf("%q LB policy is needed but not registered", outlierdetection.Name) } - if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, roundrobin.Name) && !strings.EqualFold(lbp.Name, ringhash.Name) { - return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, roundrobin.Name, ringhash.Name) + odParser, ok := odBuilder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Outlier Detection builder has this method. 
+ return nil, fmt.Errorf("%q LB policy does not implement a config parser", outlierdetection.Name) + } + + var cfg *LBConfig + if err := json.Unmarshal(j, &cfg); err != nil { + return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(j), err) + } + + if envconfig.XDSOutlierDetection { + for i, dm := range cfg.DiscoveryMechanisms { + lbCfg, err := odParser.ParseConfig(dm.OutlierDetection) + if err != nil { + return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err) + } + odCfg, ok := lbCfg.(*outlierdetection.LBConfig) + if !ok { + // Shouldn't happen, Parser built at build time with Outlier Detection + // builder pulled from gRPC LB Registry. + return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg) + } + cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg + } + } + if err := json.Unmarshal(cfg.XDSLBPolicy, &cfg.xdsLBPolicy); err != nil { + // This will never occur, valid configuration is emitted from the xDS + // Client. Validity is already checked in the xDS Client, however, this + // double validation is present because Unmarshalling and Validating are + // coupled into one json.Unmarshal operation). We will switch this in + // the future to two separate operations. + return nil, fmt.Errorf("error unmarshaling xDS LB Policy: %v", err) } - return &cfg, nil + return cfg, nil } // ccUpdate wraps a clientConn update received from gRPC. 
@@ -208,7 +241,7 @@ func (b *clusterResolverBalancer) updateChildConfig() { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } - childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, b.config.XDSLBPolicy) + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) if err != nil { b.logger.Warningf("Failed to build child policy config: %v", err) return diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 65cb7a9bf981..6d798a1543b0 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpctest" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" xdsinternal "google.golang.org/grpc/xds/internal" @@ -325,12 +325,16 @@ func newLBConfigWithOneEDS(edsServiceName string) *LBConfig { Type: DiscoveryMechanismTypeEDS, EDSServiceName: edsServiceName, }}, + xdsLBPolicy: iserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", + Config: nil, + }, } } func newLBConfigWithOneEDSAndOutlierDetection(edsServiceName string, odCfg outlierdetection.LBConfig) *LBConfig { lbCfg := newLBConfigWithOneEDS(edsServiceName) - lbCfg.DiscoveryMechanisms[0].OutlierDetection = odCfg + lbCfg.DiscoveryMechanisms[0].outlierDetection = odCfg return lbCfg } @@ -381,15 +385,22 @@ func (s) TestOutlierDetection(t *testing.T) { pCfgWant := &priority.LBConfig{ Children: map[string]*priority.Child{ "priority-0-0": { - Config: &internalserviceconfig.BalancerConfig{ + Config: &iserviceconfig.BalancerConfig{ Name: outlierdetection.Name, Config: &outlierdetection.LBConfig{ - Interval: 1<<63 - 
1, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ Cluster: testClusterName, EDSServiceName: "test-eds-service-name", + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", + Config: nil, + }, }, }, }, diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go index 2458b106772f..c67608819185 100644 --- a/xds/internal/balancer/clusterresolver/config.go +++ b/xds/internal/balancer/clusterresolver/config.go @@ -102,11 +102,13 @@ type DiscoveryMechanism struct { DNSHostname string `json:"dnsHostname,omitempty"` // OutlierDetection is the Outlier Detection LB configuration for this // priority. - OutlierDetection outlierdetection.LBConfig `json:"outlierDetection,omitempty"` + OutlierDetection json.RawMessage `json:"outlierDetection,omitempty"` + outlierDetection outlierdetection.LBConfig } // Equal returns whether the DiscoveryMechanism is the same with the parameter. func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + od := &dm.outlierDetection switch { case dm.Cluster != b.Cluster: return false @@ -118,7 +120,7 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { return false case dm.DNSHostname != b.DNSHostname: return false - case !dm.OutlierDetection.EqualIgnoringChildPolicy(&b.OutlierDetection): + case !od.EqualIgnoringChildPolicy(&b.outlierDetection): return false } @@ -151,16 +153,6 @@ type LBConfig struct { DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` // XDSLBPolicy specifies the policy for locality picking and endpoint picking. 
- // - // Note that it's not normal balancing policy, and it can only be either - // ROUND_ROBIN or RING_HASH. - // - // For ROUND_ROBIN, the policy name will be "ROUND_ROBIN", and the config - // will be empty. This sets the locality-picking policy to weighted_target - // and the endpoint-picking policy to round_robin. - // - // For RING_HASH, the policy name will be "RING_HASH", and the config will - // be lb config for the ring_hash_experimental LB Policy. ring_hash policy - // is responsible for both locality picking and endpoint picking. - XDSLBPolicy *internalserviceconfig.BalancerConfig `json:"xdsLbPolicy,omitempty"` + XDSLBPolicy json.RawMessage `json:"xdsLbPolicy,omitempty"` + xdsLBPolicy internalserviceconfig.BalancerConfig } diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go index fd17f3ede6d1..608c17ef78c8 100644 --- a/xds/internal/balancer/clusterresolver/config_test.go +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -21,10 +21,13 @@ package clusterresolver import ( "encoding/json" "testing" + "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -101,8 +104,10 @@ const ( }, "maxConcurrentRequests": 314, "type": "EDS", - "edsServiceName": "test-eds-service-name" - }] + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] }` testJSONConfig2 = `{ "discoveryMechanisms": [{ @@ -113,10 +118,13 @@ const ( }, "maxConcurrentRequests": 314, "type": "EDS", - "edsServiceName": "test-eds-service-name" + "edsServiceName": 
"test-eds-service-name", + "outlierDetection": {} },{ - "type": "LOGICAL_DNS" - }] + "type": "LOGICAL_DNS", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] }` testJSONConfig3 = `{ "discoveryMechanisms": [{ @@ -127,7 +135,8 @@ const ( }, "maxConcurrentRequests": 314, "type": "EDS", - "edsServiceName": "test-eds-service-name" + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} }], "xdsLbPolicy":[{"ROUND_ROBIN":{}}] }` @@ -140,7 +149,8 @@ const ( }, "maxConcurrentRequests": 314, "type": "EDS", - "edsServiceName": "test-eds-service-name" + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} }], "xdsLbPolicy":[{"ring_hash_experimental":{}}] }` @@ -153,9 +163,10 @@ const ( }, "maxConcurrentRequests": 314, "type": "EDS", - "edsServiceName": "test-eds-service-name" + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} }], - "xdsLbPolicy":[{"pick_first":{}}] + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] }` ) @@ -190,9 +201,19 @@ func TestParseConfig(t *testing.T) { MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, }, }, - XDSLBPolicy: nil, + xdsLBPolicy: iserviceconfig.BalancerConfig{ // do we want to make this not pointer + Name: "ROUND_ROBIN", + Config: nil, + }, }, wantErr: false, }, @@ -207,12 +228,29 @@ func TestParseConfig(t *testing.T) { MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + 
MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, }, { Type: DiscoveryMechanismTypeLogicalDNS, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, }, }, - XDSLBPolicy: nil, + xdsLBPolicy: iserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", + Config: nil, + }, }, wantErr: false, }, @@ -227,9 +265,16 @@ func TestParseConfig(t *testing.T) { MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, }, }, - XDSLBPolicy: &internalserviceconfig.BalancerConfig{ + xdsLBPolicy: iserviceconfig.BalancerConfig{ Name: "ROUND_ROBIN", Config: nil, }, @@ -247,9 +292,16 @@ func TestParseConfig(t *testing.T) { MaxConcurrentRequests: newUint32(testMaxRequests), Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, }, }, - XDSLBPolicy: &internalserviceconfig.BalancerConfig{ + xdsLBPolicy: iserviceconfig.BalancerConfig{ Name: ringhash.Name, Config: &ringhash.LBConfig{MinRingSize: 1024, MaxRingSize: 4096}, // Ringhash LB config with default min and max. 
}, @@ -257,9 +309,31 @@ func TestParseConfig(t *testing.T) { wantErr: false, }, { - name: "unsupported picking policy", - js: testJSONConfig5, - wantErr: true, + name: "noop-outlier-detection", + js: testJSONConfig5, + want: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, + }, + }, + xdsLBPolicy: iserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", + Config: nil, + }, + }, + wantErr: false, }, } for _, tt := range tests { @@ -279,7 +353,7 @@ func TestParseConfig(t *testing.T) { if tt.wantErr { return } - if diff := cmp.Diff(got, tt.want); diff != "" { + if diff := cmp.Diff(got, tt.want, cmp.AllowUnexported(LBConfig{}), cmpopts.IgnoreFields(LBConfig{}, "XDSLBPolicy")); diff != "" { t.Errorf("parseConfig() got unexpected output, diff (-got +want): %v", diff) } }) diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go index 06b0aec2f311..4b83dfb2bfa0 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -100,7 +100,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi retAddrs = append(retAddrs, addrs...) 
var odCfgs map[string]*outlierdetection.LBConfig if envconfig.XDSOutlierDetection { - odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.OutlierDetection) + odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) for n, c := range odCfgs { retConfig.Children[n] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, @@ -124,7 +124,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi retAddrs = append(retAddrs, addrs...) var odCfg *outlierdetection.LBConfig if envconfig.XDSOutlierDetection { - odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.OutlierDetection) + odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, // Not ignore re-resolution from DNS children, they will trigger diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go index 6c94cae9ed47..b30686b18561 100644 --- a/xds/internal/balancer/clusterresolver/configbuilder_test.go +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -24,6 +24,7 @@ import ( "fmt" "sort" "testing" + "time" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/attributes" @@ -31,7 +32,7 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/internal/hierarchy" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" @@ -72,7 +73,10 @@ var ( } noopODCfg = outlierdetection.LBConfig{ - Interval: 1<<63 - 1, + Interval: 
iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, } ) @@ -194,7 +198,7 @@ func TestBuildPriorityConfig(t *testing.T) { Cluster: testClusterName, Type: DiscoveryMechanismTypeEDS, EDSServiceName: testEDSServiceName, - OutlierDetection: noopODCfg, + outlierDetection: noopODCfg, }, edsResp: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ @@ -211,7 +215,7 @@ func TestBuildPriorityConfig(t *testing.T) { mechanism: DiscoveryMechanism{ Cluster: testClusterName2, Type: DiscoveryMechanismTypeLogicalDNS, - OutlierDetection: noopODCfg, + outlierDetection: noopODCfg, }, addresses: testAddressStrs[4], childNameGen: newNameGenerator(1), @@ -221,11 +225,14 @@ func TestBuildPriorityConfig(t *testing.T) { wantConfig := &priority.LBConfig{ Children: map[string]*priority.Child{ "priority-0-0": { - Config: &internalserviceconfig.BalancerConfig{ + Config: &iserviceconfig.BalancerConfig{ Name: outlierdetection.Name, Config: &outlierdetection.LBConfig{ - Interval: 1<<63 - 1, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ Cluster: testClusterName, @@ -238,11 +245,14 @@ func TestBuildPriorityConfig(t *testing.T) { IgnoreReresolutionRequests: true, }, "priority-0-1": { - Config: &internalserviceconfig.BalancerConfig{ + Config: &iserviceconfig.BalancerConfig{ Name: outlierdetection.Name, Config: &outlierdetection.LBConfig{ - Interval: 1<<63 - 1, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + 
BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ Cluster: testClusterName, @@ -255,15 +265,18 @@ func TestBuildPriorityConfig(t *testing.T) { IgnoreReresolutionRequests: true, }, "priority-1": { - Config: &internalserviceconfig.BalancerConfig{ + Config: &iserviceconfig.BalancerConfig{ Name: outlierdetection.Name, Config: &outlierdetection.LBConfig{ - Interval: 1<<63 - 1, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ Cluster: testClusterName2, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: "pick_first"}, + ChildPolicy: &iserviceconfig.BalancerConfig{Name: "pick_first"}, }, }, }, @@ -283,7 +296,7 @@ func TestBuildClusterImplConfigForDNS(t *testing.T) { wantName := "priority-3" wantConfig := &clusterimpl.LBConfig{ Cluster: testClusterName2, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "pick_first", }, } @@ -500,7 +513,7 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { localities []xdsresource.Locality priorityName string mechanism DiscoveryMechanism - childPolicy *internalserviceconfig.BalancerConfig + childPolicy *iserviceconfig.BalancerConfig wantConfig *clusterimpl.LBConfig wantAddrs []resolver.Address wantErr bool @@ -525,7 +538,7 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, }, priorityName: "test-priority", - childPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + childPolicy: 
&iserviceconfig.BalancerConfig{Name: roundrobin.Name}, mechanism: DiscoveryMechanism{ Cluster: testClusterName, Type: DiscoveryMechanismTypeEDS, @@ -535,7 +548,7 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { wantConfig: &clusterimpl.LBConfig{ Cluster: testClusterName, EDSServiceName: testEDSService, - ChildPolicy: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}, + ChildPolicy: &iserviceconfig.BalancerConfig{Name: roundrobin.Name}, }, wantAddrs: []resolver.Address{ testAddrWithAttrs("addr-1-1", 20, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), @@ -565,10 +578,10 @@ func TestPriorityLocalitiesToClusterImpl(t *testing.T) { }, }, priorityName: "test-priority", - childPolicy: &internalserviceconfig.BalancerConfig{Name: ringhash.Name, Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}}, + childPolicy: &iserviceconfig.BalancerConfig{Name: ringhash.Name, Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}}, // lrsServer is nil, so LRS policy will not be used. 
wantConfig: &clusterimpl.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: ringhash.Name, Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}, }, @@ -638,7 +651,7 @@ func TestConvertClusterImplMapToOutlierDetection(t *testing.T) { wantODCfgs: map[string]*outlierdetection.LBConfig{ "child1": { Interval: 1<<63 - 1, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ Cluster: "cluster1", @@ -663,7 +676,7 @@ func TestConvertClusterImplMapToOutlierDetection(t *testing.T) { wantODCfgs: map[string]*outlierdetection.LBConfig{ "child1": { Interval: 1<<63 - 1, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ Cluster: "cluster1", @@ -672,7 +685,7 @@ func TestConvertClusterImplMapToOutlierDetection(t *testing.T) { }, "child2": { Interval: 1<<63 - 1, - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: clusterimpl.Name, Config: &clusterimpl.LBConfig{ Cluster: "cluster2", diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index c7c2ab9945f0..0be84f7b74fb 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -193,7 +193,8 @@ func (s) TestEDS_OneLocality(t *testing.T) { "discoveryMechanisms": [{ "cluster": "%s", "type": "EDS", - "edsServiceName": "%s" + "edsServiceName": "%s", + "outlierDetection": {} }], "xdsLbPolicy":[{"round_robin":{}}] } @@ -301,7 +302,8 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { "discoveryMechanisms": [{ "cluster": "%s", "type": "EDS", - "edsServiceName": "%s" + "edsServiceName": "%s", + "outlierDetection": {} }], 
"xdsLbPolicy":[{"round_robin":{}}] } @@ -422,7 +424,8 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { "discoveryMechanisms": [{ "cluster": "%s", "type": "EDS", - "edsServiceName": "%s" + "edsServiceName": "%s", + "outlierDetection": {} }], "xdsLbPolicy":[{"round_robin":{}}] } @@ -488,7 +491,8 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { "discoveryMechanisms": [{ "cluster": "%s", "type": "EDS", - "edsServiceName": "%s" + "edsServiceName": "%s", + "outlierDetection": {} }], "xdsLbPolicy":[{"round_robin":{}}] } diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 68325a31c17e..4d2904c67ff6 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -85,7 +85,7 @@ func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) Cluster: testClusterName, Type: DiscoveryMechanismTypeEDS, }}, - XDSLBPolicy: wrrLocalityLBConfig, + xdsLBPolicy: *wrrLocalityLBConfig, }, }); err != nil { edsb.Close() @@ -855,7 +855,7 @@ func (s) TestFallbackToDNS(t *testing.T) { DNSHostname: testDNSTarget, }, }, - XDSLBPolicy: wrrLocalityLBConfig, + xdsLBPolicy: *wrrLocalityLBConfig, }, }); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 548514f6d05d..eaf4f7fc9ab7 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -23,7 +23,6 @@ package outlierdetection import ( "encoding/json" - "errors" "fmt" "math" "strings" @@ -41,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -81,19 +81,27 @@ func (bb) Build(cc 
balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var lbCfg *LBConfig - if err := json.Unmarshal(s, &lbCfg); err != nil { // Validates child config if present as well. + lbCfg := &LBConfig{ + // Default top layer values as documented in A50. + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + } + + // This unmarshalling handles underlying layers sre and fpe which have their + // own defaults for their fields if either sre or fpe are present. + if err := json.Unmarshal(s, lbCfg); err != nil { // Validates child config if present as well. return nil, fmt.Errorf("xds: unable to unmarshal LBconfig: %s, error: %v", string(s), err) } // Note: in the xds flow, these validations will never fail. The xdsclient // performs the same validations as here on the xds Outlier Detection - // resource before parsing into the internal struct which gets marshaled - // into JSON before calling this function. A50 defines two separate places - // for these validations to take place, the xdsclient and this ParseConfig - // method. "When parsing a config from JSON, if any of these requirements is - // violated, that should be treated as a parsing error." - A50 - + // resource before parsing resource into JSON which this function gets + // called with. A50 defines two separate places for these validations to + // take place, the xdsclient and this ParseConfig method. "When parsing a + // config from JSON, if any of these requirements is violated, that should + // be treated as a parsing error." 
- A50 switch { // "The google.protobuf.Duration fields interval, base_ejection_time, and // max_ejection_time must obey the restrictions in the @@ -122,10 +130,7 @@ func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, err return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = %v; must be <= 100", lbCfg.FailurePercentageEjection.Threshold) case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.EnforcementPercentage > 100: return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = %v; must be <= 100", lbCfg.FailurePercentageEjection.EnforcementPercentage) - case lbCfg.ChildPolicy == nil: - return nil, errors.New("OutlierDetectionLoadBalancingConfig.child_policy must be present") } - return lbCfg, nil } diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go index 4f542d61e572..3d1efe8dcd56 100644 --- a/xds/internal/balancer/outlierdetection/balancer_test.go +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -68,7 +68,20 @@ func (s) TestParseConfig(t *testing.T) { }) parser := bb{} - + const ( + defaultInterval = iserviceconfig.Duration(10 * time.Second) + defaultBaseEjectionTime = iserviceconfig.Duration(30 * time.Second) + defaultMaxEjectionTime = iserviceconfig.Duration(300 * time.Second) + defaultMaxEjectionPercent = 10 + defaultSuccessRateStdevFactor = 1900 + defaultEnforcingSuccessRate = 100 + defaultSuccessRateMinimumHosts = 5 + defaultSuccessRateRequestVolume = 100 + defaultFailurePercentageThreshold = 85 + defaultEnforcingFailurePercentage = 0 + defaultFailurePercentageMinimumHosts = 5 + defaultFailurePercentageRequestVolume = 50 + ) tests := []struct { name string input string @@ -76,8 +89,35 @@ func (s) TestParseConfig(t *testing.T) { wantErr string }{ { - name: "noop-lb-config", + name: "no-fields-set-should-get-default", + input: 
`{ + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + + { + name: "some-top-level-fields-set", input: `{ + "interval": "15s", + "maxEjectionTime": "350s", "childPolicy": [ { "xds_cluster_impl_experimental": { @@ -86,7 +126,184 @@ func (s) TestParseConfig(t *testing.T) { } ] }`, + // Should get set fields + defaults for unset fields. + wantCfg: &LBConfig{ + Interval: iserviceconfig.Duration(15 * time.Second), + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: iserviceconfig.Duration(350 * time.Second), + MaxEjectionPercent: defaultMaxEjectionPercent, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "success-rate-ejection-present-but-no-fields", + input: `{ + "successRateEjection": {}, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get defaults of success-rate-ejection struct. 
+ wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: defaultSuccessRateStdevFactor, + EnforcementPercentage: defaultEnforcingSuccessRate, + MinimumHosts: defaultSuccessRateMinimumHosts, + RequestVolume: defaultSuccessRateRequestVolume, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "success-rate-ejection-present-partially-set", + input: `{ + "successRateEjection": { + "stdevFactor": 1000, + "minimumHosts": 5 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get set fields + defaults for others in success rate + // ejection layer. + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1000, + EnforcementPercentage: defaultEnforcingSuccessRate, + MinimumHosts: 5, + RequestVolume: defaultSuccessRateRequestVolume, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "success-rate-ejection-present-fully-set", + input: `{ + "successRateEjection": { + "stdevFactor": 1000, + "enforcementPercentage": 50, + "minimumHosts": 5, + "requestVolume": 50 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + 
SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1000, + EnforcementPercentage: 50, + MinimumHosts: 5, + RequestVolume: 50, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "failure-percentage-ejection-present-but-no-fields", + input: `{ + "failurePercentageEjection": {}, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get defaults of failure percentage ejection layer. + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: defaultFailurePercentageThreshold, + EnforcementPercentage: defaultEnforcingFailurePercentage, + MinimumHosts: defaultFailurePercentageMinimumHosts, + RequestVolume: defaultFailurePercentageRequestVolume, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "failure-percentage-ejection-present-partially-set", + input: `{ + "failurePercentageEjection": { + "threshold": 80, + "minimumHosts": 10 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get set fields + defaults for others in success rate + // ejection layer. 
wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 80, + EnforcementPercentage: defaultEnforcingFailurePercentage, + MinimumHosts: 10, + RequestVolume: defaultFailurePercentageRequestVolume, + }, ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "xds_cluster_impl_experimental", Config: &clusterimpl.LBConfig{ @@ -96,7 +313,81 @@ func (s) TestParseConfig(t *testing.T) { }, }, { - name: "good-lb-config", + name: "failure-percentage-ejection-present-fully-set", + input: `{ + "failurePercentageEjection": { + "threshold": 80, + "enforcementPercentage": 100, + "minimumHosts": 10, + "requestVolume": 40 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 80, + EnforcementPercentage: 100, + MinimumHosts: 10, + RequestVolume: 40, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { // to make sure zero values aren't overwritten by defaults + name: "lb-config-every-field-set-zero-value", + input: `{ + "interval": "0s", + "baseEjectionTime": "0s", + "maxEjectionTime": "0s", + "maxEjectionPercent": 0, + "successRateEjection": { + "stdevFactor": 0, + "enforcementPercentage": 0, + "minimumHosts": 0, + "requestVolume": 0 + }, + "failurePercentageEjection": { + "threshold": 0, + "enforcementPercentage": 0, + "minimumHosts": 0, + "requestVolume": 0 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + 
] + }`, + wantCfg: &LBConfig{ + SuccessRateEjection: &SuccessRateEjection{}, + FailurePercentageEjection: &FailurePercentageEjection{}, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "lb-config-every-field-set", input: `{ "interval": "10s", "baseEjectionTime": "30s", @@ -194,28 +485,6 @@ func (s) TestParseConfig(t *testing.T) { }`, wantErr: "OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = 150; must be <= 100", }, - { - name: "child-policy-not-present", - input: `{ - "interval": "10s", - "baseEjectionTime": "30s", - "maxEjectionTime": "300s", - "maxEjectionPercent": 10, - "successRateEjection": { - "stdevFactor": 1900, - "enforcementPercentage": 100, - "minimumHosts": 5, - "requestVolume": 100 - }, - "failurePercentageEjection": { - "threshold": 85, - "enforcementPercentage": 5, - "minimumHosts": 5, - "requestVolume": 50 - } - }`, - wantErr: "OutlierDetectionLoadBalancingConfig.child_policy must be present", - }, { name: "child-policy-present-but-parse-error", input: `{ @@ -242,26 +511,6 @@ func (s) TestParseConfig(t *testing.T) { }`, wantErr: "invalid loadBalancingConfig: no supported policies found", }, - { - name: "child-policy", - input: `{ - "childPolicy": [ - { - "xds_cluster_impl_experimental": { - "cluster": "test_cluster" - } - } - ] - }`, - wantCfg: &LBConfig{ - ChildPolicy: &iserviceconfig.BalancerConfig{ - Name: "xds_cluster_impl_experimental", - Config: &clusterimpl.LBConfig{ - Cluster: "test_cluster", - }, - }, - }, - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/xds/internal/balancer/outlierdetection/config.go b/xds/internal/balancer/outlierdetection/config.go index 9c4383cf6ece..196a562ed69d 100644 --- a/xds/internal/balancer/outlierdetection/config.go +++ b/xds/internal/balancer/outlierdetection/config.go @@ -18,6 +18,9 @@ package 
outlierdetection import ( + "encoding/json" + "time" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -52,6 +55,24 @@ type SuccessRateEjection struct { RequestVolume uint32 `json:"requestVolume,omitempty"` } +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type successRateEjection SuccessRateEjection + +// UnmarshalJSON unmarshals JSON into SuccessRateEjection. If a +// SuccessRateEjection field is not set, that field will get its default value. +func (sre *SuccessRateEjection) UnmarshalJSON(j []byte) error { + sre.StdevFactor = 1900 + sre.EnforcementPercentage = 100 + sre.MinimumHosts = 5 + sre.RequestVolume = 100 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*successRateEjection)(sre)) +} + // Equal returns whether the SuccessRateEjection is the same with the parameter. func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { if sre == nil && sre2 == nil { @@ -99,6 +120,25 @@ type FailurePercentageEjection struct { RequestVolume uint32 `json:"requestVolume,omitempty"` } +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type failurePercentageEjection FailurePercentageEjection + +// UnmarshalJSON unmarshals JSON into FailurePercentageEjection. If a +// FailurePercentageEjection field is not set, that field will get its default +// value. +func (fpe *FailurePercentageEjection) UnmarshalJSON(j []byte) error { + fpe.Threshold = 85 + fpe.EnforcementPercentage = 0 + fpe.MinimumHosts = 5 + fpe.RequestVolume = 50 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. 
typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*failurePercentageEjection)(fpe)) +} + // Equal returns whether the FailurePercentageEjection is the same with the // parameter. func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { @@ -149,6 +189,28 @@ type LBConfig struct { ChildPolicy *iserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type lbConfig LBConfig + +// UnmarshalJSON unmarshals JSON into LBConfig. If a top level LBConfig field +// (i.e. not next layer sre or fpe) is not set, that field will get its default +// value. If sre or fpe is not set, it will stay unset, otherwise it will +// unmarshal on those types populating with default values for their fields if +// needed. +func (lbc *LBConfig) UnmarshalJSON(j []byte) error { + // Default top layer values as documented in A50. + lbc.Interval = iserviceconfig.Duration(10 * time.Second) + lbc.BaseEjectionTime = iserviceconfig.Duration(30 * time.Second) + lbc.MaxEjectionTime = iserviceconfig.Duration(300 * time.Second) + lbc.MaxEjectionPercent = 10 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*lbConfig)(lbc)) +} + // EqualIgnoringChildPolicy returns whether the LBConfig is same with the // parameter outside of the child policy, only comparing the Outlier Detection // specific configuration. 
diff --git a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go index afa418815a0b..e8665925739b 100644 --- a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/serviceconfig" _ "google.golang.org/grpc/xds" // Register the xDS LB Registry Converters. @@ -107,7 +107,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { name string cluster *v3clusterpb.Cluster wantUpdate xdsresource.ClusterUpdate - wantLBConfig *internalserviceconfig.BalancerConfig + wantLBConfig *iserviceconfig.BalancerConfig customLBDisabled bool }{ { @@ -142,10 +142,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { ClusterType: xdsresource.ClusterTypeLogicalDNS, DNSHostName: "dns_host:8080", }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "round_robin", }, }, @@ -169,10 +169,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { ClusterName: clusterName, LRSServerConfig: xdsresource.ClusterLRSOff, ClusterType: xdsresource.ClusterTypeAggregate, PrioritizedClusterNames: []string{"a", "b", "c"}, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ 
Name: "round_robin", }, }, @@ -193,10 +193,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, wantUpdate: emptyUpdate, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "round_robin", }, }, @@ -218,10 +218,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, }, wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSOff}, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "round_robin", }, }, @@ -248,10 +248,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf}, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "round_robin", }, }, @@ -290,10 +290,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { }, }, wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: 
&internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "round_robin", }, }, @@ -322,7 +322,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: "ring_hash_experimental", Config: &ringhash.LBConfig{ MinRingSize: 1024, @@ -359,7 +359,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: "ring_hash_experimental", Config: &ringhash.LBConfig{ MinRingSize: 10, @@ -397,7 +397,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: "ring_hash_experimental", Config: &ringhash.LBConfig{ MinRingSize: 10, @@ -431,10 +431,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "round_robin", }, }, @@ -469,10 +469,10 @@ func (s) TestValidateCluster_Success(t *testing.T) { wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ 
Name: wrrlocality.Name, Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ Name: "myorg.MyCustomLeastRequestPolicy", Config: customLBConfig{}, }, @@ -516,7 +516,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: "ring_hash_experimental", Config: &ringhash.LBConfig{ MinRingSize: 20, @@ -562,7 +562,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { wantUpdate: xdsresource.ClusterUpdate{ ClusterName: clusterName, EDSServiceName: serviceName, }, - wantLBConfig: &internalserviceconfig.BalancerConfig{ + wantLBConfig: &iserviceconfig.BalancerConfig{ Name: "ring_hash_experimental", Config: &ringhash.LBConfig{ MinRingSize: 10, @@ -592,7 +592,7 @@ func (s) TestValidateCluster_Success(t *testing.T) { if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "LBPolicy")); diff != "" { t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) } - bc := &internalserviceconfig.BalancerConfig{} + bc := &iserviceconfig.BalancerConfig{} if err := json.Unmarshal(update.LBPolicy, bc); err != nil { t.Fatalf("failed to unmarshal JSON: %v", err) } diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go index 8ea9608dc9b7..269d9ebdae15 100644 --- a/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/xds/internal/xdsclient/xdsresource/type_cds.go @@ -19,7 +19,6 @@ package xdsresource import ( "encoding/json" - "time" "google.golang.org/protobuf/types/known/anypb" ) @@ -52,71 +51,6 @@ const ( ClusterLRSServerSelf ) -// OutlierDetection is the outlier detection configuration for a cluster. 
-type OutlierDetection struct { - // Interval is the time interval between ejection analysis sweeps. This can - // result in both new ejections as well as addresses being returned to - // service. Defaults to 10s. - Interval time.Duration - // BaseEjectionTime is the base time that a host is ejected for. The real - // time is equal to the base time multiplied by the number of times the host - // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. - BaseEjectionTime time.Duration - // MaxEjectionTime is the maximum time that an address is ejected for. If - // not specified, the default value (300s) or the BaseEjectionTime value is - // applied, whichever is larger. - MaxEjectionTime time.Duration - // MaxEjectionPercent is the maximum % of an upstream cluster that can be - // ejected due to outlier detection. Defaults to 10% but will eject at least - // one host regardless of the value. - MaxEjectionPercent uint32 - // SuccessRateStdevFactor is used to determine the ejection threshold for - // success rate outlier ejection. The ejection threshold is the difference - // between the mean success rate, and the product of this factor and the - // standard deviation of the mean success rate: mean - (stdev * - // success_rate_stdev_factor). This factor is divided by a thousand to get a - // double. That is, if the desired factor is 1.9, the runtime value should - // be 1900. Defaults to 1900. - SuccessRateStdevFactor uint32 - // EnforcingSuccessRate is the % chance that a host will be actually ejected - // when an outlier status is detected through success rate statistics. This - // setting can be used to disable ejection or to ramp it up slowly. Defaults - // to 100. - EnforcingSuccessRate uint32 - // SuccessRateMinimumHosts is the number of hosts in a cluster that must - // have enough request volume to detect success rate outliers. 
If the number - // of hosts is less than this setting, outlier detection via success rate - // statistics is not performed for any host in the cluster. Defaults to 5. - SuccessRateMinimumHosts uint32 - // SuccessRateRequestVolume is the minimum number of total requests that - // must be collected in one interval (as defined by the interval duration - // above) to include this host in success rate based outlier detection. If - // the volume is lower than this setting, outlier detection via success rate - // statistics is not performed for that host. Defaults to 100. - SuccessRateRequestVolume uint32 - // FailurePercentageThreshold is the failure percentage to use when - // determining failure percentage-based outlier detection. If the failure - // percentage of a given host is greater than or equal to this value, it - // will be ejected. Defaults to 85. - FailurePercentageThreshold uint32 - // EnforcingFailurePercentage is the % chance that a host will be actually - // ejected when an outlier status is detected through failure percentage - // statistics. This setting can be used to disable ejection or to ramp it up - // slowly. Defaults to 0. - EnforcingFailurePercentage uint32 - // FailurePercentageMinimumHosts is the minimum number of hosts in a cluster - // in order to perform failure percentage-based ejection. If the total - // number of hosts in the cluster is less than this value, failure - // percentage-based ejection will not be performed. Defaults to 5. - FailurePercentageMinimumHosts uint32 - // FailurePercentageRequestVolume is the minimum number of total requests - // that must be collected in one interval (as defined by the interval - // duration above) to perform failure percentage-based ejection for this - // host. If the volume is lower than this setting, failure percentage-based - // ejection will not be performed for this host. Defaults to 50. 
- FailurePercentageRequestVolume uint32 -} - // ClusterUpdate contains information from a received CDS response, which is of // interest to the registered CDS watcher. type ClusterUpdate struct { @@ -147,7 +81,7 @@ type ClusterUpdate struct { // OutlierDetection is the outlier detection configuration for this cluster. // If nil, it means this cluster does not use the outlier detection feature. - OutlierDetection *OutlierDetection + OutlierDetection json.RawMessage // Raw is the resource from the xds response. Raw *anypb.Any diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index d07ad2ea1aee..9f8530111a73 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -33,7 +33,7 @@ import ( "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/pretty" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" @@ -118,7 +118,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // Process outlier detection received from the control plane iff the // corresponding environment variable is set. - var od *OutlierDetection + var od json.RawMessage if envconfig.XDSOutlierDetection { var err error if od, err = outlierConfigFromCluster(cluster); err != nil { @@ -134,7 +134,7 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // "It will be the responsibility of the XdsClient to validate the // converted configuration. It will do this by having the gRPC LB policy // registry parse the configuration." 
- A52 - bc := &internalserviceconfig.BalancerConfig{} + bc := &iserviceconfig.BalancerConfig{} if err := json.Unmarshal(lbPolicy, bc); err != nil { return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbPolicy), err) } @@ -490,59 +490,87 @@ func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { return nil } -// outlierConfigFromCluster extracts the relevant outlier detection -// configuration from the received cluster resource. Returns nil if no -// OutlierDetection field set in the cluster resource. -func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, error) { +// idurationp takes a time.Duration and converts it to an internal duration, and +// returns a pointer to that internal duration. +func idurationp(d time.Duration) *iserviceconfig.Duration { + id := iserviceconfig.Duration(d) + return &id +} + +func uint32p(i uint32) *uint32 { + return &i +} + +// Helper types to prepare Outlier Detection JSON. Pointer types to distinguish +// between unset and a zero value. 
+type successRateEjection struct { + StdevFactor *uint32 `json:"stdevFactor,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type failurePercentageEjection struct { + Threshold *uint32 `json:"threshold,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type odLBConfig struct { + Interval *iserviceconfig.Duration `json:"interval,omitempty"` + BaseEjectionTime *iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + MaxEjectionTime *iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + MaxEjectionPercent *uint32 `json:"maxEjectionPercent,omitempty"` + SuccessRateEjection *successRateEjection `json:"successRateEjection,omitempty"` + FailurePercentageEjection *failurePercentageEjection `json:"failurePercentageEjection,omitempty"` +} + +// outlierConfigFromCluster converts the received Outlier Detection +// configuration into JSON configuration for Outlier Detection, taking into +// account xDS Defaults. Returns nil if no OutlierDetection field set in the +// cluster resource. 
+func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (json.RawMessage, error) { od := cluster.GetOutlierDetection() if od == nil { return nil, nil } - const ( - defaultInterval = 10 * time.Second - defaultBaseEjectionTime = 30 * time.Second - defaultMaxEjectionTime = 300 * time.Second - defaultMaxEjectionPercent = 10 - defaultSuccessRateStdevFactor = 1900 - defaultEnforcingSuccessRate = 100 - defaultSuccessRateMinimumHosts = 5 - defaultSuccessRateRequestVolume = 100 - defaultFailurePercentageThreshold = 85 - defaultEnforcingFailurePercentage = 0 - defaultFailurePercentageMinimumHosts = 5 - defaultFailurePercentageRequestVolume = 50 - ) + + // "The outlier_detection field of the Cluster resource should have its fields + // validated according to the rules for the corresponding LB policy config + // fields in the above "Validation" section. If any of these requirements is + // violated, the Cluster resource should be NACKed." - A50 // "The google.protobuf.Duration fields interval, base_ejection_time, and // max_ejection_time must obey the restrictions in the // google.protobuf.Duration documentation and they must have non-negative // values." 
- A50 - interval := defaultInterval + var interval *iserviceconfig.Duration if i := od.GetInterval(); i != nil { if err := i.CheckValid(); err != nil { return nil, fmt.Errorf("outlier_detection.interval is invalid with error: %v", err) } - if interval = i.AsDuration(); interval < 0 { - return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", interval) + if interval = idurationp(i.AsDuration()); *interval < 0 { + return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", *interval) } } - baseEjectionTime := defaultBaseEjectionTime + var baseEjectionTime *iserviceconfig.Duration if bet := od.GetBaseEjectionTime(); bet != nil { if err := bet.CheckValid(); err != nil { return nil, fmt.Errorf("outlier_detection.base_ejection_time is invalid with error: %v", err) } - if baseEjectionTime = bet.AsDuration(); baseEjectionTime < 0 { - return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", baseEjectionTime) + if baseEjectionTime = idurationp(bet.AsDuration()); *baseEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", *baseEjectionTime) } } - maxEjectionTime := defaultMaxEjectionTime + var maxEjectionTime *iserviceconfig.Duration if met := od.GetMaxEjectionTime(); met != nil { if err := met.CheckValid(); err != nil { return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid: %v", err) } - if maxEjectionTime = met.AsDuration(); maxEjectionTime < 0 { - return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", maxEjectionTime) + if maxEjectionTime = idurationp(met.AsDuration()); *maxEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", *maxEjectionTime) } } @@ -550,64 +578,91 @@ func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (*OutlierDetection, // failure_percentage_threshold, and enforcing_failure_percentage must have // values less than 
or equal to 100. If any of these requirements is // violated, the Cluster resource should be NACKed." - A50 - maxEjectionPercent := uint32(defaultMaxEjectionPercent) + var maxEjectionPercent *uint32 if mep := od.GetMaxEjectionPercent(); mep != nil { - if maxEjectionPercent = mep.GetValue(); maxEjectionPercent > 100 { - return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", maxEjectionPercent) + if maxEjectionPercent = uint32p(mep.GetValue()); *maxEjectionPercent > 100 { + return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", *maxEjectionPercent) } } - enforcingSuccessRate := uint32(defaultEnforcingSuccessRate) + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var enforcingSuccessRate *uint32 if esr := od.GetEnforcingSuccessRate(); esr != nil { - if enforcingSuccessRate = esr.GetValue(); enforcingSuccessRate > 100 { - return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", enforcingSuccessRate) + if enforcingSuccessRate = uint32p(esr.GetValue()); *enforcingSuccessRate > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", *enforcingSuccessRate) } } - failurePercentageThreshold := uint32(defaultFailurePercentageThreshold) + var failurePercentageThreshold *uint32 if fpt := od.GetFailurePercentageThreshold(); fpt != nil { - if failurePercentageThreshold = fpt.GetValue(); failurePercentageThreshold > 100 { - return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", failurePercentageThreshold) + if failurePercentageThreshold = uint32p(fpt.GetValue()); *failurePercentageThreshold > 100 { + return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", *failurePercentageThreshold) } } - enforcingFailurePercentage := 
uint32(defaultEnforcingFailurePercentage) + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." - A50 + var enforcingFailurePercentage *uint32 if efp := od.GetEnforcingFailurePercentage(); efp != nil { - if enforcingFailurePercentage = efp.GetValue(); enforcingFailurePercentage > 100 { - return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", enforcingFailurePercentage) + if enforcingFailurePercentage = uint32p(efp.GetValue()); *enforcingFailurePercentage > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", *enforcingFailurePercentage) } } - successRateStdevFactor := uint32(defaultSuccessRateStdevFactor) + var successRateStdevFactor *uint32 if srsf := od.GetSuccessRateStdevFactor(); srsf != nil { - successRateStdevFactor = srsf.GetValue() + successRateStdevFactor = uint32p(srsf.GetValue()) } - successRateMinimumHosts := uint32(defaultSuccessRateMinimumHosts) + var successRateMinimumHosts *uint32 if srmh := od.GetSuccessRateMinimumHosts(); srmh != nil { - successRateMinimumHosts = srmh.GetValue() + successRateMinimumHosts = uint32p(srmh.GetValue()) } - successRateRequestVolume := uint32(defaultSuccessRateRequestVolume) + var successRateRequestVolume *uint32 if srrv := od.GetSuccessRateRequestVolume(); srrv != nil { - successRateRequestVolume = srrv.GetValue() + successRateRequestVolume = uint32p(srrv.GetValue()) } - failurePercentageMinimumHosts := uint32(defaultFailurePercentageMinimumHosts) + var failurePercentageMinimumHosts *uint32 if fpmh := od.GetFailurePercentageMinimumHosts(); fpmh != nil { - failurePercentageMinimumHosts = fpmh.GetValue() + failurePercentageMinimumHosts = uint32p(fpmh.GetValue()) } - failurePercentageRequestVolume := uint32(defaultFailurePercentageRequestVolume) + var failurePercentageRequestVolume *uint32 if fprv := 
od.GetFailurePercentageRequestVolume(); fprv != nil { - failurePercentageRequestVolume = fprv.GetValue() - } - - return &OutlierDetection{ - Interval: interval, - BaseEjectionTime: baseEjectionTime, - MaxEjectionTime: maxEjectionTime, - MaxEjectionPercent: maxEjectionPercent, - EnforcingSuccessRate: enforcingSuccessRate, - FailurePercentageThreshold: failurePercentageThreshold, - EnforcingFailurePercentage: enforcingFailurePercentage, - SuccessRateStdevFactor: successRateStdevFactor, - SuccessRateMinimumHosts: successRateMinimumHosts, - SuccessRateRequestVolume: successRateRequestVolume, - FailurePercentageMinimumHosts: failurePercentageMinimumHosts, - FailurePercentageRequestVolume: failurePercentageRequestVolume, - }, nil + failurePercentageRequestVolume = uint32p(fprv.GetValue()) + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *successRateEjection + if enforcingSuccessRate == nil || *enforcingSuccessRate != 0 { + sre = &successRateEjection{ + StdevFactor: successRateStdevFactor, + EnforcementPercentage: enforcingSuccessRate, + MinimumHosts: successRateMinimumHosts, + RequestVolume: successRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." 
- A50 + var fpe *failurePercentageEjection + if enforcingFailurePercentage != nil && *enforcingFailurePercentage != 0 { + fpe = &failurePercentageEjection{ + Threshold: failurePercentageThreshold, + EnforcementPercentage: enforcingFailurePercentage, + MinimumHosts: failurePercentageMinimumHosts, + RequestVolume: failurePercentageRequestVolume, + } + } + + odLBCfg := &odLBConfig{ + Interval: interval, + BaseEjectionTime: baseEjectionTime, + MaxEjectionTime: maxEjectionTime, + MaxEjectionPercent: maxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } + return json.Marshal(odLBCfg) } diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index 0c69d27ad42d..e057b951326d 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -18,10 +18,10 @@ package xdsresource import ( + "encoding/json" "regexp" "strings" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -30,8 +30,6 @@ import ( "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/types/known/durationpb" - "google.golang.org/protobuf/types/known/wrapperspb" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -43,6 +41,8 @@ import ( v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" anypb "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" ) const ( @@ -1382,43 +1382,55 @@ 
func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { OutlierDetection: od, } } - odToClusterUpdate := func(od *OutlierDetection) ClusterUpdate { - return ClusterUpdate{ - ClusterName: clusterName, - LRSServerConfig: ClusterLRSOff, - OutlierDetection: od, - } - } tests := []struct { - name string - cluster *v3clusterpb.Cluster - wantUpdate ClusterUpdate - wantErr bool + name string + cluster *v3clusterpb.Cluster + wantODCfg string + wantErr bool }{ { - name: "successful-case-all-defaults", - // Outlier detection proto is present without any fields specified, - // so should trigger all default values in the update. - cluster: odToClusterProto(&v3clusterpb.OutlierDetection{}), - wantUpdate: odToClusterUpdate(&OutlierDetection{ - Interval: 10 * time.Second, - BaseEjectionTime: 30 * time.Second, - MaxEjectionTime: 300 * time.Second, - MaxEjectionPercent: 10, - SuccessRateStdevFactor: 1900, - EnforcingSuccessRate: 100, - SuccessRateMinimumHosts: 5, - SuccessRateRequestVolume: 100, - FailurePercentageThreshold: 85, - EnforcingFailurePercentage: 0, - FailurePercentageMinimumHosts: 5, - FailurePercentageRequestVolume: 50, + name: "success-and-failure-null", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{}), + wantODCfg: `{"successRateEjection": {}}`, + }, + { + name: "success-and-failure-zero", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{ + EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 0}, // Thus doesn't create sre - to focus on fpe + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 0}, }), + wantODCfg: `{}`, }, { - name: "successful-case-all-fields-configured-and-valid", + name: "some-fields-set", cluster: odToClusterProto(&v3clusterpb.OutlierDetection{ + Interval: &durationpb.Duration{Seconds: 1}, + MaxEjectionTime: &durationpb.Duration{Seconds: 3}, + EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 3}, + SuccessRateRequestVolume: &wrapperspb.UInt32Value{Value: 5}, + EnforcingFailurePercentage: 
&wrapperspb.UInt32Value{Value: 7}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 9}, + }), + wantODCfg: `{ + "interval": "1s", + "maxEjectionTime": "3s", + "successRateEjection": { + "enforcementPercentage": 3, + "requestVolume": 5 + }, + "failurePercentageEjection": { + "enforcementPercentage": 7, + "requestVolume": 9 + } + }`, + }, + { + name: "every-field-set-non-zero", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{ + // all fields set (including ones that will be layered) should + // pick up those too and explicitly all fields, including those + // put in layers, in the JSON generated. Interval: &durationpb.Duration{Seconds: 1}, BaseEjectionTime: &durationpb.Duration{Seconds: 2}, MaxEjectionTime: &durationpb.Duration{Seconds: 3}, @@ -1432,20 +1444,24 @@ func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 8}, FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 9}, }), - wantUpdate: odToClusterUpdate(&OutlierDetection{ - Interval: time.Second, - BaseEjectionTime: time.Second * 2, - MaxEjectionTime: time.Second * 3, - MaxEjectionPercent: 1, - SuccessRateStdevFactor: 2, - EnforcingSuccessRate: 3, - SuccessRateMinimumHosts: 4, - SuccessRateRequestVolume: 5, - FailurePercentageThreshold: 6, - EnforcingFailurePercentage: 7, - FailurePercentageMinimumHosts: 8, - FailurePercentageRequestVolume: 9, - }), + wantODCfg: `{ + "interval": "1s", + "baseEjectionTime": "2s", + "maxEjectionTime": "3s", + "maxEjectionPercent": 1, + "successRateEjection": { + "stdevFactor": 2, + "enforcementPercentage": 3, + "minimumHosts": 4, + "requestVolume": 5 + }, + "failurePercentageEjection": { + "threshold": 6, + "enforcementPercentage": 7, + "minimumHosts": 8, + "requestVolume": 9 + } + }`, }, { name: "interval-is-negative", @@ -1507,8 +1523,21 @@ func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { if (err != nil) != test.wantErr { 
t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) } - if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { - t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) + if test.wantErr { + return + } + // got and want must be unmarshalled since JSON strings shouldn't + // generally be directly compared. + var got map[string]interface{} + if err := json.Unmarshal(update.OutlierDetection, &got); err != nil { + t.Fatalf("Error unmarshalling update.OutlierDetection (%q): %v", update.OutlierDetection, err) + } + var want map[string]interface{} + if err := json.Unmarshal(json.RawMessage(test.wantODCfg), &want); err != nil { + t.Fatalf("Error unmarshalling wantODCfg (%q): %v", test.wantODCfg, err) + } + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("cluster.OutlierDetection got unexpected output, diff (-got, +want): %v", diff) } }) } From 642dd63a85275a96d172f446911fd04111e2c74c Mon Sep 17 00:00:00 2001 From: Joshua Humphries <2035234+jhump@users.noreply.github.com> Date: Mon, 12 Jun 2023 17:21:44 -0400 Subject: [PATCH 967/998] reflection: expose both v1 and v1alpha reflection services (#6329) --- reflection/adapt.go | 187 ++++++++++++++++++++++ reflection/serverreflection.go | 89 +++++++---- reflection/serverreflection_test.go | 235 ++++++++++++++++++++-------- 3 files changed, 411 insertions(+), 100 deletions(-) create mode 100644 reflection/adapt.go diff --git a/reflection/adapt.go b/reflection/adapt.go new file mode 100644 index 000000000000..fa5aad500b6e --- /dev/null +++ b/reflection/adapt.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + v1grpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. +func asV1Alpha(svr v1grpc.ServerReflectionServer) v1alphagrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1grpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphagrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1pb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1pb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 *v1pb.ServerReflectionResponse) *v1alphapb.ServerReflectionResponse { + var v1alpha v1alphapb.ServerReflectionResponse + v1alpha.ValidHost = 
v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1pb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1pb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1pb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphapb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphapb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphapb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1pb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +func v1AlphaToV1Request(v1alpha *v1alphapb.ServerReflectionRequest) *v1pb.ServerReflectionRequest { + var v1 v1pb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphapb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = 
&v1pb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1pb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphapb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1pb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1pb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1pb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphapb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1pb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1pb.ServerReflectionRequest) *v1alphapb.ServerReflectionRequest { + var v1alpha v1alphapb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1pb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1pb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1pb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphapb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + 
ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1pb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1pb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index e2f9ebfbbce8..a70295bcaa7d 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -48,8 +48,9 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" + v1grpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1" v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -63,9 +64,19 @@ type GRPCServer interface { var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. func Register(s GRPCServer) { - svr := NewServer(ServerOptions{Services: s}) - v1alphagrpc.RegisterServerReflectionServer(s, svr) + svr := NewServerV1(ServerOptions{Services: s}) + v1alphagrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1grpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. 
+func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1grpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -120,13 +131,27 @@ type ServerOptions struct { // NewServer returns a reflection server implementation using the given options. // This can be used to customize behavior of the reflection service. Most usages -// should prefer to use Register instead. +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. // // # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServerV1(opts ServerOptions) v1grpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -215,11 +240,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. 
-func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1pb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1pb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) + resp = append(resp, &v1pb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -228,7 +253,7 @@ func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. -func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1grpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -239,79 +264,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerR return err } - out := &v1alphapb.ServerReflectionResponse{ + out := &v1pb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *v1alphapb.ServerReflectionRequest_FileByFilename: + case *v1pb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1pb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - 
FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1pb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: + case *v1pb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1pb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1pb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingExtension: + case *v1pb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1pb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ 
+ FileDescriptorResponse: &v1pb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1pb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1pb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ + out.MessageResponse = &v1pb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1pb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *v1alphapb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphapb.ListServiceResponse{ + case *v1pb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1pb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1pb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index 04e7ba1dbfbd..8a3ca7163f0e 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -37,6 +37,8 @@ import ( "google.golang.org/protobuf/types/descriptorpb" "google.golang.org/protobuf/types/dynamicpb" + v1grpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1" v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" 
pb "google.golang.org/grpc/reflection/grpc_testing" @@ -44,11 +46,8 @@ import ( ) var ( - s = NewServer(ServerOptions{}).(*serverReflectionServer) + s = NewServerV1(ServerOptions{}).(*serverReflectionServer) // fileDescriptor of each test proto file. - fdTest *descriptorpb.FileDescriptorProto - fdTestv3 *descriptorpb.FileDescriptorProto - fdProto2 *descriptorpb.FileDescriptorProto fdProto2Ext *descriptorpb.FileDescriptorProto fdProto2Ext2 *descriptorpb.FileDescriptorProto fdDynamic *descriptorpb.FileDescriptorProto @@ -113,9 +112,9 @@ func loadFileDescDynamic(b []byte) (*descriptorpb.FileDescriptorProto, protorefl } func init() { - fdTest, fdTestByte = loadFileDesc("reflection/grpc_testing/test.proto") - fdTestv3, fdTestv3Byte = loadFileDesc("testv3.proto") - fdProto2, fdProto2Byte = loadFileDesc("reflection/grpc_testing/proto2.proto") + _, fdTestByte = loadFileDesc("reflection/grpc_testing/test.proto") + _, fdTestv3Byte = loadFileDesc("testv3.proto") + _, fdProto2Byte = loadFileDesc("reflection/grpc_testing/proto2.proto") fdProto2Ext, fdProto2ExtByte = loadFileDesc("reflection/grpc_testing/proto2_ext.proto") fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("reflection/grpc_testing/proto2_ext2.proto") fdDynamic, fdDynamicFile, fdDynamicByte = loadFileDescDynamic(pbv3.FileDynamicProtoRawDesc) @@ -210,6 +209,7 @@ func (x) TestReflectionEnd2end(t *testing.T) { // Register reflection service on s. Register(s) go s.Serve(lis) + t.Cleanup(s.Stop) // Create client. 
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) @@ -218,33 +218,50 @@ func (x) TestReflectionEnd2end(t *testing.T) { } defer conn.Close() - c := v1alphagrpc.NewServerReflectionClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - stream, err := c.ServerReflectionInfo(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("cannot get ServerReflectionInfo: %v", err) + clientV1 := v1grpc.NewServerReflectionClient(conn) + clientV1Alpha := v1alphagrpc.NewServerReflectionClient(conn) + testCases := []struct { + name string + client v1grpc.ServerReflectionClient + }{ + { + name: "v1", + client: clientV1, + }, + { + name: "v1alpha", + client: v1AlphaClientAdapter{stub: clientV1Alpha}, + }, } + for _, testCase := range testCases { + c := testCase.client + t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := c.ServerReflectionInfo(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("cannot get ServerReflectionInfo: %v", err) + } - testFileByFilenameTransitiveClosure(t, stream, true) - testFileByFilenameTransitiveClosure(t, stream, false) - testFileByFilename(t, stream) - testFileByFilenameError(t, stream) - testFileContainingSymbol(t, stream) - testFileContainingSymbolError(t, stream) - testFileContainingExtension(t, stream) - testFileContainingExtensionError(t, stream) - testAllExtensionNumbersOfType(t, stream) - testAllExtensionNumbersOfTypeError(t, stream) - testListServices(t, stream) - - s.Stop() + testFileByFilenameTransitiveClosure(t, stream, true) + testFileByFilenameTransitiveClosure(t, stream, false) + testFileByFilename(t, stream) + testFileByFilenameError(t, stream) + testFileContainingSymbol(t, stream) + testFileContainingSymbolError(t, stream) + testFileContainingExtension(t, stream) + testFileContainingExtensionError(t, stream) + 
testAllExtensionNumbersOfType(t, stream) + testAllExtensionNumbersOfTypeError(t, stream) + testListServices(t, stream) + }) + } } -func testFileByFilenameTransitiveClosure(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { +func testFileByFilenameTransitiveClosure(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { filename := "reflection/grpc_testing/proto2_ext2.proto" - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_FileByFilename{ FileByFilename: filename, }, }); err != nil { @@ -256,7 +273,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream v1alphagrpc.Server t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: + case *v1pb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) } @@ -274,7 +291,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream v1alphagrpc.Server } } -func testFileByFilename(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilename(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { filename string want []byte @@ -284,8 +301,8 @@ func testFileByFilename(t *testing.T, stream v1alphagrpc.ServerReflection_Server {"reflection/grpc_testing/proto2_ext.proto", fdProto2ExtByte}, {"dynamic.proto", fdDynamicByte}, } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: 
&v1alphapb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_FileByFilename{ FileByFilename: test.filename, }, }); err != nil { @@ -298,7 +315,7 @@ func testFileByFilename(t *testing.T, stream v1alphagrpc.ServerReflection_Server } switch r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: + case *v1pb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", test.filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -308,14 +325,14 @@ func testFileByFilename(t *testing.T, stream v1alphagrpc.ServerReflection_Server } } -func testFileByFilenameError(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilenameError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "test.poto", "proo2.proto", "proto2_et.proto", } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_FileByFilename{ FileByFilename: test, }, }); err != nil { @@ -328,14 +345,14 @@ func testFileByFilenameError(t *testing.T, stream v1alphagrpc.ServerReflection_S } switch r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_ErrorResponse: + case *v1pb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileByFilename(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingSymbol(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbol(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { 
symbol string want []byte @@ -361,8 +378,8 @@ func testFileContainingSymbol(t *testing.T, stream v1alphagrpc.ServerReflection_ {"grpc.testing.DynamicReq", fdDynamicByte}, {"grpc.testing.DynamicRes", fdDynamicByte}, } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test.symbol, }, }); err != nil { @@ -375,7 +392,7 @@ func testFileContainingSymbol(t *testing.T, stream v1alphagrpc.ServerReflection_ } switch r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: + case *v1pb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingSymbol(%v)\nreceived: %q,\nwant: %q", test.symbol, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -385,15 +402,15 @@ func testFileContainingSymbol(t *testing.T, stream v1alphagrpc.ServerReflection_ } } -func testFileContainingSymbolError(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbolError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.SerchService", "grpc.testing.SearchService.SearchE", "grpc.tesing.SearchResponse", "gpc.testing.ToBeExtended", } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test, }, }); err != nil { @@ -406,14 +423,14 @@ func testFileContainingSymbolError(t *testing.T, stream v1alphagrpc.ServerReflec } switch r.MessageResponse.(type) { - case 
*v1alphapb.ServerReflectionResponse_ErrorResponse: + case *v1pb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingSymbol(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingExtension(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtension(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 @@ -425,9 +442,9 @@ func testFileContainingExtension(t *testing.T, stream v1alphagrpc.ServerReflecti {"grpc.testing.ToBeExtended", 23, fdProto2Ext2Byte}, {"grpc.testing.ToBeExtended", 29, fdProto2Ext2Byte}, } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1alphapb.ExtensionRequest{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1pb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -442,7 +459,7 @@ func testFileContainingExtension(t *testing.T, stream v1alphagrpc.ServerReflecti } switch r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: + case *v1pb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingExtension(%v, %v)\nreceived: %q,\nwant: %q", test.typeName, test.extNum, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -452,7 +469,7 @@ func testFileContainingExtension(t *testing.T, stream v1alphagrpc.ServerReflecti } } -func testFileContainingExtensionError(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtensionError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test 
:= range []struct { typeName string extNum int32 @@ -460,9 +477,9 @@ func testFileContainingExtensionError(t *testing.T, stream v1alphagrpc.ServerRef {"grpc.testing.ToBExtended", 17}, {"grpc.testing.ToBeExtended", 15}, } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1alphapb.ExtensionRequest{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1pb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -477,14 +494,14 @@ func testFileContainingExtensionError(t *testing.T, stream v1alphagrpc.ServerRef } switch r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_ErrorResponse: + case *v1pb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) } } } -func testAllExtensionNumbersOfType(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfType(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string want []int32 @@ -492,8 +509,8 @@ func testAllExtensionNumbersOfType(t *testing.T, stream v1alphagrpc.ServerReflec {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, {"grpc.testing.DynamicReq", nil}, } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test.typeName, }, }); err != nil { @@ -506,7 +523,7 @@ func testAllExtensionNumbersOfType(t *testing.T, stream v1alphagrpc.ServerReflec } switch r.MessageResponse.(type) { - case 
*v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse: + case *v1pb.ServerReflectionResponse_AllExtensionNumbersResponse: extNum := r.GetAllExtensionNumbersResponse().ExtensionNumber sort.Sort(intArray(extNum)) if r.GetAllExtensionNumbersResponse().BaseTypeName != test.typeName || @@ -519,12 +536,12 @@ func testAllExtensionNumbersOfType(t *testing.T, stream v1alphagrpc.ServerReflec } } -func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.ToBeExtendedE", } { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test, }, }); err != nil { @@ -537,16 +554,16 @@ func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1alphagrpc.ServerR } switch r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_ErrorResponse: + case *v1pb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test, r.MessageResponse) } } } -func testListServices(t *testing.T, stream v1alphagrpc.ServerReflection_ServerReflectionInfoClient) { - if err := stream.Send(&v1alphapb.ServerReflectionRequest{ - MessageRequest: &v1alphapb.ServerReflectionRequest_ListServices{}, +func testListServices(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { + if err := stream.Send(&v1pb.ServerReflectionRequest{ + MessageRequest: &v1pb.ServerReflectionRequest_ListServices{}, }); err != nil { t.Fatalf("failed to send request: %v", err) } @@ -557,11 +574,12 @@ func testListServices(t *testing.T, stream v1alphagrpc.ServerReflection_ServerRe } switch 
r.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_ListServicesResponse: + case *v1pb.ServerReflectionResponse_ListServicesResponse: services := r.GetListServicesResponse().Service want := []string{ "grpc.testingv3.SearchServiceV3", "grpc.testing.SearchService", + "grpc.reflection.v1.ServerReflection", "grpc.reflection.v1alpha.ServerReflection", "grpc.testing.DynamicService", } @@ -607,3 +625,84 @@ func registerDynamicProto(srv *grpc.Server, fdp *descriptorpb.FileDescriptorProt srv.RegisterService(sd, struct{}{}) } } + +type v1AlphaClientAdapter struct { + stub v1alphagrpc.ServerReflectionClient +} + +func (v v1AlphaClientAdapter) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (v1grpc.ServerReflection_ServerReflectionInfoClient, error) { + stream, err := v.stub.ServerReflectionInfo(ctx, opts...) + if err != nil { + return nil, err + } + return v1AlphaClientStreamAdapter{stream}, nil +} + +type v1AlphaClientStreamAdapter struct { + v1alphagrpc.ServerReflection_ServerReflectionInfoClient +} + +func (s v1AlphaClientStreamAdapter) Send(request *v1pb.ServerReflectionRequest) error { + return s.ServerReflection_ServerReflectionInfoClient.Send(v1ToV1AlphaRequest(request)) +} + +func (s v1AlphaClientStreamAdapter) Recv() (*v1pb.ServerReflectionResponse, error) { + resp, err := s.ServerReflection_ServerReflectionInfoClient.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Response(resp), nil +} + +func v1AlphaToV1Response(v1alpha *v1alphapb.ServerReflectionResponse) *v1pb.ServerReflectionResponse { + var v1 v1pb.ServerReflectionResponse + v1.ValidHost = v1alpha.ValidHost + if v1alpha.OriginalRequest != nil { + v1.OriginalRequest = v1AlphaToV1Request(v1alpha.OriginalRequest) + } + switch mr := v1alpha.MessageResponse.(type) { + case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: 
&v1pb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1.MessageResponse = &v1pb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1pb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1alphapb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1pb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1pb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1.MessageResponse = &v1pb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1pb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1alphapb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1pb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1 +} From dd350d02da45d67d19705fa8e82544284801428a Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 20 Jun 2023 17:04:30 -0400 Subject: [PATCH 968/998] stats/opencensus: Fix flaky metrics test (#6372) --- stats/opencensus/e2e_test.go | 41 ++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go index 1f68675f0c1d..3b3c2bbbd908 100644 --- a/stats/opencensus/e2e_test.go +++ b/stats/opencensus/e2e_test.go @@ -237,6 +237,36 @@ func distributionDataLatencyCount(vi *viewInformation, countWant int64, wantTags return nil } +// 
waitForServerCompletedRPCs waits until both Unary and Streaming metric rows +// appear, in two separate rows, for server completed RPC's view. Returns an +// error if the Unary and Streaming metric are not found within the passed +// context's timeout. +func waitForServerCompletedRPCs(ctx context.Context) error { + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + rows, err := view.RetrieveData("grpc.io/server/completed_rpcs") + if err != nil { + continue + } + unaryFound := false + streamingFound := false + for _, row := range rows { + for _, tag := range row.Tags { + if tag.Value == "grpc.testing.TestService/UnaryCall" { + unaryFound = true + break + } else if tag.Value == "grpc.testing.TestService/FullDuplexCall" { + streamingFound = true + break + } + } + if unaryFound && streamingFound { + return nil + } + } + } + return fmt.Errorf("timeout when waiting for Unary and Streaming rows to be present for \"grpc.io/server/completed_rpcs\"") +} + // TestAllMetricsOneFunction tests emitted metrics from gRPC. It registers all // the metrics provided by this package. It then configures a system with a gRPC // Client and gRPC server with the OpenCensus Dial and Server Option configured, @@ -987,10 +1017,13 @@ func (s) TestAllMetricsOneFunction(t *testing.T) { }, }, } - // Unregister all the views. Unregistering a view causes a synchronous - // upload of any collected data for the view to any registered exporters. - // Thus, after this unregister call, the exporter has the data to make - // assertions on immediately. + // Server Side stats.End call happens asynchronously for both Unary and + // Streaming calls with respect to the RPC returning client side. Thus, add + // a sync point at the global view package level for these two rows to be + // recorded, which will be synchronously uploaded to exporters right after. + if err := waitForServerCompletedRPCs(ctx); err != nil { + t.Fatal(err) + } view.Unregister(allViews...) 
// Assert the expected emissions for each metric match the expected // emissions. From a9c79427b1faec9dda8597ea1f5fdf369bab08be Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:24:52 -0400 Subject: [PATCH 969/998] benchmark: Add support for Poisson load in benchmark client (#6378) --- benchmark/worker/benchmark_client.go | 143 ++++++++++++++++++--------- internal/grpcrand/grpcrand.go | 7 ++ 2 files changed, 105 insertions(+), 45 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 7f02728709ca..cc1af4f418a5 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" @@ -185,11 +186,21 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc } } - // TODO add open loop distribution. - switch config.LoadParams.Load.(type) { + // If set, perform an open loop, if not perform a closed loop. An open loop + // asynchronously starts RPCs based on random start times derived from a + // Poisson distribution. A closed loop performs RPCs in a blocking manner, + // and runs the next RPC after the previous RPC completes and returns. 
+ var poissonLambda *float64 + switch t := config.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: case *testpb.LoadParams_Poisson: - return status.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) + if t.Poisson == nil { + return status.Errorf(codes.InvalidArgument, "poisson is nil, needs to be set") + } + if t.Poisson.OfferedLoad <= 0 { + return status.Errorf(codes.InvalidArgument, "poisson.offered is <= 0: %v, needs to be >0", t.Poisson.OfferedLoad) + } + poissonLambda = &t.Poisson.OfferedLoad default: return status.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) } @@ -198,11 +209,9 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc switch config.RpcType { case testpb.RpcType_UNARY: - bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) - // TODO open loop. + bc.unaryLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, poissonLambda) case testpb.RpcType_STREAMING: - bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) - // TODO open loop. + bc.streamingLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType, poissonLambda) default: return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) } @@ -246,7 +255,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) return bc, nil } -func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { +func (bc *benchmarkClient) unaryLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, poissonLambda *float64) { for ic, conn := range conns { client := testgrpc.NewBenchmarkServiceClient(conn) // For each connection, create rpcCountPerConn goroutines to do rpc. 
@@ -260,36 +269,44 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe // Now relying on worker client to reserve time to do warm up. // The worker client needs to wait for some time after client is created, // before starting benchmark. - done := make(chan bool) - for { - go func() { - start := time.Now() - if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + if poissonLambda == nil { // Closed loop. + done := make(chan bool) + for { + go func() { + start := time.Now() + if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + select { + case <-bc.stop: + case done <- false: + } + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) select { case <-bc.stop: - case done <- false: + case done <- true: } - return - } - elapse := time.Since(start) - bc.lockingHistograms[idx].add(int64(elapse)) + }() select { case <-bc.stop: - case done <- true: + return + case <-done: } - }() - select { - case <-bc.stop: - return - case <-done: } + } else { // Open loop. + timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / *poissonLambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonUnary(client, idx, reqSize, respSize, *poissonLambda) + }) } + }(idx) } } } -func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { +func (bc *benchmarkClient) streamingLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string, poissonLambda *float64) { var doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { doRPC = benchmark.DoByteBufStreamingRoundTrip @@ -304,33 +321,69 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou if err != nil { logger.Fatalf("%v.StreamingCall(_) = _, %v", c, err) } - // Create histogram for each goroutine. 
idx := ic*rpcCountPerConn + j bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) - // Start goroutine on the created mutex and histogram. - go func(idx int) { - // TODO: do warm up if necessary. - // Now relying on worker client to reserve time to do warm up. - // The worker client needs to wait for some time after client is created, - // before starting benchmark. - for { - start := time.Now() - if err := doRPC(stream, reqSize, respSize); err != nil { - return - } - elapse := time.Since(start) - bc.lockingHistograms[idx].add(int64(elapse)) - select { - case <-bc.stop: - return - default: + if poissonLambda == nil { // Closed loop. + // Start goroutine on the created mutex and histogram. + go func(idx int) { + // TODO: do warm up if necessary. + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, + // before starting benchmark. + for { + start := time.Now() + if err := doRPC(stream, reqSize, respSize); err != nil { + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + select { + case <-bc.stop: + return + default: + } } - } - }(idx) + }(idx) + } else { // Open loop. 
+ timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / *poissonLambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonStreaming(stream, idx, reqSize, respSize, *poissonLambda, doRPC) + }) + } } } } +func (bc *benchmarkClient) poissonUnary(client testgrpc.BenchmarkServiceClient, idx int, reqSize int, respSize int, lambda float64) { + go func() { + start := time.Now() + if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + }() + timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / lambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonUnary(client, idx, reqSize, respSize, lambda) + }) +} + +func (bc *benchmarkClient) poissonStreaming(stream testgrpc.BenchmarkService_StreamingCallClient, idx int, reqSize int, respSize int, lambda float64, doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error) { + go func() { + start := time.Now() + if err := doRPC(stream, reqSize, respSize); err != nil { + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + }() + timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / lambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonStreaming(stream, idx, reqSize, respSize, lambda, doRPC) + }) +} + // getStats returns the stats for benchmark client. // It resets lastResetTime and all histograms if argument reset is true. func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { diff --git a/internal/grpcrand/grpcrand.go b/internal/grpcrand/grpcrand.go index d08e3e907666..aa97273e7d13 100644 --- a/internal/grpcrand/grpcrand.go +++ b/internal/grpcrand/grpcrand.go @@ -80,6 +80,13 @@ func Uint32() uint32 { return r.Uint32() } +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. 
+func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + // Shuffle implements rand.Shuffle on the grpcrand global source. var Shuffle = func(n int, f func(int, int)) { mu.Lock() From f24b4c7ee600ec7d537ba19b5899932d3b5b6fbf Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 22 Jun 2023 14:25:26 -0700 Subject: [PATCH 970/998] clusterresolver: remove redundant tests (#6388) --- .../clusterresolver/clusterresolver_test.go | 78 ------------- .../balancer/clusterresolver/priority_test.go | 105 ------------------ .../clusterresolver/resource_resolver_test.go | 59 ---------- 3 files changed, 242 deletions(-) delete mode 100644 xds/internal/balancer/clusterresolver/resource_resolver_test.go diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 6d798a1543b0..2f4fc703f177 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -240,84 +240,6 @@ func (s) TestSubConnStateChange(t *testing.T) { } } -// Given a list of resource names, verifies that EDS requests for the same are -// sent by the EDS balancer, through the fake xDS client. -func verifyExpectedRequests(ctx context.Context, fc *fakeclient.Client, resourceNames ...string) error { - for _, name := range resourceNames { - if name == "" { - // ResourceName empty string indicates a cancel. 
- if _, err := fc.WaitForCancelEDSWatch(ctx); err != nil { - return fmt.Errorf("timed out when expecting resource %q", name) - } - continue - } - - resName, err := fc.WaitForWatchEDS(ctx) - if err != nil { - return fmt.Errorf("timed out when expecting resource %q, %p", name, fc) - } - if resName != name { - return fmt.Errorf("got EDS request for resource %q, expected: %q", resName, name) - } - } - return nil -} - -// TestClientWatchEDS verifies that the xdsClient inside the top-level EDS LB -// policy registers an EDS watch for expected resource upon receiving an update -// from gRPC. -func (s) TestClientWatchEDS(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", Name) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // If eds service name is not set, should watch for cluster name. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS("cluster-1"), - }); err != nil { - t.Fatal(err) - } - if err := verifyExpectedRequests(ctx, xdsC, "cluster-1"); err != nil { - t.Fatal(err) - } - - // Update with an non-empty edsServiceName should trigger an EDS watch for - // the same. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS("foobar-1"), - }); err != nil { - t.Fatal(err) - } - if err := verifyExpectedRequests(ctx, xdsC, "", "foobar-1"); err != nil { - t.Fatal(err) - } - - // Also test the case where the edsServerName changes from one non-empty - // name to another, and make sure a new watch is registered. 
The previously - // registered watch will be cancelled, which will result in an EDS request - // with no resource names being sent to the server. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS("foobar-2"), - }); err != nil { - t.Fatal(err) - } - if err := verifyExpectedRequests(ctx, xdsC, "", "foobar-2"); err != nil { - t.Fatal(err) - } -} - func newLBConfigWithOneEDS(edsServiceName string) *LBConfig { return &LBConfig{ DiscoveryMechanisms: []DiscoveryMechanism{{ diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go index 4d2904c67ff6..526204ca22c6 100644 --- a/xds/internal/balancer/clusterresolver/priority_test.go +++ b/xds/internal/balancer/clusterresolver/priority_test.go @@ -832,108 +832,3 @@ func (s) TestEDSPriority_FirstPriorityRemoved(t *testing.T) { t.Fatal(err) } } - -// Watch resources from EDS and DNS, with EDS as the higher priority. Lower -// priority is used when higher priority is not ready. 
-func (s) TestFallbackToDNS(t *testing.T) { - const testDNSEndpointAddr = "3.1.4.1:5" - // dnsTargetCh, dnsCloseCh, resolveNowCh, dnsR, cleanup := setupDNS() - dnsTargetCh, _, resolveNowCh, dnsR, cleanupDNS := setupDNS() - defer cleanupDNS() - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{ - { - Type: DiscoveryMechanismTypeEDS, - Cluster: testClusterName, - }, - { - Type: DiscoveryMechanismTypeLogicalDNS, - DNSHostname: testDNSTarget, - }, - }, - xdsLBPolicy: *wrrLocalityLBConfig, - }, - }); err != nil { - t.Fatal(err) - } - - ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer ctxCancel() - select { - case target := <-dnsTargetCh: - if diff := cmp.Diff(target, resolver.Target{Scheme: "dns", URL: *testutils.MustParseURL("dns:///" + testDNSTarget)}); diff != "" { - t.Fatalf("got unexpected DNS target to watch, diff (-got, +want): %v", diff) - } - case <-ctx.Done(): - t.Fatal("Timed out waiting for building DNS resolver") - } - - // One locality with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - // Also send a DNS update, because the balancer needs both updates from all - // resources to move on. - dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: testDNSEndpointAddr}}}) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // p0 is ready. 
- edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { - t.Fatal(err) - } - - // Turn down 0, p1 (DNS) will be used. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - // The transient failure above should not trigger a re-resolve to the DNS - // resolver. Need to read to clear the channel, to avoid potential deadlock - // writing to the channel later. - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - select { - case <-resolveNowCh: - t.Fatal("unexpected re-resolve trigger by transient failure from EDS endpoint") - case <-shortCtx.Done(): - } - - // The addresses used to create new SubConn should be the DNS endpoint. - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testDNSEndpointAddr; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with 1. - if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { - t.Fatal(err) - } - - // Turn down the DNS endpoint, this should trigger an re-resolve in the DNS - // resolver. - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - // The transient failure above should trigger a re-resolve to the DNS - // resolver. Need to read to clear the channel, to avoid potential deadlock - // writing to the channel later. 
- select { - case <-resolveNowCh: - case <-ctx.Done(): - t.Fatal("Timed out waiting for re-resolve") - } -} diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_test.go b/xds/internal/balancer/clusterresolver/resource_resolver_test.go deleted file mode 100644 index 0ae151ee5215..000000000000 --- a/xds/internal/balancer/clusterresolver/resource_resolver_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package clusterresolver - -import ( - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -const ( - testDNSTarget = "dns.com" -) - -var ( - testEDSUpdates []xdsresource.EndpointsUpdate -) - -func init() { - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - testEDSUpdates = append(testEDSUpdates, parseEDSRespProtoForTesting(clab1.Build())) - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - testEDSUpdates = append(testEDSUpdates, parseEDSRespProtoForTesting(clab2.Build())) -} - -func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOptions, *manual.Resolver, func()) { - dnsTargetCh := make(chan resolver.Target, 1) - dnsCloseCh := make(chan struct{}, 1) - resolveNowCh := make(chan resolver.ResolveNowOptions, 1) - - mr := manual.NewBuilderWithScheme("dns") - mr.BuildCallback = func(target resolver.Target, _ resolver.ClientConn, _ resolver.BuildOptions) { dnsTargetCh <- target } - mr.CloseCallback = func() { dnsCloseCh <- struct{}{} } - mr.ResolveNowCallback = func(opts resolver.ResolveNowOptions) { resolveNowCh <- opts } - oldNewDNS := newDNS - newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - return mr.Build(target, cc, opts) - } - return dnsTargetCh, dnsCloseCh, resolveNowCh, mr, func() { newDNS = oldNewDNS } -} From 963238a605b6dc8da3409ac47661f30ec95e82b8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 23 Jun 2023 08:27:34 -0700 Subject: [PATCH 971/998] clusterresolver: move tests around to different files (#6392) --- .../e2e_test/aggregate_cluster_test.go | 773 +++++++++++++ 
.../clusterresolver/e2e_test/balancer_test.go | 1023 ----------------- .../clusterresolver/e2e_test/eds_impl_test.go | 299 ++++- 3 files changed, 1070 insertions(+), 1025 deletions(-) create mode 100644 xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go diff --git a/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go new file mode 100644 index 000000000000..ea0cdf6fc257 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go @@ -0,0 +1,773 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package e2e_test + +import ( + "context" + "fmt" + "sort" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// makeAggregateClusterResource returns an aggregate cluster resource with the +// given name and list of child names. +func makeAggregateClusterResource(name string, childNames []string) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: name, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: childNames, + }), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + } +} + +// makeLogicalDNSClusterResource returns a LOGICAL_DNS cluster resource with the +// given name and given DNS host and port. 
+func makeLogicalDNSClusterResource(name, dnsHost string, dnsPort uint32) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: name, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ + LbEndpoints: []*v3endpointpb.LbEndpoint{{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: dnsHost, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: dnsPort, + }, + }, + }, + }, + }, + }, + }}, + }}, + }, + } +} + +// setupDNS unregisters the DNS resolver and registers a manual resolver for the +// same scheme. This allows the test to mock the DNS resolution by supplying the +// addresses of the test backends. +// +// Returns the following: +// - a channel onto which the DNS target being resolved is written to by the +// mock DNS resolver +// - a channel to notify close of the DNS resolver +// - a channel to notify re-resolution requests to the DNS resolver +// - a manual resolver which is used to mock the actual DNS resolution +// - a cleanup function which re-registers the original DNS resolver +func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOptions, *manual.Resolver, func()) { + targetCh := make(chan resolver.Target, 1) + closeCh := make(chan struct{}, 1) + resolveNowCh := make(chan resolver.ResolveNowOptions, 1) + + mr := manual.NewBuilderWithScheme("dns") + mr.BuildCallback = func(target resolver.Target, _ resolver.ClientConn, _ resolver.BuildOptions) { targetCh <- target } + mr.CloseCallback = func() { closeCh <- struct{}{} } + mr.ResolveNowCallback = func(opts resolver.ResolveNowOptions) { resolveNowCh <- opts } + + dnsResolverBuilder := resolver.Get("dns") + 
resolver.UnregisterForTesting("dns") + resolver.Register(mr) + + return targetCh, closeCh, resolveNowCh, mr, func() { resolver.Register(dnsResolverBuilder) } +} + +// TestAggregateCluster_WithTwoEDSClusters tests the case where the top-level +// cluster resource is an aggregate cluster. It verifies that RPCs fail when the +// management server has not responded to all requested EDS resources, and also +// that RPCs are routed to the highest priority cluster once all requested EDS +// resources have been sent by the management server. +func (s) TestAggregateCluster_WithTwoEDSClusters(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server that pushes the EDS resource names onto a + // channel when requested. + edsResourceNameCh := make(chan []string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) == 0 { + // This is the case for ACKs. Do nothing here. + return nil + } + select { + case edsResourceNameCh <- req.GetResourceNames(): + case <-ctx.Done(): + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend belongs to EDS cluster "cluster-1", while the second backend + // belongs to EDS cluster "cluster-2". + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster, two EDS clusters and only one endpoints + // resource (corresponding to the first EDS cluster) in the management + // server. 
+ const clusterName1 = clusterName + "-cluster-1" + const clusterName2 = clusterName + "-cluster-2" + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Wait for both EDS resources to be requested. + func() { + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + select { + case names := <-edsResourceNameCh: + // Copy and sort the sortedNames to avoid racing with an + // OnStreamRequest call. + sortedNames := make([]string, len(names)) + copy(sortedNames, names) + sort.Strings(sortedNames) + if cmp.Equal(sortedNames, []string{clusterName1, clusterName2}) { + return + } + default: + } + } + }() + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for all EDS resources %v to be requested", []string{clusterName1, clusterName2}) + } + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the management server has not responded with all EDS resources + // requested. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Update the management server with the second EDS resource. 
+ resources.Endpoints = append(resources.Endpoints, e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])})) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Make an RPC and ensure that it gets routed to cluster-1, implicitly + // higher priority than cluster-2. + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange tests the case where +// the top-level cluster resource is an aggregate cluster. It verifies that RPCs +// are routed to the highest priority EDS cluster. +func (s) TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange(t *testing.T) { + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend belongs to EDS cluster "cluster-1", while the second backend + // belongs to EDS cluster "cluster-2". + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster, two EDS clusters and the corresponding + // endpoints resources in the management server. 
+ const clusterName1 = clusterName + "cluster-1" + const clusterName2 = clusterName + "cluster-2" + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])}), + e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])}), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC and ensure that it gets routed to cluster-1, implicitly + // higher priority than cluster-2. + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Swap the priorities of the EDS clusters in the aggregate cluster. 
+ resources.Clusters = []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName2, clusterName1}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for RPCs to get routed to cluster-2, which is now implicitly higher + // priority than cluster-1, after the priority switch above. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatal("Timeout waiting for RPCs to be routed to cluster-2 after priority switch") + } +} + +// TestAggregateCluster_WithOneDNSCluster tests the case where the top-level +// cluster resource is an aggregate cluster that resolves to a single +// LOGICAL_DNS cluster. The test verifies that RPCs can be made to backends that +// make up the LOGICAL_DNS cluster. +func (s) TestAggregateCluster_WithOneDNSCluster(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, _ := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to a single LOGICAL_DNS cluster. 
+ const ( + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{dnsClusterName}), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // child policy for a LOGICAL_DNS cluster is pick_first by default. + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_WithEDSAndDNS tests the case where the top-level cluster +// resource is an aggregate cluster that resolves to an EDS and a LOGICAL_DNS +// cluster. 
The test verifies that RPCs fail until both clusters are resolved to +// endpoints, and RPCs are routed to the higher priority EDS cluster. +func (s) TestAggregateCluster_WithEDSAndDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server that pushes the name of the requested EDS + // resource onto a channel. + edsResourceCh := make(chan string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) > 0 { + select { + case edsResourceCh <- req.GetResourceNames()[0]: + default: + } + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. Also + // configure an endpoints resource for the EDS cluster. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that an EDS request is sent for the expected resource name. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case name := <-edsResourceCh: + if name != edsClusterName { + t.Fatalf("Received EDS request with resource name %q, want %q", name, edsClusterName) + } + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the DNS resolver has not responded with endpoint addresses. 
+ client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // EDS cluster is of higher priority than the LOGICAL_DNS cluster. + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_SwitchEDSAndDNS tests the case where the top-level +// cluster resource is an aggregate cluster. It initially resolves to a single +// EDS cluster. The test verifies that RPCs are routed to backends in the EDS +// cluster. Subsequently, the aggregate cluster resolves to a single DNS +// cluster. The test verifies that RPCs are successful, this time to backends in +// the DNS cluster. +func (s) TestAggregateCluster_SwitchEDSAndDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. 
+ servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to a single EDS cluster. Also, + // configure the underlying EDS cluster (and the corresponding endpoints + // resource) and DNS cluster (will be used later in the test). + const ( + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsServiceName}), + e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the RPC is routed to the appropriate backend. + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Update the aggregate cluster to point to a single DNS cluster. 
+ resources.Clusters = []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{dnsClusterName}), + e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Ensure that start getting routed to the backend corresponding to the + // LOGICAL_DNS cluster. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)) + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[1].Addr) + } +} + +// TestAggregateCluster_BadEDS_GoodToBadDNS tests the case where the top-level +// cluster is an aggregate cluster that resolves to an EDS and LOGICAL_DNS +// cluster. The test first asserts that no RPCs can be made after receiving an +// EDS response with zero endpoints because no update has been received from the +// DNS resolver yet. Once the DNS resolver pushes an update, the test verifies +// that we switch to the DNS cluster and can make a successful RPC. At this +// point when the DNS cluster returns an error, the test verifies that RPCs are +// still successful. This is the expected behavior because pick_first (the leaf +// policy) ignores resolver errors when it is not in TransientFailure. 
+func (s) TestAggregateCluster_BadEDS_GoodToBadDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, _ := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS + // cluster. Also configure an empty endpoints resource for the EDS cluster + // that contains no endpoints. + const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the EDS resource came back with no endpoints, and we are yet to + // push an update through the DNS resolver. 
+ client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that RPCs start getting routed to the first backend since the + // child policy for a LOGICAL_DNS cluster is pick_first by default. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Logf("EmptyCall() failed: %v", err) + continue + } + if peer.Addr.String() == addrs[0].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[0].Addr) + } + + // Push an error from the DNS resolver as well. + dnsErr := fmt.Errorf("DNS error") + dnsR.ReportError(dnsErr) + + // Ensure that RPCs continue to succeed for the next second. 
+ for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + } +} + +// TestAggregateCluster_BadEDS_BadDNS tests the case where the top-level cluster +// is an aggregate cluster that resolves to an EDS and LOGICAL_DNS cluster. When +// the EDS request returns a resource that contains no endpoints, the test +// verifies that we switch to the DNS cluster. When the DNS cluster returns an +// error, the test verifies that RPCs fail with the error returned by the DNS +// resolver, and thus, ensures that pick_first (the leaf policy) does not ignore +// resolver errors. +func (s) TestAggregateCluster_BadEDS_BadDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS + // cluster. Also configure an empty endpoints resource for the EDS cluster + // that contains no endpoints. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the EDS resource came back with no endpoints, and we are yet to + // push an update through the DNS resolver. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Push an error from the DNS resolver as well. 
+ dnsErr := fmt.Errorf("DNS error") + dnsR.ReportError(dnsErr) + + // Ensure that the error returned from the DNS resolver is reported to the + // caller of the RPC. + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code != codes.Unavailable { + t.Fatalf("EmptyCall() failed with code %s, want %s", code, codes.Unavailable) + } + if err == nil || !strings.Contains(err.Error(), dnsErr.Error()) { + t.Fatalf("EmptyCall() failed with error %v, want %v", err, dnsErr) + } +} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go index 3d0d08a3c983..044bbae3d0fe 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -19,12 +19,10 @@ package e2e_test import ( "context" "fmt" - "sort" "strings" "testing" "time" - "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -33,19 +31,16 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" - "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - "google.golang.org/protobuf/types/known/wrapperspb" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" v3discoverypb 
"github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" @@ -53,81 +48,6 @@ import ( _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the "cds_experimental" LB policy. ) -// makeAggregateClusterResource returns an aggregate cluster resource with the -// given name and list of child names. -func makeAggregateClusterResource(name string, childNames []string) *v3clusterpb.Cluster { - return &v3clusterpb.Cluster{ - Name: name, - ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ - ClusterType: &v3clusterpb.Cluster_CustomClusterType{ - Name: "envoy.clusters.aggregate", - TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ - Clusters: childNames, - }), - }, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - } -} - -// makeLogicalDNSClusterResource returns a LOGICAL_DNS cluster resource with the -// given name and given DNS host and port. -func makeLogicalDNSClusterResource(name, dnsHost string, dnsPort uint32) *v3clusterpb.Cluster { - return &v3clusterpb.Cluster{ - Name: name, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ - Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ - LbEndpoints: []*v3endpointpb.LbEndpoint{{ - HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ - Endpoint: &v3endpointpb.Endpoint{ - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: dnsHost, - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: dnsPort, - }, - }, - }, - }, - }, - }, - }}, - }}, - }, - } -} - -// setupDNS unregisters the DNS resolver and registers a manual resolver for the -// same scheme. 
This allows the test to mock the DNS resolution by supplying the -// addresses of the test backends. -// -// Returns the following: -// - a channel onto which the DNS target being resolved is written to by the -// mock DNS resolver -// - a channel to notify close of the DNS resolver -// - a channel to notify re-resolution requests to the DNS resolver -// - a manual resolver which is used to mock the actual DNS resolution -// - a cleanup function which re-registers the original DNS resolver -func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOptions, *manual.Resolver, func()) { - targetCh := make(chan resolver.Target, 1) - closeCh := make(chan struct{}, 1) - resolveNowCh := make(chan resolver.ResolveNowOptions, 1) - - mr := manual.NewBuilderWithScheme("dns") - mr.BuildCallback = func(target resolver.Target, _ resolver.ClientConn, _ resolver.BuildOptions) { targetCh <- target } - mr.CloseCallback = func() { closeCh <- struct{}{} } - mr.ResolveNowCallback = func(opts resolver.ResolveNowOptions) { resolveNowCh <- opts } - - dnsResolverBuilder := resolver.Get("dns") - resolver.UnregisterForTesting("dns") - resolver.Register(mr) - - return targetCh, closeCh, resolveNowCh, mr, func() { resolver.Register(dnsResolverBuilder) } -} - // setupAndDial performs common setup across all tests // // - creates an xDS client with the passed in bootstrap contents @@ -382,946 +302,3 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { t.Fatalf("RPCs did not fail after removal of Cluster resource") } } - -// TestEDS_ResourceRemoved tests the case where the EDS resource requested by -// the clusterresolver LB policy is removed from the management server. The test -// verifies that the EDS watch is not canceled and that RPCs continue to succeed -// with the previously received configuration. 
-func (s) TestEDS_ResourceRemoved(t *testing.T) { - // Start an xDS management server that uses a couple of channels to - // notify the test about the following events: - // - an EDS requested with the expected resource name is requested - // - EDS resource is unrequested, i.e, an EDS request with no resource name - // is received, which indicates that we are not longer interested in that - // resource. - edsResourceRequestedCh := make(chan struct{}, 1) - edsResourceCanceledCh := make(chan struct{}, 1) - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ - OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { - if req.GetTypeUrl() == version.V3EndpointsURL { - switch len(req.GetResourceNames()) { - case 0: - select { - case edsResourceCanceledCh <- struct{}{}: - default: - } - case 1: - if req.GetResourceNames()[0] == edsServiceName { - select { - case edsResourceRequestedCh <- struct{}{}: - default: - } - } - default: - t.Errorf("Unexpected number of resources, %d, in an EDS request", len(req.GetResourceNames())) - } - } - return nil - }, - }) - defer cleanup() - - server := stubserver.StartTestService(t, nil) - defer server.Stop() - - // Configure cluster and endpoints resources in the management server. - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. 
- cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - client := testgrpc.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - - // Delete the endpoints resource from the mangement server. - resources.Endpoints = nil - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Ensure that RPCs continue to succeed for the next second, and that the - // EDS watch is not canceled. - for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - select { - case <-edsResourceCanceledCh: - t.Fatal("EDS watch canceled when not expected to be canceled") - default: - } - } -} - -// TestEDS_ClusterResourceDoesNotContainEDSServiceName tests the case where the -// Cluster resource sent by the management server does not contain an EDS -// service name. The test verifies that the cluster_resolver LB policy uses the -// cluster name for the EDS resource. -func (s) TestEDS_ClusterResourceDoesNotContainEDSServiceName(t *testing.T) { - edsResourceCh := make(chan string, 1) - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ - OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { - if req.GetTypeUrl() != version.V3EndpointsURL { - return nil - } - if len(req.GetResourceNames()) > 0 { - select { - case edsResourceCh <- req.GetResourceNames()[0]: - default: - } - } - return nil - }, - }) - defer cleanup() - - server := stubserver.StartTestService(t, nil) - defer server.Stop() - - // Configure cluster and endpoints resources with the same name in the management server. The cluster resource does not specify an EDS service name. 
- resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - client := testgrpc.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for EDS request to be received on the management server") - case name := <-edsResourceCh: - if name != clusterName { - t.Fatalf("Received EDS request with resource name %q, want %q", name, clusterName) - } - } -} - -// TestEDS_ClusterResourceUpdates verifies different scenarios with regards to -// cluster resource updates. -// -// - The first cluster resource contains an eds_service_name. The test verifies -// that an EDS request is sent for the received eds_service_name. It also -// verifies that a subsequent RPC gets routed to a backend belonging to that -// service name. -// - The next cluster resource update contains no eds_service_name. The test -// verifies that a subsequent EDS request is sent for the cluster_name and -// that the previously received eds_service_name is no longer requested. It -// also verifies that a subsequent RPC gets routed to a backend belonging to -// the service represented by the cluster_name. -// - The next cluster resource update changes the circuit breaking -// configuration, but does not change the service name. 
The test verifies -// that a subsequent RPC gets routed to the same backend as before. -func (s) TestEDS_ClusterResourceUpdates(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start an xDS management server that pushes the EDS resource names onto a - // channel. - edsResourceNameCh := make(chan []string, 1) - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ - OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { - if req.GetTypeUrl() != version.V3EndpointsURL { - return nil - } - if len(req.GetResourceNames()) == 0 { - // This is the case for ACKs. Do nothing here. - return nil - } - select { - case <-ctx.Done(): - case edsResourceNameCh <- req.GetResourceNames(): - } - return nil - }, - AllowResourceSubset: true, - }) - defer cleanup() - - // Start two test backends and extract their host and port. The first - // backend is used for the EDS resource identified by the eds_service_name, - // and the second backend is used for the EDS resource identified by the - // cluster_name. - servers, cleanup2 := startTestServiceBackends(t, 2) - defer cleanup2() - addrs, ports := backendAddressesAndPorts(t, servers) - - // Configure cluster and endpoints resources in the management server. - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{ - e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])}), - e2e.DefaultEndpoint(clusterName, "localhost", []uint32{uint32(ports[1])}), - }, - SkipValidation: true, - } - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. 
- cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - client := testgrpc.NewTestServiceClient(cc) - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[0].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) - } - - // Ensure EDS watch is registered for eds_service_name. - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for EDS request to be received on the management server") - case names := <-edsResourceNameCh: - if !cmp.Equal(names, []string{edsServiceName}) { - t.Fatalf("Received EDS request with resource names %v, want %v", names, []string{edsServiceName}) - } - } - - // Change the cluster resource to not contain an eds_service_name. - resources.Clusters = []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)} - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Ensure that an EDS watch for eds_service_name is canceled and new watch - // for cluster_name is registered. The actual order in which this happens is - // not deterministic, i.e the watch for old resource could be canceled - // before the new one is registered or vice-versa. In either case, - // eventually, we want to see a request to the management server for just - // the cluster_name. - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - names := <-edsResourceNameCh - if cmp.Equal(names, []string{clusterName}) { - break - } - } - if ctx.Err() != nil { - t.Fatalf("Timeout when waiting for old EDS watch %q to be canceled and new one %q to be registered", edsServiceName, clusterName) - } - - // Make a RPC, and ensure that it gets routed to second backend, - // corresponding to the cluster_name. 
- for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { - continue - } - if peer.Addr.String() == addrs[1].Addr { - break - } - } - if ctx.Err() != nil { - t.Fatalf("Timeout when waiting for EmptyCall() to be routed to correct backend %q", addrs[1].Addr) - } - - // Change cluster resource circuit breaking count. - resources.Clusters[0].CircuitBreakers = &v3clusterpb.CircuitBreakers{ - Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ - { - Priority: v3corepb.RoutingPriority_DEFAULT, - MaxRequests: wrapperspb.UInt32(512), - }, - }, - } - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Ensure that RPCs continue to get routed to the second backend for the - // next second. - for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[1].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[1].Addr) - } - } -} - -// TestAggregateCluster_WithTwoEDSClusters tests the case where the top-level -// cluster resource is an aggregate cluster. It verifies that RPCs fail when the -// management server has not responded to all requested EDS resources, and also -// that RPCs are routed to the highest priority cluster once all requested EDS -// resources have been sent by the management server. -func (s) TestAggregateCluster_WithTwoEDSClusters(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start an xDS management server that pushes the EDS resource names onto a - // channel when requested. 
- edsResourceNameCh := make(chan []string, 1) - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ - OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { - if req.GetTypeUrl() != version.V3EndpointsURL { - return nil - } - if len(req.GetResourceNames()) == 0 { - // This is the case for ACKs. Do nothing here. - return nil - } - select { - case edsResourceNameCh <- req.GetResourceNames(): - case <-ctx.Done(): - } - return nil - }, - AllowResourceSubset: true, - }) - defer cleanup() - - // Start two test backends and extract their host and port. The first - // backend belongs to EDS cluster "cluster-1", while the second backend - // belongs to EDS cluster "cluster-2". - servers, cleanup2 := startTestServiceBackends(t, 2) - defer cleanup2() - addrs, ports := backendAddressesAndPorts(t, servers) - - // Configure an aggregate cluster, two EDS clusters and only one endpoints - // resource (corresponding to the first EDS cluster) in the management - // server. - const clusterName1 = clusterName + "-cluster-1" - const clusterName2 = clusterName + "-cluster-2" - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), - e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), - e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])})}, - SkipValidation: true, - } - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - // Wait for both EDS resources to be requested. 
- func() { - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - select { - case names := <-edsResourceNameCh: - // Copy and sort the sortedNames to avoid racing with an - // OnStreamRequest call. - sortedNames := make([]string, len(names)) - copy(sortedNames, names) - sort.Strings(sortedNames) - if cmp.Equal(sortedNames, []string{clusterName1, clusterName2}) { - return - } - default: - } - } - }() - if ctx.Err() != nil { - t.Fatalf("Timeout when waiting for all EDS resources %v to be requested", []string{clusterName1, clusterName2}) - } - - // Make an RPC with a short deadline. We expect this RPC to not succeed - // because the management server has not responded with all EDS resources - // requested. - client := testgrpc.NewTestServiceClient(cc) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) - } - - // Update the management server with the second EDS resource. - resources.Endpoints = append(resources.Endpoints, e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])})) - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Make an RPC and ensure that it gets routed to cluster-1, implicitly - // higher priority than cluster-2. - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[0].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) - } -} - -// TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange tests the case where -// the top-level cluster resource is an aggregate cluster. It verifies that RPCs -// are routed to the highest priority EDS cluster. 
-func (s) TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange(t *testing.T) { - // Start an xDS management server. - managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) - defer cleanup() - - // Start two test backends and extract their host and port. The first - // backend belongs to EDS cluster "cluster-1", while the second backend - // belongs to EDS cluster "cluster-2". - servers, cleanup2 := startTestServiceBackends(t, 2) - defer cleanup2() - addrs, ports := backendAddressesAndPorts(t, servers) - - // Configure an aggregate cluster, two EDS clusters and the corresponding - // endpoints resources in the management server. - const clusterName1 = clusterName + "cluster-1" - const clusterName2 = clusterName + "cluster-2" - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), - e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), - e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{ - e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])}), - e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])}), - }, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - // Make an RPC and ensure that it gets routed to cluster-1, implicitly - // higher priority than cluster-2. 
- client := testgrpc.NewTestServiceClient(cc) - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[0].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) - } - - // Swap the priorities of the EDS clusters in the aggregate cluster. - resources.Clusters = []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{clusterName2, clusterName1}), - e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), - e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), - } - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Wait for RPCs to get routed to cluster-2, which is now implicitly higher - // priority than cluster-1, after the priority switch above. - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() == addrs[1].Addr { - break - } - } - if ctx.Err() != nil { - t.Fatal("Timeout waiting for RPCs to be routed to cluster-2 after priority switch") - } -} - -// TestAggregateCluster_WithOneDNSCluster tests the case where the top-level -// cluster resource is an aggregate cluster that resolves to a single -// LOGICAL_DNS cluster. The test verifies that RPCs can be made to backends that -// make up the LOGICAL_DNS cluster. -func (s) TestAggregateCluster_WithOneDNSCluster(t *testing.T) { - dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() - defer cleanup1() - - // Start an xDS management server. - managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) - defer cleanup2() - - // Start two test backends. 
- servers, cleanup3 := startTestServiceBackends(t, 2) - defer cleanup3() - addrs, _ := backendAddressesAndPorts(t, servers) - - // Configure an aggregate cluster pointing to a single LOGICAL_DNS cluster. - const ( - dnsClusterName = clusterName + "-dns" - dnsHostName = "dns_host" - dnsPort = uint32(8080) - ) - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{dnsClusterName}), - makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), - }, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - // Ensure that the DNS resolver is started for the expected target. - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for DNS resolver to be started") - case target := <-dnsTargetCh: - got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) - if got != want { - t.Fatalf("DNS resolution started for target %q, want %q", got, want) - } - } - - // Update DNS resolver with test backend addresses. - dnsR.UpdateState(resolver.State{Addresses: addrs}) - - // Make an RPC and ensure that it gets routed to the first backend since the - // child policy for a LOGICAL_DNS cluster is pick_first by default. 
- client := testgrpc.NewTestServiceClient(cc) - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[0].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) - } -} - -// TestAggregateCluster_WithEDSAndDNS tests the case where the top-level cluster -// resource is an aggregate cluster that resolves to an EDS and a LOGICAL_DNS -// cluster. The test verifies that RPCs fail until both clusters are resolved to -// endpoints, and RPCs are routed to the higher priority EDS cluster. -func (s) TestAggregateCluster_WithEDSAndDNS(t *testing.T) { - dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() - defer cleanup1() - - // Start an xDS management server that pushes the name of the requested EDS - // resource onto a channel. - edsResourceCh := make(chan string, 1) - managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ - OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { - if req.GetTypeUrl() != version.V3EndpointsURL { - return nil - } - if len(req.GetResourceNames()) > 0 { - select { - case edsResourceCh <- req.GetResourceNames()[0]: - default: - } - } - return nil - }, - AllowResourceSubset: true, - }) - defer cleanup2() - - // Start two test backends and extract their host and port. The first - // backend is used for the EDS cluster and the second backend is used for - // the LOGICAL_DNS cluster. - servers, cleanup3 := startTestServiceBackends(t, 2) - defer cleanup3() - addrs, ports := backendAddressesAndPorts(t, servers) - - // Configure an aggregate cluster pointing to an EDS and DNS cluster. Also - // configure an endpoints resource for the EDS cluster. 
- const ( - edsClusterName = clusterName + "-eds" - dnsClusterName = clusterName + "-dns" - dnsHostName = "dns_host" - dnsPort = uint32(8080) - ) - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), - e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), - makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - // Ensure that an EDS request is sent for the expected resource name. - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for EDS request to be received on the management server") - case name := <-edsResourceCh: - if name != edsClusterName { - t.Fatalf("Received EDS request with resource name %q, want %q", name, edsClusterName) - } - } - - // Ensure that the DNS resolver is started for the expected target. - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for DNS resolver to be started") - case target := <-dnsTargetCh: - got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) - if got != want { - t.Fatalf("DNS resolution started for target %q, want %q", got, want) - } - } - - // Make an RPC with a short deadline. We expect this RPC to not succeed - // because the DNS resolver has not responded with endpoint addresses. 
- client := testgrpc.NewTestServiceClient(cc) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) - } - - // Update DNS resolver with test backend addresses. - dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) - - // Make an RPC and ensure that it gets routed to the first backend since the - // EDS cluster is of higher priority than the LOGICAL_DNS cluster. - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[0].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) - } -} - -// TestAggregateCluster_SwitchEDSAndDNS tests the case where the top-level -// cluster resource is an aggregate cluster. It initially resolves to a single -// EDS cluster. The test verifies that RPCs are routed to backends in the EDS -// cluster. Subsequently, the aggregate cluster resolves to a single DNS -// cluster. The test verifies that RPCs are successful, this time to backends in -// the DNS cluster. -func (s) TestAggregateCluster_SwitchEDSAndDNS(t *testing.T) { - dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() - defer cleanup1() - - // Start an xDS management server. - managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) - defer cleanup2() - - // Start two test backends and extract their host and port. The first - // backend is used for the EDS cluster and the second backend is used for - // the LOGICAL_DNS cluster. 
- servers, cleanup3 := startTestServiceBackends(t, 2) - defer cleanup3() - addrs, ports := backendAddressesAndPorts(t, servers) - - // Configure an aggregate cluster pointing to a single EDS cluster. Also, - // configure the underlying EDS cluster (and the corresponding endpoints - // resource) and DNS cluster (will be used later in the test). - const ( - dnsClusterName = clusterName + "-dns" - dnsHostName = "dns_host" - dnsPort = uint32(8080) - ) - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{edsServiceName}), - e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), - makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])})}, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - // Ensure that the RPC is routed to the appropriate backend. - client := testgrpc.NewTestServiceClient(cc) - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[0].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) - } - - // Update the aggregate cluster to point to a single DNS cluster. 
- resources.Clusters = []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{dnsClusterName}), - e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), - makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), - } - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Ensure that the DNS resolver is started for the expected target. - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for DNS resolver to be started") - case target := <-dnsTargetCh: - got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) - if got != want { - t.Fatalf("DNS resolution started for target %q, want %q", got, want) - } - } - - // Update DNS resolver with test backend addresses. - dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) - - // Ensure that start getting routed to the backend corresponding to the - // LOGICAL_DNS cluster. - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)) - if peer.Addr.String() == addrs[1].Addr { - break - } - } - if ctx.Err() != nil { - t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[1].Addr) - } -} - -// TestAggregateCluster_BadEDS_GoodToBadDNS tests the case where the top-level -// cluster is an aggregate cluster that resolves to an EDS and LOGICAL_DNS -// cluster. The test first asserts that no RPCs can be made after receiving an -// EDS response with zero endpoints because no update has been received from the -// DNS resolver yet. Once the DNS resolver pushes an update, the test verifies -// that we switch to the DNS cluster and can make a successful RPC. At this -// point when the DNS cluster returns an error, the test verifies that RPCs are -// still successful. This is the expected behavior because pick_first (the leaf -// policy) ignores resolver errors when it is not in TransientFailure. 
-func (s) TestAggregateCluster_BadEDS_GoodToBadDNS(t *testing.T) { - dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() - defer cleanup1() - - // Start an xDS management server. - managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) - defer cleanup2() - - // Start two test backends. - servers, cleanup3 := startTestServiceBackends(t, 2) - defer cleanup3() - addrs, _ := backendAddressesAndPorts(t, servers) - - // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS - // cluster. Also configure an empty endpoints resource for the EDS cluster - // that contains no endpoints. - const ( - edsClusterName = clusterName + "-eds" - dnsClusterName = clusterName + "-dns" - dnsHostName = "dns_host" - dnsPort = uint32(8080) - ) - emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), - e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), - makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - // Make an RPC with a short deadline. We expect this RPC to not succeed - // because the EDS resource came back with no endpoints, and we are yet to - // push an update through the DNS resolver. 
- client := testgrpc.NewTestServiceClient(cc) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) - } - - // Ensure that the DNS resolver is started for the expected target. - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for DNS resolver to be started") - case target := <-dnsTargetCh: - got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) - if got != want { - t.Fatalf("DNS resolution started for target %q, want %q", got, want) - } - } - - // Update DNS resolver with test backend addresses. - dnsR.UpdateState(resolver.State{Addresses: addrs}) - - // Ensure that RPCs start getting routed to the first backend since the - // child policy for a LOGICAL_DNS cluster is pick_first by default. - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { - t.Logf("EmptyCall() failed: %v", err) - continue - } - if peer.Addr.String() == addrs[0].Addr { - break - } - } - if ctx.Err() != nil { - t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[0].Addr) - } - - // Push an error from the DNS resolver as well. - dnsErr := fmt.Errorf("DNS error") - dnsR.ReportError(dnsErr) - - // Ensure that RPCs continue to succeed for the next second. 
- for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { - peer := &peer.Peer{} - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { - t.Fatalf("EmptyCall() failed: %v", err) - } - if peer.Addr.String() != addrs[0].Addr { - t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) - } - } -} - -// TestAggregateCluster_BadEDS_BadDNS tests the case where the top-level cluster -// is an aggregate cluster that resolves to an EDS and LOGICAL_DNS cluster. When -// the EDS request returns a resource that contains no endpoints, the test -// verifies that we switch to the DNS cluster. When the DNS cluster returns an -// error, the test verifies that RPCs fail with the error returned by the DNS -// resolver, and thus, ensures that pick_first (the leaf policy) does not ignore -// resolver errors. -func (s) TestAggregateCluster_BadEDS_BadDNS(t *testing.T) { - dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() - defer cleanup1() - - // Start an xDS management server. - managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) - defer cleanup2() - - // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS - // cluster. Also configure an empty endpoints resource for the EDS cluster - // that contains no endpoints. 
- const ( - edsClusterName = clusterName + "-eds" - dnsClusterName = clusterName + "-dns" - dnsHostName = "dns_host" - dnsPort = uint32(8080) - ) - emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) - resources := e2e.UpdateOptions{ - NodeID: nodeID, - Clusters: []*v3clusterpb.Cluster{ - makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), - e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), - makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, - SkipValidation: true, - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := managementServer.Update(ctx, resources); err != nil { - t.Fatal(err) - } - - // Create xDS client, configure cds_experimental LB policy with a manual - // resolver, and dial the test backends. - cc, cleanup := setupAndDial(t, bootstrapContents) - defer cleanup() - - // Make an RPC with a short deadline. We expect this RPC to not succeed - // because the EDS resource came back with no endpoints, and we are yet to - // push an update through the DNS resolver. - client := testgrpc.NewTestServiceClient(cc) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) - } - - // Ensure that the DNS resolver is started for the expected target. - select { - case <-ctx.Done(): - t.Fatal("Timeout when waiting for DNS resolver to be started") - case target := <-dnsTargetCh: - got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) - if got != want { - t.Fatalf("DNS resolution started for target %q, want %q", got, want) - } - } - - // Push an error from the DNS resolver as well. 
- dnsErr := fmt.Errorf("DNS error") - dnsR.ReportError(dnsErr) - - // Ensure that the error returned from the DNS resolver is reported to the - // caller of the RPC. - _, err := client.EmptyCall(ctx, &testpb.Empty{}) - if code := status.Code(err); code != codes.Unavailable { - t.Fatalf("EmptyCall() failed with code %s, want %s", code, codes.Unavailable) - } - if err == nil || !strings.Contains(err.Error(), dnsErr.Error()) { - t.Fatalf("EmptyCall() failed with error %v, want %v", err, dnsErr) - } -} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 0be84f7b74fb..b45084edae0f 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -24,6 +24,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" @@ -34,16 +35,20 @@ import ( "google.golang.org/grpc/internal/testutils" rrutil "google.golang.org/grpc/internal/testutils/roundrobin" "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + v3discoverypb 
"github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" @@ -125,7 +130,7 @@ func endpointResource(clusterName string, localities []localityInfo) *v3endpoint localityEndpoints = append(localityEndpoints, &v3endpointpb.LocalityLbEndpoints{ Locality: &v3corepb.Locality{SubZone: locality.name}, LbEndpoints: endpoints, - LoadBalancingWeight: &wrapperspb.UInt32Value{Value: locality.weight}, + LoadBalancingWeight: wrapperspb.UInt32(locality.weight), }) } return &v3endpointpb.ClusterLoadAssignment{ @@ -534,6 +539,296 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { } } +// TestEDS_ResourceRemoved tests the case where the EDS resource requested by +// the clusterresolver LB policy is removed from the management server. The test +// verifies that the EDS watch is not canceled and that RPCs continue to succeed +// with the previously received configuration. +func (s) TestEDS_ResourceRemoved(t *testing.T) { + // Start an xDS management server that uses a couple of channels to + // notify the test about the following events: + // - an EDS requested with the expected resource name is requested + // - EDS resource is unrequested, i.e, an EDS request with no resource name + // is received, which indicates that we are not longer interested in that + // resource. 
+ edsResourceRequestedCh := make(chan struct{}, 1) + edsResourceCanceledCh := make(chan struct{}, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() == version.V3EndpointsURL { + switch len(req.GetResourceNames()) { + case 0: + select { + case edsResourceCanceledCh <- struct{}{}: + default: + } + case 1: + if req.GetResourceNames()[0] == edsServiceName { + select { + case edsResourceRequestedCh <- struct{}{}: + default: + } + } + default: + t.Errorf("Unexpected number of resources, %d, in an EDS request", len(req.GetResourceNames())) + } + } + return nil + }, + }) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + // Delete the endpoints resource from the mangement server. 
+ resources.Endpoints = nil + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to succeed for the next second, and that the + // EDS watch is not canceled. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + select { + case <-edsResourceCanceledCh: + t.Fatal("EDS watch canceled when not expected to be canceled") + default: + } + } +} + +// TestEDS_ClusterResourceDoesNotContainEDSServiceName tests the case where the +// Cluster resource sent by the management server does not contain an EDS +// service name. The test verifies that the cluster_resolver LB policy uses the +// cluster name for the EDS resource. +func (s) TestEDS_ClusterResourceDoesNotContainEDSServiceName(t *testing.T) { + edsResourceCh := make(chan string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) > 0 { + select { + case edsResourceCh <- req.GetResourceNames()[0]: + default: + } + } + return nil + }, + }) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources with the same name in the management server. The cluster resource does not specify an EDS service name. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case name := <-edsResourceCh: + if name != clusterName { + t.Fatalf("Received EDS request with resource name %q, want %q", name, clusterName) + } + } +} + +// TestEDS_ClusterResourceUpdates verifies different scenarios with regards to +// cluster resource updates. +// +// - The first cluster resource contains an eds_service_name. The test verifies +// that an EDS request is sent for the received eds_service_name. It also +// verifies that a subsequent RPC gets routed to a backend belonging to that +// service name. +// - The next cluster resource update contains no eds_service_name. The test +// verifies that a subsequent EDS request is sent for the cluster_name and +// that the previously received eds_service_name is no longer requested. It +// also verifies that a subsequent RPC gets routed to a backend belonging to +// the service represented by the cluster_name. +// - The next cluster resource update changes the circuit breaking +// configuration, but does not change the service name. 
The test verifies +// that a subsequent RPC gets routed to the same backend as before. +func (s) TestEDS_ClusterResourceUpdates(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server that pushes the EDS resource names onto a + // channel. + edsResourceNameCh := make(chan []string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) == 0 { + // This is the case for ACKs. Do nothing here. + return nil + } + select { + case <-ctx.Done(): + case edsResourceNameCh <- req.GetResourceNames(): + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS resource identified by the eds_service_name, + // and the second backend is used for the EDS resource identified by the + // cluster_name. + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])}), + e2e.DefaultEndpoint(clusterName, "localhost", []uint32{uint32(ports[1])}), + }, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Ensure EDS watch is registered for eds_service_name. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case names := <-edsResourceNameCh: + if !cmp.Equal(names, []string{edsServiceName}) { + t.Fatalf("Received EDS request with resource names %v, want %v", names, []string{edsServiceName}) + } + } + + // Change the cluster resource to not contain an eds_service_name. + resources.Clusters = []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)} + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that an EDS watch for eds_service_name is canceled and new watch + // for cluster_name is registered. The actual order in which this happens is + // not deterministic, i.e the watch for old resource could be canceled + // before the new one is registered or vice-versa. In either case, + // eventually, we want to see a request to the management server for just + // the cluster_name. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + names := <-edsResourceNameCh + if cmp.Equal(names, []string{clusterName}) { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for old EDS watch %q to be canceled and new one %q to be registered", edsServiceName, clusterName) + } + + // Make a RPC, and ensure that it gets routed to second backend, + // corresponding to the cluster_name. 
+ for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + continue + } + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for EmptyCall() to be routed to correct backend %q", addrs[1].Addr) + } + + // Change cluster resource circuit breaking count. + resources.Clusters[0].CircuitBreakers = &v3clusterpb.CircuitBreakers{ + Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ + { + Priority: v3corepb.RoutingPriority_DEFAULT, + MaxRequests: wrapperspb.UInt32(512), + }, + }, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to get routed to the second backend for the + // next second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[1].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[1].Addr) + } + } +} + // waitForAllPrioritiesRemovedError repeatedly makes RPCs using the // TestServiceClient until they fail with an error which indicates that all // priorities have been removed. 
A non-nil error is returned if the context From 10f5b50a1110b50324c11c5255c9cdd2d39cc70f Mon Sep 17 00:00:00 2001 From: Xuan Wang Date: Fri, 23 Jun 2023 10:48:41 -0700 Subject: [PATCH 972/998] [PSM interop] Don't fail target if sub-target already failed (#6390) --- test/kokoro/xds_url_map.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh index 3084242252b8..a571ea1f00ff 100755 --- a/test/kokoro/xds_url_map.sh +++ b/test/kokoro/xds_url_map.sh @@ -141,7 +141,7 @@ main() { build_docker_images_if_needed # Run tests cd "${TEST_DRIVER_FULL_DIR}" - run_test url_map + run_test url_map || echo "Failed url_map test" } main "$@" From dd931c8036f48a18f0a36ada800c70cc85e328cf Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 23 Jun 2023 13:22:48 -0700 Subject: [PATCH 973/998] xds: clusterresolver e2e test cleanup (#6391) --- internal/testutils/xds/e2e/clientresources.go | 31 ++- test/xds/xds_client_custom_lb_test.go | 8 +- .../clusterimpl/tests/balancer_test.go | 4 +- .../clusterresolver/e2e_test/eds_impl_test.go | 189 ++++++++++-------- 4 files changed, 137 insertions(+), 95 deletions(-) diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go index ff2a5d43398a..9d46483e3d23 100644 --- a/internal/testutils/xds/e2e/clientresources.go +++ b/internal/testutils/xds/e2e/clientresources.go @@ -526,10 +526,23 @@ func ClusterResourceWithOptions(opts ClusterOptions) *v3clusterpb.Cluster { // LocalityOptions contains options to configure a Locality. type LocalityOptions struct { - // Ports is a set of ports on "localhost" belonging to this locality. - Ports []uint32 + // Name is the unique locality name. + Name string // Weight is the weight of the locality, used for load balancing. Weight uint32 + // Backends is a set of backends belonging to this locality. 
+ Backends []BackendOptions +} + +// BackendOptions contains options to configure individual backends in a +// locality. +type BackendOptions struct { + // Port number on which the backend is accepting connections. All backends + // are expected to run on localhost, hence host name is not stored here. + Port uint32 + // Health status of the backend. Default is UNKNOWN which is treated the + // same as HEALTHY. + HealthStatus v3corepb.HealthStatus } // EndpointOptions contains options to configure an Endpoint (or @@ -550,13 +563,17 @@ type EndpointOptions struct { // DefaultEndpoint returns a basic xds Endpoint resource. func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpointpb.ClusterLoadAssignment { + var bOpts []BackendOptions + for _, p := range ports { + bOpts = append(bOpts, BackendOptions{Port: p}) + } return EndpointResourceWithOptions(EndpointOptions{ ClusterName: clusterName, Host: host, Localities: []LocalityOptions{ { - Ports: ports, - Weight: 1, + Backends: bOpts, + Weight: 1, }, }, }) @@ -568,16 +585,18 @@ func EndpointResourceWithOptions(opts EndpointOptions) *v3endpointpb.ClusterLoad var endpoints []*v3endpointpb.LocalityLbEndpoints for i, locality := range opts.Localities { var lbEndpoints []*v3endpointpb.LbEndpoint - for _, port := range locality.Ports { + for _, b := range locality.Backends { lbEndpoints = append(lbEndpoints, &v3endpointpb.LbEndpoint{ HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ SocketAddress: &v3corepb.SocketAddress{ Protocol: v3corepb.SocketAddress_TCP, Address: opts.Host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, + PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: b.Port}, + }, }}, }}, + HealthStatus: b.HealthStatus, LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, }) } diff --git a/test/xds/xds_client_custom_lb_test.go 
b/test/xds/xds_client_custom_lb_test.go index 749eb7f9aa64..4624c252b0bb 100644 --- a/test/xds/xds_client_custom_lb_test.go +++ b/test/xds/xds_client_custom_lb_test.go @@ -213,12 +213,12 @@ func (s) TestWrrLocality(t *testing.T) { Host: "localhost", Localities: []e2e.LocalityOptions{ { - Ports: []uint32{port1, port2}, - Weight: 1, + Backends: []e2e.BackendOptions{{Port: port1}, {Port: port2}}, + Weight: 1, }, { - Ports: []uint32{port3, port4, port5}, - Weight: 2, + Backends: []e2e.BackendOptions{{Port: port3}, {Port: port4}, {Port: port5}}, + Weight: 2, }, }, })}, diff --git a/xds/internal/balancer/clusterimpl/tests/balancer_test.go b/xds/internal/balancer/clusterimpl/tests/balancer_test.go index d335ecd7e844..1cb8492949a0 100644 --- a/xds/internal/balancer/clusterimpl/tests/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/tests/balancer_test.go @@ -116,8 +116,8 @@ func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { Host: "localhost", Localities: []e2e.LocalityOptions{ { - Ports: []uint32{testutils.ParsePort(t, server.Address)}, - Weight: 1, + Backends: []e2e.BackendOptions{{Port: testutils.ParsePort(t, server.Address)}}, + Weight: 1, }, }, DropPercents: map[string]int{"test-drop-everything": 100}, diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index b45084edae0f..f49528499356 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -101,57 +101,16 @@ func startTestServiceBackends(t *testing.T, numBackends int) ([]*stubserver.Stub } } -// endpointResource returns an EDS resource for the given cluster name and -// localities. Backends within a locality are all assumed to be on the same -// machine (localhost). 
-func endpointResource(clusterName string, localities []localityInfo) *v3endpointpb.ClusterLoadAssignment { - var localityEndpoints []*v3endpointpb.LocalityLbEndpoints - for _, locality := range localities { - var endpoints []*v3endpointpb.LbEndpoint - for i, port := range locality.ports { - endpoint := &v3endpointpb.LbEndpoint{ - HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ - Endpoint: &v3endpointpb.Endpoint{ - Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Protocol: v3corepb.SocketAddress_TCP, - Address: "localhost", - PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: port}}, - }, - }, - }, - }, - } - if i < len(locality.healthStatus) { - endpoint.HealthStatus = locality.healthStatus[i] - } - endpoints = append(endpoints, endpoint) - } - localityEndpoints = append(localityEndpoints, &v3endpointpb.LocalityLbEndpoints{ - Locality: &v3corepb.Locality{SubZone: locality.name}, - LbEndpoints: endpoints, - LoadBalancingWeight: wrapperspb.UInt32(locality.weight), - }) - } - return &v3endpointpb.ClusterLoadAssignment{ - ClusterName: clusterName, - Endpoints: localityEndpoints, - } -} - -type localityInfo struct { - name string - weight uint32 - ports []uint32 - healthStatus []v3corepb.HealthStatus -} - // clientEndpointsResource returns an EDS resource for the specified nodeID, // service name and localities. 
-func clientEndpointsResource(nodeID, edsServiceName string, localities []localityInfo) e2e.UpdateOptions { +func clientEndpointsResource(nodeID, edsServiceName string, localities []e2e.LocalityOptions) e2e.UpdateOptions { return e2e.UpdateOptions{ - NodeID: nodeID, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{endpointResource(edsServiceName, localities)}, + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ + ClusterName: edsServiceName, + Host: "localhost", + Localities: localities, + })}, SkipValidation: true, } } @@ -175,7 +134,11 @@ func (s) TestEDS_OneLocality(t *testing.T) { // Create xDS resources for consumption by the test. We start off with a // single backend in a single EDS locality. - resources := clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[:1]}}) + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}}, + }}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if err := managementServer.Update(ctx, resources); err != nil { @@ -223,7 +186,11 @@ func (s) TestEDS_OneLocality(t *testing.T) { // Add a backend to the same locality, and ensure RPCs are sent in a // roundrobin fashion across the two backends. - resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[:2]}}) + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}, {Port: ports[1]}}, + }}) if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -233,7 +200,11 @@ func (s) TestEDS_OneLocality(t *testing.T) { // Remove the first backend, and ensure all RPCs are sent to the second // backend. 
- resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[1:2]}}) + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }}) if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -242,7 +213,11 @@ func (s) TestEDS_OneLocality(t *testing.T) { } // Replace the backend, and ensure all RPCs are sent to the new backend. - resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[2:3]}}) + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}}, + }}) if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -281,9 +256,17 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { // Create xDS resources for consumption by the test. We start off with two // localities, and single backend in each of them. - resources := clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ - {name: localityName1, weight: 1, ports: ports[:1]}, - {name: localityName2, weight: 1, ports: ports[1:2]}, + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}}, + }, + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -332,10 +315,22 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { // Add another locality with a single backend, and ensure RPCs are being // weighted roundrobined across the three backends. 
- resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ - {name: localityName1, weight: 1, ports: ports[:1]}, - {name: localityName2, weight: 1, ports: ports[1:2]}, - {name: localityName3, weight: 1, ports: ports[2:3]}, + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}}, + }, + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, + { + Name: localityName3, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}}, + }, }) if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) @@ -346,9 +341,17 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { // Remove the first locality, and ensure RPCs are being weighted // roundrobined across the remaining two backends. - resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ - {name: localityName2, weight: 1, ports: ports[1:2]}, - {name: localityName3, weight: 1, ports: ports[2:3]}, + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, + { + Name: localityName3, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}}, + }, }) if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) @@ -360,9 +363,17 @@ func (s) TestEDS_MultipleLocalities(t *testing.T) { // Add a backend to one locality, and ensure weighted roundrobin. Since RPCs // are roundrobined across localities, locality2's backend will receive // twice the traffic. 
- resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ - {name: localityName2, weight: 1, ports: ports[1:2]}, - {name: localityName3, weight: 1, ports: ports[2:4]}, + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, + { + Name: localityName3, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}, {Port: ports[3]}}, + }, }) if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) @@ -389,23 +400,31 @@ func (s) TestEDS_EndpointsHealth(t *testing.T) { // Create xDS resources for consumption by the test. Two localities with // six backends each, with two of the six backends being healthy. Both // UNKNOWN and HEALTHY are considered by gRPC for load balancing. - resources := clientEndpointsResource(nodeID, edsServiceName, []localityInfo{ - {name: localityName1, weight: 1, ports: ports[:6], healthStatus: []v3corepb.HealthStatus{ - v3corepb.HealthStatus_UNKNOWN, - v3corepb.HealthStatus_HEALTHY, - v3corepb.HealthStatus_UNHEALTHY, - v3corepb.HealthStatus_DRAINING, - v3corepb.HealthStatus_TIMEOUT, - v3corepb.HealthStatus_DEGRADED, - }}, - {name: localityName2, weight: 1, ports: ports[6:12], healthStatus: []v3corepb.HealthStatus{ - v3corepb.HealthStatus_UNKNOWN, - v3corepb.HealthStatus_HEALTHY, - v3corepb.HealthStatus_UNHEALTHY, - v3corepb.HealthStatus_DRAINING, - v3corepb.HealthStatus_TIMEOUT, - v3corepb.HealthStatus_DEGRADED, - }}, + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{ + {Port: ports[0], HealthStatus: v3corepb.HealthStatus_UNKNOWN}, + {Port: ports[1], HealthStatus: v3corepb.HealthStatus_HEALTHY}, + {Port: ports[2], HealthStatus: v3corepb.HealthStatus_UNHEALTHY}, + {Port: ports[3], HealthStatus: v3corepb.HealthStatus_DRAINING}, + {Port: ports[4], HealthStatus: 
v3corepb.HealthStatus_TIMEOUT}, + {Port: ports[5], HealthStatus: v3corepb.HealthStatus_DEGRADED}, + }, + }, + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{ + {Port: ports[6], HealthStatus: v3corepb.HealthStatus_UNKNOWN}, + {Port: ports[7], HealthStatus: v3corepb.HealthStatus_HEALTHY}, + {Port: ports[8], HealthStatus: v3corepb.HealthStatus_UNHEALTHY}, + {Port: ports[9], HealthStatus: v3corepb.HealthStatus_DRAINING}, + {Port: ports[10], HealthStatus: v3corepb.HealthStatus_TIMEOUT}, + {Port: ports[11], HealthStatus: v3corepb.HealthStatus_DEGRADED}, + }, + }, }) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -520,7 +539,11 @@ func (s) TestEDS_EmptyUpdate(t *testing.T) { } // Add a locality with one backend and ensure RPCs are successful. - resources = clientEndpointsResource(nodeID, edsServiceName, []localityInfo{{name: localityName1, weight: 1, ports: ports[:1]}}) + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}}, + }}) if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } From 0b3a81eabc28fe20b41d84ea5ba5cd08c5d051f4 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 23 Jun 2023 13:39:57 -0700 Subject: [PATCH 974/998] clusterresolver: remove priority LB related tests (#6395) --- .../balancer/clusterresolver/priority_test.go | 834 ------------------ .../balancer/clusterresolver/testutil_test.go | 113 --- .../balancer/priority/balancer_test.go | 10 + 3 files changed, 10 insertions(+), 947 deletions(-) delete mode 100644 xds/internal/balancer/clusterresolver/priority_test.go delete mode 100644 xds/internal/balancer/clusterresolver/testutil_test.go diff --git a/xds/internal/balancer/clusterresolver/priority_test.go b/xds/internal/balancer/clusterresolver/priority_test.go deleted file mode 100644 index 526204ca22c6..000000000000 --- 
a/xds/internal/balancer/clusterresolver/priority_test.go +++ /dev/null @@ -1,834 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package clusterresolver - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/weightedtarget" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/balancergroup" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/clusterimpl" - "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/balancer/wrrlocality" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" -) - -var ( - testClusterNames = []string{"test-cluster-1", "test-cluster-2"} - testSubZones = []string{"I", "II", "III", "IV"} - testEndpointAddrs []string - wrrLocalityLBConfig = &internalserviceconfig.BalancerConfig{ - Name: wrrlocality.Name, - Config: &wrrlocality.LBConfig{ - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: "round_robin", - }, - }, - } -) - -const 
testBackendAddrsCount = 12 - -func init() { - for i := 0; i < testBackendAddrsCount; i++ { - testEndpointAddrs = append(testEndpointAddrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) - } - balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond - clusterimpl.NewRandomWRR = testutils.NewTestWRR - weightedtarget.NewRandomWRR = testutils.NewTestWRR - balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond * 100 -} - -func setupTestEDS(t *testing.T, initChild *internalserviceconfig.BalancerConfig) (balancer.Balancer, *testutils.TestClientConn, *fakeclient.Client, func()) { - xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) - cc := testutils.NewTestClientConn(t) - builder := balancer.Get(Name) - edsb := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{URL: *testutils.MustParseURL("dns:///" + testEDSService)}}) - if edsb == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", Name) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := edsb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - Cluster: testClusterName, - Type: DiscoveryMechanismTypeEDS, - }}, - xdsLBPolicy: *wrrLocalityLBConfig, - }, - }); err != nil { - edsb.Close() - t.Fatal(err) - } - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - edsb.Close() - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - return edsb, cc, xdsC, func() { - edsb.Close() - } -} - -// When a high priority is ready, adding/removing lower locality doesn't cause -// changes. -// -// Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0. -func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - // Two localities, with priorities [0, 1], each with one backend. 
- clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - - // p0 is ready. - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { - t.Fatal(err) - } - - // Add p2, it shouldn't cause any updates. - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - - select { - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } - - select { - case p := <-cc.NewPickerCh: - // If we do get a new picker, ensure it is still a p1 picker. - if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, testutils.SubConnFromPicker(p)); err != nil { - t.Fatal(err) - } - default: - // No new picker; we were previously using p1 and should still be using - // p1, so this is okay. 
No need to wait for defaultTestShortTimeout - // since we just waited immediately above. - } - - // Remove p2, no updates. - clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) - - select { - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } - - select { - case p := <-cc.NewPickerCh: - // If we do get a new picker, ensure it is still a p1 picker. - if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, testutils.SubConnFromPicker(p)); err != nil { - t.Fatal(err) - } - default: - // No new picker; we were previously using p1 and should still be using - // p1, so this is okay. No need to wait for defaultTestShortTimeout - // since we just waited immediately above. - } - -} - -// Lower priority is used when higher priority is not ready. -// -// Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is -// down, use 2; remove 2, use 1. -func (s) TestEDSPriority_SwitchPriority(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - - // Two localities, with priorities [0, 1], each with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // p0 is ready. 
- edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { - t.Fatal(err) - } - - // Turn down 0, 1 is used. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with 1. - if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { - t.Fatal(err) - } - - // Add p2, it shouldn't cause any updates. - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - - select { - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } - - select { - case p := <-cc.NewPickerCh: - // If we do get a new picker, ensure it is still a p1 picker. 
- if err := testutils.IsRoundRobin([]balancer.SubConn{sc1}, testutils.SubConnFromPicker(p)); err != nil { - t.Fatal(err) - } - default: - // No new picker; we were previously using p1 and should still be using - // p1, so this is okay. No need to wait for defaultTestShortTimeout - // since we just waited immediately above. - } - - // Turn down 1, use 2 - scConnErr := errors.New("subConn connection error") - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ - ConnectivityState: connectivity.TransientFailure, - ConnectionError: scConnErr, - }) - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with 2. - if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { - t.Fatal(err) - } - - // Remove 2, use 1. - clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) - - // p2 SubConns are removed. - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove) - } - - // Should get an update with 1's old picker, to override 2's old picker. - want := errors.New("last connection error: subConn connection error") - if err := cc.WaitForPickerWithErr(ctx, want); err != nil { - t.Fatal(err) - } - -} - -// Add a lower priority while the higher priority is down. -// -// Init 0 and 1; 0 and 1 both down; add 2, use 2. 
-func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // Two localities, with different priorities, each with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // Turn down 0, 1 is used. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - // Turn down 1, pick should error. - scConnErr := errors.New("subConn connection error") - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ - ConnectivityState: connectivity.TransientFailure, - ConnectionError: scConnErr, - }) - - // Test pick failure. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - want := errors.New("last connection error: subConn connection error") - if err := cc.WaitForPickerWithErr(ctx, want); err != nil { - t.Fatal(err) - } - - // Add p2, it should create a new SubConn. 
- clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with 2. - if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { - t.Fatal(err) - } - -} - -// When a higher priority becomes available, all lower priorities are closed. -// -// Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2. -func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // Two localities, with priorities [0,1,2], each with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab1.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // Turn down 0, 1 is used. 
- edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - // Turn down 1, 2 is used. - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with 2. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { - t.Fatal(err) - } - - // When 0 becomes ready, 0 should be used, 1 and 2 should all be closed. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - var ( - scToRemove []balancer.SubConn - scToRemoveMap = make(map[balancer.SubConn]struct{}) - ) - // Each subconn is removed. - // The sub-balancer to be closed is priority's child, clusterimpl, who has - // weightedtarget as children. - // - When clusterimpl is closed, it closes weightedtarget, and this - // weightedtarget's balancer removes all the subconns. - for i := 0; i < 2; i++ { - // We expect 2 subconns, so we recv from channel 2 times. - scToRemoveMap[<-cc.RemoveSubConnCh] = struct{}{} - } - for sc := range scToRemoveMap { - scToRemove = append(scToRemove, sc) - } - - // sc1 and sc2 should be removed. - // - // With localities caching, the lower priorities are closed after a timeout, - // in goroutines. The order is no longer guaranteed. 
- if !(cmp.Equal(scToRemove[0], sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && - cmp.Equal(scToRemove[1], sc2, cmp.AllowUnexported(testutils.TestSubConn{}))) && - !(cmp.Equal(scToRemove[0], sc2, cmp.AllowUnexported(testutils.TestSubConn{})) && - cmp.Equal(scToRemove[1], sc1, cmp.AllowUnexported(testutils.TestSubConn{}))) { - t.Errorf("RemoveSubConn, want [%v, %v], got %v", sc1, sc2, scToRemove) - } - - // Test pick with 0. - if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { - t.Fatal(err) - } -} - -// At init, start the next lower priority after timeout if the higher priority -// doesn't get ready. -// -// Init 0,1; 0 is not ready (in connecting), after timeout, use 1. -func (s) TestEDSPriority_InitTimeout(t *testing.T) { - const testPriorityInitTimeout = time.Second - defer func() func() { - old := priority.DefaultPriorityInitTimeout - priority.DefaultPriorityInitTimeout = testPriorityInitTimeout - return func() { - priority.DefaultPriorityInitTimeout = old - } - }()() - - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // Two localities, with different priorities, each with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // Keep 0 in connecting, 1 will be used after init timeout. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - - // Make sure new SubConn is created before timeout. 
- select { - case <-time.After(testPriorityInitTimeout * 3 / 4): - case <-cc.NewSubConnAddrsCh: - t.Fatalf("Got a new SubConn too early (Within timeout). Expect a new SubConn only after timeout") - } - - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with 1. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { - t.Fatal(err) - } -} - -// Add localities to existing priorities. -// -// - start with 2 locality with p0 and p1 -// - add localities to existing p0 and p1 -func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // Two localities, with different priorities, each with one backend. - clab0 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab0.Build()), nil) - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. 
- ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { - t.Fatal(err) - } - - // Turn down p0 subconns, p1 subconns will be created. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p1 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { - t.Fatal(err) - } - - // Reconnect p0 subconns, p1 subconn will be closed. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - - // Test roundrobin with only p0 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { - t.Fatal(err) - } - - // Add two localities, with two priorities, with one backend. 
- clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab1.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - clab1.AddLocality(testSubZones[3], 1, 1, testEndpointAddrs[3:4], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only two p0 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc0, sc2); err != nil { - t.Fatal(err) - } - - // Turn down p0 subconns, p1 subconns will be created. - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - sc3 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - sc4 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p1 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc3, sc4); err != nil { - t.Fatal(err) - } -} - -// EDS removes all localities, and re-adds them. 
-func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { - const testPriorityInitTimeout = time.Second - defer func() func() { - old := priority.DefaultPriorityInitTimeout - priority.DefaultPriorityInitTimeout = testPriorityInitTimeout - return func() { - priority.DefaultPriorityInitTimeout = old - } - }()() - - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // Two localities, with different priorities, each with one backend. - clab0 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab0.Build()), nil) - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { - t.Fatal(err) - } - - // Remove all priorities. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - // p0 subconn should be removed. - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc0, scToRemove) - } - - // time.Sleep(time.Second) - - // Test pick return TransientFailure. 
- if err := cc.WaitForPickerWithErr(ctx, priority.ErrAllPrioritiesRemoved); err != nil { - t.Fatal(err) - } - - // Re-add two localities, with previous priorities, but different backends. - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[3:4], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - addrs01 := <-cc.NewSubConnAddrsCh - if got, want := addrs01[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc01 := <-cc.NewSubConnCh - - // Don't send any update to p0, so to not override the old state of p0. - // Later, connect to p1 and then remove p1. This will fallback to p0, and - // will send p0's old picker if they are not correctly removed. - - // p1 will be used after priority init timeout. - addrs11 := <-cc.NewSubConnAddrsCh - if got, want := addrs11[0].Addr, testEndpointAddrs[3]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc11 := <-cc.NewSubConnCh - edsb.UpdateSubConnState(sc11, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc11, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p1 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc11); err != nil { - t.Fatal(err) - } - - // Remove p1 from EDS, to fallback to p0. - clab3 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab3.Build()), nil) - - // p1 subconn should be removed. 
- scToRemove1 := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove1, sc11, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc11, scToRemove1) - } - - // Test pick return TransientFailure. - if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { - t.Fatal(err) - } - - // Send an ready update for the p0 sc that was received when re-adding - // localities to EDS. - edsb.UpdateSubConnState(sc01, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc01, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc01); err != nil { - t.Fatal(err) - } - - select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } -} - -// Test the case where the high priority contains no backends. The low priority -// will be used. -func (s) TestEDSPriority_HighPriorityNoEndpoints(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // Two localities, with priorities [0, 1], each with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - - // p0 is ready. 
- edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { - t.Fatal(err) - } - - // Remove addresses from priority 0, should use p1. - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, nil, nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - // p0 will remove the subconn, and ClientConn will send a sc update to - // shutdown. - scToRemove := <-cc.RemoveSubConnCh - edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - - // p1 is ready. - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p1 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { - t.Fatal(err) - } -} - -// Test the case where the high priority contains no healthy backends. The low -// priority will be used. -func (s) TestEDSPriority_HighPriorityAllUnhealthy(t *testing.T) { - edsb, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // Two localities, with priorities [0, 1], each with one backend. 
- clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - - // p0 is ready. - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p0 subconns. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { - t.Fatal(err) - } - - // Set priority 0 endpoints to all unhealthy, should use p1. - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], &xdstestutils.AddLocalityOptions{ - Health: []corepb.HealthStatus{corepb.HealthStatus_UNHEALTHY}, - }) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - // p0 will remove the subconn, and ClientConn will send a sc update to - // transient failure. - scToRemove := <-cc.RemoveSubConnCh - edsb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - - // p1 is ready. 
- edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - edsb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin with only p1 subconns. - if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { - t.Fatal(err) - } -} - -// Test the case where the first and only priority is removed. -func (s) TestEDSPriority_FirstPriorityRemoved(t *testing.T) { - const testPriorityInitTimeout = time.Second - defer func() func() { - old := priority.DefaultPriorityInitTimeout - priority.DefaultPriorityInitTimeout = testPriorityInitTimeout - return func() { - priority.DefaultPriorityInitTimeout = old - } - }()() - - _, cc, xdsC, cleanup := setupTestEDS(t, nil) - defer cleanup() - // One localities, with priorities [0], each with one backend. - clab1 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab1.Build()), nil) - // Remove the only localities. - clab2 := xdstestutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - xdsC.InvokeWatchEDSCallback("", parseEDSRespProtoForTesting(clab2.Build()), nil) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.WaitForErrPicker(ctx); err != nil { - t.Fatal(err) - } -} diff --git a/xds/internal/balancer/clusterresolver/testutil_test.go b/xds/internal/balancer/clusterresolver/testutil_test.go deleted file mode 100644 index 2792f802b258..000000000000 --- a/xds/internal/balancer/clusterresolver/testutil_test.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package clusterresolver - -import ( - "fmt" - "net" - "strconv" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" - typepb "github.com/envoyproxy/go-control-plane/envoy/type" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// parseEDSRespProtoForTesting parses EDS response, and panic if parsing fails. -// -// TODO: delete this. The EDS balancer tests should build an EndpointsUpdate -// directly, instead of building and parsing a proto message. -func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsresource.EndpointsUpdate { - u, err := parseEDSRespProto(m) - if err != nil { - panic(err.Error()) - } - return u -} - -// parseEDSRespProto turns EDS response proto message to EndpointsUpdate. 
-func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsresource.EndpointsUpdate, error) { - ret := xdsresource.EndpointsUpdate{} - for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { - ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) - } - priorities := make(map[uint32]struct{}) - for _, locality := range m.Endpoints { - l := locality.GetLocality() - if l == nil { - return xdsresource.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) - } - lid := internal.LocalityID{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } - priority := locality.GetPriority() - priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, xdsresource.Locality{ - ID: lid, - Endpoints: parseEndpoints(locality.GetLbEndpoints()), - Weight: locality.GetLoadBalancingWeight().GetValue(), - Priority: priority, - }) - } - for i := 0; i < len(priorities); i++ { - if _, ok := priorities[uint32(i)]; !ok { - return xdsresource.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) - } - } - return ret, nil -} - -func parseAddress(socketAddress *corepb.SocketAddress) string { - return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) -} - -func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) xdsresource.OverloadDropConfig { - percentage := dropPolicy.GetDropPercentage() - var ( - numerator = percentage.GetNumerator() - denominator uint32 - ) - switch percentage.GetDenominator() { - case typepb.FractionalPercent_HUNDRED: - denominator = 100 - case typepb.FractionalPercent_TEN_THOUSAND: - denominator = 10000 - case typepb.FractionalPercent_MILLION: - denominator = 1000000 - } - return xdsresource.OverloadDropConfig{ - Category: dropPolicy.GetCategory(), - Numerator: numerator, - Denominator: denominator, - } -} - -func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) 
[]xdsresource.Endpoint { - endpoints := make([]xdsresource.Endpoint, 0, len(lbEndpoints)) - for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, xdsresource.Endpoint{ - HealthStatus: xdsresource.EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), - Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), - }) - } - return endpoints -} diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 5e29edf6698a..22ecca84bf25 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -149,6 +149,11 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { case <-time.After(time.Millisecond * 100): } + // Test roundrobin with only p0 subconns. + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + // Remove p2, no updates. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ @@ -175,6 +180,11 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { t.Fatalf("got unexpected remove SubConn") case <-time.After(time.Millisecond * 100): } + + // Test roundrobin with only p0 subconns. + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } } // Lower priority is used when higher priority is not ready. 
From 0673105ebcb956e8bf50b96e28209ab7845a65ad Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 23 Jun 2023 13:51:28 -0700 Subject: [PATCH 975/998] clusterresolver: switch a couple of tests to e2e style (#6394) --- .../clusterresolver/clusterresolver_test.go | 300 +----------------- .../clusterresolver/e2e_test/balancer_test.go | 223 +++++++++++++ 2 files changed, 224 insertions(+), 299 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver_test.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go index 2f4fc703f177..bdf6e60b35c6 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver_test.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -19,26 +19,10 @@ package clusterresolver import ( - "context" - "fmt" "testing" "time" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpctest" - iserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/resolver" - xdsinternal "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/balancer/clusterimpl" - "google.golang.org/grpc/xds/internal/balancer/outlierdetection" - "google.golang.org/grpc/xds/internal/balancer/priority" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -47,295 +31,13 @@ const ( testEDSService = "test-eds-service-name" testClusterName = "test-cluster-name" testClusterName2 = "google_cfe_some-name" + testBalancerNameFooBar = "foo.bar" ) -var ( - // A non-empty endpoints update which is expected to be accepted by the EDS - // LB policy. 
- defaultEndpointsUpdate = xdsresource.EndpointsUpdate{ - Localities: []xdsresource.Locality{ - { - Endpoints: []xdsresource.Endpoint{{Address: "endpoint1"}}, - ID: xdsinternal.LocalityID{Zone: "zone"}, - Priority: 1, - Weight: 100, - }, - }, - } -) - -func init() { - balancer.Register(bb{}) -} - type s struct { grpctest.Tester - - cleanup func() -} - -func (ss s) Teardown(t *testing.T) { - xdsclient.ClearAllCountersForTesting() - ss.Tester.Teardown(t) - if ss.cleanup != nil { - ss.cleanup() - } } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } - -const testBalancerNameFooBar = "foo.bar" - -func newNoopTestClientConn() *noopTestClientConn { - return &noopTestClientConn{} -} - -// noopTestClientConn is used in EDS balancer config update tests that only -// cover the config update handling, but not SubConn/load-balancing. -type noopTestClientConn struct { - balancer.ClientConn -} - -func (t *noopTestClientConn) NewSubConn([]resolver.Address, balancer.NewSubConnOptions) (balancer.SubConn, error) { - return nil, nil -} - -func (noopTestClientConn) Target() string { return testEDSService } - -type scStateChange struct { - sc balancer.SubConn - state balancer.SubConnState -} - -type fakeChildBalancer struct { - cc balancer.ClientConn - subConnState *testutils.Channel - clientConnState *testutils.Channel - resolverError *testutils.Channel -} - -func (f *fakeChildBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - f.clientConnState.Send(state) - return nil -} - -func (f *fakeChildBalancer) ResolverError(err error) { - f.resolverError.Send(err) -} - -func (f *fakeChildBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - f.subConnState.Send(&scStateChange{sc: sc, state: state}) -} - -func (f *fakeChildBalancer) Close() {} - -func (f *fakeChildBalancer) ExitIdle() {} - -func (f *fakeChildBalancer) waitForClientConnStateChangeVerifyBalancerConfig(ctx context.Context, wantCCS balancer.ClientConnState) error { - ccs, 
err := f.clientConnState.Receive(ctx) - if err != nil { - return err - } - gotCCS := ccs.(balancer.ClientConnState) - if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(balancer.ClientConnState{}, "ResolverState")); diff != "" { - return fmt.Errorf("received unexpected ClientConnState, diff (-got +want): %v", diff) - } - return nil -} - -func (f *fakeChildBalancer) waitForSubConnStateChange(ctx context.Context, wantState *scStateChange) error { - val, err := f.subConnState.Receive(ctx) - if err != nil { - return err - } - gotState := val.(*scStateChange) - if !cmp.Equal(gotState, wantState, cmp.AllowUnexported(scStateChange{})) { - return fmt.Errorf("got subconnStateChange %v, want %v", gotState, wantState) - } - return nil -} - -func newFakeChildBalancer(cc balancer.ClientConn) balancer.Balancer { - return &fakeChildBalancer{ - cc: cc, - subConnState: testutils.NewChannelWithSize(10), - clientConnState: testutils.NewChannelWithSize(10), - resolverError: testutils.NewChannelWithSize(10), - } -} - -type fakeSubConn struct{} - -func (*fakeSubConn) UpdateAddresses([]resolver.Address) { panic("implement me") } -func (*fakeSubConn) Connect() { panic("implement me") } -func (*fakeSubConn) GetOrBuildProducer(balancer.ProducerBuilder) (balancer.Producer, func()) { - panic("implement me") -} - -// waitForNewChildLB makes sure that a new child LB is created by the top-level -// clusterResolverBalancer. -func waitForNewChildLB(ctx context.Context, ch *testutils.Channel) (*fakeChildBalancer, error) { - val, err := ch.Receive(ctx) - if err != nil { - return nil, fmt.Errorf("error when waiting for a new edsLB: %v", err) - } - return val.(*fakeChildBalancer), nil -} - -// setup overrides the functions which are used to create the xdsClient and the -// edsLB, creates fake version of them and makes them available on the provided -// channels. The returned cancel function should be called by the test for -// cleanup. 
-func setup(childLBCh *testutils.Channel) (*fakeclient.Client, func()) { - xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) - - origNewChildBalancer := newChildBalancer - newChildBalancer = func(_ balancer.Builder, cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - childLB := newFakeChildBalancer(cc) - defer func() { childLBCh.Send(childLB) }() - return childLB - } - return xdsC, func() { newChildBalancer = origNewChildBalancer } -} - -// TestSubConnStateChange verifies if the top-level clusterResolverBalancer passes on -// the subConnState to appropriate child balancer. -func (s) TestSubConnStateChange(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", Name) - } - defer edsB.Close() - - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDS(testEDSService), - }); err != nil { - t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback("", defaultEndpointsUpdate, nil) - edsLB, err := waitForNewChildLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - - fsc := &fakeSubConn{} - state := balancer.SubConnState{ConnectivityState: connectivity.Ready} - edsB.UpdateSubConnState(fsc, state) - if err := edsLB.waitForSubConnStateChange(ctx, &scStateChange{sc: fsc, state: state}); err != nil { - t.Fatal(err) - } -} - -func newLBConfigWithOneEDS(edsServiceName string) *LBConfig { - return &LBConfig{ - DiscoveryMechanisms: []DiscoveryMechanism{{ - 
Cluster: testClusterName, - Type: DiscoveryMechanismTypeEDS, - EDSServiceName: edsServiceName, - }}, - xdsLBPolicy: iserviceconfig.BalancerConfig{ - Name: "ROUND_ROBIN", - Config: nil, - }, - } -} - -func newLBConfigWithOneEDSAndOutlierDetection(edsServiceName string, odCfg outlierdetection.LBConfig) *LBConfig { - lbCfg := newLBConfigWithOneEDS(edsServiceName) - lbCfg.DiscoveryMechanisms[0].outlierDetection = odCfg - return lbCfg -} - -// TestOutlierDetection tests the Balancer Config sent down to the child -// priority balancer when Outlier Detection is turned on. The Priority -// Configuration sent downward should have a top level Outlier Detection Policy -// for each priority. -func (s) TestOutlierDetection(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - builder := balancer.Get(Name) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", Name) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Update Cluster Resolver with Client Conn State with Outlier Detection - // configuration present. This is what will be passed down to this balancer, - // as CDS Balancer gets the Cluster Update and converts the Outlier - // Detection data to an Outlier Detection configuration and sends it to this - // level. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, xdsC), - BalancerConfig: newLBConfigWithOneEDSAndOutlierDetection(testEDSService, noopODCfg), - }); err != nil { - t.Fatal(err) - } - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - - // Invoke EDS Callback - causes child balancer to be built and then - // UpdateClientConnState called on it with Outlier Detection as a direct - // child. 
- xdsC.InvokeWatchEDSCallback("", defaultEndpointsUpdate, nil) - edsLB, err := waitForNewChildLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - - // The priority configuration generated should have Outlier Detection as a - // direct child due to Outlier Detection being turned on. - pCfgWant := &priority.LBConfig{ - Children: map[string]*priority.Child{ - "priority-0-0": { - Config: &iserviceconfig.BalancerConfig{ - Name: outlierdetection.Name, - Config: &outlierdetection.LBConfig{ - Interval: iserviceconfig.Duration(10 * time.Second), // default interval - BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), - MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), - MaxEjectionPercent: 10, - ChildPolicy: &iserviceconfig.BalancerConfig{ - Name: clusterimpl.Name, - Config: &clusterimpl.LBConfig{ - Cluster: testClusterName, - EDSServiceName: "test-eds-service-name", - ChildPolicy: &iserviceconfig.BalancerConfig{ - Name: "ROUND_ROBIN", - Config: nil, - }, - }, - }, - }, - }, - IgnoreReresolutionRequests: true, - }, - }, - Priorities: []string{"priority-0-0"}, - } - - if err := edsLB.waitForClientConnStateChangeVerifyBalancerConfig(ctx, balancer.ClientConnState{ - BalancerConfig: pCfgWant, - }); err != nil { - t.Fatalf("EDS impl got unexpected update: %v", err) - } -} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go index 044bbae3d0fe..69b7c51cf8fa 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -23,11 +23,16 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" + 
"google.golang.org/grpc/internal/buffer" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" @@ -35,8 +40,14 @@ import ( "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -302,3 +313,215 @@ func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { t.Fatalf("RPCs did not fail after removal of Cluster resource") } } + +// wrappedPriorityBuilder implements the balancer.Builder interface and builds +// an LB policy which is a thin wrapper around the priority LB policy. The built +// LB policy and makes certain events available to the test (SubConn state +// changes and LB config updates). +type wrappedPriorityBuilder struct { + balancer.Builder + balancer.ConfigParser + // We use an unbounded buffer instead of a vanilla channel to ensure that no + // state updates are lost *and* pushing to the channel is non-blocking (to + // ensure that the sending goroutine does not block if the test is not + // reading from the channel). 
+ scStateCh *buffer.Unbounded + lbCfgCh chan serviceconfig.LoadBalancingConfig +} + +func newWrappedPriorityBuilder(b balancer.Builder) *wrappedPriorityBuilder { + return &wrappedPriorityBuilder{ + scStateCh: buffer.NewUnbounded(), + lbCfgCh: make(chan serviceconfig.LoadBalancingConfig, 1), + Builder: b, + ConfigParser: b.(balancer.ConfigParser), + } +} + +func (b *wrappedPriorityBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + priorityLB := b.Builder.Build(cc, opts) + return &wrappedPriorityBalancer{ + Balancer: priorityLB, + scStateCh: b.scStateCh, + lbCfgCh: b.lbCfgCh, + } +} + +type wrappedPriorityBalancer struct { + balancer.Balancer + scStateCh *buffer.Unbounded + lbCfgCh chan serviceconfig.LoadBalancingConfig +} + +func (b *wrappedPriorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.scStateCh.Put(state) + b.Balancer.UpdateSubConnState(sc, state) +} + +func (b *wrappedPriorityBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + select { + case b.lbCfgCh <- ccs.BalancerConfig: + default: + } + return b.Balancer.UpdateClientConnState(ccs) +} + +func (b *wrappedPriorityBalancer) Close() { + b.scStateCh.Close() + b.Balancer.Close() +} + +// Test verifies that SubConn state changes are propagated to the child policy +// by the cluster resolver LB policy. +func (s) TestSubConnStateChangePropagationToChildPolicy(t *testing.T) { + // Unregister the priority balancer builder for the duration of this test, + // and register a policy under the same name that makes SubConn state + // changes pushed to it available to the test. 
+ priorityBuilder := balancer.Get(priority.Name) + internal.BalancerUnregister(priorityBuilder.Name()) + testChildPolicy := newWrappedPriorityBuilder(priorityBuilder) + balancer.Register(testChildPolicy) + defer balancer.Register(priorityBuilder) + + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + for { + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for child policy to see a READY SubConn") + case s := <-testChildPolicy.scStateCh.Get(): + testChildPolicy.scStateCh.Load() + state := s.(balancer.SubConnState) + if state.ConnectivityState == connectivity.Ready { + return + } + } + } +} + +// Test verifies that when the received Cluster resource contains outlier +// detection configuration, the LB config pushed to the child policy contains +// the appropriate configuration for the outlier detection LB policy. 
+func (s) TestOutlierDetectionConfigPropagationToChildPolicy(t *testing.T) { + // Unregister the priority balancer builder for the duration of this test, + // and register a policy under the same name that makes the LB config + // pushed to it available to the test. + priorityBuilder := balancer.Get(priority.Name) + internal.BalancerUnregister(priorityBuilder.Name()) + testChildPolicy := newWrappedPriorityBuilder(priorityBuilder) + balancer.Register(testChildPolicy) + defer balancer.Register(priorityBuilder) + + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources in the management server. + cluster := e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone) + cluster.OutlierDetection = &v3clusterpb.OutlierDetection{ + Interval: durationpb.New(10 * time.Second), + BaseEjectionTime: durationpb.New(30 * time.Second), + MaxEjectionTime: durationpb.New(300 * time.Second), + MaxEjectionPercent: wrapperspb.UInt32(10), + SuccessRateStdevFactor: wrapperspb.UInt32(2000), + EnforcingSuccessRate: wrapperspb.UInt32(50), + SuccessRateMinimumHosts: wrapperspb.UInt32(10), + SuccessRateRequestVolume: wrapperspb.UInt32(50), + } + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{cluster}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ _, cleanup = setupAndDial(t, bootstrapContents) + defer cleanup() + + // The priority configuration generated should have Outlier Detection as a + // direct child due to Outlier Detection being turned on. + wantCfg := &priority.LBConfig{ + Children: map[string]*priority.Child{ + "priority-0-0": { + Config: &iserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + SuccessRateEjection: &outlierdetection.SuccessRateEjection{ + StdevFactor: 2000, + EnforcementPercentage: 50, + MinimumHosts: 10, + RequestVolume: 50, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: clusterName, + EDSServiceName: edsServiceName, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }, + }, + }, + }, + }, + IgnoreReresolutionRequests: true, + }, + }, + Priorities: []string{"priority-0-0"}, + } + + select { + case lbCfg := <-testChildPolicy.lbCfgCh: + gotCfg := lbCfg.(*priority.LBConfig) + if diff := cmp.Diff(wantCfg, gotCfg); diff != "" { + t.Fatalf("Child policy received unexpected diff in config (-want +got):\n%s", diff) + } + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for child policy to receive its configuration") + } +} From 789cf4e39455b08c013a5df8f728844681761493 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 26 Jun 2023 11:23:39 -0700 Subject: [PATCH 976/998] reflection: rename proto imports for disambiguation in import script (#6411) --- reflection/adapt.go | 104 ++++++++++----------- reflection/serverreflection.go | 74 +++++++-------- reflection/serverreflection_test.go | 140 
++++++++++++++-------------- 3 files changed, 159 insertions(+), 159 deletions(-) diff --git a/reflection/adapt.go b/reflection/adapt.go index fa5aad500b6e..33b907a36da4 100644 --- a/reflection/adapt.go +++ b/reflection/adapt.go @@ -19,35 +19,35 @@ package reflection import ( - v1grpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // asV1Alpha returns an implementation of the v1alpha version of the reflection // interface that delegates all calls to the given v1 version. 
-func asV1Alpha(svr v1grpc.ServerReflectionServer) v1alphagrpc.ServerReflectionServer { +func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { return v1AlphaServerImpl{svr: svr} } type v1AlphaServerImpl struct { - svr v1grpc.ServerReflectionServer + svr v1reflectiongrpc.ServerReflectionServer } -func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) } type v1AlphaServerStreamAdapter struct { - v1alphagrpc.ServerReflection_ServerReflectionInfoServer + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer } -func (s v1AlphaServerStreamAdapter) Send(response *v1pb.ServerReflectionResponse) error { +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) } -func (s v1AlphaServerStreamAdapter) Recv() (*v1pb.ServerReflectionRequest, error) { +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() if err != nil { return nil, err @@ -55,48 +55,48 @@ func (s v1AlphaServerStreamAdapter) Recv() (*v1pb.ServerReflectionRequest, error return v1AlphaToV1Request(resp), nil } -func v1ToV1AlphaResponse(v1 *v1pb.ServerReflectionResponse) *v1alphapb.ServerReflectionResponse { - var v1alpha v1alphapb.ServerReflectionResponse +func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse v1alpha.ValidHost = v1.ValidHost if v1.OriginalRequest != nil { v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) } 
switch mr := v1.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if mr != nil { - v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{ + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), }, } } - case *v1pb.ServerReflectionResponse_AllExtensionNumbersResponse: + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: if mr != nil { - v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), }, } } - case *v1pb.ServerReflectionResponse_ListServicesResponse: + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: if mr != nil { - svcs := make([]*v1alphapb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1alphapb.ServiceResponse{ + svcs[i] = &v1alphareflectionpb.ServiceResponse{ Name: svc.GetName(), } } - v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphapb.ListServiceResponse{ + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: 
&v1alphareflectionpb.ListServiceResponse{ Service: svcs, }, } } - case *v1pb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: if mr != nil { - v1alpha.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ ErrorCode: mr.ErrorResponse.GetErrorCode(), ErrorMessage: mr.ErrorResponse.GetErrorMessage(), }, @@ -108,33 +108,33 @@ func v1ToV1AlphaResponse(v1 *v1pb.ServerReflectionResponse) *v1alphapb.ServerRef return &v1alpha } -func v1AlphaToV1Request(v1alpha *v1alphapb.ServerReflectionRequest) *v1pb.ServerReflectionRequest { - var v1 v1pb.ServerReflectionRequest +func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest v1.Host = v1alpha.Host switch mr := v1alpha.MessageRequest.(type) { - case *v1alphapb.ServerReflectionRequest_FileByFilename: - v1.MessageRequest = &v1pb.ServerReflectionRequest_FileByFilename{ + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: mr.FileByFilename, } - case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: - v1.MessageRequest = &v1pb.ServerReflectionRequest_FileContainingSymbol{ + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: mr.FileContainingSymbol, } - case *v1alphapb.ServerReflectionRequest_FileContainingExtension: + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: if mr.FileContainingExtension != nil { - v1.MessageRequest = &v1pb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: 
&v1pb.ExtensionRequest{ + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ ContainingType: mr.FileContainingExtension.GetContainingType(), ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), }, } } - case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: - v1.MessageRequest = &v1pb.ServerReflectionRequest_AllExtensionNumbersOfType{ + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, } - case *v1alphapb.ServerReflectionRequest_ListServices: - v1.MessageRequest = &v1pb.ServerReflectionRequest_ListServices{ + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ ListServices: mr.ListServices, } default: @@ -143,40 +143,40 @@ func v1AlphaToV1Request(v1alpha *v1alphapb.ServerReflectionRequest) *v1pb.Server return &v1 } -func v1ToV1AlphaRequest(v1 *v1pb.ServerReflectionRequest) *v1alphapb.ServerReflectionRequest { - var v1alpha v1alphapb.ServerReflectionRequest +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest v1alpha.Host = v1.Host switch mr := v1.MessageRequest.(type) { - case *v1pb.ServerReflectionRequest_FileByFilename: + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: if mr != nil { - v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_FileByFilename{ + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: mr.FileByFilename, } } - case *v1pb.ServerReflectionRequest_FileContainingSymbol: + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: if mr != nil { - v1alpha.MessageRequest = 
&v1alphapb.ServerReflectionRequest_FileContainingSymbol{ + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: mr.FileContainingSymbol, } } - case *v1pb.ServerReflectionRequest_FileContainingExtension: + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: if mr != nil { - v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1alphapb.ExtensionRequest{ + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ ContainingType: mr.FileContainingExtension.GetContainingType(), ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), }, } } - case *v1pb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: if mr != nil { - v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType{ + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, } } - case *v1pb.ServerReflectionRequest_ListServices: + case *v1reflectionpb.ServerReflectionRequest_ListServices: if mr != nil { - v1alpha.MessageRequest = &v1alphapb.ServerReflectionRequest_ListServices{ + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ ListServices: mr.ListServices, } } diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index a70295bcaa7d..76dae09d8886 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -48,9 +48,9 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - v1grpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphagrpc 
"google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -67,8 +67,8 @@ var _ GRPCServer = (*grpc.Server)(nil) // Both the v1 and v1alpha versions are registered. func Register(s GRPCServer) { svr := NewServerV1(ServerOptions{Services: s}) - v1alphagrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) - v1grpc.RegisterServerReflectionServer(s, svr) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } // RegisterV1 registers only the v1 version of the server reflection service @@ -76,7 +76,7 @@ func Register(s GRPCServer) { // users should use Register instead, at least until clients have upgraded. func RegisterV1(s GRPCServer) { svr := NewServerV1(ServerOptions{Services: s}) - v1grpc.RegisterServerReflectionServer(s, svr) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -139,7 +139,7 @@ type ServerOptions struct { // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { return asV1Alpha(NewServerV1(opts)) } @@ -151,7 +151,7 @@ func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
-func NewServerV1(opts ServerOptions) v1grpc.ServerReflectionServer { +func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -166,7 +166,7 @@ func NewServerV1(opts ServerOptions) v1grpc.ServerReflectionServer { } type serverReflectionServer struct { - v1alphagrpc.UnimplementedServerReflectionServer + v1alphareflectiongrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -240,11 +240,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*v1pb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1pb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &v1pb.ServiceResponse{Name: svc}) + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -253,7 +253,7 @@ func (s *serverReflectionServer) listServices() []*v1pb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. 
-func (s *serverReflectionServer) ServerReflectionInfo(stream v1grpc.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -264,79 +264,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream v1grpc.ServerReflec return err } - out := &v1pb.ServerReflectionResponse{ + out := &v1reflectionpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *v1pb.ServerReflectionRequest_FileByFilename: + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1pb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1pb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1pb.ServerReflectionRequest_FileContainingSymbol: + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1pb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + 
ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1pb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1pb.ServerReflectionRequest_FileContainingExtension: + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1pb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1pb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1pb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1pb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } 
else { - out.MessageResponse = &v1pb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1pb.ExtensionNumberResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *v1pb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1pb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1pb.ListServiceResponse{ + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index 8a3ca7163f0e..e61fd22d5a18 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -37,10 +37,10 @@ import ( "google.golang.org/protobuf/types/descriptorpb" "google.golang.org/protobuf/types/dynamicpb" - v1grpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" pbv3 "google.golang.org/grpc/reflection/grpc_testing_not_regenerate" ) @@ -218,11 +218,11 @@ func (x) TestReflectionEnd2end(t *testing.T) { } defer conn.Close() - clientV1 := v1grpc.NewServerReflectionClient(conn) 
- clientV1Alpha := v1alphagrpc.NewServerReflectionClient(conn) + clientV1 := v1reflectiongrpc.NewServerReflectionClient(conn) + clientV1Alpha := v1alphareflectiongrpc.NewServerReflectionClient(conn) testCases := []struct { name string - client v1grpc.ServerReflectionClient + client v1reflectiongrpc.ServerReflectionClient }{ { name: "v1", @@ -258,10 +258,10 @@ func (x) TestReflectionEnd2end(t *testing.T) { } } -func testFileByFilenameTransitiveClosure(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { +func testFileByFilenameTransitiveClosure(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { filename := "reflection/grpc_testing/proto2_ext2.proto" - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: filename, }, }); err != nil { @@ -273,7 +273,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream v1grpc.ServerRefle t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) } @@ -291,7 +291,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream v1grpc.ServerRefle } } -func testFileByFilename(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilename(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { filename string want []byte @@ -301,8 
+301,8 @@ func testFileByFilename(t *testing.T, stream v1grpc.ServerReflection_ServerRefle {"reflection/grpc_testing/proto2_ext.proto", fdProto2ExtByte}, {"dynamic.proto", fdDynamicByte}, } { - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: test.filename, }, }); err != nil { @@ -315,7 +315,7 @@ func testFileByFilename(t *testing.T, stream v1grpc.ServerReflection_ServerRefle } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", test.filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -325,14 +325,14 @@ func testFileByFilename(t *testing.T, stream v1grpc.ServerReflection_ServerRefle } } -func testFileByFilenameError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilenameError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "test.poto", "proo2.proto", "proto2_et.proto", } { - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: test, }, }); err != nil { @@ -345,14 +345,14 @@ func testFileByFilenameError(t *testing.T, stream v1grpc.ServerReflection_Server } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: 
t.Errorf("FileByFilename(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingSymbol(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbol(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { symbol string want []byte @@ -378,8 +378,8 @@ func testFileContainingSymbol(t *testing.T, stream v1grpc.ServerReflection_Serve {"grpc.testing.DynamicReq", fdDynamicByte}, {"grpc.testing.DynamicRes", fdDynamicByte}, } { - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test.symbol, }, }); err != nil { @@ -392,7 +392,7 @@ func testFileContainingSymbol(t *testing.T, stream v1grpc.ServerReflection_Serve } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingSymbol(%v)\nreceived: %q,\nwant: %q", test.symbol, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -402,15 +402,15 @@ func testFileContainingSymbol(t *testing.T, stream v1grpc.ServerReflection_Serve } } -func testFileContainingSymbolError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbolError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.SerchService", "grpc.testing.SearchService.SearchE", "grpc.tesing.SearchResponse", "gpc.testing.ToBeExtended", } { - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: 
&v1pb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test, }, }); err != nil { @@ -423,14 +423,14 @@ func testFileContainingSymbolError(t *testing.T, stream v1grpc.ServerReflection_ } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingSymbol(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingExtension(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtension(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 @@ -442,9 +442,9 @@ func testFileContainingExtension(t *testing.T, stream v1grpc.ServerReflection_Se {"grpc.testing.ToBeExtended", 23, fdProto2Ext2Byte}, {"grpc.testing.ToBeExtended", 29, fdProto2Ext2Byte}, } { - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1pb.ExtensionRequest{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -459,7 +459,7 @@ func testFileContainingExtension(t *testing.T, stream v1grpc.ServerReflection_Se } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingExtension(%v, %v)\nreceived: %q,\nwant: %q", test.typeName, 
test.extNum, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -469,7 +469,7 @@ func testFileContainingExtension(t *testing.T, stream v1grpc.ServerReflection_Se } } -func testFileContainingExtensionError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtensionError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 @@ -477,9 +477,9 @@ func testFileContainingExtensionError(t *testing.T, stream v1grpc.ServerReflecti {"grpc.testing.ToBExtended", 17}, {"grpc.testing.ToBeExtended", 15}, } { - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1pb.ExtensionRequest{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -494,14 +494,14 @@ func testFileContainingExtensionError(t *testing.T, stream v1grpc.ServerReflecti } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) } } } -func testAllExtensionNumbersOfType(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfType(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string want []int32 @@ -509,8 +509,8 @@ func testAllExtensionNumbersOfType(t *testing.T, stream v1grpc.ServerReflection_ {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, {"grpc.testing.DynamicReq", nil}, } { - if err := 
stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test.typeName, }, }); err != nil { @@ -523,7 +523,7 @@ func testAllExtensionNumbersOfType(t *testing.T, stream v1grpc.ServerReflection_ } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_AllExtensionNumbersResponse: + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: extNum := r.GetAllExtensionNumbersResponse().ExtensionNumber sort.Sort(intArray(extNum)) if r.GetAllExtensionNumbersResponse().BaseTypeName != test.typeName || @@ -536,12 +536,12 @@ func testAllExtensionNumbersOfType(t *testing.T, stream v1grpc.ServerReflection_ } } -func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.ToBeExtendedE", } { - if err := stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test, }, }); err != nil { @@ -554,16 +554,16 @@ func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1grpc.ServerReflec } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test, r.MessageResponse) } } } -func testListServices(t *testing.T, stream v1grpc.ServerReflection_ServerReflectionInfoClient) { - if err := 
stream.Send(&v1pb.ServerReflectionRequest{ - MessageRequest: &v1pb.ServerReflectionRequest_ListServices{}, +func testListServices(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_ListServices{}, }); err != nil { t.Fatalf("failed to send request: %v", err) } @@ -574,7 +574,7 @@ func testListServices(t *testing.T, stream v1grpc.ServerReflection_ServerReflect } switch r.MessageResponse.(type) { - case *v1pb.ServerReflectionResponse_ListServicesResponse: + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: services := r.GetListServicesResponse().Service want := []string{ "grpc.testingv3.SearchServiceV3", @@ -627,10 +627,10 @@ func registerDynamicProto(srv *grpc.Server, fdp *descriptorpb.FileDescriptorProt } type v1AlphaClientAdapter struct { - stub v1alphagrpc.ServerReflectionClient + stub v1alphareflectiongrpc.ServerReflectionClient } -func (v v1AlphaClientAdapter) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (v1grpc.ServerReflection_ServerReflectionInfoClient, error) { +func (v v1AlphaClientAdapter) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient, error) { stream, err := v.stub.ServerReflectionInfo(ctx, opts...) if err != nil { return nil, err @@ -639,14 +639,14 @@ func (v v1AlphaClientAdapter) ServerReflectionInfo(ctx context.Context, opts ... 
} type v1AlphaClientStreamAdapter struct { - v1alphagrpc.ServerReflection_ServerReflectionInfoClient + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoClient } -func (s v1AlphaClientStreamAdapter) Send(request *v1pb.ServerReflectionRequest) error { +func (s v1AlphaClientStreamAdapter) Send(request *v1reflectionpb.ServerReflectionRequest) error { return s.ServerReflection_ServerReflectionInfoClient.Send(v1ToV1AlphaRequest(request)) } -func (s v1AlphaClientStreamAdapter) Recv() (*v1pb.ServerReflectionResponse, error) { +func (s v1AlphaClientStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionResponse, error) { resp, err := s.ServerReflection_ServerReflectionInfoClient.Recv() if err != nil { return nil, err @@ -654,48 +654,48 @@ func (s v1AlphaClientStreamAdapter) Recv() (*v1pb.ServerReflectionResponse, erro return v1AlphaToV1Response(resp), nil } -func v1AlphaToV1Response(v1alpha *v1alphapb.ServerReflectionResponse) *v1pb.ServerReflectionResponse { - var v1 v1pb.ServerReflectionResponse +func v1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse { + var v1 v1reflectionpb.ServerReflectionResponse v1.ValidHost = v1alpha.ValidHost if v1alpha.OriginalRequest != nil { v1.OriginalRequest = v1AlphaToV1Request(v1alpha.OriginalRequest) } switch mr := v1alpha.MessageResponse.(type) { - case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse: + case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse: if mr != nil { - v1.MessageResponse = &v1pb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1pb.FileDescriptorResponse{ + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{ FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), }, } } - case *v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse: + case 
*v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: if mr != nil { - v1.MessageResponse = &v1pb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1pb.ExtensionNumberResponse{ + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), }, } } - case *v1alphapb.ServerReflectionResponse_ListServicesResponse: + case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse: if mr != nil { - svcs := make([]*v1pb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1pb.ServiceResponse{ + svcs[i] = &v1reflectionpb.ServiceResponse{ Name: svc.GetName(), } } - v1.MessageResponse = &v1pb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1pb.ListServiceResponse{ + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ Service: svcs, }, } } - case *v1alphapb.ServerReflectionResponse_ErrorResponse: + case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse: if mr != nil { - v1.MessageResponse = &v1pb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1pb.ErrorResponse{ + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: mr.ErrorResponse.GetErrorCode(), ErrorMessage: mr.ErrorResponse.GetErrorMessage(), }, From 1634254ac6b69e92f07f65aefc69bf30152ad326 Mon Sep 17 00:00:00 2001 From: Jaewan Park Date: Wed, 28 Jun 2023 00:58:10 +0900 Subject: [PATCH 977/998] rpc_util: Reuse memory buffer for receiving 
message (#5862) --- benchmark/benchmain/main.go | 33 ++++++++ benchmark/stats/stats.go | 10 ++- dialoptions.go | 23 +++++ rpc_util.go | 27 +++--- rpc_util_test.go | 4 +- server.go | 27 +++++- shared_buffer_pool.go | 154 ++++++++++++++++++++++++++++++++++ shared_buffer_pool_test.go | 48 +++++++++++ stream.go | 4 +- test/recv_buffer_pool_test.go | 90 ++++++++++++++++++++ 10 files changed, 400 insertions(+), 20 deletions(-) create mode 100644 shared_buffer_pool.go create mode 100644 shared_buffer_pool_test.go create mode 100644 test/recv_buffer_pool_test.go diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 78ca59363841..1366c18c972b 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -112,6 +112,7 @@ var ( serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a a comma-separated list") sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a a comma-separated list") connections = flag.Int("connections", 1, "The number of connections. Each connection will handle maxConcurrentCalls RPC streams") + recvBufferPool = flags.StringWithAllowedValues("recvBufferPool", recvBufferPoolNil, "Configures the shared receive buffer pool. 
One of: nil, simple, all", allRecvBufferPools) logger = grpclog.Component("benchmark") ) @@ -136,6 +137,10 @@ const ( networkModeLAN = "LAN" networkModeWAN = "WAN" networkLongHaul = "Longhaul" + // Shared recv buffer pool + recvBufferPoolNil = "nil" + recvBufferPoolSimple = "simple" + recvBufferPoolAll = "all" numStatsBuckets = 10 warmupCallCount = 10 @@ -147,6 +152,7 @@ var ( allCompModes = []string{compModeOff, compModeGzip, compModeNop, compModeAll} allToggleModes = []string{toggleModeOff, toggleModeOn, toggleModeBoth} allNetworkModes = []string{networkModeNone, networkModeLocal, networkModeLAN, networkModeWAN, networkLongHaul} + allRecvBufferPools = []string{recvBufferPoolNil, recvBufferPoolSimple, recvBufferPoolAll} defaultReadLatency = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. defaultReadKbps = []int{0, 10240} // if non-positive, infinite defaultReadMTU = []int{0} // if non-positive, infinite @@ -330,6 +336,15 @@ func makeClients(bf stats.Features) ([]testgrpc.BenchmarkServiceClient, func()) if bf.ServerWriteBufferSize >= 0 { sopts = append(sopts, grpc.WriteBufferSize(bf.ServerWriteBufferSize)) } + switch bf.RecvBufferPool { + case recvBufferPoolNil: + // Do nothing. 
+ case recvBufferPoolSimple: + opts = append(opts, grpc.WithRecvBufferPool(grpc.NewSharedBufferPool())) + sopts = append(sopts, grpc.RecvBufferPool(grpc.NewSharedBufferPool())) + default: + logger.Fatalf("Unknown shared recv buffer pool type: %v", bf.RecvBufferPool) + } sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(bf.MaxConcurrentCalls+1))) opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) @@ -573,6 +588,7 @@ type featureOpts struct { serverReadBufferSize []int serverWriteBufferSize []int sleepBetweenRPCs []time.Duration + recvBufferPools []string } // makeFeaturesNum returns a slice of ints of size 'maxFeatureIndex' where each @@ -619,6 +635,8 @@ func makeFeaturesNum(b *benchOpts) []int { featuresNum[i] = len(b.features.serverWriteBufferSize) case stats.SleepBetweenRPCs: featuresNum[i] = len(b.features.sleepBetweenRPCs) + case stats.RecvBufferPool: + featuresNum[i] = len(b.features.recvBufferPools) default: log.Fatalf("Unknown feature index %v in generateFeatures. 
maxFeatureIndex is %v", i, stats.MaxFeatureIndex) } @@ -687,6 +705,7 @@ func (b *benchOpts) generateFeatures(featuresNum []int) []stats.Features { ServerReadBufferSize: b.features.serverReadBufferSize[curPos[stats.ServerReadBufferSize]], ServerWriteBufferSize: b.features.serverWriteBufferSize[curPos[stats.ServerWriteBufferSize]], SleepBetweenRPCs: b.features.sleepBetweenRPCs[curPos[stats.SleepBetweenRPCs]], + RecvBufferPool: b.features.recvBufferPools[curPos[stats.RecvBufferPool]], } if len(b.features.reqPayloadCurves) == 0 { f.ReqSizeBytes = b.features.reqSizeBytes[curPos[stats.ReqSizeBytesIndex]] @@ -759,6 +778,7 @@ func processFlags() *benchOpts { serverReadBufferSize: append([]int(nil), *serverReadBufferSize...), serverWriteBufferSize: append([]int(nil), *serverWriteBufferSize...), sleepBetweenRPCs: append([]time.Duration(nil), *sleepBetweenRPCs...), + recvBufferPools: setRecvBufferPool(*recvBufferPool), }, } @@ -834,6 +854,19 @@ func setCompressorMode(val string) []string { } } +func setRecvBufferPool(val string) []string { + switch val { + case recvBufferPoolNil, recvBufferPoolSimple: + return []string{val} + case recvBufferPoolAll: + return []string{recvBufferPoolNil, recvBufferPoolSimple} + default: + // This should never happen because a wrong value passed to this flag would + // be caught during flag.Parse(). + return []string{} + } +} + func main() { opts := processFlags() before(opts) diff --git a/benchmark/stats/stats.go b/benchmark/stats/stats.go index 74070fd76c07..3989e25dbf4b 100644 --- a/benchmark/stats/stats.go +++ b/benchmark/stats/stats.go @@ -57,6 +57,7 @@ const ( ServerReadBufferSize ServerWriteBufferSize SleepBetweenRPCs + RecvBufferPool // MaxFeatureIndex is a place holder to indicate the total number of feature // indices we have. Any new feature indices should be added above this. @@ -126,6 +127,8 @@ type Features struct { ServerWriteBufferSize int // SleepBetweenRPCs configures optional delay between RPCs. 
SleepBetweenRPCs time.Duration + // RecvBufferPool represents the shared recv buffer pool used. + RecvBufferPool string } // String returns all the feature values as a string. @@ -145,12 +148,13 @@ func (f Features) String() string { "trace_%v-latency_%v-kbps_%v-MTU_%v-maxConcurrentCalls_%v-%s-%s-"+ "compressor_%v-channelz_%v-preloader_%v-clientReadBufferSize_%v-"+ "clientWriteBufferSize_%v-serverReadBufferSize_%v-serverWriteBufferSize_%v-"+ - "sleepBetweenRPCs_%v-connections_%v-", + "sleepBetweenRPCs_%v-connections_%v-recvBufferPool_%v-", f.NetworkMode, f.UseBufConn, f.EnableKeepalive, f.BenchTime, f.EnableTrace, f.Latency, f.Kbps, f.MTU, f.MaxConcurrentCalls, reqPayloadString, respPayloadString, f.ModeCompressor, f.EnableChannelz, f.EnablePreloader, f.ClientReadBufferSize, f.ClientWriteBufferSize, f.ServerReadBufferSize, - f.ServerWriteBufferSize, f.SleepBetweenRPCs, f.Connections) + f.ServerWriteBufferSize, f.SleepBetweenRPCs, f.Connections, + f.RecvBufferPool) } // SharedFeatures returns the shared features as a pretty printable string. @@ -224,6 +228,8 @@ func (f Features) partialString(b *bytes.Buffer, wantFeatures []bool, sep, delim b.WriteString(fmt.Sprintf("ServerWriteBufferSize%v%v%v", sep, f.ServerWriteBufferSize, delim)) case SleepBetweenRPCs: b.WriteString(fmt.Sprintf("SleepBetweenRPCs%v%v%v", sep, f.SleepBetweenRPCs, delim)) + case RecvBufferPool: + b.WriteString(fmt.Sprintf("RecvBufferPool%v%v%v", sep, f.RecvBufferPool, delim)) default: log.Fatalf("Unknown feature index %v. maxFeatureIndex is %v", i, MaxFeatureIndex) } diff --git a/dialoptions.go b/dialoptions.go index 15a3d5102a9a..23ea95237ea0 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -78,6 +78,7 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. 
@@ -628,6 +629,7 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + recvBufferPool: nopBufferPool{}, } } @@ -676,3 +678,24 @@ func WithIdleTimeout(d time.Duration) DialOption { o.idleTimeout = d }) } + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/rpc_util.go b/rpc_util.go index 2030736a306b..a844d28f49d0 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -726,12 +727,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -792,15 +793,17 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } diff --git a/rpc_util_test.go b/rpc_util_test.go index 90912d52a226..84f2348655b9 100644 --- a/rpc_util_test.go +++ b/rpc_util_test.go @@ -65,7 +65,7 @@ func (s) TestSimpleParsing(t *testing.T) { {append([]byte{0, 1, 0, 0, 0}, bigMsg...), nil, bigMsg, compressionNone}, } { buf := fullReader{bytes.NewReader(test.p)} - parser := &parser{r: buf} + parser := &parser{r: buf, recvBufferPool: nopBufferPool{}} pt, b, err := parser.recvMsg(math.MaxInt32) if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { t.Fatalf("parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err) @@ -77,7 +77,7 @@ func (s) TestMultipleParsing(t *testing.T) { // Set a byte stream consists of 3 messages with their headers. 
p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} b := fullReader{bytes.NewReader(p)} - parser := &parser{r: b} + parser := &parser{r: b, recvBufferPool: nopBufferPool{}} wantRecvs := []struct { pt payloadFormat diff --git a/server.go b/server.go index 81969e7c15a9..e076ec7143bb 100644 --- a/server.go +++ b/server.go @@ -174,6 +174,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ @@ -182,6 +183,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -552,6 +554,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 
2^16 should allow @@ -1296,7 +1319,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1506,7 +1529,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, diff --git a/shared_buffer_pool.go b/shared_buffer_pool.go new file mode 100644 index 000000000000..c3a5a9ac1f19 --- /dev/null +++ b/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. 
+type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(interface{}) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() interface{} { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. 
+type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/shared_buffer_pool_test.go b/shared_buffer_pool_test.go new file mode 100644 index 000000000000..f5ed7c8314f1 --- /dev/null +++ b/shared_buffer_pool_test.go @@ -0,0 +1,48 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "testing" + +func (s) TestSharedBufferPool(t *testing.T) { + pools := []SharedBufferPool{ + nopBufferPool{}, + NewSharedBufferPool(), + } + + lengths := []int{ + level4PoolMaxSize + 1, + level4PoolMaxSize, + level3PoolMaxSize, + level2PoolMaxSize, + level1PoolMaxSize, + level0PoolMaxSize, + } + + for _, p := range pools { + for _, l := range lengths { + bs := p.Get(l) + if len(bs) != l { + t.Fatalf("Expected buffer of length %d, got %d", l, len(bs)) + } + + p.Put(&bs) + } + } +} diff --git a/stream.go b/stream.go index 10092685b228..de32a7597145 100644 --- a/stream.go +++ b/stream.go @@ -507,7 +507,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -1270,7 +1270,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} 
ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is diff --git a/test/recv_buffer_pool_test.go b/test/recv_buffer_pool_test.go new file mode 100644 index 000000000000..8bb6db4a77af --- /dev/null +++ b/test/recv_buffer_pool_test.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/stubserver" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func (s) TestRecvBufferPool(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for i := 0; i < 10; i++ { + preparedMsg := &grpc.PreparedMsg{} + err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + Body: []byte{'0' + uint8(i)}, + }, + }) + if err != nil { + return err + } + stream.SendMsg(preparedMsg) + } + return nil + }, + } + if err := ss.Start( + []grpc.ServerOption{grpc.RecvBufferPool(grpc.NewSharedBufferPool())}, + grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()), + ); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + 
stream, err := ss.Client.FullDuplexCall(ctx)
+	if err != nil {
+		t.Fatalf("ss.Client.FullDuplexCall failed: %v", err)
+	}
+
+	var ngot int
+	var buf bytes.Buffer
+	for {
+		reply, err := stream.Recv()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		ngot++
+		if buf.Len() > 0 {
+			buf.WriteByte(',')
+		}
+		buf.Write(reply.GetPayload().GetBody())
+	}
+	if want := 10; ngot != want {
+		t.Errorf("Got %d replies, want %d", ngot, want)
+	}
+	if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want {
+		t.Errorf("Got replies %q; want %q", got, want)
+	}
+}
From e8599844e79f461b30aec8952b27fc95325f48e8 Mon Sep 17 00:00:00 2001
From: Tobo Atchou
Date: Tue, 27 Jun 2023 18:27:20 +0200
Subject: [PATCH 978/998] server: with TLS, set TCP user timeout on the
 underlying raw connection (#5646) (#6321)

---
 internal/transport/http2_server.go   |   2 +-
 internal/transport/keepalive_test.go | 119 ++++++++++++++++++++++-----
 internal/transport/transport_test.go |   7 +-
 3 files changed, 103 insertions(+), 25 deletions(-)

diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go
index 79e86ba08836..f9606401289d 100644
--- a/internal/transport/http2_server.go
+++ b/internal/transport/http2_server.go
@@ -238,7 +238,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		kp.Timeout = defaultServerKeepaliveTimeout
 	}
 	if kp.Time != infinity {
-		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+		if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
 			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
 		}
 	}
diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go
index b9e6d74cb739..a46bcf020df8 100644
--- a/internal/transport/keepalive_test.go
+++ b/internal/transport/keepalive_test.go
@@ -24,18 +24,23 @@ package transport
 
 import (
 	"context"
+	"crypto/tls"
+	"crypto/x509"
 	"fmt"
 	"io"
 	"net"
+	"os"
 	"strings"
 	"testing"
"time" "golang.org/x/net/http2" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/testdata" ) const defaultTestTimeout = 10 * time.Second @@ -581,24 +586,49 @@ func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T // the keepalive timeout, as detailed in proposal A18. func (s) TestTCPUserTimeout(t *testing.T) { tests := []struct { + tls bool time time.Duration timeout time.Duration clientWantTimeout time.Duration serverWantTimeout time.Duration }{ { + false, 10 * time.Second, 10 * time.Second, 10 * 1000 * time.Millisecond, 10 * 1000 * time.Millisecond, }, { + false, 0, 0, 0, 20 * 1000 * time.Millisecond, }, { + false, + infinity, + infinity, + 0, + 0, + }, + { + true, + 10 * time.Second, + 10 * time.Second, + 10 * 1000 * time.Millisecond, + 10 * 1000 * time.Millisecond, + }, + { + true, + 0, + 0, + 0, + 20 * 1000 * time.Millisecond, + }, + { + true, infinity, infinity, 0, @@ -606,22 +636,32 @@ func (s) TestTCPUserTimeout(t *testing.T) { }, } for _, tt := range tests { + sopts := &ServerConfig{ + KeepaliveParams: keepalive.ServerParameters{ + Time: tt.time, + Timeout: tt.timeout, + }, + } + + copts := ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: tt.time, + Timeout: tt.timeout, + }, + } + + if tt.tls { + copts.TransportCredentials = makeTLSCreds(t, "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + sopts.Credentials = makeTLSCreds(t, "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + + } + server, client, cancel := setUpWithOptions( t, 0, - &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - Time: tt.time, - Timeout: tt.timeout, - }, - }, + sopts, normal, - ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: tt.time, - Timeout: tt.timeout, - }, - }, + 
copts, ) defer func() { client.Close(fmt.Errorf("closed manually by test")) @@ -630,6 +670,7 @@ func (s) TestTCPUserTimeout(t *testing.T) { }() var sc *http2Server + var srawConn net.Conn // Wait until the server transport is setup. for { server.mu.Lock() @@ -644,6 +685,7 @@ func (s) TestTCPUserTimeout(t *testing.T) { if !ok { t.Fatalf("Failed to convert %v to *http2Server", k) } + srawConn = server.conns[k] } server.mu.Unlock() break @@ -657,25 +699,60 @@ func (s) TestTCPUserTimeout(t *testing.T) { } client.CloseStream(stream, io.EOF) - cltOpt, err := syscall.GetTCPUserTimeout(client.conn) - if err != nil { - t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err) + // check client TCP user timeout only when non TLS + // TODO : find a way to get the underlying conn for client when TLS + if !tt.tls { + cltOpt, err := syscall.GetTCPUserTimeout(client.conn) + if err != nil { + t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err) + } + if cltOpt < 0 { + t.Skipf("skipping test on unsupported environment") + } + if gotTimeout := time.Duration(cltOpt) * time.Millisecond; gotTimeout != tt.clientWantTimeout { + t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.clientWantTimeout) + } } - if cltOpt < 0 { - t.Skipf("skipping test on unsupported environment") + scConn := sc.conn + if tt.tls { + if _, ok := sc.conn.(*net.TCPConn); ok { + t.Fatalf("sc.conn is should have wrapped conn with TLS") + } + scConn = srawConn } - if gotTimeout := time.Duration(cltOpt) * time.Millisecond; gotTimeout != tt.clientWantTimeout { - t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.clientWantTimeout) + // verify the type of scConn (on which TCP user timeout will be got) + if _, ok := scConn.(*net.TCPConn); !ok { + t.Fatalf("server underlying conn is of type %T, want net.TCPConn", scConn) } - - srvOpt, err := syscall.GetTCPUserTimeout(sc.conn) + srvOpt, err := syscall.GetTCPUserTimeout(scConn) if err != nil { t.Fatalf("syscall.GetTCPUserTimeout() failed: 
%v", err) } if gotTimeout := time.Duration(srvOpt) * time.Millisecond; gotTimeout != tt.serverWantTimeout { t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.serverWantTimeout) } + + } +} + +func makeTLSCreds(t *testing.T, certPath, keyPath, rootsPath string) credentials.TransportCredentials { + cert, err := tls.LoadX509KeyPair(testdata.Path(certPath), testdata.Path(keyPath)) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(%q, %q) failed: %v", certPath, keyPath, err) + } + b, err := os.ReadFile(testdata.Path(rootsPath)) + if err != nil { + t.Fatalf("os.ReadFile(%q) failed: %v", rootsPath, err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + t.Fatal("failed to append certificates") } + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + InsecureSkipVerify: true, + }) } // checkForHealthyStream attempts to create a stream and return error if any. diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index c0d85b2a88d8..258ef7411cf0 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -297,7 +297,7 @@ type server struct { port string startedErr chan error // error (or nil) with server start value mu sync.Mutex - conns map[ServerTransport]bool + conns map[ServerTransport]net.Conn h *testStreamHandler ready chan struct{} channelzID *channelz.Identifier @@ -329,13 +329,14 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT return } s.port = p - s.conns = make(map[ServerTransport]bool) + s.conns = make(map[ServerTransport]net.Conn) s.startedErr <- nil for { conn, err := s.lis.Accept() if err != nil { return } + rawConn := conn transport, err := NewServerTransport(conn, serverConfig) if err != nil { return @@ -346,7 +347,7 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT transport.Close(errors.New("s.conns is nil")) return } - 
s.conns[transport] = true + s.conns[transport] = rawConn h := &testStreamHandler{t: transport.(*http2Server)} s.h = h s.mu.Unlock() From 7eb57278c062ddc12d79d425f7ce7a5d19fb13e5 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 27 Jun 2023 13:37:55 -0700 Subject: [PATCH 979/998] xds: switch EDS watch to new generic xdsClient API (#6414) --- .../clusterresolver/resource_resolver_eds.go | 46 ++-- xds/internal/xdsclient/client.go | 1 - xds/internal/xdsclient/clientimpl_watchers.go | 31 --- xds/internal/xdsclient/tests/dump_test.go | 8 +- .../xdsclient/tests/eds_watchers_test.go | 211 +++++++++--------- .../tests/federation_watchers_test.go | 24 +- .../xdsclient/tests/resource_update_test.go | 20 +- .../xdsclient/xdsresource/type_eds.go | 8 - 8 files changed, 156 insertions(+), 193 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go index 2517cf49159c..6fec20151f42 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -25,11 +25,8 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -type edsResourceWatcher interface { - WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() -} - type edsDiscoveryMechanism struct { + nameToWatch string cancelWatch func() topLevelResolver topLevelResolver stopped *grpcsync.Event @@ -64,31 +61,44 @@ func (er *edsDiscoveryMechanism) stop() { er.cancelWatch() } -func (er *edsDiscoveryMechanism) handleEndpointsUpdate(update xdsresource.EndpointsUpdate, err error) { - if er.stopped.HasFired() { - return +// newEDSResolver returns an implementation of the endpointsResolver interface +// that uses EDS to resolve the given name to endpoints. 
+func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{ + nameToWatch: nameToWatch, + topLevelResolver: topLevelResolver, + stopped: grpcsync.NewEvent(), } + ret.cancelWatch = xdsresource.WatchEndpoints(producer, nameToWatch, ret) + return ret +} - if err != nil { - er.topLevelResolver.onError(err) +// OnUpdate is invoked to report an update for the resource being watched. +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData) { + if er.stopped.HasFired() { return } er.mu.Lock() - er.update = update + er.update = update.Resource er.updateReceived = true er.mu.Unlock() er.topLevelResolver.onUpdate() } -// newEDSResolver returns an implementation of the endpointsResolver interface -// that uses EDS to resolve the given name to endpoints. -func newEDSResolver(nameToWatch string, watcher edsResourceWatcher, topLevelResolver topLevelResolver) *edsDiscoveryMechanism { - ret := &edsDiscoveryMechanism{ - topLevelResolver: topLevelResolver, - stopped: grpcsync.NewEvent(), +func (er *edsDiscoveryMechanism) OnError(err error) { + if er.stopped.HasFired() { + return } - ret.cancelWatch = watcher.WatchEndpoints(nameToWatch, ret.handleEndpointsUpdate) - return ret + + er.topLevelResolver.onError(err) +} + +func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { + if er.stopped.HasFired() { + return + } + + er.topLevelResolver.onError(xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Endpoints not found in received response", er.nameToWatch)) } diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go index cc39fb2e4d16..44f6d3bc0a1c 100644 --- a/xds/internal/xdsclient/client.go +++ b/xds/internal/xdsclient/client.go @@ -33,7 +33,6 @@ type XDSClient interface { WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() WatchRouteConfig(string, 
func(xdsresource.RouteConfigUpdate, error)) func() WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() - WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func() // WatchResource uses xDS to discover the resource associated with the // provided resource name. The resource type implementation determines how diff --git a/xds/internal/xdsclient/clientimpl_watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go index 3c3adad5341c..e503349dbc29 100644 --- a/xds/internal/xdsclient/clientimpl_watchers.go +++ b/xds/internal/xdsclient/clientimpl_watchers.go @@ -112,37 +112,6 @@ func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.Clust return xdsresource.WatchCluster(c, resourceName, watcher) } -// This is only required temporarily, while we modify the -// clientImpl.WatchEndpoints API to be implemented via the wrapper -// WatchEndpoints() API which calls the WatchResource() API. -type endpointsWatcher struct { - resourceName string - cb func(xdsresource.EndpointsUpdate, error) -} - -func (c *endpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) { - c.cb(update.Resource, nil) -} - -func (c *endpointsWatcher) OnError(err error) { - c.cb(xdsresource.EndpointsUpdate{}, err) -} - -func (c *endpointsWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Endpoints not found in received response", c.resourceName) - c.cb(xdsresource.EndpointsUpdate{}, err) -} - -// WatchEndpoints uses EDS to discover information about the -// ClusterLoadAssignment resource identified by resourceName. -// -// WatchEndpoints can be called multiple times, with same or different -// clusterNames. Each call will start an independent watcher for the resource. 
-func (c *clientImpl) WatchEndpoints(resourceName string, cb func(xdsresource.EndpointsUpdate, error)) (cancel func()) { - watcher := &endpointsWatcher{resourceName: resourceName, cb: cb} - return xdsresource.WatchEndpoints(c, resourceName, watcher) -} - // WatchResource uses xDS to discover the resource associated with the provided // resource name. The resource type implementation determines how xDS requests // are sent out and how responses are deserialized and validated. Upon receipt diff --git a/xds/internal/xdsclient/tests/dump_test.go b/xds/internal/xdsclient/tests/dump_test.go index 4fe4ffb0e864..5f2c5e05e4dd 100644 --- a/xds/internal/xdsclient/tests/dump_test.go +++ b/xds/internal/xdsclient/tests/dump_test.go @@ -60,6 +60,12 @@ func compareDump(ctx context.Context, client xdsclient.XDSClient, want map[strin } } +type noopEndpointsWatcher struct{} + +func (noopEndpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) {} +func (noopEndpointsWatcher) OnError(err error) {} +func (noopEndpointsWatcher) OnResourceDoesNotExist() {} + func (s) TestDumpResources(t *testing.T) { // Initialize the xDS resources to be used in this test. 
ldsTargets := []string{"lds.target.good:0000", "lds.target.good:1111"} @@ -122,7 +128,7 @@ func (s) TestDumpResources(t *testing.T) { client.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) } for _, target := range edsTargets { - client.WatchEndpoints(target, func(xdsresource.EndpointsUpdate, error) {}) + xdsresource.WatchEndpoints(client, target, noopEndpointsWatcher{}) } want := map[string]map[string]xdsresource.UpdateWithMD{ "type.googleapis.com/envoy.config.listener.v3.Listener": { diff --git a/xds/internal/xdsclient/tests/eds_watchers_test.go b/xds/internal/xdsclient/tests/eds_watchers_test.go index 4cc365e70ead..0d81c3848c8d 100644 --- a/xds/internal/xdsclient/tests/eds_watchers_test.go +++ b/xds/internal/xdsclient/tests/eds_watchers_test.go @@ -52,6 +52,31 @@ const ( edsPort3 = 3 ) +type endpointsUpdateErrTuple struct { + update xdsresource.EndpointsUpdate + err error +} + +type endpointsWatcher struct { + updateCh *testutils.Channel +} + +func newEndpointsWatcher() *endpointsWatcher { + return &endpointsWatcher{updateCh: testutils.NewChannel()} +} + +func (ew *endpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) { + ew.updateCh.Send(endpointsUpdateErrTuple{update: update.Resource}) +} + +func (ew *endpointsWatcher) OnError(err error) { + ew.updateCh.SendOrFail(endpointsUpdateErrTuple{err: err}) +} + +func (ew *endpointsWatcher) OnResourceDoesNotExist() { + ew.updateCh.SendOrFail(endpointsUpdateErrTuple{err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Endpoints not found in received response")}) +} + // badEndpointsResource returns a endpoints resource for the given // edsServiceName which contains an endpoint with a load_balancing weight of // `0`. This is expected to be NACK'ed by the xDS client. 
@@ -71,19 +96,19 @@ const wantEndpointsNACKErr = "EDS response contains an endpoint with zero weight // // Returns an error if no update is received before the context deadline expires // or the received update does not match the expected one. -func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.EndpointsUpdateErrTuple) error { +func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate endpointsUpdateErrTuple) error { u, err := updateCh.Receive(ctx) if err != nil { return fmt.Errorf("timeout when waiting for a endpoints resource from the management server: %v", err) } - got := u.(xdsresource.EndpointsUpdateErrTuple) - if wantUpdate.Err != nil { - if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { + got := u.(endpointsUpdateErrTuple) + if wantUpdate.err != nil { + if gotType, wantType := xdsresource.ErrType(got.err), xdsresource.ErrType(wantUpdate.err); gotType != wantType { return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) } } cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw")} - if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { + if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" { return fmt.Errorf("received unepected diff in the endpoints resource update: (-want, got):\n%s", diff) } return nil @@ -121,7 +146,7 @@ func (s) TestEDSWatch(t *testing.T) { watchedResource *v3endpointpb.ClusterLoadAssignment // The resource being watched. updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update. notWatchedResource *v3endpointpb.ClusterLoadAssignment // A resource which is not being watched. 
- wantUpdate xdsresource.EndpointsUpdateErrTuple + wantUpdate endpointsUpdateErrTuple }{ { desc: "old style resource", @@ -129,8 +154,8 @@ func (s) TestEDSWatch(t *testing.T) { watchedResource: e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}), notWatchedResource: e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}), - wantUpdate: xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdate: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -152,8 +177,8 @@ func (s) TestEDSWatch(t *testing.T) { watchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}), notWatchedResource: e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}), - wantUpdate: xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdate: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -186,10 +211,8 @@ func (s) TestEDSWatch(t *testing.T) { // Register a watch for a endpoint resource and have the watch // callback push the received update on to a channel. - updateCh := testutils.NewChannel() - edsCancel := client.WatchEndpoints(test.resourceName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, test.resourceName, ew) // Configure the management server to return a single endpoint // resource, corresponding to the one being watched. 
@@ -205,7 +228,7 @@ func (s) TestEDSWatch(t *testing.T) { } // Verify the contents of the received update. - if err := verifyEndpointsUpdate(ctx, updateCh, test.wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew.updateCh, test.wantUpdate); err != nil { t.Fatal(err) } @@ -219,7 +242,7 @@ func (s) TestEDSWatch(t *testing.T) { if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } - if err := verifyNoEndpointsUpdate(ctx, updateCh); err != nil { + if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil { t.Fatal(err) } @@ -234,7 +257,7 @@ func (s) TestEDSWatch(t *testing.T) { if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } - if err := verifyNoEndpointsUpdate(ctx, updateCh); err != nil { + if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil { t.Fatal(err) } }) @@ -260,16 +283,16 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { resourceName string watchedResource *v3endpointpb.ClusterLoadAssignment // The resource being watched. updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update. 
- wantUpdateV1 xdsresource.EndpointsUpdateErrTuple - wantUpdateV2 xdsresource.EndpointsUpdateErrTuple + wantUpdateV1 endpointsUpdateErrTuple + wantUpdateV2 endpointsUpdateErrTuple }{ { desc: "old style resource", resourceName: edsName, watchedResource: e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}), - wantUpdateV1: xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdateV1: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -284,8 +307,8 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { }, }, }, - wantUpdateV2: xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdateV2: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, @@ -306,8 +329,8 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { resourceName: edsNameNewStyle, watchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}), - wantUpdateV1: xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdateV1: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -322,8 +345,8 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { }, }, }, - wantUpdateV2: xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdateV2: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: 
[]xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, @@ -356,15 +379,11 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { // Register two watches for the same endpoint resource and have the // callbacks push the received updates on to a channel. - updateCh1 := testutils.NewChannel() - edsCancel1 := client.WatchEndpoints(test.resourceName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, test.resourceName, ew1) defer edsCancel1() - updateCh2 := testutils.NewChannel() - edsCancel2 := client.WatchEndpoints(test.resourceName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, test.resourceName, ew2) // Configure the management server to return a single endpoint // resource, corresponding to the one being watched. @@ -380,10 +399,10 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { } // Verify the contents of the received update. 
- if err := verifyEndpointsUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, test.wantUpdateV1); err != nil { t.Fatal(err) } - if err := verifyEndpointsUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, test.wantUpdateV1); err != nil { t.Fatal(err) } @@ -394,10 +413,10 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } - if err := verifyNoEndpointsUpdate(ctx, updateCh1); err != nil { + if err := verifyNoEndpointsUpdate(ctx, ew1.updateCh); err != nil { t.Fatal(err) } - if err := verifyNoEndpointsUpdate(ctx, updateCh2); err != nil { + if err := verifyNoEndpointsUpdate(ctx, ew2.updateCh); err != nil { t.Fatal(err) } @@ -411,10 +430,10 @@ func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } - if err := verifyEndpointsUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, test.wantUpdateV2); err != nil { t.Fatal(err) } - if err := verifyNoEndpointsUpdate(ctx, updateCh2); err != nil { + if err := verifyNoEndpointsUpdate(ctx, ew2.updateCh); err != nil { t.Fatal(err) } }) @@ -442,22 +461,16 @@ func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { // Register two watches for the same endpoint resource and have the // callbacks push the received updates on to a channel. 
- updateCh1 := testutils.NewChannel() - edsCancel1 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, edsName, ew1) defer edsCancel1() - updateCh2 := testutils.NewChannel() - edsCancel2 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, edsName, ew2) defer edsCancel2() // Register the third watch for a different endpoint resource. - updateCh3 := testutils.NewChannel() - edsCancel3 := client.WatchEndpoints(edsNameNewStyle, func(u xdsresource.EndpointsUpdate, err error) { - updateCh3.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew3 := newEndpointsWatcher() + edsCancel3 := xdsresource.WatchEndpoints(client, edsNameNewStyle, ew3) defer edsCancel3() // Configure the management server to return two endpoint resources, @@ -479,8 +492,8 @@ func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { // Verify the contents of the received update for the all watchers. The two // resources returned differ only in the resource name. Therefore the // expected update is the same for all the watchers. 
- wantUpdate := xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -495,13 +508,13 @@ func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { }, }, } - if err := verifyEndpointsUpdate(ctx, updateCh1, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil { t.Fatal(err) } - if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { t.Fatal(err) } - if err := verifyEndpointsUpdate(ctx, updateCh3, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew3.updateCh, wantUpdate); err != nil { t.Fatal(err) } } @@ -544,10 +557,8 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { // Register a watch for an endpoint resource and have the watch callback // push the received update on to a channel. - updateCh1 := testutils.NewChannel() - edsCancel1 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, edsName, ew1) defer edsCancel1() // Configure the management server to return a single endpoint resource, @@ -564,8 +575,8 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { } // Verify the contents of the received update. 
- wantUpdate := xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -580,7 +591,7 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { }, }, } - if err := verifyEndpointsUpdate(ctx, updateCh1, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil { t.Fatal(err) } select { @@ -591,12 +602,10 @@ func (s) TestEDSWatch_ResourceCaching(t *testing.T) { // Register another watch for the same resource. This should get the update // from the cache. - updateCh2 := testutils.NewChannel() - edsCancel2 := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, edsName, ew2) defer edsCancel2() - if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { t.Fatal(err) } @@ -633,10 +642,8 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { // Register a watch for a resource which is expected to fail with an error // after the watch expiry timer fires. - updateCh := testutils.NewChannel() - edsCancel := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, edsName, ew) defer edsCancel() // Wait for the watch expiry timer to fire. 
@@ -646,7 +653,7 @@ func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") - if err := verifyEndpointsUpdate(ctx, updateCh, xdsresource.EndpointsUpdateErrTuple{Err: wantErr}); err != nil { + if err := verifyEndpointsUpdate(ctx, ew.updateCh, endpointsUpdateErrTuple{err: wantErr}); err != nil { t.Fatal(err) } } @@ -676,10 +683,8 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { // Register a watch for an endpoint resource and have the watch callback // push the received update on to a channel. - updateCh := testutils.NewChannel() - edsCancel := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, edsName, ew) defer edsCancel() // Configure the management server to return a single endpoint resource, @@ -696,8 +701,8 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { } // Verify the contents of the received update. - wantUpdate := xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -712,14 +717,14 @@ func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { }, }, } - if err := verifyEndpointsUpdate(ctx, updateCh, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew.updateCh, wantUpdate); err != nil { t.Fatal(err) } // Wait for the watch expiry timer to fire, and verify that the callback is // not invoked. 
<-time.After(defaultTestWatchExpiryTimeout) - if err := verifyNoEndpointsUpdate(ctx, updateCh); err != nil { + if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil { t.Fatal(err) } } @@ -741,12 +746,8 @@ func (s) TestEDSWatch_NACKError(t *testing.T) { // Register a watch for a route configuration resource and have the watch // callback push the received update on to a channel. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - updateCh := testutils.NewChannel() - edsCancel := client.WatchEndpoints(edsName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh.SendContext(ctx, xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, edsName, ew) defer edsCancel() // Configure the management server to return a single route configuration @@ -756,16 +757,18 @@ func (s) TestEDSWatch_NACKError(t *testing.T) { Endpoints: []*v3endpointpb.ClusterLoadAssignment{badEndpointsResource(edsName, edsHost1, []uint32{edsPort1})}, SkipValidation: true, } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } // Verify that the expected error is propagated to the watcher. - u, err := updateCh.Receive(ctx) + u, err := ew.updateCh.Receive(ctx) if err != nil { t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err) } - gotErr := u.(xdsresource.EndpointsUpdateErrTuple).Err + gotErr := u.(endpointsUpdateErrTuple).err if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) { t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr) } @@ -791,19 +794,13 @@ func (s) TestEDSWatch_PartialValid(t *testing.T) { // Register two watches for two endpoint resources. 
The first watch is // expected to receive an error because the received resource is NACKed. // The second watch is expected to get a good update. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() badResourceName := rdsName - updateCh1 := testutils.NewChannel() - edsCancel1 := client.WatchEndpoints(badResourceName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh1.SendContext(ctx, xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, badResourceName, ew1) defer edsCancel1() goodResourceName := ldsNameNewStyle - updateCh2 := testutils.NewChannel() - edsCancel2 := client.WatchEndpoints(goodResourceName, func(u xdsresource.EndpointsUpdate, err error) { - updateCh2.SendContext(ctx, xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, goodResourceName, ew2) defer edsCancel2() // Configure the management server to return two endpoints resources, @@ -816,24 +813,26 @@ func (s) TestEDSWatch_PartialValid(t *testing.T) { }, SkipValidation: true, } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if err := mgmtServer.Update(ctx, resources); err != nil { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } // Verify that the expected error is propagated to the watcher which // requested for the bad resource. 
- u, err := updateCh1.Receive(ctx) + u, err := ew1.updateCh.Receive(ctx) if err != nil { t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err) } - gotErr := u.(xdsresource.EndpointsUpdateErrTuple).Err + gotErr := u.(endpointsUpdateErrTuple).err if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) { t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr) } // Verify that the watcher watching the good resource receives an update. - wantUpdate := xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, @@ -848,7 +847,7 @@ func (s) TestEDSWatch_PartialValid(t *testing.T) { }, }, } - if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/tests/federation_watchers_test.go b/xds/internal/xdsclient/tests/federation_watchers_test.go index 4298ce6c0885..ed59b63ac794 100644 --- a/xds/internal/xdsclient/tests/federation_watchers_test.go +++ b/xds/internal/xdsclient/tests/federation_watchers_test.go @@ -275,16 +275,12 @@ func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { // Register two watches for endpoint resources with the same query string, // but context parameters in different order. 
- updateCh1 := testutils.NewChannel() - cdsCancel1 := client.WatchEndpoints(resourceName1, func(u xdsresource.EndpointsUpdate, err error) { - updateCh1.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) - defer cdsCancel1() - updateCh2 := testutils.NewChannel() - cdsCancel2 := client.WatchEndpoints(resourceName2, func(u xdsresource.EndpointsUpdate, err error) { - updateCh2.Send(xdsresource.EndpointsUpdateErrTuple{Update: u, Err: err}) - }) - defer cdsCancel2() + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, resourceName1, ew1) + defer edsCancel1() + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, resourceName2, ew2) + defer edsCancel2() // Configure the management server for the non-default authority to return a // single endpoints resource, corresponding to the watches registered. @@ -299,8 +295,8 @@ func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) } - wantUpdate := xdsresource.EndpointsUpdateErrTuple{ - Update: xdsresource.EndpointsUpdate{ + wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ Localities: []xdsresource.Locality{ { Endpoints: []xdsresource.Endpoint{{Address: "localhost:666", Weight: 1}}, @@ -315,10 +311,10 @@ func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { }, } // Verify the contents of the received update. 
- if err := verifyEndpointsUpdate(ctx, updateCh1, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil { t.Fatal(err) } - if err := verifyEndpointsUpdate(ctx, updateCh2, wantUpdate); err != nil { + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { t.Fatal(err) } } diff --git a/xds/internal/xdsclient/tests/resource_update_test.go b/xds/internal/xdsclient/tests/resource_update_test.go index 7dd368aa5e24..a0b326186e88 100644 --- a/xds/internal/xdsclient/tests/resource_update_test.go +++ b/xds/internal/xdsclient/tests/resource_update_test.go @@ -1072,18 +1072,10 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { defer close() t.Logf("Created xDS client to %s", mgmtServer.Address) - // A wrapper struct to wrap the update and the associated error, as - // received by the resource watch callback. - type updateAndErr struct { - update xdsresource.EndpointsUpdate - err error - } - updateAndErrCh := testutils.NewChannel() - // Register a watch, and push the results on to a channel. - client.WatchEndpoints(test.resourceName, func(update xdsresource.EndpointsUpdate, err error) { - updateAndErrCh.Send(updateAndErr{update: update, err: err}) - }) + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, test.resourceName, ew) + defer edsCancel() t.Logf("Registered a watch for Endpoint %q", test.resourceName) // Wait for the discovery request to be sent out. @@ -1109,12 +1101,12 @@ func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { // Wait for an update from the xDS client and compare with expected // update. 
- val, err = updateAndErrCh.Receive(ctx) + val, err = ew.updateCh.Receive(ctx) if err != nil { t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) } - gotUpdate := val.(updateAndErr).update - gotErr := val.(updateAndErr).err + gotUpdate := val.(endpointsUpdateErrTuple).update + gotErr := val.(endpointsUpdateErrTuple).err if (gotErr != nil) != (test.wantErr != "") { t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) } diff --git a/xds/internal/xdsclient/xdsresource/type_eds.go b/xds/internal/xdsclient/xdsresource/type_eds.go index ec70f32ca436..1254d250c99b 100644 --- a/xds/internal/xdsclient/xdsresource/type_eds.go +++ b/xds/internal/xdsclient/xdsresource/type_eds.go @@ -73,11 +73,3 @@ type EndpointsUpdate struct { // Raw is the resource from the xds response. Raw *anypb.Any } - -// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the -// results from unmarshal functions. It's used to pass unmarshal results of -// multiple resources together, e.g. in maps like `map[string]{Update,error}`. 
-type EndpointsUpdateErrTuple struct { - Update EndpointsUpdate - Err error -} From 575a9365fa2978366e4eac0fc8f85c0d6e813e6a Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 27 Jun 2023 17:11:30 -0400 Subject: [PATCH 980/998] xds: Fail xDS Server Serve() if called after Stop() or GracefulStop() (#6410) --- xds/server.go | 7 +++++ xds/server_test.go | 66 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/xds/server.go b/xds/server.go index 55b678bb78a0..c346c6a47c1b 100644 --- a/xds/server.go +++ b/xds/server.go @@ -173,6 +173,9 @@ func (s *GRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { func (s *GRPCServer) initXDSClient() error { s.clientMu.Lock() defer s.clientMu.Unlock() + if s.quit.HasFired() { + return grpc.ErrServerStopped + } if s.xdsC != nil { return nil @@ -332,6 +335,8 @@ func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { // corresponding pending RPCs on the client side will get notified by connection // errors. func (s *GRPCServer) Stop() { + s.clientMu.Lock() + defer s.clientMu.Unlock() s.quit.Fire() s.gs.Stop() if s.xdsC != nil { @@ -343,6 +348,8 @@ func (s *GRPCServer) Stop() { // from accepting new connections and RPCs and blocks until all the pending RPCs // are finished. func (s *GRPCServer) GracefulStop() { + s.clientMu.Lock() + defer s.clientMu.Unlock() s.quit.Fire() s.gs.GracefulStop() if s.xdsC != nil { diff --git a/xds/server_test.go b/xds/server_test.go index fbaee7d2f50e..b91049a05fde 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -25,6 +25,7 @@ import ( "net" "reflect" "strings" + "sync" "testing" "time" @@ -315,6 +316,24 @@ func (p *fakeProvider) Close() { p.Distributor.Stop() } +// setupClientOverride sets up an override for new xdsClient creation. 
+func setupClientOverride(t *testing.T) func() {
+	origNewXDSClient := newXDSClient
+	newXDSClient = func() (xdsclient.XDSClient, func(), error) {
+		c := fakeclient.NewClient()
+		c.SetBootstrapConfig(&bootstrap.Config{
+			XDSServer:                          xdstestutils.ServerConfigForAddress(t, "server-address"),
+			NodeProto:                          xdstestutils.EmptyNodeProtoV3,
+			ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate,
+			CertProviderConfigs:                certProviderConfigs,
+		})
+		return c, func() {}, nil
+	}
+	return func() {
+		newXDSClient = origNewXDSClient
+	}
+}
+
 // setupOverrides sets up overrides for bootstrap config, new xdsClient creation
 // and new gRPC.Server creation.
 func setupOverrides(t *testing.T) (*fakeGRPCServer, *testutils.Channel, func()) {
@@ -890,3 +909,50 @@ func verifyCertProviderNotCreated() error {
 	}
 	return nil
 }
+
+// TestServeReturnsErrorAfterClose tests that the xds Server returns
+// grpc.ErrServerStopped if Serve is called after Close on the server.
+func (s) TestServeReturnsErrorAfterClose(t *testing.T) {
+	cancel := setupClientOverride(t)
+	defer cancel()
+	server := NewGRPCServer()
+
+	lis, err := testutils.LocalTCPListener()
+	if err != nil {
+		t.Fatalf("testutils.LocalTCPListener() failed: %v", err)
+	}
+
+	server.Stop()
+	err = server.Serve(lis)
+	if err == nil || !strings.Contains(err.Error(), grpc.ErrServerStopped.Error()) {
+		t.Fatalf("server errored with wrong error, want: %v, got: %v", grpc.ErrServerStopped, err)
+	}
+}
+
+// TestServeAndCloseDoNotRace tests that Serve and Close on the xDS Server do
+// not race and leak the xDS Client. A leak would be found by the leak checker.
+func (s) TestServeAndCloseDoNotRace(t *testing.T) { + cleanup := setupClientOverride(t) + defer cleanup() + + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + server := NewGRPCServer() + wg.Add(1) + go func() { + server.Serve(lis) + wg.Done() + }() + wg.Add(1) + go func() { + server.Stop() + wg.Done() + }() + } + wg.Wait() +} From 07718ef6b327a1f786f19211b5ff6fd078b85cc3 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 27 Jun 2023 18:30:20 -0400 Subject: [PATCH 981/998] internal/xds/rbac: Add support for string matcher in RBAC header matching (#6419) --- internal/xds/rbac/matchers.go | 6 ++++++ internal/xds/rbac/rbac_engine_test.go | 1 + 2 files changed, 7 insertions(+) diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go index 9873da268db6..c9f71d32cbb2 100644 --- a/internal/xds/rbac/matchers.go +++ b/internal/xds/rbac/matchers.go @@ -285,6 +285,12 @@ func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) ( m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch(), headerMatcherConfig.InvertMatch) case *v3route_componentspb.HeaderMatcher_ContainsMatch: m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_StringMatch: + sm, err := internalmatcher.StringMatcherFromProto(headerMatcherConfig.GetStringMatch()) + if err != nil { + return nil, fmt.Errorf("invalid string matcher %+v: %v", headerMatcherConfig.GetStringMatch(), err) + } + m = internalmatcher.NewHeaderStringMatcher(headerMatcherConfig.Name, sm, headerMatcherConfig.InvertMatch) default: return nil, errors.New("unknown header matcher type") } diff --git a/internal/xds/rbac/rbac_engine_test.go 
b/internal/xds/rbac/rbac_engine_test.go
index 32c357f4953f..94464cf184ab 100644
--- a/internal/xds/rbac/rbac_engine_test.go
+++ b/internal/xds/rbac/rbac_engine_test.go
@@ -389,6 +389,7 @@ func (s) TestNewChainEngine(t *testing.T) {
 						{Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "GET"}}}},
 						{Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "GET"}}}},
 						{Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ContainsMatch{ContainsMatch: "GET"}}}},
+						{Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_StringMatch{StringMatch: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "GET"}}}}}},
 					}}},
 				},
 			},
From 67e881c3583ede465f8786e1c293e95faf00e16c Mon Sep 17 00:00:00 2001
From: Gregory Cooke
Date: Thu, 29 Jun 2023 15:45:33 -0400
Subject: [PATCH 982/998] xds: E2E Test for Audit Logging (#6377)

Add E2E Test for Audit Logging through the XDS path
---
 test/xds/xds_server_rbac_test.go | 96 ++++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+)

diff --git a/test/xds/xds_server_rbac_test.go b/test/xds/xds_server_rbac_test.go
index 346dfe816c18..914b59db31c6 100644
--- a/test/xds/xds_server_rbac_test.go
+++ b/test/xds/xds_server_rbac_test.go
@@ -20,14 +20,18 @@ package xds_test
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"net"
 	"strconv"
 	"strings"
 	"testing"
 
+	v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3"
 	v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
+	"github.com/google/go-cmp/cmp"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/authz/audit"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal" @@ -36,6 +40,7 @@ import ( "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" @@ -415,6 +420,8 @@ func (s) TestRBACHTTPFilter(t *testing.T) { rbacCfg *rpb.RBAC wantStatusEmptyCall codes.Code wantStatusUnaryCall codes.Code + wantAuthzOutcomes map[bool]int + eventContent *audit.Event }{ // This test tests an RBAC HTTP Filter which is configured to allow any RPC. // Any RPC passing through this RBAC HTTP Filter should proceed as normal. @@ -433,10 +440,30 @@ func (s) TestRBACHTTPFilter(t *testing.T) { }, }, }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + { + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "stat_logger", + TypedConfig: createXDSTypedStruct(t, map[string]interface{}{}, "stat_logger"), + }, + IsOptional: false, + }, + }, + }, }, }, wantStatusEmptyCall: codes.OK, wantStatusUnaryCall: codes.OK, + wantAuthzOutcomes: map[bool]int{true: 2, false: 0}, + // TODO(gtcooke94) add policy name (RBAC filter name) once + // https://github.com/grpc/grpc-go/pull/6327 is merged. + eventContent: &audit.Event{ + FullMethodName: "/grpc.testing.TestService/UnaryCall", + MatchedRule: "anyone", + Authorized: true, + }, }, // This test tests an RBAC HTTP Filter which is configured to allow only // RPC's with certain paths ("UnaryCall"). 
Only unary calls passing @@ -605,6 +632,12 @@ func (s) TestRBACHTTPFilter(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { func() { + lb := &loggerBuilder{ + authzDecisionStat: map[bool]int{true: 0, false: 0}, + lastEvent: &audit.Event{}, + } + audit.RegisterLoggerBuilder(lb) + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) defer cleanup1() @@ -660,6 +693,17 @@ func (s) TestRBACHTTPFilter(t *testing.T) { } // Toggle RBAC back on for next iterations. envconfig.XDSRBAC = true + + if test.wantAuthzOutcomes != nil { + if diff := cmp.Diff(lb.authzDecisionStat, test.wantAuthzOutcomes); diff != "" { + t.Fatalf("authorization decision do not match\ndiff (-got +want):\n%s", diff) + } + } + if test.eventContent != nil { + if diff := cmp.Diff(lb.lastEvent, test.eventContent); diff != "" { + t.Fatalf("unexpected event\ndiff (-got +want):\n%s", diff) + } + } }() }) } @@ -895,3 +939,55 @@ func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) } } + +type statAuditLogger struct { + authzDecisionStat map[bool]int // Map to hold counts of authorization decisions + lastEvent *audit.Event // Field to store last received event +} + +func (s *statAuditLogger) Log(event *audit.Event) { + s.authzDecisionStat[event.Authorized]++ + *s.lastEvent = *event +} + +type loggerBuilder struct { + authzDecisionStat map[bool]int + lastEvent *audit.Event +} + +func (loggerBuilder) Name() string { + return "stat_logger" +} + +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &statAuditLogger{ + authzDecisionStat: lb.authzDecisionStat, + lastEvent: lb.lastEvent, + } +} + +func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + return nil, nil +} + +// This is used when converting a 
custom config from raw JSON to a TypedStruct. +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/". +const typeURLPrefix = "grpc.authz.audit_logging/" + +// Builds custom configs for audit logger RBAC protos. +func createXDSTypedStruct(t *testing.T, in map[string]interface{}, name string) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + if err != nil { + t.Fatalf("createXDSTypedStruct failed during structpb.NewStruct: %v", err) + } + typedStruct := &v3xdsxdstypepb.TypedStruct{ + TypeUrl: typeURLPrefix + name, + Value: pb, + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + t.Fatalf("createXDSTypedStruct failed during anypb.New: %v", err) + } + return customConfig +} From ea492f555f677ec48b2429463ca2e889571a4d78 Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Fri, 30 Jun 2023 13:10:52 -0700 Subject: [PATCH 983/998] xdsclient: indicate authority serverURI in authority + transport logs (#6425) --- xds/internal/xdsclient/clientimpl_authority.go | 3 ++- xds/internal/xdsclient/logging.go | 12 +++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/xds/internal/xdsclient/clientimpl_authority.go b/xds/internal/xdsclient/clientimpl_authority.go index 2531b39472f5..925566cf44f3 100644 --- a/xds/internal/xdsclient/clientimpl_authority.go +++ b/xds/internal/xdsclient/clientimpl_authority.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -109,7 +110,7 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth serializer: c.serializer, resourceTypeGetter: c.resourceTypes.get, watchExpiryTimeout: c.watchExpiryTimeout, - logger: c.logger, + logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI)), }) if err != nil { return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) 
diff --git a/xds/internal/xdsclient/logging.go b/xds/internal/xdsclient/logging.go index e28ea0d04103..2269cb293da9 100644 --- a/xds/internal/xdsclient/logging.go +++ b/xds/internal/xdsclient/logging.go @@ -25,10 +25,16 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const prefix = "[xds-client %p] " - var logger = grpclog.Component("xds") func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) + return internalgrpclog.NewPrefixLogger(logger, clientPrefix(p)) +} + +func clientPrefix(p *clientImpl) string { + return fmt.Sprintf("[xds-client %p] ", p) +} + +func authorityPrefix(p *clientImpl, serverURI string) string { + return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI) } From 6b8f42742c884b61f45610f999cecbb0ad4ebff5 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 30 Jun 2023 13:13:35 -0700 Subject: [PATCH 984/998] orca: remove useless log statement (#6424) --- orca/orca.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/orca/orca.go b/orca/orca.go index 771db36af1c9..6b475562b1b9 100644 --- a/orca/orca.go +++ b/orca/orca.go @@ -49,9 +49,6 @@ func (loadParser) Parse(md metadata.MD) interface{} { if err != nil { logger.Infof("Parse failed: %v", err) } - if lr == nil && logger.V(2) { - logger.Infof("Missing ORCA load report data") - } return lr } From 620a118c67c6e2392562ba32352670dd92dd02b6 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Fri, 30 Jun 2023 17:34:16 -0400 Subject: [PATCH 985/998] xds/internal/balancer/clusterimpl: Switch cluster impl child to graceful switch (#6420) --- .../balancer/clusterimpl/clusterimpl.go | 51 ++++--------------- 1 file changed, 11 insertions(+), 40 deletions(-) diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index e1a18ae338d3..d316d6a62a91 100644 --- 
a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -69,6 +70,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba requestCountMax: defaultRequestCountMax, } b.logger = prefixLogger(b) + b.child = gracefulswitch.NewBalancer(b, bOpts) go b.run() b.logger.Infof("Created") return b @@ -102,7 +104,7 @@ type clusterImplBalancer struct { xdsClient xdsclient.XDSClient config *LBConfig - childLB balancer.Balancer + child *gracefulswitch.Balancer cancelLoadReport func() edsServiceName string lrsServer *bootstrap.ServerConfig @@ -251,31 +253,19 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) return err } - // If child policy is a different type, recreate the sub-balancer. if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { - if b.childLB != nil { - b.childLB.Close() + if err := b.child.SwitchTo(bb); err != nil { + return fmt.Errorf("error switching to child of type %q: %v", newConfig.ChildPolicy.Name, err) } - b.childLB = bb.Build(b, b.bOpts) } b.config = newConfig - if b.childLB == nil { - // This is not an expected situation, and should be super rare in - // practice. - // - // When this happens, we already applied all the other configurations - // (drop/circuit breaking), but there's no child policy. This balancer - // will be stuck, and we report the error to the parent. 
- return fmt.Errorf("child policy is nil, this means balancer %q's Build() returned nil", newConfig.ChildPolicy.Name) - } - // Notify run() of this new config, in case drop and request counter need // update (which means a new picker needs to be generated). b.pickerUpdateCh.Put(newConfig) // Addresses and sub-balancer config are sent to sub-balancer. - return b.childLB.UpdateClientConnState(balancer.ClientConnState{ + return b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, BalancerConfig: b.config.ChildPolicy.Config, }) @@ -286,10 +276,7 @@ func (b *clusterImplBalancer) ResolverError(err error) { b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) return } - - if b.childLB != nil { - b.childLB.ResolverError(err) - } + b.child.ResolverError(err) } func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { @@ -318,9 +305,7 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer } } b.scWrappersMu.Unlock() - if b.childLB != nil { - b.childLB.UpdateSubConnState(sc, s) - } + b.child.UpdateSubConnState(sc, s) } func (b *clusterImplBalancer) Close() { @@ -328,29 +313,15 @@ func (b *clusterImplBalancer) Close() { b.closed.Fire() b.mu.Unlock() - if b.childLB != nil { - b.childLB.Close() - b.childLB = nil - b.childState = balancer.State{} - } + b.child.Close() + b.childState = balancer.State{} b.pickerUpdateCh.Close() <-b.done.Done() b.logger.Infof("Shutdown") } func (b *clusterImplBalancer) ExitIdle() { - if b.childLB == nil { - return - } - if ei, ok := b.childLB.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - // Fallback for children that don't support ExitIdle -- connect to all - // SubConns. - for _, sc := range b.scWrappers { - sc.Connect() - } + b.child.ExitIdle() } // Override methods to accept updates from the child LB. 
From 51042db745b60c9f20fb163fe7083b72a46b219d Mon Sep 17 00:00:00 2001 From: my4 <38831921+my4-dev@users.noreply.github.com> Date: Sat, 1 Jul 2023 07:07:46 +0900 Subject: [PATCH 986/998] internal/grpcsync: Provide an internal-only pub-sub type API (#6167) Co-authored-by: Easwar Swaminathan --- internal/grpcsync/pubsub.go | 136 ++++++++++++++++++++ internal/grpcsync/pubsub_test.go | 211 +++++++++++++++++++++++++++++++ 2 files changed, 347 insertions(+) create mode 100644 internal/grpcsync/pubsub.go create mode 100644 internal/grpcsync/pubsub_test.go diff --git a/internal/grpcsync/pubsub.go b/internal/grpcsync/pubsub.go new file mode 100644 index 000000000000..f58b5ffa6b1e --- /dev/null +++ b/internal/grpcsync/pubsub.go @@ -0,0 +1,136 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg interface{}) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. 
+// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, and +// it is guaranteed that no more subscriber callback will be invoked. +type PubSub struct { + cs *CallbackSerializer + cancel context.CancelFunc + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg interface{} + subscribers map[Subscriber]bool + stopped bool +} + +// NewPubSub returns a new PubSub instance. +func NewPubSub() *PubSub { + ctx, cancel := context.WithCancel(context.Background()) + return &PubSub{ + cs: NewCallbackSerializer(ctx), + cancel: cancel, + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + if ps.stopped { + return func() {} + } + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. 
+func (ps *PubSub) Publish(msg interface{}) { + ps.mu.Lock() + defer ps.mu.Unlock() + + if ps.stopped { + return + } + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Stop shuts down the PubSub and releases any resources allocated by it. +// It is guaranteed that no subscriber callbacks would be invoked once this +// method returns. +func (ps *PubSub) Stop() { + ps.mu.Lock() + defer ps.mu.Unlock() + ps.stopped = true + + ps.cancel() +} diff --git a/internal/grpcsync/pubsub_test.go b/internal/grpcsync/pubsub_test.go new file mode 100644 index 000000000000..9aebf3593a5b --- /dev/null +++ b/internal/grpcsync/pubsub_test.go @@ -0,0 +1,211 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpcsync + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +type testSubscriber struct { + mu sync.Mutex + msgs []int + onMsgCh chan struct{} +} + +func newTestSubscriber(chSize int) *testSubscriber { + return &testSubscriber{onMsgCh: make(chan struct{}, chSize)} +} + +func (ts *testSubscriber) OnMessage(msg interface{}) { + ts.mu.Lock() + defer ts.mu.Unlock() + ts.msgs = append(ts.msgs, msg.(int)) + select { + case ts.onMsgCh <- struct{}{}: + default: + } +} + +func (ts *testSubscriber) receivedMsgs() []int { + ts.mu.Lock() + defer ts.mu.Unlock() + + msgs := make([]int, len(ts.msgs)) + copy(msgs, ts.msgs) + + return msgs +} + +func (s) TestPubSub_PublishNoMsg(t *testing.T) { + pubsub := NewPubSub() + defer pubsub.Stop() + + ts := newTestSubscriber(1) + pubsub.Subscribe(ts) + + select { + case <-ts.onMsgCh: + t.Fatalf("Subscriber callback invoked when no message was published") + case <-time.After(defaultTestShortTimeout): + } +} + +func (s) TestPubSub_PublishMsgs_RegisterSubs_And_Stop(t *testing.T) { + pubsub := NewPubSub() + + const numPublished = 10 + + ts1 := newTestSubscriber(numPublished) + pubsub.Subscribe(ts1) + wantMsgs1 := []int{} + + var wg sync.WaitGroup + wg.Add(2) + // Publish ten messages on the pubsub and ensure that they are received in order by the subscriber. 
+ go func() { + for i := 0; i < numPublished; i++ { + pubsub.Publish(i) + wantMsgs1 = append(wantMsgs1, i) + } + wg.Done() + }() + + isTimeout := false + go func() { + for i := 0; i < numPublished; i++ { + select { + case <-ts1.onMsgCh: + case <-time.After(defaultTestTimeout): + isTimeout = true + } + } + wg.Done() + }() + + wg.Wait() + if isTimeout { + t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") + } + if gotMsgs1 := ts1.receivedMsgs(); !cmp.Equal(gotMsgs1, wantMsgs1) { + t.Fatalf("Received messages is %v, want %v", gotMsgs1, wantMsgs1) + } + + // Register another subscriber and ensure that it receives the last published message. + ts2 := newTestSubscriber(numPublished) + pubsub.Subscribe(ts2) + wantMsgs2 := wantMsgs1[len(wantMsgs1)-1:] + + select { + case <-ts2.onMsgCh: + case <-time.After(defaultTestShortTimeout): + t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") + } + if gotMsgs2 := ts2.receivedMsgs(); !cmp.Equal(gotMsgs2, wantMsgs2) { + t.Fatalf("Received messages is %v, want %v", gotMsgs2, wantMsgs2) + } + + wg.Add(3) + // Publish ten messages on the pubsub and ensure that they are received in order by the subscribers. 
+ go func() { + for i := 0; i < numPublished; i++ { + pubsub.Publish(i) + wantMsgs1 = append(wantMsgs1, i) + wantMsgs2 = append(wantMsgs2, i) + } + wg.Done() + }() + errCh := make(chan error, 1) + go func() { + for i := 0; i < numPublished; i++ { + select { + case <-ts1.onMsgCh: + case <-time.After(defaultTestTimeout): + errCh <- fmt.Errorf("") + } + } + wg.Done() + }() + go func() { + for i := 0; i < numPublished; i++ { + select { + case <-ts2.onMsgCh: + case <-time.After(defaultTestTimeout): + errCh <- fmt.Errorf("") + } + } + wg.Done() + }() + wg.Wait() + select { + case <-errCh: + t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") + default: + } + if gotMsgs1 := ts1.receivedMsgs(); !cmp.Equal(gotMsgs1, wantMsgs1) { + t.Fatalf("Received messages is %v, want %v", gotMsgs1, wantMsgs1) + } + if gotMsgs2 := ts2.receivedMsgs(); !cmp.Equal(gotMsgs2, wantMsgs2) { + t.Fatalf("Received messages is %v, want %v", gotMsgs2, wantMsgs2) + } + + pubsub.Stop() + + go func() { + pubsub.Publish(99) + }() + // Ensure that the subscriber callback is not invoked as instantiated + // pubsub has already closed. + select { + case <-ts1.onMsgCh: + t.Fatalf("The callback was invoked after pubsub being stopped") + case <-ts2.onMsgCh: + t.Fatalf("The callback was invoked after pubsub being stopped") + case <-time.After(defaultTestShortTimeout): + } +} + +func (s) TestPubSub_PublishMsgs_BeforeRegisterSub(t *testing.T) { + pubsub := NewPubSub() + defer pubsub.Stop() + + const numPublished = 3 + for i := 0; i < numPublished; i++ { + pubsub.Publish(i) + } + + ts := newTestSubscriber(numPublished) + pubsub.Subscribe(ts) + + wantMsgs := []int{numPublished - 1} + // Ensure that the subscriber callback is invoked with a previously + // published message. 
+ select { + case <-ts.onMsgCh: + if gotMsgs := ts.receivedMsgs(); !cmp.Equal(gotMsgs, wantMsgs) { + t.Fatalf("Received messages is %v, want %v", gotMsgs, wantMsgs) + } + case <-time.After(defaultTestShortTimeout): + t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") + } +} From acbfcbb8e8f555225c86b05cb1c0f5b0674b12af Mon Sep 17 00:00:00 2001 From: Arvind Bright Date: Fri, 30 Jun 2023 16:31:29 -0700 Subject: [PATCH 987/998] internal/grpcsync: refactor test (#6427) --- internal/grpcsync/pubsub_test.go | 111 +++++++++++++------------------ 1 file changed, 45 insertions(+), 66 deletions(-) diff --git a/internal/grpcsync/pubsub_test.go b/internal/grpcsync/pubsub_test.go index 9aebf3593a5b..c610f99b2633 100644 --- a/internal/grpcsync/pubsub_test.go +++ b/internal/grpcsync/pubsub_test.go @@ -19,44 +19,26 @@ package grpcsync import ( - "fmt" "sync" "testing" "time" - - "github.com/google/go-cmp/cmp" ) type testSubscriber struct { - mu sync.Mutex - msgs []int - onMsgCh chan struct{} + onMsgCh chan int } func newTestSubscriber(chSize int) *testSubscriber { - return &testSubscriber{onMsgCh: make(chan struct{}, chSize)} + return &testSubscriber{onMsgCh: make(chan int, chSize)} } func (ts *testSubscriber) OnMessage(msg interface{}) { - ts.mu.Lock() - defer ts.mu.Unlock() - ts.msgs = append(ts.msgs, msg.(int)) select { - case ts.onMsgCh <- struct{}{}: + case ts.onMsgCh <- msg.(int): default: } } -func (ts *testSubscriber) receivedMsgs() []int { - ts.mu.Lock() - defer ts.mu.Unlock() - - msgs := make([]int, len(ts.msgs)) - copy(msgs, ts.msgs) - - return msgs -} - func (s) TestPubSub_PublishNoMsg(t *testing.T) { pubsub := NewPubSub() defer pubsub.Stop() @@ -66,7 +48,7 @@ func (s) TestPubSub_PublishNoMsg(t *testing.T) { select { case <-ts.onMsgCh: - t.Fatalf("Subscriber callback invoked when no message was published") + t.Fatal("Subscriber callback invoked when no message was published") case <-time.After(defaultTestShortTimeout): } } @@ -78,7 
+60,6 @@ func (s) TestPubSub_PublishMsgs_RegisterSubs_And_Stop(t *testing.T) { ts1 := newTestSubscriber(numPublished) pubsub.Subscribe(ts1) - wantMsgs1 := []int{} var wg sync.WaitGroup wg.Add(2) @@ -86,43 +67,41 @@ func (s) TestPubSub_PublishMsgs_RegisterSubs_And_Stop(t *testing.T) { go func() { for i := 0; i < numPublished; i++ { pubsub.Publish(i) - wantMsgs1 = append(wantMsgs1, i) } wg.Done() }() - isTimeout := false go func() { + defer wg.Done() for i := 0; i < numPublished; i++ { select { - case <-ts1.onMsgCh: + case m := <-ts1.onMsgCh: + if m != i { + t.Errorf("Received unexpected message: %q; want: %q", m, i) + return + } case <-time.After(defaultTestTimeout): - isTimeout = true + t.Error("Timeout when expecting the onMessage() callback to be invoked") + return } } - wg.Done() }() - wg.Wait() - if isTimeout { - t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") - } - if gotMsgs1 := ts1.receivedMsgs(); !cmp.Equal(gotMsgs1, wantMsgs1) { - t.Fatalf("Received messages is %v, want %v", gotMsgs1, wantMsgs1) + if t.Failed() { + t.FailNow() } // Register another subscriber and ensure that it receives the last published message. 
ts2 := newTestSubscriber(numPublished) pubsub.Subscribe(ts2) - wantMsgs2 := wantMsgs1[len(wantMsgs1)-1:] select { - case <-ts2.onMsgCh: + case m := <-ts2.onMsgCh: + if m != numPublished-1 { + t.Fatalf("Received unexpected message: %q; want: %q", m, numPublished-1) + } case <-time.After(defaultTestShortTimeout): - t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") - } - if gotMsgs2 := ts2.receivedMsgs(); !cmp.Equal(gotMsgs2, wantMsgs2) { - t.Fatalf("Received messages is %v, want %v", gotMsgs2, wantMsgs2) + t.Fatal("Timeout when expecting the onMessage() callback to be invoked") } wg.Add(3) @@ -130,43 +109,43 @@ func (s) TestPubSub_PublishMsgs_RegisterSubs_And_Stop(t *testing.T) { go func() { for i := 0; i < numPublished; i++ { pubsub.Publish(i) - wantMsgs1 = append(wantMsgs1, i) - wantMsgs2 = append(wantMsgs2, i) } wg.Done() }() - errCh := make(chan error, 1) go func() { + defer wg.Done() for i := 0; i < numPublished; i++ { select { - case <-ts1.onMsgCh: + case m := <-ts1.onMsgCh: + if m != i { + t.Errorf("Received unexpected message: %q; want: %q", m, i) + return + } case <-time.After(defaultTestTimeout): - errCh <- fmt.Errorf("") + t.Error("Timeout when expecting the onMessage() callback to be invoked") + return } } - wg.Done() + }() go func() { + defer wg.Done() for i := 0; i < numPublished; i++ { select { - case <-ts2.onMsgCh: + case m := <-ts2.onMsgCh: + if m != i { + t.Errorf("Received unexpected message: %q; want: %q", m, i) + return + } case <-time.After(defaultTestTimeout): - errCh <- fmt.Errorf("") + t.Error("Timeout when expecting the onMessage() callback to be invoked") + return } } - wg.Done() }() wg.Wait() - select { - case <-errCh: - t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") - default: - } - if gotMsgs1 := ts1.receivedMsgs(); !cmp.Equal(gotMsgs1, wantMsgs1) { - t.Fatalf("Received messages is %v, want %v", gotMsgs1, wantMsgs1) - } - if gotMsgs2 := ts2.receivedMsgs(); !cmp.Equal(gotMsgs2, wantMsgs2) 
{ - t.Fatalf("Received messages is %v, want %v", gotMsgs2, wantMsgs2) + if t.Failed() { + t.FailNow() } pubsub.Stop() @@ -178,9 +157,9 @@ func (s) TestPubSub_PublishMsgs_RegisterSubs_And_Stop(t *testing.T) { // pubsub has already closed. select { case <-ts1.onMsgCh: - t.Fatalf("The callback was invoked after pubsub being stopped") + t.Fatal("The callback was invoked after pubsub being stopped") case <-ts2.onMsgCh: - t.Fatalf("The callback was invoked after pubsub being stopped") + t.Fatal("The callback was invoked after pubsub being stopped") case <-time.After(defaultTestShortTimeout): } } @@ -197,15 +176,15 @@ func (s) TestPubSub_PublishMsgs_BeforeRegisterSub(t *testing.T) { ts := newTestSubscriber(numPublished) pubsub.Subscribe(ts) - wantMsgs := []int{numPublished - 1} // Ensure that the subscriber callback is invoked with a previously // published message. select { - case <-ts.onMsgCh: - if gotMsgs := ts.receivedMsgs(); !cmp.Equal(gotMsgs, wantMsgs) { - t.Fatalf("Received messages is %v, want %v", gotMsgs, wantMsgs) + case d := <-ts.onMsgCh: + if d != numPublished-1 { + t.Fatalf("Unexpected message received: %q; %q", d, numPublished-1) } + case <-time.After(defaultTestShortTimeout): - t.Fatalf("Timeout when expecting the onMessage() callback to be invoked") + t.Fatal("Timeout when expecting the onMessage() callback to be invoked") } } From df3e021458f763bbe99d7ef219151a4220157179 Mon Sep 17 00:00:00 2001 From: Antoine Tollenaere Date: Wed, 5 Jul 2023 18:59:56 +0200 Subject: [PATCH 988/998] status: fix panic when servers return a wrapped error with status OK (#6374) --- status/status.go | 29 +++++++++++++++++++++++++---- status/status_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 4 deletions(-) diff --git a/status/status.go b/status/status.go index 53910fb7c901..bcf2e4d81beb 100644 --- a/status/status.go +++ b/status/status.go @@ -77,11 +77,18 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of 
err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, or if err wraps a type satisfying this, the appropriate Status is -// returned. For wrapped errors, the message returned contains the entire -// err.Error() text and not just the wrapped status. +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. // // - Otherwise, err is an error not compatible with this package. In this // case, a Status is returned with codes.Unknown and err's Error() message, @@ -92,10 +99,24 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { + if gs.GRPCStatus() == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } return gs.GRPCStatus(), true } var gs grpcstatus if errors.As(err, &gs) { + if gs.GRPCStatus() == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. 
+ return New(codes.Unknown, err.Error()), false + } p := gs.GRPCStatus().Proto() p.Message = err.Error() return status.FromProto(p), true diff --git a/status/status_test.go b/status/status_test.go index b0bb3fcb67cc..216d18bb27b9 100644 --- a/status/status_test.go +++ b/status/status_test.go @@ -202,6 +202,33 @@ func (s) TestFromErrorWrapped(t *testing.T) { } } +type customErrorNilStatus struct { +} + +func (c customErrorNilStatus) Error() string { + return "test" +} + +func (c customErrorNilStatus) GRPCStatus() *Status { + return nil +} + +func (s) TestFromErrorImplementsInterfaceReturnsOKStatus(t *testing.T) { + err := customErrorNilStatus{} + s, ok := FromError(err) + if ok || s.Code() != codes.Unknown || s.Message() != err.Error() { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, codes.Unknown, err.Error()) + } +} + +func (s) TestFromErrorImplementsInterfaceReturnsOKStatusWrapped(t *testing.T) { + err := fmt.Errorf("wrapping: %w", customErrorNilStatus{}) + s, ok := FromError(err) + if ok || s.Code() != codes.Unknown || s.Message() != err.Error() { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, codes.Unknown, err.Error()) + } +} + func (s) TestFromErrorImplementsInterfaceWrapped(t *testing.T) { const code, message = codes.Internal, "test description" err := fmt.Errorf("wrapped error: %w", customError{Code: code, Message: message}) From 11feb0a9afd844fd2ab1f18dca02ad6a344b21bf Mon Sep 17 00:00:00 2001 From: Gina Yeh Date: Wed, 5 Jul 2023 10:47:46 -0700 Subject: [PATCH 989/998] resolver: delete Target.Scheme and Target.Authority (#6363) * Delete resolver.Target.Scheme and resolver.Target.Authority * cleanup - wrap block comments @ 80 columns --- clientconn.go | 10 +- clientconn_parsed_target_test.go | 91 ++++--------------- internal/resolver/dns/dns_resolver.go | 74 ++++++++------- internal/resolver/dns/dns_resolver_test.go | 9 +- resolver/resolver.go | 4 - xds/googledirectpath/googlec2p.go | 2 - 
.../clusterresolver/resource_resolver_dns.go | 2 +- 7 files changed, 71 insertions(+), 121 deletions(-) diff --git a/clientconn.go b/clientconn.go index 95a7459b02f6..8e1d93fc45b3 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1807,19 +1807,15 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and url. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - return resolver.Target{ - Scheme: u.Scheme, - Authority: u.Host, - URL: *u, - }, nil + return resolver.Target{URL: *u}, nil } // Determine channel authority. The order of precedence is as follows: diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go index e957bca78c1e..1ff46aaf08c7 100644 --- a/clientconn_parsed_target_test.go +++ b/clientconn_parsed_target_test.go @@ -23,7 +23,6 @@ import ( "errors" "fmt" "net" - "net/url" "testing" "time" @@ -38,64 +37,29 @@ func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { defScheme := resolver.GetDefaultScheme() tests := []struct { target string - badScheme bool wantParsed resolver.Target }{ // No scheme is specified. 
- {target: "://", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://"))}}, - {target: ":///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///"))}}, - {target: "://a/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://a/"))}}, - {target: ":///a", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///a"))}}, - {target: "://a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://a/b"))}}, - {target: "/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/"))}}, - {target: "a/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a/b"))}}, - {target: "a//b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a//b"))}}, - {target: "google.com", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "google.com"))}}, - {target: "google.com/?a=b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "google.com/?a=b"))}}, - {target: "/unix/socket/address", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/unix/socket/address"))}}, + {target: "://a/b", 
wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://a/b"))}}, + {target: "a//b", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a//b"))}}, // An unregistered scheme is specified. - {target: "a:///", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:///"))}}, - {target: "a://b/", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a://b/"))}}, - {target: "a:///b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:///b"))}}, - {target: "a://b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a://b/c"))}}, - {target: "a:b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:b"))}}, - {target: "a:/b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:/b"))}}, - {target: "a://b", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a://b"))}}, + {target: "a:///", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:///"))}}, + {target: "a:b", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:b"))}}, // A registered scheme is specified. 
- {target: "dns:///google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "", URL: *testutils.MustParseURL("dns:///google.com")}}, - {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", URL: *testutils.MustParseURL("dns://a.server.com/google.com")}}, - {target: "dns://a.server.com/google.com/?a=b", wantParsed: resolver.Target{Scheme: "dns", Authority: "a.server.com", URL: *testutils.MustParseURL("dns://a.server.com/google.com/?a=b")}}, - {target: "unix:///a/b/c", wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:///a/b/c")}}, - {target: "unix-abstract:a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a/b/c")}}, - {target: "unix-abstract:a b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a b")}}, - {target: "unix-abstract:a:b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a:b")}}, - {target: "unix-abstract:a-b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:a-b")}}, - {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:/ a///://::!@#$%25^&*()b")}}, - {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:passthrough:abc")}}, - {target: "unix-abstract:unix:///abc", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:unix:///abc")}}, - {target: "unix-abstract:///a/b/c", wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:///a/b/c")}}, - {target: "unix-abstract:///", 
wantParsed: resolver.Target{Scheme: "unix-abstract", Authority: "", URL: *testutils.MustParseURL("unix-abstract:///")}}, - {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{Scheme: "passthrough", Authority: "", URL: *testutils.MustParseURL("passthrough:///unix:///a/b/c")}}, + {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns://a.server.com/google.com")}}, + {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix-abstract:/ a///://::!@#$%25^&*()b")}}, + {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix-abstract:passthrough:abc")}}, + {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{URL: *testutils.MustParseURL("passthrough:///unix:///a/b/c")}}, // Cases for `scheme:absolute-path`. - {target: "dns:/a/b/c", wantParsed: resolver.Target{Scheme: "dns", Authority: "", URL: *testutils.MustParseURL("dns:/a/b/c")}}, - {target: "unregistered:/a/b/c", badScheme: true, wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL("unregistered:/a/b/c")}}, + {target: "dns:/a/b/c", wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns:/a/b/c")}}, + {target: "unregistered:/a/b/c", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "unregistered:/a/b/c"))}}, } for _, test := range tests { t.Run(test.target, func(t *testing.T) { - target := test.target - if test.badScheme { - target = defScheme + ":///" + target - } - url, err := url.Parse(target) - if err != nil { - t.Fatalf("Unexpected error parsing URL: %v", err) - } - test.wantParsed.URL = *url - cc, err := Dial(test.target, WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Dial(%q) failed: %v", test.target, err) @@ -132,7 +96,6 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { defScheme := 
resolver.GetDefaultScheme() tests := []struct { target string - badScheme bool wantParsed resolver.Target wantDialerAddress string }{ @@ -140,72 +103,58 @@ func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { // different behaviors with a custom dialer. { target: "unix:a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:a/b/c")}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix:a/b/c")}, wantDialerAddress: "unix:a/b/c", }, { target: "unix:/a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:/a/b/c")}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix:/a/b/c")}, wantDialerAddress: "unix:///a/b/c", }, { target: "unix:///a/b/c", - wantParsed: resolver.Target{Scheme: "unix", Authority: "", URL: *testutils.MustParseURL("unix:///a/b/c")}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix:///a/b/c")}, wantDialerAddress: "unix:///a/b/c", }, { target: "dns:///127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "", URL: *testutils.MustParseURL("dns:///127.0.0.1:50051")}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns:///127.0.0.1:50051")}, wantDialerAddress: "127.0.0.1:50051", }, { target: ":///127.0.0.1:50051", - badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///127.0.0.1:50051"))}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///127.0.0.1:50051"))}, wantDialerAddress: ":///127.0.0.1:50051", }, { target: "dns://authority/127.0.0.1:50051", - wantParsed: resolver.Target{Scheme: "dns", Authority: "authority", URL: *testutils.MustParseURL("dns://authority/127.0.0.1:50051")}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns://authority/127.0.0.1:50051")}, wantDialerAddress: "127.0.0.1:50051", }, { target: 
"://authority/127.0.0.1:50051", - badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://authority/127.0.0.1:50051"))}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://authority/127.0.0.1:50051"))}, wantDialerAddress: "://authority/127.0.0.1:50051", }, { target: "/unix/socket/address", - badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/unix/socket/address"))}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/unix/socket/address"))}, wantDialerAddress: "/unix/socket/address", }, { target: "", - badScheme: true, - wantParsed: resolver.Target{Scheme: defScheme, Authority: "", URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ""))}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ""))}, wantDialerAddress: "", }, { target: "passthrough://a.server.com/google.com", - wantParsed: resolver.Target{Scheme: "passthrough", Authority: "a.server.com", URL: *testutils.MustParseURL("passthrough://a.server.com/google.com")}, + wantParsed: resolver.Target{URL: *testutils.MustParseURL("passthrough://a.server.com/google.com")}, wantDialerAddress: "google.com", }, } for _, test := range tests { t.Run(test.target, func(t *testing.T) { - target := test.target - if test.badScheme { - target = defScheme + ":///" + target - } - url, err := url.Parse(target) - if err != nil { - t.Fatalf("Unexpected error parsing URL: %v", err) - } - test.wantParsed.URL = *url - addrCh := make(chan string, 1) dialer := func(ctx context.Context, address string) (net.Conn, error) { addrCh <- address diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 09a667f33cb0..99e1e5b36c89 100644 --- 
a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -62,7 +62,8 @@ const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. @@ -86,14 +87,14 @@ var ( minDNSResRate = 30 * time.Second ) -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: addressDialer(authorityWithPort), }, nil } @@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. 
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) + d.resolver, err = newNetResolver(target.URL.Host) if err != nil { return nil, err } @@ -180,19 +182,22 @@ type dnsResolver struct { ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. 
+// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 timer = newTimerDNSResRate(minDNSResRate) select { @@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } @@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. 
+ // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. 
// examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index d67ee7d080bf..a66ffffd3ce1 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -1419,14 +1419,14 @@ func TestCustomAuthority(t *testing.T) { true, }, } - oldCustomAuthorityDialler := customAuthorityDialler + oldAddressDialer := addressDialer defer func() { - customAuthorityDialler = oldCustomAuthorityDialler + addressDialer = oldAddressDialer }() for _, a := range tests { errChan := make(chan error, 1) - customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + addressDialer = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { if authority != a.authorityWant { errChan <- fmt.Errorf("wrong custom authority passed to resolver. 
input: %s expected: %s actual: %s", a.authority, a.authorityWant, authority) } else { @@ -1441,8 +1441,7 @@ func TestCustomAuthority(t *testing.T) { b := NewBuilder() cc := &testClientConn{target: mockEndpointTarget, errChan: make(chan error, 1)} target := resolver.Target{ - Authority: a.authority, - URL: *testutils.MustParseURL(fmt.Sprintf("scheme://%s/%s", a.authority, mockEndpointTarget)), + URL: *testutils.MustParseURL(fmt.Sprintf("scheme://%s/%s", a.authority, mockEndpointTarget)), } r, err := b.Build(target, cc, resolver.BuildOptions{}) diff --git a/resolver/resolver.go b/resolver/resolver.go index 353c10b69a5b..1b7f382e3a03 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -264,10 +264,6 @@ type ClientConn interface { // - "unknown_scheme://authority/endpoint" // Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 20891c7a4cb8..f8f749835c24 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -97,7 +97,6 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts if !runDirectPath() { // If not xDS, fallback to DNS. - t.Scheme = dnsName t.URL.Scheme = dnsName return resolver.Get(dnsName).Build(t, cc, opts) } @@ -144,7 +143,6 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts } // Create and return an xDS resolver. 
- t.Scheme = xdsName t.URL.Scheme = xdsName if envconfig.XDSFederation { t = resolver.Target{ diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 06af9cc6df32..4146959e22aa 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -75,7 +75,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver) *dnsDiscov return ret } - r, err := newDNS(resolver.Target{Scheme: "dns", URL: *u}, ret, resolver.BuildOptions{}) + r, err := newDNS(resolver.Target{URL: *u}, ret, resolver.BuildOptions{}) if err != nil { topLevelResolver.onError(fmt.Errorf("failed to build DNS resolver for target %q: %v", target, err)) return ret From fc0aa4689cd434c21cd4cbbbe9ef5af842ccb326 Mon Sep 17 00:00:00 2001 From: Anirudh Ramachandra Date: Mon, 10 Jul 2023 14:48:27 -0700 Subject: [PATCH 990/998] client: encode the authority by default (#6428) --- clientconn.go | 61 +++++++++++++++++++++++++++++++++++++++++- clientconn_test.go | 37 +++++++++++++++++++++++++ test/authority_test.go | 2 +- 3 files changed, 98 insertions(+), 2 deletions(-) diff --git a/clientconn.go b/clientconn.go index 8e1d93fc45b3..04a12c57e42e 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1818,6 +1818,61 @@ func parseTarget(target string) (resolver.Target, error) { return resolver.Target{URL: *u}, nil } +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. 
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimeters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) +} + // Determine channel authority. The order of precedence is as follows: // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake @@ -1868,7 +1923,11 @@ func (cc *ClientConn) determineAuthority() error { // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - cc.authority = endpoint + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. 
+ cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil diff --git a/clientconn_test.go b/clientconn_test.go index 3cd04a743444..281c9618606f 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -1221,3 +1221,40 @@ func stayConnected(cc *ClientConn) { } } } + +func (s) TestURLAuthorityEscape(t *testing.T) { + tests := []struct { + name string + authority string + want string + }{ + { + name: "ipv6_authority", + authority: "[::1]", + want: "[::1]", + }, + { + name: "with_user_and_host", + authority: "userinfo@host:10001", + want: "userinfo@host:10001", + }, + { + name: "with_multiple_slashes", + authority: "projects/123/network/abc/service", + want: "projects%2F123%2Fnetwork%2Fabc%2Fservice", + }, + { + name: "all_possible_allowed_chars", + authority: "abc123-._~!$&'()*+,;=@:[]", + want: "abc123-._~!$&'()*+,;=@:[]", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got, want := encodeAuthority(test.authority), test.want; got != want { + t.Errorf("encodeAuthority(%s) = %s, want %s", test.authority, got, test.want) + } + }) + } +} diff --git a/test/authority_test.go b/test/authority_test.go index 44095a23a2fe..a4d481f24f92 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -126,7 +126,7 @@ var authorityTests = []authorityTest{ name: "UnixPassthrough", address: "/tmp/sock.sock", target: "passthrough:///unix:///tmp/sock.sock", - authority: "unix:///tmp/sock.sock", + authority: "unix:%2F%2F%2Ftmp%2Fsock.sock", dialTargetWant: "unix:///tmp/sock.sock", }, { From bf5b7aecd53ba679d72d43bbf61dee40633f6344 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 10 Jul 2023 19:56:45 -0700 Subject: [PATCH 991/998] clusterresolver: handle EDS nacks and resource-not-found errors correctly (#6436) --- .../clusterresolver/clusterresolver.go | 2 +- .../e2e_test/aggregate_cluster_test.go | 294 ++++++++++++++++++ 
.../clusterresolver/e2e_test/eds_impl_test.go | 227 +++++++++++++- .../clusterresolver/resource_resolver.go | 7 +- .../clusterresolver/resource_resolver_eds.go | 56 +++- 5 files changed, 571 insertions(+), 15 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index 5eadd1ac1d0e..6faf81ab552c 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -85,7 +85,7 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.logger = prefixLogger(b) b.logger.Infof("Created") - b.resourceWatcher = newResourceResolver(b) + b.resourceWatcher = newResourceResolver(b, b.logger) b.cc = &ccWrapper{ ClientConn: cc, resourceWatcher: b.resourceWatcher, diff --git a/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go index ea0cdf6fc257..b4740eea6d0b 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go @@ -27,13 +27,21 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + 
"google.golang.org/protobuf/types/known/wrapperspb" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -771,3 +779,289 @@ func (s) TestAggregateCluster_BadEDS_BadDNS(t *testing.T) { t.Fatalf("EmptyCall() failed with error %v, want %v", err, dnsErr) } } + +// TestAggregateCluster_NoFallback_EDSNackedWithPreviousGoodUpdate tests the +// scenario where the top-level cluster is an aggregate cluster that resolves to +// an EDS and LOGICAL_DNS cluster. The management server first sends a good EDS +// response for the EDS cluster and the test verifies that RPCs get routed to +// the EDS cluster. The management server then sends a bad EDS response. The +// test verifies that the cluster_resolver LB policy continues to use the +// previously received good update and that RPCs still get routed to the EDS +// cluster. +func (s) TestAggregateCluster_NoFallback_EDSNackedWithPreviousGoodUpdate(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + mgmtServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. Also + // configure an endpoints resource for the EDS cluster. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // EDS cluster is of higher priority than the LOGICAL_DNS cluster. 
+ client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Push an EDS resource from the management server that is expected to be + // NACKed by the xDS client. Since the cluster_resolver LB policy has a + // previously received good EDS resource, it will continue to use that. + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to get routed to the EDS cluster for the next + // second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + } +} + +// TestAggregateCluster_Fallback_EDSNackedWithoutPreviousGoodUpdate tests the +// scenario where the top-level cluster is an aggregate cluster that resolves to +// an EDS and LOGICAL_DNS cluster. The management server sends a bad EDS +// response. The test verifies that the cluster_resolver LB policy falls back to +// the LOGICAL_DNS cluster, because it is supposed to treat the bad EDS response +// as though it received an update with no endpoints. +func (s) TestAggregateCluster_Fallback_EDSNackedWithoutPreviousGoodUpdate(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. 
+ mgmtServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. + const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + + // Set a load balancing weight of 0 for the backend in the EDS resource. + // This is expected to be NACKed by the xDS client. Since the + // cluster_resolver LB policy has no previously received good EDS resource, + // it will treat this as though it received an update with no endpoints. + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Make an RPC and ensure that it gets routed to the LOGICAL_DNS cluster. + peer := &peer.Peer{} + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[1].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[1].Addr) + } +} + +// TestAggregateCluster_Fallback_EDS_ResourceNotFound tests the scenario where +// the top-level cluster is an aggregate cluster that resolves to an EDS and +// LOGICAL_DNS cluster. The management server does not respond with the EDS +// cluster. The test verifies that the cluster_resolver LB policy falls back to +// the LOGICAL_DNS cluster in this case. +func (s) TestAggregateCluster_Fallback_EDS_ResourceNotFound(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + mgmtServer, nodeID, _, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start a test backend for the LOGICAL_DNS cluster. + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. No + // endpoints are configured for the EDS cluster. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client talking to the above management server, configured + // with a short watch expiry timeout. + xdsClient, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cds LB policy as the top-level LB policy, and a corresponding config + // with a single cluster. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cds_experimental":{ + "cluster": "%s" + } + }] + }`, clusterName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Make an RPC with a short deadline. 
We expect this RPC to not succeed + // because the DNS resolver has not responded with endpoint addresses. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: server.Address}}}) + + // Make an RPC and ensure that it gets routed to the LOGICAL_DNS cluster. + // Even though the EDS cluster is of higher priority, since the management + // server does not respond with an EDS resource, the cluster_resolver LB + // policy is expected to fallback to the LOGICAL_DNS cluster once the watch + // timeout expires. 
+ peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != server.Address { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, server.Address) + } +} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index f49528499356..f2089e9640a0 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -25,6 +25,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" @@ -41,7 +42,9 @@ import ( "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/priority" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/wrapperspb" @@ -62,8 +65,9 @@ const ( localityName2 = "my-locality-2" localityName3 = "my-locality-3" - defaultTestTimeout = 5 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond + defaultTestWatchExpiryTimeout = 500 * time.Millisecond ) type s struct { @@ -852,6 +856,225 @@ func (s) TestEDS_ClusterResourceUpdates(t *testing.T) { } } +// TestEDS_BadUpdateWithoutPreviousGoodUpdate tests the case where the +// management server sends a bad update (one that is NACKed by the xDS client). 
+// Since the cluster_resolver LB policy does not have a previously received good +// update, it is expected to treat this bad update as though it received an +// update with no endpoints. Hence RPCs are expected to fail with "all +// priorities removed" error. +func (s) TestEDS_BadUpdateWithoutPreviousGoodUpdate(t *testing.T) { + // Spin up a management server to receive xDS resources from. + mgmtServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start a backend server that implements the TestService. + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Create an EDS resource with a load balancing weight of 0. This will + // result in the resource being NACKed by the xDS client. Since the + // cluster_resolver LB policy does not have a previously received good EDS + // update, it should treat this update as an empty EDS update. + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: testutils.ParsePort(t, server.Address)}}, + }}) + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. 
+ r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn and verify that RPCs fail with "all priorities + // removed" error. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + client := testgrpc.NewTestServiceClient(cc) + if err := waitForAllPrioritiesRemovedError(ctx, t, client); err != nil { + t.Fatal(err) + } +} + +// TestEDS_BadUpdateWithPreviousGoodUpdate tests the case where the +// cluster_resolver LB policy receives a good EDS update from the management +// server and the test verifies that RPCs are successful. Then, a bad update is +// received from the management server (one that is NACKed by the xDS client). +// The test verifies that the previously received good update is still being +// used and that RPCs are still successful. +func (s) TestEDS_BadUpdateWithPreviousGoodUpdate(t *testing.T) { + // Spin up a management server to receive xDS resources from. + mgmtServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start a backend server that implements the TestService. + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Create an EDS resource for consumption by the test. 
+ resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: testutils.ParsePort(t, server.Address)}}, + }}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being roundrobined across the single backend. + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, []resolver.Address{{Addr: server.Address}}); err != nil { + t.Fatal(err) + } + + // Update the endpoints resource in the management server with a load + // balancing weight of 0. 
This will result in the resource being NACKed by + // the xDS client. But since the cluster_resolver LB policy has a previously + // received good EDS update, it should continue using it. + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to succeed for the next second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, []resolver.Address{{Addr: server.Address}}); err != nil { + t.Fatal(err) + } + } +} + +// TestEDS_ResourceNotFound tests the case where the requested EDS resource does +// not exist on the management server. Once the watch timer associated with the +// requested resource expires, the cluster_resolver LB policy receives a +// "resource-not-found" callback from the xDS client and is expected to treat it +// as though it received an update with no endpoints. Hence RPCs are expected to +// fail with "all priorities removed" error. +func (s) TestEDS_ResourceNotFound(t *testing.T) { + // Spin up a management server to receive xDS resources from. + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server, configured + // with a short watch expiry timeout. + nodeID := uuid.New().String() + xdsClient, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Configure no resources on the management server. 
+ resources := e2e.UpdateOptions{NodeID: nodeID, SkipValidation: true} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn and verify that RPCs fail with "all priorities + // removed" error. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + client := testgrpc.NewTestServiceClient(cc) + if err := waitForAllPrioritiesRemovedError(ctx, t, client); err != nil { + t.Fatal(err) + } +} + // waitForAllPrioritiesRemovedError repeatedly makes RPCs using the // TestServiceClient until they fail with an error which indicates that all // priorities have been removed. 
A non-nil error is returned if the context diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 580734a02154..142548ab3861 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -21,6 +21,7 @@ package clusterresolver import ( "sync" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -84,6 +85,7 @@ type discoveryMechanismAndResolver struct { type resourceResolver struct { parent *clusterResolverBalancer + logger *grpclog.PrefixLogger updateChannel chan *resourceUpdate // mu protects the slice and map, and content of the resolvers in the slice. @@ -104,9 +106,10 @@ type resourceResolver struct { childNameGeneratorSeqID uint64 } -func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver { +func newResourceResolver(parent *clusterResolverBalancer, logger *grpclog.PrefixLogger) *resourceResolver { return &resourceResolver{ parent: parent, + logger: logger, updateChannel: make(chan *resourceUpdate, 1), childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), } @@ -172,7 +175,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { var resolver endpointsResolver switch dm.Type { case DiscoveryMechanismTypeEDS: - resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr) + resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr, rr.logger) case DiscoveryMechanismTypeLogicalDNS: resolver = newDNSResolver(dmKey.name, rr) } diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go index 6fec20151f42..86af73cbae21 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -21,6 +21,7 @@ package clusterresolver 
import ( "sync" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -30,20 +31,20 @@ type edsDiscoveryMechanism struct { cancelWatch func() topLevelResolver topLevelResolver stopped *grpcsync.Event + logger *grpclog.PrefixLogger - mu sync.Mutex - update xdsresource.EndpointsUpdate - updateReceived bool + mu sync.Mutex + update *xdsresource.EndpointsUpdate // Nil indicates no update received so far. } func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { er.mu.Lock() defer er.mu.Unlock() - if !er.updateReceived { + if er.update == nil { return nil, false } - return er.update, true + return *er.update, true } func (er *edsDiscoveryMechanism) resolveNow() { @@ -63,10 +64,11 @@ func (er *edsDiscoveryMechanism) stop() { // newEDSResolver returns an implementation of the endpointsResolver interface // that uses EDS to resolve the given name to endpoints. -func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver) *edsDiscoveryMechanism { +func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *edsDiscoveryMechanism { ret := &edsDiscoveryMechanism{ nameToWatch: nameToWatch, topLevelResolver: topLevelResolver, + logger: logger, stopped: grpcsync.NewEvent(), } ret.cancelWatch = xdsresource.WatchEndpoints(producer, nameToWatch, ret) @@ -80,8 +82,7 @@ func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceD } er.mu.Lock() - er.update = update.Resource - er.updateReceived = true + er.update = &update.Resource er.mu.Unlock() er.topLevelResolver.onUpdate() @@ -92,7 +93,28 @@ func (er *edsDiscoveryMechanism) OnError(err error) { return } - er.topLevelResolver.onError(err) + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported error: %v", er.nameToWatch, err) + } + + er.mu.Lock() + if 
er.update != nil { + // Continue using a previously received good configuration if one + // exists. + er.mu.Unlock() + return + } + + // Else report an empty update that would result in no priority child being + // created for this discovery mechanism. This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. + er.update = &xdsresource.EndpointsUpdate{} + er.mu.Unlock() + + er.topLevelResolver.onUpdate() } func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { @@ -100,5 +122,19 @@ func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { return } - er.topLevelResolver.onError(xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Endpoints not found in received response", er.nameToWatch)) + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported resource-does-not-exist error", er.nameToWatch) + } + + // Report an empty update that would result in no priority child being + // created for this discovery mechanism. This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. 
+ er.mu.Lock() + er.update = &xdsresource.EndpointsUpdate{} + er.mu.Unlock() + + er.topLevelResolver.onUpdate() } From f0280f9d3d2f3fbe45de1ab5873a19ec678c74c8 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Tue, 11 Jul 2023 08:52:37 -0700 Subject: [PATCH 992/998] xds: require EDS service name in new-style CDS clusters (gRFC A47) (#6438) --- .../xdsclient/xdsresource/unmarshal_cds.go | 4 ++++ .../xdsclient/xdsresource/unmarshal_cds_test.go | 17 +++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 9f8530111a73..cb1d0b2dfdcb 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -23,6 +23,7 @@ import ( "fmt" "net" "strconv" + "strings" "time" v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" @@ -173,6 +174,9 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu } ret.ClusterType = ClusterTypeEDS ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + if strings.HasPrefix(ret.ClusterName, "xdstp:") && ret.EDSServiceName == "" { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS service name is not set with a new-style cluster name: %+v", cluster) + } return ret, nil case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: if !envconfig.XDSAggregateAndDNS { diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go index e057b951326d..67f0f7896b26 100644 --- a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -1347,6 +1347,23 @@ func (s) TestUnmarshalCluster(t *testing.T) { Raw: v3ClusterAnyWithEDSConfigSourceSelf, }, }, + { + name: "xdstp cluster resource with unset EDS service name", + resource: testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: 
"xdstp:foo", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: "", + }, + }), + wantName: "xdstp:foo", + wantErr: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { From db32c5bfeb563e7ce6661b37d6a55688cbeb4a20 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 11 Jul 2023 19:02:15 +0200 Subject: [PATCH 993/998] Fix preloader mode in benchmarks (#6359) --- benchmark/benchmain/main.go | 88 +++++++++++++++++++++++-------------- benchmark/benchmark.go | 58 +++++++++++++++++++++--- 2 files changed, 106 insertions(+), 40 deletions(-) diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 1366c18c972b..f4b96a5d460b 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -53,6 +53,7 @@ import ( "reflect" "runtime" "runtime/pprof" + "strconv" "strings" "sync" "sync/atomic" @@ -81,7 +82,8 @@ var ( traceMode = flags.StringWithAllowedValues("trace", toggleModeOff, fmt.Sprintf("Trace mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) preloaderMode = flags.StringWithAllowedValues("preloader", toggleModeOff, - fmt.Sprintf("Preloader mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) + fmt.Sprintf("Preloader mode - One of: %v, preloader works only in streaming and unconstrained modes and will be ignored in unary mode", + strings.Join(allToggleModes, ", ")), allToggleModes) channelzOn = flags.StringWithAllowedValues("channelz", toggleModeOff, fmt.Sprintf("Channelz mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) compressorMode = flags.StringWithAllowedValues("compression", compModeOff, @@ -401,20 +403,11 @@ func makeFuncUnary(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { } func 
makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { - clients, cleanup := makeClients(bf) + streams, req, cleanup := setupStream(bf, false) - streams := make([][]testgrpc.BenchmarkService_StreamingCallClient, bf.Connections) - for cn := 0; cn < bf.Connections; cn++ { - tc := clients[cn] - streams[cn] = make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) - for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { - - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - streams[cn][pos] = stream - } + var preparedMsg [][]*grpc.PreparedMsg + if bf.EnablePreloader { + preparedMsg = prepareMessages(streams, req) } return func(cn, pos int) { @@ -426,24 +419,25 @@ func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { if bf.RespPayloadCurve != nil { respSizeBytes = bf.RespPayloadCurve.ChooseRandom() } - streamCaller(streams[cn][pos], reqSizeBytes, respSizeBytes) + var req interface{} + if bf.EnablePreloader { + req = preparedMsg[cn][pos] + } else { + pl := bm.NewPayload(testpb.PayloadType_COMPRESSABLE, reqSizeBytes) + req = &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSizeBytes), + Payload: pl, + } + } + streamCaller(streams[cn][pos], req) }, cleanup } func makeFuncUnconstrainedStreamPreloaded(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { - streams, req, cleanup := setupUnconstrainedStream(bf) + streams, req, cleanup := setupStream(bf, true) - preparedMsg := make([][]*grpc.PreparedMsg, len(streams)) - for cn, connStreams := range streams { - preparedMsg[cn] = make([]*grpc.PreparedMsg, len(connStreams)) - for pos, stream := range connStreams { - preparedMsg[cn][pos] = &grpc.PreparedMsg{} - err := preparedMsg[cn][pos].Encode(stream, req) - if err != nil { - logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[cn][pos], req, stream, err) - } - } - } + preparedMsg := prepareMessages(streams, req) return 
func(cn, pos int) { streams[cn][pos].SendMsg(preparedMsg[cn][pos]) @@ -453,7 +447,7 @@ func makeFuncUnconstrainedStreamPreloaded(bf stats.Features) (rpcSendFunc, rpcRe } func makeFuncUnconstrainedStream(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { - streams, req, cleanup := setupUnconstrainedStream(bf) + streams, req, cleanup := setupStream(bf, true) return func(cn, pos int) { streams[cn][pos].Send(req) @@ -462,13 +456,19 @@ func makeFuncUnconstrainedStream(bf stats.Features) (rpcSendFunc, rpcRecvFunc, r }, cleanup } -func setupUnconstrainedStream(bf stats.Features) ([][]testgrpc.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) { +func setupStream(bf stats.Features, unconstrained bool) ([][]testgrpc.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) { clients, cleanup := makeClients(bf) streams := make([][]testgrpc.BenchmarkService_StreamingCallClient, bf.Connections) - md := metadata.Pairs(benchmark.UnconstrainedStreamingHeader, "1", - benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String()) - ctx := metadata.NewOutgoingContext(context.Background(), md) + ctx := context.Background() + if unconstrained { + md := metadata.Pairs(benchmark.UnconstrainedStreamingHeader, "1", benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String()) + ctx = metadata.NewOutgoingContext(ctx, md) + } + if bf.EnablePreloader { + md := metadata.Pairs(benchmark.PreloadMsgSizeHeader, strconv.Itoa(bf.RespSizeBytes), benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String()) + ctx = metadata.NewOutgoingContext(ctx, md) + } for cn := 0; cn < bf.Connections; cn++ { tc := clients[cn] streams[cn] = make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) @@ -491,6 +491,20 @@ func setupUnconstrainedStream(bf stats.Features) ([][]testgrpc.BenchmarkService_ return streams, req, cleanup } +func prepareMessages(streams 
[][]testgrpc.BenchmarkService_StreamingCallClient, req *testpb.SimpleRequest) [][]*grpc.PreparedMsg { + preparedMsg := make([][]*grpc.PreparedMsg, len(streams)) + for cn, connStreams := range streams { + preparedMsg[cn] = make([]*grpc.PreparedMsg, len(connStreams)) + for pos, stream := range connStreams { + preparedMsg[cn][pos] = &grpc.PreparedMsg{} + if err := preparedMsg[cn][pos].Encode(stream, req); err != nil { + logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[cn][pos], req, stream, err) + } + } + } + return preparedMsg +} + // Makes a UnaryCall gRPC request using the given BenchmarkServiceClient and // request and response sizes. func unaryCaller(client testgrpc.BenchmarkServiceClient, reqSize, respSize int) { @@ -499,8 +513,8 @@ func unaryCaller(client testgrpc.BenchmarkServiceClient, reqSize, respSize int) } } -func streamCaller(stream testgrpc.BenchmarkService_StreamingCallClient, reqSize, respSize int) { - if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { +func streamCaller(stream testgrpc.BenchmarkService_StreamingCallClient, req interface{}) { + if err := bm.DoStreamingRoundTripPreloaded(stream, req); err != nil { logger.Fatalf("DoStreamingRoundTrip failed: %v", err) } } @@ -790,6 +804,9 @@ func processFlags() *benchOpts { if len(opts.features.reqSizeBytes) != 0 { log.Fatalf("you may not specify -reqPayloadCurveFiles and -reqSizeBytes at the same time") } + if len(opts.features.enablePreloader) != 0 { + log.Fatalf("you may not specify -reqPayloadCurveFiles and -preloader at the same time") + } for _, file := range *reqPayloadCurveFiles { pc, err := stats.NewPayloadCurve(file) if err != nil { @@ -807,6 +824,9 @@ func processFlags() *benchOpts { if len(opts.features.respSizeBytes) != 0 { log.Fatalf("you may not specify -respPayloadCurveFiles and -respSizeBytes at the same time") } + if len(opts.features.enablePreloader) != 0 { + log.Fatalf("you may not specify -respPayloadCurveFiles and -preloader at the same time") + } for _, 
file := range *respPayloadCurveFiles { pc, err := stats.NewPayloadCurve(file) if err != nil { diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 2e11167004db..27101954aa30 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -28,6 +28,7 @@ import ( "log" "math/rand" "net" + "strconv" "time" "google.golang.org/grpc" @@ -83,13 +84,35 @@ const UnconstrainedStreamingHeader = "unconstrained-streaming" // the server should sleep between consecutive RPC responses. const UnconstrainedStreamingDelayHeader = "unconstrained-streaming-delay" +// PreloadMsgSizeHeader indicates that the client is going to ask for +// a fixed response size and passes this size to the server. +// The server is expected to preload the response on startup. +const PreloadMsgSizeHeader = "preload-msg-size" + func (s *testServer) StreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer) error { + preloadMsgSize := 0 + if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[PreloadMsgSizeHeader]) != 0 { + val := md[PreloadMsgSizeHeader][0] + var err error + preloadMsgSize, err = strconv.Atoi(val) + if err != nil { + return fmt.Errorf("%q header value is not an integer: %s", PreloadMsgSizeHeader, err) + } + } + if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[UnconstrainedStreamingHeader]) != 0 { - return s.UnconstrainedStreamingCall(stream) + return s.UnconstrainedStreamingCall(stream, preloadMsgSize) } response := &testpb.SimpleResponse{ Payload: new(testpb.Payload), } + preloadedResponse := &grpc.PreparedMsg{} + if preloadMsgSize > 0 { + setPayload(response.Payload, testpb.PayloadType_COMPRESSABLE, preloadMsgSize) + if err := preloadedResponse.Encode(stream, response); err != nil { + return err + } + } in := new(testpb.SimpleRequest) for { // use ServerStream directly to reuse the same testpb.SimpleRequest object @@ -101,14 +124,19 @@ func (s *testServer) StreamingCall(stream testgrpc.BenchmarkService_StreamingCal if 
err != nil { return err } - setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) - if err := stream.Send(response); err != nil { + if preloadMsgSize > 0 { + err = stream.SendMsg(preloadedResponse) + } else { + setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) + err = stream.Send(response) + } + if err != nil { return err } } } -func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer) error { +func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer, preloadMsgSize int) error { maxSleep := 0 if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[UnconstrainedStreamingDelayHeader]) != 0 { val := md[UnconstrainedStreamingDelayHeader][0] @@ -135,6 +163,13 @@ func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService } setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) + preloadedResponse := &grpc.PreparedMsg{} + if preloadMsgSize > 0 { + if err := preloadedResponse.Encode(stream, response); err != nil { + return err + } + } + go func() { for { // Using RecvMsg rather than Recv to prevent reallocation of SimpleRequest. @@ -154,7 +189,12 @@ func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService if maxSleep > 0 { time.Sleep(time.Duration(rand.Intn(maxSleep))) } - err := stream.Send(response) + var err error + if preloadMsgSize > 0 { + err = stream.SendMsg(preloadedResponse) + } else { + err = stream.Send(response) + } switch status.Code(err) { case codes.Unavailable, codes.Canceled: return @@ -258,7 +298,13 @@ func DoStreamingRoundTrip(stream testgrpc.BenchmarkService_StreamingCallClient, ResponseSize: int32(respSize), Payload: pl, } - if err := stream.Send(req); err != nil { + return DoStreamingRoundTripPreloaded(stream, req) +} + +// DoStreamingRoundTripPreloaded performs a round trip for a single streaming rpc with preloaded payload. 
+func DoStreamingRoundTripPreloaded(stream testgrpc.BenchmarkService_StreamingCallClient, req interface{}) error { + // req could be either *testpb.SimpleRequest or *grpc.PreparedMsg + if err := stream.SendMsg(req); err != nil { return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) } if _, err := stream.Recv(); err != nil { From 8e9c8f8e7133eb7ed64438837636dae2c5d92ce6 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 11 Jul 2023 18:35:39 -0700 Subject: [PATCH 994/998] grpc: do not use balancer attributes during address comparison (#6439) --- attributes/attributes.go | 28 +++- clientconn.go | 20 ++- internal/stubserver/stubserver.go | 14 +- resolver/resolver.go | 4 + test/pickfirst_test.go | 264 +++++++++++++++++++++++++++--- 5 files changed, 290 insertions(+), 40 deletions(-) diff --git a/attributes/attributes.go b/attributes/attributes.go index 3efca4591493..49712aca33ae 100644 --- a/attributes/attributes.go +++ b/attributes/attributes.go @@ -112,19 +112,31 @@ func (a *Attributes) String() string { sb.WriteString("{") first := true for k, v := range a.m { - var key, val string - if str, ok := k.(interface{ String() string }); ok { - key = str.String() - } - if str, ok := v.(interface{ String() string }); ok { - val = str.String() - } if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) first = false } sb.WriteString("}") return sb.String() } + +func str(x interface{}) string { + if v, ok := x.(fmt.Stringer); ok { + return v.String() + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. 
+func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/clientconn.go b/clientconn.go index 04a12c57e42e..bfd7555a8bf2 100644 --- a/clientconn.go +++ b/clientconn.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -867,6 +868,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -874,7 +889,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), @@ -995,8 +1010,9 @@ func equalAddresses(a, b []resolver.Address) bool { // connections or connection attempts. 
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + addrs = copyAddressesWithoutBalancerAttributes(addrs) if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index 3c89ff6823bd..cfc4b0f2e82d 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -60,6 +60,10 @@ type StubServer struct { Address string Target string + // Custom listener to use for serving. If unspecified, a new listener is + // created on a local port. + Listener net.Listener + cleanups []func() // Lambdas executed in Stop(); populated by Start(). // Set automatically if Target == "" @@ -118,9 +122,13 @@ func (ss *StubServer) StartServer(sopts ...grpc.ServerOption) error { ss.R = manual.NewBuilderWithScheme("whatever") } - lis, err := net.Listen(ss.Network, ss.Address) - if err != nil { - return fmt.Errorf("net.Listen(%q, %q) = %v", ss.Network, ss.Address, err) + lis := ss.Listener + if lis == nil { + var err error + lis, err = net.Listen(ss.Network, ss.Address) + if err != nil { + return fmt.Errorf("net.Listen(%q, %q) = %v", ss.Network, ss.Address, err) + } } ss.Address = lis.Addr().String() ss.cleanups = append(ss.cleanups, func() { lis.Close() }) diff --git a/resolver/resolver.go b/resolver/resolver.go index 1b7f382e3a03..d8db6f5d34eb 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -142,6 +142,10 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. 
When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go index 55659b928a57..d41786a556a3 100644 --- a/test/pickfirst_test.go +++ b/test/pickfirst_test.go @@ -20,7 +20,7 @@ package test import ( "context" - "sync" + "fmt" "testing" "time" @@ -299,6 +299,11 @@ func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) { } } +// TestPickFirst_StickyTransientFailure tests the case where pick_first is +// configured on a channel, and the backend is configured to close incoming +// connections as soon as they are accepted. The test verifies that the channel +// enters TransientFailure and stays there. The test also verifies that the +// pick_first LB policy is constantly trying to reconnect to the backend. func (s) TestPickFirst_StickyTransientFailure(t *testing.T) { // Spin up a local server which closes the connection as soon as it receives // one. It also sends a signal on a channel whenver it received a connection. @@ -346,40 +351,27 @@ func (s) TestPickFirst_StickyTransientFailure(t *testing.T) { } t.Cleanup(func() { cc.Close() }) - var wg sync.WaitGroup - wg.Add(2) - // Spin up a goroutine that waits for the channel to move to - // TransientFailure. After that it checks that the channel stays in - // TransientFailure, until Shutdown. - go func() { - defer wg.Done() - for state := cc.GetState(); state != connectivity.TransientFailure; state = cc.GetState() { - if !cc.WaitForStateChange(ctx, state) { - t.Errorf("Timeout when waiting for state to change to TransientFailure. Current state is %s", state) - return - } - } + awaitState(ctx, t, cc, connectivity.TransientFailure) - // TODO(easwars): this waits for 10s. Need shorter deadline here. 
Basically once the second goroutine exits, we should exit from here too. + // Spawn a goroutine to ensure that the channel stays in TransientFailure. + // The call to cc.WaitForStateChange will return false when the main + // goroutine exits and the context is cancelled. + go func() { if cc.WaitForStateChange(ctx, connectivity.TransientFailure) { if state := cc.GetState(); state != connectivity.Shutdown { t.Errorf("Unexpected state change from TransientFailure to %s", cc.GetState()) } } }() - // Spin up a goroutine which ensures that the pick_first LB policy is - // constantly trying to reconnect. - go func() { - defer wg.Done() - for i := 0; i < 10; i++ { - select { - case <-connCh: - case <-time.After(2 * defaultTestShortTimeout): - t.Error("Timeout when waiting for pick_first to reconnect") - } + + // Ensures that the pick_first LB policy is constantly trying to reconnect. + for i := 0; i < 10; i++ { + select { + case <-connCh: + case <-time.After(2 * defaultTestShortTimeout): + t.Error("Timeout when waiting for pick_first to reconnect") } - }() - wg.Wait() + } } // Tests the PF LB policy with shuffling enabled. @@ -475,3 +467,221 @@ func (s) TestPickFirst_ShuffleAddressListDisabled(t *testing.T) { t.Fatal(err) } } + +// setupPickFirstWithListenerWrapper is very similar to setupPickFirst, but uses +// a wrapped listener that the test can use to track accepted connections. +func setupPickFirstWithListenerWrapper(t *testing.T, backendCount int, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer, []*testutils.ListenerWrapper) { + t.Helper() + + // Initialize channelz. Used to determine pending RPC count. 
+ czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + backends := make([]*stubserver.StubServer, backendCount) + addrs := make([]resolver.Address, backendCount) + listeners := make([]*testutils.ListenerWrapper, backendCount) + for i := 0; i < backendCount; i++ { + lis := testutils.NewListenerWrapper(t, nil) + backend := &stubserver.StubServer{ + Listener: lis, + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + + backends[i] = backend + addrs[i] = resolver.Address{Addr: backend.Address} + listeners[i] = lis + } + + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(pickFirstServiceConfig), + } + dopts = append(dopts, opts...) + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // At this point, the resolver has not returned any addresses to the channel. + // This RPC must block until the context expires. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + return cc, r, backends, listeners +} + +// TestPickFirst_AddressUpdateWithAttributes tests the case where an address +// update received by the pick_first LB policy differs in attributes. 
Addresses +// which differ in attributes are considered different from the perspective of +// subconn creation and connection establishment and the test verifies that new +// connections are created when attributes change. +func (s) TestPickFirst_AddressUpdateWithAttributes(t *testing.T) { + cc, r, backends, listeners := setupPickFirstWithListenerWrapper(t, 2) + + // Add a set of attributes to the addresses before pushing them to the + // pick_first LB policy through the manual resolver. + addrs := stubBackendsToResolverAddrs(backends) + for i := range addrs { + addrs[i].Attributes = addrs[i].Attributes.WithValue("test-attribute-1", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that RPCs succeed to the first backend in the list. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Grab the wrapped connection from the listener wrapper. This will be used + // to verify the connection is closed. + val, err := listeners[0].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Add another set of attributes to the addresses, and push them to the + // pick_first LB policy through the manual resolver. Leave the order of the + // addresses unchanged. + for i := range addrs { + addrs[i].Attributes = addrs[i].Attributes.WithValue("test-attribute-2", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // A change in the address attributes results in the new address being + // considered different to the current address. This will result in the old + // connection being closed and a new connection to the same backend (since + // address order is not modified). 
+ if _, err := conn.CloseCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when expecting existing connection to be closed: %v", err) + } + val, err = listeners[0].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } + conn = val.(*testutils.ConnWrapper) + + // Add another set of attributes to the addresses, and push them to the + // pick_first LB policy through the manual resolver. Reverse of the order + // of addresses. + for i := range addrs { + addrs[i].Attributes = addrs[i].Attributes.WithValue("test-attribute-3", fmt.Sprintf("%d", i)) + } + addrs[0], addrs[1] = addrs[1], addrs[0] + r.UpdateState(resolver.State{Addresses: addrs}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Ensure that the old connection is closed and a new connection is + // established to the first address in the new list. + if _, err := conn.CloseCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when expecting existing connection to be closed: %v", err) + } + _, err = listeners[1].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } +} + +// TestPickFirst_AddressUpdateWithBalancerAttributes tests the case where an +// address update received by the pick_first LB policy differs in balancer +// attributes, which are meant only for consumption by LB policies. In this +// case, the test verifies that new connections are not created when the address +// update only changes the balancer attributes. +func (s) TestPickFirst_AddressUpdateWithBalancerAttributes(t *testing.T) { + cc, r, backends, listeners := setupPickFirstWithListenerWrapper(t, 2) + + // Add a set of balancer attributes to the addresses before pushing them to + // the pick_first LB policy through the manual resolver. 
+ addrs := stubBackendsToResolverAddrs(backends) + for i := range addrs { + addrs[i].BalancerAttributes = addrs[i].BalancerAttributes.WithValue("test-attribute-1", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that RPCs succeed to the expected backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Grab the wrapped connection from the listener wrapper. This will be used + // to verify the connection is not closed. + val, err := listeners[0].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Add a set of balancer attributes to the addresses before pushing them to + // the pick_first LB policy through the manual resolver. Leave the order of + // the addresses unchanged. + for i := range addrs { + addrs[i].BalancerAttributes = addrs[i].BalancerAttributes.WithValue("test-attribute-2", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that no new connection is established, and ensure that the old + // connection is not closed. 
+ for i := range listeners { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := listeners[i].NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting no new connection: %v", err) + } + } + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting existing connection to stay active: %v", err) + } + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Add a set of balancer attributes to the addresses before pushing them to + // the pick_first LB policy through the manual resolver. Reverse of the + // order of addresses. + for i := range addrs { + addrs[i].BalancerAttributes = addrs[i].BalancerAttributes.WithValue("test-attribute-3", fmt.Sprintf("%d", i)) + } + addrs[0], addrs[1] = addrs[1], addrs[0] + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that no new connection is established, and ensure that the old + // connection is not closed. 
+ for i := range listeners { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := listeners[i].NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting no new connection: %v", err) + } + } + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting existing connection to stay active: %v", err) + } + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } +} From d1868a539b5acb10daceeeb4fcbc6a8dedc7a532 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 12 Jul 2023 17:35:22 -0700 Subject: [PATCH 995/998] clusterresolver: add logs for dns discovery mechanism error cases (#6444) --- .../balancer/clusterresolver/resource_resolver.go | 2 +- .../clusterresolver/resource_resolver_dns.go | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go index 142548ab3861..0aa47913d693 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -177,7 +177,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { case DiscoveryMechanismTypeEDS: resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr, rr.logger) case DiscoveryMechanismTypeLogicalDNS: - resolver = newDNSResolver(dmKey.name, rr) + resolver = newDNSResolver(dmKey.name, rr, rr.logger) } dmAndResolver = discoveryMechanismAndResolver{ dm: dm, diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 4146959e22aa..0da74f628db5 100644 --- a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go 
+++ b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -23,6 +23,8 @@ import ( "net/url" "sync" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -42,6 +44,7 @@ type dnsDiscoveryMechanism struct { target string topLevelResolver topLevelResolver dnsR resolver.Resolver + logger *grpclog.PrefixLogger mu sync.Mutex addrs []string @@ -64,10 +67,11 @@ type dnsDiscoveryMechanism struct { // // The `dnsR` field is unset if we run into erros in this function. Therefore, a // nil check is required wherever we access that field. -func newDNSResolver(target string, topLevelResolver topLevelResolver) *dnsDiscoveryMechanism { +func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism { ret := &dnsDiscoveryMechanism{ target: target, topLevelResolver: topLevelResolver, + logger: logger, } u, err := url.Parse("dns:///" + target) if err != nil { @@ -116,6 +120,10 @@ func (dr *dnsDiscoveryMechanism) stop() { // updates from the real DNS resolver. 
func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported an update: %s", dr.target, pretty.ToJSON(state)) + } + dr.mu.Lock() addrs := make([]string, len(state.Addresses)) for i, a := range state.Addresses { @@ -130,6 +138,10 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { } func (dr *dnsDiscoveryMechanism) ReportError(err error) { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported error: %v", dr.target, err) + } + dr.topLevelResolver.onError(err) } From 9489082068eb8cade6ea4f943ac83a205e0e382b Mon Sep 17 00:00:00 2001 From: Jongwoo Han Date: Fri, 14 Jul 2023 03:20:21 +0900 Subject: [PATCH 996/998] github: replace deprecated command with environment file (#6417) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d8f2cd854a51..d3dda5376c08 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -51,7 +51,7 @@ jobs: run: | PACKAGE_NAME=protoc-gen-go-grpc.${GITHUB_REF#refs/tags/cmd/protoc-gen-go-grpc/}.${{ matrix.goos }}.${{ matrix.goarch }}.tar.gz tar -czvf $PACKAGE_NAME -C build . 
- echo ::set-output name=name::${PACKAGE_NAME} + echo "name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT - name: Upload asset uses: actions/upload-release-asset@v1 From 6b64be9784958b37d2b1d7618b37a7f8a824654b Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Wed, 26 Jul 2023 10:20:15 -0700 Subject: [PATCH 997/998] resolver/weighted_round_robin: remove experimental suffix from name --- balancer/weightedroundrobin/balancer.go | 2 +- test/xds/xds_client_custom_lb_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/balancer/weightedroundrobin/balancer.go b/balancer/weightedroundrobin/balancer.go index a164d1bedd7e..797b9aa0a960 100644 --- a/balancer/weightedroundrobin/balancer.go +++ b/balancer/weightedroundrobin/balancer.go @@ -43,7 +43,7 @@ import ( ) // Name is the name of the weighted round robin balancer. -const Name = "weighted_round_robin_experimental" +const Name = "weighted_round_robin" func init() { balancer.Register(bb{}) diff --git a/test/xds/xds_client_custom_lb_test.go b/test/xds/xds_client_custom_lb_test.go index 4624c252b0bb..87bd437785a6 100644 --- a/test/xds/xds_client_custom_lb_test.go +++ b/test/xds/xds_client_custom_lb_test.go @@ -25,7 +25,7 @@ import ( "time" "google.golang.org/grpc" - _ "google.golang.org/grpc/balancer/weightedroundrobin" // To register weighted_round_robin_experimental. 
+ _ "google.golang.org/grpc/balancer/weightedroundrobin" // To register weighted_round_robin "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/stubserver" From 87bf02ad24f6cc071d2553eb5d62332194bba1fe Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 26 Jul 2023 10:22:46 -0700 Subject: [PATCH 998/998] Change version to 1.57.0 (#6448) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index 387ebf5970f8..353cfd52862c 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.57.0-dev" +const Version = "1.57.0"